From 7a23cf23a465905e27cdaaec844c8881cbe4c30e Mon Sep 17 00:00:00 2001 From: Xiaohai Xu Date: Sat, 7 Mar 2020 16:35:51 +0800 Subject: [PATCH 1/8] update config test Signed-off-by: Xiaohai Xu --- tests/milvus_python_test/test_config.py | 984 ++++++++++++++++++++++-- 1 file changed, 941 insertions(+), 43 deletions(-) diff --git a/tests/milvus_python_test/test_config.py b/tests/milvus_python_test/test_config.py index a235b50485..b321aa7fa3 100644 --- a/tests/milvus_python_test/test_config.py +++ b/tests/milvus_python_test/test_config.py @@ -10,13 +10,7 @@ from utils import * import ujson -dim = 128 -index_file_size = 10 CONFIG_TIMEOUT = 80 -nprobe = 1 -top_k = 1 -tag = "1970-01-01" -nb = 6000 class TestCacheConfig: @@ -111,6 +105,42 @@ class TestCacheConfig: status, config_value = connect.get_config("cache_config", "insert_buffer_size") assert status.OK() + @pytest.mark.level(2) + def test_get_cache_insert_data_invalid_parent_key(self, connect, table): + ''' + target: get invalid parent key + method: call get_config without parent_key: cache_config + expected: status not ok + ''' + invalid_configs = gen_invalid_cache_config() + invalid_configs.extend(["Cache_config", "cache config", "cache_Config", "cacheconfig", "cache_config\n", "cache_config\t"]) + for config in invalid_configs: + status, config_value = connect.get_config(config, "cache_insert_data") + assert not status.OK() + + @pytest.mark.level(2) + def test_get_cache_insert_data_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: cache_insert_data + expected: status not ok + ''' + invalid_configs = gen_invalid_cache_config() + invalid_configs.extend(["Cache_insert_data", "cacheinsertdata", " cache_insert_data"]) + for config in invalid_configs: + status, config_value = connect.get_config("cache_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_cache_insert_data_valid(self, connect, table): + 
''' + target: get cache_insert_data + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("cache_config", "cache_insert_data") + assert status.OK() + """ ****************************************************************** @@ -207,10 +237,11 @@ class TestCacheConfig: ''' target: set insert_buffer_size method: call get_config correctly - expected: status ok + expected: status ok, set successfully ''' self.reset_configs(connect) status, reply = connect.set_config("cache_config", "insert_buffer_size", 2) + assert status.OK() status, config_value = connect.get_config("cache_config", "insert_buffer_size") assert status.OK() assert config_value == '2' @@ -251,9 +282,8 @@ class TestCacheConfig: status, reply = connect.set_config("cache_config", "insert_buffer_size", mem_available + 1) assert not status.OK() - # TODO: CI FAIL @pytest.mark.timeout(CONFIG_TIMEOUT) - def _test_set_cache_config_out_of_memory_value_B(self, connect, table): + def test_set_cache_config_out_of_memory_value_B(self, connect, table): ''' target: set cpu_cache_capacity / insert_buffer_size to be out-of-memory method: call set_config with invalid values @@ -266,10 +296,10 @@ class TestCacheConfig: assert status.OK() status, insert_buffer_size = connect.get_config("cache_config", "insert_buffer_size") assert status.OK() - # status, reply = connect.set_config("cache_config", "cpu_cache_capacity", mem_available - int(insert_buffer_size) + 1) - # assert not status.OK() - # status, reply = connect.set_config("cache_config", "insert_buffer_size", mem_available - int(cpu_cache_capacity) + 1) - # assert not status.OK() + status, reply = connect.set_config("cache_config", "cpu_cache_capacity", mem_available - int(insert_buffer_size) + 1) + assert not status.OK() + status, reply = connect.set_config("cache_config", "insert_buffer_size", mem_available - int(cpu_cache_capacity) + 1) + assert not status.OK() def test_set_cache_config_out_of_memory_value_C(self, 
connect, table): ''' @@ -293,6 +323,37 @@ class TestCacheConfig: assert int(insert_buffer_size_new) == int(insert_buffer_size) + 1 self.reset_configs(connect) + @pytest.mark.level(2) + def test_set_cache_insert_data_invalid_parent_key(self, connect, table): + ''' + target: set invalid parent key + method: call set_config without parent_key: cache_config + expected: status not ok + ''' + self.reset_configs(connect) + invalid_configs = gen_invalid_cache_config() + invalid_configs.extend(["Cache_config", "cache config", "cache_Config", "cacheconfig", "cache_config\n", "cache_config\t"]) + for config in invalid_configs: + status, reply = connect.set_config(config, "cache_insert_data", "1") + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_cache_insert_data_valid(self, connect, table): + ''' + target: set cache_insert_data + method: call get_config correctly + expected: status ok, set successfully + ''' + self.reset_configs(connect) + # On/Off true/false 1/0 YES/NO + valid_configs = ["Off", "false", 0, "NO", "On", "true", "1", "YES"] + for config in valid_configs: + status, reply = connect.set_config("cache_config", "cache_insert_data", config) + assert status.OK() + status, config_value = connect.get_config("cache_config", "cache_insert_data") + assert status.OK() + assert config_value == str(config) + class TestEngineConfig: """ @@ -753,30 +814,13 @@ class TestGPUResourceConfig: ''' if str(connect._cmd("mode")[1]) == "CPU": pytest.skip("Only support GPU mode") - status, reply = connect.set_config("gpu_resource_config", "enable", "false") - assert status.OK() - status, config_value = connect.get_config("gpu_resource_config", "enable") - assert config_value == "false" - status, reply = connect.set_config("gpu_resource_config", "enable", "true") - assert status.OK() - status, config_value = connect.get_config("gpu_resource_config", "enable") - assert config_value == "true" - status, reply = connect.set_config("gpu_resource_config", 
"enable", 0) - assert status.OK() - status, config_value = connect.get_config("gpu_resource_config", "enable") - assert config_value == "0" - status, reply = connect.set_config("gpu_resource_config", "enable", 1) - assert status.OK() - status, config_value = connect.get_config("gpu_resource_config", "enable") - assert config_value == "1" - status, reply = connect.set_config("gpu_resource_config", "enable", "off") - assert status.OK() - status, config_value = connect.get_config("gpu_resource_config", "enable") - assert config_value == "off" - status, reply = connect.set_config("gpu_resource_config", "enable", "ON") - assert status.OK() - status, config_value = connect.get_config("gpu_resource_config", "enable") - assert config_value == "ON" + valid_configs = ["off", "False", "0", "nO", "on", "True", 1, "yES"] + for config in valid_configs: + status, reply = connect.set_config("gpu_resource_config", "enable", config) + assert status.OK() + status, config_value = connect.get_config("gpu_resource_config", "enable") + assert status.OK() + assert config_value == str(config) @pytest.mark.timeout(CONFIG_TIMEOUT) def test_set_cache_capacity_invalid_parent_key(self, connect, table): @@ -843,11 +887,10 @@ class TestGPUResourceConfig: ''' if str(connect._cmd("mode")[1]) == "CPU": pytest.skip("Only support GPU mode") - for i in ["gpu0"]: - status, reply = connect.set_config("gpu_resource_config", "search_resources", i) - assert status.OK() - status, config_value = connect.get_config("gpu_resource_config", "search_resources") - assert config_value == i + status, reply = connect.set_config("gpu_resource_config", "search_resources", "gpu0") + assert status.OK() + status, config_value = connect.get_config("gpu_resource_config", "search_resources") + assert config_value == "gpu0" @pytest.mark.timeout(CONFIG_TIMEOUT) def test_set_search_resources_invalid_values(self, connect, table): @@ -886,7 +929,7 @@ class TestGPUResourceConfig: ''' if str(connect._cmd("mode")[1]) == "CPU": 
pytest.skip("Only support GPU mode") - for i in ["gpu0"]: + for i in ["gpu0", "gpu0,gpu1", "gpu1,gpu0"]: status, reply = connect.set_config("gpu_resource_config", "build_index_resources", i) assert status.OK() @@ -903,3 +946,858 @@ class TestGPUResourceConfig: status, reply = connect.set_config("gpu_resource_config", "build_index_resources", i) assert not status.OK() self.reset_configs(connect) + + +class TestServerConfig: + """ + ****************************************************************** + The following cases are used to test `get_config` function + ****************************************************************** + """ + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_address_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: address + expected: status not ok + ''' + invalid_configs = ["Address", "addresses", "address "] + for config in invalid_configs: + status, config_value = connect.get_config("server_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_address_valid(self, connect, table): + ''' + target: get address + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("server_config", "address") + assert status.OK() + + @pytest.mark.level(2) + def test_get_port_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: port + expected: status not ok + ''' + invalid_configs = ["Port", "PORT", "port "] + for config in invalid_configs: + status, config_value = connect.get_config("server_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_port_valid(self, connect, table): + ''' + target: get port + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("server_config", "port") + assert status.OK() + + 
@pytest.mark.level(2) + def test_get_deploy_mode_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: deploy_mode + expected: status not ok + ''' + invalid_configs = ["Deploy_mode", "deploymode", "deploy_mode "] + for config in invalid_configs: + status, config_value = connect.get_config("server_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_deploy_mode_valid(self, connect, table): + ''' + target: get deploy_mode + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("server_config", "deploy_mode") + assert status.OK() + + @pytest.mark.level(2) + def test_get_time_zone_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: time_zone + expected: status not ok + ''' + invalid_configs = ["time", "timezone", "time_zone "] + for config in invalid_configs: + status, config_value = connect.get_config("server_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_time_zone_valid(self, connect, table): + ''' + target: get time_zone + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("server_config", "time_zone") + assert status.OK() + assert "UTC" in config_value + + @pytest.mark.level(2) + def test_get_web_port_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: web_port + expected: status not ok + ''' + invalid_configs = ["webport", "Web_port", "web_port "] + for config in invalid_configs: + status, config_value = connect.get_config("server_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_web_port_valid(self, connect, table): + ''' + target: get web_port + method: call get_config correctly + expected: status ok + ''' 
+ status, config_value = connect.get_config("server_config", "web_port") + assert status.OK() + + + """ + ****************************************************************** + The following cases are used to test `set_config` function + ****************************************************************** + """ + def gen_valid_timezones(self): + time_zones = [] + for i in range(0, 13): + time_zones.append("UTC+" + str(i)) + time_zones.append("UTC-" + str(i)) + time_zones.extend(["UTC+13", "UTC+14"]) + return time_zones + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_server_config_invalid_child_key(self, connect, table): + ''' + target: set invalid child key + method: call set_config with invalid child_key + expected: status not ok + ''' + status, reply = connect.set_config("server_config", "child_key", 19530) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_address_valid(self, connect, table): + ''' + target: set address + method: call set_config correctly + expected: status ok, set successfully + ''' + status, reply = connect.set_config("server_config", "address", '0.0.0.0') + assert status.OK() + status, config_value = connect.get_config("server_config", "address") + assert status.OK() + assert config_value == '0.0.0.0' + + def test_set_port_valid(self, connect, table): + ''' + target: set port + method: call set_config correctly + expected: status ok, set successfully + ''' + status, web_port = connect.get_config("server_config", "web_port") + for valid_port in [1025, 65534, 12345, "19530"]: + if str(web_port) == str(valid_port): + # cannot be the same + continue + status, reply = connect.set_config("server_config", "port", valid_port) + assert status.OK() + status, config_value = connect.get_config("server_config", "port") + assert status.OK() + assert config_value == str(valid_port) + + def test_set_port_invalid(self, connect, table): + ''' + target: set port + method: call set_config with port number out of range(1024, 
65535), or same as web_port number + expected: status not ok + ''' + for invalid_port in [1024, 65535, "0", "True", "19530 ", "10000000000"]: + status, reply = connect.set_config("server_config", "port", invalid_port) + assert not status.OK() + status, web_port = connect.get_config("server_config", "web_port") + logging.getLogger().info(web_port) + status, reply = connect.set_config("server_config", "port", web_port) + assert not status.OK() + + def test_set_deploy_mode_valid(self, connect, table): + ''' + target: set deploy_mode + method: call set_config correctly + expected: status ok, set successfully + ''' + for valid_deploy_mode in ["cluster_readonly", "cluster_writable", "single"]: + status, reply = connect.set_config("server_config", "deploy_mode", valid_deploy_mode) + assert status.OK() + status, config_value = connect.get_config("server_config", "deploy_mode") + assert status.OK() + assert config_value == valid_deploy_mode + + def test_set_deploy_mode_invalid(self, connect, table): + ''' + target: set deploy_mode + method: call set_config with invalid deploy_mode + expected: status not ok + ''' + for invalid_deploy_mode in [65535, "0", "Single", "cluster", "cluster-readonly"]: + status, reply = connect.set_config("server_config", "deploy_mode", invalid_deploy_mode) + assert not status.OK() + + def test_set_time_zone_valid(self, connect, table): + ''' + target: set time_zone + method: call set_config correctly + expected: status ok, set successfully + ''' + for valid_time_zone in self.gen_valid_timezones(): + status, reply = connect.set_config("server_config", "time_zone", valid_time_zone) + assert status.OK() + status, config_value = connect.get_config("server_config", "time_zone") + assert status.OK() + assert config_value == valid_time_zone + # reset to default + status, reply = connect.set_config("server_config", "time_zone", "UTC+8") + assert status.OK() + + def test_set_time_zone_invalid(self, connect, table): + ''' + target: set time_zone + method: 
call set_config with invalid time_zone + expected: status not ok + ''' + for invalid_time_zone in ["utc+8", "UTC8", "UTC-13", "UTC+15", "UTC+8:30"]: + logging.getLogger().info(invalid_time_zone) + status, reply = connect.set_config("server_config", "time_zone", invalid_time_zone) + assert not status.OK() + + def test_set_web_port_valid(self, connect, table): + ''' + target: set web_port + method: call set_config correctly + expected: status ok, set successfully + ''' + status, port = connect.get_config("server_config", "port") + for valid_web_port in [1025, 65534, 19121, "19530"]: + if str(valid_web_port) == str(port): + continue + status, reply = connect.set_config("server_config", "web_port", valid_web_port) + assert status.OK() + status, config_value = connect.get_config("server_config", "web_port") + assert status.OK() + assert config_value == str(valid_web_port) + + def test_set_web_port_invalid(self, connect, table): + ''' + target: set web_port + method: call set_config with web_port number out of range(1024, 65535), or same as port number + expected: status not ok + ''' + for invalid_web_port in [1024, 65535, "0", "True", "19530 ", "10000000000"]: + status, reply = connect.set_config("server_config", "web_port", invalid_web_port) + assert not status.OK() + status, port = connect.get_config("server_config", "port") + logging.getLogger().info(port) + status, reply = connect.set_config("server_config", "web_port", port) + assert not status.OK() + + +class TestDBConfig: + """ + ****************************************************************** + The following cases are used to test `get_config` function + ****************************************************************** + """ + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_backend_url_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: backend_url + expected: status not ok + ''' + invalid_configs = ["backend_Url", "backend-url", 
"backend_url "] + for config in invalid_configs: + status, config_value = connect.get_config("db_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_backend_url_valid(self, connect, table): + ''' + target: get backend_url + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("db_config", "backend_url") + assert status.OK() + + @pytest.mark.level(2) + def test_get_preload_table_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: preload_table + expected: status not ok + ''' + invalid_configs = ["preloadtable", "preload_table "] + for config in invalid_configs: + status, config_value = connect.get_config("db_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_preload_table_valid(self, connect, table): + ''' + target: get preload_table + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("db_config", "preload_table") + assert status.OK() + + @pytest.mark.level(2) + def test_get_auto_flush_interval_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: auto_flush_interval + expected: status not ok + ''' + invalid_configs = ["autoFlushInterval", "auto_flush", "auto_flush_interval "] + for config in invalid_configs: + status, config_value = connect.get_config("db_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_auto_flush_interval_valid(self, connect, table): + ''' + target: get auto_flush_interval + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("db_config", "auto_flush_interval") + assert status.OK() + + + """ + ****************************************************************** + The following cases are used to test 
`set_config` function + ****************************************************************** + """ + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_db_config_invalid_child_key(self, connect, table): + ''' + target: set invalid child key + method: call set_config with invalid child_key + expected: status not ok + ''' + status, reply = connect.set_config("db_config", "child_key", 1) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_backend_url_valid(self, connect, table): + ''' + target: set backend_url + method: call set_config correctly + expected: status ok, set successfully + ''' + status, reply = connect.set_config("db_config", "backend_url", 'sqlite://:@:/') + assert status.OK() + status, config_value = connect.get_config("db_config", "backend_url") + assert status.OK() + assert config_value == 'sqlite://:@:/' + + def test_set_preload_table_valid(self, connect, table): + ''' + target: set preload_table + method: call set_config correctly + expected: status ok, set successfully + ''' + status, reply = connect.set_config("db_config", "preload_table", "") + assert status.OK() + status, config_value = connect.get_config("db_config", "preload_table") + assert status.OK() + assert config_value == "" + + def test_set_auto_flush_interval_valid(self, connect, table): + ''' + target: set auto_flush_interval + method: call set_config correctly + expected: status ok, set successfully + ''' + for valid_auto_flush_interval in [0, 15, "3", 1]: + status, reply = connect.set_config("db_config", "auto_flush_interval", valid_auto_flush_interval) + assert status.OK() + status, config_value = connect.get_config("db_config", "auto_flush_interval") + assert status.OK() + assert config_value == str(valid_auto_flush_interval) + + def test_set_auto_flush_interval_invalid(self, connect, table): + ''' + target: set auto_flush_interval + method: call set_config with invalid auto_flush_interval + expected: status not ok + ''' + for 
invalid_auto_flush_interval in [-1, "1.5", "invalid", "1+2"]: + status, reply = connect.set_config("db_config", "auto_flush_interval", invalid_auto_flush_interval) + assert not status.OK() + + +class TestStorageConfig: + """ + ****************************************************************** + The following cases are used to test `get_config` function + ****************************************************************** + """ + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_primary_path_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: primary_path + expected: status not ok + ''' + invalid_configs = ["Primary_path", "primarypath", "primary_path "] + for config in invalid_configs: + status, config_value = connect.get_config("storage_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_primary_path_valid(self, connect, table): + ''' + target: get primary_path + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("storage_config", "primary_path") + assert status.OK() + + @pytest.mark.level(2) + def test_get_secondary_path_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: secondary_path + expected: status not ok + ''' + invalid_configs = ["secondarypath", "secondary_path "] + for config in invalid_configs: + status, config_value = connect.get_config("storage_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_secondary_path_valid(self, connect, table): + ''' + target: get secondary_path + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("storage_config", "secondary_path") + assert status.OK() + + + """ + ****************************************************************** + The following cases are used to test `set_config` 
function + ****************************************************************** + """ + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_storage_config_invalid_child_key(self, connect, table): + ''' + target: set invalid child key + method: call set_config with invalid child_key + expected: status not ok + ''' + status, reply = connect.set_config("storage_config", "child_key", "") + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_primary_path_valid(self, connect, table): + ''' + target: set primary_path + method: call set_config correctly + expected: status ok, set successfully + ''' + status, reply = connect.set_config("storage_config", "primary_path", '/var/lib/milvus') + assert status.OK() + status, config_value = connect.get_config("storage_config", "primary_path") + assert status.OK() + assert config_value == '/var/lib/milvus' + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_secondary_path_valid(self, connect, table): + ''' + target: set secondary_path + method: call set_config correctly + expected: status ok, set successfully + ''' + status, reply = connect.set_config("storage_config", "secondary_path", "") + assert status.OK() + status, config_value = connect.get_config("storage_config", "secondary_path") + assert status.OK() + assert config_value == "" + + +class TestMetricConfig: + """ + ****************************************************************** + The following cases are used to test `get_config` function + ****************************************************************** + """ + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_enable_monitor_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: enable_monitor + expected: status not ok + ''' + invalid_configs = ["enablemonitor", "Enable_monitor", "enable_monitor "] + for config in invalid_configs: + status, config_value = connect.get_config("metric_config", config) + assert not 
status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_enable_monitor_valid(self, connect, table): + ''' + target: get enable_monitor + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("metric_config", "enable_monitor") + assert status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_address_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: address + expected: status not ok + ''' + invalid_configs = ["Address", "addresses", "address "] + for config in invalid_configs: + status, config_value = connect.get_config("metric_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_address_valid(self, connect, table): + ''' + target: get address + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("metric_config", "address") + assert status.OK() + + @pytest.mark.level(2) + def test_get_port_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: port + expected: status not ok + ''' + invalid_configs = ["Port", "PORT", "port "] + for config in invalid_configs: + status, config_value = connect.get_config("metric_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_port_valid(self, connect, table): + ''' + target: get port + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("metric_config", "port") + assert status.OK() + + + """ + ****************************************************************** + The following cases are used to test `set_config` function + ****************************************************************** + """ + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_metric_config_invalid_child_key(self, connect, table): + ''' + target: set 
invalid child key + method: call set_config with invalid child_key + expected: status not ok + ''' + status, reply = connect.set_config("metric_config", "child_key", 19530) + assert not status.OK() + + def test_set_enable_monitor_valid(self, connect, table): + ''' + target: set enable_monitor + method: call set_config correctly + expected: status ok, set successfully + ''' + for valid_enable_monitor in ["Off", "false", 0, "yes", "On", "true", "1", "NO"]: + status, reply = connect.set_config("metric_config", "enable_monitor", valid_enable_monitor) + assert status.OK() + status, config_value = connect.get_config("metric_config", "enable_monitor") + assert status.OK() + assert config_value == str(valid_enable_monitor) + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_address_valid(self, connect, table): + ''' + target: set address + method: call set_config correctly + expected: status ok, set successfully + ''' + status, reply = connect.set_config("metric_config", "address", '127.0.0.1') + assert status.OK() + status, config_value = connect.get_config("metric_config", "address") + assert status.OK() + assert config_value == '127.0.0.1' + + def test_set_port_valid(self, connect, table): + ''' + target: set port + method: call set_config correctly + expected: status ok, set successfully + ''' + for valid_port in [1025, 65534, "19530", "9091"]: + status, reply = connect.set_config("metric_config", "port", valid_port) + assert status.OK() + status, config_value = connect.get_config("metric_config", "port") + assert status.OK() + assert config_value == str(valid_port) + + def test_set_port_invalid(self, connect, table): + ''' + target: set port + method: call set_config with port number out of range(1024, 65535), or same as web_port number + expected: status not ok + ''' + for invalid_port in [1024, 65535, "0", "True", "19530 ", "10000000000"]: + status, reply = connect.set_config("metric_config", "port", invalid_port) + assert not status.OK() + + +class 
TestTracingConfig: + """ + ****************************************************************** + The following cases are used to test `get_config` function + ****************************************************************** + """ + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_json_config_path_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: json_config_path + expected: status not ok + ''' + invalid_configs = ["json_config", "jsonconfigpath", "json_config_path "] + for config in invalid_configs: + status, config_value = connect.get_config("tracing_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_json_config_path_valid(self, connect, table): + ''' + target: get json_config_path + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("tracing_config", "json_config_path") + assert status.OK() + + + """ + ****************************************************************** + The following cases are used to test `set_config` function + ****************************************************************** + """ + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_tracing_config_invalid_child_key(self, connect, table): + ''' + target: set invalid child key + method: call set_config with invalid child_key + expected: status not ok + ''' + status, reply = connect.set_config("tracing_config", "child_key", "") + assert not status.OK() + + @pytest.mark.skip(reason="Currently not supported") + def test_set_json_config_path_valid(self, connect, table): + ''' + target: set json_config_path + method: call set_config correctly + expected: status ok, set successfully + ''' + status, reply = connect.set_config("tracing_config", "json_config_path", "") + assert status.OK() + status, config_value = connect.get_config("tracing_config", "json_config_path") + assert status.OK() + assert config_value == "" + + 
+class TestWALConfig: + """ + ****************************************************************** + The following cases are used to test `get_config` function + ****************************************************************** + """ + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_enable_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: enable + expected: status not ok + ''' + invalid_configs = ["enabled", "Enable", "enable "] + for config in invalid_configs: + status, config_value = connect.get_config("wal_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_enable_valid(self, connect, table): + ''' + target: get enable + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("wal_config", "enable") + assert status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_recovery_error_ignore_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: recovery_error_ignore + expected: status not ok + ''' + invalid_configs = ["recovery-error-ignore", "Recovery_error_ignore", "recovery_error_ignore "] + for config in invalid_configs: + status, config_value = connect.get_config("wal_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_recovery_error_ignore_valid(self, connect, table): + ''' + target: get recovery_error_ignore + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("wal_config", "recovery_error_ignore") + assert status.OK() + + @pytest.mark.level(2) + def test_get_buffer_size_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: buffer_size + expected: status not ok + ''' + invalid_configs = ["buffersize", "Buffer_size", "buffer_size "] + for config 
in invalid_configs: + status, config_value = connect.get_config("wal_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_buffer_size_valid(self, connect, table): + ''' + target: get buffer_size + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("wal_config", "buffer_size") + assert status.OK() + + @pytest.mark.level(2) + def test_get_wal_path_invalid_child_key(self, connect, table): + ''' + target: get invalid child key + method: call get_config without child_key: wal_path + expected: status not ok + ''' + invalid_configs = ["wal", "Wal_path", "wal_path "] + for config in invalid_configs: + status, config_value = connect.get_config("wal_config", config) + assert not status.OK() + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_get_wal_path_valid(self, connect, table): + ''' + target: get wal_path + method: call get_config correctly + expected: status ok + ''' + status, config_value = connect.get_config("wal_config", "wal_path") + assert status.OK() + + + """ + ****************************************************************** + The following cases are used to test `set_config` function + ****************************************************************** + """ + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_wal_config_invalid_child_key(self, connect, table): + ''' + target: set invalid child key + method: call set_config with invalid child_key + expected: status not ok + ''' + status, reply = connect.set_config("wal_config", "child_key", 256) + assert not status.OK() + + def test_set_enable_valid(self, connect, table): + ''' + target: set enable + method: call set_config correctly + expected: status ok, set successfully + ''' + for valid_enable in ["Off", "false", 0, "no", "On", "true", "1", "YES"]: + status, reply = connect.set_config("wal_config", "enable", valid_enable) + assert status.OK() + status, config_value = connect.get_config("wal_config", 
"enable") + assert status.OK() + assert config_value == str(valid_enable) + + def test_set_recovery_error_ignore_valid(self, connect, table): + ''' + target: set recovery_error_ignore + method: call set_config correctly + expected: status ok, set successfully + ''' + for valid_recovery_error_ignore in ["Off", "false", "0", "no", "On", "true", "1", "YES"]: + status, reply = connect.set_config("wal_config", "recovery_error_ignore", valid_recovery_error_ignore) + assert status.OK() + status, config_value = connect.get_config("wal_config", "recovery_error_ignore") + assert status.OK() + assert config_value == valid_recovery_error_ignore + + def test_set_buffer_size_valid_A(self, connect, table): + ''' + target: set buffer_size + method: call set_config correctly + expected: status ok, set successfully + ''' + for valid_buffer_size in [64, 128, "4096", 1000, "256"]: + status, reply = connect.set_config("wal_config", "buffer_size", valid_buffer_size) + assert status.OK() + status, config_value = connect.get_config("wal_config", "buffer_size") + assert status.OK() + assert config_value == str(valid_buffer_size) + + @pytest.mark.timeout(CONFIG_TIMEOUT) + def test_set_wal_path_valid(self, connect, table): + ''' + target: set wal_path + method: call set_config correctly + expected: status ok, set successfully + ''' + status, reply = connect.set_config("wal_config", "wal_path", "/var/lib/milvus/wal") + assert status.OK() + status, config_value = connect.get_config("wal_config", "wal_path") + assert status.OK() + assert config_value == "/var/lib/milvus/wal" From 8e0dccdde0062e6b20e05561e9fa284955ade28e Mon Sep 17 00:00:00 2001 From: Xiaohai Xu Date: Sat, 7 Mar 2020 17:00:02 +0800 Subject: [PATCH 2/8] update config test Signed-off-by: Xiaohai Xu --- tests/milvus_python_test/test_config.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tests/milvus_python_test/test_config.py b/tests/milvus_python_test/test_config.py index b321aa7fa3..d9708550b8 
100644 --- a/tests/milvus_python_test/test_config.py +++ b/tests/milvus_python_test/test_config.py @@ -10,7 +10,13 @@ from utils import * import ujson +dim = 128 +index_file_size = 10 CONFIG_TIMEOUT = 80 +nprobe = 1 +top_k = 1 +tag = "1970-01-01" +nb = 6000 class TestCacheConfig: @@ -929,9 +935,10 @@ class TestGPUResourceConfig: ''' if str(connect._cmd("mode")[1]) == "CPU": pytest.skip("Only support GPU mode") - for i in ["gpu0", "gpu0,gpu1", "gpu1,gpu0"]: - status, reply = connect.set_config("gpu_resource_config", "build_index_resources", i) - assert status.OK() + status, reply = connect.set_config("gpu_resource_config", "build_index_resources", "gpu0") + assert status.OK() + status, config_value = connect.get_config("gpu_resource_config", "build_index_resources") + assert config_value == "gpu0" @pytest.mark.timeout(CONFIG_TIMEOUT) def test_set_build_index_resources_invalid_values(self, connect, table): From f3e7e64716dafcd6c33eab27feeed5c01517094d Mon Sep 17 00:00:00 2001 From: sahuang Date: Mon, 9 Mar 2020 01:41:46 +0000 Subject: [PATCH 3/8] update config test Signed-off-by: sahuang --- tests/milvus_python_test/test_config.py | 30 +++++++------------------ 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/tests/milvus_python_test/test_config.py b/tests/milvus_python_test/test_config.py index d9708550b8..4f6305dc77 100644 --- a/tests/milvus_python_test/test_config.py +++ b/tests/milvus_python_test/test_config.py @@ -1115,11 +1115,7 @@ class TestServerConfig: method: call set_config correctly expected: status ok, set successfully ''' - status, web_port = connect.get_config("server_config", "web_port") for valid_port in [1025, 65534, 12345, "19530"]: - if str(web_port) == str(valid_port): - # cannot be the same - continue status, reply = connect.set_config("server_config", "port", valid_port) assert status.OK() status, config_value = connect.get_config("server_config", "port") @@ -1129,16 +1125,13 @@ class TestServerConfig: def 
test_set_port_invalid(self, connect, table): ''' target: set port - method: call set_config with port number out of range(1024, 65535), or same as web_port number + method: call set_config with port number out of range(1024, 65535) expected: status not ok ''' - for invalid_port in [1024, 65535, "0", "True", "19530 ", "10000000000"]: + for invalid_port in [1024, 65535, "0", "True", "19530 ", "100000"]: + logging.getLogger().info(invalid_port) status, reply = connect.set_config("server_config", "port", invalid_port) assert not status.OK() - status, web_port = connect.get_config("server_config", "web_port") - logging.getLogger().info(web_port) - status, reply = connect.set_config("server_config", "port", web_port) - assert not status.OK() def test_set_deploy_mode_valid(self, connect, table): ''' @@ -1185,7 +1178,7 @@ class TestServerConfig: method: call set_config with invalid time_zone expected: status not ok ''' - for invalid_time_zone in ["utc+8", "UTC8", "UTC-13", "UTC+15", "UTC+8:30"]: + for invalid_time_zone in ["utc+8", "UTC++8", "GMT+8"]: logging.getLogger().info(invalid_time_zone) status, reply = connect.set_config("server_config", "time_zone", invalid_time_zone) assert not status.OK() @@ -1196,10 +1189,7 @@ class TestServerConfig: method: call set_config correctly expected: status ok, set successfully ''' - status, port = connect.get_config("server_config", "port") - for valid_web_port in [1025, 65534, 19121, "19530"]: - if str(valid_web_port) == str(port): - continue + for valid_web_port in [1025, 65534, "12345", 19121]: status, reply = connect.set_config("server_config", "web_port", valid_web_port) assert status.OK() status, config_value = connect.get_config("server_config", "web_port") @@ -1209,16 +1199,12 @@ class TestServerConfig: def test_set_web_port_invalid(self, connect, table): ''' target: set web_port - method: call set_config with web_port number out of range(1024, 65535), or same as port number + method: call set_config with web_port number out 
of range(1024, 65535) expected: status not ok ''' - for invalid_web_port in [1024, 65535, "0", "True", "19530 ", "10000000000"]: + for invalid_web_port in [1024, 65535, "0", "True", "19530 ", "1000000"]: status, reply = connect.set_config("server_config", "web_port", invalid_web_port) assert not status.OK() - status, port = connect.get_config("server_config", "port") - logging.getLogger().info(port) - status, reply = connect.set_config("server_config", "web_port", port) - assert not status.OK() class TestDBConfig: @@ -1584,7 +1570,7 @@ class TestMetricConfig: method: call set_config with port number out of range(1024, 65535), or same as web_port number expected: status not ok ''' - for invalid_port in [1024, 65535, "0", "True", "19530 ", "10000000000"]: + for invalid_port in [1024, 65535, "0", "True", "19530 ", "100000"]: status, reply = connect.set_config("metric_config", "port", invalid_port) assert not status.OK() From 9ee91b4751cbec48b7a7f5102bc1cb0795c75aad Mon Sep 17 00:00:00 2001 From: Xiaohai Xu Date: Mon, 9 Mar 2020 12:00:37 +0800 Subject: [PATCH 4/8] change table to collection Signed-off-by: Xiaohai Xu --- tests/milvus_python_test/conftest.py | 10 +- tests/milvus_python_test/test_add_vectors.py | 808 ++++++------- tests/milvus_python_test/test_collection.py | 1051 +++++++++++++++++ .../test_collection_count.py | 644 ++++++++++ ..._table_info.py => test_collection_info.py} | 164 +-- tests/milvus_python_test/test_compact.py | 768 ++++++------ tests/milvus_python_test/test_config.py | 272 ++--- tests/milvus_python_test/test_connect.py | 2 +- .../milvus_python_test/test_delete_vectors.py | 278 ++--- tests/milvus_python_test/test_flush.py | 148 +-- .../test_get_vector_by_id.py | 190 +-- .../milvus_python_test/test_get_vector_ids.py | 240 ++-- tests/milvus_python_test/test_index.py | 904 +++++++------- tests/milvus_python_test/test_mix.py | 76 +- tests/milvus_python_test/test_partition.py | 184 +-- tests/milvus_python_test/test_search_by_id.py | 544 ++++----- 
.../milvus_python_test/test_search_vectors.py | 444 +++---- tests/milvus_python_test/test_table.py | 1051 ----------------- tests/milvus_python_test/test_table_count.py | 644 ---------- tests/milvus_python_test/test_wal.py | 72 +- tests/milvus_python_test/utils.py | 10 +- 21 files changed, 4252 insertions(+), 4252 deletions(-) create mode 100644 tests/milvus_python_test/test_collection.py create mode 100644 tests/milvus_python_test/test_collection_count.py rename tests/milvus_python_test/{test_table_info.py => test_collection_info.py} (54%) delete mode 100644 tests/milvus_python_test/test_table.py delete mode 100644 tests/milvus_python_test/test_table_count.py diff --git a/tests/milvus_python_test/conftest.py b/tests/milvus_python_test/conftest.py index bc8ebe15b2..88f24453b5 100644 --- a/tests/milvus_python_test/conftest.py +++ b/tests/milvus_python_test/conftest.py @@ -106,7 +106,7 @@ def table(request, connect): def teardown(): status, table_names = connect.show_tables() for table_name in table_names: - connect.delete_table(table_name) + connect.drop_collection(table_name) request.addfinalizer(teardown) @@ -130,7 +130,7 @@ def ip_table(request, connect): def teardown(): status, table_names = connect.show_tables() for table_name in table_names: - connect.delete_table(table_name) + connect.drop_collection(table_name) request.addfinalizer(teardown) @@ -154,7 +154,7 @@ def jac_table(request, connect): def teardown(): status, table_names = connect.show_tables() for table_name in table_names: - connect.delete_table(table_name) + connect.drop_collection(table_name) request.addfinalizer(teardown) @@ -177,7 +177,7 @@ def ham_table(request, connect): def teardown(): status, table_names = connect.show_tables() for table_name in table_names: - connect.delete_table(table_name) + connect.drop_collection(table_name) request.addfinalizer(teardown) @@ -200,7 +200,7 @@ def tanimoto_table(request, connect): def teardown(): status, table_names = connect.show_tables() for table_name 
in table_names: - connect.delete_table(table_name) + connect.drop_collection(table_name) request.addfinalizer(teardown) diff --git a/tests/milvus_python_test/test_add_vectors.py b/tests/milvus_python_test/test_add_vectors.py index e646610ee6..866d766699 100644 --- a/tests/milvus_python_test/test_add_vectors.py +++ b/tests/milvus_python_test/test_add_vectors.py @@ -10,7 +10,7 @@ from utils import * dim = 128 index_file_size = 10 -table_id = "test_add" +collection_id = "test_add" ADD_TIMEOUT = 60 tag = "1970-01-01" add_interval_time = 1.5 @@ -35,122 +35,122 @@ class TestAddBase: pytest.skip("Skip PQ Temporary") return request.param - def test_add_vector_create_table(self, connect, table): + def test_add_vector_create_collection(self, connect, collection): ''' - target: test add vector, then create table again - method: add vector and create table + target: test add vector, then create collection again + method: add vector and create collection expected: status not ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - param = {'table_name': table, + status, ids = connect.add_vectors(collection, vector) + param = {'collection_name': collection, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) assert not status.OK() - def test_add_vector_has_table(self, connect, table): + def test_add_vector_has_collection(self, connect, collection): ''' - target: test add vector, then check table existence - method: add vector and call HasTable - expected: table exists, status ok + target: test add vector, then check collection existence + method: add vector and call Hascollection + expected: collection exists, status ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - assert assert_has_table(connect, table) + status, ids = connect.add_vectors(collection, vector) + assert 
assert_has_collection(connect, collection) @pytest.mark.timeout(ADD_TIMEOUT) - def test_delete_table_add_vector(self, connect, table): + def test_drop_collection_add_vector(self, connect, collection): ''' - target: test add vector after table deleted - method: delete table and add vector + target: test add vector after collection deleted + method: delete collection and add vector expected: status not ok ''' - status = connect.delete_table(table) + status = connect.drop_collection(collection) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert not status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_delete_table_add_vector_another(self, connect, table): + def test_drop_collection_add_vector_another(self, connect, collection): ''' - target: test add vector to table_1 after table_2 deleted - method: delete table_2 and add vector to table_1 + target: test add vector to collection_1 after collection_2 deleted + method: delete collection_2 and add vector to collection_1 expected: status ok ''' - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) - status = connect.delete_table(table) + status = connect.create_collection(param) + status = connect.drop_collection(collection) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(param['table_name'], vector) + status, ids = connect.add_vectors(param['collection_name'], vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_delete_table(self, connect, table): + def test_add_vector_drop_collection(self, connect, collection): ''' - target: test delete table after add vector - method: add vector and delete table + target: test delete collection after add vector + method: add vector and delete collection expected: status ok ''' vector 
= gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - status = connect.delete_table(table) + status, ids = connect.add_vectors(collection, vector) + status = connect.drop_collection(collection) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_delete_another_table(self, connect, table): + def test_add_vector_delete_another_collection(self, connect, collection): ''' - target: test delete table_1 table after add vector to table_2 - method: add vector and delete table + target: test delete collection_1 collection after add vector to collection_2 + method: add vector and delete collection expected: status ok ''' - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_delete_table(self, connect, table): + def test_add_vector_sleep_drop_collection(self, connect, collection): ''' - target: test delete table after add vector for a while - method: add vector, sleep, and delete table + target: test delete collection after add vector for a while + method: add vector, sleep, and delete collection expected: status ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - connect.flush([table]) - status = connect.delete_table(table) + connect.flush([collection]) + status = connect.drop_collection(collection) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_delete_another_table(self, connect, table): + def test_add_vector_sleep_delete_another_collection(self, connect, 
collection): ''' - target: test delete table_1 table after add vector to table_2 for a while - method: add vector , sleep, and delete table + target: test delete collection_1 collection after add vector to collection_2 for a while + method: add vector , sleep, and delete collection expected: status ok ''' - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - connect.flush([table]) - status = connect.delete_table(param['table_name']) + status, ids = connect.add_vectors(collection, vector) + connect.flush([collection]) + status = connect.drop_collection(param['collection_name']) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_create_index_add_vector(self, connect, table, get_simple_index): + def test_create_index_add_vector(self, connect, collection, get_simple_index): ''' target: test add vector after build index method: build index and add vector @@ -158,33 +158,33 @@ class TestAddBase: ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_create_index_add_vector_another(self, connect, table, get_simple_index): + def test_create_index_add_vector_another(self, connect, collection, get_simple_index): ''' - target: test add vector to table_2 after build index for table_1 + target: test add vector to collection_2 after build index for collection_1 method: build index and add vector expected: status ok 
''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) - status = connect.create_index(table, index_type, index_param) + status = connect.create_collection(param) + status = connect.create_index(collection, index_type, index_param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - connect.delete_table(param['table_name']) + status, ids = connect.add_vectors(collection, vector) + connect.drop_collection(param['collection_name']) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_create_index(self, connect, table, get_simple_index): + def test_add_vector_create_index(self, connect, collection, get_simple_index): ''' target: test build index add after vector method: add vector and build index @@ -194,31 +194,31 @@ class TestAddBase: index_type = get_simple_index["index_type"] logging.getLogger().info(index_param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - status = connect.create_index(table, index_type, index_param) + status, ids = connect.add_vectors(collection, vector) + status = connect.create_index(collection, index_type, index_param) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_create_index_another(self, connect, table, get_simple_index): + def test_add_vector_create_index_another(self, connect, collection, get_simple_index): ''' - target: test add vector to table_2 after build index for table_1 + target: test add vector to collection_2 after build index for collection_1 method: build index and add vector expected: status ok ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - param = {'table_name': gen_unique_str(), + param = 
{'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - status = connect.create_index(param['table_name'], index_type, index_param) + status, ids = connect.add_vectors(collection, vector) + status = connect.create_index(param['collection_name'], index_type, index_param) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_create_index(self, connect, table, get_simple_index): + def test_add_vector_sleep_create_index(self, connect, collection, get_simple_index): ''' target: test build index add after vector for a while method: add vector and build index @@ -227,120 +227,120 @@ class TestAddBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - connect.flush([table]) - status = connect.create_index(table, index_type, index_param) + status, ids = connect.add_vectors(collection, vector) + connect.flush([collection]) + status = connect.create_index(collection, index_type, index_param) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_create_index_another(self, connect, table, get_simple_index): + def test_add_vector_sleep_create_index_another(self, connect, collection, get_simple_index): ''' - target: test add vector to table_2 after build index for table_1 for a while + target: test add vector to collection_2 after build index for collection_1 for a while method: build index and add vector expected: status ok ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': 
MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - connect.flush([table]) - status = connect.create_index(param['table_name'], index_type, index_param) + status, ids = connect.add_vectors(collection, vector) + connect.flush([collection]) + status = connect.create_index(param['collection_name'], index_type, index_param) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_search_vector_add_vector(self, connect, table): + def test_search_vector_add_vector(self, connect, collection): ''' - target: test add vector after search table - method: search table and add vector + target: test add vector after search collection + method: search collection and add vector expected: status ok ''' vector = gen_single_vector(dim) - status, result = connect.search_vectors(table, 1, vector) - status, ids = connect.add_vectors(table, vector) + status, result = connect.search_vectors(collection, 1, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_search_vector_add_vector_another(self, connect, table): + def test_search_vector_add_vector_another(self, connect, collection): ''' - target: test add vector to table_1 after search table_2 - method: search table and add vector + target: test add vector to collection_1 after search collection_2 + method: search collection and add vector expected: status ok ''' - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, result = connect.search_vectors(table, 1, vector) - status, ids = connect.add_vectors(param['table_name'], vector) + status, result = connect.search_vectors(collection, 1, 
vector) + status, ids = connect.add_vectors(param['collection_name'], vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_search_vector(self, connect, table): + def test_add_vector_search_vector(self, connect, collection): ''' target: test search vector after add vector - method: add vector and search table + method: add vector and search collection expected: status ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - connect.flush([table]) - status, result = connect.search_vectors(table, 1, vector) + connect.flush([collection]) + status, result = connect.search_vectors(collection, 1, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_search_vector_another(self, connect, table): + def test_add_vector_search_vector_another(self, connect, collection): ''' - target: test add vector to table_1 after search table_2 - method: search table and add vector + target: test add vector to collection_1 after search collection_2 + method: search collection and add vector expected: status ok ''' - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - status, result = connect.search_vectors(param['table_name'], 1, vector) + status, ids = connect.add_vectors(collection, vector) + status, result = connect.search_vectors(param['collection_name'], 1, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_search_vector(self, connect, table): + def test_add_vector_sleep_search_vector(self, connect, collection): ''' target: test search vector after add vector after a while - method: add vector, 
sleep, and search table + method: add vector, sleep, and search collection expected: status ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - connect.flush([table]) - status, result = connect.search_vectors(table, 1, vector) + status, ids = connect.add_vectors(collection, vector) + connect.flush([collection]) + status, result = connect.search_vectors(collection, 1, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_search_vector_another(self, connect, table): + def test_add_vector_sleep_search_vector_another(self, connect, collection): ''' - target: test add vector to table_1 after search table_2 a while - method: search table , sleep, and add vector + target: test add vector to collection_1 after search collection_2 a while + method: search collection , sleep, and add vector expected: status ok ''' - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) - connect.flush([table]) - status, result = connect.search_vectors(param['table_name'], 1, vector) + status, ids = connect.add_vectors(collection, vector) + connect.flush([collection]) + status, result = connect.search_vectors(param['collection_name'], 1, vector) assert status.OK() """ @@ -350,27 +350,27 @@ class TestAddBase: """ @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_ids(self, connect, table): + def test_add_vectors_ids(self, connect, collection): ''' - target: test add vectors in table, use customize ids - method: create table and add vectors in it, check the ids returned and the table length after vectors added - expected: the length of ids and the table row count + target: test add vectors in collection, use customize ids + method: 
create collection and add vectors in it, check the ids returned and the collection length after vectors added + expected: the length of ids and the collection row count ''' nq = 5; top_k = 1; vectors = gen_vectors(nq, dim) ids = [i for i in range(nq)] - status, ids = connect.add_vectors(table, vectors, ids) - connect.flush([table]) + status, ids = connect.add_vectors(collection, vectors, ids) + connect.flush([collection]) assert status.OK() assert len(ids) == nq - status, result = connect.search_vectors(table, top_k, query_records=vectors) + status, result = connect.search_vectors(collection, top_k, query_records=vectors) logging.getLogger().info(result) assert len(result) == nq for i in range(nq): assert result[i][0].id == i @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_twice_ids_no_ids(self, connect, table): + def test_add_vectors_twice_ids_no_ids(self, connect, collection): ''' target: check the result of add_vectors, with params ids and no ids method: test add vectors twice, use customize ids first, and then use no ids @@ -379,15 +379,15 @@ class TestAddBase: nq = 5; top_k = 1; vectors = gen_vectors(nq, dim) ids = [i for i in range(nq)] - status, ids = connect.add_vectors(table, vectors, ids) + status, ids = connect.add_vectors(collection, vectors, ids) assert status.OK() - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) logging.getLogger().info(status) logging.getLogger().info(ids) assert not status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_twice_not_ids_ids(self, connect, table): + def test_add_vectors_twice_not_ids_ids(self, connect, collection): ''' target: check the result of add_vectors, with params ids and no ids method: test add vectors twice, use not ids first, and then use customize ids @@ -396,25 +396,25 @@ class TestAddBase: nq = 5; top_k = 1; vectors = gen_vectors(nq, dim) ids = [i for i in range(nq)] - status, ids = connect.add_vectors(table, vectors) + 
status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status, ids = connect.add_vectors(table, vectors, ids) + status, ids = connect.add_vectors(collection, vectors, ids) logging.getLogger().info(status) logging.getLogger().info(ids) assert not status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_ids_length_not_match(self, connect, table): + def test_add_vectors_ids_length_not_match(self, connect, collection): ''' - target: test add vectors in table, use customize ids, len(ids) != len(vectors) - method: create table and add vectors in it + target: test add vectors in collection, use customize ids, len(ids) != len(vectors) + method: create collection and add vectors in it expected: raise an exception ''' nq = 5 vectors = gen_vectors(nq, dim) ids = [i for i in range(1, nq)] with pytest.raises(Exception) as e: - status, ids = connect.add_vectors(table, vectors, ids) + status, ids = connect.add_vectors(collection, vectors, ids) @pytest.fixture( scope="function", @@ -424,10 +424,10 @@ class TestAddBase: yield request.param @pytest.mark.level(2) - def test_add_vectors_ids_invalid(self, connect, table, get_vector_id): + def test_add_vectors_ids_invalid(self, connect, collection, get_vector_id): ''' - target: test add vectors in table, use customize ids, which are not int64 - method: create table and add vectors in it + target: test add vectors in collection, use customize ids, which are not int64 + method: create collection and add vectors in it expected: raise an exception ''' nq = 5 @@ -435,137 +435,137 @@ class TestAddBase: vector_id = get_vector_id ids = [vector_id for _ in range(nq)] with pytest.raises(Exception): - connect.add_vectors(table, vectors, ids) + connect.add_vectors(collection, vectors, ids) @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors(self, connect, table): + def test_add_vectors(self, connect, collection): ''' - target: test add vectors in table created before - method: create table and add vectors in 
it, check the ids returned and the table length after vectors added - expected: the length of ids and the table row count + target: test add vectors in collection created before + method: create collection and add vectors in it, check the ids returned and the collection length after vectors added + expected: the length of ids and the collection row count ''' nq = 5 vectors = gen_vectors(nq, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() assert len(ids) == nq @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_tag(self, connect, table): + def test_add_vectors_tag(self, connect, collection): ''' - target: test add vectors in table created before - method: create table and add vectors in it, with the partition_tag param - expected: the table row count equals to nq + target: test add vectors in collection created before + method: create collection and add vectors in it, with the partition_tag param + expected: the collection row count equals to nq ''' nq = 5 vectors = gen_vectors(nq, dim) - status = connect.create_partition(table, tag) - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) + status = connect.create_partition(collection, tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) assert status.OK() assert len(ids) == nq @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_tag_A(self, connect, table): + def test_add_vectors_tag_A(self, connect, collection): ''' - target: test add vectors in table created before + target: test add vectors in collection created before method: create partition and add vectors in it - expected: the table row count equals to nq + expected: the collection row count equals to nq ''' nq = 5 vectors = gen_vectors(nq, dim) - status = connect.create_partition(table, tag) - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) + status = connect.create_partition(collection, tag) + status, 
ids = connect.add_vectors(collection, vectors, partition_tag=tag) assert status.OK() assert len(ids) == nq @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_tag_not_existed(self, connect, table): + def test_add_vectors_tag_not_existed(self, connect, collection): ''' - target: test add vectors in table created before - method: create table and add vectors in it, with the not existed partition_tag param + target: test add vectors in collection created before + method: create collection and add vectors in it, with the not existed partition_tag param expected: status not ok ''' nq = 5 vectors = gen_vectors(nq, dim) - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) assert not status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_tag_not_existed_A(self, connect, table): + def test_add_vectors_tag_not_existed_A(self, connect, collection): ''' - target: test add vectors in table created before + target: test add vectors in collection created before method: create partition, add vectors with the not existed partition_tag param expected: status not ok ''' nq = 5 vectors = gen_vectors(nq, dim) new_tag = "new_tag" - status = connect.create_partition(table, tag) - status, ids = connect.add_vectors(table, vectors, partition_tag=new_tag) + status = connect.create_partition(collection, tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=new_tag) assert not status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_tag_existed(self, connect, table): + def test_add_vectors_tag_existed(self, connect, collection): ''' - target: test add vectors in table created before - method: create table and add vectors in it repeatly, with the partition_tag param - expected: the table row count equals to nq + target: test add vectors in collection created before + method: create collection and add vectors in it repeatly, with the partition_tag 
param + expected: the collection row count equals to nq ''' nq = 5 vectors = gen_vectors(nq, dim) - status = connect.create_partition(table, tag) - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) + status = connect.create_partition(collection, tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) for i in range(5): - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) assert status.OK() assert len(ids) == nq @pytest.mark.level(2) - def test_add_vectors_without_connect(self, dis_connect, table): + def test_add_vectors_without_connect(self, dis_connect, collection): ''' target: test add vectors without connection - method: create table and add vectors in it, check if added successfully + method: create collection and add vectors in it, check if added successfully expected: raise exception ''' nq = 5 vectors = gen_vectors(nq, dim) with pytest.raises(Exception) as e: - status, ids = dis_connect.add_vectors(table, vectors) + status, ids = dis_connect.add_vectors(collection, vectors) - def test_add_table_not_existed(self, connect): + def test_add_collection_not_existed(self, connect): ''' - target: test add vectors in table, which not existed before - method: add vectors table not existed, check the status + target: test add vectors in collection, which not existed before + method: add vectors collection not existed, check the status expected: status not ok ''' nq = 5 vector = gen_single_vector(dim) - status, ids = connect.add_vectors(gen_unique_str("not_exist_table"), vector) + status, ids = connect.add_vectors(gen_unique_str("not_exist_collection"), vector) assert not status.OK() assert not ids - def test_add_vector_dim_not_matched(self, connect, table): + def test_add_vector_dim_not_matched(self, connect, collection): ''' - target: test add vector, the vector dimension is not equal to the table dimension - method: the vector 
dimension is half of the table dimension, check the status + target: test add vector, the vector dimension is not equal to the collection dimension + method: the vector dimension is half of the collection dimension, check the status expected: status not ok ''' vector = gen_single_vector(int(dim)//2) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert not status.OK() - def test_add_vectors_dim_not_matched(self, connect, table): + def test_add_vectors_dim_not_matched(self, connect, collection): ''' - target: test add vectors, the vector dimension is not equal to the table dimension - method: the vectors dimension is half of the table dimension, check the status + target: test add vectors, the vector dimension is not equal to the collection dimension + method: the vectors dimension is half of the collection dimension, check the status expected: status not ok ''' nq = 5 vectors = gen_vectors(nq, int(dim)//2) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert not status.OK() - def test_add_vector_query_after_sleep(self, connect, table): + def test_add_vector_query_after_sleep(self, connect, collection): ''' target: test add vectors, and search it after sleep method: set vector[0][1] as query vectors @@ -573,9 +573,9 @@ class TestAddBase: ''' nq = 5 vectors = gen_vectors(nq, dim) - status, ids = connect.add_vectors(table, vectors) - connect.flush([table]) - status, result = connect.search_vectors(table, 1, [vectors[0]]) + status, ids = connect.add_vectors(collection, vectors) + connect.flush([collection]) + status, result = connect.search_vectors(collection, 1, [vectors[0]]) assert status.OK() assert len(result) == 1 @@ -588,15 +588,15 @@ class TestAddBase: method: 10 processed add vectors concurrently expected: status ok and result length is equal to the length off added vectors ''' - table = gen_unique_str() + collection = gen_unique_str() uri 
= "tcp://%s:%s" % (args["ip"], args["port"]) - param = {'table_name': table, + param = {'collection_name': collection, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} milvus = get_milvus(args["handler"]) milvus.connect(uri=uri) - milvus.create_table(param) + milvus.create_collection(param) vector = gen_single_vector(dim) process_num = 4 loop_num = 5 @@ -606,7 +606,7 @@ class TestAddBase: milvus.connect(uri=uri) i = 0 while i < loop_num: - status, ids = milvus.add_vectors(table, vector) + status, ids = milvus.add_vectors(collection, vector) i = i + 1 milvus.disconnect() for i in range(process_num): @@ -617,37 +617,37 @@ class TestAddBase: for p in processes: p.join() time.sleep(2) - status, count = milvus.get_table_row_count(table) + status, count = milvus.count_collection(collection) assert count == process_num * loop_num @pytest.mark.level(2) @pytest.mark.timeout(30) - def test_table_add_rows_count_multi_threading(self, args): + def test_collection_add_rows_count_multi_threading(self, args): ''' - target: test table rows_count is correct or not with multi threading - method: create table and add vectors in it(idmap), - assert the value returned by get_table_row_count method is equal to length of vectors + target: test collection rows_count is correct or not with multi threading + method: create collection and add vectors in it(idmap), + assert the value returned by count_collection method is equal to length of vectors expected: the count is equal to the length of vectors ''' thread_num = 8 threads = [] - table = gen_unique_str() + collection = gen_unique_str() uri = "tcp://%s:%s" % (args["ip"], args["port"]) - param = {'table_name': table, + param = {'collection_name': collection, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} milvus = get_milvus(args["handler"]) milvus.connect(uri=uri) - milvus.create_table(param) + milvus.create_collection(param) vectors = gen_vectors(nb, dim) def 
add(thread_i): logging.getLogger().info("In thread-%d" % thread_i) milvus = get_milvus() milvus.connect(uri=uri) - status, result = milvus.add_vectors(table, records=vectors) + status, result = milvus.add_vectors(collection, records=vectors) assert status.OK() - status = milvus.flush([table]) + status = milvus.flush([collection]) assert status.OK() for i in range(thread_num): x = threading.Thread(target=add, args=(i, )) @@ -655,29 +655,29 @@ class TestAddBase: x.start() for th in threads: th.join() - status, res = milvus.get_table_row_count(table) + status, res = milvus.count_collection(collection) assert res == thread_num * nb - def test_add_vector_multi_tables(self, connect): + def test_add_vector_multi_collections(self, connect): ''' - target: test add vectors is correct or not with multiple tables of L2 - method: create 50 tables and add vectors into them in turn + target: test add vectors is correct or not with multiple collections of L2 + method: create 50 collections and add vectors into them in turn expected: status ok ''' nq = 100 vectors = gen_vectors(nq, dim) - table_list = [] + collection_list = [] for i in range(20): - table_name = gen_unique_str('test_add_vector_multi_tables') - table_list.append(table_name) - param = {'table_name': table_name, + collection_name = gen_unique_str('test_add_vector_multi_collections') + collection_list.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - connect.create_table(param) + connect.create_collection(param) for j in range(5): for i in range(20): - status, ids = connect.add_vectors(table_name=table_list[i], records=vectors) + status, ids = connect.add_vectors(collection_name=collection_list[i], records=vectors) assert status.OK() class TestAddIP: @@ -698,122 +698,122 @@ class TestAddIP: pytest.skip("Skip PQ Temporary") return request.param - def test_add_vector_create_table(self, connect, ip_table): + def 
test_add_vector_create_collection(self, connect, ip_collection): ''' - target: test add vector, then create table again - method: add vector and create table + target: test add vector, then create collection again + method: add vector and create collection expected: status not ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) - param = {'table_name': ip_table, + status, ids = connect.add_vectors(ip_collection, vector) + param = {'collection_name': ip_collection, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) assert not status.OK() - def test_add_vector_has_table(self, connect, ip_table): + def test_add_vector_has_collection(self, connect, ip_collection): ''' - target: test add vector, then check table existence - method: add vector and call HasTable - expected: table exists, status ok + target: test add vector, then check collection existence + method: add vector and call Hascollection + expected: collection exists, status ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) - assert assert_has_table(connect, ip_table) + status, ids = connect.add_vectors(ip_collection, vector) + assert assert_has_collection(connect, ip_collection) @pytest.mark.timeout(ADD_TIMEOUT) - def test_delete_table_add_vector(self, connect, ip_table): + def test_drop_collection_add_vector(self, connect, ip_collection): ''' - target: test add vector after table deleted - method: delete table and add vector + target: test add vector after collection deleted + method: delete collection and add vector expected: status not ok ''' - status = connect.delete_table(ip_table) + status = connect.drop_collection(ip_collection) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) assert not status.OK() 
@pytest.mark.timeout(ADD_TIMEOUT) - def test_delete_table_add_vector_another(self, connect, ip_table): + def test_drop_collection_add_vector_another(self, connect, ip_collection): ''' - target: test add vector to table_1 after table_2 deleted - method: delete table_2 and add vector to table_1 + target: test add vector to collection_1 after collection_2 deleted + method: delete collection_2 and add vector to collection_1 expected: status ok ''' - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) - status = connect.delete_table(ip_table) + status = connect.create_collection(param) + status = connect.drop_collection(ip_collection) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(param['table_name'], vector) + status, ids = connect.add_vectors(param['collection_name'], vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_delete_table(self, connect, ip_table): + def test_add_vector_drop_collection(self, connect, ip_collection): ''' - target: test delete table after add vector - method: add vector and delete table + target: test delete collection after add vector + method: add vector and delete collection expected: status ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) - status = connect.delete_table(ip_table) + status, ids = connect.add_vectors(ip_collection, vector) + status = connect.drop_collection(ip_collection) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_delete_another_table(self, connect, ip_table): + def test_add_vector_delete_another_collection(self, connect, ip_collection): ''' - target: test delete table_1 table after add vector to table_2 - method: add vector and delete table + target: test delete collection_1 collection after add vector to collection_2 + method: add vector and 
delete collection expected: status ok ''' - param = {'table_name': 'test_add_vector_delete_another_table', + param = {'collection_name': 'test_add_vector_delete_another_collection', 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) - status = connect.delete_table(param['table_name']) + status, ids = connect.add_vectors(ip_collection, vector) + status = connect.drop_collection(param['collection_name']) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_delete_table(self, connect, ip_table): + def test_add_vector_sleep_drop_collection(self, connect, ip_collection): ''' - target: test delete table after add vector for a while - method: add vector, sleep, and delete table + target: test delete collection after add vector for a while + method: add vector, sleep, and delete collection expected: status ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) - connect.flush([ip_table]) - status = connect.delete_table(ip_table) + status, ids = connect.add_vectors(ip_collection, vector) + connect.flush([ip_collection]) + status = connect.drop_collection(ip_collection) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_delete_another_table(self, connect, ip_table): + def test_add_vector_sleep_delete_another_collection(self, connect, ip_collection): ''' - target: test delete table_1 table after add vector to table_2 for a while - method: add vector , sleep, and delete table + target: test delete collection_1 collection after add vector to collection_2 for a while + method: add vector , sleep, and delete collection expected: status ok ''' - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 
'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) - connect.flush([ip_table]) - status = connect.delete_table(param['table_name']) + status, ids = connect.add_vectors(ip_collection, vector) + connect.flush([ip_collection]) + status = connect.drop_collection(param['collection_name']) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_create_index_add_vector(self, connect, ip_table, get_simple_index): + def test_create_index_add_vector(self, connect, ip_collection, get_simple_index): ''' target: test add vector after build index method: build index and add vector @@ -821,32 +821,32 @@ class TestAddIP: ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - status = connect.create_index(ip_table, index_type, index_param) + status = connect.create_index(ip_collection, index_type, index_param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_create_index_add_vector_another(self, connect, ip_table, get_simple_index): + def test_create_index_add_vector_another(self, connect, ip_collection, get_simple_index): ''' - target: test add vector to table_2 after build index for table_1 + target: test add vector to collection_2 after build index for collection_1 method: build index and add vector expected: status ok ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) - status = connect.create_index(ip_table, index_type, index_param) + status = 
connect.create_collection(param) + status = connect.create_index(ip_collection, index_type, index_param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_create_index(self, connect, ip_table, get_simple_index): + def test_add_vector_create_index(self, connect, ip_collection, get_simple_index): ''' target: test build index add after vector method: add vector and build index @@ -855,36 +855,36 @@ class TestAddIP: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) status, mode = connect._cmd("mode") assert status.OK() - status = connect.create_index(ip_table, index_type, index_param) + status = connect.create_index(ip_collection, index_type, index_param) if str(mode) == "GPU" and (index_type == IndexType.IVF_PQ): assert not status.OK() else: assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_create_index_another(self, connect, ip_table, get_simple_index): + def test_add_vector_create_index_another(self, connect, ip_collection, get_simple_index): ''' - target: test add vector to table_2 after build index for table_1 + target: test add vector to collection_2 after build index for collection_1 method: build index and add vector expected: status ok ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) - status = 
connect.create_index(param['table_name'], index_type, index_param) + status, ids = connect.add_vectors(ip_collection, vector) + status = connect.create_index(param['collection_name'], index_type, index_param) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_create_index(self, connect, ip_table, get_simple_index): + def test_add_vector_sleep_create_index(self, connect, ip_collection, get_simple_index): ''' target: test build index add after vector for a while method: add vector and build index @@ -895,123 +895,123 @@ class TestAddIP: if index_type == IndexType.IVF_PQ: pytest.skip("Skip some PQ cases") vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) assert status.OK() time.sleep(add_interval_time) - status = connect.create_index(ip_table, index_type, index_param) + status = connect.create_index(ip_collection, index_type, index_param) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_create_index_another(self, connect, ip_table, get_simple_index): + def test_add_vector_sleep_create_index_another(self, connect, ip_collection, get_simple_index): ''' - target: test add vector to table_2 after build index for table_1 for a while + target: test add vector to collection_2 after build index for collection_1 for a while method: build index and add vector expected: status ok ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) - connect.flush([ip_table]) - status = connect.create_index(param['table_name'], index_type, index_param) + status, 
ids = connect.add_vectors(ip_collection, vector) + connect.flush([ip_collection]) + status = connect.create_index(param['collection_name'], index_type, index_param) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_search_vector_add_vector(self, connect, ip_table): + def test_search_vector_add_vector(self, connect, ip_collection): ''' - target: test add vector after search table - method: search table and add vector + target: test add vector after search collection + method: search collection and add vector expected: status ok ''' vector = gen_single_vector(dim) - status, result = connect.search_vectors(ip_table, 1, vector) - status, ids = connect.add_vectors(ip_table, vector) + status, result = connect.search_vectors(ip_collection, 1, vector) + status, ids = connect.add_vectors(ip_collection, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_search_vector_add_vector_another(self, connect, ip_table): + def test_search_vector_add_vector_another(self, connect, ip_collection): ''' - target: test add vector to table_1 after search table_2 - method: search table and add vector + target: test add vector to collection_1 after search collection_2 + method: search collection and add vector expected: status ok ''' - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, result = connect.search_vectors(ip_table, 1, vector) - status, ids = connect.add_vectors(param['table_name'], vector) + status, result = connect.search_vectors(ip_collection, 1, vector) + status, ids = connect.add_vectors(param['collection_name'], vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_search_vector(self, connect, ip_table): + def test_add_vector_search_vector(self, connect, ip_collection): 
''' target: test search vector after add vector - method: add vector and search table + method: add vector and search collection expected: status ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) assert status.OK() - connect.flush([ip_table]) - status, result = connect.search_vectors(ip_table, 1, vector) + connect.flush([ip_collection]) + status, result = connect.search_vectors(ip_collection, 1, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_search_vector_another(self, connect, ip_table): + def test_add_vector_search_vector_another(self, connect, ip_collection): ''' - target: test add vector to table_1 after search table_2 - method: search table and add vector + target: test add vector to collection_1 after search collection_2 + method: search collection and add vector expected: status ok ''' - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) - connect.flush([ip_table]) - status, result = connect.search_vectors(param['table_name'], 1, vector) + status, ids = connect.add_vectors(ip_collection, vector) + connect.flush([ip_collection]) + status, result = connect.search_vectors(param['collection_name'], 1, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_search_vector(self, connect, ip_table): + def test_add_vector_sleep_search_vector(self, connect, ip_collection): ''' target: test search vector after add vector after a while - method: add vector, sleep, and search table + method: add vector, sleep, and search collection expected: status ok ''' vector = gen_single_vector(dim) - status, ids = 
connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) time.sleep(add_interval_time) - status, result = connect.search_vectors(ip_table, 1, vector) + status, result = connect.search_vectors(ip_collection, 1, vector) assert status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vector_sleep_search_vector_another(self, connect, ip_table): + def test_add_vector_sleep_search_vector_another(self, connect, ip_collection): ''' - target: test add vector to table_1 after search table_2 a while - method: search table , sleep, and add vector + target: test add vector to collection_1 after search collection_2 a while + method: search collection , sleep, and add vector expected: status ok ''' - param = {'table_name': gen_unique_str(), + param = {'collection_name': gen_unique_str(), 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) assert status.OK() time.sleep(add_interval_time) - status, result = connect.search_vectors(param['table_name'], 1, vector) + status, result = connect.search_vectors(param['collection_name'], 1, vector) assert status.OK() """ @@ -1021,28 +1021,28 @@ class TestAddIP: """ @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_ids(self, connect, ip_table): + def test_add_vectors_ids(self, connect, ip_collection): ''' - target: test add vectors in table, use customize ids - method: create table and add vectors in it, check the ids returned and the table length after vectors added - expected: the length of ids and the table row count + target: test add vectors in collection, use customize ids + method: create collection and add vectors in it, check the ids returned and the collection length after vectors added + expected: the length of ids and the 
collection row count ''' nq = 5; top_k = 1 vectors = gen_vectors(nq, dim) ids = [i for i in range(nq)] - status, ids = connect.add_vectors(ip_table, vectors, ids) + status, ids = connect.add_vectors(ip_collection, vectors, ids) assert status.OK() - connect.flush([ip_table]) + connect.flush([ip_collection]) assert len(ids) == nq # check search result - status, result = connect.search_vectors(ip_table, top_k, vectors) + status, result = connect.search_vectors(ip_collection, top_k, vectors) logging.getLogger().info(result) assert len(result) == nq for i in range(nq): assert result[i][0].id == i @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_twice_ids_no_ids(self, connect, ip_table): + def test_add_vectors_twice_ids_no_ids(self, connect, ip_collection): ''' target: check the result of add_vectors, with params ids and no ids method: test add vectors twice, use customize ids first, and then use no ids @@ -1051,15 +1051,15 @@ class TestAddIP: nq = 5; top_k = 1 vectors = gen_vectors(nq, dim) ids = [i for i in range(nq)] - status, ids = connect.add_vectors(ip_table, vectors, ids) + status, ids = connect.add_vectors(ip_collection, vectors, ids) assert status.OK() - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) logging.getLogger().info(status) logging.getLogger().info(ids) assert not status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_twice_not_ids_ids(self, connect, ip_table): + def test_add_vectors_twice_not_ids_ids(self, connect, ip_collection): ''' target: check the result of add_vectors, with params ids and no ids method: test add vectors twice, use not ids first, and then use customize ids @@ -1068,25 +1068,25 @@ class TestAddIP: nq = 5; top_k = 1 vectors = gen_vectors(nq, dim) ids = [i for i in range(nq)] - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() - status, ids = 
connect.add_vectors(ip_table, vectors, ids) + status, ids = connect.add_vectors(ip_collection, vectors, ids) logging.getLogger().info(status) logging.getLogger().info(ids) assert not status.OK() @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors_ids_length_not_match(self, connect, ip_table): + def test_add_vectors_ids_length_not_match(self, connect, ip_collection): ''' - target: test add vectors in table, use customize ids, len(ids) != len(vectors) - method: create table and add vectors in it + target: test add vectors in collection, use customize ids, len(ids) != len(vectors) + method: create collection and add vectors in it expected: raise an exception ''' nq = 5 vectors = gen_vectors(nq, dim) ids = [i for i in range(1, nq)] with pytest.raises(Exception) as e: - status, ids = connect.add_vectors(ip_table, vectors, ids) + status, ids = connect.add_vectors(ip_collection, vectors, ids) @pytest.fixture( scope="function", @@ -1096,10 +1096,10 @@ class TestAddIP: yield request.param @pytest.mark.level(2) - def test_add_vectors_ids_invalid(self, connect, ip_table, get_vector_id): + def test_add_vectors_ids_invalid(self, connect, ip_collection, get_vector_id): ''' - target: test add vectors in table, use customize ids, which are not int64 - method: create table and add vectors in it + target: test add vectors in collection, use customize ids, which are not int64 + method: create collection and add vectors in it expected: raise an exception ''' nq = 5 @@ -1107,55 +1107,55 @@ class TestAddIP: vector_id = get_vector_id ids = [vector_id for i in range(nq)] with pytest.raises(Exception) as e: - status, ids = connect.add_vectors(ip_table, vectors, ids) + status, ids = connect.add_vectors(ip_collection, vectors, ids) @pytest.mark.timeout(ADD_TIMEOUT) - def test_add_vectors(self, connect, ip_table): + def test_add_vectors(self, connect, ip_collection): ''' - target: test add vectors in table created before - method: create table and add vectors in it, check the ids 
returned and the table length after vectors added - expected: the length of ids and the table row count + target: test add vectors in collection created before + method: create collection and add vectors in it, check the ids returned and the collection length after vectors added + expected: the length of ids and the collection row count ''' nq = 5 vectors = gen_vectors(nq, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() assert len(ids) == nq @pytest.mark.level(2) - def test_add_vectors_without_connect(self, dis_connect, ip_table): + def test_add_vectors_without_connect(self, dis_connect, ip_collection): ''' target: test add vectors without connection - method: create table and add vectors in it, check if added successfully + method: create collection and add vectors in it, check if added successfully expected: raise exception ''' nq = 5 vectors = gen_vectors(nq, dim) with pytest.raises(Exception) as e: - status, ids = dis_connect.add_vectors(ip_table, vectors) + status, ids = dis_connect.add_vectors(ip_collection, vectors) - def test_add_vector_dim_not_matched(self, connect, ip_table): + def test_add_vector_dim_not_matched(self, connect, ip_collection): ''' - target: test add vector, the vector dimension is not equal to the table dimension - method: the vector dimension is half of the table dimension, check the status + target: test add vector, the vector dimension is not equal to the collection dimension + method: the vector dimension is half of the collection dimension, check the status expected: status not ok ''' vector = gen_single_vector(int(dim)//2) - status, ids = connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) assert not status.OK() - def test_add_vectors_dim_not_matched(self, connect, ip_table): + def test_add_vectors_dim_not_matched(self, connect, ip_collection): ''' - target: test add vectors, the vector dimension is 
not equal to the table dimension - method: the vectors dimension is half of the table dimension, check the status + target: test add vectors, the vector dimension is not equal to the collection dimension + method: the vectors dimension is half of the collection dimension, check the status expected: status not ok ''' nq = 5 vectors = gen_vectors(nq, int(dim)//2) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert not status.OK() - def test_add_vector_query_after_sleep(self, connect, ip_table): + def test_add_vector_query_after_sleep(self, connect, ip_collection): ''' target: test add vectors, and search it after sleep method: set vector[0][1] as query vectors @@ -1163,32 +1163,32 @@ class TestAddIP: ''' nq = 5 vectors = gen_vectors(nq, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) time.sleep(add_interval_time) - status, result = connect.search_vectors(ip_table, 1, [vectors[0]]) + status, result = connect.search_vectors(ip_collection, 1, [vectors[0]]) assert status.OK() assert len(result) == 1 - def test_add_vector_multi_tables(self, connect): + def test_add_vector_multi_collections(self, connect): ''' - target: test add vectors is correct or not with multiple tables of IP - method: create 50 tables and add vectors into them in turn + target: test add vectors is correct or not with multiple collections of IP + method: create 50 collections and add vectors into them in turn expected: status ok ''' nq = 100 vectors = gen_vectors(nq, dim) - table_list = [] + collection_list = [] for i in range(20): - table_name = gen_unique_str('test_add_vector_multi_tables') - table_list.append(table_name) - param = {'table_name': table_name, + collection_name = gen_unique_str('test_add_vector_multi_collections') + collection_list.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': 
index_file_size, 'metric_type': MetricType.IP} - connect.create_table(param) + connect.create_collection(param) for j in range(10): for i in range(20): - status, ids = connect.add_vectors(table_name=table_list[i], records=vectors) + status, ids = connect.add_vectors(collection_name=collection_list[i], records=vectors) assert status.OK() class TestAddAdvance: @@ -1207,7 +1207,7 @@ class TestAddAdvance: def insert_count(self, request): yield request.param - def test_insert_much(self, connect, table, insert_count): + def test_insert_much(self, connect, collection, insert_count): ''' target: test add vectors with different length of vectors method: set different vectors as add method params @@ -1215,11 +1215,11 @@ class TestAddAdvance: ''' nb = insert_count insert_vec_list = gen_vectors(nb, dim) - status, ids = connect.add_vectors(table, insert_vec_list) + status, ids = connect.add_vectors(collection, insert_vec_list) assert len(ids) == nb assert status.OK() - def test_insert_much_ip(self, connect, ip_table, insert_count): + def test_insert_much_ip(self, connect, ip_collection, insert_count): ''' target: test add vectors with different length of vectors method: set different vectors as add method params @@ -1227,11 +1227,11 @@ class TestAddAdvance: ''' nb = insert_count insert_vec_list = gen_vectors(nb, dim) - status, ids = connect.add_vectors(ip_table, insert_vec_list) + status, ids = connect.add_vectors(ip_collection, insert_vec_list) assert len(ids) == nb assert status.OK() - def test_insert_much_jaccard(self, connect, jac_table, insert_count): + def test_insert_much_jaccard(self, connect, jac_collection, insert_count): ''' target: test add vectors with different length of vectors method: set different vectors as add method params @@ -1239,11 +1239,11 @@ class TestAddAdvance: ''' nb = insert_count tmp, insert_vec_list = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(jac_table, insert_vec_list) + status, ids = connect.add_vectors(jac_collection, 
insert_vec_list) assert len(ids) == nb assert status.OK() - def test_insert_much_hamming(self, connect, ham_table, insert_count): + def test_insert_much_hamming(self, connect, ham_collection, insert_count): ''' target: test add vectors with different length of vectors method: set different vectors as add method params @@ -1251,11 +1251,11 @@ class TestAddAdvance: ''' nb = insert_count tmp, insert_vec_list = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(ham_table, insert_vec_list) + status, ids = connect.add_vectors(ham_collection, insert_vec_list) assert len(ids) == nb assert status.OK() - def test_insert_much_tanimoto(self, connect, tanimoto_table, insert_count): + def test_insert_much_tanimoto(self, connect, tanimoto_collection, insert_count): ''' target: test add vectors with different length of vectors method: set different vectors as add method params @@ -1263,46 +1263,46 @@ class TestAddAdvance: ''' nb = insert_count tmp, insert_vec_list = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(tanimoto_table, insert_vec_list) + status, ids = connect.add_vectors(tanimoto_collection, insert_vec_list) assert len(ids) == nb assert status.OK() class TestNameInvalid(object): """ - Test adding vectors with invalid table names + Test adding vectors with invalid collection names """ @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) - def get_table_name(self, request): + def get_collection_name(self, request): yield request.param @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) def get_tag_name(self, request): yield request.param @pytest.mark.level(2) - def test_add_vectors_with_invalid_table_name(self, connect, get_table_name): - table_name = get_table_name + def test_add_vectors_with_invalid_collection_name(self, connect, get_collection_name): + collection_name = get_collection_name vectors = gen_vectors(1, dim) - 
status, result = connect.add_vectors(table_name, vectors) + status, result = connect.add_vectors(collection_name, vectors) assert not status.OK() @pytest.mark.level(2) - def test_add_vectors_with_invalid_tag_name(self, connect, get_table_name, get_tag_name): - table_name = get_table_name + def test_add_vectors_with_invalid_tag_name(self, connect, get_collection_name, get_tag_name): + collection_name = get_collection_name tag_name = get_tag_name vectors = gen_vectors(1, dim) - status, result = connect.add_vectors(table_name, vectors, partition_tag=tag_name) + status, result = connect.add_vectors(collection_name, vectors, partition_tag=tag_name) assert not status.OK() -class TestAddTableVectorsInvalid(object): +class TestAddcollectionVectorsInvalid(object): single_vector = gen_single_vector(dim) vectors = gen_vectors(2, dim) @@ -1317,29 +1317,29 @@ class TestAddTableVectorsInvalid(object): yield request.param @pytest.mark.level(2) - def test_add_vector_with_invalid_vectors(self, connect, table, gen_vector): + def test_add_vector_with_invalid_vectors(self, connect, collection, gen_vector): tmp_single_vector = copy.deepcopy(self.single_vector) tmp_single_vector[0][1] = gen_vector with pytest.raises(Exception) as e: - status, result = connect.add_vectors(table, tmp_single_vector) + status, result = connect.add_vectors(collection, tmp_single_vector) @pytest.mark.level(2) - def test_add_vectors_with_invalid_vectors(self, connect, table, gen_vector): + def test_add_vectors_with_invalid_vectors(self, connect, collection, gen_vector): tmp_vectors = copy.deepcopy(self.vectors) tmp_vectors[1][1] = gen_vector with pytest.raises(Exception) as e: - status, result = connect.add_vectors(table, tmp_vectors) + status, result = connect.add_vectors(collection, tmp_vectors) @pytest.mark.level(2) - def test_add_vectors_with_invalid_vectors_jaccard(self, connect, jac_table, gen_vector): + def test_add_vectors_with_invalid_vectors_jaccard(self, connect, jac_collection, gen_vector): 
tmp_vectors = copy.deepcopy(self.vectors) tmp_vectors[1][1] = gen_vector with pytest.raises(Exception) as e: - status, result = connect.add_vectors(jac_table, tmp_vectors) + status, result = connect.add_vectors(jac_collection, tmp_vectors) @pytest.mark.level(2) - def test_add_vectors_with_invalid_vectors_hamming(self, connect, ham_table, gen_vector): + def test_add_vectors_with_invalid_vectors_hamming(self, connect, ham_collection, gen_vector): tmp_vectors = copy.deepcopy(self.vectors) tmp_vectors[1][1] = gen_vector with pytest.raises(Exception) as e: - status, result = connect.add_vectors(ham_table, tmp_vectors) + status, result = connect.add_vectors(ham_collection, tmp_vectors) diff --git a/tests/milvus_python_test/test_collection.py b/tests/milvus_python_test/test_collection.py new file mode 100644 index 0000000000..5e215c2cd2 --- /dev/null +++ b/tests/milvus_python_test/test_collection.py @@ -0,0 +1,1051 @@ +import pdb +import pytest +import logging +import itertools +from time import sleep +from multiprocessing import Process +from milvus import IndexType, MetricType +from utils import * + +dim = 128 +drop_collection_interval_time = 3 +index_file_size = 10 +vectors = gen_vectors(100, dim) + + +class TestCollection: + + """ + ****************************************************************** + The following cases are used to test `create_collection` function + ****************************************************************** + """ + + def test_create_collection(self, connect): + ''' + target: test create normal collection + method: create collection with corrent params + expected: create status return ok + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + status = connect.create_collection(param) + assert status.OK() + + def test_create_collection_ip(self, connect): + ''' + target: test create normal collection + 
method: create collection with corrent params + expected: create status return ok + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.IP} + status = connect.create_collection(param) + assert status.OK() + + def test_create_collection_jaccard(self, connect): + ''' + target: test create normal collection + method: create collection with corrent params + expected: create status return ok + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.JACCARD} + status = connect.create_collection(param) + assert status.OK() + + def test_create_collection_hamming(self, connect): + ''' + target: test create normal collection + method: create collection with corrent params + expected: create status return ok + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.HAMMING} + status = connect.create_collection(param) + assert status.OK() + + @pytest.mark.level(2) + def test_create_collection_without_connection(self, dis_connect): + ''' + target: test create collection, without connection + method: create collection with correct params, with a disconnected instance + expected: create raise exception + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + with pytest.raises(Exception) as e: + status = dis_connect.create_collection(param) + + def test_create_collection_existed(self, connect): + ''' + target: test create collection but the collection name have already existed + method: create collection with the same collection_name + 
expected: create status return not ok + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + status = connect.create_collection(param) + status = connect.create_collection(param) + assert not status.OK() + + @pytest.mark.level(2) + def test_create_collection_existed_ip(self, connect): + ''' + target: test create collection but the collection name have already existed + method: create collection with the same collection_name + expected: create status return not ok + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.IP} + status = connect.create_collection(param) + status = connect.create_collection(param) + assert not status.OK() + + def test_create_collection_None(self, connect): + ''' + target: test create collection but the collection name is None + method: create collection, param collection_name is None + expected: create raise error + ''' + param = {'collection_name': None, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + with pytest.raises(Exception) as e: + status = connect.create_collection(param) + + def test_create_collection_no_dimension(self, connect): + ''' + target: test create collection with no dimension params + method: create collection with corrent params + expected: create status return ok + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + with pytest.raises(Exception) as e: + status = connect.create_collection(param) + + def test_create_collection_no_file_size(self, connect): + ''' + target: test create collection with no index_file_size params + method: create collection with corrent params + 
expected: create status return ok, use default 1024 + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'metric_type': MetricType.L2} + status = connect.create_collection(param) + logging.getLogger().info(status) + status, result = connect.describe_collection(collection_name) + logging.getLogger().info(result) + assert result.index_file_size == 1024 + + def test_create_collection_no_metric_type(self, connect): + ''' + target: test create collection with no metric_type params + method: create collection with corrent params + expected: create status return ok, use default L2 + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size} + status = connect.create_collection(param) + status, result = connect.describe_collection(collection_name) + logging.getLogger().info(result) + assert result.metric_type == MetricType.L2 + + """ + ****************************************************************** + The following cases are used to test `describe_collection` function + ****************************************************************** + """ + + def test_collection_describe_result(self, connect): + ''' + target: test describe collection created with correct params + method: create collection, assert the value returned by describe method + expected: collection_name equals with the collection name created + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + connect.create_collection(param) + status, res = connect.describe_collection(collection_name) + assert res.collection_name == collection_name + assert res.metric_type == MetricType.L2 + + @pytest.mark.level(2) + def test_collection_describe_collection_name_ip(self, connect): + ''' + target: test 
describe collection created with correct params + method: create collection, assert the value returned by describe method + expected: collection_name equals with the collection name created + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.IP} + connect.create_collection(param) + status, res = connect.describe_collection(collection_name) + assert res.collection_name == collection_name + assert res.metric_type == MetricType.IP + + @pytest.mark.level(2) + def test_collection_describe_collection_name_jaccard(self, connect): + ''' + target: test describe collection created with correct params + method: create collection, assert the value returned by describe method + expected: collection_name equals with the collection name created + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.JACCARD} + connect.create_collection(param) + status, res = connect.describe_collection(collection_name) + assert res.collection_name == collection_name + assert res.metric_type == MetricType.JACCARD + + @pytest.mark.level(2) + def test_collection_describe_collection_name_hamming(self, connect): + ''' + target: test describe collection created with correct params + method: create collection, assert the value returned by describe method + expected: collection_name equals with the collection name created + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.HAMMING} + connect.create_collection(param) + status, res = connect.describe_collection(collection_name) + assert res.collection_name == collection_name + assert res.metric_type == MetricType.HAMMING + + # TODO: enable + 
@pytest.mark.level(2) + def _test_collection_describe_collection_name_multiprocessing(self, connect, args): + ''' + target: test describe collection created with multiprocess + method: create collection, assert the value returned by describe method + expected: collection_name equals with the collection name created + ''' + collection_name = gen_unique_str("test_collection") + uri = "tcp://%s:%s" % (args["ip"], args["port"]) + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + connect.create_collection(param) + + def describecollection(milvus): + status, res = milvus.describe_collection(collection_name) + assert res.collection_name == collection_name + + process_num = 4 + processes = [] + for i in range(process_num): + milvus = get_milvus(args["handler"]) + milvus.connect(uri=uri) + p = Process(target=describecollection, args=(milvus,)) + processes.append(p) + p.start() + for p in processes: + p.join() + + @pytest.mark.level(2) + def test_collection_describe_without_connection(self, collection, dis_connect): + ''' + target: test describe collection, without connection + method: describe collection with correct params, with a disconnected instance + expected: describe raise exception + ''' + with pytest.raises(Exception) as e: + status = dis_connect.describe_collection(collection) + + def test_collection_describe_dimension(self, connect): + ''' + target: test describe collection created with correct params + method: create collection, assert the dimention value returned by describe method + expected: dimention equals with dimention when created + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim+1, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + connect.create_collection(param) + status, res = connect.describe_collection(collection_name) + assert res.dimension == dim+1 + + """ + 
****************************************************************** + The following cases are used to test `drop_collection` function + ****************************************************************** + """ + + def test_drop_collection(self, connect, collection): + ''' + target: test delete collection created with correct params + method: create collection and then delete, + assert the value returned by delete method + expected: status ok, and no collection in collections + ''' + status = connect.drop_collection(collection) + assert not assert_has_collection(connect, collection) + + @pytest.mark.level(2) + def test_drop_collection_ip(self, connect, ip_collection): + ''' + target: test delete collection created with correct params + method: create collection and then delete, + assert the value returned by delete method + expected: status ok, and no collection in collections + ''' + status = connect.drop_collection(ip_collection) + assert not assert_has_collection(connect, ip_collection) + + @pytest.mark.level(2) + def test_drop_collection_jaccard(self, connect, jac_collection): + ''' + target: test delete collection created with correct params + method: create collection and then delete, + assert the value returned by delete method + expected: status ok, and no collection in collections + ''' + status = connect.drop_collection(jac_collection) + assert not assert_has_collection(connect, jac_collection) + + @pytest.mark.level(2) + def test_drop_collection_hamming(self, connect, ham_collection): + ''' + target: test delete collection created with correct params + method: create collection and then delete, + assert the value returned by delete method + expected: status ok, and no collection in collections + ''' + status = connect.drop_collection(ham_collection) + assert not assert_has_collection(connect, ham_collection) + + @pytest.mark.level(2) + def test_collection_delete_without_connection(self, collection, dis_connect): + ''' + target: test describe collection, 
without connection + method: describe collection with correct params, with a disconnected instance + expected: describe raise exception + ''' + with pytest.raises(Exception) as e: + status = dis_connect.drop_collection(collection) + + def test_drop_collection_not_existed(self, connect): + ''' + target: test delete collection not in index + method: delete all collections, and delete collection again, + assert the value returned by delete method + expected: status not ok + ''' + collection_name = gen_unique_str("test_collection") + status = connect.drop_collection(collection_name) + assert not status.OK() + + def test_drop_collection_repeatedly(self, connect): + ''' + target: test delete collection created with correct params + method: create collection and delete new collection repeatedly, + assert the value returned by delete method + expected: create ok and delete ok + ''' + loops = 1 + for i in range(loops): + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + connect.create_collection(param) + status = connect.drop_collection(collection_name) + time.sleep(1) + assert not assert_has_collection(connect, collection_name) + + def test_delete_create_collection_repeatedly(self, connect): + ''' + target: test delete and create the same collection repeatedly + method: try to create the same collection and delete repeatedly, + assert the value returned by delete method + expected: create ok and delete ok + ''' + loops = 5 + for i in range(loops): + collection_name = "test_collection" + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + connect.create_collection(param) + status = connect.drop_collection(collection_name) + time.sleep(2) + assert status.OK() + + def test_delete_create_collection_repeatedly_ip(self, connect): + ''' + target: test delete 
and create the same collection repeatedly + method: try to create the same collection and delete repeatedly, + assert the value returned by delete method + expected: create ok and delete ok + ''' + loops = 5 + for i in range(loops): + collection_name = "test_collection" + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.IP} + connect.create_collection(param) + status = connect.drop_collection(collection_name) + time.sleep(2) + assert status.OK() + + # TODO: enable + @pytest.mark.level(2) + def _test_drop_collection_multiprocessing(self, args): + ''' + target: test delete collection with multiprocess + method: create collection and then delete, + assert the value returned by delete method + expected: status ok, and no collection in collections + ''' + process_num = 6 + processes = [] + uri = "tcp://%s:%s" % (args["ip"], args["port"]) + + def deletecollection(milvus): + status = milvus.drop_collection(collection) + # assert not status.code==0 + assert assert_has_collection(milvus, collection) + assert status.OK() + + for i in range(process_num): + milvus = get_milvus(args["handler"]) + milvus.connect(uri=uri) + p = Process(target=deletecollection, args=(milvus,)) + processes.append(p) + p.start() + for p in processes: + p.join() + + # TODO: enable + @pytest.mark.level(2) + def _test_drop_collection_multiprocessing_multicollection(self, connect): + ''' + target: test delete collection with multiprocess + method: create collection and then delete, + assert the value returned by delete method + expected: status ok, and no collection in collections + ''' + process_num = 5 + loop_num = 2 + processes = [] + + collection = [] + j = 0 + while j < (process_num*loop_num): + collection_name = gen_unique_str("test_drop_collection_with_multiprocessing") + collection.append(collection_name) + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 
'metric_type': MetricType.L2} + connect.create_collection(param) + j = j + 1 + + def delete(connect,ids): + i = 0 + while i < loop_num: + status = connect.drop_collection(collection[ids*process_num+i]) + time.sleep(2) + assert status.OK() + assert not assert_has_collection(connect, collection[ids*process_num+i]) + i = i + 1 + + for i in range(process_num): + ids = i + p = Process(target=delete, args=(connect,ids)) + processes.append(p) + p.start() + for p in processes: + p.join() + + """ + ****************************************************************** + The following cases are used to test `has_collection` function + ****************************************************************** + """ + + def test_has_collection(self, connect): + ''' + target: test if the created collection existed + method: create collection, assert the value returned by has_collection method + expected: True + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + connect.create_collection(param) + assert assert_has_collection(connect, collection_name) + + def test_has_collection_ip(self, connect): + ''' + target: test if the created collection existed + method: create collection, assert the value returned by has_collection method + expected: True + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.IP} + connect.create_collection(param) + assert assert_has_collection(connect, collection_name) + + def test_has_collection_jaccard(self, connect): + ''' + target: test if the created collection existed + method: create collection, assert the value returned by has_collection method + expected: True + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 
'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.JACCARD} + connect.create_collection(param) + assert assert_has_collection(connect, collection_name) + + def test_has_collection_hamming(self, connect): + ''' + target: test if the created collection existed + method: create collection, assert the value returned by has_collection method + expected: True + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.HAMMING} + connect.create_collection(param) + assert assert_has_collection(connect, collection_name) + + @pytest.mark.level(2) + def test_has_collection_without_connection(self, collection, dis_connect): + ''' + target: test has collection, without connection + method: calling has collection with correct params, with a disconnected instance + expected: has collection raise exception + ''' + with pytest.raises(Exception) as e: + assert_has_collection(dis_connect, collection) + + def test_has_collection_not_existed(self, connect): + ''' + target: test if collection not created + method: random a collection name, which not existed in db, + assert the value returned by has_collection method + expected: False + ''' + collection_name = gen_unique_str("test_collection") + assert not assert_has_collection(connect, collection_name) + + """ + ****************************************************************** + The following cases are used to test `show_collections` function + ****************************************************************** + """ + + def test_show_collections(self, connect): + ''' + target: test show collections is correct or not, if collection created + method: create collection, assert the value returned by show_collections method is equal to 0 + expected: collection_name in show collections + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': 
collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + connect.create_collection(param) + status, result = connect.show_collections() + assert status.OK() + assert collection_name in result + + def test_show_collections_ip(self, connect): + ''' + target: test show collections is correct or not, if collection created + method: create collection, assert the value returned by show_collections method is equal to 0 + expected: collection_name in show collections + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.IP} + connect.create_collection(param) + status, result = connect.show_collections() + assert status.OK() + assert collection_name in result + + def test_show_collections_jaccard(self, connect): + ''' + target: test show collections is correct or not, if collection created + method: create collection, assert the value returned by show_collections method is equal to 0 + expected: collection_name in show collections + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.JACCARD} + connect.create_collection(param) + status, result = connect.show_collections() + assert status.OK() + assert collection_name in result + + def test_show_collections_hamming(self, connect): + ''' + target: test show collections is correct or not, if collection created + method: create collection, assert the value returned by show_collections method is equal to 0 + expected: collection_name in show collections + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.HAMMING} + connect.create_collection(param) + status, result = 
connect.show_collections() + assert status.OK() + assert collection_name in result + + @pytest.mark.level(2) + def test_show_collections_without_connection(self, dis_connect): + ''' + target: test show_collections, without connection + method: calling show_collections with correct params, with a disconnected instance + expected: show_collections raise exception + ''' + with pytest.raises(Exception) as e: + status = dis_connect.show_collections() + + def test_show_collections_no_collection(self, connect): + ''' + target: test show collections is correct or not, if no collection in db + method: delete all collections, + assert the value returned by show_collections method is equal to [] + expected: the status is ok, and the result is equal to [] + ''' + status, result = connect.show_collections() + if result: + for collection_name in result: + connect.drop_collection(collection_name) + time.sleep(drop_collection_interval_time) + status, result = connect.show_collections() + assert status.OK() + assert len(result) == 0 + + # TODO: enable + @pytest.mark.level(2) + def _test_show_collections_multiprocessing(self, connect, args): + ''' + target: test show collections is correct or not with processes + method: create collection, assert the value returned by show_collections method is equal to 0 + expected: collection_name in show collections + ''' + collection_name = gen_unique_str("test_collection") + uri = "tcp://%s:%s" % (args["ip"], args["port"]) + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + connect.create_collection(param) + + def showcollections(milvus): + status, result = milvus.show_collections() + assert status.OK() + assert collection_name in result + + process_num = 8 + processes = [] + + for i in range(process_num): + milvus = get_milvus(args["handler"]) + milvus.connect(uri=uri) + p = Process(target=showcollections, args=(milvus,)) + processes.append(p) + p.start() + 
for p in processes: + p.join() + + """ + ****************************************************************** + The following cases are used to test `preload_collection` function + ****************************************************************** + """ + + """ + generate valid create_index params + """ + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + if str(connect._cmd("mode")[1]) == "CPU": + if request.param["index_type"] == IndexType.IVF_SQ8H: + pytest.skip("sq8h not support in cpu mode") + if request.param["index_type"] == IndexType.IVF_PQ: + pytest.skip("Skip PQ Temporary") + return request.param + + @pytest.mark.level(1) + def test_preload_collection(self, connect, collection, get_simple_index): + index_param = get_simple_index["index_param"] + index_type = get_simple_index["index_type"] + status, ids = connect.add_vectors(collection, vectors) + status = connect.create_index(collection, index_type, index_param) + status = connect.preload_collection(collection) + assert status.OK() + + @pytest.mark.level(1) + def test_preload_collection_ip(self, connect, ip_collection, get_simple_index): + index_param = get_simple_index["index_param"] + index_type = get_simple_index["index_type"] + status, ids = connect.add_vectors(ip_collection, vectors) + status = connect.create_index(ip_collection, index_type, index_param) + status = connect.preload_collection(ip_collection) + assert status.OK() + + @pytest.mark.level(1) + def test_preload_collection_jaccard(self, connect, jac_collection, get_simple_index): + index_param = get_simple_index["index_param"] + index_type = get_simple_index["index_type"] + status, ids = connect.add_vectors(jac_collection, vectors) + status = connect.create_index(jac_collection, index_type, index_param) + status = connect.preload_collection(jac_collection) + assert status.OK() + + @pytest.mark.level(1) + def test_preload_collection_hamming(self, connect, ham_collection, 
get_simple_index): + index_param = get_simple_index["index_param"] + index_type = get_simple_index["index_type"] + status, ids = connect.add_vectors(ham_collection, vectors) + status = connect.create_index(ham_collection, index_type, index_param) + status = connect.preload_collection(ham_collection) + assert status.OK() + + @pytest.mark.level(2) + def test_preload_collection_not_existed(self, connect, collection, get_simple_index): + index_param = get_simple_index["index_param"] + index_type = get_simple_index["index_type"] + collection_name = gen_unique_str() + status, ids = connect.add_vectors(collection, vectors) + status = connect.create_index(collection, index_type, index_param) + status = connect.preload_collection(collection_name) + assert not status.OK() + + @pytest.mark.level(2) + def test_preload_collection_not_existed_ip(self, connect, ip_collection, get_simple_index): + index_param = get_simple_index["index_param"] + index_type = get_simple_index["index_type"] + collection_name = gen_unique_str() + status, ids = connect.add_vectors(ip_collection, vectors) + status = connect.create_index(ip_collection, index_type, index_param) + status = connect.preload_collection(collection_name) + assert not status.OK() + + @pytest.mark.level(1) + def test_preload_collection_no_vectors(self, connect, collection): + status = connect.preload_collection(collection) + assert status.OK() + + @pytest.mark.level(2) + def test_preload_collection_no_vectors_ip(self, connect, ip_collection): + status = connect.preload_collection(ip_collection) + assert status.OK() + + # TODO: psutils get memory usage + @pytest.mark.level(1) + def test_preload_collection_memory_usage(self, connect, collection): + pass + + +class TestCollectionInvalid(object): + """ + Test creating collection with invalid collection names + """ + @pytest.fixture( + scope="function", + params=gen_invalid_collection_names() + ) + def get_collection_name(self, request): + yield request.param + + @pytest.mark.level(2) 
+ def test_create_collection_with_invalid_collectionname(self, connect, get_collection_name): + collection_name = get_collection_name + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + status = connect.create_collection(param) + assert not status.OK() + + def test_create_collection_with_empty_collectionname(self, connect): + collection_name = '' + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + with pytest.raises(Exception) as e: + status = connect.create_collection(param) + + def test_preload_collection_with_invalid_collectionname(self, connect): + collection_name = '' + with pytest.raises(Exception) as e: + status = connect.preload_collection(collection_name) + + +class TestCreateCollectionDimInvalid(object): + """ + Test creating collection with invalid dimension + """ + @pytest.fixture( + scope="function", + params=gen_invalid_dims() + ) + def get_dim(self, request): + yield request.param + + @pytest.mark.level(2) + @pytest.mark.timeout(5) + def test_create_collection_with_invalid_dimension(self, connect, get_dim): + dimension = get_dim + collection = gen_unique_str("test_create_collection_with_invalid_dimension") + param = {'collection_name': collection, + 'dimension': dimension, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + if isinstance(dimension, int): + status = connect.create_collection(param) + assert not status.OK() + else: + with pytest.raises(Exception) as e: + status = connect.create_collection(param) + + +# TODO: max / min index file size +class TestCreateCollectionIndexSizeInvalid(object): + """ + Test creating collections with invalid index_file_size + """ + @pytest.fixture( + scope="function", + params=gen_invalid_file_sizes() + ) + def get_file_size(self, request): + yield request.param + + @pytest.mark.level(2) + def 
test_create_collection_with_invalid_file_size(self, connect, collection, get_file_size): + file_size = get_file_size + param = {'collection_name': collection, + 'dimension': dim, + 'index_file_size': file_size, + 'metric_type': MetricType.L2} + if isinstance(file_size, int): + status = connect.create_collection(param) + assert not status.OK() + else: + with pytest.raises(Exception) as e: + status = connect.create_collection(param) + + +class TestCreateMetricTypeInvalid(object): + """ + Test creating collections with invalid metric_type + """ + @pytest.fixture( + scope="function", + params=gen_invalid_metric_types() + ) + def get_metric_type(self, request): + yield request.param + + @pytest.mark.level(2) + def test_create_collection_with_invalid_file_size(self, connect, collection, get_metric_type): + metric_type = get_metric_type + param = {'collection_name': collection, + 'dimension': dim, + 'index_file_size': 10, + 'metric_type': metric_type} + with pytest.raises(Exception) as e: + status = connect.create_collection(param) + + +def create_collection(connect, **params): + param = {'collection_name': params["collection_name"], + 'dimension': params["dimension"], + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + status = connect.create_collection(param) + return status + +def search_collection(connect, **params): + status, result = connect.search_vectors( + params["collection_name"], + params["top_k"], + params["query_vectors"], + params={"nprobe": params["nprobe"]}) + return status + +def preload_collection(connect, **params): + status = connect.preload_collection(params["collection_name"]) + return status + +def has(connect, **params): + status, result = connect.has_collection(params["collection_name"]) + return status + +def show(connect, **params): + status, result = connect.show_collections() + return status + +def delete(connect, **params): + status = connect.drop_collection(params["collection_name"]) + return status + +def 
describe(connect, **params): + status, result = connect.describe_collection(params["collection_name"]) + return status + +def rowcount(connect, **params): + status, result = connect.count_collection(params["collection_name"]) + return status + +def create_index(connect, **params): + status = connect.create_index(params["collection_name"], params["index_type"], params["index_param"]) + return status + +func_map = { + # 0:has, + 1:show, + 10:create_collection, + 11:describe, + 12:rowcount, + 13:search_collection, + 14:preload_collection, + 15:create_index, + 30:delete +} + +def gen_sequence(): + raw_seq = func_map.keys() + result = itertools.permutations(raw_seq) + for x in result: + yield x + +class TestCollectionLogic(object): + + @pytest.mark.parametrize("logic_seq", gen_sequence()) + @pytest.mark.level(2) + def test_logic(self, connect, logic_seq): + if self.is_right(logic_seq): + self.execute(logic_seq, connect) + else: + self.execute_with_error(logic_seq, connect) + + def is_right(self, seq): + if sorted(seq) == seq: + return True + + not_created = True + has_deleted = False + for i in range(len(seq)): + if seq[i] > 10 and not_created: + return False + elif seq [i] > 10 and has_deleted: + return False + elif seq[i] == 10: + not_created = False + elif seq[i] == 30: + has_deleted = True + + return True + + def execute(self, logic_seq, connect): + basic_params = self.gen_params() + for i in range(len(logic_seq)): + # logging.getLogger().info(logic_seq[i]) + f = func_map[logic_seq[i]] + status = f(connect, **basic_params) + assert status.OK() + + def execute_with_error(self, logic_seq, connect): + basic_params = self.gen_params() + + error_flag = False + for i in range(len(logic_seq)): + f = func_map[logic_seq[i]] + status = f(connect, **basic_params) + if not status.OK(): + # logging.getLogger().info(logic_seq[i]) + error_flag = True + break + assert error_flag == True + + def gen_params(self): + collection_name = gen_unique_str("test_collection") + top_k = 1 + 
vectors = gen_vectors(2, dim) + param = {'collection_name': collection_name, + 'dimension': dim, + 'metric_type': MetricType.L2, + 'nprobe': 1, + 'top_k': top_k, + 'index_type': IndexType.IVF_SQ8, + 'index_param': { + 'nlist': 16384 + }, + 'query_vectors': vectors} + return param diff --git a/tests/milvus_python_test/test_collection_count.py b/tests/milvus_python_test/test_collection_count.py new file mode 100644 index 0000000000..d235d7bf23 --- /dev/null +++ b/tests/milvus_python_test/test_collection_count.py @@ -0,0 +1,644 @@ +import pdb +import pytest +import logging +import itertools +from time import sleep +import threading +from multiprocessing import Process +from milvus import IndexType, MetricType +from utils import * + +dim = 128 +index_file_size = 10 +add_time_interval = 3 +tag = "1970-01-01" +nb = 6000 + +class TestCollectionCount: + """ + params means different nb, the nb value may trigger merge, or not + """ + @pytest.fixture( + scope="function", + params=[ + 1, + 5000, + 100000, + ], + ) + def add_vectors_nb(self, request): + yield request.param + + """ + generate valid create_index params + """ + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + if str(connect._cmd("mode")[1]) == "CPU": + if request.param["index_type"] == IndexType.IVF_SQ8H: + pytest.skip("sq8h not support in cpu mode") + if request.param["index_type"] == IndexType.IVF_PQ: + pytest.skip("Skip PQ Temporary") + return request.param + + def test_collection_rows_count(self, connect, collection, add_vectors_nb): + ''' + target: test collection rows_count is correct or not + method: create collection and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nb = add_vectors_nb + vectors = gen_vectors(nb, dim) + res = connect.add_vectors(collection_name=collection, records=vectors) + 
connect.flush([collection]) + status, res = connect.count_collection(collection) + assert res == nb + + def test_collection_rows_count_partition(self, connect, collection, add_vectors_nb): + ''' + target: test collection rows_count is correct or not + method: create collection, create partition and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nb = add_vectors_nb + vectors = gen_vectors(nb, dim) + status = connect.create_partition(collection, tag) + assert status.OK() + res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag) + connect.flush([collection]) + status, res = connect.count_collection(collection) + assert res == nb + + def test_collection_rows_count_multi_partitions_A(self, connect, collection, add_vectors_nb): + ''' + target: test collection rows_count is correct or not + method: create collection, create partitions and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + new_tag = "new_tag" + nb = add_vectors_nb + vectors = gen_vectors(nb, dim) + status = connect.create_partition(collection, tag) + status = connect.create_partition(collection, new_tag) + assert status.OK() + res = connect.add_vectors(collection_name=collection, records=vectors) + connect.flush([collection]) + status, res = connect.count_collection(collection) + assert res == nb + + def test_collection_rows_count_multi_partitions_B(self, connect, collection, add_vectors_nb): + ''' + target: test collection rows_count is correct or not + method: create collection, create partitions and add vectors in one of the partitions, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + new_tag = "new_tag" + nb = add_vectors_nb + vectors = 
gen_vectors(nb, dim) + status = connect.create_partition(collection, tag) + status = connect.create_partition(collection, new_tag) + assert status.OK() + res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag) + connect.flush([collection]) + status, res = connect.count_collection(collection) + assert res == nb + + def test_collection_rows_count_multi_partitions_C(self, connect, collection, add_vectors_nb): + ''' + target: test collection rows_count is correct or not + method: create collection, create partitions and add vectors in one of the partitions, + assert the value returned by count_collection method is equal to length of vectors + expected: the collection count is equal to the length of vectors + ''' + new_tag = "new_tag" + nb = add_vectors_nb + vectors = gen_vectors(nb, dim) + status = connect.create_partition(collection, tag) + status = connect.create_partition(collection, new_tag) + assert status.OK() + res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag) + res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=new_tag) + connect.flush([collection]) + status, res = connect.count_collection(collection) + assert res == nb * 2 + + def test_collection_rows_count_after_index_created(self, connect, collection, get_simple_index): + ''' + target: test count_collection, after index have been created + method: add vectors in db, and create index, then calling count_collection with correct params + expected: count_collection raise exception + ''' + index_param = get_simple_index["index_param"] + index_type = get_simple_index["index_type"] + nb = 100 + vectors = gen_vectors(nb, dim) + res = connect.add_vectors(collection_name=collection, records=vectors) + connect.flush([collection]) + connect.create_index(collection, index_type, index_param) + status, res = connect.count_collection(collection) + assert res == nb + + @pytest.mark.level(2) + def 
test_count_without_connection(self, collection, dis_connect): + ''' + target: test count_collection, without connection + method: calling count_collection with correct params, with a disconnected instance + expected: count_collection raise exception + ''' + with pytest.raises(Exception) as e: + status = dis_connect.count_collection(collection) + + def test_collection_rows_count_no_vectors(self, connect, collection): + ''' + target: test collection rows_count is correct or not, if collection is empty + method: create collection and no vectors in it, + assert the value returned by count_collection method is equal to 0 + expected: the count is equal to 0 + ''' + collection_name = gen_unique_str() + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size} + connect.create_collection(param) + status, res = connect.count_collection(collection) + assert res == 0 + + # TODO: enable + @pytest.mark.level(2) + @pytest.mark.timeout(20) + def _test_collection_rows_count_multiprocessing(self, connect, collection, args): + ''' + target: test collection rows_count is correct or not with multiprocess + method: create collection and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nq = 2 + uri = "tcp://%s:%s" % (args["ip"], args["port"]) + vectors = gen_vectors(nq, dim) + res = connect.add_vectors(collection_name=collection, records=vectors) + time.sleep(add_time_interval) + + def rows_count(milvus): + status, res = milvus.count_collection(collection) + logging.getLogger().info(status) + assert res == nq + + process_num = 8 + processes = [] + for i in range(process_num): + milvus = get_milvus(args["handler"]) + milvus.connect(uri=uri) + p = Process(target=rows_count, args=(milvus, )) + processes.append(p) + p.start() + logging.getLogger().info(p) + for p in processes: + p.join() + + def 
test_collection_rows_count_multi_collections(self, connect): + ''' + target: test collection rows_count is correct or not with multiple collections of L2 + method: create collection and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nq = 100 + vectors = gen_vectors(nq, dim) + collection_list = [] + for i in range(20): + collection_name = gen_unique_str() + collection_list.append(collection_name) + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + connect.create_collection(param) + res = connect.add_vectors(collection_name=collection_name, records=vectors) + connect.flush(collection_list) + for i in range(20): + status, res = connect.count_collection(collection_list[i]) + assert status.OK() + assert res == nq + + +class TestCollectionCountIP: + """ + params means different nb, the nb value may trigger merge, or not + """ + + @pytest.fixture( + scope="function", + params=[ + 1, + 5000, + 100000, + ], + ) + def add_vectors_nb(self, request): + yield request.param + + """ + generate valid create_index params + """ + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + if str(connect._cmd("mode")[1]) == "CPU": + if request.param["index_type"] == IndexType.IVF_SQ8H: + pytest.skip("sq8h not support in CPU mode") + if request.param["index_type"] == IndexType.IVF_PQ: + pytest.skip("Skip PQ Temporary") + return request.param + + def test_collection_rows_count(self, connect, ip_collection, add_vectors_nb): + ''' + target: test collection rows_count is correct or not + method: create collection and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nb = add_vectors_nb + vectors = 
gen_vectors(nb, dim) + res = connect.add_vectors(collection_name=ip_collection, records=vectors) + connect.flush([ip_collection]) + status, res = connect.count_collection(ip_collection) + assert res == nb + + def test_collection_rows_count_after_index_created(self, connect, ip_collection, get_simple_index): + ''' + target: test count_collection, after index have been created + method: add vectors in db, and create index, then calling count_collection with correct params + expected: count_collection raise exception + ''' + index_param = get_simple_index["index_param"] + index_type = get_simple_index["index_type"] + nb = 100 + vectors = gen_vectors(nb, dim) + res = connect.add_vectors(collection_name=ip_collection, records=vectors) + connect.flush([ip_collection]) + connect.create_index(ip_collection, index_type, index_param) + status, res = connect.count_collection(ip_collection) + assert res == nb + + @pytest.mark.level(2) + def test_count_without_connection(self, ip_collection, dis_connect): + ''' + target: test count_collection, without connection + method: calling count_collection with correct params, with a disconnected instance + expected: count_collection raise exception + ''' + with pytest.raises(Exception) as e: + status = dis_connect.count_collection(ip_collection) + + def test_collection_rows_count_no_vectors(self, connect, ip_collection): + ''' + target: test collection rows_count is correct or not, if collection is empty + method: create collection and no vectors in it, + assert the value returned by count_collection method is equal to 0 + expected: the count is equal to 0 + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size} + connect.create_collection(param) + status, res = connect.count_collection(ip_collection) + assert res == 0 + + # TODO: enable + @pytest.mark.timeout(60) + def _test_collection_rows_count_multiprocessing(self, connect, 
ip_collection, args): + ''' + target: test collection rows_count is correct or not with multiprocess + method: create collection and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nq = 2 + uri = "tcp://%s:%s" % (args["ip"], args["port"]) + vectors = gen_vectors(nq, dim) + res = connect.add_vectors(collection_name=ip_collection, records=vectors) + time.sleep(add_time_interval) + + def rows_count(milvus): + status, res = milvus.count_collection(ip_collection) + logging.getLogger().info(status) + assert res == nq + + process_num = 8 + processes = [] + for i in range(process_num): + milvus = get_milvus(args["handler"]) + milvus.connect(uri=uri) + p = Process(target=rows_count, args=(milvus,)) + processes.append(p) + p.start() + logging.getLogger().info(p) + for p in processes: + p.join() + + def test_collection_rows_count_multi_collections(self, connect): + ''' + target: test collection rows_count is correct or not with multiple collections of IP + method: create collection and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nq = 100 + vectors = gen_vectors(nq, dim) + collection_list = [] + for i in range(20): + collection_name = gen_unique_str('test_collection_rows_count_multi_collections') + collection_list.append(collection_name) + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.IP} + connect.create_collection(param) + res = connect.add_vectors(collection_name=collection_name, records=vectors) + connect.flush(collection_list) + for i in range(20): + status, res = connect.count_collection(collection_list[i]) + assert status.OK() + assert res == nq + + +class TestCollectionCountJAC: + """ + params means different nb, the nb value may trigger merge, 
or not + """ + + @pytest.fixture( + scope="function", + params=[ + 1, + 5000, + 100000, + ], + ) + def add_vectors_nb(self, request): + yield request.param + + """ + generate valid create_index params + """ + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_jaccard_index(self, request, connect): + logging.getLogger().info(request.param) + if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT: + return request.param + else: + pytest.skip("Skip index Temporary") + + def test_collection_rows_count(self, connect, jac_collection, add_vectors_nb): + ''' + target: test collection rows_count is correct or not + method: create collection and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nb = add_vectors_nb + tmp, vectors = gen_binary_vectors(nb, dim) + res = connect.add_vectors(collection_name=jac_collection, records=vectors) + connect.flush([jac_collection]) + status, res = connect.count_collection(jac_collection) + assert res == nb + + def test_collection_rows_count_after_index_created(self, connect, jac_collection, get_jaccard_index): + ''' + target: test count_collection, after index have been created + method: add vectors in db, and create index, then calling count_collection with correct params + expected: count_collection raise exception + ''' + nb = 100 + index_param = get_jaccard_index["index_param"] + index_type = get_jaccard_index["index_type"] + tmp, vectors = gen_binary_vectors(nb, dim) + res = connect.add_vectors(collection_name=jac_collection, records=vectors) + connect.flush([jac_collection]) + connect.create_index(jac_collection, index_type, index_param) + status, res = connect.count_collection(jac_collection) + assert res == nb + + @pytest.mark.level(2) + def test_count_without_connection(self, jac_collection, dis_connect): + ''' + target: test 
count_collection, without connection + method: calling count_collection with correct params, with a disconnected instance + expected: count_collection raise exception + ''' + with pytest.raises(Exception) as e: + status = dis_connect.count_collection(jac_collection) + + def test_collection_rows_count_no_vectors(self, connect, jac_collection): + ''' + target: test collection rows_count is correct or not, if collection is empty + method: create collection and no vectors in it, + assert the value returned by count_collection method is equal to 0 + expected: the count is equal to 0 + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size} + connect.create_collection(param) + status, res = connect.count_collection(jac_collection) + assert res == 0 + + def test_collection_rows_count_multi_collections(self, connect): + ''' + target: test collection rows_count is correct or not with multiple collections of IP + method: create collection and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nq = 100 + tmp, vectors = gen_binary_vectors(nq, dim) + collection_list = [] + for i in range(20): + collection_name = gen_unique_str('test_collection_rows_count_multi_collections') + collection_list.append(collection_name) + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.JACCARD} + connect.create_collection(param) + res = connect.add_vectors(collection_name=collection_name, records=vectors) + connect.flush(collection_list) + for i in range(20): + status, res = connect.count_collection(collection_list[i]) + assert status.OK() + assert res == nq + +class TestCollectionCountHAM: + """ + params means different nb, the nb value may trigger merge, or not + """ + + @pytest.fixture( + 
scope="function", + params=[ + 1, + 5000, + 100000, + ], + ) + def add_vectors_nb(self, request): + yield request.param + + """ + generate valid create_index params + """ + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_hamming_index(self, request, connect): + logging.getLogger().info(request.param) + if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT: + return request.param + else: + pytest.skip("Skip index Temporary") + + def test_collection_rows_count(self, connect, ham_collection, add_vectors_nb): + ''' + target: test collection rows_count is correct or not + method: create collection and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nb = add_vectors_nb + tmp, vectors = gen_binary_vectors(nb, dim) + res = connect.add_vectors(collection_name=ham_collection, records=vectors) + connect.flush([ham_collection]) + status, res = connect.count_collection(ham_collection) + assert res == nb + + def test_collection_rows_count_after_index_created(self, connect, ham_collection, get_hamming_index): + ''' + target: test count_collection, after index have been created + method: add vectors in db, and create index, then calling count_collection with correct params + expected: count_collection raise exception + ''' + nb = 100 + index_type = get_hamming_index["index_type"] + index_param = get_hamming_index["index_param"] + tmp, vectors = gen_binary_vectors(nb, dim) + res = connect.add_vectors(collection_name=ham_collection, records=vectors) + connect.flush([ham_collection]) + connect.create_index(ham_collection, index_type, index_param) + status, res = connect.count_collection(ham_collection) + assert res == nb + + @pytest.mark.level(2) + def test_count_without_connection(self, ham_collection, dis_connect): + ''' + target: test count_collection, without connection + method: 
calling count_collection with correct params, with a disconnected instance + expected: count_collection raise exception + ''' + with pytest.raises(Exception) as e: + status = dis_connect.count_collection(ham_collection) + + def test_collection_rows_count_no_vectors(self, connect, ham_collection): + ''' + target: test collection rows_count is correct or not, if collection is empty + method: create collection and no vectors in it, + assert the value returned by count_collection method is equal to 0 + expected: the count is equal to 0 + ''' + collection_name = gen_unique_str("test_collection") + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size} + connect.create_collection(param) + status, res = connect.count_collection(ham_collection) + assert res == 0 + + def test_collection_rows_count_multi_collections(self, connect): + ''' + target: test collection rows_count is correct or not with multiple collections of IP + method: create collection and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nq = 100 + tmp, vectors = gen_binary_vectors(nq, dim) + collection_list = [] + for i in range(20): + collection_name = gen_unique_str('test_collection_rows_count_multi_collections') + collection_list.append(collection_name) + param = {'collection_name': collection_name, + 'dimension': dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.HAMMING} + connect.create_collection(param) + res = connect.add_vectors(collection_name=collection_name, records=vectors) + connect.flush(collection_list) + for i in range(20): + status, res = connect.count_collection(collection_list[i]) + assert status.OK() + assert res == nq + + +class TestCollectionCountTANIMOTO: + """ + params means different nb, the nb value may trigger merge, or not + """ + + @pytest.fixture( + scope="function", + params=[ + 1, + 5000, + 
100000, + ], + ) + def add_vectors_nb(self, request): + yield request.param + + """ + generate valid create_index params + """ + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_tanimoto_index(self, request, connect): + logging.getLogger().info(request.param) + if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT: + return request.param + else: + pytest.skip("Skip index Temporary") + + def test_collection_rows_count(self, connect, tanimoto_collection, add_vectors_nb): + ''' + target: test collection rows_count is correct or not + method: create collection and add vectors in it, + assert the value returned by count_collection method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + nb = add_vectors_nb + tmp, vectors = gen_binary_vectors(nb, dim) + res = connect.add_vectors(collection_name=tanimoto_collection, records=vectors) + connect.flush([tanimoto_collection]) + status, res = connect.count_collection(tanimoto_collection) + assert status.OK() + assert res == nb diff --git a/tests/milvus_python_test/test_table_info.py b/tests/milvus_python_test/test_collection_info.py similarity index 54% rename from tests/milvus_python_test/test_table_info.py rename to tests/milvus_python_test/test_collection_info.py index 0b68130c92..c0574d1827 100644 --- a/tests/milvus_python_test/test_table_info.py +++ b/tests/milvus_python_test/test_collection_info.py @@ -18,7 +18,7 @@ nb = 6000 nlist = 1024 -class TestTableInfoBase: +class TestCollectionInfoBase: def index_string_convert(self, index_string, index_type): if index_string == "IDMAP" and index_type == IndexType.FLAT: return True @@ -30,87 +30,87 @@ class TestTableInfoBase: """ ****************************************************************** - The following cases are used to test `table_info` function + The following cases are used to test `collection_info` function 
****************************************************************** """ @pytest.mark.timeout(INFO_TIMEOUT) - def test_get_table_info_name_None(self, connect, table): + def test_get_collection_info_name_None(self, connect, collection): ''' - target: get table info where table name is None - method: call table_info with the table_name: None + target: get collection info where collection name is None + method: call collection_info with the collection_name: None expected: status not ok ''' - table_name = None - status, info = connect.table_info(table_name) + collection_name = None + status, info = connect.collection_info(collection_name) assert not status.OK() @pytest.mark.timeout(INFO_TIMEOUT) - def test_get_table_info_name_not_existed(self, connect, table): + def test_get_collection_info_name_not_existed(self, connect, collection): ''' - target: get table info where table name does not exist - method: call table_info with a random table_name, which is not in db + target: get collection info where collection name does not exist + method: call collection_info with a random collection_name, which is not in db expected: status not ok ''' - table_name = gen_unique_str("not_existed_table") - status, info = connect.table_info(table_name) + collection_name = gen_unique_str("not_existed_collection") + status, info = connect.collection_info(collection_name) assert not status.OK() @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) - def get_table_name(self, request): + def get_collection_name(self, request): yield request.param @pytest.mark.timeout(INFO_TIMEOUT) - def test_get_table_info_name_invalid(self, connect, get_table_name): + def test_get_collection_info_name_invalid(self, connect, get_collection_name): ''' - target: get table info where table name is invalid - method: call table_info with invalid table_name + target: get collection info where collection name is invalid + method: call collection_info with 
invalid collection_name expected: status not ok ''' - table_name = get_table_name - status, info = connect.table_info(table_name) + collection_name = get_collection_name + status, info = connect.collection_info(collection_name) assert not status.OK() @pytest.mark.timeout(INFO_TIMEOUT) - def test_get_table_info_table_row_count(self, connect, table): + def test_get_collection_info_collection_row_count(self, connect, collection): ''' - target: get row count with table_info - method: add and delete vectors, check count in table info + target: get row count with collection_info + method: add and delete vectors, check count in collection info expected: status ok, count as expected ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() assert info.count == nb # delete a few vectors delete_ids = [ids[0], ids[-1]] - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() assert info.count == nb - 2 @pytest.mark.timeout(INFO_TIMEOUT) - def test_get_table_info_partition_stats_A(self, connect, table): + def test_get_collection_info_partition_stats_A(self, connect, collection): ''' - target: get partition info in a table - method: no partition, call table_info and check partition_stats + target: get partition info in a collection + method: no partition, call collection_info and check partition_stats expected: status ok, "_default" partition is listed ''' vectors = gen_vector(nb, dim) - status, ids = 
connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() logging.getLogger().info(info) assert len(info.partitions_stat) == 1 @@ -119,19 +119,19 @@ class TestTableInfoBase: @pytest.mark.timeout(INFO_TIMEOUT) - def test_get_table_info_partition_stats_B(self, connect, table): + def test_get_collection_info_partition_stats_B(self, connect, collection): ''' - target: get partition info in a table - method: call table_info after partition created and check partition_stats + target: get partition info in a collection + method: call collection_info after partition created and check partition_stats expected: status ok, vectors added to partition ''' vectors = gen_vectors(nb, dim) - status = connect.create_partition(table, tag) - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) + status = connect.create_partition(collection, tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() logging.getLogger().info(info) assert len(info.partitions_stat) == 2 @@ -139,23 +139,23 @@ class TestTableInfoBase: assert info.partitions_stat[1].count == nb @pytest.mark.timeout(INFO_TIMEOUT) - def test_get_table_info_partition_stats_C(self, connect, table): + def test_get_collection_info_partition_stats_C(self, connect, collection): ''' - target: get partition info in a table - method: create two partitions, add vectors in one of the partitions, call table_info and check + target: get partition info in a collection + method: create two partitions, add vectors in one of the 
partitions, call collection_info and check expected: status ok, vectors added to one partition but not the other ''' new_tag = "new_tag" vectors = gen_vectors(nb, dim) - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() - status = connect.create_partition(table, new_tag) + status = connect.create_partition(collection, new_tag) assert status.OK() - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() logging.getLogger().info(info) for partition in info.partitions_stat: @@ -165,25 +165,25 @@ class TestTableInfoBase: assert partition.count == 0 @pytest.mark.timeout(INFO_TIMEOUT) - def test_get_table_info_partition_stats_D(self, connect, table): + def test_get_collection_info_partition_stats_D(self, connect, collection): ''' - target: get partition info in a table - method: create two partitions, add vectors in both partitions, call table_info and check + target: get partition info in a collection + method: create two partitions, add vectors in both partitions, call collection_info and check expected: status ok, vectors added to both partitions ''' new_tag = "new_tag" vectors = gen_vectors(nb, dim) - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() - status = connect.create_partition(table, new_tag) + status = connect.create_partition(collection, new_tag) assert status.OK() - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) assert status.OK() - status, ids = connect.add_vectors(table, vectors, partition_tag=new_tag) + status, 
ids = connect.add_vectors(collection, vectors, partition_tag=new_tag) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() assert info.count == nb * 2 for partition in info.partitions_stat: @@ -205,24 +205,24 @@ class TestTableInfoBase: return request.param @pytest.mark.timeout(INFO_TIMEOUT) - def test_get_table_info_after_index_created(self, connect, table, get_simple_index): + def test_get_collection_info_after_index_created(self, connect, collection, get_simple_index): ''' - target: test table info after index created - method: create table, add vectors, create index and call table_info + target: test collection info after index created + method: create collection, add vectors, create index and call collection_info expected: status ok, index created and shown in segments_stat ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() logging.getLogger().info(info) index_string = info.partitions_stat[0].segments_stat[0].index_name @@ -231,24 +231,24 @@ class TestTableInfoBase: assert nb == info.partitions_stat[0].segments_stat[0].count @pytest.mark.timeout(INFO_TIMEOUT) - def test_get_table_info_after_create_same_index_repeatedly(self, connect, table, 
get_simple_index): + def test_get_collection_info_after_create_same_index_repeatedly(self, connect, collection, get_simple_index): ''' - target: test table info after index created repeatedly - method: create table, add vectors, create index and call table_info multiple times + target: test collection info after index created repeatedly + method: create collection, add vectors, create index and call collection_info multiple times expected: status ok, index info shown in segments_stat ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.create_index(table, index_type, index_param) - status = connect.create_index(table, index_type, index_param) - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() logging.getLogger().info(info) index_string = info.partitions_stat[0].segments_stat[0].index_name @@ -257,22 +257,22 @@ class TestTableInfoBase: assert nb == info.partitions_stat[0].segments_stat[0].count @pytest.mark.timeout(INFO_TIMEOUT) - def test_get_table_info_after_create_different_index_repeatedly(self, connect, table, get_simple_index): + def test_get_collection_info_after_create_different_index_repeatedly(self, connect, collection, get_simple_index): ''' - target: test table info after index created repeatedly - method: create table, add vectors, create index and call table_info multiple times + target: test 
collection info after index created repeatedly + method: create collection, add vectors, create index and call collection_info multiple times expected: status ok, index info shown in segments_stat ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() index_param = {"nlist": nlist} for index_type in [IndexType.FLAT, IndexType.IVFLAT, IndexType.IVF_SQ8]: - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() logging.getLogger().info(info) index_string = info.partitions_stat[0].segments_stat[0].index_name diff --git a/tests/milvus_python_test/test_compact.py b/tests/milvus_python_test/test_compact.py index a523c56714..1ff4b50613 100644 --- a/tests/milvus_python_test/test_compact.py +++ b/tests/milvus_python_test/test_compact.py @@ -23,127 +23,127 @@ class TestCompactBase: ****************************************************************** """ @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_compact_table_name_None(self, connect, table): + def test_compact_collection_name_None(self, connect, collection): ''' - target: compact table where table name is None - method: compact with the table_name: None + target: compact collection where collection name is None + method: compact with the collection_name: None expected: exception raised ''' - table_name = None + collection_name = None with pytest.raises(Exception) as e: - status = connect.compact(table_name) + status = connect.compact(collection_name) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_compact_table_name_not_existed(self, connect, table): + def test_compact_collection_name_not_existed(self, connect, 
collection): ''' - target: compact table not existed - method: compact with a random table_name, which is not in db + target: compact collection not existed + method: compact with a random collection_name, which is not in db expected: status not ok ''' - table_name = gen_unique_str("not_existed_table") - status = connect.compact(table_name) + collection_name = gen_unique_str("not_existed_collection") + status = connect.compact(collection_name) assert not status.OK() @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) - def get_table_name(self, request): + def get_collection_name(self, request): yield request.param @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_compact_table_name_invalid(self, connect, get_table_name): + def test_compact_collection_name_invalid(self, connect, get_collection_name): ''' - target: compact table with invalid name - method: compact with invalid table_name + target: compact collection with invalid name + method: compact with invalid collection_name expected: status not ok ''' - table_name = get_table_name - status = connect.compact(table_name) + collection_name = get_collection_name + status = connect.compact(collection_name) assert not status.OK() @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vector_and_compact(self, connect, table): + def test_add_vector_and_compact(self, connect, collection): ''' target: test add vector and compact - method: add vector and compact table + method: add vector and compact collection expected: status ok, vector added ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(table) + # get collection info before compact + status, info = connect.collection_info(collection) assert 
status.OK() logging.getLogger().info(info) size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(table) + # get collection info after compact + status, info = connect.collection_info(collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_and_compact(self, connect, table): + def test_add_vectors_and_compact(self, connect, collection): ''' target: test add vectors and compact - method: add vectors and compact table + method: add vectors and compact collection expected: status ok, vectors added ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(table) + # get collection info before compact + status, info = connect.collection_info(collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(table) + # get collection info after compact + status, info = connect.collection_info(collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_delete_part_and_compact(self, connect, table): + def 
test_add_vectors_delete_part_and_compact(self, connect, collection): ''' target: test add vectors, delete part of them and compact - method: add vectors, delete a few and compact table + method: add vectors, delete a few and compact collection expected: status ok, data size is smaller after compact ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(table) + # get collection info before compact + status, info = connect.collection_info(collection) assert status.OK() logging.getLogger().info(info.partitions_stat) size_before = info.partitions_stat[0].segments_stat[0].data_size logging.getLogger().info(size_before) - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(table) + # get collection info after compact + status, info = connect.collection_info(collection) assert status.OK() logging.getLogger().info(info.partitions_stat) size_after = info.partitions_stat[0].segments_stat[0].data_size @@ -151,30 +151,30 @@ class TestCompactBase: assert(size_before > size_after) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_delete_all_and_compact(self, connect, table): + def test_add_vectors_delete_all_and_compact(self, connect, collection): ''' target: test add vectors, delete them and compact - method: add vectors, delete all and compact table - expected: 
status ok, no data size in table info because table is empty + method: add vectors, delete all and compact collection + expected: status ok, no data size in collection info because collection is empty ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.delete_by_id(table, ids) + status = connect.delete_by_id(collection, ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(table) + # get collection info before compact + status, info = connect.collection_info(collection) assert status.OK() - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(table) + # get collection info after compact + status, info = connect.collection_info(collection) assert status.OK() logging.getLogger().info(info.partitions_stat) assert(len(info.partitions_stat[0].segments_stat) == 0) @@ -191,9 +191,9 @@ class TestCompactBase: pytest.skip("Only support CPU mode") return request.param - def test_compact_after_index_created(self, connect, table, get_simple_index): + def test_compact_after_index_created(self, connect, collection, get_simple_index): ''' - target: test compact table after index created + target: test compact collection after index created method: add vectors, create index, delete part of vectors and compact expected: status ok, index description no change, data size smaller after compact ''' @@ -201,256 +201,256 @@ class TestCompactBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vectors = 
gen_vector(count, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(table) + # get collection info before compact + status, info = connect.collection_info(collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size logging.getLogger().info(info.partitions_stat) delete_ids = [ids[0], ids[-1]] - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(table) + # get collection info after compact + status, info = connect.collection_info(collection) assert status.OK() logging.getLogger().info(info.partitions_stat) size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before > size_after) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vector_and_compact_twice(self, connect, table): + def test_add_vector_and_compact_twice(self, connect, collection): ''' target: test add vector and compact twice - method: add vector and compact table twice + method: add vector and compact collection twice expected: status ok, data size no change ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, 
ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(table) + # get collection info before compact + status, info = connect.collection_info(collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(table) + # get collection info after compact + status, info = connect.collection_info(collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info after compact twice - status, info = connect.table_info(table) + # get collection info after compact twice + status, info = connect.collection_info(collection) assert status.OK() size_after_twice = info.partitions_stat[0].segments_stat[0].data_size assert(size_after == size_after_twice) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_delete_part_and_compact_twice(self, connect, table): + def test_add_vectors_delete_part_and_compact_twice(self, connect, collection): ''' target: test add vectors, delete part of them and compact twice - method: add vectors, delete part and compact table twice + method: add vectors, delete part and compact collection twice expected: status ok, data size smaller after first compact, no change after second ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() 
- status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(table) + # get collection info before compact + status, info = connect.collection_info(collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(table) + # get collection info after compact + status, info = connect.collection_info(collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before > size_after) - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info after compact twice - status, info = connect.table_info(table) + # get collection info after compact twice + status, info = connect.collection_info(collection) assert status.OK() size_after_twice = info.partitions_stat[0].segments_stat[0].data_size assert(size_after == size_after_twice) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_compact_multi_tables(self, connect): + def test_compact_multi_collections(self, connect): ''' - target: test compact works or not with multiple tables - method: create 50 tables, add vectors into them and compact in turn + target: test compact works or not with multiple collections + method: create 50 collections, add vectors into them and compact in turn expected: status ok ''' nq = 100 - 
num_tables = 50 + num_collections = 50 vectors = gen_vectors(nq, dim) - table_list = [] - for i in range(num_tables): - table_name = gen_unique_str("test_compact_multi_table_%d" % i) - table_list.append(table_name) - param = {'table_name': table_name, + collection_list = [] + for i in range(num_collections): + collection_name = gen_unique_str("test_compact_multi_collection_%d" % i) + collection_list.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - connect.create_table(param) + connect.create_collection(param) time.sleep(6) - for i in range(num_tables): - status, ids = connect.add_vectors(table_name=table_list[i], records=vectors) + for i in range(num_collections): + status, ids = connect.add_vectors(collection_name=collection_list[i], records=vectors) assert status.OK() - status = connect.compact(table_list[i]) + status = connect.compact(collection_list[i]) assert status.OK() @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vector_after_compact(self, connect, table): + def test_add_vector_after_compact(self, connect, collection): ''' target: test add vector after compact method: after compact operation, add vector expected: status ok, vector added ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(table) + # get collection info before compact + status, info = connect.collection_info(collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info after 
compact - status, info = connect.table_info(table) + # get collection info after compact + status, info = connect.collection_info(collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_index_creation_after_compact(self, connect, table, get_simple_index): + def test_index_creation_after_compact(self, connect, collection, get_simple_index): ''' target: test index creation after compact method: after compact operation, create index expected: status ok, index description no change ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() - status, result = connect.describe_index(table) - assert result._table_name == table + status, result = connect.describe_index(collection) + assert result._collection_name == collection assert result._index_type == index_type @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_delete_vectors_after_compact(self, connect, table): + def test_delete_vectors_after_compact(self, connect, collection): ''' target: test delete vectors after compact method: after compact operation, delete vectors expected: status ok, vectors deleted ''' vectors = gen_vector(nb, 
dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.delete_by_id(table, ids) + status = connect.delete_by_id(collection, ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_search_after_compact(self, connect, table): + def test_search_after_compact(self, connect, collection): ''' target: test search after compact method: after compact operation, search vector expected: status ok ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.compact(table) + status = connect.compact(collection) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() query_vecs = [vectors[0]] - status, res = connect.search_vectors(table, top_k, query_records=query_vecs) + status, res = connect.search_vectors(collection, top_k, query_records=query_vecs) logging.getLogger().info(res) assert status.OK() - def test_compact_server_crashed_recovery(self, connect, table): + def test_compact_server_crashed_recovery(self, connect, collection): ''' target: test compact when server crashed unexpectedly and restarted - method: add vectors, delete and compact table; server stopped and restarted during compact + method: add vectors, delete and compact collection; server stopped and restarted during compact expected: status ok, request recovered ''' vectors = 
gen_vector(nb * 100, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() delete_ids = ids[0:1000] - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() # start to compact, kill and restart server logging.getLogger().info("compact starting...") - status = connect.compact(table) + status = connect.compact(collection) # pdb.set_trace() assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(table) + # get collection info after compact + status, info = connect.collection_info(collection) assert status.OK() assert info.partitions_stat[0].count == nb * 100 - 1000 @@ -462,86 +462,86 @@ class TestCompactJAC: ****************************************************************** """ @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vector_and_compact(self, connect, jac_table): + def test_add_vector_and_compact(self, connect, jac_collection): ''' target: test add vector and compact - method: add vector and compact table + method: add vector and compact collection expected: status ok, vector added ''' tmp, vector = gen_binary_vectors(1, dim) - status, ids = connect.add_vectors(jac_table, vector) + status, ids = connect.add_vectors(jac_collection, vector) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(jac_table) + # get collection info before compact + status, info = connect.collection_info(jac_collection) assert status.OK() size_before = 
info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(jac_table) + status = connect.compact(jac_collection) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(jac_table) + # get collection info after compact + status, info = connect.collection_info(jac_collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_and_compact(self, connect, jac_table): + def test_add_vectors_and_compact(self, connect, jac_collection): ''' target: test add vectors and compact - method: add vectors and compact table + method: add vectors and compact collection expected: status ok, vectors added ''' tmp, vectors = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(jac_table) + # get collection info before compact + status, info = connect.collection_info(jac_collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(jac_table) + status = connect.compact(jac_collection) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(jac_table) + # get collection info after compact + status, info = connect.collection_info(jac_collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_delete_part_and_compact(self, 
connect, jac_table): + def test_add_vectors_delete_part_and_compact(self, connect, jac_collection): ''' target: test add vectors, delete part of them and compact - method: add vectors, delete a few and compact table + method: add vectors, delete a few and compact collection expected: status ok, data size is smaller after compact ''' tmp, vectors = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] - status = connect.delete_by_id(jac_table, delete_ids) + status = connect.delete_by_id(jac_collection, delete_ids) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(jac_table) + # get collection info before compact + status, info = connect.collection_info(jac_collection) assert status.OK() logging.getLogger().info(info.partitions_stat) size_before = info.partitions_stat[0].segments_stat[0].data_size logging.getLogger().info(size_before) - status = connect.compact(jac_table) + status = connect.compact(jac_collection) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(jac_table) + # get collection info after compact + status, info = connect.collection_info(jac_collection) assert status.OK() logging.getLogger().info(info.partitions_stat) size_after = info.partitions_stat[0].segments_stat[0].data_size @@ -549,207 +549,207 @@ class TestCompactJAC: assert(size_before > size_after) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_delete_all_and_compact(self, connect, jac_table): + def test_add_vectors_delete_all_and_compact(self, connect, jac_collection): 
''' target: test add vectors, delete them and compact - method: add vectors, delete all and compact table - expected: status ok, no data size in table info because table is empty + method: add vectors, delete all and compact collection + expected: status ok, no data size in collection info because collection is empty ''' tmp, vectors = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status = connect.delete_by_id(jac_table, ids) + status = connect.delete_by_id(jac_collection, ids) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(jac_table) + # get collection info before compact + status, info = connect.collection_info(jac_collection) assert status.OK() - status = connect.compact(jac_table) + status = connect.compact(jac_collection) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(jac_table) + # get collection info after compact + status, info = connect.collection_info(jac_collection) assert status.OK() logging.getLogger().info(info.partitions_stat) assert(len(info.partitions_stat[0].segments_stat) == 0) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vector_and_compact_twice(self, connect, jac_table): + def test_add_vector_and_compact_twice(self, connect, jac_collection): ''' target: test add vector and compact twice - method: add vector and compact table twice + method: add vector and compact collection twice expected: status ok ''' tmp, vector = gen_binary_vectors(1, dim) - status, ids = connect.add_vectors(jac_table, vector) + status, ids = 
connect.add_vectors(jac_collection, vector) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(jac_table) + # get collection info before compact + status, info = connect.collection_info(jac_collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(jac_table) + status = connect.compact(jac_collection) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(jac_table) + # get collection info after compact + status, info = connect.collection_info(jac_collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) - status = connect.compact(jac_table) + status = connect.compact(jac_collection) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info after compact twice - status, info = connect.table_info(jac_table) + # get collection info after compact twice + status, info = connect.collection_info(jac_collection) assert status.OK() size_after_twice = info.partitions_stat[0].segments_stat[0].data_size assert(size_after == size_after_twice) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_delete_part_and_compact_twice(self, connect, jac_table): + def test_add_vectors_delete_part_and_compact_twice(self, connect, jac_collection): ''' target: test add vectors, delete part of them and compact twice - method: add vectors, delete part and compact table twice + method: add vectors, delete part and compact collection twice expected: status ok, data size smaller after first compact, no change after second ''' tmp, vectors = gen_binary_vectors(nb, dim) - status, ids = 
connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] - status = connect.delete_by_id(jac_table, delete_ids) + status = connect.delete_by_id(jac_collection, delete_ids) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(jac_table) + # get collection info before compact + status, info = connect.collection_info(jac_collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(jac_table) + status = connect.compact(jac_collection) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(jac_table) + # get collection info after compact + status, info = connect.collection_info(jac_collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before > size_after) - status = connect.compact(jac_table) + status = connect.compact(jac_collection) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info after compact twice - status, info = connect.table_info(jac_table) + # get collection info after compact twice + status, info = connect.collection_info(jac_collection) assert status.OK() size_after_twice = info.partitions_stat[0].segments_stat[0].data_size assert(size_after == size_after_twice) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_compact_multi_tables(self, connect): + def test_compact_multi_collections(self, connect): ''' - target: test compact works or not with multiple tables - method: create 50 tables, add vectors into 
them and compact in turn + target: test compact works or not with multiple collections + method: create 50 collections, add vectors into them and compact in turn expected: status ok ''' nq = 100 - num_tables = 10 + num_collections = 10 tmp, vectors = gen_binary_vectors(nq, dim) - table_list = [] - for i in range(num_tables): - table_name = gen_unique_str("test_compact_multi_table_%d" % i) - table_list.append(table_name) - param = {'table_name': table_name, + collection_list = [] + for i in range(num_collections): + collection_name = gen_unique_str("test_compact_multi_collection_%d" % i) + collection_list.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.JACCARD} - connect.create_table(param) + connect.create_collection(param) time.sleep(6) - for i in range(num_tables): - status, ids = connect.add_vectors(table_name=table_list[i], records=vectors) + for i in range(num_collections): + status, ids = connect.add_vectors(collection_name=collection_list[i], records=vectors) assert status.OK() - status = connect.delete_by_id(table_list[i], [ids[0], ids[-1]]) + status = connect.delete_by_id(collection_list[i], [ids[0], ids[-1]]) assert status.OK() - status = connect.flush([table_list[i]]) + status = connect.flush([collection_list[i]]) assert status.OK() - status = connect.compact(table_list[i]) + status = connect.compact(collection_list[i]) assert status.OK() @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vector_after_compact(self, connect, jac_table): + def test_add_vector_after_compact(self, connect, jac_collection): ''' target: test add vector after compact method: after compact operation, add vector expected: status ok, vector added ''' tmp, vectors = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = 
connect.flush([jac_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(jac_table) + # get collection info before compact + status, info = connect.collection_info(jac_collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(jac_table) + status = connect.compact(jac_collection) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(jac_table) + # get collection info after compact + status, info = connect.collection_info(jac_collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) tmp, vector = gen_binary_vectors(1, dim) - status, ids = connect.add_vectors(jac_table, vector) + status, ids = connect.add_vectors(jac_collection, vector) assert status.OK() @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_delete_vectors_after_compact(self, connect, jac_table): + def test_delete_vectors_after_compact(self, connect, jac_collection): ''' target: test delete vectors after compact method: after compact operation, delete vectors expected: status ok, vectors deleted ''' tmp, vectors = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status = connect.compact(jac_table) + status = connect.compact(jac_collection) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status = connect.delete_by_id(jac_table, ids) + status = connect.delete_by_id(jac_collection, ids) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() 
@pytest.mark.timeout(COMPACT_TIMEOUT) - def test_search_after_compact(self, connect, jac_table): + def test_search_after_compact(self, connect, jac_collection): ''' target: test search after compact method: after compact operation, search vector expected: status ok ''' tmp, vectors = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status = connect.compact(jac_table) + status = connect.compact(jac_collection) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() query_vecs = [vectors[0]] - status, res = connect.search_vectors(jac_table, top_k, query_records=query_vecs) + status, res = connect.search_vectors(jac_collection, top_k, query_records=query_vecs) logging.getLogger().info(res) assert status.OK() @@ -761,86 +761,86 @@ class TestCompactIP: ****************************************************************** """ @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vector_and_compact(self, connect, ip_table): + def test_add_vector_and_compact(self, connect, ip_collection): ''' target: test add vector and compact - method: add vector and compact table + method: add vector and compact collection expected: status ok, vector added ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(ip_table) + # get collection info before compact + status, info = connect.collection_info(ip_collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(ip_table) + status 
= connect.compact(ip_collection) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(ip_table) + # get collection info after compact + status, info = connect.collection_info(ip_collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_and_compact(self, connect, ip_table): + def test_add_vectors_and_compact(self, connect, ip_collection): ''' target: test add vectors and compact - method: add vectors and compact table + method: add vectors and compact collection expected: status ok, vectors added ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(ip_table) + # get collection info before compact + status, info = connect.collection_info(ip_collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(ip_table) + status = connect.compact(ip_collection) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(ip_table) + # get collection info after compact + status, info = connect.collection_info(ip_collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_delete_part_and_compact(self, connect, ip_table): + def test_add_vectors_delete_part_and_compact(self, connect, ip_collection): ''' target: test add vectors, 
delete part of them and compact - method: add vectors, delete a few and compact table + method: add vectors, delete a few and compact collection expected: status ok, data size is smaller after compact ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] - status = connect.delete_by_id(ip_table, delete_ids) + status = connect.delete_by_id(ip_collection, delete_ids) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(ip_table) + # get collection info before compact + status, info = connect.collection_info(ip_collection) assert status.OK() logging.getLogger().info(info.partitions_stat) size_before = info.partitions_stat[0].segments_stat[0].data_size logging.getLogger().info(size_before) - status = connect.compact(ip_table) + status = connect.compact(ip_collection) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(ip_table) + # get collection info after compact + status, info = connect.collection_info(ip_collection) assert status.OK() logging.getLogger().info(info.partitions_stat) size_after = info.partitions_stat[0].segments_stat[0].data_size @@ -848,202 +848,202 @@ class TestCompactIP: assert(size_before > size_after) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_delete_all_and_compact(self, connect, ip_table): + def test_add_vectors_delete_all_and_compact(self, connect, ip_collection): ''' target: test add vectors, delete them and compact - method: add vectors, delete all and compact table - expected: status ok, no data size in table info because 
table is empty + method: add vectors, delete all and compact collection + expected: status ok, no data size in collection info because collection is empty ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - status = connect.delete_by_id(ip_table, ids) + status = connect.delete_by_id(ip_collection, ids) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(ip_table) + # get collection info before compact + status, info = connect.collection_info(ip_collection) assert status.OK() - status = connect.compact(ip_table) + status = connect.compact(ip_collection) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(ip_table) + # get collection info after compact + status, info = connect.collection_info(ip_collection) assert status.OK() logging.getLogger().info(info.partitions_stat) assert(len(info.partitions_stat[0].segments_stat) == 0) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vector_and_compact_twice(self, connect, ip_table): + def test_add_vector_and_compact_twice(self, connect, ip_collection): ''' target: test add vector and compact twice - method: add vector and compact table twice + method: add vector and compact collection twice expected: status ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info before compact - status, info = 
connect.table_info(ip_table) + # get collection info before compact + status, info = connect.collection_info(ip_collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(ip_table) + status = connect.compact(ip_collection) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(ip_table) + # get collection info after compact + status, info = connect.collection_info(ip_collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) - status = connect.compact(ip_table) + status = connect.compact(ip_collection) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info after compact twice - status, info = connect.table_info(ip_table) + # get collection info after compact twice + status, info = connect.collection_info(ip_collection) assert status.OK() size_after_twice = info.partitions_stat[0].segments_stat[0].data_size assert(size_after == size_after_twice) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vectors_delete_part_and_compact_twice(self, connect, ip_table): + def test_add_vectors_delete_part_and_compact_twice(self, connect, ip_collection): ''' target: test add vectors, delete part of them and compact twice - method: add vectors, delete part and compact table twice + method: add vectors, delete part and compact collection twice expected: status ok, data size smaller after first compact, no change after second ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] - status = 
connect.delete_by_id(ip_table, delete_ids) + status = connect.delete_by_id(ip_collection, delete_ids) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(ip_table) + # get collection info before compact + status, info = connect.collection_info(ip_collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(ip_table) + status = connect.compact(ip_collection) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(ip_table) + # get collection info after compact + status, info = connect.collection_info(ip_collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before > size_after) - status = connect.compact(ip_table) + status = connect.compact(ip_collection) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info after compact twice - status, info = connect.table_info(ip_table) + # get collection info after compact twice + status, info = connect.collection_info(ip_collection) assert status.OK() size_after_twice = info.partitions_stat[0].segments_stat[0].data_size assert(size_after == size_after_twice) @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_compact_multi_tables(self, connect): + def test_compact_multi_collections(self, connect): ''' - target: test compact works or not with multiple tables - method: create 50 tables, add vectors into them and compact in turn + target: test compact works or not with multiple collections + method: create 50 collections, add vectors into them and compact in turn expected: status ok ''' nq = 100 - num_tables = 50 + num_collections = 50 vectors = gen_vectors(nq, dim) - table_list = 
[] - for i in range(num_tables): - table_name = gen_unique_str("test_compact_multi_table_%d" % i) - table_list.append(table_name) - param = {'table_name': table_name, + collection_list = [] + for i in range(num_collections): + collection_name = gen_unique_str("test_compact_multi_collection_%d" % i) + collection_list.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.IP} - connect.create_table(param) + connect.create_collection(param) time.sleep(6) - for i in range(num_tables): - status, ids = connect.add_vectors(table_name=table_list[i], records=vectors) + for i in range(num_collections): + status, ids = connect.add_vectors(collection_name=collection_list[i], records=vectors) assert status.OK() - status = connect.compact(table_list[i]) + status = connect.compact(collection_list[i]) assert status.OK() @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_add_vector_after_compact(self, connect, ip_table): + def test_add_vector_after_compact(self, connect, ip_collection): ''' target: test add vector after compact method: after compact operation, add vector expected: status ok, vector added ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info before compact - status, info = connect.table_info(ip_table) + # get collection info before compact + status, info = connect.collection_info(ip_collection) assert status.OK() size_before = info.partitions_stat[0].segments_stat[0].data_size - status = connect.compact(ip_table) + status = connect.compact(ip_collection) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - # get table info after compact - status, info = connect.table_info(ip_table) 
+ # get collection info after compact + status, info = connect.collection_info(ip_collection) assert status.OK() size_after = info.partitions_stat[0].segments_stat[0].data_size assert(size_before == size_after) vector = gen_single_vector(dim) - status, ids = connect.add_vectors(ip_table, vector) + status, ids = connect.add_vectors(ip_collection, vector) assert status.OK() @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_delete_vectors_after_compact(self, connect, ip_table): + def test_delete_vectors_after_compact(self, connect, ip_collection): ''' target: test delete vectors after compact method: after compact operation, delete vectors expected: status ok, vectors deleted ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - status = connect.compact(ip_table) + status = connect.compact(ip_collection) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - status = connect.delete_by_id(ip_table, ids) + status = connect.delete_by_id(ip_collection, ids) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() @pytest.mark.timeout(COMPACT_TIMEOUT) - def test_search_after_compact(self, connect, ip_table): + def test_search_after_compact(self, connect, ip_collection): ''' target: test search after compact method: after compact operation, search vector expected: status ok ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - status = connect.compact(ip_table) + status = connect.compact(ip_collection) assert status.OK() - status = 
connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() query_vecs = [vectors[0]] - status, res = connect.search_vectors(ip_table, top_k, query_records=query_vecs) + status, res = connect.search_vectors(ip_collection, top_k, query_records=query_vecs) logging.getLogger().info(res) assert status.OK() diff --git a/tests/milvus_python_test/test_config.py b/tests/milvus_python_test/test_config.py index 4f6305dc77..c5de3f2829 100644 --- a/tests/milvus_python_test/test_config.py +++ b/tests/milvus_python_test/test_config.py @@ -28,7 +28,7 @@ class TestCacheConfig: @pytest.mark.timeout(CONFIG_TIMEOUT) def reset_configs(self, connect): ''' - reset configs so the tests are stable + reset configs so the tests are stable ''' status, reply = connect.set_config("cache_config", "cpu_cache_capacity", 4) assert status.OK() @@ -40,7 +40,7 @@ class TestCacheConfig: assert config_value == '1' @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_cpu_cache_capacity_invalid_parent_key(self, connect, table): + def test_get_cpu_cache_capacity_invalid_parent_key(self, connect, collection): ''' target: get invalid parent key method: call get_config without parent_key: cache_config @@ -53,7 +53,7 @@ class TestCacheConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_cpu_cache_capacity_invalid_child_key(self, connect, table): + def test_get_cpu_cache_capacity_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: cpu_cache_capacity @@ -66,7 +66,7 @@ class TestCacheConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_cpu_cache_capacity_valid(self, connect, table): + def test_get_cpu_cache_capacity_valid(self, connect, collection): ''' target: get cpu_cache_capacity method: call get_config correctly @@ -76,7 +76,7 @@ class TestCacheConfig: assert status.OK() @pytest.mark.level(2) - def 
test_get_insert_buffer_size_invalid_parent_key(self, connect, table): + def test_get_insert_buffer_size_invalid_parent_key(self, connect, collection): ''' target: get invalid parent key method: call get_config without parent_key: cache_config @@ -89,7 +89,7 @@ class TestCacheConfig: assert not status.OK() @pytest.mark.level(2) - def test_get_insert_buffer_size_invalid_child_key(self, connect, table): + def test_get_insert_buffer_size_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: insert_buffer_size @@ -102,7 +102,7 @@ class TestCacheConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_insert_buffer_size_valid(self, connect, table): + def test_get_insert_buffer_size_valid(self, connect, collection): ''' target: get insert_buffer_size method: call get_config correctly @@ -112,7 +112,7 @@ class TestCacheConfig: assert status.OK() @pytest.mark.level(2) - def test_get_cache_insert_data_invalid_parent_key(self, connect, table): + def test_get_cache_insert_data_invalid_parent_key(self, connect, collection): ''' target: get invalid parent key method: call get_config without parent_key: cache_config @@ -125,7 +125,7 @@ class TestCacheConfig: assert not status.OK() @pytest.mark.level(2) - def test_get_cache_insert_data_invalid_child_key(self, connect, table): + def test_get_cache_insert_data_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: cache_insert_data @@ -138,7 +138,7 @@ class TestCacheConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_cache_insert_data_valid(self, connect, table): + def test_get_cache_insert_data_valid(self, connect, collection): ''' target: get cache_insert_data method: call get_config correctly @@ -164,7 +164,7 @@ class TestCacheConfig: return int(mem_available / 1024 / 1024 / 1024) @pytest.mark.timeout(CONFIG_TIMEOUT) - def 
test_set_cpu_cache_capacity_invalid_parent_key(self, connect, table): + def test_set_cpu_cache_capacity_invalid_parent_key(self, connect, collection): ''' target: set invalid parent key method: call set_config without parent_key: cache_config @@ -178,7 +178,7 @@ class TestCacheConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_cache_config_invalid_child_key(self, connect, table): + def test_set_cache_config_invalid_child_key(self, connect, collection): ''' target: set invalid child key method: call set_config with invalid child_key @@ -191,7 +191,7 @@ class TestCacheConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_cpu_cache_capacity_valid(self, connect, table): + def test_set_cpu_cache_capacity_valid(self, connect, collection): ''' target: set cpu_cache_capacity method: call set_config correctly @@ -204,7 +204,7 @@ class TestCacheConfig: assert status.OK() assert config_value == '8' - def test_set_cpu_cache_capacity_valid_multiple_times(self, connect, table): + def test_set_cpu_cache_capacity_valid_multiple_times(self, connect, collection): ''' target: set cpu_cache_capacity method: call set_config correctly and repeatedly @@ -225,7 +225,7 @@ class TestCacheConfig: assert config_value == '8' @pytest.mark.level(2) - def test_set_insert_buffer_size_invalid_parent_key(self, connect, table): + def test_set_insert_buffer_size_invalid_parent_key(self, connect, collection): ''' target: set invalid parent key method: call set_config without parent_key: cache_config @@ -239,7 +239,7 @@ class TestCacheConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_insert_buffer_size_valid(self, connect, table): + def test_set_insert_buffer_size_valid(self, connect, collection): ''' target: set insert_buffer_size method: call get_config correctly @@ -253,7 +253,7 @@ class TestCacheConfig: assert config_value == '2' @pytest.mark.level(2) - def 
test_set_insert_buffer_size_valid_multiple_times(self, connect, table): + def test_set_insert_buffer_size_valid_multiple_times(self, connect, collection): ''' target: set insert_buffer_size method: call get_config correctly and repeatedly @@ -274,7 +274,7 @@ class TestCacheConfig: assert config_value == '2' @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_cache_config_out_of_memory_value_A(self, connect, table): + def test_set_cache_config_out_of_memory_value_A(self, connect, collection): ''' target: set cpu_cache_capacity / insert_buffer_size to be out-of-memory method: call set_config with child values bigger than current system memory @@ -289,7 +289,7 @@ class TestCacheConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_cache_config_out_of_memory_value_B(self, connect, table): + def test_set_cache_config_out_of_memory_value_B(self, connect, collection): ''' target: set cpu_cache_capacity / insert_buffer_size to be out-of-memory method: call set_config with invalid values @@ -307,7 +307,7 @@ class TestCacheConfig: status, reply = connect.set_config("cache_config", "insert_buffer_size", mem_available - int(cpu_cache_capacity) + 1) assert not status.OK() - def test_set_cache_config_out_of_memory_value_C(self, connect, table): + def test_set_cache_config_out_of_memory_value_C(self, connect, collection): ''' target: set cpu_cache_capacity / insert_buffer_size to be out-of-memory method: call set_config multiple times @@ -330,7 +330,7 @@ class TestCacheConfig: self.reset_configs(connect) @pytest.mark.level(2) - def test_set_cache_insert_data_invalid_parent_key(self, connect, table): + def test_set_cache_insert_data_invalid_parent_key(self, connect, collection): ''' target: set invalid parent key method: call set_config without parent_key: cache_config @@ -344,7 +344,7 @@ class TestCacheConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_cache_insert_data_valid(self, connect, table): + def 
test_set_cache_insert_data_valid(self, connect, collection): ''' target: set cache_insert_data method: call get_config correctly @@ -368,7 +368,7 @@ class TestEngineConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_use_blas_threshold_invalid_parent_key(self, connect, table): + def test_get_use_blas_threshold_invalid_parent_key(self, connect, collection): ''' target: get invalid parent key method: call get_config without parent_key: engine_config @@ -381,7 +381,7 @@ class TestEngineConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_use_blas_threshold_invalid_child_key(self, connect, table): + def test_get_use_blas_threshold_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: use_blas_threshold @@ -394,7 +394,7 @@ class TestEngineConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_use_blas_threshold_valid(self, connect, table): + def test_get_use_blas_threshold_valid(self, connect, collection): ''' target: get use_blas_threshold method: call get_config correctly @@ -404,7 +404,7 @@ class TestEngineConfig: assert status.OK() @pytest.mark.level(2) - def test_get_gpu_search_threshold_invalid_parent_key(self, connect, table): + def test_get_gpu_search_threshold_invalid_parent_key(self, connect, collection): ''' target: get invalid parent key method: call get_config without parent_key: engine_config @@ -419,7 +419,7 @@ class TestEngineConfig: assert not status.OK() @pytest.mark.level(2) - def test_get_gpu_search_threshold_invalid_child_key(self, connect, table): + def test_get_gpu_search_threshold_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: gpu_search_threshold @@ -434,7 +434,7 @@ class TestEngineConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def 
test_get_gpu_search_threshold_valid(self, connect, table): + def test_get_gpu_search_threshold_valid(self, connect, collection): ''' target: get gpu_search_threshold method: call get_config correctly @@ -452,7 +452,7 @@ class TestEngineConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_use_blas_threshold_invalid_parent_key(self, connect, table): + def test_set_use_blas_threshold_invalid_parent_key(self, connect, collection): ''' target: set invalid parent key method: call set_config without parent_key: engine_config @@ -465,7 +465,7 @@ class TestEngineConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_engine_config_invalid_child_key(self, connect, table): + def test_set_engine_config_invalid_child_key(self, connect, collection): ''' target: set invalid child key method: call set_config with invalid child_key @@ -477,7 +477,7 @@ class TestEngineConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_use_blas_threshold_valid(self, connect, table): + def test_set_use_blas_threshold_valid(self, connect, collection): ''' target: set use_blas_threshold method: call set_config correctly @@ -490,7 +490,7 @@ class TestEngineConfig: assert config_value == '2000' @pytest.mark.level(2) - def test_set_use_blas_threshold_valid_multiple_times(self, connect, table): + def test_set_use_blas_threshold_valid_multiple_times(self, connect, collection): ''' target: set use_blas_threshold method: call set_config correctly and repeatedly @@ -504,7 +504,7 @@ class TestEngineConfig: assert config_value == str(i * 100) @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_gpu_search_threshold_invalid_parent_key(self, connect, table): + def test_set_gpu_search_threshold_invalid_parent_key(self, connect, collection): ''' target: set invalid parent key method: call set_config without parent_key: engine_config @@ -519,7 +519,7 @@ class TestEngineConfig: assert 
not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_gpu_search_threshold_valid(self, connect, table): + def test_set_gpu_search_threshold_valid(self, connect, collection): ''' target: set gpu_search_threshold method: call set_config correctly @@ -534,7 +534,7 @@ class TestEngineConfig: assert config_value == '2000' @pytest.mark.level(2) - def test_set_gpu_search_threshold_valid_multiple_times(self, connect, table): + def test_set_gpu_search_threshold_valid_multiple_times(self, connect, collection): ''' target: set gpu_search_threshold method: call set_config correctly and repeatedly @@ -553,7 +553,7 @@ class TestEngineConfig: assert status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_engine_config_invalid_values(self, connect, table): + def test_set_engine_config_invalid_values(self, connect, collection): ''' target: set engine_config method: call set_config with invalid child values @@ -576,7 +576,7 @@ class TestGPUResourceConfig: @pytest.mark.timeout(CONFIG_TIMEOUT) def reset_configs(self, connect): ''' - reset configs so the tests are stable + reset configs so the tests are stable ''' status, reply = connect.set_config("gpu_resource_config", "enable", "true") assert status.OK() @@ -596,7 +596,7 @@ class TestGPUResourceConfig: assert config_value == 'gpu0' @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_gpu_enable_invalid_parent_key(self, connect, table): + def test_get_gpu_enable_invalid_parent_key(self, connect, collection): ''' target: get invalid parent key method: call get_config without parent_key: gpu_resource_config @@ -611,7 +611,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_gpu_enable_invalid_child_key(self, connect, table): + def test_get_gpu_enable_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: enable @@ -625,7 +625,7 @@ class TestGPUResourceConfig: assert not status.OK() 
@pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_gpu_enable_valid(self, connect, table): + def test_get_gpu_enable_valid(self, connect, collection): ''' target: get enable status method: call get_config correctly @@ -638,7 +638,7 @@ class TestGPUResourceConfig: assert config_value == "true" or config_value == "false" @pytest.mark.level(2) - def test_get_cache_capacity_invalid_parent_key(self, connect, table): + def test_get_cache_capacity_invalid_parent_key(self, connect, collection): ''' target: get invalid parent key method: call get_config without parent_key: gpu_resource_config @@ -653,7 +653,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.level(2) - def test_get_cache_capacity_invalid_child_key(self, connect, table): + def test_get_cache_capacity_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: cache_capacity @@ -667,7 +667,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_cache_capacity_valid(self, connect, table): + def test_get_cache_capacity_valid(self, connect, collection): ''' target: get cache_capacity method: call get_config correctly @@ -679,7 +679,7 @@ class TestGPUResourceConfig: assert status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_search_resources_invalid_parent_key(self, connect, table): + def test_get_search_resources_invalid_parent_key(self, connect, collection): ''' target: get invalid parent key method: call get_config without parent_key: gpu_resource_config @@ -694,7 +694,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_search_resources_invalid_child_key(self, connect, table): + def test_get_search_resources_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: search_resources @@ -708,7 +708,7 @@ class TestGPUResourceConfig: assert not 
status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_search_resources_valid(self, connect, table): + def test_get_search_resources_valid(self, connect, collection): ''' target: get search_resources method: call get_config correctly @@ -721,7 +721,7 @@ class TestGPUResourceConfig: assert status.OK() @pytest.mark.level(2) - def test_get_build_index_resources_invalid_parent_key(self, connect, table): + def test_get_build_index_resources_invalid_parent_key(self, connect, collection): ''' target: get invalid parent key method: call get_config without parent_key: gpu_resource_config @@ -736,7 +736,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.level(2) - def test_get_build_index_resources_invalid_child_key(self, connect, table): + def test_get_build_index_resources_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: build_index_resources @@ -750,7 +750,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_build_index_resources_valid(self, connect, table): + def test_get_build_index_resources_valid(self, connect, collection): ''' target: get build_index_resources method: call get_config correctly @@ -769,7 +769,7 @@ class TestGPUResourceConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_gpu_enable_invalid_parent_key(self, connect, table): + def test_set_gpu_enable_invalid_parent_key(self, connect, collection): ''' target: set invalid parent key method: call set_config without parent_key: gpu_resource_config @@ -784,7 +784,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_gpu_resource_config_invalid_child_key(self, connect, table): + def test_set_gpu_resource_config_invalid_child_key(self, connect, collection): ''' target: set invalid child key method: call set_config with 
invalid child_key @@ -799,7 +799,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_gpu_enable_invalid_values(self, connect, table): + def test_set_gpu_enable_invalid_values(self, connect, collection): ''' target: set "enable" param method: call set_config with invalid child values @@ -812,7 +812,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_gpu_enable_valid(self, connect, table): + def test_set_gpu_enable_valid(self, connect, collection): ''' target: set "enable" param method: call set_config correctly @@ -829,7 +829,7 @@ class TestGPUResourceConfig: assert config_value == str(config) @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_cache_capacity_invalid_parent_key(self, connect, table): + def test_set_cache_capacity_invalid_parent_key(self, connect, collection): ''' target: set invalid parent key method: call set_config without parent_key: gpu_resource_config @@ -844,7 +844,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_cache_capacity_valid(self, connect, table): + def test_set_cache_capacity_valid(self, connect, collection): ''' target: set cache_capacity method: call set_config correctly @@ -856,7 +856,7 @@ class TestGPUResourceConfig: assert status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_cache_capacity_invalid_values(self, connect, table): + def test_set_cache_capacity_invalid_values(self, connect, collection): ''' target: set cache_capacity method: call set_config with invalid child values @@ -870,7 +870,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_search_resources_invalid_parent_key(self, connect, table): + def test_set_search_resources_invalid_parent_key(self, connect, collection): ''' target: set invalid parent key method: call set_config without parent_key: gpu_resource_config @@ -885,7 
+885,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_search_resources_valid(self, connect, table): + def test_set_search_resources_valid(self, connect, collection): ''' target: set search_resources method: call set_config correctly @@ -899,7 +899,7 @@ class TestGPUResourceConfig: assert config_value == "gpu0" @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_search_resources_invalid_values(self, connect, table): + def test_set_search_resources_invalid_values(self, connect, collection): ''' target: set search_resources method: call set_config with invalid child values @@ -912,7 +912,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_build_index_resources_invalid_parent_key(self, connect, table): + def test_set_build_index_resources_invalid_parent_key(self, connect, collection): ''' target: set invalid parent key method: call set_config without parent_key: gpu_resource_config @@ -927,7 +927,7 @@ class TestGPUResourceConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_build_index_resources_valid(self, connect, table): + def test_set_build_index_resources_valid(self, connect, collection): ''' target: set build_index_resources method: call set_config correctly @@ -941,7 +941,7 @@ class TestGPUResourceConfig: assert config_value == "gpu0" @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_build_index_resources_invalid_values(self, connect, table): + def test_set_build_index_resources_invalid_values(self, connect, collection): ''' target: set build_index_resources method: call set_config with invalid child values @@ -962,7 +962,7 @@ class TestServerConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_address_invalid_child_key(self, connect, table): + def test_get_address_invalid_child_key(self, connect, collection): ''' target: get invalid child 
key method: call get_config without child_key: address @@ -974,7 +974,7 @@ class TestServerConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_address_valid(self, connect, table): + def test_get_address_valid(self, connect, collection): ''' target: get address method: call get_config correctly @@ -984,7 +984,7 @@ class TestServerConfig: assert status.OK() @pytest.mark.level(2) - def test_get_port_invalid_child_key(self, connect, table): + def test_get_port_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: port @@ -996,7 +996,7 @@ class TestServerConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_port_valid(self, connect, table): + def test_get_port_valid(self, connect, collection): ''' target: get port method: call get_config correctly @@ -1006,7 +1006,7 @@ class TestServerConfig: assert status.OK() @pytest.mark.level(2) - def test_get_deploy_mode_invalid_child_key(self, connect, table): + def test_get_deploy_mode_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: deploy_mode @@ -1018,7 +1018,7 @@ class TestServerConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_deploy_mode_valid(self, connect, table): + def test_get_deploy_mode_valid(self, connect, collection): ''' target: get deploy_mode method: call get_config correctly @@ -1028,7 +1028,7 @@ class TestServerConfig: assert status.OK() @pytest.mark.level(2) - def test_get_time_zone_invalid_child_key(self, connect, table): + def test_get_time_zone_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: time_zone @@ -1040,7 +1040,7 @@ class TestServerConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_time_zone_valid(self, connect, table): + def test_get_time_zone_valid(self, 
connect, collection): ''' target: get time_zone method: call get_config correctly @@ -1051,7 +1051,7 @@ class TestServerConfig: assert "UTC" in config_value @pytest.mark.level(2) - def test_get_web_port_invalid_child_key(self, connect, table): + def test_get_web_port_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: web_port @@ -1063,7 +1063,7 @@ class TestServerConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_web_port_valid(self, connect, table): + def test_get_web_port_valid(self, connect, collection): ''' target: get web_port method: call get_config correctly @@ -1087,7 +1087,7 @@ class TestServerConfig: return time_zones @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_server_config_invalid_child_key(self, connect, table): + def test_set_server_config_invalid_child_key(self, connect, collection): ''' target: set invalid child key method: call set_config with invalid child_key @@ -1097,7 +1097,7 @@ class TestServerConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_address_valid(self, connect, table): + def test_set_address_valid(self, connect, collection): ''' target: set address method: call set_config correctly @@ -1109,7 +1109,7 @@ class TestServerConfig: assert status.OK() assert config_value == '0.0.0.0' - def test_set_port_valid(self, connect, table): + def test_set_port_valid(self, connect, collection): ''' target: set port method: call set_config correctly @@ -1122,7 +1122,7 @@ class TestServerConfig: assert status.OK() assert config_value == str(valid_port) - def test_set_port_invalid(self, connect, table): + def test_set_port_invalid(self, connect, collection): ''' target: set port method: call set_config with port number out of range(1024, 65535) @@ -1133,20 +1133,20 @@ class TestServerConfig: status, reply = connect.set_config("server_config", "port", invalid_port) assert not status.OK() - def 
test_set_deploy_mode_valid(self, connect, table): + def test_set_deploy_mode_valid(self, connect, collection): ''' target: set deploy_mode method: call set_config correctly expected: status ok, set successfully ''' - for valid_deploy_mode in ["cluster_readonly", "cluster_writable", "single"]: + for valid_deploy_mode in ["cluster_readonly", "cluster_writable", "single"]: status, reply = connect.set_config("server_config", "deploy_mode", valid_deploy_mode) assert status.OK() status, config_value = connect.get_config("server_config", "deploy_mode") assert status.OK() assert config_value == valid_deploy_mode - def test_set_deploy_mode_invalid(self, connect, table): + def test_set_deploy_mode_invalid(self, connect, collection): ''' target: set deploy_mode method: call set_config with invalid deploy_mode @@ -1156,7 +1156,7 @@ class TestServerConfig: status, reply = connect.set_config("server_config", "deploy_mode", invalid_deploy_mode) assert not status.OK() - def test_set_time_zone_valid(self, connect, table): + def test_set_time_zone_valid(self, connect, collection): ''' target: set time_zone method: call set_config correctly @@ -1172,7 +1172,7 @@ class TestServerConfig: status, reply = connect.set_config("server_config", "time_zone", "UTC+8") assert status.OK() - def test_set_time_zone_invalid(self, connect, table): + def test_set_time_zone_invalid(self, connect, collection): ''' target: set time_zone method: call set_config with invalid time_zone @@ -1183,7 +1183,7 @@ class TestServerConfig: status, reply = connect.set_config("server_config", "time_zone", invalid_time_zone) assert not status.OK() - def test_set_web_port_valid(self, connect, table): + def test_set_web_port_valid(self, connect, collection): ''' target: set web_port method: call set_config correctly @@ -1196,7 +1196,7 @@ class TestServerConfig: assert status.OK() assert config_value == str(valid_web_port) - def test_set_web_port_invalid(self, 
connect, collection): ''' target: set web_port method: call set_config with web_port number out of range(1024, 65535) @@ -1214,7 +1214,7 @@ class TestDBConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_backend_url_invalid_child_key(self, connect, table): + def test_get_backend_url_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: backend_url @@ -1226,7 +1226,7 @@ class TestDBConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_backend_url_valid(self, connect, table): + def test_get_backend_url_valid(self, connect, collection): ''' target: get backend_url method: call get_config correctly @@ -1236,29 +1236,29 @@ class TestDBConfig: assert status.OK() @pytest.mark.level(2) - def test_get_preload_table_invalid_child_key(self, connect, table): + def test_get_preload_collection_invalid_child_key(self, connect, collection): ''' target: get invalid child key - method: call get_config without child_key: preload_table + method: call get_config without child_key: preload_collection expected: status not ok ''' - invalid_configs = ["preloadtable", "preload_table "] + invalid_configs = ["preloadcollection", "preload_collection "] for config in invalid_configs: status, config_value = connect.get_config("db_config", config) assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_preload_table_valid(self, connect, table): + def test_get_preload_collection_valid(self, connect, collection): ''' - target: get preload_table + target: get preload_collection method: call get_config correctly expected: status ok ''' - status, config_value = connect.get_config("db_config", "preload_table") + status, config_value = connect.get_config("db_config", "preload_collection") assert status.OK() @pytest.mark.level(2) - def test_get_auto_flush_interval_invalid_child_key(self, connect, table): + def 
test_get_auto_flush_interval_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: auto_flush_interval @@ -1270,7 +1270,7 @@ class TestDBConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_auto_flush_interval_valid(self, connect, table): + def test_get_auto_flush_interval_valid(self, connect, collection): ''' target: get auto_flush_interval method: call get_config correctly @@ -1286,7 +1286,7 @@ class TestDBConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_db_config_invalid_child_key(self, connect, table): + def test_set_db_config_invalid_child_key(self, connect, collection): ''' target: set invalid child key method: call set_config with invalid child_key @@ -1296,7 +1296,7 @@ class TestDBConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_backend_url_valid(self, connect, table): + def test_set_backend_url_valid(self, connect, collection): ''' target: set backend_url method: call set_config correctly @@ -1308,19 +1308,19 @@ class TestDBConfig: assert status.OK() assert config_value == 'sqlite://:@:/' - def test_set_preload_table_valid(self, connect, table): + def test_set_preload_collection_valid(self, connect, collection): ''' - target: set preload_table + target: set preload_collection method: call set_config correctly expected: status ok, set successfully ''' - status, reply = connect.set_config("db_config", "preload_table", "") + status, reply = connect.set_config("db_config", "preload_collection", "") assert status.OK() - status, config_value = connect.get_config("db_config", "preload_table") + status, config_value = connect.get_config("db_config", "preload_collection") assert status.OK() assert config_value == "" - def test_set_auto_flush_interval_valid(self, connect, table): + def test_set_auto_flush_interval_valid(self, connect, collection): ''' 
target: set auto_flush_interval method: call set_config correctly @@ -1333,7 +1333,7 @@ class TestDBConfig: assert status.OK() assert config_value == str(valid_auto_flush_interval) - def test_set_auto_flush_interval_invalid(self, connect, table): + def test_set_auto_flush_interval_invalid(self, connect, collection): ''' target: set auto_flush_interval method: call set_config with invalid auto_flush_interval @@ -1351,7 +1351,7 @@ class TestStorageConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_primary_path_invalid_child_key(self, connect, table): + def test_get_primary_path_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: primary_path @@ -1363,7 +1363,7 @@ class TestStorageConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_primary_path_valid(self, connect, table): + def test_get_primary_path_valid(self, connect, collection): ''' target: get primary_path method: call get_config correctly @@ -1373,7 +1373,7 @@ class TestStorageConfig: assert status.OK() @pytest.mark.level(2) - def test_get_secondary_path_invalid_child_key(self, connect, table): + def test_get_secondary_path_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: secondary_path @@ -1385,7 +1385,7 @@ class TestStorageConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_secondary_path_valid(self, connect, table): + def test_get_secondary_path_valid(self, connect, collection): ''' target: get secondary_path method: call get_config correctly @@ -1401,7 +1401,7 @@ class TestStorageConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_storage_config_invalid_child_key(self, connect, table): + def test_set_storage_config_invalid_child_key(self, connect, 
collection): ''' target: set invalid child key method: call set_config with invalid child_key @@ -1411,7 +1411,7 @@ class TestStorageConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_primary_path_valid(self, connect, table): + def test_set_primary_path_valid(self, connect, collection): ''' target: set primary_path method: call set_config correctly @@ -1424,7 +1424,7 @@ class TestStorageConfig: assert config_value == '/var/lib/milvus' @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_secondary_path_valid(self, connect, table): + def test_set_secondary_path_valid(self, connect, collection): ''' target: set secondary_path method: call set_config correctly @@ -1444,7 +1444,7 @@ class TestMetricConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_enable_monitor_invalid_child_key(self, connect, table): + def test_get_enable_monitor_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: enable_monitor @@ -1456,7 +1456,7 @@ class TestMetricConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_enable_monitor_valid(self, connect, table): + def test_get_enable_monitor_valid(self, connect, collection): ''' target: get enable_monitor method: call get_config correctly @@ -1466,7 +1466,7 @@ class TestMetricConfig: assert status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_address_invalid_child_key(self, connect, table): + def test_get_address_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: address @@ -1478,7 +1478,7 @@ class TestMetricConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_address_valid(self, connect, table): + def test_get_address_valid(self, connect, collection): ''' target: get address method: call get_config correctly @@ -1488,7 +1488,7 @@ 
class TestMetricConfig: assert status.OK() @pytest.mark.level(2) - def test_get_port_invalid_child_key(self, connect, table): + def test_get_port_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: port @@ -1500,7 +1500,7 @@ class TestMetricConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_port_valid(self, connect, table): + def test_get_port_valid(self, connect, collection): ''' target: get port method: call get_config correctly @@ -1516,7 +1516,7 @@ class TestMetricConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_metric_config_invalid_child_key(self, connect, table): + def test_set_metric_config_invalid_child_key(self, connect, collection): ''' target: set invalid child key method: call set_config with invalid child_key @@ -1525,7 +1525,7 @@ class TestMetricConfig: status, reply = connect.set_config("metric_config", "child_key", 19530) assert not status.OK() - def test_set_enable_monitor_valid(self, connect, table): + def test_set_enable_monitor_valid(self, connect, collection): ''' target: set enable_monitor method: call set_config correctly @@ -1539,7 +1539,7 @@ class TestMetricConfig: assert config_value == str(valid_enable_monitor) @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_address_valid(self, connect, table): + def test_set_address_valid(self, connect, collection): ''' target: set address method: call set_config correctly @@ -1551,7 +1551,7 @@ class TestMetricConfig: assert status.OK() assert config_value == '127.0.0.1' - def test_set_port_valid(self, connect, table): + def test_set_port_valid(self, connect, collection): ''' target: set port method: call set_config correctly @@ -1564,7 +1564,7 @@ class TestMetricConfig: assert status.OK() assert config_value == str(valid_port) - def test_set_port_invalid(self, connect, table): + def test_set_port_invalid(self, 
connect, collection): ''' target: set port method: call set_config with port number out of range(1024, 65535), or same as web_port number @@ -1582,7 +1582,7 @@ class TestTracingConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_json_config_path_invalid_child_key(self, connect, table): + def test_get_json_config_path_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: json_config_path @@ -1594,7 +1594,7 @@ class TestTracingConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_json_config_path_valid(self, connect, table): + def test_get_json_config_path_valid(self, connect, collection): ''' target: get json_config_path method: call get_config correctly @@ -1610,7 +1610,7 @@ class TestTracingConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_tracing_config_invalid_child_key(self, connect, table): + def test_set_tracing_config_invalid_child_key(self, connect, collection): ''' target: set invalid child key method: call set_config with invalid child_key @@ -1620,7 +1620,7 @@ class TestTracingConfig: assert not status.OK() @pytest.mark.skip(reason="Currently not supported") - def test_set_json_config_path_valid(self, connect, table): + def test_set_json_config_path_valid(self, connect, collection): ''' target: set json_config_path method: call set_config correctly @@ -1640,7 +1640,7 @@ class TestWALConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_enable_invalid_child_key(self, connect, table): + def test_get_enable_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: enable @@ -1652,7 +1652,7 @@ class TestWALConfig: assert not status.OK() 
@pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_enable_valid(self, connect, table): + def test_get_enable_valid(self, connect, collection): ''' target: get enable method: call get_config correctly @@ -1662,7 +1662,7 @@ class TestWALConfig: assert status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_recovery_error_ignore_invalid_child_key(self, connect, table): + def test_get_recovery_error_ignore_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: recovery_error_ignore @@ -1674,7 +1674,7 @@ class TestWALConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_recovery_error_ignore_valid(self, connect, table): + def test_get_recovery_error_ignore_valid(self, connect, collection): ''' target: get recovery_error_ignore method: call get_config correctly @@ -1684,7 +1684,7 @@ class TestWALConfig: assert status.OK() @pytest.mark.level(2) - def test_get_buffer_size_invalid_child_key(self, connect, table): + def test_get_buffer_size_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: buffer_size @@ -1696,7 +1696,7 @@ class TestWALConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_buffer_size_valid(self, connect, table): + def test_get_buffer_size_valid(self, connect, collection): ''' target: get buffer_size method: call get_config correctly @@ -1706,7 +1706,7 @@ class TestWALConfig: assert status.OK() @pytest.mark.level(2) - def test_get_wal_path_invalid_child_key(self, connect, table): + def test_get_wal_path_invalid_child_key(self, connect, collection): ''' target: get invalid child key method: call get_config without child_key: wal_path @@ -1718,7 +1718,7 @@ class TestWALConfig: assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_wal_path_valid(self, connect, table): + def test_get_wal_path_valid(self, connect, collection): ''' target: 
get wal_path method: call get_config correctly @@ -1734,7 +1734,7 @@ class TestWALConfig: ****************************************************************** """ @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_wal_config_invalid_child_key(self, connect, table): + def test_set_wal_config_invalid_child_key(self, connect, collection): ''' target: set invalid child key method: call set_config with invalid child_key @@ -1743,7 +1743,7 @@ class TestWALConfig: status, reply = connect.set_config("wal_config", "child_key", 256) assert not status.OK() - def test_set_enable_valid(self, connect, table): + def test_set_enable_valid(self, connect, collection): ''' target: set enable method: call set_config correctly @@ -1756,7 +1756,7 @@ class TestWALConfig: assert status.OK() assert config_value == str(valid_enable) - def test_set_recovery_error_ignore_valid(self, connect, table): + def test_set_recovery_error_ignore_valid(self, connect, collection): ''' target: set recovery_error_ignore method: call set_config correctly @@ -1769,7 +1769,7 @@ class TestWALConfig: assert status.OK() assert config_value == valid_recovery_error_ignore - def test_set_buffer_size_valid_A(self, connect, table): + def test_set_buffer_size_valid_A(self, connect, collection): ''' target: set buffer_size method: call set_config correctly @@ -1783,7 +1783,7 @@ class TestWALConfig: assert config_value == str(valid_buffer_size) @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_set_wal_path_valid(self, connect, table): + def test_set_wal_path_valid(self, connect, collection): ''' target: set wal_path method: call set_config correctly diff --git a/tests/milvus_python_test/test_connect.py b/tests/milvus_python_test/test_connect.py index ec26aa7247..efc0f92359 100644 --- a/tests/milvus_python_test/test_connect.py +++ b/tests/milvus_python_test/test_connect.py @@ -285,7 +285,7 @@ class TestConnect: b. data_set not too large incase disconnection happens when data is underd-preparing c. 
data_set not too small incase disconnection happens when data has already been transferred d. make sure disconnection happens when data is in-transport - Expected: Failure, get_table_row_count == 0 + Expected: Failure, count_collection == 0 ''' pass diff --git a/tests/milvus_python_test/test_delete_vectors.py b/tests/milvus_python_test/test_delete_vectors.py index 0dc1f39c27..7c756d20a8 100644 --- a/tests/milvus_python_test/test_delete_vectors.py +++ b/tests/milvus_python_test/test_delete_vectors.py @@ -11,7 +11,7 @@ from utils import * dim = 128 index_file_size = 10 -table_id = "test_delete" +collection_id = "test_delete" DELETE_TIMEOUT = 60 nprobe = 1 epsilon = 0.001 @@ -38,7 +38,7 @@ class TestDeleteBase: pytest.skip("Only support CPU mode") return request.param - def test_delete_vector_search(self, connect, table, get_simple_index): + def test_delete_vector_search(self, connect, collection, get_simple_index): ''' target: test delete vector method: add vector and delete @@ -47,22 +47,22 @@ class TestDeleteBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.delete_by_id(table, ids) + status = connect.delete_by_id(collection, ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) search_param = get_search_param(index_type) - status = connect.flush([table]) + status = connect.flush([collection]) search_param = get_search_param(index_type) - status, res = connect.search_vectors(table, top_k, vector, params=search_param) + status, res = connect.search_vectors(collection, top_k, vector, params=search_param) logging.getLogger().info(res) assert status.OK() assert len(res) == 0 - def 
test_delete_vector_multi_same_ids(self, connect, table, get_simple_index): + def test_delete_vector_multi_same_ids(self, connect, collection, get_simple_index): ''' target: test delete vector, with some same ids method: add vector and delete @@ -71,58 +71,58 @@ class TestDeleteBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vectors = gen_vectors(nb, dim) - connect.add_vectors(table, vectors, ids=[1 for i in range(nb)]) - status = connect.flush([table]) + connect.add_vectors(collection, vectors, ids=[1 for i in range(nb)]) + status = connect.flush([collection]) # Bloom filter error assert status.OK() - status = connect.delete_by_id(table, [1]) + status = connect.delete_by_id(collection, [1]) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) search_param = get_search_param(index_type) - status, res = connect.search_vectors(table, top_k, [vectors[0]], params=search_param) + status, res = connect.search_vectors(collection, top_k, [vectors[0]], params=search_param) logging.getLogger().info(res) assert status.OK() assert len(res) == 0 - def test_delete_vector_table_count(self, connect, table): + def test_delete_vector_collection_count(self, connect, collection): ''' target: test delete vector method: add vector and delete expected: status ok, vector deleted ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.delete_by_id(table, ids) + status = connect.delete_by_id(collection, ids) assert status.OK() - status = connect.flush([table]) - status, res = connect.get_table_row_count(table) + status = connect.flush([collection]) + status, res = connect.count_collection(collection) assert status.OK() assert res == 0 - def test_delete_vector_table_count_no_flush(self, 
connect, table): + def test_delete_vector_collection_count_no_flush(self, connect, collection): ''' target: test delete vector method: add vector and delete, no flush(using auto flush) expected: status ok, vector deleted ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.delete_by_id(table, ids) + status = connect.delete_by_id(collection, ids) assert status.OK() time.sleep(2) - status, res = connect.get_table_row_count(table) + status, res = connect.count_collection(collection) assert status.OK() assert res == 0 - def test_delete_vector_id_not_exised(self, connect, table, get_simple_index): + def test_delete_vector_id_not_exised(self, connect, collection, get_simple_index): ''' target: test delete vector, params vector_id not existed method: add vector and delete @@ -131,34 +131,34 @@ class TestDeleteBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.delete_by_id(table, [0]) + status = connect.delete_by_id(collection, [0]) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) search_param = get_search_param(index_type) - status, res = connect.search_vectors(table, top_k, vector, params=search_param) + status, res = connect.search_vectors(collection, top_k, vector, params=search_param) assert status.OK() assert res[0][0].id == ids[0] - def test_delete_vector_table_not_existed(self, connect, table): + def 
test_delete_vector_collection_not_existed(self, connect, collection): ''' - target: test delete vector, params table_name not existed + target: test delete vector, params collection_name not existed method: add vector and delete expected: status not ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - table_new = gen_unique_str() - status = connect.delete_by_id(table_new, [0]) + collection_new = gen_unique_str() + status = connect.delete_by_id(collection_new, [0]) assert not status.OK() - def test_add_vectors_delete_vector(self, connect, table, get_simple_index): + def test_add_vectors_delete_vector(self, connect, collection, get_simple_index): ''' method: add vectors and delete expected: status ok, vectors deleted @@ -166,19 +166,19 @@ class TestDeleteBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] query_vecs = [vectors[0], vectors[1], vectors[-1]] - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) search_param = get_search_param(index_type) - status, res = connect.search_vectors(table, top_k, query_vecs, params=search_param) + status, res = connect.search_vectors(collection, top_k, query_vecs, params=search_param) assert status.OK() logging.getLogger().info(res) assert res[0][0].distance > epsilon @@ 
-186,7 +186,7 @@ class TestDeleteBase: assert res[1][0].id == ids[1] assert res[2][0].distance > epsilon - def test_create_index_after_delete(self, connect, table, get_simple_index): + def test_create_index_after_delete(self, connect, collection, get_simple_index): ''' method: add vectors and delete, then create index expected: status ok, vectors deleted, index created @@ -194,19 +194,19 @@ class TestDeleteBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] query_vecs = [vectors[0], vectors[1], vectors[-1]] - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() - status = connect.flush([table]) - status = connect.create_index(table, index_type, index_param) + status = connect.flush([collection]) + status = connect.create_index(collection, index_type, index_param) assert status.OK() search_param = get_search_param(index_type) - status, res = connect.search_vectors(table, top_k, query_vecs, params=search_param) + status, res = connect.search_vectors(collection, top_k, query_vecs, params=search_param) assert status.OK() logging.getLogger().info(res) assert res[0][0].distance > epsilon @@ -214,7 +214,7 @@ class TestDeleteBase: assert res[1][0].id == ids[1] assert res[2][0].distance > epsilon - def test_add_vector_after_delete(self, connect, table, get_simple_index): + def test_add_vector_after_delete(self, connect, collection, get_simple_index): ''' method: add vectors and delete, then add vector expected: status ok, vectors deleted, vector added @@ -222,22 +222,22 @@ class TestDeleteBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] 
vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] query_vecs = [vectors[0], vectors[1], vectors[-1]] - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() - status = connect.flush([table]) - status, tmp_ids = connect.add_vectors(table, [vectors[0], vectors[-1]]) + status = connect.flush([collection]) + status, tmp_ids = connect.add_vectors(collection, [vectors[0], vectors[-1]]) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) search_param = get_search_param(index_type) - status, res = connect.search_vectors(table, top_k, query_vecs, params=search_param) + status, res = connect.search_vectors(collection, top_k, query_vecs, params=search_param) assert status.OK() logging.getLogger().info(res) assert res[0][0].id == tmp_ids[0] @@ -246,41 +246,41 @@ class TestDeleteBase: assert res[2][0].id == tmp_ids[-1] assert res[2][0].distance < epsilon - def test_delete_multiable_times(self, connect, table): + def test_delete_multiable_times(self, connect, collection): ''' method: add vectors and delete id serveral times expected: status ok, vectors deleted, and status ok for next delete operation ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] query_vecs = [vectors[0], vectors[1], vectors[-1]] - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() 
- status = connect.flush([table]) + status = connect.flush([collection]) for i in range(10): - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() - def test_delete_no_flush_multiable_times(self, connect, table): + def test_delete_no_flush_multiable_times(self, connect, collection): ''' method: add vectors and delete id serveral times expected: status ok, vectors deleted, and status ok for next delete operation ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] query_vecs = [vectors[0], vectors[1], vectors[-1]] - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() for i in range(10): - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() assert status.OK() @@ -303,7 +303,7 @@ class TestDeleteIndexedVectors: pytest.skip("Only support CPU mode") return request.param - def test_delete_vectors_after_index_created_search(self, connect, table, get_simple_index): + def test_delete_vectors_after_index_created_search(self, connect, collection, get_simple_index): ''' target: test delete vector after index created method: add vector, create index and delete vector @@ -312,22 +312,22 @@ class TestDeleteIndexedVectors: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.create_index(table, index_type, index_param) + status = 
connect.create_index(collection, index_type, index_param) assert status.OK() - status = connect.delete_by_id(table, ids) + status = connect.delete_by_id(collection, ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) search_param = get_search_param(index_type) - status, res = connect.search_vectors(table, top_k, vector, params=search_param) + status, res = connect.search_vectors(collection, top_k, vector, params=search_param) logging.getLogger().info(res) assert status.OK() assert len(res) == 0 - def test_add_vectors_delete_vector(self, connect, table, get_simple_index): + def test_add_vectors_delete_vector(self, connect, collection, get_simple_index): ''' method: add vectors and delete expected: status ok, vectors deleted @@ -335,21 +335,21 @@ class TestDeleteIndexedVectors: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() delete_ids = [ids[0], ids[-1]] query_vecs = [vectors[0], vectors[1], vectors[-1]] - status = connect.delete_by_id(table, delete_ids) + status = connect.delete_by_id(collection, delete_ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) search_param = get_search_param(index_type) - status, res = connect.search_vectors(table, top_k, query_vecs, params=search_param) + status, res = connect.search_vectors(collection, top_k, query_vecs, params=search_param) assert status.OK() logging.getLogger().info(res) assert res[0][0].distance > 
epsilon @@ -375,7 +375,7 @@ class TestDeleteBinary: else: pytest.skip("Skip index Temporary") - def test_delete_vector_search(self, connect, jac_table, get_simple_index): + def test_delete_vector_search(self, connect, jac_collection, get_simple_index): ''' target: test delete vector method: add vector and delete @@ -384,15 +384,15 @@ class TestDeleteBinary: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] tmp, vector = gen_binary_vectors(1, dim) - status, ids = connect.add_vectors(jac_table, vector) + status, ids = connect.add_vectors(jac_collection, vector) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status = connect.delete_by_id(jac_table, ids) + status = connect.delete_by_id(jac_collection, ids) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) search_param = get_search_param(index_type) - status, res = connect.search_vectors(jac_table, top_k, vector, params=search_param) + status, res = connect.search_vectors(jac_collection, top_k, vector, params=search_param) logging.getLogger().info(res) assert status.OK() assert len(res) == 0 @@ -400,25 +400,25 @@ class TestDeleteBinary: assert len(res) == 0 # TODO: soft delete - def test_delete_vector_table_count(self, connect, jac_table): + def test_delete_vector_collection_count(self, connect, jac_collection): ''' target: test delete vector method: add vector and delete expected: status ok, vector deleted ''' tmp, vector = gen_binary_vectors(1, dim) - status, ids = connect.add_vectors(jac_table, vector) + status, ids = connect.add_vectors(jac_collection, vector) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status = connect.delete_by_id(jac_table, ids) + status = connect.delete_by_id(jac_collection, ids) assert status.OK() - status = connect.flush([jac_table]) - status, res = 
connect.get_table_row_count(jac_table) + status = connect.flush([jac_collection]) + status, res = connect.count_collection(jac_collection) assert status.OK() assert res == 0 - def test_delete_vector_id_not_exised(self, connect, jac_table, get_simple_index): + def test_delete_vector_id_not_exised(self, connect, jac_collection, get_simple_index): ''' target: test delete vector, params vector_id not existed method: add vector and delete @@ -427,37 +427,37 @@ class TestDeleteBinary: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] tmp, vector = gen_binary_vectors(1, dim) - status, ids = connect.add_vectors(jac_table, vector) + status, ids = connect.add_vectors(jac_collection, vector) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status = connect.delete_by_id(jac_table, [0]) + status = connect.delete_by_id(jac_collection, [0]) assert status.OK() - status = connect.flush([jac_table]) - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) + status = connect.flush([jac_collection]) search_param = get_search_param(index_type) - status, res = connect.search_vectors(jac_table, top_k, vector, params=search_param) + status, res = connect.search_vectors(jac_collection, top_k, vector, params=search_param) assert status.OK() assert res[0][0].id == ids[0] - def test_delete_vector_table_not_existed(self, connect, jac_table): + def test_delete_vector_collection_not_existed(self, connect, jac_collection): ''' - target: test delete vector, params table_name not existed + target: test delete vector, params collection_name not existed method: add vector and delete expected: status not ok ''' tmp, vector = gen_binary_vectors(1, dim) - status, ids = connect.add_vectors(jac_table, vector) + status, ids = connect.add_vectors(jac_collection, vector) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) 
assert status.OK() - table_new = gen_unique_str() - status = connect.delete_by_id(table_new, [0]) - table_new = gen_unique_str() - status = connect.delete_by_id(table_new, [0]) + collection_new = gen_unique_str() + status = connect.delete_by_id(collection_new, [0]) + collection_new = gen_unique_str() + status = connect.delete_by_id(collection_new, [0]) assert not status.OK() - def test_add_vectors_delete_vector(self, connect, jac_table, get_simple_index): + def test_add_vectors_delete_vector(self, connect, jac_collection, get_simple_index): ''' method: add vectors and delete expected: status ok, vectors deleted @@ -465,24 +465,24 @@ class TestDeleteBinary: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] tmp, vectors = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] query_vecs = [vectors[0], vectors[1], vectors[-1]] - status = connect.delete_by_id(jac_table, delete_ids) + status = connect.delete_by_id(jac_collection, delete_ids) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) search_param = get_search_param(index_type) - status, res = connect.search_vectors(jac_table, top_k, query_vecs, params=search_param) + status, res = connect.search_vectors(jac_collection, top_k, query_vecs, params=search_param) assert status.OK() logging.getLogger().info(res) assert res[0][0].id != ids[0] assert res[1][0].id == ids[1] assert res[2][0].id != ids[-1] - def test_add_after_delete_vector(self, connect, jac_table, get_simple_index): + def test_add_after_delete_vector(self, connect, jac_collection, get_simple_index): ''' method: add vectors and delete, add expected: status ok, vectors added @@ -490,20 +490,20 @@ class TestDeleteBinary: index_param = 
get_simple_index["index_param"] index_type = get_simple_index["index_type"] tmp, vectors = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() delete_ids = [ids[0], ids[-1]] query_vecs = [vectors[0], vectors[1], vectors[-1]] - status = connect.delete_by_id(jac_table, delete_ids) + status = connect.delete_by_id(jac_collection, delete_ids) assert status.OK() - status = connect.flush([jac_table]) - status, tmp_ids = connect.add_vectors(jac_table, [vectors[0], vectors[-1]]) + status = connect.flush([jac_collection]) + status, tmp_ids = connect.add_vectors(jac_collection, [vectors[0], vectors[-1]]) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) search_param = get_search_param(index_type) - status, res = connect.search_vectors(jac_table, top_k, query_vecs, params=search_param) + status, res = connect.search_vectors(jac_collection, top_k, query_vecs, params=search_param) assert status.OK() logging.getLogger().info(res) assert res[0][0].id == tmp_ids[0] @@ -526,32 +526,32 @@ class TestDeleteIdsIngalid(object): yield request.param @pytest.mark.level(1) - def test_delete_vector_id_invalid(self, connect, table, gen_invalid_id): + def test_delete_vector_id_invalid(self, connect, collection, gen_invalid_id): invalid_id = gen_invalid_id with pytest.raises(Exception) as e: - status = connect.delete_by_id(table, [invalid_id]) + status = connect.delete_by_id(collection, [invalid_id]) @pytest.mark.level(2) - def test_delete_vector_ids_invalid(self, connect, table, gen_invalid_id): + def test_delete_vector_ids_invalid(self, connect, collection, gen_invalid_id): invalid_id = gen_invalid_id with pytest.raises(Exception) as e: - status = connect.delete_by_id(table, [1, invalid_id]) + status = connect.delete_by_id(collection, 
[1, invalid_id]) -class TestTableNameInvalid(object): +class TestcollectionNameInvalid(object): """ - Test adding vectors with invalid table names + Test adding vectors with invalid collection names """ @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) - def get_table_name(self, request): + def get_collection_name(self, request): yield request.param @pytest.mark.level(2) - def test_delete_vectors_with_invalid_table_name(self, connect, get_table_name): - table_name = get_table_name - status = connect.delete_by_id(table_name, [1]) + def test_delete_vectors_with_invalid_collection_name(self, connect, get_collection_name): + collection_name = get_collection_name + status = connect.delete_by_id(collection_name, [1]) assert not status.OK() diff --git a/tests/milvus_python_test/test_flush.py b/tests/milvus_python_test/test_flush.py index 4ec5399977..debd5581fa 100644 --- a/tests/milvus_python_test/test_flush.py +++ b/tests/milvus_python_test/test_flush.py @@ -10,7 +10,7 @@ from utils import * dim = 128 index_file_size = 10 -table_id = "test_flush" +collection_id = "test_flush" DELETE_TIMEOUT = 60 nprobe = 1 tag = "1970-01-01" @@ -38,117 +38,117 @@ class TestFlushBase: pytest.skip("Only support CPU mode") return request.param - def test_flush_table_not_existed(self, connect, table): + def test_flush_collection_not_existed(self, connect, collection): ''' - target: test delete vector, params table_name not existed + target: test delete vector, params collection_name not existed method: add vector and delete expected: status not ok ''' - table_new = gen_unique_str() - status = connect.flush([table_new]) + collection_new = gen_unique_str() + status = connect.flush([collection_new]) assert not status.OK() - def test_flush_empty_table(self, connect, table): + def test_flush_empty_collection(self, connect, collection): ''' - method: flush table with no vectors + method: flush collection with no vectors expected: status 
ok ''' - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - def test_add_partition_flush(self, connect, table): + def test_add_partition_flush(self, connect, collection): ''' - method: add vectors into partition in table, flush serveral times + method: add vectors into partition in collection, flush serveral times expected: status ok ''' vectors = gen_vector(nb, dim) - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) vectors = gen_vectors(nb, dim) ids = [i for i in range(nb)] - status, ids = connect.insert(table, vectors, ids) - status = connect.flush([table]) - result, res = connect.get_table_row_count(table) + status, ids = connect.insert(collection, vectors, ids) + status = connect.flush([collection]) + result, res = connect.count_collection(collection) assert res == nb - status, ids = connect.insert(table, vectors, ids, partition_tag=tag) + status, ids = connect.insert(collection, vectors, ids, partition_tag=tag) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - result, res = connect.get_table_row_count(table) + result, res = connect.count_collection(collection) assert res == 2 * nb - def test_add_partitions_flush(self, connect, table): + def test_add_partitions_flush(self, connect, collection): ''' - method: add vectors into partitions in table, flush one + method: add vectors into partitions in collection, flush one expected: status ok ''' vectors = gen_vectors(nb, dim) tag_new = gen_unique_str() - status = connect.create_partition(table, tag) - status = connect.create_partition(table, tag_new) + status = connect.create_partition(collection, tag) + status = connect.create_partition(collection, tag_new) ids = [i for i in range(nb)] - status, ids = connect.insert(table, vectors, ids, partition_tag=tag) - status = connect.flush([table]) + status, ids = connect.insert(collection, vectors, ids, partition_tag=tag) 
+ status = connect.flush([collection]) assert status.OK() - status, ids = connect.insert(table, vectors, ids, partition_tag=tag_new) + status, ids = connect.insert(collection, vectors, ids, partition_tag=tag_new) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - result, res = connect.get_table_row_count(table) + result, res = connect.count_collection(collection) assert res == 2 * nb - def test_add_tables_flush(self, connect, table): + def test_add_collections_flush(self, connect, collection): ''' - method: add vectors into tables, flush one + method: add vectors into collections, flush one expected: status ok ''' vectors = gen_vectors(nb, dim) - table_new = gen_unique_str() - param = {'table_name': table_new, + collection_new = gen_unique_str() + param = {'collection_name': collection_new, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) - status = connect.create_partition(table, tag) - status = connect.create_partition(table_new, tag) + status = connect.create_collection(param) + status = connect.create_partition(collection, tag) + status = connect.create_partition(collection_new, tag) vectors = gen_vectors(nb, dim) ids = [i for i in range(nb)] - status, ids = connect.insert(table, vectors, ids, partition_tag=tag) - status, ids = connect.insert(table_new, vectors, ids, partition_tag=tag) + status, ids = connect.insert(collection, vectors, ids, partition_tag=tag) + status, ids = connect.insert(collection_new, vectors, ids, partition_tag=tag) assert status.OK() - status = connect.flush([table]) - status = connect.flush([table_new]) + status = connect.flush([collection]) + status = connect.flush([collection_new]) assert status.OK() - result, res = connect.get_table_row_count(table) + result, res = connect.count_collection(collection) assert res == nb - result, res = connect.get_table_row_count(table_new) + result, res = 
connect.count_collection(collection_new) assert res == nb - def test_add_flush_multiable_times(self, connect, table): + def test_add_flush_multiable_times(self, connect, collection): ''' method: add vectors, flush serveral times expected: status ok ''' vectors = gen_vectors(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() for i in range(10): - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() query_vecs = [vectors[0], vectors[1], vectors[-1]] - status, res = connect.search_vectors(table, top_k, query_records=query_vecs) + status, res = connect.search_vectors(collection, top_k, query_records=query_vecs) assert status.OK() - def test_add_flush_auto(self, connect, table): + def test_add_flush_auto(self, connect, collection): ''' method: add vectors expected: status ok ''' vectors = gen_vectors(nb, dim) ids = [i for i in range(nb)] - status, ids = connect.add_vectors(table, vectors, ids) + status, ids = connect.add_vectors(collection, vectors, ids) assert status.OK() time.sleep(2) - status, res = connect.get_table_row_count(table) + status, res = connect.count_collection(collection) assert status.OK() assert res == nb @@ -163,7 +163,7 @@ class TestFlushBase: yield request.param # both autoflush / flush - def test_add_flush_same_ids(self, connect, table, same_ids): + def test_add_flush_same_ids(self, connect, collection, same_ids): ''' method: add vectors, with same ids, count(same ids) < 15, > 15 expected: status ok @@ -173,79 +173,79 @@ class TestFlushBase: for i, item in enumerate(ids): if item <= same_ids: ids[i] = 0 - status, ids = connect.add_vectors(table, vectors, ids) + status, ids = connect.add_vectors(collection, vectors, ids) time.sleep(2) - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_table_row_count(table) + status, res = connect.count_collection(collection) 
assert status.OK() assert res == nb - def test_delete_flush_multiable_times(self, connect, table): + def test_delete_flush_multiable_times(self, connect, collection): ''' method: delete vectors, flush serveral times expected: status ok ''' vectors = gen_vectors(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.delete_by_id(table, [ids[-1]]) + status = connect.delete_by_id(collection, [ids[-1]]) assert status.OK() for i in range(10): - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() query_vecs = [vectors[0], vectors[1], vectors[-1]] - status, res = connect.search_vectors(table, top_k, query_records=query_vecs) + status, res = connect.search_vectors(collection, top_k, query_records=query_vecs) assert status.OK() # TODO: CI fail, LOCAL pass - def _test_table_count_during_flush(self, connect, args): + def _test_collection_count_during_flush(self, connect, args): ''' - method: flush table at background, call `get_table_row_count` + method: flush collection at background, call `count_collection` expected: status ok ''' - table = gen_unique_str() + collection = gen_unique_str() uri = "tcp://%s:%s" % (args["ip"], args["port"]) - param = {'table_name': table, + param = {'collection_name': collection, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} milvus = get_milvus() milvus.connect(uri=uri) - milvus.create_table(param) + milvus.create_collection(param) vectors = gen_vector(nb, dim) - status, ids = milvus.add_vectors(table, vectors, ids=[i for i in range(nb)]) - def flush(table_name): + status, ids = milvus.add_vectors(collection, vectors, ids=[i for i in range(nb)]) + def flush(collection_name): milvus = get_milvus() milvus.connect(uri=uri) - status = milvus.delete_by_id(table_name, [i for i in range(nb)]) + status = milvus.delete_by_id(collection_name, [i for i in range(nb)]) assert 
status.OK() - status = milvus.flush([table_name]) + status = milvus.flush([collection_name]) assert status.OK() - p = Process(target=flush, args=(table, )) + p = Process(target=flush, args=(collection, )) p.start() - status, res = milvus.get_table_row_count(table) + status, res = milvus.count_collection(collection) assert status.OK() p.join() - status, res = milvus.get_table_row_count(table) + status, res = milvus.count_collection(collection) assert status.OK() logging.getLogger().info(res) assert res == 0 -class TestTableNameInvalid(object): +class TestcollectionNameInvalid(object): """ - Test adding vectors with invalid table names + Test adding vectors with invalid collection names """ @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) - def get_table_name(self, request): + def get_collection_name(self, request): yield request.param @pytest.mark.level(2) - def test_flush_with_invalid_table_name(self, connect, get_table_name): - table_name = get_table_name + def test_flush_with_invalid_collection_name(self, connect, get_collection_name): + collection_name = get_collection_name with pytest.raises(Exception) as e: - status, result = connect.flush(table_name) + status, result = connect.flush(collection_name) diff --git a/tests/milvus_python_test/test_get_vector_by_id.py b/tests/milvus_python_test/test_get_vector_by_id.py index bc02cecf90..5abcd3e98d 100644 --- a/tests/milvus_python_test/test_get_vector_by_id.py +++ b/tests/milvus_python_test/test_get_vector_by_id.py @@ -11,7 +11,7 @@ from utils import * dim = 128 index_file_size = 10 -table_id = "test_get_vector_by_id" +collection_id = "test_get_vector_by_id" DELETE_TIMEOUT = 60 nprobe = 1 tag = "1970-01-01" @@ -25,54 +25,54 @@ class TestGetBase: The following cases are used to test `get_vector_by_id` function ****************************************************************** """ - def test_get_vector_A(self, connect, table): + def test_get_vector_A(self, 
connect, collection): ''' target: test get_vector_by_id method: add vector, and get expected: status ok, vector returned ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_vector_by_id(table, ids[0]) + status, res = connect.get_vector_by_id(collection, ids[0]) assert status.OK() assert_equal_vector(res, vector[0]) - def test_get_vector_B(self, connect, table): + def test_get_vector_B(self, connect, collection): ''' target: test get_vector_by_id method: add vector, and get expected: status ok, vector returned ''' vectors = gen_vectors(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_vector_by_id(table, ids[0]) + status, res = connect.get_vector_by_id(collection, ids[0]) assert status.OK() assert_equal_vector(res, vectors[0]) - def test_get_vector_partition(self, connect, table): + def test_get_vector_partition(self, connect, collection): ''' target: test get_vector_by_id method: add vector, and get expected: status ok, vector returned ''' vectors = gen_vectors(nb, dim) - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_vector_by_id(table, ids[0]) + status, res = connect.get_vector_by_id(collection, ids[0]) assert status.OK() assert_equal_vector(res, vectors[0]) - def 
test_get_vector_multi_same_ids(self, connect, table): + def test_get_vector_multi_same_ids(self, connect, collection): ''' target: test get_vector_by_id method: add vectors, with the same id, get vector by the given id @@ -81,10 +81,10 @@ class TestGetBase: vectors = gen_vectors(nb, dim) ids = [i for i in range(nb)] ids[1] = 0; ids[-1] = 0 - status, ids = connect.add_vectors(table, vectors, ids=ids) - status = connect.flush([table]) + status, ids = connect.add_vectors(collection, vectors, ids=ids) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_vector_by_id(table, 0) + status, res = connect.get_vector_by_id(collection, 0) assert status.OK() assert_equal_vector(res, vectors[0]) @@ -101,75 +101,75 @@ class TestGetBase: def get_id(self, request): yield request.param - def test_get_vector_after_delete(self, connect, table, get_id): + def test_get_vector_after_delete(self, connect, collection, get_id): ''' target: test get_vector_by_id method: add vectors, and delete, get vector by the given id expected: status ok, get one vector ''' vectors = gen_vectors(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() id = get_id - status = connect.delete_by_id(table, [ids[id]]) + status = connect.delete_by_id(collection, [ids[id]]) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_vector_by_id(table, ids[id]) + status, res = connect.get_vector_by_id(collection, ids[id]) assert status.OK() assert not res - def test_get_vector_after_delete_with_partition(self, connect, table, get_id): + def test_get_vector_after_delete_with_partition(self, connect, collection, get_id): ''' target: test get_vector_by_id method: add vectors into partition, and delete, get vector by the given id 
expected: status ok, get one vector ''' vectors = gen_vectors(nb, dim) - status = connect.create_partition(table, tag) - status, ids = connect.insert(table, vectors, partition_tag=tag) + status = connect.create_partition(collection, tag) + status, ids = connect.insert(collection, vectors, partition_tag=tag) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() id = get_id - status = connect.delete_by_id(table, [ids[id]]) + status = connect.delete_by_id(collection, [ids[id]]) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_vector_by_id(table, ids[id]) + status, res = connect.get_vector_by_id(collection, ids[id]) assert status.OK() assert not res - def test_get_vector_id_not_exised(self, connect, table): + def test_get_vector_id_not_exised(self, connect, collection): ''' target: test get vector, params vector_id not existed method: add vector and get expected: status ok, empty result ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_vector_by_id(table, 1) + status, res = connect.get_vector_by_id(collection, 1) assert status.OK() assert not res - def test_get_vector_table_not_existed(self, connect, table): + def test_get_vector_collection_not_existed(self, connect, collection): ''' - target: test get vector, params table_name not existed + target: test get vector, params collection_name not existed method: add vector and get expected: status not ok ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert 
status.OK() - table_new = gen_unique_str() - status, res = connect.get_vector_by_id(table_new, 1) + collection_new = gen_unique_str() + status, res = connect.get_vector_by_id(collection_new, 1) assert not status.OK() @@ -205,7 +205,7 @@ class TestGetIndexedVectors: def get_id(self, request): yield request.param - def test_get_vectors_after_index_created(self, connect, table, get_simple_index, get_id): + def test_get_vectors_after_index_created(self, connect, collection, get_simple_index, get_id): ''' target: test get vector after index created method: add vector, create index and get vector @@ -214,20 +214,20 @@ class TestGetIndexedVectors: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() id = get_id - status, res = connect.get_vector_by_id(table, ids[id]) + status, res = connect.get_vector_by_id(collection, ids[id]) assert status.OK() logging.getLogger().info(res) assert status.OK() assert_equal_vector(res, vectors[id]) - def test_get_vector_after_delete(self, connect, table, get_simple_index, get_id): + def test_get_vector_after_delete(self, connect, collection, get_simple_index, get_id): ''' target: test get_vector_by_id method: add vectors, and delete, get vector by the given id @@ -236,22 +236,22 @@ class TestGetIndexedVectors: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vectors = gen_vectors(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = 
connect.flush([collection]) assert status.OK() - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() id = get_id - status = connect.delete_by_id(table, [ids[id]]) + status = connect.delete_by_id(collection, [ids[id]]) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_vector_by_id(table, ids[id]) + status, res = connect.get_vector_by_id(collection, ids[id]) assert status.OK() assert not res - def test_get_vector_partition(self, connect, table, get_simple_index, get_id): + def test_get_vector_partition(self, connect, collection, get_simple_index, get_id): ''' target: test get_vector_by_id method: add vector, and get @@ -260,16 +260,16 @@ class TestGetIndexedVectors: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] vectors = gen_vectors(nb, dim) - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) ids = [i for i in range(nb)] - status, ids = connect.add_vectors(table, vectors, ids, partition_tag=tag) + status, ids = connect.add_vectors(collection, vectors, ids, partition_tag=tag) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() id = get_id - status, res = connect.get_vector_by_id(table, ids[id]) + status, res = connect.get_vector_by_id(collection, ids[id]) assert status.OK() assert_equal_vector(res, vectors[id]) @@ -280,37 +280,37 @@ class TestGetBinary: The following cases are used to test `get_vector_by_id` function ****************************************************************** """ - def test_get_vector_A(self, connect, jac_table): + def test_get_vector_A(self, connect, 
jac_collection): ''' target: test get_vector_by_id method: add vector, and get expected: status ok, vector returned ''' tmp, vector = gen_binary_vectors(1, dim) - status, ids = connect.add_vectors(jac_table, vector) + status, ids = connect.add_vectors(jac_collection, vector) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status, res = connect.get_vector_by_id(jac_table, ids[0]) + status, res = connect.get_vector_by_id(jac_collection, ids[0]) assert status.OK() assert res == vector[0] - def test_get_vector_B(self, connect, jac_table): + def test_get_vector_B(self, connect, jac_collection): ''' target: test get_vector_by_id method: add vector, and get expected: status ok, vector returned ''' tmp, vectors = gen_binary_vectors(nb, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status, res = connect.get_vector_by_id(jac_table, ids[0]) + status, res = connect.get_vector_by_id(jac_collection, ids[0]) assert status.OK() assert res == vectors[0] - def test_get_vector_multi_same_ids(self, connect, jac_table): + def test_get_vector_multi_same_ids(self, connect, jac_collection): ''' target: test get_vector_by_id method: add vectors, with the same id, get vector by the given id @@ -319,56 +319,56 @@ class TestGetBinary: tmp, vectors = gen_binary_vectors(nb, dim) ids = [i for i in range(nb)] ids[0] = 0; ids[-1] = 0 - status, ids = connect.add_vectors(jac_table, vectors, ids=ids) - status = connect.flush([jac_table]) + status, ids = connect.add_vectors(jac_collection, vectors, ids=ids) + status = connect.flush([jac_collection]) assert status.OK() - status, res = connect.get_vector_by_id(jac_table, 0) + status, res = connect.get_vector_by_id(jac_collection, 0) assert status.OK() assert res == vectors[0] - def 
test_get_vector_id_not_exised(self, connect, jac_table): + def test_get_vector_id_not_exised(self, connect, jac_collection): ''' target: test get vector, params vector_id not existed method: add vector and get expected: status ok, empty result ''' tmp, vector = gen_binary_vectors(1, dim) - status, ids = connect.add_vectors(jac_table, vector) + status, ids = connect.add_vectors(jac_collection, vector) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status, res = connect.get_vector_by_id(jac_table, 1) + status, res = connect.get_vector_by_id(jac_collection, 1) assert status.OK() assert not res - def test_get_vector_table_not_existed(self, connect, jac_table): + def test_get_vector_collection_not_existed(self, connect, jac_collection): ''' - target: test get vector, params table_name not existed + target: test get vector, params collection_name not existed method: add vector and get expected: status not ok ''' tmp, vector = gen_binary_vectors(1, dim) - status, ids = connect.add_vectors(jac_table, vector) + status, ids = connect.add_vectors(jac_collection, vector) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - table_new = gen_unique_str() - status, res = connect.get_vector_by_id(table_new, 1) + collection_new = gen_unique_str() + status, res = connect.get_vector_by_id(collection_new, 1) assert not status.OK() - def test_get_vector_partition(self, connect, jac_table): + def test_get_vector_partition(self, connect, jac_collection): ''' target: test get_vector_by_id method: add vector, and get expected: status ok, vector returned ''' tmp, vectors = gen_binary_vectors(nb, dim) - status = connect.create_partition(jac_table, tag) - status, ids = connect.add_vectors(jac_table, vectors, partition_tag=tag) + status = connect.create_partition(jac_collection, tag) + status, ids = connect.add_vectors(jac_collection, vectors, 
partition_tag=tag) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status, res = connect.get_vector_by_id(jac_table, ids[0]) + status, res = connect.get_vector_by_id(jac_collection, ids[0]) assert status.OK() assert res == vectors[0] @@ -387,26 +387,26 @@ class TestGetVectorIdIngalid(object): yield request.param @pytest.mark.level(2) - def test_get_vector_id_invalid(self, connect, table, gen_invalid_id): + def test_get_vector_id_invalid(self, connect, collection, gen_invalid_id): invalid_id = gen_invalid_id with pytest.raises(Exception) as e: - status = connect.get_vector_by_id(table, invalid_id) + status = connect.get_vector_by_id(collection, invalid_id) -class TestTableNameInvalid(object): +class TestcollectionNameInvalid(object): """ - Test adding vectors with invalid table names + Test adding vectors with invalid collection names """ @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) - def get_table_name(self, request): + def get_collection_name(self, request): yield request.param @pytest.mark.level(2) - def test_get_vectors_with_invalid_table_name(self, connect, get_table_name): - table_name = get_table_name + def test_get_vectors_with_invalid_collection_name(self, connect, get_collection_name): + collection_name = get_collection_name vectors = gen_vectors(1, dim) - status, result = connect.get_vector_by_id(table_name, 1) + status, result = connect.get_vector_by_id(collection_name, 1) assert not status.OK() diff --git a/tests/milvus_python_test/test_get_vector_ids.py b/tests/milvus_python_test/test_get_vector_ids.py index 30a4da948f..b1baee44ab 100644 --- a/tests/milvus_python_test/test_get_vector_ids.py +++ b/tests/milvus_python_test/test_get_vector_ids.py @@ -19,13 +19,13 @@ nb = 6000 class TestGetVectorIdsBase: - def get_valid_segment_name(self, connect, table): + def get_valid_segment_name(self, connect, collection): vectors 
= gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() logging.getLogger().info(info.partitions_stat[0].segments_stat[0].segment_name) return info.partitions_stat[0].segments_stat[0].segment_name @@ -36,88 +36,88 @@ class TestGetVectorIdsBase: ****************************************************************** """ @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_table_name_None(self, connect, table): + def test_get_vector_ids_collection_name_None(self, connect, collection): ''' - target: get vector ids where table name is None - method: call get_vector_ids with the table_name: None + target: get vector ids where collection name is None + method: call get_vector_ids with the collection_name: None expected: exception raised ''' - table_name = None - segment_name = self.get_valid_segment_name(connect, table) + collection_name = None + segment_name = self.get_valid_segment_name(connect, collection) with pytest.raises(Exception) as e: - status, vector_ids = connect.get_vector_ids(table_name, segment_name) + status, vector_ids = connect.get_vector_ids(collection_name, segment_name) @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_table_name_not_existed(self, connect, table): + def test_get_vector_ids_collection_name_not_existed(self, connect, collection): ''' - target: get vector ids where table name does not exist - method: call get_vector_ids with a random table_name, which is not in db + target: get vector ids where collection name does not exist + method: call get_vector_ids with a random collection_name, which is not in db expected: status not ok ''' - table_name = gen_unique_str("not_existed_table") - segment_name = 
self.get_valid_segment_name(connect, table) - status, vector_ids = connect.get_vector_ids(table_name, segment_name) + collection_name = gen_unique_str("not_existed_collection") + segment_name = self.get_valid_segment_name(connect, collection) + status, vector_ids = connect.get_vector_ids(collection_name, segment_name) assert not status.OK() @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) - def get_table_name(self, request): + def get_collection_name(self, request): yield request.param @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_table_name_invalid(self, connect, table, get_table_name): + def test_get_vector_ids_collection_name_invalid(self, connect, collection, get_collection_name): ''' - target: get vector ids where table name is invalid - method: call get_vector_ids with invalid table_name + target: get vector ids where collection name is invalid + method: call get_vector_ids with invalid collection_name expected: status not ok ''' - table_name = get_table_name - segment_name = self.get_valid_segment_name(connect, table) - status, vector_ids = connect.get_vector_ids(table_name, segment_name) + collection_name = get_collection_name + segment_name = self.get_valid_segment_name(connect, collection) + status, vector_ids = connect.get_vector_ids(collection_name, segment_name) assert not status.OK() @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_segment_name_None(self, connect, table): + def test_get_vector_ids_segment_name_None(self, connect, collection): ''' target: get vector ids where segment name is None method: call get_vector_ids with the segment_name: None expected: exception raised ''' - valid_segment_name = self.get_valid_segment_name(connect, table) + valid_segment_name = self.get_valid_segment_name(connect, collection) segment = None with pytest.raises(Exception) as e: - status, vector_ids = connect.get_vector_ids(table, segment) + status, vector_ids = 
connect.get_vector_ids(collection, segment) @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_segment_name_not_existed(self, connect, table): + def test_get_vector_ids_segment_name_not_existed(self, connect, collection): ''' target: get vector ids where segment name does not exist method: call get_vector_ids with a random segment name expected: status not ok ''' - valid_segment_name = self.get_valid_segment_name(connect, table) + valid_segment_name = self.get_valid_segment_name(connect, collection) segment = gen_unique_str("not_existed_segment") - status, vector_ids = connect.get_vector_ids(table, segment) + status, vector_ids = connect.get_vector_ids(collection, segment) logging.getLogger().info(vector_ids) assert not status.OK() @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_without_index_A(self, connect, table): + def test_get_vector_ids_without_index_A(self, connect, collection): ''' target: get vector ids when there is no index method: call get_vector_ids and check if the segment contains vectors expected: status ok ''' vectors = gen_vector(10, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() - status, vector_ids = connect.get_vector_ids(table, info.partitions_stat[0].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(collection, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): @@ -125,23 +125,23 @@ class TestGetVectorIdsBase: @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_without_index_B(self, connect, table): + def test_get_vector_ids_without_index_B(self, connect, collection): ''' target: get vector ids when there is no index 
but with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() vectors = gen_vector(10, dim) - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() assert info.partitions_stat[1].tag == tag - status, vector_ids = connect.get_vector_ids(table, info.partitions_stat[1].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(collection, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): @@ -160,7 +160,7 @@ class TestGetVectorIdsBase: return request.param @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_with_index_A(self, connect, table, get_simple_index): + def test_get_vector_ids_with_index_A(self, connect, collection, get_simple_index): ''' target: get vector ids when there is index method: call get_vector_ids and check if the segment contains vectors @@ -168,65 +168,65 @@ class TestGetVectorIdsBase: ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() vectors = gen_vector(10, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, 
info = connect.collection_info(collection) assert status.OK() - status, vector_ids = connect.get_vector_ids(table, info.partitions_stat[0].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(collection, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_with_index_B(self, connect, table, get_simple_index): + def test_get_vector_ids_with_index_B(self, connect, collection, get_simple_index): ''' target: get vector ids when there is index and with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() vectors = gen_vector(10, dim) - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() assert info.partitions_stat[1].tag == tag - status, vector_ids = connect.get_vector_ids(table, info.partitions_stat[1].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(collection, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) - def 
test_get_vector_ids_after_delete_vectors(self, connect, table): + def test_get_vector_ids_after_delete_vectors(self, connect, collection): ''' target: get vector ids after vectors are deleted method: add vectors and delete a few, call get_vector_ids expected: status ok, vector_ids decreased after vectors deleted ''' vectors = gen_vector(2, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() delete_ids = [ids[0]] - status = connect.delete_by_id(table, delete_ids) - status = connect.flush([table]) + status = connect.delete_by_id(collection, delete_ids) + status = connect.flush([collection]) assert status.OK() - status, info = connect.table_info(table) + status, info = connect.collection_info(collection) assert status.OK() - status, vector_ids = connect.get_vector_ids(table, info.partitions_stat[0].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(collection, info.partitions_stat[0].segments_stat[0].segment_name) assert len(vector_ids) == 1 assert vector_ids[0] == ids[1] @@ -238,20 +238,20 @@ class TestGetVectorIdsIP: ****************************************************************** """ @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_without_index_A(self, connect, ip_table): + def test_get_vector_ids_without_index_A(self, connect, ip_collection): ''' target: get vector ids when there is no index method: call get_vector_ids and check if the segment contains vectors expected: status ok ''' vectors = gen_vector(10, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - status, info = connect.table_info(ip_table) + status, info = connect.collection_info(ip_collection) assert status.OK() - status, vector_ids = connect.get_vector_ids(ip_table, 
info.partitions_stat[0].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(ip_collection, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): @@ -259,23 +259,23 @@ class TestGetVectorIdsIP: @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_without_index_B(self, connect, ip_table): + def test_get_vector_ids_without_index_B(self, connect, ip_collection): ''' target: get vector ids when there is no index but with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' - status = connect.create_partition(ip_table, tag) + status = connect.create_partition(ip_collection, tag) assert status.OK() vectors = gen_vector(10, dim) - status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag) + status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - status, info = connect.table_info(ip_table) + status, info = connect.collection_info(ip_collection) assert status.OK() assert info.partitions_stat[1].tag == tag - status, vector_ids = connect.get_vector_ids(ip_table, info.partitions_stat[1].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(ip_collection, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): @@ -294,7 +294,7 @@ class TestGetVectorIdsIP: return request.param @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_with_index_A(self, connect, ip_table, get_simple_index): + def test_get_vector_ids_with_index_A(self, connect, ip_collection, get_simple_index): ''' target: get vector ids when there is index method: call get_vector_ids and check if the segment contains vectors @@ -302,65 +302,65 @@ class 
TestGetVectorIdsIP: ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - status = connect.create_index(ip_table, index_type, index_param) + status = connect.create_index(ip_collection, index_type, index_param) assert status.OK() vectors = gen_vector(10, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() - status = connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - status, info = connect.table_info(ip_table) + status, info = connect.collection_info(ip_collection) assert status.OK() - status, vector_ids = connect.get_vector_ids(ip_table, info.partitions_stat[0].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(ip_collection, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_with_index_B(self, connect, ip_table, get_simple_index): + def test_get_vector_ids_with_index_B(self, connect, ip_collection, get_simple_index): ''' target: get vector ids when there is index and with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' - status = connect.create_partition(ip_table, tag) + status = connect.create_partition(ip_collection, tag) assert status.OK() index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - status = connect.create_index(ip_table, index_type, index_param) + status = connect.create_index(ip_collection, index_type, index_param) assert status.OK() vectors = gen_vector(10, dim) - status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag) + status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag) assert status.OK() - status = 
connect.flush([ip_table]) + status = connect.flush([ip_collection]) assert status.OK() - status, info = connect.table_info(ip_table) + status, info = connect.collection_info(ip_collection) assert status.OK() assert info.partitions_stat[1].tag == tag - status, vector_ids = connect.get_vector_ids(ip_table, info.partitions_stat[1].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(ip_collection, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_after_delete_vectors(self, connect, ip_table): + def test_get_vector_ids_after_delete_vectors(self, connect, ip_collection): ''' target: get vector ids after vectors are deleted method: add vectors and delete a few, call get_vector_ids expected: status ok, vector_ids decreased after vectors deleted ''' vectors = gen_vector(2, dim) - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() delete_ids = [ids[0]] - status = connect.delete_by_id(ip_table, delete_ids) - status = connect.flush([ip_table]) + status = connect.delete_by_id(ip_collection, delete_ids) + status = connect.flush([ip_collection]) assert status.OK() - status, info = connect.table_info(ip_table) + status, info = connect.collection_info(ip_collection) assert status.OK() - status, vector_ids = connect.get_vector_ids(ip_table, info.partitions_stat[0].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(ip_collection, info.partitions_stat[0].segments_stat[0].segment_name) assert len(vector_ids) == 1 assert vector_ids[0] == ids[1] @@ -372,20 +372,20 @@ class TestGetVectorIdsJAC: ****************************************************************** """ @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_without_index_A(self, connect, jac_table): + def 
test_get_vector_ids_without_index_A(self, connect, jac_collection): ''' target: get vector ids when there is no index method: call get_vector_ids and check if the segment contains vectors expected: status ok ''' tmp, vectors = gen_binary_vectors(10, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status, info = connect.table_info(jac_table) + status, info = connect.collection_info(jac_collection) assert status.OK() - status, vector_ids = connect.get_vector_ids(jac_table, info.partitions_stat[0].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(jac_collection, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): @@ -393,23 +393,23 @@ class TestGetVectorIdsJAC: @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_without_index_B(self, connect, jac_table): + def test_get_vector_ids_without_index_B(self, connect, jac_collection): ''' target: get vector ids when there is no index but with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' - status = connect.create_partition(jac_table, tag) + status = connect.create_partition(jac_collection, tag) assert status.OK() tmp, vectors = gen_binary_vectors(10, dim) - status, ids = connect.add_vectors(jac_table, vectors, partition_tag=tag) + status, ids = connect.add_vectors(jac_collection, vectors, partition_tag=tag) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status, info = connect.table_info(jac_table) + status, info = connect.collection_info(jac_collection) assert status.OK() assert info.partitions_stat[1].tag == tag - status, vector_ids = 
connect.get_vector_ids(jac_table, info.partitions_stat[1].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(jac_collection, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): @@ -427,7 +427,7 @@ class TestGetVectorIdsJAC: pytest.skip("Skip index Temporary") @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_with_index_A(self, connect, jac_table, get_jaccard_index): + def test_get_vector_ids_with_index_A(self, connect, jac_collection, get_jaccard_index): ''' target: get vector ids when there is index method: call get_vector_ids and check if the segment contains vectors @@ -435,64 +435,64 @@ class TestGetVectorIdsJAC: ''' index_param = get_jaccard_index["index_param"] index_type = get_jaccard_index["index_type"] - status = connect.create_index(jac_table, index_type, index_param) + status = connect.create_index(jac_collection, index_type, index_param) assert status.OK() tmp, vectors = gen_binary_vectors(10, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status, info = connect.table_info(jac_table) + status, info = connect.collection_info(jac_collection) assert status.OK() - status, vector_ids = connect.get_vector_ids(jac_table, info.partitions_stat[0].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(jac_collection, info.partitions_stat[0].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_with_index_B(self, connect, jac_table, get_jaccard_index): + def test_get_vector_ids_with_index_B(self, connect, jac_collection, get_jaccard_index): ''' target: get vector ids when there is index and 
with partition method: create partition, add vectors to it and call get_vector_ids, check if the segment contains vectors expected: status ok ''' - status = connect.create_partition(jac_table, tag) + status = connect.create_partition(jac_collection, tag) assert status.OK() index_param = get_jaccard_index["index_param"] index_type = get_jaccard_index["index_type"] - status = connect.create_index(jac_table, index_type, index_param) + status = connect.create_index(jac_collection, index_type, index_param) assert status.OK() tmp, vectors = gen_binary_vectors(10, dim) - status, ids = connect.add_vectors(jac_table, vectors, partition_tag=tag) + status, ids = connect.add_vectors(jac_collection, vectors, partition_tag=tag) assert status.OK() - status = connect.flush([jac_table]) + status = connect.flush([jac_collection]) assert status.OK() - status, info = connect.table_info(jac_table) + status, info = connect.collection_info(jac_collection) assert status.OK() assert info.partitions_stat[1].tag == tag - status, vector_ids = connect.get_vector_ids(jac_table, info.partitions_stat[1].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(jac_collection, info.partitions_stat[1].segments_stat[0].segment_name) # vector_ids should match ids assert len(vector_ids) == 10 for i in range(10): assert vector_ids[i] == ids[i] @pytest.mark.timeout(GET_TIMEOUT) - def test_get_vector_ids_after_delete_vectors(self, connect, jac_table): + def test_get_vector_ids_after_delete_vectors(self, connect, jac_collection): ''' target: get vector ids after vectors are deleted method: add vectors and delete a few, call get_vector_ids expected: status ok, vector_ids decreased after vectors deleted ''' tmp, vectors = gen_binary_vectors(2, dim) - status, ids = connect.add_vectors(jac_table, vectors) + status, ids = connect.add_vectors(jac_collection, vectors) assert status.OK() delete_ids = [ids[0]] - status = connect.delete_by_id(jac_table, delete_ids) - status = 
connect.flush([jac_table]) + status = connect.delete_by_id(jac_collection, delete_ids) + status = connect.flush([jac_collection]) assert status.OK() - status, info = connect.table_info(jac_table) + status, info = connect.collection_info(jac_collection) assert status.OK() - status, vector_ids = connect.get_vector_ids(jac_table, info.partitions_stat[0].segments_stat[0].segment_name) + status, vector_ids = connect.get_vector_ids(jac_collection, info.partitions_stat[0].segments_stat[0].segment_name) assert len(vector_ids) == 1 assert vector_ids[0] == ids[1] \ No newline at end of file diff --git a/tests/milvus_python_test/test_index.py b/tests/milvus_python_test/test_index.py index 698bd96754..b84a6ba4df 100644 --- a/tests/milvus_python_test/test_index.py +++ b/tests/milvus_python_test/test_index.py @@ -59,109 +59,109 @@ class TestIndexBase: """ @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index(self, connect, table, get_simple_index): + def test_create_index(self, connect, collection, get_simple_index): ''' target: test create index interface - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status, ids = connect.add_vectors(table, vectors) - status = connect.create_index(table, index_type, index_param) + status, ids = connect.add_vectors(collection, vectors) + status = connect.create_index(collection, index_type, index_param) assert status.OK() @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_no_vectors(self, connect, table, get_simple_index): + def test_create_index_no_vectors(self, connect, collection, get_simple_index): ''' target: test create index interface - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index 
expected: return code equals to 0, and search success ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_partition(self, connect, table, get_simple_index): + def test_create_index_partition(self, connect, collection, get_simple_index): ''' target: test create index interface - method: create table, create partition, and add vectors in it, create index + method: create collection, create partition, and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status = connect.create_partition(table, tag) - status, ids = connect.add_vectors(table, vectors, partition_tag=tag) - status = connect.create_index(table, index_type, index_param) + status = connect.create_partition(collection, tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) + status = connect.create_index(collection, index_type, index_param) assert status.OK() @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_partition_flush(self, connect, table, get_simple_index): + def test_create_index_partition_flush(self, connect, collection, get_simple_index): ''' target: test create index interface - method: create table, create partition, and add vectors in it, create index + method: create collection, create partition, and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status = connect.create_partition(table, tag) - status, ids = 
connect.add_vectors(table, vectors, partition_tag=tag) + status = connect.create_partition(collection, tag) + status, ids = connect.add_vectors(collection, vectors, partition_tag=tag) connect.flush() - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() @pytest.mark.level(2) - def test_create_index_without_connect(self, dis_connect, table): + def test_create_index_without_connect(self, dis_connect, collection): ''' target: test create index without connection - method: create table and add vectors in it, check if added successfully + method: create collection and add vectors in it, check if added successfully expected: raise exception ''' nlist = NLIST index_type = IndexType.IVF_SQ8 index_param = {"nlist": nlist} with pytest.raises(Exception) as e: - status = dis_connect.create_index(table, index_type, index_param) + status = dis_connect.create_index(collection, index_type, index_param) @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_search_with_query_vectors(self, connect, table, get_simple_index): + def test_create_index_search_with_query_vectors(self, connect, collection, get_simple_index): ''' target: test create index interface, search with more query vectors - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status, ids = connect.add_vectors(table, vectors) - status = connect.create_index(table, index_type, index_param) - logging.getLogger().info(connect.describe_index(table)) + status, ids = connect.add_vectors(collection, vectors) + status = connect.create_index(collection, index_type, index_param) + logging.getLogger().info(connect.describe_index(collection)) query_vecs = [vectors[0], 
vectors[1], vectors[2]] top_k = 5 search_param = get_search_param(index_type) - status, result = connect.search_vectors(table, top_k, query_vecs, params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vecs, params=search_param) assert status.OK() assert len(result) == len(query_vecs) logging.getLogger().info(result) @pytest.mark.timeout(BUILD_TIMEOUT) @pytest.mark.level(2) - def test_create_index_multithread(self, connect, table, args): + def test_create_index_multithread(self, connect, collection, args): ''' target: test create index interface with multiprocess - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) def build(connect): - status = connect.create_index(table, IndexType.IVFLAT, {"nlist": NLIST}) + status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST}) assert status.OK() threads_num = 8 @@ -181,46 +181,46 @@ class TestIndexBase: query_vec = [vectors[0]] top_k = 1 search_param = {"nprobe": nprobe} - status, result = connect.search_vectors(table, top_k, query_vec, params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, params=search_param) assert len(result) == 1 assert len(result[0]) == top_k assert result[0][0].distance == 0.0 @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_multithread_multitable(self, connect, args): + def test_create_index_multithread_multicollection(self, connect, args): ''' target: test create index interface with multiprocess - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' threads_num = 8 loop_num = 8 threads = [] - table = [] + collection = [] j = 0 while j < 
(threads_num*loop_num): - table_name = gen_unique_str("test_create_index_multiprocessing") - table.append(table_name) - param = {'table_name': table_name, + collection_name = gen_unique_str("test_create_index_multiprocessing") + collection.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim, 'index_type': IndexType.FLAT, 'store_raw_vector': False} - connect.create_table(param) + connect.create_collection(param) j = j + 1 def create_index(): i = 0 while i < loop_num: - # assert connect.has_table(table[ids*process_num+i]) - status, ids = connect.add_vectors(table[ids*threads_num+i], vectors) + # assert connect.has_collection(collection[ids*process_num+i]) + status, ids = connect.add_vectors(collection[ids*threads_num+i], vectors) - status = connect.create_index(table[ids*threads_num+i], IndexType.IVFLAT, {"nlist": NLIST}) + status = connect.create_index(collection[ids*threads_num+i], IndexType.IVFLAT, {"nlist": NLIST}) assert status.OK() query_vec = [vectors[0]] top_k = 1 search_param = {"nprobe": nprobe} - status, result = connect.search_vectors(table[ids*threads_num+i], top_k, query_vec, params=search_param) + status, result = connect.search_vectors(collection[ids*threads_num+i], top_k, query_vec, params=search_param) assert len(result) == 1 assert len(result[0]) == top_k assert result[0][0].distance == 0.0 @@ -241,13 +241,13 @@ class TestIndexBase: @pytest.mark.timeout(BUILD_TIMEOUT) @pytest.mark.level(2) - def test_create_index_a_multithreads(self, connect, table, args): - status, ids = connect.add_vectors(table, vectors) + def test_create_index_a_multithreads(self, connect, collection, args): + status, ids = connect.add_vectors(collection, vectors) def build(connect): - status = connect.create_index(table, IndexType.IVFLAT, {"nlist": NLIST}) + status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST}) assert status.OK() def count(connect): - status, count = connect.get_table_row_count(table) + status, count 
= connect.count_collection(collection) assert status.OK() assert count == nb @@ -271,16 +271,16 @@ class TestIndexBase: # TODO: enable @pytest.mark.timeout(BUILD_TIMEOUT) @pytest.mark.level(2) - def _test_create_index_multiprocessing(self, connect, table, args): + def _test_create_index_multiprocessing(self, connect, collection, args): ''' target: test create index interface with multiprocess - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) def build(connect): - status = connect.create_index(table, IndexType.IVFLAT, {"nlist": NLIST}) + status = connect.create_index(collection, IndexType.IVFLAT, {"nlist": NLIST}) assert status.OK() process_num = 8 @@ -300,47 +300,47 @@ class TestIndexBase: query_vec = [vectors[0]] top_k = 1 search_param = {"nprobe": nprobe} - status, result = connect.search_vectors(table, top_k, query_vec, params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, params=search_param) assert len(result) == 1 assert len(result[0]) == top_k assert result[0][0].distance == 0.0 # TODO: enable @pytest.mark.timeout(BUILD_TIMEOUT) - def _test_create_index_multiprocessing_multitable(self, connect, args): + def _test_create_index_multiprocessing_multicollection(self, connect, args): ''' target: test create index interface with multiprocess - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' process_num = 8 loop_num = 8 processes = [] - table = [] + collection = [] j = 0 while j < (process_num*loop_num): - table_name = gen_unique_str("test_create_index_multiprocessing") - table.append(table_name) - param = {'table_name': table_name, + collection_name = 
gen_unique_str("test_create_index_multiprocessing") + collection.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim, 'index_type': IndexType.FLAT, 'store_raw_vector': False} - connect.create_table(param) + connect.create_collection(param) j = j + 1 def create_index(): i = 0 while i < loop_num: - # assert connect.has_table(table[ids*process_num+i]) - status, ids = connect.add_vectors(table[ids*process_num+i], vectors) + # assert connect.has_collection(collection[ids*process_num+i]) + status, ids = connect.add_vectors(collection[ids*process_num+i], vectors) - status = connect.create_index(table[ids*process_num+i], IndexType.IVFLAT, {"nlist": NLIST}) + status = connect.create_index(collection[ids*process_num+i], IndexType.IVFLAT, {"nlist": NLIST}) assert status.OK() query_vec = [vectors[0]] top_k = 1 search_param = {"nprobe": nprobe} - status, result = connect.search_vectors(table[ids*process_num+i], top_k, query_vec, params=search_param) + status, result = connect.search_vectors(collection[ids*process_num+i], top_k, query_vec, params=search_param) assert len(result) == 1 assert len(result[0]) == top_k assert result[0][0].distance == 0.0 @@ -359,48 +359,48 @@ class TestIndexBase: for p in processes: p.join() - def test_create_index_table_not_existed(self, connect): + def test_create_index_collection_not_existed(self, connect): ''' - target: test create index interface when table name not existed - method: create table and add vectors in it, create index - , make sure the table name not in index + target: test create index interface when collection name not existed + method: create collection and add vectors in it, create index + , make sure the collection name not in index expected: return code not equals to 0, create index failed ''' - table_name = gen_unique_str(self.__class__.__name__) + collection_name = gen_unique_str(self.__class__.__name__) nlist = NLIST index_type = IndexType.IVF_SQ8 index_param = {"nlist": nlist} - status 
= connect.create_index(table_name, index_type, index_param) + status = connect.create_index(collection_name, index_type, index_param) assert not status.OK() - def test_create_index_table_None(self, connect): + def test_create_index_collection_None(self, connect): ''' - target: test create index interface when table name is None - method: create table and add vectors in it, create index with an table_name: None + target: test create index interface when collection name is None + method: create collection and add vectors in it, create index with an collection_name: None expected: return code not equals to 0, create index failed ''' - table_name = None + collection_name = None nlist = NLIST index_type = IndexType.IVF_SQ8 index_param = {"nlist": nlist} with pytest.raises(Exception) as e: - status = connect.create_index(table_name, index_type, index_param) + status = connect.create_index(collection_name, index_type, index_param) @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_no_vectors_then_add_vectors(self, connect, table, get_simple_index): + def test_create_index_no_vectors_then_add_vectors(self, connect, collection, get_simple_index): ''' - target: test create index interface when there is no vectors in table, and does not affect the subsequent process - method: create table and add no vectors in it, and then create index, add vectors in it + target: test create index interface when there is no vectors in collection, and does not affect the subsequent process + method: create collection and add no vectors in it, and then create index, add vectors in it expected: return code equals to 0 ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - status = connect.create_index(table, index_type, index_param) - status, ids = connect.add_vectors(table, vectors) + status = connect.create_index(collection, index_type, index_param) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() 
@pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_same_index_repeatedly(self, connect, table, get_simple_index): + def test_create_same_index_repeatedly(self, connect, collection, get_simple_index): ''' target: check if index can be created repeatedly, with the same create_index params method: create index after index have been built @@ -408,29 +408,29 @@ class TestIndexBase: ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - status = connect.create_index(table, index_type, index_param) - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_different_index_repeatedly(self, connect, table): + def test_create_different_index_repeatedly(self, connect, collection): ''' target: check if index can be created repeatedly, with the different create_index params method: create another index with different index_params after index have been built expected: return code 0, and describe index result equals with the second index params ''' nlist = NLIST - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) index_type_1 = IndexType.IVF_SQ8 index_type_2 = IndexType.IVFLAT indexs = [{"index_type": index_type_1, "index_param": {"nlist": nlist}}, {"index_type": index_type_2, "index_param": {"nlist": nlist}}] logging.getLogger().info(indexs) for index in indexs: - status = connect.create_index(table, index["index_type"], index["index_param"]) + status = connect.create_index(collection, index["index_type"], index["index_param"]) assert status.OK() - status, result = connect.describe_index(table) + status, result = connect.describe_index(collection) assert result._params["nlist"] == nlist - assert result._table_name == table + assert result._collection_name == 
collection assert result._index_type == index_type_2 """ @@ -439,106 +439,106 @@ class TestIndexBase: ****************************************************************** """ - def test_describe_index(self, connect, table, get_index): + def test_describe_index(self, connect, collection, get_index): ''' target: test describe index interface - method: create table and add vectors in it, create index, call describe index + method: create collection and add vectors in it, create index, call describe index expected: return code 0, and index instructure ''' index_param = get_index["index_param"] index_type = get_index["index_type"] logging.getLogger().info(get_index) - # status, ids = connect.add_vectors(table, vectors) - status = connect.create_index(table, index_type, index_param) - status, result = connect.describe_index(table) + # status, ids = connect.add_vectors(collection, vectors) + status = connect.create_index(collection, index_type, index_param) + status, result = connect.describe_index(collection) logging.getLogger().info(result) assert result._params == index_param - assert result._table_name == table + assert result._collection_name == collection assert result._index_type == index_type - def test_describe_and_drop_index_multi_tables(self, connect, get_simple_index): + def test_describe_and_drop_index_multi_collections(self, connect, get_simple_index): ''' - target: test create, describe and drop index interface with multiple tables of L2 - method: create tables and add vectors in it, create index, call describe index + target: test create, describe and drop index interface with multiple collections of L2 + method: create collections and add vectors in it, create index, call describe index expected: return code 0, and index instructure ''' nq = 100 vectors = gen_vectors(nq, dim) - table_list = [] + collection_list = [] for i in range(10): - table_name = gen_unique_str() - table_list.append(table_name) - param = {'table_name': table_name, + collection_name = 
gen_unique_str() + collection_list.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - connect.create_table(param) + connect.create_collection(param) index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status, ids = connect.add_vectors(table_name=table_name, records=vectors) - status = connect.create_index(table_name, index_type, index_param) + status, ids = connect.add_vectors(collection_name=collection_name, records=vectors) + status = connect.create_index(collection_name, index_type, index_param) assert status.OK() for i in range(10): - status, result = connect.describe_index(table_list[i]) + status, result = connect.describe_index(collection_list[i]) logging.getLogger().info(result) assert result._params == index_param - assert result._table_name == table_list[i] + assert result._collection_name == collection_list[i] assert result._index_type == index_type for i in range(10): - status = connect.drop_index(table_list[i]) + status = connect.drop_index(collection_list[i]) assert status.OK() - status, result = connect.describe_index(table_list[i]) + status, result = connect.describe_index(collection_list[i]) logging.getLogger().info(result) - assert result._table_name == table_list[i] + assert result._collection_name == collection_list[i] assert result._index_type == IndexType.FLAT @pytest.mark.level(2) - def test_describe_index_without_connect(self, dis_connect, table): + def test_describe_index_without_connect(self, dis_connect, collection): ''' target: test describe index without connection method: describe index, and check if describe successfully expected: raise exception ''' with pytest.raises(Exception) as e: - status = dis_connect.describe_index(table) + status = dis_connect.describe_index(collection) - def test_describe_index_table_not_existed(self, connect): + def 
test_describe_index_collection_not_existed(self, connect): ''' - target: test describe index interface when table name not existed - method: create table and add vectors in it, create index - , make sure the table name not in index + target: test describe index interface when collection name not existed + method: create collection and add vectors in it, create index + , make sure the collection name not in index expected: return code not equals to 0, describe index failed ''' - table_name = gen_unique_str(self.__class__.__name__) - status, result = connect.describe_index(table_name) + collection_name = gen_unique_str(self.__class__.__name__) + status, result = connect.describe_index(collection_name) assert not status.OK() - def test_describe_index_table_None(self, connect): + def test_describe_index_collection_None(self, connect): ''' - target: test describe index interface when table name is None - method: create table and add vectors in it, create index with an table_name: None + target: test describe index interface when collection name is None + method: create collection and add vectors in it, create index with an collection_name: None expected: return code not equals to 0, describe index failed ''' - table_name = None + collection_name = None with pytest.raises(Exception) as e: - status = connect.describe_index(table_name) + status = connect.describe_index(collection_name) - def test_describe_index_not_create(self, connect, table): + def test_describe_index_not_create(self, connect, collection): ''' target: test describe index interface when index not created - method: create table and add vectors in it, create index - , make sure the table name not in index + method: create collection and add vectors in it, create index + , make sure the collection name not in index expected: return code not equals to 0, describe index failed ''' - status, ids = connect.add_vectors(table, vectors) - status, result = connect.describe_index(table) + status, ids = 
connect.add_vectors(collection, vectors) + status, result = connect.describe_index(collection) logging.getLogger().info(result) assert status.OK() # assert result._params["nlist"] == index_params["nlist"] - # assert result._table_name == table + # assert result._collection_name == collection # assert result._index_type == index_params["index_type"] """ @@ -547,27 +547,27 @@ class TestIndexBase: ****************************************************************** """ - def test_drop_index(self, connect, table, get_simple_index): + def test_drop_index(self, connect, collection, get_simple_index): ''' target: test drop index interface - method: create table and add vectors in it, create index, call drop index + method: create collection and add vectors in it, create index, call drop index expected: return code 0, and default index param ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - # status, ids = connect.add_vectors(table, vectors) - status = connect.create_index(table, index_type, index_param) + # status, ids = connect.add_vectors(collection, vectors) + status = connect.create_index(collection, index_type, index_param) assert status.OK() - status, result = connect.describe_index(table) + status, result = connect.describe_index(collection) logging.getLogger().info(result) - status = connect.drop_index(table) + status = connect.drop_index(collection) assert status.OK() - status, result = connect.describe_index(table) + status, result = connect.describe_index(collection) logging.getLogger().info(result) - assert result._table_name == table + assert result._collection_name == collection assert result._index_type == IndexType.FLAT - def test_drop_index_repeatly(self, connect, table, get_simple_index): + def test_drop_index_repeatly(self, connect, collection, get_simple_index): ''' target: test drop index repeatly method: create index, call drop index, and drop again @@ -575,66 +575,66 @@ class TestIndexBase: ''' index_param 
= get_simple_index["index_param"] index_type = get_simple_index["index_type"] - # status, ids = connect.add_vectors(table, vectors) - status = connect.create_index(table, index_type, index_param) + # status, ids = connect.add_vectors(collection, vectors) + status = connect.create_index(collection, index_type, index_param) assert status.OK() - status, result = connect.describe_index(table) + status, result = connect.describe_index(collection) logging.getLogger().info(result) - status = connect.drop_index(table) + status = connect.drop_index(collection) assert status.OK() - status = connect.drop_index(table) + status = connect.drop_index(collection) assert status.OK() - status, result = connect.describe_index(table) + status, result = connect.describe_index(collection) logging.getLogger().info(result) - assert result._table_name == table + assert result._collection_name == collection assert result._index_type == IndexType.FLAT @pytest.mark.level(2) - def test_drop_index_without_connect(self, dis_connect, table): + def test_drop_index_without_connect(self, dis_connect, collection): ''' target: test drop index without connection method: drop index, and check if drop successfully expected: raise exception ''' with pytest.raises(Exception) as e: - status = dis_connect.drop_index(table) + status = dis_connect.drop_index(collection) - def test_drop_index_table_not_existed(self, connect): + def test_drop_index_collection_not_existed(self, connect): ''' - target: test drop index interface when table name not existed - method: create table and add vectors in it, create index - , make sure the table name not in index, and then drop it + target: test drop index interface when collection name not existed + method: create collection and add vectors in it, create index + , make sure the collection name not in index, and then drop it expected: return code not equals to 0, drop index failed ''' - table_name = gen_unique_str(self.__class__.__name__) - status = 
connect.drop_index(table_name) + collection_name = gen_unique_str(self.__class__.__name__) + status = connect.drop_index(collection_name) assert not status.OK() - def test_drop_index_table_None(self, connect): + def test_drop_index_collection_None(self, connect): ''' - target: test drop index interface when table name is None - method: create table and add vectors in it, create index with an table_name: None + target: test drop index interface when collection name is None + method: create collection and add vectors in it, create index with an collection_name: None expected: return code not equals to 0, drop index failed ''' - table_name = None + collection_name = None with pytest.raises(Exception) as e: - status = connect.drop_index(table_name) + status = connect.drop_index(collection_name) - def test_drop_index_table_not_create(self, connect, table): + def test_drop_index_collection_not_create(self, connect, collection): ''' target: test drop index interface when index not created - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code not equals to 0, drop index failed ''' - status, ids = connect.add_vectors(table, vectors) - status, result = connect.describe_index(table) + status, ids = connect.add_vectors(collection, vectors) + status, result = connect.describe_index(collection) logging.getLogger().info(result) # no create index - status = connect.drop_index(table) + status = connect.drop_index(collection) logging.getLogger().info(status) assert status.OK() - def test_create_drop_index_repeatly(self, connect, table, get_simple_index): + def test_create_drop_index_repeatly(self, connect, collection, get_simple_index): ''' target: test create / drop index repeatly, use the same index params method: create index, drop index, four times @@ -642,20 +642,20 @@ class TestIndexBase: ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - # 
status, ids = connect.add_vectors(table, vectors) + # status, ids = connect.add_vectors(collection, vectors) for i in range(2): - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert status.OK() - status, result = connect.describe_index(table) + status, result = connect.describe_index(collection) logging.getLogger().info(result) - status = connect.drop_index(table) + status = connect.drop_index(collection) assert status.OK() - status, result = connect.describe_index(table) + status, result = connect.describe_index(collection) logging.getLogger().info(result) - assert result._table_name == table + assert result._collection_name == collection assert result._index_type == IndexType.FLAT - def test_create_drop_index_repeatly_different_index_params(self, connect, table): + def test_create_drop_index_repeatly_different_index_params(self, connect, collection): ''' target: test create / drop index repeatly, use the different index params method: create index, drop index, four times, each tme use different index_params to create index @@ -663,17 +663,17 @@ class TestIndexBase: ''' nlist = NLIST indexs = [{"index_type": IndexType.IVFLAT, "index_param": {"nlist": nlist}}, {"index_type": IndexType.IVF_SQ8, "index_param": {"nlist": nlist}}] - # status, ids = connect.add_vectors(table, vectors) + # status, ids = connect.add_vectors(collection, vectors) for i in range(2): - status = connect.create_index(table, indexs[i]["index_type"], indexs[i]["index_param"]) + status = connect.create_index(collection, indexs[i]["index_type"], indexs[i]["index_param"]) assert status.OK() - status, result = connect.describe_index(table) + status, result = connect.describe_index(collection) logging.getLogger().info(result) - status = connect.drop_index(table) + status = connect.drop_index(collection) assert status.OK() - status, result = connect.describe_index(table) + status, result = 
connect.describe_index(collection) logging.getLogger().info(result) - assert result._table_name == table + assert result._collection_name == collection assert result._index_type == IndexType.FLAT @@ -710,10 +710,10 @@ class TestIndexIP: """ @pytest.mark.level(2) @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index(self, connect, ip_table, get_simple_index): + def test_create_index(self, connect, ip_collection, get_simple_index): ''' target: test create index interface - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = get_simple_index["index_param"] @@ -721,15 +721,15 @@ class TestIndexIP: logging.getLogger().info(get_simple_index) if index_type in [IndexType.RNSG]: pytest.skip("Skip some RNSG cases") - status, ids = connect.add_vectors(ip_table, vectors) - status = connect.create_index(ip_table, index_type, index_param) + status, ids = connect.add_vectors(ip_collection, vectors) + status = connect.create_index(ip_collection, index_type, index_param) assert status.OK() @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_table(self, connect, ip_table, get_simple_index): + def test_create_index_collection(self, connect, ip_collection, get_simple_index): ''' target: test create index interface - method: create table, create partition, and add vectors in it, create index on table + method: create collection, create partition, and add vectors in it, create index on collection expected: return code equals to 0, and search success ''' index_param = get_simple_index["index_param"] @@ -737,29 +737,29 @@ class TestIndexIP: if index_type in [IndexType.RNSG]: pytest.skip("Skip some RNSG cases") logging.getLogger().info(get_simple_index) - status = connect.create_partition(ip_table, tag) - status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag) - status = connect.create_index(ip_table, index_type, 
index_param) + status = connect.create_partition(ip_collection, tag) + status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag) + status = connect.create_index(ip_collection, index_type, index_param) assert status.OK() @pytest.mark.level(2) - def test_create_index_without_connect(self, dis_connect, ip_table): + def test_create_index_without_connect(self, dis_connect, ip_collection): ''' target: test create index without connection - method: create table and add vectors in it, check if added successfully + method: create collection and add vectors in it, check if added successfully expected: raise exception ''' nlist = NLIST index_type = IndexType.IVF_SQ8 index_param = {"nlist": nlist} with pytest.raises(Exception) as e: - status = dis_connect.create_index(ip_table, index_type, index_param) + status = dis_connect.create_index(ip_collection, index_type, index_param) @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_search_with_query_vectors(self, connect, ip_table, get_simple_index): + def test_create_index_search_with_query_vectors(self, connect, ip_collection, get_simple_index): ''' target: test create index interface, search with more query vectors - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = get_simple_index["index_param"] @@ -767,13 +767,13 @@ class TestIndexIP: logging.getLogger().info(get_simple_index) if index_type in [IndexType.RNSG]: pytest.skip("Skip some RNSG cases") - status, ids = connect.add_vectors(ip_table, vectors) - status = connect.create_index(ip_table, index_type, index_param) - logging.getLogger().info(connect.describe_index(ip_table)) + status, ids = connect.add_vectors(ip_collection, vectors) + status = connect.create_index(ip_collection, index_type, index_param) + logging.getLogger().info(connect.describe_index(ip_collection)) query_vecs = [vectors[0], vectors[1], 
vectors[2]] top_k = 5 search_param = get_search_param(index_type) - status, result = connect.search_vectors(ip_table, top_k, query_vecs, params=search_param) + status, result = connect.search_vectors(ip_collection, top_k, query_vecs, params=search_param) logging.getLogger().info(result) assert status.OK() assert len(result) == len(query_vecs) @@ -781,16 +781,16 @@ class TestIndexIP: # TODO: enable @pytest.mark.timeout(BUILD_TIMEOUT) @pytest.mark.level(2) - def _test_create_index_multiprocessing(self, connect, ip_table, args): + def _test_create_index_multiprocessing(self, connect, ip_collection, args): ''' target: test create index interface with multiprocess - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) def build(connect): - status = connect.create_index(ip_table, IndexType.IVFLAT, {"nlist": NLIST}) + status = connect.create_index(ip_collection, IndexType.IVFLAT, {"nlist": NLIST}) assert status.OK() process_num = 8 @@ -810,45 +810,45 @@ class TestIndexIP: query_vec = [vectors[0]] top_k = 1 search_param = {"nprobe": nprobe} - status, result = connect.search_vectors(ip_table, top_k, query_vec, params=search_param) + status, result = connect.search_vectors(ip_collection, top_k, query_vec, params=search_param) assert len(result) == 1 assert len(result[0]) == top_k assert result[0][0].distance == 0.0 # TODO: enable @pytest.mark.timeout(BUILD_TIMEOUT) - def _test_create_index_multiprocessing_multitable(self, connect, args): + def _test_create_index_multiprocessing_multicollection(self, connect, args): ''' target: test create index interface with multiprocess - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search 
success ''' process_num = 8 loop_num = 8 processes = [] - table = [] + collection = [] j = 0 while j < (process_num*loop_num): - table_name = gen_unique_str("test_create_index_multiprocessing") - table.append(table_name) - param = {'table_name': table_name, + collection_name = gen_unique_str("test_create_index_multiprocessing") + collection.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim} - connect.create_table(param) + connect.create_collection(param) j = j + 1 def create_index(): i = 0 while i < loop_num: - # assert connect.has_table(table[ids*process_num+i]) - status, ids = connect.add_vectors(table[ids*process_num+i], vectors) + # assert connect.has_collection(collection[ids*process_num+i]) + status, ids = connect.add_vectors(collection[ids*process_num+i], vectors) - status = connect.create_index(table[ids*process_num+i], IndexType.IVFLAT, {"nlist": NLIST}) + status = connect.create_index(collection[ids*process_num+i], IndexType.IVFLAT, {"nlist": NLIST}) assert status.OK() query_vec = [vectors[0]] top_k = 1 search_param = {"nprobe": nprobe} - status, result = connect.search_vectors(table[ids*process_num+i], top_k, query_vec, params=search_param) + status, result = connect.search_vectors(collection[ids*process_num+i], top_k, query_vec, params=search_param) assert len(result) == 1 assert len(result[0]) == top_k assert result[0][0].distance == 0.0 @@ -867,73 +867,73 @@ class TestIndexIP: for p in processes: p.join() - def test_create_index_no_vectors(self, connect, ip_table): + def test_create_index_no_vectors(self, connect, ip_collection): ''' - target: test create index interface when there is no vectors in table - method: create table and add no vectors in it, and then create index + target: test create index interface when there is no vectors in collection + method: create collection and add no vectors in it, and then create index expected: return code equals to 0 ''' nlist = NLIST index_type = IndexType.IVF_SQ8 
index_param = {"nlist": nlist} - status = connect.create_index(ip_table, index_type, index_param) + status = connect.create_index(ip_collection, index_type, index_param) assert status.OK() @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_no_vectors_then_add_vectors(self, connect, ip_table, get_simple_index): + def test_create_index_no_vectors_then_add_vectors(self, connect, ip_collection, get_simple_index): ''' - target: test create index interface when there is no vectors in table, and does not affect the subsequent process - method: create table and add no vectors in it, and then create index, add vectors in it + target: test create index interface when there is no vectors in collection, and does not affect the subsequent process + method: create collection and add no vectors in it, and then create index, add vectors in it expected: return code equals to 0 ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] if index_type in [IndexType.RNSG]: pytest.skip("Skip some RNSG cases") - status = connect.create_index(ip_table, index_type, index_param) - status, ids = connect.add_vectors(ip_table, vectors) + status = connect.create_index(ip_collection, index_type, index_param) + status, ids = connect.add_vectors(ip_collection, vectors) assert status.OK() @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_same_index_repeatedly(self, connect, ip_table): + def test_create_same_index_repeatedly(self, connect, ip_collection): ''' target: check if index can be created repeatedly, with the same create_index params method: create index after index have been built expected: return code success, and search ok ''' nlist = NLIST - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) index_type = IndexType.IVF_SQ8 index_param = {"nlist": nlist} - status = connect.create_index(ip_table, index_type, index_param) - status = connect.create_index(ip_table, index_type, 
index_param) + status = connect.create_index(ip_collection, index_type, index_param) + status = connect.create_index(ip_collection, index_type, index_param) assert status.OK() query_vec = [vectors[0]] top_k = 1 search_param = {"nprobe": nprobe} - status, result = connect.search_vectors(ip_table, top_k, query_vec, params=search_param) + status, result = connect.search_vectors(ip_collection, top_k, query_vec, params=search_param) assert len(result) == 1 assert len(result[0]) == top_k @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_different_index_repeatedly(self, connect, ip_table): + def test_create_different_index_repeatedly(self, connect, ip_collection): ''' target: check if index can be created repeatedly, with the different create_index params method: create another index with different index_params after index have been built expected: return code 0, and describe index result equals with the second index params ''' nlist = NLIST - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) index_type_1 = IndexType.IVF_SQ8 index_type_2 = IndexType.IVFLAT indexs = [{"index_type": index_type_1, "index_param": {"nlist": nlist}}, {"index_type": index_type_2, "index_param": {"nlist": nlist}}] logging.getLogger().info(indexs) for index in indexs: - status = connect.create_index(ip_table, index["index_type"], index["index_param"]) + status = connect.create_index(ip_collection, index["index_type"], index["index_param"]) assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) assert result._params["nlist"] == nlist - assert result._table_name == ip_table + assert result._collection_name == ip_collection assert result._index_type == index_type_2 """ @@ -942,10 +942,10 @@ class TestIndexIP: ****************************************************************** """ - def test_describe_index(self, connect, ip_table, get_simple_index): + def 
test_describe_index(self, connect, ip_collection, get_simple_index): ''' target: test describe index interface - method: create table and add vectors in it, create index, call describe index + method: create collection and add vectors in it, create index, call describe index expected: return code 0, and index instructure ''' index_param = get_simple_index["index_param"] @@ -953,11 +953,11 @@ class TestIndexIP: logging.getLogger().info(get_simple_index) if index_type in [IndexType.RNSG]: pytest.skip() - # status, ids = connect.add_vectors(ip_table, vectors[:5000]) - status = connect.create_index(ip_table, index_type, index_param) - status, result = connect.describe_index(ip_table) + # status, ids = connect.add_vectors(ip_collection, vectors[:5000]) + status = connect.create_index(ip_collection, index_type, index_param) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - assert result._table_name == ip_table + assert result._collection_name == ip_collection status, mode = connect._cmd("mode") if str(mode) == "GPU" and index_type == IndexType.IVF_PQ: assert result._index_type == IndexType.FLAT @@ -966,10 +966,10 @@ class TestIndexIP: assert result._index_type == index_type assert result._params == index_param - def test_describe_index_partition(self, connect, ip_table, get_simple_index): + def test_describe_index_partition(self, connect, ip_collection, get_simple_index): ''' target: test describe index interface - method: create table, create partition and add vectors in it, create index, call describe index + method: create collection, create partition and add vectors in it, create index, call describe index expected: return code 0, and index instructure ''' index_param = get_simple_index["index_param"] @@ -977,19 +977,19 @@ class TestIndexIP: if index_type in [IndexType.RNSG]: pytest.skip("Skip some RNSG cases") logging.getLogger().info(get_simple_index) - status = connect.create_partition(ip_table, tag) - status, ids = 
connect.add_vectors(ip_table, vectors, partition_tag=tag) - status = connect.create_index(ip_table, index_type, index_param) - status, result = connect.describe_index(ip_table) + status = connect.create_partition(ip_collection, tag) + status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag) + status = connect.create_index(ip_collection, index_type, index_param) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) assert result._params == index_param - assert result._table_name == ip_table + assert result._collection_name == ip_collection assert result._index_type == index_type - def test_describe_index_partition_A(self, connect, ip_table, get_simple_index): + def test_describe_index_partition_A(self, connect, ip_collection, get_simple_index): ''' target: test describe index interface - method: create table, create partitions and add vectors in it, create index on partitions, call describe index + method: create collection, create partitions and add vectors in it, create index on partitions, call describe index expected: return code 0, and index instructure ''' new_tag = "new_tag" @@ -998,79 +998,79 @@ class TestIndexIP: if index_type in [IndexType.RNSG]: pytest.skip("Skip some RNSG cases") logging.getLogger().info(get_simple_index) - status = connect.create_partition(ip_table, tag) - status = connect.create_partition(ip_table, new_tag) - status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag) - status, ids = connect.add_vectors(ip_table, vectors, partition_tag=new_tag) - status = connect.create_index(ip_table, index_type, index_param) - status, result = connect.describe_index(ip_table) + status = connect.create_partition(ip_collection, tag) + status = connect.create_partition(ip_collection, new_tag) + status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag) + status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=new_tag) + status = 
connect.create_index(ip_collection, index_type, index_param) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) assert result._params == index_param - assert result._table_name == ip_table + assert result._collection_name == ip_collection assert result._index_type == index_type - def test_describe_and_drop_index_multi_tables(self, connect, get_simple_index): + def test_describe_and_drop_index_multi_collections(self, connect, get_simple_index): ''' - target: test create, describe and drop index interface with multiple tables of IP - method: create tables and add vectors in it, create index, call describe index + target: test create, describe and drop index interface with multiple collections of IP + method: create collections and add vectors in it, create index, call describe index expected: return code 0, and index instructure ''' nq = 100 vectors = gen_vectors(nq, dim) - table_list = [] + collection_list = [] for i in range(10): - table_name = gen_unique_str() - table_list.append(table_name) - param = {'table_name': table_name, + collection_name = gen_unique_str() + collection_list.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.IP} - connect.create_table(param) + connect.create_collection(param) index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] if index_type in [IndexType.RNSG]: pytest.skip("Skip some RNSG cases") logging.getLogger().info(get_simple_index) - status, ids = connect.add_vectors(table_name=table_name, records=vectors) - status = connect.create_index(table_name, index_type, index_param) + status, ids = connect.add_vectors(collection_name=collection_name, records=vectors) + status = connect.create_index(collection_name, index_type, index_param) assert status.OK() for i in range(10): - status, result = connect.describe_index(table_list[i]) + status, result = 
connect.describe_index(collection_list[i]) logging.getLogger().info(result) assert result._params == index_param - assert result._table_name == table_list[i] + assert result._collection_name == collection_list[i] assert result._index_type == index_type for i in range(10): - status = connect.drop_index(table_list[i]) + status = connect.drop_index(collection_list[i]) assert status.OK() - status, result = connect.describe_index(table_list[i]) + status, result = connect.describe_index(collection_list[i]) logging.getLogger().info(result) - assert result._table_name == table_list[i] + assert result._collection_name == collection_list[i] assert result._index_type == IndexType.FLAT @pytest.mark.level(2) - def test_describe_index_without_connect(self, dis_connect, ip_table): + def test_describe_index_without_connect(self, dis_connect, ip_collection): ''' target: test describe index without connection method: describe index, and check if describe successfully expected: raise exception ''' with pytest.raises(Exception) as e: - status = dis_connect.describe_index(ip_table) + status = dis_connect.describe_index(ip_collection) - def test_describe_index_not_create(self, connect, ip_table): + def test_describe_index_not_create(self, connect, ip_collection): ''' target: test describe index interface when index not created - method: create table and add vectors in it, create index - , make sure the table name not in index + method: create collection and add vectors in it, create index + , make sure the collection name not in index expected: return code not equals to 0, describe index failed ''' - status, ids = connect.add_vectors(ip_table, vectors) - status, result = connect.describe_index(ip_table) + status, ids = connect.add_vectors(ip_collection, vectors) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) assert status.OK() # assert result._params["nlist"] == index_params["nlist"] - # assert result._table_name == table + # assert 
result._collection_name == collection # assert result._index_type == index_params["index_type"] """ @@ -1079,10 +1079,10 @@ class TestIndexIP: ****************************************************************** """ - def test_drop_index(self, connect, ip_table, get_simple_index): + def test_drop_index(self, connect, ip_collection, get_simple_index): ''' target: test drop index interface - method: create table and add vectors in it, create index, call drop index + method: create collection and add vectors in it, create index, call drop index expected: return code 0, and default index param ''' index_param = get_simple_index["index_param"] @@ -1091,48 +1091,48 @@ class TestIndexIP: assert status.OK() if index_type in [IndexType.RNSG]: pytest.skip() - # status, ids = connect.add_vectors(ip_table, vectors) - status = connect.create_index(ip_table, index_type, index_param) + # status, ids = connect.add_vectors(ip_collection, vectors) + status = connect.create_index(ip_collection, index_type, index_param) if str(mode) == "GPU" and (index_type == IndexType.IVF_PQ): assert not status.OK() else: assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - status = connect.drop_index(ip_table) + status = connect.drop_index(ip_collection) assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - assert result._table_name == ip_table + assert result._collection_name == ip_collection assert result._index_type == IndexType.FLAT - def test_drop_index_partition(self, connect, ip_table, get_simple_index): + def test_drop_index_partition(self, connect, ip_collection, get_simple_index): ''' target: test drop index interface - method: create table, create partition and add vectors in it, create index on table, call drop table index + method: create collection, create partition and 
add vectors in it, create index on collection, call drop collection index expected: return code 0, and default index param ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] if index_type in [IndexType.RNSG]: pytest.skip("Skip some RNSG cases") - status = connect.create_partition(ip_table, tag) - status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag) - status = connect.create_index(ip_table, index_type, index_param) + status = connect.create_partition(ip_collection, tag) + status, ids = connect.add_vectors(ip_collection, vectors, partition_tag=tag) + status = connect.create_index(ip_collection, index_type, index_param) assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - status = connect.drop_index(ip_table) + status = connect.drop_index(ip_collection) assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - assert result._table_name == ip_table + assert result._collection_name == ip_collection assert result._index_type == IndexType.FLAT - def test_drop_index_partition_C(self, connect, ip_table, get_simple_index): + def test_drop_index_partition_C(self, connect, ip_collection, get_simple_index): ''' target: test drop index interface - method: create table, create partitions and add vectors in it, create index on partitions, call drop partition index + method: create collection, create partitions and add vectors in it, create index on partitions, call drop partition index expected: return code 0, and default index param ''' new_tag = "new_tag" @@ -1140,19 +1140,19 @@ class TestIndexIP: index_type = get_simple_index["index_type"] if index_type in [IndexType.RNSG]: pytest.skip("Skip some RNSG cases") - status = connect.create_partition(ip_table, tag) - status = connect.create_partition(ip_table, 
new_tag) - status, ids = connect.add_vectors(ip_table, vectors) - status = connect.create_index(ip_table, index_type, index_param) + status = connect.create_partition(ip_collection, tag) + status = connect.create_partition(ip_collection, new_tag) + status, ids = connect.add_vectors(ip_collection, vectors) + status = connect.create_index(ip_collection, index_type, index_param) assert status.OK() - status = connect.drop_index(ip_table) + status = connect.drop_index(ip_collection) assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - assert result._table_name == ip_table + assert result._collection_name == ip_collection assert result._index_type == IndexType.FLAT - def test_drop_index_repeatly(self, connect, ip_table, get_simple_index): + def test_drop_index_repeatly(self, connect, ip_collection, get_simple_index): ''' target: test drop index repeatly method: create index, call drop index, and drop again @@ -1160,30 +1160,30 @@ class TestIndexIP: ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] - # status, ids = connect.add_vectors(ip_table, vectors) + # status, ids = connect.add_vectors(ip_collection, vectors) status, mode = connect._cmd("mode") assert status.OK() if index_type in [IndexType.RNSG]: pytest.skip() - # status, ids = connect.add_vectors(ip_table, vectors) - status = connect.create_index(ip_table, index_type, index_param) + # status, ids = connect.add_vectors(ip_collection, vectors) + status = connect.create_index(ip_collection, index_type, index_param) if str(mode) == "GPU" and (index_type == IndexType.IVF_PQ): assert not status.OK() else: assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - status = connect.drop_index(ip_table) + status = connect.drop_index(ip_collection) assert status.OK() - 
status = connect.drop_index(ip_table) + status = connect.drop_index(ip_collection) assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - assert result._table_name == ip_table + assert result._collection_name == ip_collection assert result._index_type == IndexType.FLAT @pytest.mark.level(2) - def test_drop_index_without_connect(self, dis_connect, ip_table): + def test_drop_index_without_connect(self, dis_connect, ip_collection): ''' target: test drop index without connection method: drop index, and check if drop successfully @@ -1193,23 +1193,23 @@ class TestIndexIP: index_type = IndexType.IVFLAT index_param = {"nlist": nlist} with pytest.raises(Exception) as e: - status = dis_connect.drop_index(ip_table, index_type, index_param) + status = dis_connect.drop_index(ip_collection, index_type, index_param) - def test_drop_index_table_not_create(self, connect, ip_table): + def test_drop_index_collection_not_create(self, connect, ip_collection): ''' target: test drop index interface when index not created - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code not equals to 0, drop index failed ''' - status, ids = connect.add_vectors(ip_table, vectors) - status, result = connect.describe_index(ip_table) + status, ids = connect.add_vectors(ip_collection, vectors) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) # no create index - status = connect.drop_index(ip_table) + status = connect.drop_index(ip_collection) logging.getLogger().info(status) assert status.OK() - def test_create_drop_index_repeatly(self, connect, ip_table, get_simple_index): + def test_create_drop_index_repeatly(self, connect, ip_collection, get_simple_index): ''' target: test create / drop index repeatly, use the same index params method: create index, drop index, four 
times @@ -1219,20 +1219,20 @@ class TestIndexIP: index_type = get_simple_index["index_type"] if index_type in [IndexType.RNSG]: pytest.skip("Skip some RNSG cases") - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) for i in range(2): - status = connect.create_index(ip_table, index_type, index_param) + status = connect.create_index(ip_collection, index_type, index_param) assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - status = connect.drop_index(ip_table) + status = connect.drop_index(ip_collection) assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - assert result._table_name == ip_table + assert result._collection_name == ip_collection assert result._index_type == IndexType.FLAT - def test_create_drop_index_repeatly_different_index_params(self, connect, ip_table): + def test_create_drop_index_repeatly_different_index_params(self, connect, ip_collection): ''' target: test create / drop index repeatly, use the different index params method: create index, drop index, four times, each tme use different index_params to create index @@ -1240,21 +1240,21 @@ class TestIndexIP: ''' nlist = NLIST indexs = [{"index_type": IndexType.IVFLAT, "index_param": {"nlist": nlist}}, {"index_type": IndexType.IVF_SQ8, "index_param": {"nlist": nlist}}] - status, ids = connect.add_vectors(ip_table, vectors) + status, ids = connect.add_vectors(ip_collection, vectors) for i in range(2): - status = connect.create_index(ip_table, indexs[i]["index_type"], indexs[i]["index_param"]) + status = connect.create_index(ip_collection, indexs[i]["index_type"], indexs[i]["index_param"]) assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) assert 
result._params == indexs[i]["index_param"] - assert result._table_name == ip_table + assert result._collection_name == ip_collection assert result._index_type == indexs[i]["index_type"] - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - status = connect.drop_index(ip_table) + status = connect.drop_index(ip_collection) assert status.OK() - status, result = connect.describe_index(ip_table) + status, result = connect.describe_index(ip_collection) logging.getLogger().info(result) - assert result._table_name == ip_table + assert result._collection_name == ip_collection assert result._index_type == IndexType.FLAT @@ -1304,66 +1304,66 @@ class TestIndexJAC: ****************************************************************** """ @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index(self, connect, jac_table, get_jaccard_index): + def test_create_index(self, connect, jac_collection, get_jaccard_index): ''' target: test create index interface - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = get_jaccard_index["index_param"] index_type = get_jaccard_index["index_type"] logging.getLogger().info(get_jaccard_index) - status, ids = connect.add_vectors(jac_table, self.vectors) - status = connect.create_index(jac_table, index_type, index_param) + status, ids = connect.add_vectors(jac_collection, self.vectors) + status = connect.create_index(jac_collection, index_type, index_param) if index_type != IndexType.FLAT and index_type != IndexType.IVFLAT: assert not status.OK() else: assert status.OK() @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_partition(self, connect, jac_table, get_jaccard_index): + def test_create_index_partition(self, connect, jac_collection, get_jaccard_index): ''' target: test create index interface - method: 
create table, create partition, and add vectors in it, create index + method: create collection, create partition, and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = get_jaccard_index["index_param"] index_type = get_jaccard_index["index_type"] logging.getLogger().info(get_jaccard_index) - status = connect.create_partition(jac_table, tag) - status, ids = connect.add_vectors(jac_table, self.vectors, partition_tag=tag) - status = connect.create_index(jac_table, index_type, index_param) + status = connect.create_partition(jac_collection, tag) + status, ids = connect.add_vectors(jac_collection, self.vectors, partition_tag=tag) + status = connect.create_index(jac_collection, index_type, index_param) assert status.OK() @pytest.mark.level(2) - def test_create_index_without_connect(self, dis_connect, jac_table): + def test_create_index_without_connect(self, dis_connect, jac_collection): ''' target: test create index without connection - method: create table and add vectors in it, check if added successfully + method: create collection and add vectors in it, check if added successfully expected: raise exception ''' nlist = NLIST index_param = {"nlist": nlist} with pytest.raises(Exception) as e: - status = dis_connect.create_index(jac_table, IndexType.IVF_SQ8, index_param) + status = dis_connect.create_index(jac_collection, IndexType.IVF_SQ8, index_param) @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_search_with_query_vectors(self, connect, jac_table, get_jaccard_index): + def test_create_index_search_with_query_vectors(self, connect, jac_collection, get_jaccard_index): ''' target: test create index interface, search with more query vectors - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = get_jaccard_index["index_param"] index_type = 
get_jaccard_index["index_type"] logging.getLogger().info(get_jaccard_index) - status, ids = connect.add_vectors(jac_table, self.vectors) - status = connect.create_index(jac_table, index_type, index_param) - logging.getLogger().info(connect.describe_index(jac_table)) + status, ids = connect.add_vectors(jac_collection, self.vectors) + status = connect.create_index(jac_collection, index_type, index_param) + logging.getLogger().info(connect.describe_index(jac_collection)) query_vecs = [self.vectors[0], self.vectors[1], self.vectors[2]] top_k = 5 search_param = get_search_param(index_type) - status, result = connect.search_vectors(jac_table, top_k, query_vecs, params=search_param) + status, result = connect.search_vectors(jac_collection, top_k, query_vecs, params=search_param) logging.getLogger().info(result) assert status.OK() assert len(result) == len(query_vecs) @@ -1374,39 +1374,39 @@ class TestIndexJAC: ****************************************************************** """ - def test_describe_index(self, connect, jac_table, get_jaccard_index): + def test_describe_index(self, connect, jac_collection, get_jaccard_index): ''' target: test describe index interface - method: create table and add vectors in it, create index, call describe index + method: create collection and add vectors in it, create index, call describe index expected: return code 0, and index instructure ''' index_param = get_jaccard_index["index_param"] index_type = get_jaccard_index["index_type"] logging.getLogger().info(get_jaccard_index) - # status, ids = connect.add_vectors(jac_table, vectors[:5000]) - status = connect.create_index(jac_table, index_type, index_param) - status, result = connect.describe_index(jac_table) + # status, ids = connect.add_vectors(jac_collection, vectors[:5000]) + status = connect.create_index(jac_collection, index_type, index_param) + status, result = connect.describe_index(jac_collection) logging.getLogger().info(result) - assert result._table_name == jac_table + 
assert result._collection_name == jac_collection assert result._index_type == index_type assert result._params == index_param - def test_describe_index_partition(self, connect, jac_table, get_jaccard_index): + def test_describe_index_partition(self, connect, jac_collection, get_jaccard_index): ''' target: test describe index interface - method: create table, create partition and add vectors in it, create index, call describe index + method: create collection, create partition and add vectors in it, create index, call describe index expected: return code 0, and index instructure ''' index_param = get_jaccard_index["index_param"] index_type = get_jaccard_index["index_type"] logging.getLogger().info(get_jaccard_index) - status = connect.create_partition(jac_table, tag) - status, ids = connect.add_vectors(jac_table, vectors, partition_tag=tag) - status = connect.create_index(jac_table, index_type, index_param) - status, result = connect.describe_index(jac_table) + status = connect.create_partition(jac_collection, tag) + status, ids = connect.add_vectors(jac_collection, vectors, partition_tag=tag) + status = connect.create_index(jac_collection, index_type, index_param) + status, result = connect.describe_index(jac_collection) logging.getLogger().info(result) assert result._params == index_param - assert result._table_name == jac_table + assert result._collection_name == jac_collection assert result._index_type == index_type """ @@ -1415,47 +1415,47 @@ class TestIndexJAC: ****************************************************************** """ - def test_drop_index(self, connect, jac_table, get_jaccard_index): + def test_drop_index(self, connect, jac_collection, get_jaccard_index): ''' target: test drop index interface - method: create table and add vectors in it, create index, call drop index + method: create collection and add vectors in it, create index, call drop index expected: return code 0, and default index param ''' index_param = get_jaccard_index["index_param"] 
index_type = get_jaccard_index["index_type"] status, mode = connect._cmd("mode") assert status.OK() - # status, ids = connect.add_vectors(ip_table, vectors) - status = connect.create_index(jac_table, index_type, index_param) + # status, ids = connect.add_vectors(ip_collection, vectors) + status = connect.create_index(jac_collection, index_type, index_param) assert status.OK() - status, result = connect.describe_index(jac_table) + status, result = connect.describe_index(jac_collection) logging.getLogger().info(result) - status = connect.drop_index(jac_table) + status = connect.drop_index(jac_collection) assert status.OK() - status, result = connect.describe_index(jac_table) + status, result = connect.describe_index(jac_collection) logging.getLogger().info(result) - assert result._table_name == jac_table + assert result._collection_name == jac_collection assert result._index_type == IndexType.FLAT - def test_drop_index_partition(self, connect, jac_table, get_jaccard_index): + def test_drop_index_partition(self, connect, jac_collection, get_jaccard_index): ''' target: test drop index interface - method: create table, create partition and add vectors in it, create index on table, call drop table index + method: create collection, create partition and add vectors in it, create index on collection, call drop collection index expected: return code 0, and default index param ''' index_param = get_jaccard_index["index_param"] index_type = get_jaccard_index["index_type"] - status = connect.create_partition(jac_table, tag) - status, ids = connect.add_vectors(jac_table, vectors, partition_tag=tag) - status = connect.create_index(jac_table, index_type, index_param) + status = connect.create_partition(jac_collection, tag) + status, ids = connect.add_vectors(jac_collection, vectors, partition_tag=tag) + status = connect.create_index(jac_collection, index_type, index_param) assert status.OK() - status, result = connect.describe_index(jac_table) + status, result = 
connect.describe_index(jac_collection) logging.getLogger().info(result) - status = connect.drop_index(jac_table) + status = connect.drop_index(jac_collection) assert status.OK() - status, result = connect.describe_index(jac_table) + status, result = connect.describe_index(jac_collection) logging.getLogger().info(result) - assert result._table_name == jac_table + assert result._collection_name == jac_collection assert result._index_type == IndexType.FLAT @@ -1503,68 +1503,68 @@ class TestIndexHAM: ****************************************************************** """ @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index(self, connect, ham_table, get_hamming_index): + def test_create_index(self, connect, ham_collection, get_hamming_index): ''' target: test create index interface - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = get_hamming_index["index_param"] index_type = get_hamming_index["index_type"] logging.getLogger().info(get_hamming_index) - status, ids = connect.add_vectors(ham_table, self.vectors) - status = connect.create_index(ham_table, index_type, index_param) + status, ids = connect.add_vectors(ham_collection, self.vectors) + status = connect.create_index(ham_collection, index_type, index_param) if index_type != IndexType.FLAT and index_type != IndexType.IVFLAT: assert not status.OK() else: assert status.OK() @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_partition(self, connect, ham_table, get_hamming_index): + def test_create_index_partition(self, connect, ham_collection, get_hamming_index): ''' target: test create index interface - method: create table, create partition, and add vectors in it, create index + method: create collection, create partition, and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = 
get_hamming_index["index_param"] index_type = get_hamming_index["index_type"] logging.getLogger().info(get_hamming_index) - status = connect.create_partition(ham_table, tag) - status, ids = connect.add_vectors(ham_table, self.vectors, partition_tag=tag) - status = connect.create_index(ham_table, index_type, index_param) + status = connect.create_partition(ham_collection, tag) + status, ids = connect.add_vectors(ham_collection, self.vectors, partition_tag=tag) + status = connect.create_index(ham_collection, index_type, index_param) assert status.OK() - status, res = connect.get_table_row_count(ham_table) + status, res = connect.count_collection(ham_collection) assert res == len(self.vectors) @pytest.mark.level(2) - def test_create_index_without_connect(self, dis_connect, ham_table): + def test_create_index_without_connect(self, dis_connect, ham_collection): ''' target: test create index without connection - method: create table and add vectors in it, check if added successfully + method: create collection and add vectors in it, check if added successfully expected: raise exception ''' nlist = NLIST index_param = {"nlist": nlist} with pytest.raises(Exception) as e: - status = dis_connect.create_index(ham_table, IndexType.IVF_SQ8, index_param) + status = dis_connect.create_index(ham_collection, IndexType.IVF_SQ8, index_param) @pytest.mark.timeout(BUILD_TIMEOUT) - def test_create_index_search_with_query_vectors(self, connect, ham_table, get_hamming_index): + def test_create_index_search_with_query_vectors(self, connect, ham_collection, get_hamming_index): ''' target: test create index interface, search with more query vectors - method: create table and add vectors in it, create index + method: create collection and add vectors in it, create index expected: return code equals to 0, and search success ''' index_param = get_hamming_index["index_param"] index_type = get_hamming_index["index_type"] logging.getLogger().info(get_hamming_index) - status, ids = 
connect.add_vectors(ham_table, self.vectors) - status = connect.create_index(ham_table, index_type, index_param) - logging.getLogger().info(connect.describe_index(ham_table)) + status, ids = connect.add_vectors(ham_collection, self.vectors) + status = connect.create_index(ham_collection, index_type, index_param) + logging.getLogger().info(connect.describe_index(ham_collection)) query_vecs = [self.vectors[0], self.vectors[1], self.vectors[2]] top_k = 5 search_param = get_search_param(index_type) - status, result = connect.search_vectors(ham_table, top_k, query_vecs, params=search_param) + status, result = connect.search_vectors(ham_collection, top_k, query_vecs, params=search_param) logging.getLogger().info(result) assert status.OK() assert len(result) == len(query_vecs) @@ -1575,39 +1575,39 @@ class TestIndexHAM: ****************************************************************** """ - def test_describe_index(self, connect, ham_table, get_hamming_index): + def test_describe_index(self, connect, ham_collection, get_hamming_index): ''' target: test describe index interface - method: create table and add vectors in it, create index, call describe index + method: create collection and add vectors in it, create index, call describe index expected: return code 0, and index instructure ''' index_param = get_hamming_index["index_param"] index_type = get_hamming_index["index_type"] logging.getLogger().info(get_hamming_index) - # status, ids = connect.add_vectors(jac_table, vectors[:5000]) - status = connect.create_index(ham_table, index_type, index_param) - status, result = connect.describe_index(ham_table) + # status, ids = connect.add_vectors(jac_collection, vectors[:5000]) + status = connect.create_index(ham_collection, index_type, index_param) + status, result = connect.describe_index(ham_collection) logging.getLogger().info(result) - assert result._table_name == ham_table + assert result._collection_name == ham_collection assert result._index_type == index_type assert 
result._params == index_param - def test_describe_index_partition(self, connect, ham_table, get_hamming_index): + def test_describe_index_partition(self, connect, ham_collection, get_hamming_index): ''' target: test describe index interface - method: create table, create partition and add vectors in it, create index, call describe index + method: create collection, create partition and add vectors in it, create index, call describe index expected: return code 0, and index instructure ''' index_param = get_hamming_index["index_param"] index_type = get_hamming_index["index_type"] logging.getLogger().info(get_hamming_index) - status = connect.create_partition(ham_table, tag) - status, ids = connect.add_vectors(ham_table, vectors, partition_tag=tag) - status = connect.create_index(ham_table, index_type, index_param) - status, result = connect.describe_index(ham_table) + status = connect.create_partition(ham_collection, tag) + status, ids = connect.add_vectors(ham_collection, vectors, partition_tag=tag) + status = connect.create_index(ham_collection, index_type, index_param) + status, result = connect.describe_index(ham_collection) logging.getLogger().info(result) assert result._params == index_param - assert result._table_name == ham_table + assert result._collection_name == ham_collection assert result._index_type == index_type """ @@ -1616,84 +1616,84 @@ class TestIndexHAM: ****************************************************************** """ - def test_drop_index(self, connect, ham_table, get_hamming_index): + def test_drop_index(self, connect, ham_collection, get_hamming_index): ''' target: test drop index interface - method: create table and add vectors in it, create index, call drop index + method: create collection and add vectors in it, create index, call drop index expected: return code 0, and default index param ''' index_param = get_hamming_index["index_param"] index_type = get_hamming_index["index_type"] status, mode = connect._cmd("mode") assert 
status.OK() - # status, ids = connect.add_vectors(ip_table, vectors) - status = connect.create_index(ham_table, index_type, index_param) + # status, ids = connect.add_vectors(ip_collection, vectors) + status = connect.create_index(ham_collection, index_type, index_param) assert status.OK() - status, result = connect.describe_index(ham_table) + status, result = connect.describe_index(ham_collection) logging.getLogger().info(result) - status = connect.drop_index(ham_table) + status = connect.drop_index(ham_collection) assert status.OK() - status, result = connect.describe_index(ham_table) + status, result = connect.describe_index(ham_collection) logging.getLogger().info(result) - assert result._table_name == ham_table + assert result._collection_name == ham_collection assert result._index_type == IndexType.FLAT - def test_drop_index_partition(self, connect, ham_table, get_hamming_index): + def test_drop_index_partition(self, connect, ham_collection, get_hamming_index): ''' target: test drop index interface - method: create table, create partition and add vectors in it, create index on table, call drop table index + method: create collection, create partition and add vectors in it, create index on collection, call drop collection index expected: return code 0, and default index param ''' index_param = get_hamming_index["index_param"] index_type = get_hamming_index["index_type"] - status = connect.create_partition(ham_table, tag) - status, ids = connect.add_vectors(ham_table, vectors, partition_tag=tag) - status = connect.create_index(ham_table, index_type, index_param) + status = connect.create_partition(ham_collection, tag) + status, ids = connect.add_vectors(ham_collection, vectors, partition_tag=tag) + status = connect.create_index(ham_collection, index_type, index_param) assert status.OK() - status, result = connect.describe_index(ham_table) + status, result = connect.describe_index(ham_collection) logging.getLogger().info(result) - status = 
connect.drop_index(ham_table) + status = connect.drop_index(ham_collection) assert status.OK() - status, result = connect.describe_index(ham_table) + status, result = connect.describe_index(ham_collection) logging.getLogger().info(result) - assert result._table_name == ham_table + assert result._collection_name == ham_collection assert result._index_type == IndexType.FLAT -class TestIndexTableInvalid(object): +class TestIndexcollectionInvalid(object): """ - Test create / describe / drop index interfaces with invalid table names + Test create / describe / drop index interfaces with invalid collection names """ @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) - def get_table_name(self, request): + def get_collection_name(self, request): yield request.param @pytest.mark.level(1) - def test_create_index_with_invalid_tablename(self, connect, get_table_name): - table_name = get_table_name + def test_create_index_with_invalid_collectionname(self, connect, get_collection_name): + collection_name = get_collection_name nlist = NLIST index_param = {"nlist": nlist} - status = connect.create_index(table_name, IndexType.IVF_SQ8, index_param) + status = connect.create_index(collection_name, IndexType.IVF_SQ8, index_param) assert not status.OK() @pytest.mark.level(1) - def test_describe_index_with_invalid_tablename(self, connect, get_table_name): - table_name = get_table_name - status, result = connect.describe_index(table_name) + def test_describe_index_with_invalid_collectionname(self, connect, get_collection_name): + collection_name = get_collection_name + status, result = connect.describe_index(collection_name) assert not status.OK() @pytest.mark.level(1) - def test_drop_index_with_invalid_tablename(self, connect, get_table_name): - table_name = get_table_name - status = connect.drop_index(table_name) + def test_drop_index_with_invalid_collectionname(self, connect, get_collection_name): + collection_name = 
get_collection_name + status = connect.drop_index(collection_name) assert not status.OK() class TestCreateIndexParamsInvalid(object): """ - Test Building index with invalid table names, table names not in db + Test Building index with invalid collection names, collection names not in db """ @pytest.fixture( scope="function", @@ -1703,16 +1703,16 @@ class TestCreateIndexParamsInvalid(object): yield request.param @pytest.mark.level(1) - def test_create_index_with_invalid_index_params(self, connect, table, get_index): + def test_create_index_with_invalid_index_params(self, connect, collection, get_index): index_param = get_index["index_param"] index_type = get_index["index_type"] logging.getLogger().info(get_index) - # status, ids = connect.add_vectors(table, vectors) + # status, ids = connect.add_vectors(collection, vectors) if (not index_type) or (not isinstance(index_type, IndexType)): with pytest.raises(Exception) as e: - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) else: - status = connect.create_index(table, index_type, index_param) + status = connect.create_index(collection, index_type, index_param) assert not status.OK() """ @@ -1725,22 +1725,22 @@ class TestCreateIndexParamsInvalid(object): def get_index_type(self, request): yield request.param - def test_create_index_with_invalid_nlist(self, connect, table, get_index_type): - status, ids = connect.add_vectors(table, vectors) - status = connect.create_index(table, get_index_type, {"nlist": INVALID_NLIST}) + def test_create_index_with_invalid_nlist(self, connect, collection, get_index_type): + status, ids = connect.add_vectors(collection, vectors) + status = connect.create_index(collection, get_index_type, {"nlist": INVALID_NLIST}) if get_index_type != IndexType.FLAT: assert not status.OK() ''' Test Building index with empty params ''' - def test_create_index_with_empty_param(self, connect, table, get_index_type): + def 
test_create_index_with_empty_param(self, connect, collection, get_index_type): logging.getLogger().info(get_index_type) - status = connect.create_index(table, get_index_type, {}) + status = connect.create_index(collection, get_index_type, {}) if get_index_type != IndexType.FLAT : assert not status.OK() - status, result = connect.describe_index(table) + status, result = connect.describe_index(collection) logging.getLogger().info(result) - assert result._table_name == table + assert result._collection_name == collection assert result._index_type == IndexType.FLAT diff --git a/tests/milvus_python_test/test_mix.py b/tests/milvus_python_test/test_mix.py index 904053a276..29fbf9f420 100644 --- a/tests/milvus_python_test/test_mix.py +++ b/tests/milvus_python_test/test_mix.py @@ -12,7 +12,7 @@ from utils import * dim = 128 index_file_size = 10 -table_id = "test_mix" +collection_id = "test_mix" add_interval_time = 2 vectors = gen_vectors(10000, dim) vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2') @@ -29,35 +29,35 @@ class TestMixBase: # disable def _test_search_during_createIndex(self, args): loops = 10000 - table = gen_unique_str() + collection = gen_unique_str() query_vecs = [vectors[0], vectors[1]] uri = "tcp://%s:%s" % (args["ip"], args["port"]) id_0 = 0; id_1 = 0 milvus_instance = get_milvus(args["handler"]) milvus_instance.connect(uri=uri) - milvus_instance.create_table({'table_name': table, + milvus_instance.create_collection({'collection_name': collection, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2}) for i in range(10): - status, ids = milvus_instance.add_vectors(table, vectors) + status, ids = milvus_instance.add_vectors(collection, vectors) # logging.getLogger().info(ids) if i == 0: id_0 = ids[0]; id_1 = ids[1] def create_index(milvus_instance): logging.getLogger().info("In create index") - status = milvus_instance.create_index(table, index_params) + status = milvus_instance.create_index(collection, 
index_params) logging.getLogger().info(status) - status, result = milvus_instance.describe_index(table) + status, result = milvus_instance.describe_index(collection) logging.getLogger().info(result) def add_vectors(milvus_instance): logging.getLogger().info("In add vectors") - status, ids = milvus_instance.add_vectors(table, vectors) + status, ids = milvus_instance.add_vectors(collection, vectors) logging.getLogger().info(status) def search(milvus_instance): logging.getLogger().info("In search vectors") for i in range(loops): - status, result = milvus_instance.search_vectors(table, top_k, nprobe, query_vecs) + status, result = milvus_instance.search_vectors(collection, top_k, nprobe, query_vecs) logging.getLogger().info(status) assert result[0][0].id == id_0 assert result[1][0].id == id_1 @@ -72,82 +72,82 @@ class TestMixBase: p_create.join() @pytest.mark.level(2) - def test_mix_multi_tables(self, connect): + def test_mix_multi_collections(self, connect): ''' - target: test functions with multiple tables of different metric_types and index_types - method: create 60 tables which 30 are L2 and the other are IP, add vectors into them + target: test functions with multiple collections of different metric_types and index_types + method: create 60 collections which 30 are L2 and the other are IP, add vectors into them and test describe index and search expected: status ok ''' nq = 10000 - table_list = [] + collection_list = [] idx = [] index_param = {'nlist': nlist} - #create table and add vectors + #create collection and add vectors for i in range(30): - table_name = gen_unique_str('test_mix_multi_tables') - table_list.append(table_name) - param = {'table_name': table_name, + collection_name = gen_unique_str('test_mix_multi_collections') + collection_list.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - connect.create_table(param) - status, ids = 
connect.add_vectors(table_name=table_name, records=vectors) + connect.create_collection(param) + status, ids = connect.add_vectors(collection_name=collection_name, records=vectors) idx.append(ids[0]) idx.append(ids[10]) idx.append(ids[20]) assert status.OK() for i in range(30): - table_name = gen_unique_str('test_mix_multi_tables') - table_list.append(table_name) - param = {'table_name': table_name, + collection_name = gen_unique_str('test_mix_multi_collections') + collection_list.append(collection_name) + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.IP} - connect.create_table(param) - status, ids = connect.add_vectors(table_name=table_name, records=vectors) + connect.create_collection(param) + status, ids = connect.add_vectors(collection_name=collection_name, records=vectors) assert status.OK() - status = connect.flush([table_name]) + status = connect.flush([collection_name]) assert status.OK() idx.append(ids[0]) idx.append(ids[10]) idx.append(ids[20]) assert status.OK() for i in range(10): - status = connect.create_index(table_list[i], IndexType.FLAT, index_param) + status = connect.create_index(collection_list[i], IndexType.FLAT, index_param) assert status.OK() - status = connect.create_index(table_list[30 + i], IndexType.FLAT, index_param) + status = connect.create_index(collection_list[30 + i], IndexType.FLAT, index_param) assert status.OK() - status = connect.create_index(table_list[10 + i], IndexType.IVFLAT, index_param) + status = connect.create_index(collection_list[10 + i], IndexType.IVFLAT, index_param) assert status.OK() - status = connect.create_index(table_list[40 + i], IndexType.IVFLAT, index_param) + status = connect.create_index(collection_list[40 + i], IndexType.IVFLAT, index_param) assert status.OK() - status = connect.create_index(table_list[20 + i], IndexType.IVF_SQ8, index_param) + status = connect.create_index(collection_list[20 + i], IndexType.IVF_SQ8, 
index_param) assert status.OK() - status = connect.create_index(table_list[50 + i], IndexType.IVF_SQ8, index_param) + status = connect.create_index(collection_list[50 + i], IndexType.IVF_SQ8, index_param) assert status.OK() #describe index for i in range(10): - status, result = connect.describe_index(table_list[i]) + status, result = connect.describe_index(collection_list[i]) assert result._index_type == IndexType.FLAT - status, result = connect.describe_index(table_list[10 + i]) + status, result = connect.describe_index(collection_list[10 + i]) assert result._index_type == IndexType.IVFLAT - status, result = connect.describe_index(table_list[20 + i]) + status, result = connect.describe_index(collection_list[20 + i]) assert result._index_type == IndexType.IVF_SQ8 - status, result = connect.describe_index(table_list[30 + i]) + status, result = connect.describe_index(collection_list[30 + i]) assert result._index_type == IndexType.FLAT - status, result = connect.describe_index(table_list[40 + i]) + status, result = connect.describe_index(collection_list[40 + i]) assert result._index_type == IndexType.IVFLAT - status, result = connect.describe_index(table_list[50 + i]) + status, result = connect.describe_index(collection_list[50 + i]) assert result._index_type == IndexType.IVF_SQ8 #search query_vecs = [vectors[0], vectors[10], vectors[20]] for i in range(60): - table = table_list[i] - status, result = connect.search_vectors(table, top_k, query_records=query_vecs, params={"nprobe": 1}) + collection = collection_list[i] + status, result = connect.search_vectors(collection, top_k, query_records=query_vecs, params={"nprobe": 1}) assert status.OK() assert len(result) == len(query_vecs) logging.getLogger().info(i) diff --git a/tests/milvus_python_test/test_partition.py b/tests/milvus_python_test/test_partition.py index 83bc7c446a..cf8cd9db6f 100644 --- a/tests/milvus_python_test/test_partition.py +++ b/tests/milvus_python_test/test_partition.py @@ -11,7 +11,7 @@ from utils 
import * dim = 128 index_file_size = 10 -table_id = "test_partition" +collection_id = "test_partition" ADD_TIMEOUT = 60 nprobe = 1 tag = "1970-01-01" @@ -24,58 +24,58 @@ class TestCreateBase: The following cases are used to test `create_partition` function ****************************************************************** """ - def test_create_partition(self, connect, table): + def test_create_partition(self, connect, collection): ''' target: test create partition, check status returned method: call function: create_partition expected: status ok ''' - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() - def test_create_partition_repeat(self, connect, table): + def test_create_partition_repeat(self, connect, collection): ''' target: test create partition, check status returned method: call function: create_partition expected: status ok ''' - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert not status.OK() - def test_create_partition_table_not_existed(self, connect): + def test_create_partition_collection_not_existed(self, connect): ''' - target: test create partition, its owner table name not existed in db, check status returned + target: test create partition, its owner collection name not existed in db, check status returned method: call function: create_partition expected: status not ok ''' - table_name = gen_unique_str() - status = connect.create_partition(table_name, tag) + collection_name = gen_unique_str() + status = connect.create_partition(collection_name, tag) assert not status.OK() - def test_create_partition_tag_name_None(self, connect, table): + def test_create_partition_tag_name_None(self, connect, collection): ''' target: test create partition, tag name set None, check status returned method: call function: 
create_partition expected: status ok ''' tag_name = None - status = connect.create_partition(table, tag_name) + status = connect.create_partition(collection, tag_name) assert not status.OK() - def test_create_different_partition_tags(self, connect, table): + def test_create_different_partition_tags(self, connect, collection): ''' target: test create partition twice with different names method: call function: create_partition, and again expected: status ok ''' - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() tag_name = gen_unique_str() - status = connect.create_partition(table, tag_name) + status = connect.create_partition(collection, tag_name) assert status.OK() - status, res = connect.show_partitions(table) + status, res = connect.show_partitions(collection) assert status.OK() tag_list = [] for item in res: @@ -84,95 +84,95 @@ class TestCreateBase: assert tag_name in tag_list assert "_default" in tag_list - def test_create_partition_add_vectors_default(self, connect, table): + def test_create_partition_add_vectors_default(self, connect, collection): ''' target: test create partition, and insert vectors, check status returned method: call function: create_partition expected: status ok ''' - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() nq = 100 vectors = gen_vectors(nq, dim) ids = [i for i in range(nq)] - status, ids = connect.insert(table, vectors, ids) + status, ids = connect.insert(collection, vectors, ids) assert status.OK() - def test_create_partition_insert_with_tag(self, connect, table): + def test_create_partition_insert_with_tag(self, connect, collection): ''' target: test create partition, and insert vectors, check status returned method: call function: create_partition expected: status ok ''' - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() nq 
= 100 vectors = gen_vectors(nq, dim) ids = [i for i in range(nq)] - status, ids = connect.insert(table, vectors, ids, partition_tag=tag) + status, ids = connect.insert(collection, vectors, ids, partition_tag=tag) assert status.OK() - def test_create_partition_insert_with_tag_not_existed(self, connect, table): + def test_create_partition_insert_with_tag_not_existed(self, connect, collection): ''' target: test create partition, and insert vectors, check status returned method: call function: create_partition expected: status not ok ''' tag_new = "tag_new" - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() nq = 100 vectors = gen_vectors(nq, dim) ids = [i for i in range(nq)] - status, ids = connect.insert(table, vectors, ids, partition_tag=tag_new) + status, ids = connect.insert(collection, vectors, ids, partition_tag=tag_new) assert not status.OK() - def test_create_partition_insert_same_tags(self, connect, table): + def test_create_partition_insert_same_tags(self, connect, collection): ''' target: test create partition, and insert vectors, check status returned method: call function: create_partition expected: status ok ''' - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() nq = 100 vectors = gen_vectors(nq, dim) ids = [i for i in range(nq)] - status, ids = connect.insert(table, vectors, ids, partition_tag=tag) + status, ids = connect.insert(collection, vectors, ids, partition_tag=tag) ids = [(i+100) for i in range(nq)] - status, ids = connect.insert(table, vectors, ids, partition_tag=tag) + status, ids = connect.insert(collection, vectors, ids, partition_tag=tag) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_table_row_count(table) + status, res = connect.count_collection(collection) assert res == nq * 2 - def 
test_create_partition_insert_same_tags_two_tables(self, connect, table): + def test_create_partition_insert_same_tags_two_collections(self, connect, collection): ''' - target: test create two partitions, and insert vectors with the same tag to each table, check status returned + target: test create two partitions, and insert vectors with the same tag to each collection, check status returned method: call function: create_partition - expected: status ok, table length is correct + expected: status ok, collection length is correct ''' - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() - table_new = gen_unique_str() - param = {'table_name': table_new, + collection_new = gen_unique_str() + param = {'collection_name': collection_new, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) - status = connect.create_partition(table_new, tag) + status = connect.create_collection(param) + status = connect.create_partition(collection_new, tag) nq = 100 vectors = gen_vectors(nq, dim) ids = [i for i in range(nq)] - status, ids = connect.insert(table, vectors, ids, partition_tag=tag) + status, ids = connect.insert(collection, vectors, ids, partition_tag=tag) ids = [(i+100) for i in range(nq)] - status, ids = connect.insert(table_new, vectors, ids, partition_tag=tag) - status = connect.flush([table, table_new]) + status, ids = connect.insert(collection_new, vectors, ids, partition_tag=tag) + status = connect.flush([collection, collection_new]) assert status.OK() - status, res = connect.get_table_row_count(table) + status, res = connect.count_collection(collection) assert res == nq - status, res = connect.get_table_row_count(table_new) + status, res = connect.count_collection(collection_new) assert res == nq @@ -183,35 +183,35 @@ class TestShowBase: The following cases are used to test `show_partitions` function 
****************************************************************** """ - def test_show_partitions(self, connect, table): + def test_show_partitions(self, connect, collection): ''' target: test show partitions, check status and partitions returned method: create partition first, then call function: show_partitions expected: status ok, partition correct ''' - status = connect.create_partition(table, tag) - status, res = connect.show_partitions(table) + status = connect.create_partition(collection, tag) + status, res = connect.show_partitions(collection) assert status.OK() - def test_show_partitions_no_partition(self, connect, table): + def test_show_partitions_no_partition(self, connect, collection): ''' - target: test show partitions with table name, check status and partitions returned + target: test show partitions with collection name, check status and partitions returned method: call function: show_partitions expected: status ok, partitions correct ''' - status, res = connect.show_partitions(table) + status, res = connect.show_partitions(collection) assert status.OK() - def test_show_multi_partitions(self, connect, table): + def test_show_multi_partitions(self, connect, collection): ''' target: test show partitions, check status and partitions returned method: create partitions first, then call function: show_partitions expected: status ok, partitions correct ''' tag_new = gen_unique_str() - status = connect.create_partition(table, tag) - status = connect.create_partition(table, tag_new) - status, res = connect.show_partitions(table) + status = connect.create_partition(collection, tag) + status = connect.create_partition(collection, tag_new) + status, res = connect.show_partitions(collection) assert status.OK() @@ -222,72 +222,72 @@ class TestDropBase: The following cases are used to test `drop_partition` function ****************************************************************** """ - def test_drop_partition(self, connect, table): + def 
test_drop_partition(self, connect, collection): ''' target: test drop partition, check status and partition if existed method: create partitions first, then call function: drop_partition expected: status ok, no partitions in db ''' - status = connect.create_partition(table, tag) - status = connect.drop_partition(table, tag) + status = connect.create_partition(collection, tag) + status = connect.drop_partition(collection, tag) assert status.OK() - status, res = connect.show_partitions(table) + status, res = connect.show_partitions(collection) tag_list = [] for item in res: tag_list.append(item.tag) assert tag not in tag_list - def test_drop_partition_tag_not_existed(self, connect, table): + def test_drop_partition_tag_not_existed(self, connect, collection): ''' target: test drop partition, but tag not existed method: create partitions first, then call function: drop_partition expected: status not ok ''' - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) new_tag = "new_tag" - status = connect.drop_partition(table, new_tag) + status = connect.drop_partition(collection, new_tag) assert not status.OK() - def test_drop_partition_tag_not_existed_A(self, connect, table): + def test_drop_partition_tag_not_existed_A(self, connect, collection): ''' - target: test drop partition, but table not existed + target: test drop partition, but collection not existed method: create partitions first, then call function: drop_partition expected: status not ok ''' - status = connect.create_partition(table, tag) - new_table = gen_unique_str() - status = connect.drop_partition(new_table, tag) + status = connect.create_partition(collection, tag) + new_collection = gen_unique_str() + status = connect.drop_partition(new_collection, tag) assert not status.OK() - def test_drop_partition_repeatedly(self, connect, table): + def test_drop_partition_repeatedly(self, connect, collection): ''' target: test drop partition twice, check status and 
partition if existed method: create partitions first, then call function: drop_partition expected: status not ok, no partitions in db ''' - status = connect.create_partition(table, tag) - status = connect.drop_partition(table, tag) - status = connect.drop_partition(table, tag) + status = connect.create_partition(collection, tag) + status = connect.drop_partition(collection, tag) + status = connect.drop_partition(collection, tag) time.sleep(2) assert not status.OK() - status, res = connect.show_partitions(table) + status, res = connect.show_partitions(collection) tag_list = [] for item in res: tag_list.append(item.tag) assert tag not in tag_list - def test_drop_partition_create(self, connect, table): + def test_drop_partition_create(self, connect, collection): ''' target: test drop partition, and create again, check status method: create partitions first, then call function: drop_partition, create_partition expected: status not ok, partition in db ''' - status = connect.create_partition(table, tag) - status = connect.drop_partition(table, tag) + status = connect.create_partition(collection, tag) + status = connect.drop_partition(collection, tag) time.sleep(2) - status = connect.create_partition(table, tag) + status = connect.create_partition(collection, tag) assert status.OK() - status, res = connect.show_partitions(table) + status, res = connect.show_partitions(collection) tag_list = [] for item in res: tag_list.append(item.tag) @@ -297,47 +297,47 @@ class TestDropBase: class TestNameInvalid(object): @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) def get_tag_name(self, request): yield request.param @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) - def get_table_name(self, request): + def get_collection_name(self, request): yield request.param - def test_drop_partition_with_invalid_table_name(self, connect, table, get_table_name): + def 
test_drop_partition_with_invalid_collection_name(self, connect, collection, get_collection_name): ''' - target: test drop partition, with invalid table name, check status returned + target: test drop partition, with invalid collection name, check status returned method: call function: drop_partition expected: status not ok ''' - table_name = get_table_name - status = connect.create_partition(table, tag) - status = connect.drop_partition(table_name, tag) + collection_name = get_collection_name + status = connect.create_partition(collection, tag) + status = connect.drop_partition(collection_name, tag) assert not status.OK() - def test_drop_partition_with_invalid_tag_name(self, connect, table, get_tag_name): + def test_drop_partition_with_invalid_tag_name(self, connect, collection, get_tag_name): ''' target: test drop partition, with invalid tag name, check status returned method: call function: drop_partition expected: status not ok ''' tag_name = get_tag_name - status = connect.create_partition(table, tag) - status = connect.drop_partition(table, tag_name) + status = connect.create_partition(collection, tag) + status = connect.drop_partition(collection, tag_name) assert not status.OK() - def test_show_partitions_with_invalid_table_name(self, connect, table, get_table_name): + def test_show_partitions_with_invalid_collection_name(self, connect, collection, get_collection_name): ''' - target: test show partitions, with invalid table name, check status returned + target: test show partitions, with invalid collection name, check status returned method: call function: show_partitions expected: status not ok ''' - table_name = get_table_name - status = connect.create_partition(table, tag) - status, res = connect.show_partitions(table_name) + collection_name = get_collection_name + status = connect.create_partition(collection, tag) + status, res = connect.show_partitions(collection_name) assert not status.OK() diff --git a/tests/milvus_python_test/test_search_by_id.py 
b/tests/milvus_python_test/test_search_by_id.py index 15ddb434c9..2b9b022627 100755 --- a/tests/milvus_python_test/test_search_by_id.py +++ b/tests/milvus_python_test/test_search_by_id.py @@ -11,7 +11,7 @@ # from milvus import Milvus, IndexType, MetricType # from utils import * # dim = 128 -# table_id = "test_search" +# collection_id = "test_search" # add_interval_time = 2 # vectors = gen_vectors(6000, dim) # # vectors /= numpy.linalg.norm(vectors) @@ -34,127 +34,127 @@ # logging.getLogger().info(reason) # pytest.skip(reason) -# def init_data(self, connect, table, nb=6000): +# def init_data(self, connect, collection, nb=6000): # ''' -# Generate vectors and add it in table, before search vectors +# Generate vectors and add it in collection, before search vectors # ''' # global vectors # if nb == 6000: # add_vectors = vectors # else: # add_vectors = gen_vectors(nb, dim) -# status, ids = connect.add_vectors(table, add_vectors) +# status, ids = connect.add_vectors(collection, add_vectors) # sleep(add_interval_time) # return add_vectors, ids -# def init_data_binary(self, connect, table, nb=6000): +# def init_data_binary(self, connect, collection, nb=6000): # ''' -# Generate vectors and add it in table, before search vectors +# Generate vectors and add it in collection, before search vectors # ''' # global binary_vectors # if nb == 6000: # add_vectors = binary_vectors # else: # add_vectors = gen_binary_vectors(nb, dim) -# status, ids = connect.add_vectors(table, add_vectors) +# status, ids = connect.add_vectors(collection, add_vectors) # sleep(add_interval_time) # return add_vectors, ids -# def init_data_no_flush(self, connect, table, nb=6000): +# def init_data_no_flush(self, connect, collection, nb=6000): # global vectors # if nb == 6000: # add_vectors = vectors # else: # add_vectors = gen_vectors(nb, dim) -# status, ids = connect.add_vectors(table, add_vectors) +# status, ids = connect.add_vectors(collection, add_vectors) # # sleep(add_interval_time) # return 
add_vectors, ids -# def init_data_no_flush_ids(self, connect, table, nb=6000): +# def init_data_no_flush_ids(self, connect, collection, nb=6000): # global vectors # my_ids = [i for i in range(nb)] # if nb == 6000: # add_vectors = vectors # else: # add_vectors = gen_vectors(nb, dim) -# status, ids = connect.add_vectors(table, add_vectors, my_ids) +# status, ids = connect.add_vectors(collection, add_vectors, my_ids) # # sleep(add_interval_time) # return add_vectors, ids -# def init_data_ids(self, connect, table, nb=6000): +# def init_data_ids(self, connect, collection, nb=6000): # global vectors # my_ids = [i for i in range(nb)] # if nb == 6000: # add_vectors = vectors # else: # add_vectors = gen_vectors(nb, dim) -# status, ids = connect.add_vectors(table, add_vectors, my_ids) +# status, ids = connect.add_vectors(collection, add_vectors, my_ids) # sleep(add_interval_time) # return add_vectors, ids -# def add_data(self, connect, table, vectors): +# def add_data(self, connect, collection, vectors): # ''' -# Add specified vectors to table +# Add specified vectors to collection # ''' -# status, ids = connect.add_vectors(table, vectors) +# status, ids = connect.add_vectors(collection, vectors) # # sleep(add_interval_time) # sleep(10) # return vectors, ids -# def add_data_ids(self, connect, table, vectors): +# def add_data_ids(self, connect, collection, vectors): # my_ids = [i for i in range(len(vectors))] -# status, ids = connect.add_vectors(table, vectors, my_ids) +# status, ids = connect.add_vectors(collection, vectors, my_ids) # sleep(add_interval_time) # return vectors, ids -# def add_data_and_flush(self, connect, table, vectors): +# def add_data_and_flush(self, connect, collection, vectors): -# status, ids = connect.add_vectors(table, vectors) -# connect.flush([table]) +# status, ids = connect.add_vectors(collection, vectors) +# connect.flush([collection]) # return vectors, ids -# def add_data_and_flush_ids(self, connect, table, vectors): +# def 
add_data_and_flush_ids(self, connect, collection, vectors): # my_ids = [i for i in range(len(vectors))] -# status, ids = connect.add_vectors(table, vectors, my_ids) -# connect.flush([table]) +# status, ids = connect.add_vectors(collection, vectors, my_ids) +# connect.flush([collection]) # return vectors, ids -# def add_data_no_flush(self, connect, table, vectors): +# def add_data_no_flush(self, connect, collection, vectors): # ''' -# Add specified vectors to table +# Add specified vectors to collection # ''' -# status, ids = connect.add_vectors(table, vectors) +# status, ids = connect.add_vectors(collection, vectors) # return vectors, ids -# def add_data_no_flush_ids(self, connect, table, vectors): +# def add_data_no_flush_ids(self, connect, collection, vectors): # my_ids = [i for i in range(len(vectors))] -# status, ids = connect.add_vectors(table, vectors, my_ids) +# status, ids = connect.add_vectors(collection, vectors, my_ids) # return vectors, ids # # delete data and auto flush - timeout due to the flush interval in config file -# def delete_data(self, connect, table, ids): +# def delete_data(self, connect, collection, ids): # ''' # delete vectors by id # ''' -# status = connect.delete_by_id(table, ids) +# status = connect.delete_by_id(collection, ids) # sleep(add_interval_time) # return status # # delete data and auto flush - timeout due to the flush interval in config file -# def delete_data_no_flush(self, connect, table, ids): +# def delete_data_no_flush(self, connect, collection, ids): # ''' # delete vectors by id # ''' -# status = connect.delete_by_id(table, ids) +# status = connect.delete_by_id(collection, ids) # return status # # delete data and manual flush -# def delete_data_and_flush(self, connect, table, ids): +# def delete_data_and_flush(self, connect, collection, ids): # ''' # delete vectors by id # ''' -# status = connect.delete_by_id(table, ids) -# connect.flush([table]) +# status = connect.delete_by_id(collection, ids) +# 
connect.flush([collection]) # return status # def check_no_result(self, results): @@ -167,9 +167,9 @@ # return False # return flag -# def init_data_partition(self, connect, table, partition_tag, nb=6000): +# def init_data_partition(self, connect, collection, partition_tag, nb=6000): # ''' -# Generate vectors and add it in table, before search vectors +# Generate vectors and add it in collection, before search vectors # ''' # global vectors # if nb == 6000: @@ -178,13 +178,13 @@ # add_vectors = gen_vectors(nb, dim) # # add_vectors /= numpy.linalg.norm(add_vectors) # # add_vectors = add_vectors.tolist() -# status, ids = connect.add_vectors(table, add_vectors, partition_tag=partition_tag) +# status, ids = connect.add_vectors(collection, add_vectors, partition_tag=partition_tag) # sleep(add_interval_time) # return add_vectors, ids -# def init_data_and_flush(self, connect, table, nb=6000): +# def init_data_and_flush(self, connect, collection, nb=6000): # ''' -# Generate vectors and add it in table, before search vectors +# Generate vectors and add it in collection, before search vectors # ''' # global vectors # if nb == 6000: @@ -193,24 +193,24 @@ # add_vectors = gen_vectors(nb, dim) # # add_vectors /= numpy.linalg.norm(add_vectors) # # add_vectors = add_vectors.tolist() -# status, ids = connect.add_vectors(table, add_vectors) -# connect.flush([table]) +# status, ids = connect.add_vectors(collection, add_vectors) +# connect.flush([collection]) # return add_vectors, ids -# def init_data_and_flush_ids(self, connect, table, nb=6000): +# def init_data_and_flush_ids(self, connect, collection, nb=6000): # global vectors # my_ids = [i for i in range(nb)] # if nb == 6000: # add_vectors = vectors # else: # add_vectors = gen_vectors(nb, dim) -# status, ids = connect.add_vectors(table, add_vectors, my_ids) -# connect.flush([table]) +# status, ids = connect.add_vectors(collection, add_vectors, my_ids) +# connect.flush([collection]) # return add_vectors, ids -# def 
init_data_partition_and_flush(self, connect, table, partition_tag, nb=6000): +# def init_data_partition_and_flush(self, connect, collection, partition_tag, nb=6000): # ''' -# Generate vectors and add it in table, before search vectors +# Generate vectors and add it in collection, before search vectors # ''' # global vectors # if nb == 6000: @@ -219,8 +219,8 @@ # add_vectors = gen_vectors(nb, dim) # # add_vectors /= numpy.linalg.norm(add_vectors) # # add_vectors = add_vectors.tolist() -# status, ids = connect.add_vectors(table, add_vectors, partition_tag=partition_tag) -# connect.flush([table]) +# status, ids = connect.add_vectors(collection, add_vectors, partition_tag=partition_tag) +# connect.flush([collection]) # return add_vectors, ids # @pytest.fixture( @@ -284,81 +284,81 @@ # def get_top_k(self, request): # yield request.param # # auto flush -# def test_search_flat_normal_topk(self, connect, table, get_top_k): +# def test_search_flat_normal_topk(self, connect, collection, get_top_k): # ''' # target: test basic search fuction, all the search params is corrent, change top-k value # method: search with the given vector id, check the result # expected: search status ok, and the length of the result is top_k # ''' # top_k = get_top_k -# vectors, ids = self.init_data(connect, table, nb=small_size) +# vectors, ids = self.init_data(connect, collection, nb=small_size) # query_id = ids[0] -# status, result = connect.search_by_id(table, top_k, query_id, params={}) -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, query_id, params={}) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) # assert result[0][0].distance <= epsilon # assert check_result(result[0], ids[0]) -# def test_search_flat_max_topk(self, connect, table): +# def test_search_flat_max_topk(self, connect, collection): # ''' # 
target: test basic search fuction, all the search params is corrent, change top-k value # method: search with the given vector id, check the result # expected: search status ok, and the length of the result is top_k # ''' # top_k = 2049 -# vectors, ids = self.init_data(connect, table, nb=small_size) +# vectors, ids = self.init_data(connect, collection, nb=small_size) # query_id = ids[0] -# status, result = connect.search_by_id(table, top_k, query_id, params={}) +# status, result = connect.search_by_id(collection, top_k, query_id, params={}) # assert not status.OK() -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert not status.OK() # -# def test_search_id_not_existed(self, connect, table): +# def test_search_id_not_existed(self, connect, collection): # ''' # target: test basic search fuction, all the search params is corrent, change top-k value # method: search with the given vector id, check the result # expected: search status ok, and the length of the result is top_k # ''' -# vectors, ids = self.init_data_and_flush(connect, table, nb=small_size) +# vectors, ids = self.init_data_and_flush(connect, collection, nb=small_size) # query_id = non_exist_id -# status, result = connect.search_by_id(table, top_k, query_id, params={}) +# status, result = connect.search_by_id(collection, top_k, query_id, params={}) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) # # # auto flush -# def test_search_ids(self, connect, table): -# vectors, ids = self.init_data_ids(connect, table, nb=small_size) +# def test_search_ids(self, connect, collection): +# vectors, ids = self.init_data_ids(connect, collection, nb=small_size) # query_id = 
ids[0] -# status, result = connect.search_by_id(table, top_k, query_id, params={}) -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, query_id, params={}) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) # assert result[0][0].distance <= epsilon # assert check_result(result[0], ids[0]) # # manual flush -# def test_search_ids_flush(self, connect, table): -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) +# def test_search_ids_flush(self, connect, collection): +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) # query_id = non_exist_id -# status, result = connect.search_by_id(table, top_k, query_id, params={}) +# status, result = connect.search_by_id(collection, top_k, query_id, params={}) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) # assert self.check_no_result(result[0]) # # ------------------------------------------------------------- l2, add manual flush, delete, search ------------------------------------------------------------- # -# # ids, manual flush, search table, exist -# def test_search_index_l2(self, connect, table, get_simple_index): -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# # ids, manual flush, search collection, exist +# def test_search_index_l2(self, connect, collection, get_simple_index): +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) # assert self.check_no_result(result[0]) # # # ------------------------------------------------------------- l2, add manual flush, delete, search ------------------------------------------------------------- # -# # ids, manual flush, search table, exist -# def test_search_index_l2(self, connect, table, 
get_simple_index_params): +# # ids, manual flush, search collection, exist +# def test_search_index_l2(self, connect, collection, get_simple_index_params): # ''' # target: test basic search fuction, all the search params is corrent, test all index params, and build # method: search with the given vectors, check the result @@ -366,26 +366,26 @@ # ''' # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_type, index_param) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) # query_id = ids[0] # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(table, top_k, query_id, params=search_param) +# status, result = connect.search_by_id(collection, top_k, query_id, params=search_param) # index_params = get_simple_index_params -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_params) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_params) # query_id = ids[0] -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) # assert result[0][0].distance <= epsilon # assert check_result(result[0], ids[0]) -# # ids, manual flush, search table, non exist -# def test_search_index_l2_id_not_existed(self, connect, table, get_simple_index): +# # ids, manual flush, search collection, non exist +# def test_search_index_l2_id_not_existed(self, connect, collection, get_simple_index): # -# # ids, manual flush, search table, non exist -# def test_search_index_l2_id_not_existed(self, 
connect, table, get_simple_index_params): +# # ids, manual flush, search collection, non exist +# def test_search_index_l2_id_not_existed(self, connect, collection, get_simple_index_params): # ''' # target: test basic search fuction, all the search params is corrent, test all index params, and build # method: search with the given vectors, check the result @@ -393,284 +393,284 @@ # ''' # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_type, index_param) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) # query_id = non_exist_id # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(table, top_k, query_id, params=search_param) +# status, result = connect.search_by_id(collection, top_k, query_id, params=search_param) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) -# # ids, manual flush, delete, manual flush, search table, exist -# def test_search_index_delete(self, connect, table, get_simple_index): +# # ids, manual flush, delete, manual flush, search collection, exist +# def test_search_index_delete(self, connect, collection, get_simple_index): # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_type, index_param) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) # query_id = ids[0] -# status = self.delete_data_and_flush(connect, table, [query_id]) +# status = self.delete_data_and_flush(connect, collection, [query_id]) # assert status.OK() # search_param = 
get_search_param(index_type) -# status, result = connect.search_by_id(table, top_k, query_id, params=search_param) +# status, result = connect.search_by_id(collection, top_k, query_id, params=search_param) # assert status.OK() # assert self.check_no_result(result[0]) -# # ids, manual flush, delete, manual flush, search table, non exist -# def test_search_index_delete_id_not_existed(self, connect, table, get_simple_index): +# # ids, manual flush, delete, manual flush, search collection, non exist +# def test_search_index_delete_id_not_existed(self, connect, collection, get_simple_index): # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_type, index_param) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) # index_params = get_simple_index_params -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_params) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_params) # query_id = non_exist_id -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) # -# # ids, manual flush, delete, manual flush, search table, exist -# def test_search_index_delete(self, connect, table, get_simple_index_params): +# # ids, manual flush, delete, manual flush, search collection, exist +# def test_search_index_delete(self, connect, collection, get_simple_index_params): # index_params = get_simple_index_params -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = 
connect.create_index(table, index_params) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_params) # query_id = ids[0] -# status = self.delete_data_and_flush(connect, table, [query_id]) +# status = self.delete_data_and_flush(connect, collection, [query_id]) # assert status.OK() -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert self.check_no_result(result[0]) # -# # ids, manual flush, delete, manual flush, search table, non exist -# def test_search_index_delete_id_not_existed(self, connect, table, get_simple_index_params): +# # ids, manual flush, delete, manual flush, search collection, non exist +# def test_search_index_delete_id_not_existed(self, connect, collection, get_simple_index_params): # index_params = get_simple_index_params -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_params) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_params) # query_id = ids[0] -# status = self.delete_data_and_flush(connect, table, [query_id]) +# status = self.delete_data_and_flush(connect, collection, [query_id]) # assert status.OK() # query_id = non_exist_id # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(table, top_k, query_id, params=search_param) +# status, result = connect.search_by_id(collection, top_k, query_id, params=search_param) # assert status.OK() # assert self.check_no_result(result[0]) -# def test_search_index_delete_no_flush(self, connect, table, get_simple_index): +# def test_search_index_delete_no_flush(self, connect, collection, get_simple_index): # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# vectors, 
ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_type, index_param) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) # query_id = ids[0] -# status = self.delete_data_no_flush(connect, table, [query_id]) +# status = self.delete_data_no_flush(connect, collection, [query_id]) # assert status.OK() # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(table, top_k, query_id, params=search_param) +# status, result = connect.search_by_id(collection, top_k, query_id, params=search_param) # assert status.OK() # assert check_result(result[0], query_id) -# # ids, manual flush, delete, no flush, search table, non exist -# def test_search_index_delete_no_flush_id_not_existed(self, connect, table, get_simple_index): +# # ids, manual flush, delete, no flush, search collection, non exist +# def test_search_index_delete_no_flush_id_not_existed(self, connect, collection, get_simple_index): # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_type, index_param) -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert self.check_no_result(result[0]) # -# def test_search_index_delete_no_flush(self, connect, table, get_simple_index_params): +# def test_search_index_delete_no_flush(self, connect, collection, get_simple_index_params): # index_params = get_simple_index_params -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# 
status = connect.create_index(table, index_params) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_params) # query_id = ids[0] -# status = self.delete_data_no_flush(connect, table, [query_id]) +# status = self.delete_data_no_flush(connect, collection, [query_id]) # assert status.OK() -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert check_result(result[0], query_id) # -# # ids, manual flush, delete, no flush, search table, non exist -# def test_search_index_delete_no_flush_id_not_existed(self, connect, table, get_simple_index_params): +# # ids, manual flush, delete, no flush, search collection, non exist +# def test_search_index_delete_no_flush_id_not_existed(self, connect, collection, get_simple_index_params): # index_params = get_simple_index_params -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_params) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_params) # query_id = ids[0] -# status = self.delete_data_no_flush(connect, table, [query_id]) +# status = self.delete_data_no_flush(connect, collection, [query_id]) # assert status.OK() # query_id = non_exist_id # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(table, top_k, query_id, params=search_param) +# status, result = connect.search_by_id(collection, top_k, query_id, params=search_param) # assert status.OK() # assert self.check_no_result(result[0]) -# def test_search_index_delete_add(self, connect, table, get_simple_index): +# def test_search_index_delete_add(self, connect, collection, get_simple_index): # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# 
vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_type, index_param) -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert self.check_no_result(result[0]) # -# def test_search_index_delete_add(self, connect, table, get_simple_index_params): +# def test_search_index_delete_add(self, connect, collection, get_simple_index_params): # index_params = get_simple_index_params -# vectors, ids = self.init_data_and_flush_ids(connect, table, nb=small_size) -# status = connect.create_index(table, index_params) +# vectors, ids = self.init_data_and_flush_ids(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_params) # query_id = ids[0] -# status = self.delete_data_no_flush(connect, table, [query_id]) +# status = self.delete_data_no_flush(connect, collection, [query_id]) # assert status.OK() -# vectors, new_ids = self.add_data_and_flush_ids(connect, table, vectors) -# status = connect.create_index(table, index_type, index_param) +# vectors, new_ids = self.add_data_and_flush_ids(connect, collection, vectors) +# status = connect.create_index(collection, index_type, index_param) # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(table, top_k, query_id, params=search_param) -# status = connect.create_index(table, index_params) -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, query_id, params=search_param) +# status = connect.create_index(collection, index_params) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert len(result[0]) == 
min(len(vectors), top_k) # assert result[0][0].distance <= epsilon # assert check_result(result[0], query_id) -# status = self.delete_data_no_flush(connect, table, [query_id]) +# status = self.delete_data_no_flush(connect, collection, [query_id]) # assert status.OK() -# # add to table, auto flush, search table, search partition exist -# def test_search_l2_index_partition(self, connect, table, get_simple_index): +# # add to collection, auto flush, search collection, search partition exist +# def test_search_l2_index_partition(self, connect, collection, get_simple_index): # -# # add to table, auto flush, search table, search partition exist -# def test_search_l2_index_partition(self, connect, table, get_simple_index_params): +# # add to collection, auto flush, search collection, search partition exist +# def test_search_l2_index_partition(self, connect, collection, get_simple_index_params): # ''' # target: test basic search fuction, all the search params is corrent, test all index params, and build -# method: add vectors into table, search with the given vectors, check the result -# expected: search status ok, and the length of the result is top_k, search table with partition tag return empty +# method: add vectors into collection, search with the given vectors, check the result +# expected: search status ok, and the length of the result is top_k, search collection with partition tag return empty # ''' # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# status = connect.create_partition(table, tag) -# vectors, ids = self.init_data(connect, table, nb=small_size) -# status = connect.create_index(table, index_type, index_param) +# status = connect.create_partition(collection, tag) +# vectors, ids = self.init_data(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) # query_id = ids[0] # search_param = get_search_param(index_type) -# status, result = 
connect.search_by_id(table, top_k, query_id, params=search_param) +# status, result = connect.search_by_id(collection, top_k, query_id, params=search_param) # index_params = get_simple_index_params -# status = connect.create_partition(table, tag) -# vectors, ids = self.init_data(connect, table, nb=small_size) -# status = connect.create_index(table, index_params) +# status = connect.create_partition(collection, tag) +# vectors, ids = self.init_data(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_params) # query_id = ids[0] -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) # assert check_result(result[0], ids[0]) # assert result[0][0].distance <= epsilon -# status, result = connect.search_by_id(table, top_k, query_id, partition_tags=[tag], params=search_param) +# status, result = connect.search_by_id(collection, top_k, query_id, partition_tags=[tag], params=search_param) # assert status.OK() # assert len(result) == 0 # # add to partition, auto flush, search partition exist -# def test_search_l2_index_params_partition_2(self, connect, table, get_simple_index): +# def test_search_l2_index_params_partition_2(self, connect, collection, get_simple_index): # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# status = connect.create_partition(table, tag) -# vectors, ids = self.init_data_partition(connect, table, tag, nb=small_size) -# status = connect.create_index(table, index_type, index_param) +# status = connect.create_partition(collection, tag) +# vectors, ids = self.init_data_partition(connect, collection, tag, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) # query_id = ids[0] # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(table, top_k, 
query_id, partition_tags=[tag], params=search_param) +# status, result = connect.search_by_id(collection, top_k, query_id, partition_tags=[tag], params=search_param) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) # assert check_result(result[0], query_id) -# def test_search_l2_index_partition_id_not_existed(self, connect, table, get_simple_index): +# def test_search_l2_index_partition_id_not_existed(self, connect, collection, get_simple_index): # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# status = connect.create_partition(table, tag) -# vectors, ids = self.init_data(connect, table, nb=small_size) -# status = connect.create_index(table, index_type, index_param) +# status = connect.create_partition(collection, tag) +# vectors, ids = self.init_data(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) # query_id = non_exist_id # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(table, top_k, query_id, partition_tags=[tag], params=search_param) +# status, result = connect.search_by_id(collection, top_k, query_id, partition_tags=[tag], params=search_param) # assert status.OK() # assert len(result) == 0 -# # add to table, manual flush, search non-existing partition non exist -# def test_search_l2_index_partition_tag_not_existed(self, connect, table, get_simple_index): +# # add to collection, manual flush, search non-existing partition non exist +# def test_search_l2_index_partition_tag_not_existed(self, connect, collection, get_simple_index): # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# status = connect.create_partition(table, tag) -# vectors, ids = self.init_data_partition_and_flush(connect, table, tag, nb=small_size) -# status = connect.create_index(table, index_type, index_param) +# status = connect.create_partition(collection, tag) +# vectors, ids = 
self.init_data_partition_and_flush(connect, collection, tag, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) # query_id = non_exist_id # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(table, top_k, query_id, partition_tags=['non_existing_tag'], params=search_param) +# status, result = connect.search_by_id(collection, top_k, query_id, partition_tags=['non_existing_tag'], params=search_param) # assert status.OK() # assert len(result) == 0 -# def test_search_l2_index_partitions(self, connect, table, get_simple_index): +# def test_search_l2_index_partitions(self, connect, collection, get_simple_index): # new_tag = "new_tag" # index_param = get_simple_index["index_param"] # index_type = get_simple_index["index_type"] -# status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag]) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id, partition_tag_array=[tag]) # assert status.OK() # assert len(result) == 0 # # # add to partition, auto flush, search partition exist -# def test_search_l2_index_params_partition_2(self, connect, table, get_simple_index_params): +# def test_search_l2_index_params_partition_2(self, connect, collection, get_simple_index_params): # index_params = get_simple_index_params -# status = connect.create_partition(table, tag) -# vectors, ids = self.init_data_partition(connect, table, tag, nb=small_size) -# status = connect.create_index(table, index_params) +# status = connect.create_partition(collection, tag) +# vectors, ids = self.init_data_partition(connect, collection, tag, nb=small_size) +# status = connect.create_index(collection, index_params) # query_id = ids[0] -# status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag]) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id, partition_tag_array=[tag]) # assert status.OK() # assert len(result[0]) == 
min(len(vectors), top_k) # assert check_result(result[0], query_id) # -# def test_search_l2_index_partition_id_not_existed(self, connect, table, get_simple_index_params): +# def test_search_l2_index_partition_id_not_existed(self, connect, collection, get_simple_index_params): # index_params = get_simple_index_params -# status = connect.create_partition(table, tag) -# vectors, ids = self.init_data(connect, table, nb=small_size) -# status = connect.create_index(table, index_params) +# status = connect.create_partition(collection, tag) +# vectors, ids = self.init_data(connect, collection, nb=small_size) +# status = connect.create_index(collection, index_params) # query_id = non_exist_id -# status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag]) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id, partition_tag_array=[tag]) # assert status.OK() # assert len(result) == 0 # -# # add to table, manual flush, search non-existing partition non exist -# def test_search_l2_index_partition_tag_not_existed(self, connect, table, get_simple_index_params): +# # add to collection, manual flush, search non-existing partition non exist +# def test_search_l2_index_partition_tag_not_existed(self, connect, collection, get_simple_index_params): # index_params = get_simple_index_params -# status = connect.create_partition(table, tag) -# vectors, ids = self.init_data_partition_and_flush(connect, table, tag, nb=small_size) -# status = connect.create_index(table, index_params) +# status = connect.create_partition(collection, tag) +# vectors, ids = self.init_data_partition_and_flush(connect, collection, tag, nb=small_size) +# status = connect.create_index(collection, index_params) # query_id = non_exist_id -# status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=['non_existing_tag']) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id, 
partition_tag_array=['non_existing_tag']) # assert status.OK() # assert len(result) == 0 # -# def test_search_l2_index_partitions(self, connect, table, get_simple_index_params): +# def test_search_l2_index_partitions(self, connect, collection, get_simple_index_params): # new_tag = "new_tag" # index_params = get_simple_index_params -# status = connect.create_partition(table, tag) -# status = connect.create_partition(table, new_tag) -# vectors, ids = self.init_data_partition_and_flush(connect, table, tag, nb=small_size) -# vectors, new_ids = self.init_data_partition_and_flush(connect, table, new_tag, nb=small_size) -# status = connect.create_index(table, index_type, index_param) +# status = connect.create_partition(collection, tag) +# status = connect.create_partition(collection, new_tag) +# vectors, ids = self.init_data_partition_and_flush(connect, collection, tag, nb=small_size) +# vectors, new_ids = self.init_data_partition_and_flush(connect, collection, new_tag, nb=small_size) +# status = connect.create_index(collection, index_type, index_param) # query_id = ids[0] # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(table, top_k, query_id, partition_tags=[tag, new_tag], search_param) -# status = connect.create_index(table, index_params) +# status, result = connect.search_by_id(collection, top_k, query_id, partition_tags=[tag, new_tag], search_param) +# status = connect.create_index(collection, index_params) # query_id = ids[0] -# status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag, new_tag]) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id, partition_tag_array=[tag, new_tag]) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) # assert check_result(result[0], ids[0]) # assert result[0][0].distance <= epsilon # query_id = new_ids[0] -# status, result = connect.search_by_id(table, top_k, query_id, partition_tags=[tag, new_tag], 
search_param) -# status, result = connect.search_by_id(table, top_k, nprobe, query_id, partition_tag_array=[tag, new_tag]) +# status, result = connect.search_by_id(collection, top_k, query_id, partition_tags=[tag, new_tag], search_param) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id, partition_tag_array=[tag, new_tag]) # assert status.OK() # assert len(result[0]) == min(len(vectors), top_k) # assert check_result(result[0], new_ids[0]) # assert result[0][0].distance <= epsilon # @pytest.mark.level(2) -# def test_search_by_id_without_connect(self, dis_connect, table): +# def test_search_by_id_without_connect(self, dis_connect, collection): # ''' # target: test search vectors without connection # method: use dis connected instance, call search method and check if search successfully @@ -678,74 +678,74 @@ # ''' # query_idtors = 123 # with pytest.raises(Exception) as e: -# status, ids = dis_connect.search_by_id(table, top_k, query_idtors, params={}) -# status, ids = dis_connect.search_by_id(table, top_k, nprobe, query_idtors) +# status, ids = dis_connect.search_by_id(collection, top_k, query_idtors, params={}) +# status, ids = dis_connect.search_by_id(collection, top_k, nprobe, query_idtors) # -# def test_search_table_name_not_existed(self, connect, table): +# def test_search_collection_name_not_existed(self, connect, collection): # ''' -# target: search table not existed -# method: search with the random table_name, which is not in db +# target: search collection not existed +# method: search with the random collection_name, which is not in db # expected: status not ok # ''' -# table_name = gen_unique_str("not_existed_table") +# collection_name = gen_unique_str("not_existed_collection") # query_id = non_exist_id -# status, result = connect.search_by_id(table_name, top_k, query_id, params={}) +# status, result = connect.search_by_id(collection_name, top_k, query_id, params={}) # assert not status.OK() -# status, result = 
connect.search_by_id(table_name, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection_name, top_k, nprobe, query_id) # assert not status.OK() -# def test_search_table_name_None(self, connect, table): +# def test_search_collection_name_None(self, connect, collection): # ''' -# target: search table that table name is None -# method: search with the table_name: None +# target: search collection that collection name is None +# method: search with the collection_name: None # expected: status not ok # ''' -# table_name = None +# collection_name = None # query_ids = non_exist_id # with pytest.raises(Exception) as e: -# status, result = connect.search_by_id(table_name, top_k, query_id, params={}) +# status, result = connect.search_by_id(collection_name, top_k, query_id, params={}) -# def test_search_jac(self, connect, jac_table, get_jaccard_index): +# def test_search_jac(self, connect, jac_collection, get_jaccard_index): # index_param = get_jaccard_index["index_param"] # index_type = get_jaccard_index["index_type"] -# vectors, ids = self.init_data_binary(connect, jac_table) -# status = connect.create_index(jac_table, index_type, index_param) +# vectors, ids = self.init_data_binary(connect, jac_collection) +# status = connect.create_index(jac_collection, index_type, index_param) # assert status.OK() # query_id = ids[0] # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(jac_table, top_k, query_id, params=search_param) -# status, result = connect.search_by_id(table_name, top_k, nprobe, query_id) +# status, result = connect.search_by_id(jac_collection, top_k, query_id, params=search_param) +# status, result = connect.search_by_id(collection_name, top_k, nprobe, query_id) # -# def test_search_jac(self, connect, jac_table, get_jaccard_index_params): +# def test_search_jac(self, connect, jac_collection, get_jaccard_index_params): # index_params = get_jaccard_index_params -# vectors, ids = self.init_data_binary(connect, 
jac_table) -# status = connect.create_index(jac_table, index_params) +# vectors, ids = self.init_data_binary(connect, jac_collection) +# status = connect.create_index(jac_collection, index_params) # assert status.OK() # query_id = ids[0] -# status, result = connect.search_by_id(jac_table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(jac_collection, top_k, nprobe, query_id) # logging.getLogger().info(status) # logging.getLogger().info(result) # assert status.OK() # assert check_result(result[0], ids[0]) # assert result[0][0].distance <= epsilon -# def test_search_ham(self, connect, ham_table, get_hamming_index): +# def test_search_ham(self, connect, ham_collection, get_hamming_index): # index_param = get_hamming_index["index_param"] # index_param = get_hamming_index["index_type"] -# vectors, ids = self.init_data_binary(connect, ham_table) -# status = connect.create_index(ham_table, index_type, index_param) +# vectors, ids = self.init_data_binary(connect, ham_collection) +# status = connect.create_index(ham_collection, index_type, index_param) # assert status.OK() # query_id = ids[0] # search_param = get_search_param(index_type) -# status, result = connect.search_by_id(ham_table, top_k, query_id, params=search_param) +# status, result = connect.search_by_id(ham_collection, top_k, query_id, params=search_param) # -# def test_search_ham(self, connect, ham_table, get_hamming_index_params): +# def test_search_ham(self, connect, ham_collection, get_hamming_index_params): # index_params = get_hamming_index_params -# vectors, ids = self.init_data_binary(connect, ham_table) -# status = connect.create_index(ham_table, index_params) +# vectors, ids = self.init_data_binary(connect, ham_collection) +# status = connect.create_index(ham_collection, index_params) # assert status.OK() # query_id = ids[0] -# status, result = connect.search_by_id(ham_table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(ham_collection, top_k, nprobe, query_id) # 
logging.getLogger().info(status) # logging.getLogger().info(result) # assert status.OK() @@ -755,7 +755,7 @@ # """ # ****************************************************************** # # The following cases are used to test `search_by_id` function -# # with invalid table_name top-k / nprobe / query_range +# # with invalid collection_name top-k / nprobe / query_range # ****************************************************************** # """ @@ -770,33 +770,33 @@ # index_param = {"index_type": IndexType.IVF_SQ8, "nlist": nlist} # # """ -# Test search table with invalid table names +# Test search collection with invalid collection names # """ # @pytest.fixture( # scope="function", -# params=gen_invalid_table_names() +# params=gen_invalid_collection_names() # ) -# def get_table_name(self, request): +# def get_collection_name(self, request): # yield request.param # @pytest.mark.level(2) -# def test_search_with_invalid_tablename(self, connect, get_table_name): -# table_name = get_table_name +# def test_search_with_invalid_collectionname(self, connect, get_collection_name): +# collection_name = get_collection_name # query_id = non_exist_id -# status, result = connect.search_by_id(table_name, top_k, query_id) +# status, result = connect.search_by_id(collection_name, top_k, query_id) # assert not status.OK( -# status, result = connect.search_by_id(table_name, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection_name, top_k, nprobe, query_id) # assert not status.OK() # # @pytest.mark.level(1) -# def test_search_with_invalid_tag_format(self, connect, table): +# def test_search_with_invalid_tag_format(self, connect, collection): # query_id = non_exist_id # with pytest.raises(Exception) as e: -# status, result = connect.search_by_id(table_name, top_k, query_id, partition_tags="tag") -# status, result = connect.search_by_id(table_name, top_k, nprobe, query_id, partition_tag_array="tag") +# status, result = connect.search_by_id(collection_name, top_k, 
query_id, partition_tags="tag") +# status, result = connect.search_by_id(collection_name, top_k, nprobe, query_id, partition_tag_array="tag") # # """ -# Test search table with invalid top-k +# Test search collection with invalid top-k # """ # @pytest.fixture( # scope="function", @@ -806,40 +806,40 @@ # yield request.param # @pytest.mark.level(1) -# def test_search_with_invalid_top_k(self, connect, table, get_top_k): +# def test_search_with_invalid_top_k(self, connect, collection, get_top_k): # top_k = get_top_k # query_id = non_exist_id # if isinstance(top_k, int): -# status, result = connect.search_by_id(table, top_k, query_id) +# status, result = connect.search_by_id(collection, top_k, query_id) # assert not status.OK() # else: # with pytest.raises(Exception) as e: -# status, result = connect.search_by_id(table, top_k, query_id) -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert not status.OK() # else: # with pytest.raises(Exception) as e: -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # # @pytest.mark.level(2) -# def test_search_with_invalid_top_k_ip(self, connect, ip_table, get_top_k): +# def test_search_with_invalid_top_k_ip(self, connect, ip_collection, get_top_k): # top_k = get_top_k # query_id = non_exist_id # if isinstance(top_k, int): -# status, result = connect.search_by_id(ip_table, top_k, query_id) +# status, result = connect.search_by_id(ip_collection, top_k, query_id) # assert not status.OK() # else: # with pytest.raises(Exception) as e: -# status, result = connect.search_by_id(ip_table, top_k, query_id) -# status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(ip_collection, top_k, query_id) +# status, result = 
connect.search_by_id(ip_collection, top_k, nprobe, query_id) # assert not status.OK() # else: # with pytest.raises(Exception) as e: -# status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(ip_collection, top_k, nprobe, query_id) # # """ -# Test search table with invalid nprobe +# Test search collection with invalid nprobe # """ # @pytest.fixture( # scope="function", @@ -849,19 +849,19 @@ # yield request.param # @pytest.mark.level(1) -# def test_search_with_invalid_nprobe(self, connect, table, get_nprobes): +# def test_search_with_invalid_nprobe(self, connect, collection, get_nprobes): # nprobe = get_nprobes # logging.getLogger().info(nprobe) # query_id = non_exist_id # if isinstance(nprobe, int): -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # assert not status.OK() # else: # with pytest.raises(Exception) as e: -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # @pytest.mark.level(2) -# def test_search_with_invalid_nprobe_ip(self, connect, ip_table, get_nprobes): +# def test_search_with_invalid_nprobe_ip(self, connect, ip_collection, get_nprobes): # ''' # target: test search fuction, with the wrong top_k # method: search with top_k @@ -871,13 +871,13 @@ # logging.getLogger().info(nprobe) # query_id = non_exist_id # if isinstance(nprobe, int): -# status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(ip_collection, top_k, nprobe, query_id) # assert not status.OK() # else: # with pytest.raises(Exception) as e: -# status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(ip_collection, top_k, nprobe, query_id) # """ -# Test search table with invalid ids +# Test search collection with invalid 
ids # """ # @pytest.fixture( # scope="function", @@ -887,7 +887,7 @@ # yield request.param # @pytest.mark.level(1) -# def test_search_flat_with_invalid_vector_id(self, connect, table, get_vector_ids): +# def test_search_flat_with_invalid_vector_id(self, connect, collection, get_vector_ids): # ''' # target: test search fuction, with the wrong query_range # method: search with query_range @@ -896,14 +896,14 @@ # query_id = get_vector_ids # logging.getLogger().info(query_id) # with pytest.raises(Exception) as e: -# status, result = connect.search_by_id(table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(collection, top_k, nprobe, query_id) # @pytest.mark.level(2) -# def test_search_flat_with_invalid_vector_id_ip(self, connect, ip_table, get_vector_ids): +# def test_search_flat_with_invalid_vector_id_ip(self, connect, ip_collection, get_vector_ids): # query_id = get_vector_ids # logging.getLogger().info(query_id) # with pytest.raises(Exception) as e: -# status, result = connect.search_by_id(ip_table, top_k, nprobe, query_id) +# status, result = connect.search_by_id(ip_collection, top_k, nprobe, query_id) # def check_result(result, id): # if len(result) >= 5: diff --git a/tests/milvus_python_test/test_search_vectors.py b/tests/milvus_python_test/test_search_vectors.py index baecef7d9c..9371c38122 100644 --- a/tests/milvus_python_test/test_search_vectors.py +++ b/tests/milvus_python_test/test_search_vectors.py @@ -15,7 +15,7 @@ from milvus import IndexType, MetricType from utils import * dim = 128 -table_id = "test_search" +collection_id = "test_search" add_interval_time = 2 vectors = gen_vectors(6000, dim) vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2') @@ -27,9 +27,9 @@ raw_vectors, binary_vectors = gen_binary_vectors(6000, dim) class TestSearchBase: - def init_data(self, connect, table, nb=6000, partition_tags=None): + def init_data(self, connect, collection, nb=6000, partition_tags=None): ''' - Generate vectors and add it in 
table, before search vectors + Generate vectors and add it in collection, before search vectors ''' global vectors if nb == 6000: @@ -39,17 +39,17 @@ class TestSearchBase: vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2') vectors = vectors.tolist() if partition_tags is None: - status, ids = connect.add_vectors(table, add_vectors) + status, ids = connect.add_vectors(collection, add_vectors) assert status.OK() else: - status, ids = connect.add_vectors(table, add_vectors, partition_tag=partition_tags) + status, ids = connect.add_vectors(collection, add_vectors, partition_tag=partition_tags) assert status.OK() sleep(add_interval_time) return add_vectors, ids - def init_binary_data(self, connect, table, nb=6000, insert=True, partition_tags=None): + def init_binary_data(self, connect, collection, nb=6000, insert=True, partition_tags=None): ''' - Generate vectors and add it in table, before search vectors + Generate vectors and add it in collection, before search vectors ''' ids = [] global binary_vectors @@ -61,10 +61,10 @@ class TestSearchBase: add_raw_vectors, add_vectors = gen_binary_vectors(nb, dim) if insert is True: if partition_tags is None: - status, ids = connect.add_vectors(table, add_vectors) + status, ids = connect.add_vectors(collection, add_vectors) assert status.OK() else: - status, ids = connect.add_vectors(table, add_vectors, partition_tag=partition_tags) + status, ids = connect.add_vectors(collection, add_vectors, partition_tag=partition_tags) assert status.OK() sleep(add_interval_time) return add_raw_vectors, add_vectors, ids @@ -131,16 +131,16 @@ class TestSearchBase: yield request.param - def test_search_top_k_flat_index(self, connect, table, get_top_k): + def test_search_top_k_flat_index(self, connect, collection, get_top_k): ''' target: test basic search fuction, all the search params is corrent, change top-k value method: search with the given vectors, check the result expected: search status ok, and the length of the result is 
top_k ''' - vectors, ids = self.init_data(connect, table) + vectors, ids = self.init_data(connect, collection) query_vec = [vectors[0]] top_k = get_top_k - status, result = connect.search_vectors(table, top_k, query_vec) + status, result = connect.search_vectors(collection, top_k, query_vec) if top_k <= 2048: assert status.OK() assert len(result[0]) == min(len(vectors), top_k) @@ -149,7 +149,7 @@ class TestSearchBase: else: assert not status.OK() - def test_search_l2_index_params(self, connect, table, get_simple_index): + def test_search_l2_index_params(self, connect, collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build method: search with the given vectors, check the result @@ -158,12 +158,12 @@ class TestSearchBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - vectors, ids = self.init_data(connect, table) - status = connect.create_index(table, index_type, index_param) + vectors, ids = self.init_data(connect, collection) + status = connect.create_index(collection, index_type, index_param) query_vec = [vectors[0]] top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(table, top_k, query_vec, params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, params=search_param) logging.getLogger().info(result) if top_k <= 1024: assert status.OK() @@ -175,7 +175,7 @@ class TestSearchBase: else: assert not status.OK() - def test_search_l2_large_nq_index_params(self, connect, table, get_simple_index): + def test_search_l2_large_nq_index_params(self, connect, collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build method: search with the given vectors, check the result @@ -184,14 +184,14 @@ class TestSearchBase: index_param = 
get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - vectors, ids = self.init_data(connect, table) - status = connect.create_index(table, index_type, index_param) + vectors, ids = self.init_data(connect, collection) + status = connect.create_index(collection, index_type, index_param) query_vec = [] for i in range (1200): query_vec.append(vectors[i]) top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(table, top_k, query_vec, params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, params=search_param) logging.getLogger().info(result) assert status.OK() if index_type == IndexType.IVF_PQ: @@ -200,34 +200,34 @@ class TestSearchBase: assert check_result(result[0], ids[0]) assert result[0][0].distance <= epsilon - def test_search_l2_index_params_partition(self, connect, table, get_simple_index): + def test_search_l2_index_params_partition(self, connect, collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build - method: add vectors into table, search with the given vectors, check the result - expected: search status ok, and the length of the result is top_k, search table with partition tag return empty + method: add vectors into collection, search with the given vectors, check the result + expected: search status ok, and the length of the result is top_k, search collection with partition tag return empty ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status = connect.create_partition(table, tag) - vectors, ids = self.init_data(connect, table) - status = connect.create_index(table, index_type, index_param) + status = connect.create_partition(collection, tag) + vectors, ids = self.init_data(connect, collection) + status = connect.create_index(collection, 
index_type, index_param) query_vec = [vectors[0]] top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(table, top_k, query_vec, params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, params=search_param) logging.getLogger().info(result) assert status.OK() if(index_type != IndexType.IVF_PQ): assert len(result[0]) == min(len(vectors), top_k) assert check_result(result[0], ids[0]) assert result[0][0].distance <= epsilon - status, result = connect.search_vectors(table, top_k, query_vec, partition_tags=[tag], params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, partition_tags=[tag], params=search_param) logging.getLogger().info(result) assert status.OK() assert len(result) == 0 - def test_search_l2_index_params_partition_A(self, connect, table, get_simple_index): + def test_search_l2_index_params_partition_A(self, connect, collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build method: search partition with the given vectors, check the result @@ -236,18 +236,18 @@ class TestSearchBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status = connect.create_partition(table, tag) - vectors, ids = self.init_data(connect, table) - status = connect.create_index(table, index_type, index_param) + status = connect.create_partition(collection, tag) + vectors, ids = self.init_data(connect, collection) + status = connect.create_index(collection, index_type, index_param) query_vec = [vectors[0]] top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(table, top_k, query_vec, partition_tags=[tag], params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, partition_tags=[tag], params=search_param) 
logging.getLogger().info(result) assert status.OK() assert len(result) == 0 - def test_search_l2_index_params_partition_B(self, connect, table, get_simple_index): + def test_search_l2_index_params_partition_B(self, connect, collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build method: search with the given vectors, check the result @@ -256,20 +256,20 @@ class TestSearchBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status = connect.create_partition(table, tag) - vectors, ids = self.init_data(connect, table, partition_tags=tag) - status = connect.create_index(table, index_type, index_param) + status = connect.create_partition(collection, tag) + vectors, ids = self.init_data(connect, collection, partition_tags=tag) + status = connect.create_index(collection, index_type, index_param) query_vec = [vectors[0]] top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(table, top_k, query_vec, params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, params=search_param) logging.getLogger().info(result) assert status.OK() if(index_type != IndexType.IVF_PQ): assert len(result[0]) == min(len(vectors), top_k) assert check_result(result[0], ids[0]) assert result[0][0].distance <= epsilon - status, result = connect.search_vectors(table, top_k, query_vec, partition_tags=[tag], params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, partition_tags=[tag], params=search_param) logging.getLogger().info(result) assert status.OK() if(index_type != IndexType.IVF_PQ): @@ -277,22 +277,22 @@ class TestSearchBase: assert check_result(result[0], ids[0]) assert result[0][0].distance <= epsilon - def test_search_l2_index_params_partition_C(self, connect, table, get_simple_index): + def 
test_search_l2_index_params_partition_C(self, connect, collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build - method: search with the given vectors and tags (one of the tags not existed in table), check the result + method: search with the given vectors and tags (one of the tags not existed in collection), check the result expected: search status ok, and the length of the result is top_k ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status = connect.create_partition(table, tag) - vectors, ids = self.init_data(connect, table, partition_tags=tag) - status = connect.create_index(table, index_type, index_param) + status = connect.create_partition(collection, tag) + vectors, ids = self.init_data(connect, collection, partition_tags=tag) + status = connect.create_index(collection, index_type, index_param) query_vec = [vectors[0]] top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(table, top_k, query_vec, partition_tags=[tag, "new_tag"], params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, partition_tags=[tag, "new_tag"], params=search_param) logging.getLogger().info(result) assert status.OK() if(index_type != IndexType.IVF_PQ): @@ -300,45 +300,45 @@ class TestSearchBase: assert check_result(result[0], ids[0]) assert result[0][0].distance <= epsilon - def test_search_l2_index_params_partition_D(self, connect, table, get_simple_index): + def test_search_l2_index_params_partition_D(self, connect, collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build - method: search with the given vectors and tag (tag name not existed in table), check the result + method: search with the given vectors and tag (tag name not existed in collection), check 
the result expected: search status ok, and the length of the result is top_k ''' index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status = connect.create_partition(table, tag) - vectors, ids = self.init_data(connect, table, partition_tags=tag) - status = connect.create_index(table, index_type, index_param) + status = connect.create_partition(collection, tag) + vectors, ids = self.init_data(connect, collection, partition_tags=tag) + status = connect.create_index(collection, index_type, index_param) query_vec = [vectors[0]] top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(table, top_k, query_vec, partition_tags=["new_tag"], params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, partition_tags=["new_tag"], params=search_param) logging.getLogger().info(result) assert status.OK() assert len(result) == 0 - def test_search_l2_index_params_partition_E(self, connect, table, get_simple_index): + def test_search_l2_index_params_partition_E(self, connect, collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build - method: search table with the given vectors and tags, check the result + method: search collection with the given vectors and tags, check the result expected: search status ok, and the length of the result is top_k ''' new_tag = "new_tag" index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status = connect.create_partition(table, tag) - status = connect.create_partition(table, new_tag) - vectors, ids = self.init_data(connect, table, partition_tags=tag) - new_vectors, new_ids = self.init_data(connect, table, nb=6001, partition_tags=new_tag) - status = connect.create_index(table, index_type, index_param) + status = 
connect.create_partition(collection, tag) + status = connect.create_partition(collection, new_tag) + vectors, ids = self.init_data(connect, collection, partition_tags=tag) + new_vectors, new_ids = self.init_data(connect, collection, nb=6001, partition_tags=new_tag) + status = connect.create_index(collection, index_type, index_param) query_vec = [vectors[0], new_vectors[0]] top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(table, top_k, query_vec, partition_tags=[tag, new_tag], params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, partition_tags=[tag, new_tag], params=search_param) logging.getLogger().info(result) assert status.OK() if(index_type != IndexType.IVF_PQ): @@ -347,7 +347,7 @@ class TestSearchBase: assert check_result(result[1], new_ids[0]) assert result[0][0].distance <= epsilon assert result[1][0].distance <= epsilon - status, result = connect.search_vectors(table, top_k, query_vec, partition_tags=[new_tag], params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, partition_tags=[new_tag], params=search_param) logging.getLogger().info(result) assert status.OK() if(index_type != IndexType.IVF_PQ): @@ -355,10 +355,10 @@ class TestSearchBase: assert check_result(result[1], new_ids[0]) assert result[1][0].distance <= epsilon - def test_search_l2_index_params_partition_F(self, connect, table, get_simple_index): + def test_search_l2_index_params_partition_F(self, connect, collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build - method: search table with the given vectors and tags with "re" expr, check the result + method: search collection with the given vectors and tags with "re" expr, check the result expected: search status ok, and the length of the result is top_k ''' tag = "atag" @@ -366,28 +366,28 @@ class TestSearchBase: index_param = 
get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - status = connect.create_partition(table, tag) - status = connect.create_partition(table, new_tag) - vectors, ids = self.init_data(connect, table, partition_tags=tag) - new_vectors, new_ids = self.init_data(connect, table, nb=6001, partition_tags=new_tag) - status = connect.create_index(table, index_type, index_param) + status = connect.create_partition(collection, tag) + status = connect.create_partition(collection, new_tag) + vectors, ids = self.init_data(connect, collection, partition_tags=tag) + new_vectors, new_ids = self.init_data(connect, collection, nb=6001, partition_tags=new_tag) + status = connect.create_index(collection, index_type, index_param) query_vec = [vectors[0], new_vectors[0]] top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(table, top_k, query_vec, partition_tags=["new(.*)"], params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, partition_tags=["new(.*)"], params=search_param) logging.getLogger().info(result) assert status.OK() if(index_type != IndexType.IVF_PQ): assert result[0][0].distance > epsilon assert result[1][0].distance <= epsilon - status, result = connect.search_vectors(table, top_k, query_vec, partition_tags=["(.*)tag"], params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vec, partition_tags=["(.*)tag"], params=search_param) logging.getLogger().info(result) assert status.OK() if(index_type != IndexType.IVF_PQ): assert result[0][0].distance <= epsilon assert result[1][0].distance <= epsilon - def test_search_ip_index_params(self, connect, ip_table, get_simple_index): + def test_search_ip_index_params(self, connect, ip_collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build method: search with the given vectors, 
check the result @@ -396,12 +396,12 @@ class TestSearchBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - vectors, ids = self.init_data(connect, ip_table) - status = connect.create_index(ip_table, index_type, index_param) + vectors, ids = self.init_data(connect, ip_collection) + status = connect.create_index(ip_collection, index_type, index_param) query_vec = [vectors[0]] top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(ip_table, top_k, query_vec, params=search_param) + status, result = connect.search_vectors(ip_collection, top_k, query_vec, params=search_param) logging.getLogger().info(result) if top_k <= 1024: @@ -413,7 +413,7 @@ class TestSearchBase: else: assert not status.OK() - def test_search_ip_large_nq_index_params(self, connect, ip_table, get_simple_index): + def test_search_ip_large_nq_index_params(self, connect, ip_collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build method: search with the given vectors, check the result @@ -422,14 +422,14 @@ class TestSearchBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(get_simple_index) - vectors, ids = self.init_data(connect, ip_table) - status = connect.create_index(ip_table, index_type, index_param) + vectors, ids = self.init_data(connect, ip_collection) + status = connect.create_index(ip_collection, index_type, index_param) query_vec = [] for i in range (1200): query_vec.append(vectors[i]) top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(ip_table, top_k, query_vec, params=search_param) + status, result = connect.search_vectors(ip_collection, top_k, query_vec, params=search_param) logging.getLogger().info(result) assert status.OK() if(index_type != IndexType.IVF_PQ): @@ -437,7 
+437,7 @@ class TestSearchBase: assert check_result(result[0], ids[0]) assert result[0][0].distance >= 1 - gen_inaccuracy(result[0][0].distance) - def test_search_ip_index_params_partition(self, connect, ip_table, get_simple_index): + def test_search_ip_index_params_partition(self, connect, ip_collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build method: search with the given vectors, check the result @@ -446,25 +446,25 @@ class TestSearchBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(index_param) - status = connect.create_partition(ip_table, tag) - vectors, ids = self.init_data(connect, ip_table) - status = connect.create_index(ip_table, index_type, index_param) + status = connect.create_partition(ip_collection, tag) + vectors, ids = self.init_data(connect, ip_collection) + status = connect.create_index(ip_collection, index_type, index_param) query_vec = [vectors[0]] top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(ip_table, top_k, query_vec, params=search_param) + status, result = connect.search_vectors(ip_collection, top_k, query_vec, params=search_param) logging.getLogger().info(result) assert status.OK() if(index_type != IndexType.IVF_PQ): assert len(result[0]) == min(len(vectors), top_k) assert check_result(result[0], ids[0]) assert result[0][0].distance >= 1 - gen_inaccuracy(result[0][0].distance) - status, result = connect.search_vectors(ip_table, top_k, query_vec, partition_tags=[tag], params=search_param) + status, result = connect.search_vectors(ip_collection, top_k, query_vec, partition_tags=[tag], params=search_param) logging.getLogger().info(result) assert status.OK() assert len(result) == 0 - def test_search_ip_index_params_partition_A(self, connect, ip_table, get_simple_index): + def test_search_ip_index_params_partition_A(self, connect, 
ip_collection, get_simple_index): ''' target: test basic search fuction, all the search params is corrent, test all index params, and build method: search with the given vectors and tag, check the result @@ -473,13 +473,13 @@ class TestSearchBase: index_param = get_simple_index["index_param"] index_type = get_simple_index["index_type"] logging.getLogger().info(index_param) - status = connect.create_partition(ip_table, tag) - vectors, ids = self.init_data(connect, ip_table, partition_tags=tag) - status = connect.create_index(ip_table, index_type, index_param) + status = connect.create_partition(ip_collection, tag) + vectors, ids = self.init_data(connect, ip_collection, partition_tags=tag) + status = connect.create_index(ip_collection, index_type, index_param) query_vec = [vectors[0]] top_k = 10 search_param = get_search_param(index_type) - status, result = connect.search_vectors(ip_table, top_k, query_vec, partition_tags=[tag], params=search_param) + status, result = connect.search_vectors(ip_collection, top_k, query_vec, partition_tags=[tag], params=search_param) logging.getLogger().info(result) assert status.OK() if(index_type != IndexType.IVF_PQ): @@ -488,7 +488,7 @@ class TestSearchBase: assert result[0][0].distance >= 1 - gen_inaccuracy(result[0][0].distance) @pytest.mark.level(2) - def test_search_vectors_without_connect(self, dis_connect, table): + def test_search_vectors_without_connect(self, dis_connect, collection): ''' target: test search vectors without connection method: use dis connected instance, call search method and check if search successfully @@ -498,182 +498,182 @@ class TestSearchBase: top_k = 1 nprobe = 1 with pytest.raises(Exception) as e: - status, ids = dis_connect.search_vectors(table, top_k, query_vectors) + status, ids = dis_connect.search_vectors(collection, top_k, query_vectors) - def test_search_table_name_not_existed(self, connect, table): + def test_search_collection_name_not_existed(self, connect, collection): ''' - target: search 
table not existed - method: search with the random table_name, which is not in db + target: search collection not existed + method: search with the random collection_name, which is not in db expected: status not ok ''' - table_name = gen_unique_str("not_existed_table") + collection_name = gen_unique_str("not_existed_collection") top_k = 1 nprobe = 1 query_vecs = [vectors[0]] - status, result = connect.search_vectors(table_name, top_k, query_vecs) + status, result = connect.search_vectors(collection_name, top_k, query_vecs) assert not status.OK() - def test_search_table_name_None(self, connect, table): + def test_search_collection_name_None(self, connect, collection): ''' - target: search table that table name is None - method: search with the table_name: None + target: search collection that collection name is None + method: search with the collection_name: None expected: status not ok ''' - table_name = None + collection_name = None top_k = 1 nprobe = 1 query_vecs = [vectors[0]] with pytest.raises(Exception) as e: - status, result = connect.search_vectors(table_name, top_k, query_vecs) + status, result = connect.search_vectors(collection_name, top_k, query_vecs) - def test_search_top_k_query_records(self, connect, table): + def test_search_top_k_query_records(self, connect, collection): ''' target: test search fuction, with search params: query_records method: search with the given query_records, which are subarrays of the inserted vectors expected: status ok and the returned vectors should be query_records ''' top_k = 10 - vectors, ids = self.init_data(connect, table) + vectors, ids = self.init_data(connect, collection) query_vecs = [vectors[0],vectors[55],vectors[99]] - status, result = connect.search_vectors(table, top_k, query_vecs) + status, result = connect.search_vectors(collection, top_k, query_vecs) assert status.OK() assert len(result) == len(query_vecs) for i in range(len(query_vecs)): assert len(result[i]) == top_k assert result[i][0].distance <= 
epsilon - def test_search_distance_l2_flat_index(self, connect, table): + def test_search_distance_l2_flat_index(self, connect, collection): ''' - target: search table, and check the result: distance + target: search collection, and check the result: distance method: compare the return distance value with value computed with Euclidean expected: the return distance equals to the computed value ''' nb = 2 top_k = 1 - vectors, ids = self.init_data(connect, table, nb=nb) + vectors, ids = self.init_data(connect, collection, nb=nb) query_vecs = [[0.50 for i in range(dim)]] distance_0 = numpy.linalg.norm(numpy.array(query_vecs[0]) - numpy.array(vectors[0])) distance_1 = numpy.linalg.norm(numpy.array(query_vecs[0]) - numpy.array(vectors[1])) - status, result = connect.search_vectors(table, top_k, query_vecs) + status, result = connect.search_vectors(collection, top_k, query_vecs) assert abs(numpy.sqrt(result[0][0].distance) - min(distance_0, distance_1)) <= gen_inaccuracy(result[0][0].distance) - def test_search_distance_ip_flat_index(self, connect, ip_table): + def test_search_distance_ip_flat_index(self, connect, ip_collection): ''' - target: search ip_table, and check the result: distance + target: search ip_collection, and check the result: distance method: compare the return distance value with value computed with Inner product expected: the return distance equals to the computed value ''' nb = 2 top_k = 1 nprobe = 1 - vectors, ids = self.init_data(connect, ip_table, nb=nb) + vectors, ids = self.init_data(connect, ip_collection, nb=nb) index_type = IndexType.FLAT index_param = { "nlist": 16384 } - connect.create_index(ip_table, index_type, index_param) - logging.getLogger().info(connect.describe_index(ip_table)) + connect.create_index(ip_collection, index_type, index_param) + logging.getLogger().info(connect.describe_index(ip_collection)) query_vecs = [[0.50 for i in range(dim)]] distance_0 = numpy.inner(numpy.array(query_vecs[0]), numpy.array(vectors[0])) distance_1 
= numpy.inner(numpy.array(query_vecs[0]), numpy.array(vectors[1])) search_param = get_search_param(index_type) - status, result = connect.search_vectors(ip_table, top_k, query_vecs, params=search_param) + status, result = connect.search_vectors(ip_collection, top_k, query_vecs, params=search_param) assert abs(result[0][0].distance - max(distance_0, distance_1)) <= gen_inaccuracy(result[0][0].distance) - def test_search_distance_jaccard_flat_index(self, connect, jac_table): + def test_search_distance_jaccard_flat_index(self, connect, jac_collection): ''' - target: search ip_table, and check the result: distance + target: search ip_collection, and check the result: distance method: compare the return distance value with value computed with Inner product expected: the return distance equals to the computed value ''' # from scipy.spatial import distance top_k = 1 nprobe = 512 - int_vectors, vectors, ids = self.init_binary_data(connect, jac_table, nb=2) + int_vectors, vectors, ids = self.init_binary_data(connect, jac_collection, nb=2) index_type = IndexType.FLAT index_param = { "nlist": 16384 } - connect.create_index(jac_table, index_type, index_param) - logging.getLogger().info(connect.describe_table(jac_table)) - logging.getLogger().info(connect.describe_index(jac_table)) - query_int_vectors, query_vecs, tmp_ids = self.init_binary_data(connect, jac_table, nb=1, insert=False) + connect.create_index(jac_collection, index_type, index_param) + logging.getLogger().info(connect.describe_collection(jac_collection)) + logging.getLogger().info(connect.describe_index(jac_collection)) + query_int_vectors, query_vecs, tmp_ids = self.init_binary_data(connect, jac_collection, nb=1, insert=False) distance_0 = jaccard(query_int_vectors[0], int_vectors[0]) distance_1 = jaccard(query_int_vectors[0], int_vectors[1]) search_param = get_search_param(index_type) - status, result = connect.search_vectors(jac_table, top_k, query_vecs, params=search_param) + status, result = 
connect.search_vectors(jac_collection, top_k, query_vecs, params=search_param) logging.getLogger().info(status) logging.getLogger().info(result) assert abs(result[0][0].distance - min(distance_0, distance_1)) <= epsilon - def test_search_distance_hamming_flat_index(self, connect, ham_table): + def test_search_distance_hamming_flat_index(self, connect, ham_collection): ''' - target: search ip_table, and check the result: distance + target: search ip_collection, and check the result: distance method: compare the return distance value with value computed with Inner product expected: the return distance equals to the computed value ''' # from scipy.spatial import distance top_k = 1 nprobe = 512 - int_vectors, vectors, ids = self.init_binary_data(connect, ham_table, nb=2) + int_vectors, vectors, ids = self.init_binary_data(connect, ham_collection, nb=2) index_type = IndexType.FLAT index_param = { "nlist": 16384 } - connect.create_index(ham_table, index_type, index_param) - logging.getLogger().info(connect.describe_table(ham_table)) - logging.getLogger().info(connect.describe_index(ham_table)) - query_int_vectors, query_vecs, tmp_ids = self.init_binary_data(connect, ham_table, nb=1, insert=False) + connect.create_index(ham_collection, index_type, index_param) + logging.getLogger().info(connect.describe_collection(ham_collection)) + logging.getLogger().info(connect.describe_index(ham_collection)) + query_int_vectors, query_vecs, tmp_ids = self.init_binary_data(connect, ham_collection, nb=1, insert=False) distance_0 = hamming(query_int_vectors[0], int_vectors[0]) distance_1 = hamming(query_int_vectors[0], int_vectors[1]) search_param = get_search_param(index_type) - status, result = connect.search_vectors(ham_table, top_k, query_vecs, params=search_param) + status, result = connect.search_vectors(ham_collection, top_k, query_vecs, params=search_param) logging.getLogger().info(status) logging.getLogger().info(result) assert abs(result[0][0].distance - min(distance_0, 
distance_1).astype(float)) <= epsilon - def test_search_distance_tanimoto_flat_index(self, connect, tanimoto_table): + def test_search_distance_tanimoto_flat_index(self, connect, tanimoto_collection): ''' - target: search ip_table, and check the result: distance + target: search ip_collection, and check the result: distance method: compare the return distance value with value computed with Inner product expected: the return distance equals to the computed value ''' # from scipy.spatial import distance top_k = 1 nprobe = 512 - int_vectors, vectors, ids = self.init_binary_data(connect, tanimoto_table, nb=2) + int_vectors, vectors, ids = self.init_binary_data(connect, tanimoto_collection, nb=2) index_type = IndexType.FLAT index_param = { "nlist": 16384 } - connect.create_index(tanimoto_table, index_type, index_param) - logging.getLogger().info(connect.describe_table(tanimoto_table)) - logging.getLogger().info(connect.describe_index(tanimoto_table)) - query_int_vectors, query_vecs, tmp_ids = self.init_binary_data(connect, tanimoto_table, nb=1, insert=False) + connect.create_index(tanimoto_collection, index_type, index_param) + logging.getLogger().info(connect.describe_collection(tanimoto_collection)) + logging.getLogger().info(connect.describe_index(tanimoto_collection)) + query_int_vectors, query_vecs, tmp_ids = self.init_binary_data(connect, tanimoto_collection, nb=1, insert=False) distance_0 = tanimoto(query_int_vectors[0], int_vectors[0]) distance_1 = tanimoto(query_int_vectors[0], int_vectors[1]) search_param = get_search_param(index_type) - status, result = connect.search_vectors(tanimoto_table, top_k, query_vecs, params=search_param) + status, result = connect.search_vectors(tanimoto_collection, top_k, query_vecs, params=search_param) logging.getLogger().info(status) logging.getLogger().info(result) assert abs(result[0][0].distance - min(distance_0, distance_1)) <= epsilon - def test_search_distance_ip_index_params(self, connect, ip_table, get_index): + def 
test_search_distance_ip_index_params(self, connect, ip_collection, get_index): ''' - target: search table, and check the result: distance + target: search collection, and check the result: distance method: compare the return distance value with value computed with Inner product expected: the return distance equals to the computed value ''' top_k = 2 nprobe = 1 - vectors, ids = self.init_data(connect, ip_table, nb=2) + vectors, ids = self.init_data(connect, ip_collection, nb=2) index_param = get_index["index_param"] index_type = get_index["index_type"] - connect.create_index(ip_table, index_type, index_param) - logging.getLogger().info(connect.describe_index(ip_table)) + connect.create_index(ip_collection, index_type, index_param) + logging.getLogger().info(connect.describe_index(ip_collection)) query_vecs = [[0.50 for i in range(dim)]] search_param = get_search_param(index_type) - status, result = connect.search_vectors(ip_table, top_k, query_vecs, params=search_param) + status, result = connect.search_vectors(ip_collection, top_k, query_vecs, params=search_param) logging.getLogger().debug(status) logging.getLogger().debug(result) distance_0 = numpy.inner(numpy.array(query_vecs[0]), numpy.array(vectors[0])) @@ -683,15 +683,15 @@ class TestSearchBase: # TODO: enable # @pytest.mark.repeat(5) @pytest.mark.timeout(30) - def _test_search_concurrent(self, connect, table): - vectors, ids = self.init_data(connect, table) + def _test_search_concurrent(self, connect, collection): + vectors, ids = self.init_data(connect, collection) thread_num = 10 nb = 100 top_k = 10 threads = [] query_vecs = vectors[nb//2:nb] def search(): - status, result = connect.search_vectors(table, top_k, query_vecs) + status, result = connect.search_vectors(collection, top_k, query_vecs) assert len(result) == len(query_vecs) for i in range(len(query_vecs)): assert result[i][0].id in ids @@ -714,20 +714,20 @@ class TestSearchBase: top_k = 10 threads_num = 4 threads = [] - table = 
gen_unique_str("test_search_concurrent_multiprocessing") + collection = gen_unique_str("test_search_concurrent_multiprocessing") uri = "tcp://%s:%s" % (args["ip"], args["port"]) - param = {'table_name': table, + param = {'collection_name': collection, 'dimension': dim, 'index_type': IndexType.FLAT, 'store_raw_vector': False} - # create table + # create collection milvus = get_milvus(args["handler"]) milvus.connect(uri=uri) - milvus.create_table(param) - vectors, ids = self.init_data(milvus, table, nb=nb) + milvus.create_collection(param) + vectors, ids = self.init_data(milvus, collection, nb=nb) query_vecs = vectors[nb//2:nb] def search(milvus): - status, result = milvus.search_vectors(table, top_k, query_vecs) + status, result = milvus.search_vectors(collection, top_k, query_vecs) assert len(result) == len(query_vecs) for i in range(len(query_vecs)): assert result[i][0].id in ids @@ -755,20 +755,20 @@ class TestSearchBase: top_k = 10 process_num = 4 processes = [] - table = gen_unique_str("test_search_concurrent_multiprocessing") + collection = gen_unique_str("test_search_concurrent_multiprocessing") uri = "tcp://%s:%s" % (args["ip"], args["port"]) - param = {'table_name': table, + param = {'collection_name': collection, 'dimension': dim, 'index_type': IndexType.FLAT, 'store_raw_vector': False} - # create table + # create collection milvus = get_milvus(args["handler"]) milvus.connect(uri=uri) - milvus.create_table(param) - vectors, ids = self.init_data(milvus, table, nb=nb) + milvus.create_collection(param) + vectors, ids = self.init_data(milvus, collection, nb=nb) query_vecs = vectors[nb//2:nb] def search(milvus): - status, result = milvus.search_vectors(table, top_k, query_vecs) + status, result = milvus.search_vectors(collection, top_k, query_vecs) assert len(result) == len(query_vecs) for i in range(len(query_vecs)): assert result[i][0].id in ids @@ -784,40 +784,40 @@ class TestSearchBase: for p in processes: p.join() - def test_search_multi_table_L2(search, 
args): + def test_search_multi_collection_L2(search, args): ''' - target: test search multi tables of L2 - method: add vectors into 10 tables, and search + target: test search multi collections of L2 + method: add vectors into 10 collections, and search expected: search status ok, the length of result ''' num = 10 top_k = 10 - tables = [] + collections = [] idx = [] for i in range(num): - table = gen_unique_str("test_add_multitable_%d" % i) + collection = gen_unique_str("test_add_multicollection_%d" % i) uri = "tcp://%s:%s" % (args["ip"], args["port"]) - param = {'table_name': table, + param = {'collection_name': collection, 'dimension': dim, 'index_file_size': 10, 'metric_type': MetricType.L2} - # create table + # create collection milvus = get_milvus(args["handler"]) milvus.connect(uri=uri) - milvus.create_table(param) - status, ids = milvus.add_vectors(table, vectors) + milvus.create_collection(param) + status, ids = milvus.add_vectors(collection, vectors) assert status.OK() assert len(ids) == len(vectors) - tables.append(table) + collections.append(collection) idx.append(ids[0]) idx.append(ids[10]) idx.append(ids[20]) time.sleep(6) query_vecs = [vectors[0], vectors[10], vectors[20]] - # start query from random table + # start query from random collection for i in range(num): - table = tables[i] - status, result = milvus.search_vectors(table, top_k, query_vecs) + collection = collections[i] + status, result = milvus.search_vectors(collection, top_k, query_vecs) assert status.OK() assert len(result) == len(query_vecs) for j in range(len(query_vecs)): @@ -825,40 +825,40 @@ class TestSearchBase: for j in range(len(query_vecs)): assert check_result(result[j], idx[3 * i + j]) - def test_search_multi_table_IP(search, args): + def test_search_multi_collection_IP(search, args): ''' - target: test search multi tables of IP - method: add vectors into 10 tables, and search + target: test search multi collections of IP + method: add vectors into 10 collections, and search 
expected: search status ok, the length of result ''' num = 10 top_k = 10 - tables = [] + collections = [] idx = [] for i in range(num): - table = gen_unique_str("test_add_multitable_%d" % i) + collection = gen_unique_str("test_add_multicollection_%d" % i) uri = "tcp://%s:%s" % (args["ip"], args["port"]) - param = {'table_name': table, + param = {'collection_name': collection, 'dimension': dim, 'index_file_size': 10, 'metric_type': MetricType.L2} - # create table + # create collection milvus = get_milvus(args["handler"]) milvus.connect(uri=uri) - milvus.create_table(param) - status, ids = milvus.add_vectors(table, vectors) + milvus.create_collection(param) + status, ids = milvus.add_vectors(collection, vectors) assert status.OK() assert len(ids) == len(vectors) - tables.append(table) + collections.append(collection) idx.append(ids[0]) idx.append(ids[10]) idx.append(ids[20]) time.sleep(6) query_vecs = [vectors[0], vectors[10], vectors[20]] - # start query from random table + # start query from random collection for i in range(num): - table = tables[i] - status, result = milvus.search_vectors(table, top_k, query_vecs) + collection = collections[i] + status, result = milvus.search_vectors(collection, top_k, query_vecs) assert status.OK() assert len(result) == len(query_vecs) for j in range(len(query_vecs)): @@ -868,7 +868,7 @@ class TestSearchBase: """ ****************************************************************** # The following cases are used to test `search_vectors` function -# with invalid table_name top-k / nprobe / query_range +# with invalid collection_name top-k / nprobe / query_range ****************************************************************** """ @@ -878,49 +878,49 @@ class TestSearchParamsInvalid(object): index_param = {"nlist": nlist} logging.getLogger().info(index_param) - def init_data(self, connect, table, nb=6000): + def init_data(self, connect, collection, nb=6000): ''' - Generate vectors and add it in table, before search vectors + Generate 
vectors and add it in collection, before search vectors ''' global vectors if nb == 6000: add_vectors = vectors else: add_vectors = gen_vectors(nb, dim) - status, ids = connect.add_vectors(table, add_vectors) + status, ids = connect.add_vectors(collection, add_vectors) sleep(add_interval_time) return add_vectors, ids """ - Test search table with invalid table names + Test search collection with invalid collection names """ @pytest.fixture( scope="function", - params=gen_invalid_table_names() + params=gen_invalid_collection_names() ) - def get_table_name(self, request): + def get_collection_name(self, request): yield request.param @pytest.mark.level(2) - def test_search_with_invalid_tablename(self, connect, get_table_name): - table_name = get_table_name - logging.getLogger().info(table_name) + def test_search_with_invalid_collectionname(self, connect, get_collection_name): + collection_name = get_collection_name + logging.getLogger().info(collection_name) top_k = 1 nprobe = 1 query_vecs = gen_vectors(1, dim) - status, result = connect.search_vectors(table_name, top_k, query_vecs) + status, result = connect.search_vectors(collection_name, top_k, query_vecs) assert not status.OK() @pytest.mark.level(1) - def test_search_with_invalid_tag_format(self, connect, table): + def test_search_with_invalid_tag_format(self, connect, collection): top_k = 1 nprobe = 1 query_vecs = gen_vectors(1, dim) with pytest.raises(Exception) as e: - status, result = connect.search_vectors(table, top_k, query_vecs, partition_tags="tag") + status, result = connect.search_vectors(collection, top_k, query_vecs, partition_tags="tag") """ - Test search table with invalid top-k + Test search collection with invalid top-k """ @pytest.fixture( scope="function", @@ -930,7 +930,7 @@ class TestSearchParamsInvalid(object): yield request.param @pytest.mark.level(1) - def test_search_with_invalid_top_k(self, connect, table, get_top_k): + def test_search_with_invalid_top_k(self, connect, collection, 
get_top_k): ''' target: test search fuction, with the wrong top_k method: search with top_k @@ -941,14 +941,14 @@ class TestSearchParamsInvalid(object): nprobe = 1 query_vecs = gen_vectors(1, dim) if isinstance(top_k, int): - status, result = connect.search_vectors(table, top_k, query_vecs) + status, result = connect.search_vectors(collection, top_k, query_vecs) assert not status.OK() else: with pytest.raises(Exception) as e: - status, result = connect.search_vectors(table, top_k, query_vecs) + status, result = connect.search_vectors(collection, top_k, query_vecs) @pytest.mark.level(2) - def test_search_with_invalid_top_k_ip(self, connect, ip_table, get_top_k): + def test_search_with_invalid_top_k_ip(self, connect, ip_collection, get_top_k): ''' target: test search fuction, with the wrong top_k method: search with top_k @@ -959,13 +959,13 @@ class TestSearchParamsInvalid(object): nprobe = 1 query_vecs = gen_vectors(1, dim) if isinstance(top_k, int): - status, result = connect.search_vectors(ip_table, top_k, query_vecs) + status, result = connect.search_vectors(ip_collection, top_k, query_vecs) assert not status.OK() else: with pytest.raises(Exception) as e: - status, result = connect.search_vectors(ip_table, top_k, query_vecs) + status, result = connect.search_vectors(ip_collection, top_k, query_vecs) """ - Test search table with invalid nprobe + Test search collection with invalid nprobe """ @pytest.fixture( scope="function", @@ -975,7 +975,7 @@ class TestSearchParamsInvalid(object): yield request.param @pytest.mark.level(1) - def test_search_with_invalid_nprobe(self, connect, table, get_nprobes): + def test_search_with_invalid_nprobe(self, connect, collection, get_nprobes): ''' target: test search fuction, with the wrong nprobe method: search with nprobe @@ -983,7 +983,7 @@ class TestSearchParamsInvalid(object): ''' index_type = IndexType.IVF_SQ8 index_param = {"nlist": 16384} - connect.create_index(table, index_type, index_param) + 
connect.create_index(collection, index_type, index_param) top_k = 1 nprobe = get_nprobes @@ -991,14 +991,14 @@ class TestSearchParamsInvalid(object): logging.getLogger().info(nprobe) query_vecs = gen_vectors(1, dim) # if isinstance(nprobe, int): - status, result = connect.search_vectors(table, top_k, query_vecs, params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vecs, params=search_param) assert not status.OK() # else: # with pytest.raises(Exception) as e: - # status, result = connect.search_vectors(table, top_k, query_vecs, params=search_param) + # status, result = connect.search_vectors(collection, top_k, query_vecs, params=search_param) @pytest.mark.level(2) - def test_search_with_invalid_nprobe_ip(self, connect, ip_table, get_nprobes): + def test_search_with_invalid_nprobe_ip(self, connect, ip_collection, get_nprobes): ''' target: test search fuction, with the wrong top_k method: search with top_k @@ -1006,7 +1006,7 @@ class TestSearchParamsInvalid(object): ''' index_type = IndexType.IVF_SQ8 index_param = {"nlist": 16384} - connect.create_index(ip_table, index_type, index_param) + connect.create_index(ip_collection, index_type, index_param) top_k = 1 nprobe = get_nprobes @@ -1015,11 +1015,11 @@ class TestSearchParamsInvalid(object): query_vecs = gen_vectors(1, dim) # if isinstance(nprobe, int): - status, result = connect.search_vectors(ip_table, top_k, query_vecs, params=search_param) + status, result = connect.search_vectors(ip_collection, top_k, query_vecs, params=search_param) assert not status.OK() # else: # with pytest.raises(Exception) as e: - # status, result = connect.search_vectors(ip_table, top_k, query_vecs, params=search_param) + # status, result = connect.search_vectors(ip_collection, top_k, query_vecs, params=search_param) @pytest.fixture( scope="function", @@ -1034,7 +1034,7 @@ class TestSearchParamsInvalid(object): pytest.skip("ivfpq not support in GPU mode") return request.param - def 
test_search_with_empty_params(self, connect, table, get_simple_index): + def test_search_with_empty_params(self, connect, collection, get_simple_index): ''' target: test search fuction, with empty search params method: search with params @@ -1042,11 +1042,11 @@ class TestSearchParamsInvalid(object): ''' index_type = get_simple_index["index_type"] index_param = get_simple_index["index_param"] - connect.create_index(table, index_type, index_param) + connect.create_index(collection, index_type, index_param) top_k = 1 query_vecs = gen_vectors(1, dim) - status, result = connect.search_vectors(table, top_k, query_vecs, params={}) + status, result = connect.search_vectors(collection, top_k, query_vecs, params={}) if index_type == IndexType.FLAT: assert status.OK() @@ -1066,7 +1066,7 @@ class TestSearchParamsInvalid(object): pytest.skip("ivfpq not support in GPU mode") return request.param - def test_search_with_invalid_params(self, connect, table, get_invalid_searh_param): + def test_search_with_invalid_params(self, connect, collection, get_invalid_searh_param): ''' target: test search fuction, with invalid search params method: search with params @@ -1076,17 +1076,17 @@ class TestSearchParamsInvalid(object): search_param = get_invalid_searh_param["search_param"] if index_type in [IndexType.IVFLAT, IndexType.IVF_SQ8, IndexType.IVF_SQ8H]: - connect.create_index(table, index_type, {"nlist": 16384}) + connect.create_index(collection, index_type, {"nlist": 16384}) if (index_type == IndexType.IVF_PQ): - connect.create_index(table, index_type, {"nlist": 16384, "m": 10}) + connect.create_index(collection, index_type, {"nlist": 16384, "m": 10}) if(index_type == IndexType.HNSW): - connect.create_index(table, index_type, {"M": 16, "efConstruction": 500}) + connect.create_index(collection, index_type, {"M": 16, "efConstruction": 500}) if (index_type == IndexType.RNSG): - connect.create_index(table, index_type, {"search_length": 60, "out_degree": 50, "candidate_pool_size": 300, 
"knng": 100}) + connect.create_index(collection, index_type, {"search_length": 60, "out_degree": 50, "candidate_pool_size": 300, "knng": 100}) top_k = 1 query_vecs = gen_vectors(1, dim) - status, result = connect.search_vectors(table, top_k, query_vecs, params=search_param) + status, result = connect.search_vectors(collection, top_k, query_vecs, params=search_param) assert not status.OK() def check_result(result, id): diff --git a/tests/milvus_python_test/test_table.py b/tests/milvus_python_test/test_table.py deleted file mode 100644 index ed3f32fa5b..0000000000 --- a/tests/milvus_python_test/test_table.py +++ /dev/null @@ -1,1051 +0,0 @@ -import pdb -import pytest -import logging -import itertools -from time import sleep -from multiprocessing import Process -from milvus import IndexType, MetricType -from utils import * - -dim = 128 -delete_table_interval_time = 3 -index_file_size = 10 -vectors = gen_vectors(100, dim) - - -class TestTable: - - """ - ****************************************************************** - The following cases are used to test `create_table` function - ****************************************************************** - """ - - def test_create_table(self, connect): - ''' - target: test create normal table - method: create table with corrent params - expected: create status return ok - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - status = connect.create_table(param) - assert status.OK() - - def test_create_table_ip(self, connect): - ''' - target: test create normal table - method: create table with corrent params - expected: create status return ok - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.IP} - status = connect.create_table(param) - assert status.OK() - - def 
test_create_table_jaccard(self, connect): - ''' - target: test create normal table - method: create table with corrent params - expected: create status return ok - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.JACCARD} - status = connect.create_table(param) - assert status.OK() - - def test_create_table_hamming(self, connect): - ''' - target: test create normal table - method: create table with corrent params - expected: create status return ok - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.HAMMING} - status = connect.create_table(param) - assert status.OK() - - @pytest.mark.level(2) - def test_create_table_without_connection(self, dis_connect): - ''' - target: test create table, without connection - method: create table with correct params, with a disconnected instance - expected: create raise exception - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - with pytest.raises(Exception) as e: - status = dis_connect.create_table(param) - - def test_create_table_existed(self, connect): - ''' - target: test create table but the table name have already existed - method: create table with the same table_name - expected: create status return not ok - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - status = connect.create_table(param) - status = connect.create_table(param) - assert not status.OK() - - @pytest.mark.level(2) - def test_create_table_existed_ip(self, connect): - ''' - target: test create table but the table name have already existed - method: create table with the 
same table_name - expected: create status return not ok - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.IP} - status = connect.create_table(param) - status = connect.create_table(param) - assert not status.OK() - - def test_create_table_None(self, connect): - ''' - target: test create table but the table name is None - method: create table, param table_name is None - expected: create raise error - ''' - param = {'table_name': None, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - with pytest.raises(Exception) as e: - status = connect.create_table(param) - - def test_create_table_no_dimension(self, connect): - ''' - target: test create table with no dimension params - method: create table with corrent params - expected: create status return ok - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - with pytest.raises(Exception) as e: - status = connect.create_table(param) - - def test_create_table_no_file_size(self, connect): - ''' - target: test create table with no index_file_size params - method: create table with corrent params - expected: create status return ok, use default 1024 - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'metric_type': MetricType.L2} - status = connect.create_table(param) - logging.getLogger().info(status) - status, result = connect.describe_table(table_name) - logging.getLogger().info(result) - assert result.index_file_size == 1024 - - def test_create_table_no_metric_type(self, connect): - ''' - target: test create table with no metric_type params - method: create table with corrent params - expected: create status return ok, use default L2 - ''' - table_name = gen_unique_str("test_table") - param = 
{'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size} - status = connect.create_table(param) - status, result = connect.describe_table(table_name) - logging.getLogger().info(result) - assert result.metric_type == MetricType.L2 - - """ - ****************************************************************** - The following cases are used to test `describe_table` function - ****************************************************************** - """ - - def test_table_describe_result(self, connect): - ''' - target: test describe table created with correct params - method: create table, assert the value returned by describe method - expected: table_name equals with the table name created - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - connect.create_table(param) - status, res = connect.describe_table(table_name) - assert res.table_name == table_name - assert res.metric_type == MetricType.L2 - - @pytest.mark.level(2) - def test_table_describe_table_name_ip(self, connect): - ''' - target: test describe table created with correct params - method: create table, assert the value returned by describe method - expected: table_name equals with the table name created - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.IP} - connect.create_table(param) - status, res = connect.describe_table(table_name) - assert res.table_name == table_name - assert res.metric_type == MetricType.IP - - @pytest.mark.level(2) - def test_table_describe_table_name_jaccard(self, connect): - ''' - target: test describe table created with correct params - method: create table, assert the value returned by describe method - expected: table_name equals with the table name created - ''' - table_name = gen_unique_str("test_table") - 
param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.JACCARD} - connect.create_table(param) - status, res = connect.describe_table(table_name) - assert res.table_name == table_name - assert res.metric_type == MetricType.JACCARD - - @pytest.mark.level(2) - def test_table_describe_table_name_hamming(self, connect): - ''' - target: test describe table created with correct params - method: create table, assert the value returned by describe method - expected: table_name equals with the table name created - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.HAMMING} - connect.create_table(param) - status, res = connect.describe_table(table_name) - assert res.table_name == table_name - assert res.metric_type == MetricType.HAMMING - - # TODO: enable - @pytest.mark.level(2) - def _test_table_describe_table_name_multiprocessing(self, connect, args): - ''' - target: test describe table created with multiprocess - method: create table, assert the value returned by describe method - expected: table_name equals with the table name created - ''' - table_name = gen_unique_str("test_table") - uri = "tcp://%s:%s" % (args["ip"], args["port"]) - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - connect.create_table(param) - - def describetable(milvus): - status, res = milvus.describe_table(table_name) - assert res.table_name == table_name - - process_num = 4 - processes = [] - for i in range(process_num): - milvus = get_milvus(args["handler"]) - milvus.connect(uri=uri) - p = Process(target=describetable, args=(milvus,)) - processes.append(p) - p.start() - for p in processes: - p.join() - - @pytest.mark.level(2) - def test_table_describe_without_connection(self, table, dis_connect): - ''' - target: test describe table, 
without connection - method: describe table with correct params, with a disconnected instance - expected: describe raise exception - ''' - with pytest.raises(Exception) as e: - status = dis_connect.describe_table(table) - - def test_table_describe_dimension(self, connect): - ''' - target: test describe table created with correct params - method: create table, assert the dimention value returned by describe method - expected: dimention equals with dimention when created - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim+1, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - connect.create_table(param) - status, res = connect.describe_table(table_name) - assert res.dimension == dim+1 - - """ - ****************************************************************** - The following cases are used to test `delete_table` function - ****************************************************************** - """ - - def test_delete_table(self, connect, table): - ''' - target: test delete table created with correct params - method: create table and then delete, - assert the value returned by delete method - expected: status ok, and no table in tables - ''' - status = connect.delete_table(table) - assert not assert_has_table(connect, table) - - @pytest.mark.level(2) - def test_delete_table_ip(self, connect, ip_table): - ''' - target: test delete table created with correct params - method: create table and then delete, - assert the value returned by delete method - expected: status ok, and no table in tables - ''' - status = connect.delete_table(ip_table) - assert not assert_has_table(connect, ip_table) - - @pytest.mark.level(2) - def test_delete_table_jaccard(self, connect, jac_table): - ''' - target: test delete table created with correct params - method: create table and then delete, - assert the value returned by delete method - expected: status ok, and no table in tables - ''' - status = 
connect.delete_table(jac_table) - assert not assert_has_table(connect, jac_table) - - @pytest.mark.level(2) - def test_delete_table_hamming(self, connect, ham_table): - ''' - target: test delete table created with correct params - method: create table and then delete, - assert the value returned by delete method - expected: status ok, and no table in tables - ''' - status = connect.delete_table(ham_table) - assert not assert_has_table(connect, ham_table) - - @pytest.mark.level(2) - def test_table_delete_without_connection(self, table, dis_connect): - ''' - target: test describe table, without connection - method: describe table with correct params, with a disconnected instance - expected: describe raise exception - ''' - with pytest.raises(Exception) as e: - status = dis_connect.delete_table(table) - - def test_delete_table_not_existed(self, connect): - ''' - target: test delete table not in index - method: delete all tables, and delete table again, - assert the value returned by delete method - expected: status not ok - ''' - table_name = gen_unique_str("test_table") - status = connect.delete_table(table_name) - assert not status.OK() - - def test_delete_table_repeatedly(self, connect): - ''' - target: test delete table created with correct params - method: create table and delete new table repeatedly, - assert the value returned by delete method - expected: create ok and delete ok - ''' - loops = 1 - for i in range(loops): - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - connect.create_table(param) - status = connect.delete_table(table_name) - time.sleep(1) - assert not assert_has_table(connect, table_name) - - def test_delete_create_table_repeatedly(self, connect): - ''' - target: test delete and create the same table repeatedly - method: try to create the same table and delete repeatedly, - assert the value returned by delete method - 
expected: create ok and delete ok - ''' - loops = 5 - for i in range(loops): - table_name = "test_table" - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - connect.create_table(param) - status = connect.delete_table(table_name) - time.sleep(2) - assert status.OK() - - def test_delete_create_table_repeatedly_ip(self, connect): - ''' - target: test delete and create the same table repeatedly - method: try to create the same table and delete repeatedly, - assert the value returned by delete method - expected: create ok and delete ok - ''' - loops = 5 - for i in range(loops): - table_name = "test_table" - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.IP} - connect.create_table(param) - status = connect.delete_table(table_name) - time.sleep(2) - assert status.OK() - - # TODO: enable - @pytest.mark.level(2) - def _test_delete_table_multiprocessing(self, args): - ''' - target: test delete table with multiprocess - method: create table and then delete, - assert the value returned by delete method - expected: status ok, and no table in tables - ''' - process_num = 6 - processes = [] - uri = "tcp://%s:%s" % (args["ip"], args["port"]) - - def deletetable(milvus): - status = milvus.delete_table(table) - # assert not status.code==0 - assert assert_has_table(milvus, table) - assert status.OK() - - for i in range(process_num): - milvus = get_milvus(args["handler"]) - milvus.connect(uri=uri) - p = Process(target=deletetable, args=(milvus,)) - processes.append(p) - p.start() - for p in processes: - p.join() - - # TODO: enable - @pytest.mark.level(2) - def _test_delete_table_multiprocessing_multitable(self, connect): - ''' - target: test delete table with multiprocess - method: create table and then delete, - assert the value returned by delete method - expected: status ok, and no table in tables - ''' - process_num = 5 - loop_num = 
2 - processes = [] - - table = [] - j = 0 - while j < (process_num*loop_num): - table_name = gen_unique_str("test_delete_table_with_multiprocessing") - table.append(table_name) - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - connect.create_table(param) - j = j + 1 - - def delete(connect,ids): - i = 0 - while i < loop_num: - status = connect.delete_table(table[ids*process_num+i]) - time.sleep(2) - assert status.OK() - assert not assert_has_table(connect, table[ids*process_num+i]) - i = i + 1 - - for i in range(process_num): - ids = i - p = Process(target=delete, args=(connect,ids)) - processes.append(p) - p.start() - for p in processes: - p.join() - - """ - ****************************************************************** - The following cases are used to test `has_table` function - ****************************************************************** - """ - - def test_has_table(self, connect): - ''' - target: test if the created table existed - method: create table, assert the value returned by has_table method - expected: True - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - connect.create_table(param) - assert assert_has_table(connect, table_name) - - def test_has_table_ip(self, connect): - ''' - target: test if the created table existed - method: create table, assert the value returned by has_table method - expected: True - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.IP} - connect.create_table(param) - assert assert_has_table(connect, table_name) - - def test_has_table_jaccard(self, connect): - ''' - target: test if the created table existed - method: create table, assert the value returned by has_table method - expected: True - 
''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.JACCARD} - connect.create_table(param) - assert assert_has_table(connect, table_name) - - def test_has_table_hamming(self, connect): - ''' - target: test if the created table existed - method: create table, assert the value returned by has_table method - expected: True - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.HAMMING} - connect.create_table(param) - assert assert_has_table(connect, table_name) - - @pytest.mark.level(2) - def test_has_table_without_connection(self, table, dis_connect): - ''' - target: test has table, without connection - method: calling has table with correct params, with a disconnected instance - expected: has table raise exception - ''' - with pytest.raises(Exception) as e: - assert_has_table(dis_connect, table) - - def test_has_table_not_existed(self, connect): - ''' - target: test if table not created - method: random a table name, which not existed in db, - assert the value returned by has_table method - expected: False - ''' - table_name = gen_unique_str("test_table") - assert not assert_has_table(connect, table_name) - - """ - ****************************************************************** - The following cases are used to test `show_tables` function - ****************************************************************** - """ - - def test_show_tables(self, connect): - ''' - target: test show tables is correct or not, if table created - method: create table, assert the value returned by show_tables method is equal to 0 - expected: table_name in show tables - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - 
connect.create_table(param) - status, result = connect.show_tables() - assert status.OK() - assert table_name in result - - def test_show_tables_ip(self, connect): - ''' - target: test show tables is correct or not, if table created - method: create table, assert the value returned by show_tables method is equal to 0 - expected: table_name in show tables - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.IP} - connect.create_table(param) - status, result = connect.show_tables() - assert status.OK() - assert table_name in result - - def test_show_tables_jaccard(self, connect): - ''' - target: test show tables is correct or not, if table created - method: create table, assert the value returned by show_tables method is equal to 0 - expected: table_name in show tables - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.JACCARD} - connect.create_table(param) - status, result = connect.show_tables() - assert status.OK() - assert table_name in result - - def test_show_tables_hamming(self, connect): - ''' - target: test show tables is correct or not, if table created - method: create table, assert the value returned by show_tables method is equal to 0 - expected: table_name in show tables - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.HAMMING} - connect.create_table(param) - status, result = connect.show_tables() - assert status.OK() - assert table_name in result - - @pytest.mark.level(2) - def test_show_tables_without_connection(self, dis_connect): - ''' - target: test show_tables, without connection - method: calling show_tables with correct params, with a disconnected instance - expected: show_tables raise 
exception - ''' - with pytest.raises(Exception) as e: - status = dis_connect.show_tables() - - def test_show_tables_no_table(self, connect): - ''' - target: test show tables is correct or not, if no table in db - method: delete all tables, - assert the value returned by show_tables method is equal to [] - expected: the status is ok, and the result is equal to [] - ''' - status, result = connect.show_tables() - if result: - for table_name in result: - connect.delete_table(table_name) - time.sleep(delete_table_interval_time) - status, result = connect.show_tables() - assert status.OK() - assert len(result) == 0 - - # TODO: enable - @pytest.mark.level(2) - def _test_show_tables_multiprocessing(self, connect, args): - ''' - target: test show tables is correct or not with processes - method: create table, assert the value returned by show_tables method is equal to 0 - expected: table_name in show tables - ''' - table_name = gen_unique_str("test_table") - uri = "tcp://%s:%s" % (args["ip"], args["port"]) - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - connect.create_table(param) - - def showtables(milvus): - status, result = milvus.show_tables() - assert status.OK() - assert table_name in result - - process_num = 8 - processes = [] - - for i in range(process_num): - milvus = get_milvus(args["handler"]) - milvus.connect(uri=uri) - p = Process(target=showtables, args=(milvus,)) - processes.append(p) - p.start() - for p in processes: - p.join() - - """ - ****************************************************************** - The following cases are used to test `preload_table` function - ****************************************************************** - """ - - """ - generate valid create_index params - """ - @pytest.fixture( - scope="function", - params=gen_simple_index() - ) - def get_simple_index(self, request, connect): - if str(connect._cmd("mode")[1]) == "CPU": - if 
request.param["index_type"] == IndexType.IVF_SQ8H: - pytest.skip("sq8h not support in cpu mode") - if request.param["index_type"] == IndexType.IVF_PQ: - pytest.skip("Skip PQ Temporary") - return request.param - - @pytest.mark.level(1) - def test_preload_table(self, connect, table, get_simple_index): - index_param = get_simple_index["index_param"] - index_type = get_simple_index["index_type"] - status, ids = connect.add_vectors(table, vectors) - status = connect.create_index(table, index_type, index_param) - status = connect.preload_table(table) - assert status.OK() - - @pytest.mark.level(1) - def test_preload_table_ip(self, connect, ip_table, get_simple_index): - index_param = get_simple_index["index_param"] - index_type = get_simple_index["index_type"] - status, ids = connect.add_vectors(ip_table, vectors) - status = connect.create_index(ip_table, index_type, index_param) - status = connect.preload_table(ip_table) - assert status.OK() - - @pytest.mark.level(1) - def test_preload_table_jaccard(self, connect, jac_table, get_simple_index): - index_param = get_simple_index["index_param"] - index_type = get_simple_index["index_type"] - status, ids = connect.add_vectors(jac_table, vectors) - status = connect.create_index(jac_table, index_type, index_param) - status = connect.preload_table(jac_table) - assert status.OK() - - @pytest.mark.level(1) - def test_preload_table_hamming(self, connect, ham_table, get_simple_index): - index_param = get_simple_index["index_param"] - index_type = get_simple_index["index_type"] - status, ids = connect.add_vectors(ham_table, vectors) - status = connect.create_index(ham_table, index_type, index_param) - status = connect.preload_table(ham_table) - assert status.OK() - - @pytest.mark.level(2) - def test_preload_table_not_existed(self, connect, table, get_simple_index): - index_param = get_simple_index["index_param"] - index_type = get_simple_index["index_type"] - table_name = gen_unique_str() - status, ids = connect.add_vectors(table, 
vectors) - status = connect.create_index(table, index_type, index_param) - status = connect.preload_table(table_name) - assert not status.OK() - - @pytest.mark.level(2) - def test_preload_table_not_existed_ip(self, connect, ip_table, get_simple_index): - index_param = get_simple_index["index_param"] - index_type = get_simple_index["index_type"] - table_name = gen_unique_str() - status, ids = connect.add_vectors(ip_table, vectors) - status = connect.create_index(ip_table, index_type, index_param) - status = connect.preload_table(table_name) - assert not status.OK() - - @pytest.mark.level(1) - def test_preload_table_no_vectors(self, connect, table): - status = connect.preload_table(table) - assert status.OK() - - @pytest.mark.level(2) - def test_preload_table_no_vectors_ip(self, connect, ip_table): - status = connect.preload_table(ip_table) - assert status.OK() - - # TODO: psutils get memory usage - @pytest.mark.level(1) - def test_preload_table_memory_usage(self, connect, table): - pass - - -class TestTableInvalid(object): - """ - Test creating table with invalid table names - """ - @pytest.fixture( - scope="function", - params=gen_invalid_table_names() - ) - def get_table_name(self, request): - yield request.param - - @pytest.mark.level(2) - def test_create_table_with_invalid_tablename(self, connect, get_table_name): - table_name = get_table_name - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - status = connect.create_table(param) - assert not status.OK() - - def test_create_table_with_empty_tablename(self, connect): - table_name = '' - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - with pytest.raises(Exception) as e: - status = connect.create_table(param) - - def test_preload_table_with_invalid_tablename(self, connect): - table_name = '' - with pytest.raises(Exception) as e: - status = 
connect.preload_table(table_name) - - -class TestCreateTableDimInvalid(object): - """ - Test creating table with invalid dimension - """ - @pytest.fixture( - scope="function", - params=gen_invalid_dims() - ) - def get_dim(self, request): - yield request.param - - @pytest.mark.level(2) - @pytest.mark.timeout(5) - def test_create_table_with_invalid_dimension(self, connect, get_dim): - dimension = get_dim - table = gen_unique_str("test_create_table_with_invalid_dimension") - param = {'table_name': table, - 'dimension': dimension, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - if isinstance(dimension, int): - status = connect.create_table(param) - assert not status.OK() - else: - with pytest.raises(Exception) as e: - status = connect.create_table(param) - - -# TODO: max / min index file size -class TestCreateTableIndexSizeInvalid(object): - """ - Test creating tables with invalid index_file_size - """ - @pytest.fixture( - scope="function", - params=gen_invalid_file_sizes() - ) - def get_file_size(self, request): - yield request.param - - @pytest.mark.level(2) - def test_create_table_with_invalid_file_size(self, connect, table, get_file_size): - file_size = get_file_size - param = {'table_name': table, - 'dimension': dim, - 'index_file_size': file_size, - 'metric_type': MetricType.L2} - if isinstance(file_size, int): - status = connect.create_table(param) - assert not status.OK() - else: - with pytest.raises(Exception) as e: - status = connect.create_table(param) - - -class TestCreateMetricTypeInvalid(object): - """ - Test creating tables with invalid metric_type - """ - @pytest.fixture( - scope="function", - params=gen_invalid_metric_types() - ) - def get_metric_type(self, request): - yield request.param - - @pytest.mark.level(2) - def test_create_table_with_invalid_file_size(self, connect, table, get_metric_type): - metric_type = get_metric_type - param = {'table_name': table, - 'dimension': dim, - 'index_file_size': 10, - 'metric_type': 
metric_type} - with pytest.raises(Exception) as e: - status = connect.create_table(param) - - -def create_table(connect, **params): - param = {'table_name': params["table_name"], - 'dimension': params["dimension"], - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - status = connect.create_table(param) - return status - -def search_table(connect, **params): - status, result = connect.search_vectors( - params["table_name"], - params["top_k"], - params["query_vectors"], - params={"nprobe": params["nprobe"]}) - return status - -def preload_table(connect, **params): - status = connect.preload_table(params["table_name"]) - return status - -def has(connect, **params): - status, result = connect.has_table(params["table_name"]) - return status - -def show(connect, **params): - status, result = connect.show_tables() - return status - -def delete(connect, **params): - status = connect.delete_table(params["table_name"]) - return status - -def describe(connect, **params): - status, result = connect.describe_table(params["table_name"]) - return status - -def rowcount(connect, **params): - status, result = connect.get_table_row_count(params["table_name"]) - return status - -def create_index(connect, **params): - status = connect.create_index(params["table_name"], params["index_type"], params["index_param"]) - return status - -func_map = { - # 0:has, - 1:show, - 10:create_table, - 11:describe, - 12:rowcount, - 13:search_table, - 14:preload_table, - 15:create_index, - 30:delete -} - -def gen_sequence(): - raw_seq = func_map.keys() - result = itertools.permutations(raw_seq) - for x in result: - yield x - -class TestTableLogic(object): - - @pytest.mark.parametrize("logic_seq", gen_sequence()) - @pytest.mark.level(2) - def test_logic(self, connect, logic_seq): - if self.is_right(logic_seq): - self.execute(logic_seq, connect) - else: - self.execute_with_error(logic_seq, connect) - - def is_right(self, seq): - if sorted(seq) == seq: - return True - - not_created = 
True - has_deleted = False - for i in range(len(seq)): - if seq[i] > 10 and not_created: - return False - elif seq [i] > 10 and has_deleted: - return False - elif seq[i] == 10: - not_created = False - elif seq[i] == 30: - has_deleted = True - - return True - - def execute(self, logic_seq, connect): - basic_params = self.gen_params() - for i in range(len(logic_seq)): - # logging.getLogger().info(logic_seq[i]) - f = func_map[logic_seq[i]] - status = f(connect, **basic_params) - assert status.OK() - - def execute_with_error(self, logic_seq, connect): - basic_params = self.gen_params() - - error_flag = False - for i in range(len(logic_seq)): - f = func_map[logic_seq[i]] - status = f(connect, **basic_params) - if not status.OK(): - # logging.getLogger().info(logic_seq[i]) - error_flag = True - break - assert error_flag == True - - def gen_params(self): - table_name = gen_unique_str("test_table") - top_k = 1 - vectors = gen_vectors(2, dim) - param = {'table_name': table_name, - 'dimension': dim, - 'metric_type': MetricType.L2, - 'nprobe': 1, - 'top_k': top_k, - 'index_type': IndexType.IVF_SQ8, - 'index_param': { - 'nlist': 16384 - }, - 'query_vectors': vectors} - return param diff --git a/tests/milvus_python_test/test_table_count.py b/tests/milvus_python_test/test_table_count.py deleted file mode 100644 index f49637a06c..0000000000 --- a/tests/milvus_python_test/test_table_count.py +++ /dev/null @@ -1,644 +0,0 @@ -import pdb -import pytest -import logging -import itertools -from time import sleep -import threading -from multiprocessing import Process -from milvus import IndexType, MetricType -from utils import * - -dim = 128 -index_file_size = 10 -add_time_interval = 3 -tag = "1970-01-01" -nb = 6000 - -class TestTableCount: - """ - params means different nb, the nb value may trigger merge, or not - """ - @pytest.fixture( - scope="function", - params=[ - 1, - 5000, - 100000, - ], - ) - def add_vectors_nb(self, request): - yield request.param - - """ - generate valid 
create_index params - """ - @pytest.fixture( - scope="function", - params=gen_simple_index() - ) - def get_simple_index(self, request, connect): - if str(connect._cmd("mode")[1]) == "CPU": - if request.param["index_type"] == IndexType.IVF_SQ8H: - pytest.skip("sq8h not support in cpu mode") - if request.param["index_type"] == IndexType.IVF_PQ: - pytest.skip("Skip PQ Temporary") - return request.param - - def test_table_rows_count(self, connect, table, add_vectors_nb): - ''' - target: test table rows_count is correct or not - method: create table and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nb = add_vectors_nb - vectors = gen_vectors(nb, dim) - res = connect.add_vectors(table_name=table, records=vectors) - connect.flush([table]) - status, res = connect.get_table_row_count(table) - assert res == nb - - def test_table_rows_count_partition(self, connect, table, add_vectors_nb): - ''' - target: test table rows_count is correct or not - method: create table, create partition and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nb = add_vectors_nb - vectors = gen_vectors(nb, dim) - status = connect.create_partition(table, tag) - assert status.OK() - res = connect.add_vectors(table_name=table, records=vectors, partition_tag=tag) - connect.flush([table]) - status, res = connect.get_table_row_count(table) - assert res == nb - - def test_table_rows_count_multi_partitions_A(self, connect, table, add_vectors_nb): - ''' - target: test table rows_count is correct or not - method: create table, create partitions and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - new_tag = "new_tag" - nb = add_vectors_nb - vectors 
= gen_vectors(nb, dim) - status = connect.create_partition(table, tag) - status = connect.create_partition(table, new_tag) - assert status.OK() - res = connect.add_vectors(table_name=table, records=vectors) - connect.flush([table]) - status, res = connect.get_table_row_count(table) - assert res == nb - - def test_table_rows_count_multi_partitions_B(self, connect, table, add_vectors_nb): - ''' - target: test table rows_count is correct or not - method: create table, create partitions and add vectors in one of the partitions, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - new_tag = "new_tag" - nb = add_vectors_nb - vectors = gen_vectors(nb, dim) - status = connect.create_partition(table, tag) - status = connect.create_partition(table, new_tag) - assert status.OK() - res = connect.add_vectors(table_name=table, records=vectors, partition_tag=tag) - connect.flush([table]) - status, res = connect.get_table_row_count(table) - assert res == nb - - def test_table_rows_count_multi_partitions_C(self, connect, table, add_vectors_nb): - ''' - target: test table rows_count is correct or not - method: create table, create partitions and add vectors in one of the partitions, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the table count is equal to the length of vectors - ''' - new_tag = "new_tag" - nb = add_vectors_nb - vectors = gen_vectors(nb, dim) - status = connect.create_partition(table, tag) - status = connect.create_partition(table, new_tag) - assert status.OK() - res = connect.add_vectors(table_name=table, records=vectors, partition_tag=tag) - res = connect.add_vectors(table_name=table, records=vectors, partition_tag=new_tag) - connect.flush([table]) - status, res = connect.get_table_row_count(table) - assert res == nb * 2 - - def test_table_rows_count_after_index_created(self, connect, table, get_simple_index): - 
''' - target: test get_table_row_count, after index have been created - method: add vectors in db, and create index, then calling get_table_row_count with correct params - expected: get_table_row_count raise exception - ''' - index_param = get_simple_index["index_param"] - index_type = get_simple_index["index_type"] - nb = 100 - vectors = gen_vectors(nb, dim) - res = connect.add_vectors(table_name=table, records=vectors) - connect.flush([table]) - connect.create_index(table, index_type, index_param) - status, res = connect.get_table_row_count(table) - assert res == nb - - @pytest.mark.level(2) - def test_count_without_connection(self, table, dis_connect): - ''' - target: test get_table_row_count, without connection - method: calling get_table_row_count with correct params, with a disconnected instance - expected: get_table_row_count raise exception - ''' - with pytest.raises(Exception) as e: - status = dis_connect.get_table_row_count(table) - - def test_table_rows_count_no_vectors(self, connect, table): - ''' - target: test table rows_count is correct or not, if table is empty - method: create table and no vectors in it, - assert the value returned by get_table_row_count method is equal to 0 - expected: the count is equal to 0 - ''' - table_name = gen_unique_str() - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size} - connect.create_table(param) - status, res = connect.get_table_row_count(table) - assert res == 0 - - # TODO: enable - @pytest.mark.level(2) - @pytest.mark.timeout(20) - def _test_table_rows_count_multiprocessing(self, connect, table, args): - ''' - target: test table rows_count is correct or not with multiprocess - method: create table and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nq = 2 - uri = "tcp://%s:%s" % (args["ip"], args["port"]) - vectors = gen_vectors(nq, dim) - res = 
connect.add_vectors(table_name=table, records=vectors) - time.sleep(add_time_interval) - - def rows_count(milvus): - status, res = milvus.get_table_row_count(table) - logging.getLogger().info(status) - assert res == nq - - process_num = 8 - processes = [] - for i in range(process_num): - milvus = get_milvus(args["handler"]) - milvus.connect(uri=uri) - p = Process(target=rows_count, args=(milvus, )) - processes.append(p) - p.start() - logging.getLogger().info(p) - for p in processes: - p.join() - - def test_table_rows_count_multi_tables(self, connect): - ''' - target: test table rows_count is correct or not with multiple tables of L2 - method: create table and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nq = 100 - vectors = gen_vectors(nq, dim) - table_list = [] - for i in range(20): - table_name = gen_unique_str() - table_list.append(table_name) - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.L2} - connect.create_table(param) - res = connect.add_vectors(table_name=table_name, records=vectors) - connect.flush(table_list) - for i in range(20): - status, res = connect.get_table_row_count(table_list[i]) - assert status.OK() - assert res == nq - - -class TestTableCountIP: - """ - params means different nb, the nb value may trigger merge, or not - """ - - @pytest.fixture( - scope="function", - params=[ - 1, - 5000, - 100000, - ], - ) - def add_vectors_nb(self, request): - yield request.param - - """ - generate valid create_index params - """ - - @pytest.fixture( - scope="function", - params=gen_simple_index() - ) - def get_simple_index(self, request, connect): - if str(connect._cmd("mode")[1]) == "CPU": - if request.param["index_type"] == IndexType.IVF_SQ8H: - pytest.skip("sq8h not support in CPU mode") - if request.param["index_type"] == IndexType.IVF_PQ: - 
pytest.skip("Skip PQ Temporary") - return request.param - - def test_table_rows_count(self, connect, ip_table, add_vectors_nb): - ''' - target: test table rows_count is correct or not - method: create table and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nb = add_vectors_nb - vectors = gen_vectors(nb, dim) - res = connect.add_vectors(table_name=ip_table, records=vectors) - connect.flush([ip_table]) - status, res = connect.get_table_row_count(ip_table) - assert res == nb - - def test_table_rows_count_after_index_created(self, connect, ip_table, get_simple_index): - ''' - target: test get_table_row_count, after index have been created - method: add vectors in db, and create index, then calling get_table_row_count with correct params - expected: get_table_row_count raise exception - ''' - index_param = get_simple_index["index_param"] - index_type = get_simple_index["index_type"] - nb = 100 - vectors = gen_vectors(nb, dim) - res = connect.add_vectors(table_name=ip_table, records=vectors) - connect.flush([ip_table]) - connect.create_index(ip_table, index_type, index_param) - status, res = connect.get_table_row_count(ip_table) - assert res == nb - - @pytest.mark.level(2) - def test_count_without_connection(self, ip_table, dis_connect): - ''' - target: test get_table_row_count, without connection - method: calling get_table_row_count with correct params, with a disconnected instance - expected: get_table_row_count raise exception - ''' - with pytest.raises(Exception) as e: - status = dis_connect.get_table_row_count(ip_table) - - def test_table_rows_count_no_vectors(self, connect, ip_table): - ''' - target: test table rows_count is correct or not, if table is empty - method: create table and no vectors in it, - assert the value returned by get_table_row_count method is equal to 0 - expected: the count is equal to 0 - ''' - table_name = 
gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size} - connect.create_table(param) - status, res = connect.get_table_row_count(ip_table) - assert res == 0 - - # TODO: enable - @pytest.mark.timeout(60) - def _test_table_rows_count_multiprocessing(self, connect, ip_table, args): - ''' - target: test table rows_count is correct or not with multiprocess - method: create table and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nq = 2 - uri = "tcp://%s:%s" % (args["ip"], args["port"]) - vectors = gen_vectors(nq, dim) - res = connect.add_vectors(table_name=ip_table, records=vectors) - time.sleep(add_time_interval) - - def rows_count(milvus): - status, res = milvus.get_table_row_count(ip_table) - logging.getLogger().info(status) - assert res == nq - - process_num = 8 - processes = [] - for i in range(process_num): - milvus = get_milvus(args["handler"]) - milvus.connect(uri=uri) - p = Process(target=rows_count, args=(milvus,)) - processes.append(p) - p.start() - logging.getLogger().info(p) - for p in processes: - p.join() - - def test_table_rows_count_multi_tables(self, connect): - ''' - target: test table rows_count is correct or not with multiple tables of IP - method: create table and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nq = 100 - vectors = gen_vectors(nq, dim) - table_list = [] - for i in range(20): - table_name = gen_unique_str('test_table_rows_count_multi_tables') - table_list.append(table_name) - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.IP} - connect.create_table(param) - res = connect.add_vectors(table_name=table_name, records=vectors) - connect.flush(table_list) 
- for i in range(20): - status, res = connect.get_table_row_count(table_list[i]) - assert status.OK() - assert res == nq - - -class TestTableCountJAC: - """ - params means different nb, the nb value may trigger merge, or not - """ - - @pytest.fixture( - scope="function", - params=[ - 1, - 5000, - 100000, - ], - ) - def add_vectors_nb(self, request): - yield request.param - - """ - generate valid create_index params - """ - - @pytest.fixture( - scope="function", - params=gen_simple_index() - ) - def get_jaccard_index(self, request, connect): - logging.getLogger().info(request.param) - if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT: - return request.param - else: - pytest.skip("Skip index Temporary") - - def test_table_rows_count(self, connect, jac_table, add_vectors_nb): - ''' - target: test table rows_count is correct or not - method: create table and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nb = add_vectors_nb - tmp, vectors = gen_binary_vectors(nb, dim) - res = connect.add_vectors(table_name=jac_table, records=vectors) - connect.flush([jac_table]) - status, res = connect.get_table_row_count(jac_table) - assert res == nb - - def test_table_rows_count_after_index_created(self, connect, jac_table, get_jaccard_index): - ''' - target: test get_table_row_count, after index have been created - method: add vectors in db, and create index, then calling get_table_row_count with correct params - expected: get_table_row_count raise exception - ''' - nb = 100 - index_param = get_jaccard_index["index_param"] - index_type = get_jaccard_index["index_type"] - tmp, vectors = gen_binary_vectors(nb, dim) - res = connect.add_vectors(table_name=jac_table, records=vectors) - connect.flush([jac_table]) - connect.create_index(jac_table, index_type, index_param) - status, res = 
connect.get_table_row_count(jac_table) - assert res == nb - - @pytest.mark.level(2) - def test_count_without_connection(self, jac_table, dis_connect): - ''' - target: test get_table_row_count, without connection - method: calling get_table_row_count with correct params, with a disconnected instance - expected: get_table_row_count raise exception - ''' - with pytest.raises(Exception) as e: - status = dis_connect.get_table_row_count(jac_table) - - def test_table_rows_count_no_vectors(self, connect, jac_table): - ''' - target: test table rows_count is correct or not, if table is empty - method: create table and no vectors in it, - assert the value returned by get_table_row_count method is equal to 0 - expected: the count is equal to 0 - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size} - connect.create_table(param) - status, res = connect.get_table_row_count(jac_table) - assert res == 0 - - def test_table_rows_count_multi_tables(self, connect): - ''' - target: test table rows_count is correct or not with multiple tables of IP - method: create table and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nq = 100 - tmp, vectors = gen_binary_vectors(nq, dim) - table_list = [] - for i in range(20): - table_name = gen_unique_str('test_table_rows_count_multi_tables') - table_list.append(table_name) - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.JACCARD} - connect.create_table(param) - res = connect.add_vectors(table_name=table_name, records=vectors) - connect.flush(table_list) - for i in range(20): - status, res = connect.get_table_row_count(table_list[i]) - assert status.OK() - assert res == nq - -class TestTableCountHAM: - """ - params means different nb, the nb value may trigger merge, 
or not - """ - - @pytest.fixture( - scope="function", - params=[ - 1, - 5000, - 100000, - ], - ) - def add_vectors_nb(self, request): - yield request.param - - """ - generate valid create_index params - """ - - @pytest.fixture( - scope="function", - params=gen_simple_index() - ) - def get_hamming_index(self, request, connect): - logging.getLogger().info(request.param) - if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT: - return request.param - else: - pytest.skip("Skip index Temporary") - - def test_table_rows_count(self, connect, ham_table, add_vectors_nb): - ''' - target: test table rows_count is correct or not - method: create table and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nb = add_vectors_nb - tmp, vectors = gen_binary_vectors(nb, dim) - res = connect.add_vectors(table_name=ham_table, records=vectors) - connect.flush([ham_table]) - status, res = connect.get_table_row_count(ham_table) - assert res == nb - - def test_table_rows_count_after_index_created(self, connect, ham_table, get_hamming_index): - ''' - target: test get_table_row_count, after index have been created - method: add vectors in db, and create index, then calling get_table_row_count with correct params - expected: get_table_row_count raise exception - ''' - nb = 100 - index_type = get_hamming_index["index_type"] - index_param = get_hamming_index["index_param"] - tmp, vectors = gen_binary_vectors(nb, dim) - res = connect.add_vectors(table_name=ham_table, records=vectors) - connect.flush([ham_table]) - connect.create_index(ham_table, index_type, index_param) - status, res = connect.get_table_row_count(ham_table) - assert res == nb - - @pytest.mark.level(2) - def test_count_without_connection(self, ham_table, dis_connect): - ''' - target: test get_table_row_count, without connection - method: calling get_table_row_count 
with correct params, with a disconnected instance - expected: get_table_row_count raise exception - ''' - with pytest.raises(Exception) as e: - status = dis_connect.get_table_row_count(ham_table) - - def test_table_rows_count_no_vectors(self, connect, ham_table): - ''' - target: test table rows_count is correct or not, if table is empty - method: create table and no vectors in it, - assert the value returned by get_table_row_count method is equal to 0 - expected: the count is equal to 0 - ''' - table_name = gen_unique_str("test_table") - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size} - connect.create_table(param) - status, res = connect.get_table_row_count(ham_table) - assert res == 0 - - def test_table_rows_count_multi_tables(self, connect): - ''' - target: test table rows_count is correct or not with multiple tables of IP - method: create table and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nq = 100 - tmp, vectors = gen_binary_vectors(nq, dim) - table_list = [] - for i in range(20): - table_name = gen_unique_str('test_table_rows_count_multi_tables') - table_list.append(table_name) - param = {'table_name': table_name, - 'dimension': dim, - 'index_file_size': index_file_size, - 'metric_type': MetricType.HAMMING} - connect.create_table(param) - res = connect.add_vectors(table_name=table_name, records=vectors) - connect.flush(table_list) - for i in range(20): - status, res = connect.get_table_row_count(table_list[i]) - assert status.OK() - assert res == nq - - -class TestTableCountTANIMOTO: - """ - params means different nb, the nb value may trigger merge, or not - """ - - @pytest.fixture( - scope="function", - params=[ - 1, - 5000, - 100000, - ], - ) - def add_vectors_nb(self, request): - yield request.param - - """ - generate valid create_index params - """ - - @pytest.fixture( - 
scope="function", - params=gen_simple_index() - ) - def get_tanimoto_index(self, request, connect): - logging.getLogger().info(request.param) - if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT: - return request.param - else: - pytest.skip("Skip index Temporary") - - def test_table_rows_count(self, connect, tanimoto_table, add_vectors_nb): - ''' - target: test table rows_count is correct or not - method: create table and add vectors in it, - assert the value returned by get_table_row_count method is equal to length of vectors - expected: the count is equal to the length of vectors - ''' - nb = add_vectors_nb - tmp, vectors = gen_binary_vectors(nb, dim) - res = connect.add_vectors(table_name=tanimoto_table, records=vectors) - connect.flush([tanimoto_table]) - status, res = connect.get_table_row_count(tanimoto_table) - assert status.OK() - assert res == nb diff --git a/tests/milvus_python_test/test_wal.py b/tests/milvus_python_test/test_wal.py index 8a42241c6e..86887a2b63 100644 --- a/tests/milvus_python_test/test_wal.py +++ b/tests/milvus_python_test/test_wal.py @@ -8,7 +8,7 @@ from milvus import IndexType, MetricType from utils import * dim = 128 -table_id = "test_wal" +collection_id = "test_wal" WAL_TIMEOUT = 30 nb = 6000 add_interval = 1.5 @@ -21,109 +21,109 @@ class TestWalBase: ****************************************************************** """ @pytest.mark.timeout(WAL_TIMEOUT) - def test_wal_add_vectors(self, connect, table): + def test_wal_add_vectors(self, connect, collection): ''' target: add vectors in WAL method: add vectors and flush when WAL is enabled expected: status ok, vectors added ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_table_row_count(table) + status, res = 
connect.count_collection(collection) assert status.OK() assert res == nb - status, res = connect.get_vector_by_id(table, ids[0]) + status, res = connect.get_vector_by_id(collection, ids[0]) logging.getLogger().info(res) assert status.OK() assert_equal_vector(res, vectors[0]) @pytest.mark.timeout(WAL_TIMEOUT) - def test_wal_delete_vectors(self, connect, table): + def test_wal_delete_vectors(self, connect, collection): ''' target: delete vectors in WAL method: delete vectors and flush when WAL is enabled expected: status ok, vectors deleted ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - connect.flush([table]) - status, res = connect.get_table_row_count(table) + connect.flush([collection]) + status, res = connect.count_collection(collection) assert status.OK() - status = connect.delete_by_id(table, ids) + status = connect.delete_by_id(collection, ids) assert status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_table_row_count(table) + status, res = connect.count_collection(collection) assert status.OK() assert res == 0 @pytest.mark.timeout(WAL_TIMEOUT) - def test_wal_invalid_operation(self, connect, table): + def test_wal_invalid_operation(self, connect, collection): ''' target: invalid operation in WAL method: add vectors, delete with non-existent ids and flush when WAL is enabled expected: status ok, search with vector have result ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - connect.flush([table]) - status = connect.delete_by_id(table, [0]) + connect.flush([collection]) + status = connect.delete_by_id(collection, [0]) assert status.OK() - status = connect.flush([table]) - status, res = connect.get_table_row_count(table) + status = 
connect.flush([collection]) + status, res = connect.count_collection(collection) assert status.OK() assert res == 1 @pytest.mark.timeout(WAL_TIMEOUT) - def test_wal_invalid_operation_B(self, connect, table): + def test_wal_invalid_operation_B(self, connect, collection): ''' target: invalid operation in WAL - method: add vectors, delete with not existed table name when WAL is enabled + method: add vectors, delete with not existed collection name when WAL is enabled expected: status not ok ''' vectors = gen_vector(nb, dim) - status, ids = connect.add_vectors(table, vectors) + status, ids = connect.add_vectors(collection, vectors) assert status.OK() - status = connect.flush([table]) - status = connect.delete_by_id(table, [0]) - connect.flush([table]) - table_new = gen_unique_str() - status = connect.delete_by_id(table_new, ids) + status = connect.flush([collection]) + status = connect.delete_by_id(collection, [0]) + connect.flush([collection]) + collection_new = gen_unique_str() + status = connect.delete_by_id(collection_new, ids) assert not status.OK() - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_table_row_count(table) + status, res = connect.count_collection(collection) assert status.OK() assert res == nb @pytest.mark.timeout(WAL_TIMEOUT) - def test_wal_server_crashed_recovery(self, connect, table): + def test_wal_server_crashed_recovery(self, connect, collection): ''' target: test wal when server crashed unexpectedly and restarted method: add vectors, server killed before flush, restarted server and flush expected: status ok, add request is recovered and vectors added ''' vector = gen_single_vector(dim) - status, ids = connect.add_vectors(table, vector) + status, ids = connect.add_vectors(collection, vector) assert status.OK() - status = connect.flush([table]) - status, res = connect.get_table_row_count(table) + status = connect.flush([collection]) + status, res = 
connect.count_collection(collection) assert status.OK() logging.getLogger().info(res) # should be 0 because no auto flush logging.getLogger().info("Stop server and restart") # kill server and restart. auto flush should be set to 15 seconds. # time.sleep(15) - status = connect.flush([table]) + status = connect.flush([collection]) assert status.OK() - status, res = connect.get_table_row_count(table) + status, res = connect.count_collection(collection) assert status.OK() assert res == 1 - status, res = connect.get_vector_by_id(table, ids[0]) + status, res = connect.get_vector_by_id(collection, ids[0]) logging.getLogger().info(res) assert status.OK() assert_equal_vector(res, vector[0]) diff --git a/tests/milvus_python_test/utils.py b/tests/milvus_python_test/utils.py index 9f3ec6b086..a686c89020 100644 --- a/tests/milvus_python_test/utils.py +++ b/tests/milvus_python_test/utils.py @@ -146,8 +146,8 @@ def gen_invalid_uris(): return uris -def gen_invalid_table_names(): - table_names = [ +def gen_invalid_collection_names(): + collection_names = [ "12-s", "12/s", " ", @@ -166,7 +166,7 @@ def gen_invalid_table_names(): "中文", "a".join("a" for i in range(256)) ] - return table_names + return collection_names def gen_invalid_top_ks(): @@ -617,8 +617,8 @@ def get_search_param(index_type): logging.getLogger().info("Invalid index_type.") -def assert_has_table(conn, table_name): - status, ok = conn.has_table(table_name) +def assert_has_collection(conn, collection_name): + status, ok = conn.has_collection(collection_name) return status.OK() and ok From 94c0254e22dee0b4131c959dc0b4dfea5a05a409 Mon Sep 17 00:00:00 2001 From: sahuang Date: Mon, 9 Mar 2020 09:20:34 +0000 Subject: [PATCH 5/8] fix test cases Signed-off-by: sahuang --- tests/milvus_python_test/conftest.py | 100 +++++++++--------- tests/milvus_python_test/test_add_vectors.py | 2 +- tests/milvus_python_test/test_config.py | 31 +++--- .../milvus_python_test/test_delete_vectors.py | 2 +- tests/milvus_python_test/test_flush.py 
| 2 +- .../test_get_vector_by_id.py | 2 +- tests/milvus_python_test/test_index.py | 2 +- 7 files changed, 73 insertions(+), 68 deletions(-) diff --git a/tests/milvus_python_test/conftest.py b/tests/milvus_python_test/conftest.py index 88f24453b5..f17bc597f9 100644 --- a/tests/milvus_python_test/conftest.py +++ b/tests/milvus_python_test/conftest.py @@ -90,118 +90,118 @@ def milvus(request): @pytest.fixture(scope="function") -def table(request, connect): - ori_table_name = getattr(request.module, "table_id", "test") - table_name = gen_unique_str(ori_table_name) +def collection(request, connect): + ori_collection_name = getattr(request.module, "collection_id", "test") + collection_name = gen_unique_str(ori_collection_name) dim = getattr(request.module, "dim", "128") - param = {'table_name': table_name, + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.L2} - status = connect.create_table(param) + status = connect.create_collection(param) # logging.getLogger().info(status) if not status.OK(): - pytest.exit("Table can not be created, exit pytest ...") + pytest.exit("collection can not be created, exit pytest ...") def teardown(): - status, table_names = connect.show_tables() - for table_name in table_names: - connect.drop_collection(table_name) + status, collection_names = connect.show_collections() + for collection_name in collection_names: + connect.drop_collection(collection_name) request.addfinalizer(teardown) - return table_name + return collection_name @pytest.fixture(scope="function") -def ip_table(request, connect): - ori_table_name = getattr(request.module, "table_id", "test") - table_name = gen_unique_str(ori_table_name) +def ip_collection(request, connect): + ori_collection_name = getattr(request.module, "collection_id", "test") + collection_name = gen_unique_str(ori_collection_name) dim = getattr(request.module, "dim", "128") - param = {'table_name': table_name, + param = 
{'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.IP} - status = connect.create_table(param) + status = connect.create_collection(param) # logging.getLogger().info(status) if not status.OK(): - pytest.exit("Table can not be created, exit pytest ...") + pytest.exit("collection can not be created, exit pytest ...") def teardown(): - status, table_names = connect.show_tables() - for table_name in table_names: - connect.drop_collection(table_name) + status, collection_names = connect.show_collections() + for collection_name in collection_names: + connect.drop_collection(collection_name) request.addfinalizer(teardown) - return table_name + return collection_name @pytest.fixture(scope="function") -def jac_table(request, connect): - ori_table_name = getattr(request.module, "table_id", "test") - table_name = gen_unique_str(ori_table_name) +def jac_collection(request, connect): + ori_collection_name = getattr(request.module, "collection_id", "test") + collection_name = gen_unique_str(ori_collection_name) dim = getattr(request.module, "dim", "128") - param = {'table_name': table_name, + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.JACCARD} - status = connect.create_table(param) + status = connect.create_collection(param) # logging.getLogger().info(status) if not status.OK(): - pytest.exit("Table can not be created, exit pytest ...") + pytest.exit("collection can not be created, exit pytest ...") def teardown(): - status, table_names = connect.show_tables() - for table_name in table_names: - connect.drop_collection(table_name) + status, collection_names = connect.show_collections() + for collection_name in collection_names: + connect.drop_collection(collection_name) request.addfinalizer(teardown) - return table_name + return collection_name @pytest.fixture(scope="function") -def ham_table(request, connect): - ori_table_name = 
getattr(request.module, "table_id", "test") - table_name = gen_unique_str(ori_table_name) +def ham_collection(request, connect): + ori_collection_name = getattr(request.module, "collection_id", "test") + collection_name = gen_unique_str(ori_collection_name) dim = getattr(request.module, "dim", "128") - param = {'table_name': table_name, + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.HAMMING} - status = connect.create_table(param) + status = connect.create_collection(param) # logging.getLogger().info(status) if not status.OK(): - pytest.exit("Table can not be created, exit pytest ...") + pytest.exit("collection can not be created, exit pytest ...") def teardown(): - status, table_names = connect.show_tables() - for table_name in table_names: - connect.drop_collection(table_name) + status, collection_names = connect.show_collections() + for collection_name in collection_names: + connect.drop_collection(collection_name) request.addfinalizer(teardown) - return table_name + return collection_name @pytest.fixture(scope="function") -def tanimoto_table(request, connect): - ori_table_name = getattr(request.module, "table_id", "test") - table_name = gen_unique_str(ori_table_name) +def tanimoto_collection(request, connect): + ori_collection_name = getattr(request.module, "collection_id", "test") + collection_name = gen_unique_str(ori_collection_name) dim = getattr(request.module, "dim", "128") - param = {'table_name': table_name, + param = {'collection_name': collection_name, 'dimension': dim, 'index_file_size': index_file_size, 'metric_type': MetricType.TANIMOTO} - status = connect.create_table(param) + status = connect.create_collection(param) # logging.getLogger().info(status) if not status.OK(): - pytest.exit("Table can not be created, exit pytest ...") + pytest.exit("collection can not be created, exit pytest ...") def teardown(): - status, table_names = connect.show_tables() - for table_name 
in table_names: - connect.drop_collection(table_name) + status, collection_names = connect.show_collections() + for collection_name in collection_names: + connect.drop_collection(collection_name) request.addfinalizer(teardown) - return table_name \ No newline at end of file + return collection_name diff --git a/tests/milvus_python_test/test_add_vectors.py b/tests/milvus_python_test/test_add_vectors.py index 866d766699..ac3f1fcdc2 100644 --- a/tests/milvus_python_test/test_add_vectors.py +++ b/tests/milvus_python_test/test_add_vectors.py @@ -1302,7 +1302,7 @@ class TestNameInvalid(object): assert not status.OK() -class TestAddcollectionVectorsInvalid(object): +class TestAddCollectionVectorsInvalid(object): single_vector = gen_single_vector(dim) vectors = gen_vectors(2, dim) diff --git a/tests/milvus_python_test/test_config.py b/tests/milvus_python_test/test_config.py index c5de3f2829..d3eea51e20 100644 --- a/tests/milvus_python_test/test_config.py +++ b/tests/milvus_python_test/test_config.py @@ -28,7 +28,7 @@ class TestCacheConfig: @pytest.mark.timeout(CONFIG_TIMEOUT) def reset_configs(self, connect): ''' - reset configs so the tests are scollection + reset configs so the tests are stable ''' status, reply = connect.set_config("cache_config", "cpu_cache_capacity", 4) assert status.OK() @@ -300,8 +300,10 @@ class TestCacheConfig: logging.getLogger().info(mem_available) status, cpu_cache_capacity = connect.get_config("cache_config", "cpu_cache_capacity") assert status.OK() + logging.getLogger().info(cpu_cache_capacity) status, insert_buffer_size = connect.get_config("cache_config", "insert_buffer_size") assert status.OK() + logging.getLogger().info(insert_buffer_size) status, reply = connect.set_config("cache_config", "cpu_cache_capacity", mem_available - int(insert_buffer_size) + 1) assert not status.OK() status, reply = connect.set_config("cache_config", "insert_buffer_size", mem_available - int(cpu_cache_capacity) + 1) @@ -502,6 +504,9 @@ class TestEngineConfig: 
status, config_value = connect.get_config("engine_config", "use_blas_threshold") assert status.OK() assert config_value == str(i * 100) + # reset + status, reply = connect.set_config("engine_config", "use_blas_threshold", 1100) + assert status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) def test_set_gpu_search_threshold_invalid_parent_key(self, connect, collection): @@ -576,7 +581,7 @@ class TestGPUResourceConfig: @pytest.mark.timeout(CONFIG_TIMEOUT) def reset_configs(self, connect): ''' - reset configs so the tests are scollection + reset configs so the tests are stable ''' status, reply = connect.set_config("gpu_resource_config", "enable", "true") assert status.OK() @@ -1139,7 +1144,7 @@ class TestServerConfig: method: call set_config correctly expected: status ok, set successfully ''' - for valid_deploy_mode in ["cluster_readonly", "cluster_wricollection", "single"]: + for valid_deploy_mode in ["cluster_readonly", "cluster_writable", "single"]: status, reply = connect.set_config("server_config", "deploy_mode", valid_deploy_mode) assert status.OK() status, config_value = connect.get_config("server_config", "deploy_mode") @@ -1236,25 +1241,25 @@ class TestDBConfig: assert status.OK() @pytest.mark.level(2) - def test_get_preload_collection_invalid_child_key(self, connect, collection): + def test_get_preload_table_invalid_child_key(self, connect, collection): ''' target: get invalid child key - method: call get_config without child_key: preload_collection + method: call get_config without child_key: preload_table expected: status not ok ''' - invalid_configs = ["preloadcollection", "preload_collection "] + invalid_configs = ["preloadtable", "preload_table "] for config in invalid_configs: status, config_value = connect.get_config("db_config", config) assert not status.OK() @pytest.mark.timeout(CONFIG_TIMEOUT) - def test_get_preload_collection_valid(self, connect, collection): + def test_get_preload_table_valid(self, connect, collection): ''' - target: get 
preload_collection + target: get preload_table method: call get_config correctly expected: status ok ''' - status, config_value = connect.get_config("db_config", "preload_collection") + status, config_value = connect.get_config("db_config", "preload_table") assert status.OK() @pytest.mark.level(2) @@ -1308,15 +1313,15 @@ class TestDBConfig: assert status.OK() assert config_value == 'sqlite://:@:/' - def test_set_preload_collection_valid(self, connect, collection): + def test_set_preload_table_valid(self, connect, collection): ''' - target: set preload_collection + target: set preload_table method: call set_config correctly expected: status ok, set successfully ''' - status, reply = connect.set_config("db_config", "preload_collection", "") + status, reply = connect.set_config("db_config", "preload_table", "") assert status.OK() - status, config_value = connect.get_config("db_config", "preload_collection") + status, config_value = connect.get_config("db_config", "preload_table") assert status.OK() assert config_value == "" diff --git a/tests/milvus_python_test/test_delete_vectors.py b/tests/milvus_python_test/test_delete_vectors.py index 7c756d20a8..75be8afc07 100644 --- a/tests/milvus_python_test/test_delete_vectors.py +++ b/tests/milvus_python_test/test_delete_vectors.py @@ -538,7 +538,7 @@ class TestDeleteIdsIngalid(object): status = connect.delete_by_id(collection, [1, invalid_id]) -class TestcollectionNameInvalid(object): +class TestCollectionNameInvalid(object): """ Test adding vectors with invalid collection names """ diff --git a/tests/milvus_python_test/test_flush.py b/tests/milvus_python_test/test_flush.py index debd5581fa..52ddc78345 100644 --- a/tests/milvus_python_test/test_flush.py +++ b/tests/milvus_python_test/test_flush.py @@ -233,7 +233,7 @@ class TestFlushBase: assert res == 0 -class TestcollectionNameInvalid(object): +class TestCollectionNameInvalid(object): """ Test adding vectors with invalid collection names """ diff --git 
a/tests/milvus_python_test/test_get_vector_by_id.py b/tests/milvus_python_test/test_get_vector_by_id.py index 5abcd3e98d..d949807a92 100644 --- a/tests/milvus_python_test/test_get_vector_by_id.py +++ b/tests/milvus_python_test/test_get_vector_by_id.py @@ -393,7 +393,7 @@ class TestGetVectorIdIngalid(object): status = connect.get_vector_by_id(collection, invalid_id) -class TestcollectionNameInvalid(object): +class TestCollectionNameInvalid(object): """ Test adding vectors with invalid collection names """ diff --git a/tests/milvus_python_test/test_index.py b/tests/milvus_python_test/test_index.py index b84a6ba4df..c2baf12a6b 100644 --- a/tests/milvus_python_test/test_index.py +++ b/tests/milvus_python_test/test_index.py @@ -1659,7 +1659,7 @@ class TestIndexHAM: assert result._collection_name == ham_collection assert result._index_type == IndexType.FLAT -class TestIndexcollectionInvalid(object): +class TestIndexCollectionInvalid(object): """ Test create / describe / drop index interfaces with invalid collection names """ From f9ece7a550f6f1b80000fc184d6a89c7f3817577 Mon Sep 17 00:00:00 2001 From: Xiaohai Xu Date: Mon, 9 Mar 2020 20:33:32 +0800 Subject: [PATCH 6/8] fix config test Signed-off-by: Xiaohai Xu --- tests/milvus_python_test/test_config.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tests/milvus_python_test/test_config.py b/tests/milvus_python_test/test_config.py index d3eea51e20..9f80c9c97b 100644 --- a/tests/milvus_python_test/test_config.py +++ b/tests/milvus_python_test/test_config.py @@ -163,6 +163,12 @@ class TestCacheConfig: mem_available = mem_total - mem_used return int(mem_available / 1024 / 1024 / 1024) + def get_memory_total(self, connect): + _, info = connect._cmd("get_system_info") + mem_info = ujson.loads(info) + mem_total = int(mem_info["memory_total"]) + return int(mem_total / 1024 / 1024 / 1024) + @pytest.mark.timeout(CONFIG_TIMEOUT) def test_set_cpu_cache_capacity_invalid_parent_key(self, connect, 
collection): ''' @@ -281,14 +287,14 @@ class TestCacheConfig: expected: status not ok (cpu_cache_capacity + insert_buffer_size < system memory) ''' self.reset_configs(connect) - mem_available = self.get_memory_available(connect) - logging.getLogger().info(mem_available) - status, reply = connect.set_config("cache_config", "cpu_cache_capacity", mem_available + 1) + mem_total = self.get_memory_total(connect) + logging.getLogger().info(mem_total) + status, reply = connect.set_config("cache_config", "cpu_cache_capacity", mem_total + 1) assert not status.OK() - status, reply = connect.set_config("cache_config", "insert_buffer_size", mem_available + 1) + status, reply = connect.set_config("cache_config", "insert_buffer_size", mem_total + 1) assert not status.OK() - @pytest.mark.timeout(CONFIG_TIMEOUT) + @pytest.skip(reason="Still needs discussion") def test_set_cache_config_out_of_memory_value_B(self, connect, collection): ''' target: set cpu_cache_capacity / insert_buffer_size to be out-of-memory @@ -309,6 +315,7 @@ class TestCacheConfig: status, reply = connect.set_config("cache_config", "insert_buffer_size", mem_available - int(cpu_cache_capacity) + 1) assert not status.OK() + @pytest.skip(reason="Still needs discussion") def test_set_cache_config_out_of_memory_value_C(self, connect, collection): ''' target: set cpu_cache_capacity / insert_buffer_size to be out-of-memory From 76cf09c8fafeaac67509d3fd076daabfd8af8c1f Mon Sep 17 00:00:00 2001 From: Xiaohai Xu Date: Mon, 9 Mar 2020 20:39:18 +0800 Subject: [PATCH 7/8] fix config test Signed-off-by: Xiaohai Xu --- tests/milvus_python_test/test_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/milvus_python_test/test_config.py b/tests/milvus_python_test/test_config.py index 9f80c9c97b..8ad1a884d5 100644 --- a/tests/milvus_python_test/test_config.py +++ b/tests/milvus_python_test/test_config.py @@ -294,7 +294,7 @@ class TestCacheConfig: status, reply = connect.set_config("cache_config", 
"insert_buffer_size", mem_total + 1) assert not status.OK() - @pytest.skip(reason="Still needs discussion") + @pytest.mark.skip(reason="Still needs discussion") def test_set_cache_config_out_of_memory_value_B(self, connect, collection): ''' target: set cpu_cache_capacity / insert_buffer_size to be out-of-memory @@ -315,7 +315,7 @@ class TestCacheConfig: status, reply = connect.set_config("cache_config", "insert_buffer_size", mem_available - int(cpu_cache_capacity) + 1) assert not status.OK() - @pytest.skip(reason="Still needs discussion") + @pytest.mark.skip(reason="Still needs discussion") def test_set_cache_config_out_of_memory_value_C(self, connect, collection): ''' target: set cpu_cache_capacity / insert_buffer_size to be out-of-memory From a3b28429357f6fd2bb080e2c224758cc2d4ee97e Mon Sep 17 00:00:00 2001 From: Xiaohai Xu Date: Tue, 10 Mar 2020 15:15:19 +0800 Subject: [PATCH 8/8] add back nsg Signed-off-by: Xiaohai Xu --- tests/milvus_python_test/utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/milvus_python_test/utils.py b/tests/milvus_python_test/utils.py index a686c89020..1f584e0f56 100644 --- a/tests/milvus_python_test/utils.py +++ b/tests/milvus_python_test/utils.py @@ -588,7 +588,7 @@ def gen_simple_index(): IndexType.IVF_SQ8H, IndexType.IVF_PQ, IndexType.HNSW, - # IndexType.RNSG + IndexType.RNSG ] params = [ {"nlist": 1024}, @@ -597,7 +597,7 @@ def gen_simple_index(): {"nlist": 1024}, {"nlist": 1024, "m": 16}, {"M": 16, "efConstruction": 500}, - # {"search_length": 100, "out_degree": 40, "candidate_pool_size": 66, "knng": 100} + {"search_length": 50, "out_degree": 40, "candidate_pool_size": 100, "knng": 50} ] index_params = [] @@ -611,8 +611,8 @@ def get_search_param(index_type): return {"nprobe": 32} elif index_type == IndexType.HNSW: return {"ef": 64} - # elif index_type == IndexType.RNSG: - # return {"search_length": 100} + elif index_type == IndexType.RNSG: + return {"search_length": 50} else: 
logging.getLogger().info("Invalid index_type.")