From eff75c77010ec27f68e25a26c8b496f4d3fb9736 Mon Sep 17 00:00:00 2001
From: binbin <83755740+binbinlv@users.noreply.github.com>
Date: Fri, 20 Aug 2021 11:00:56 +0800
Subject: [PATCH] Replace sdk source and merge tests and tests20 (#7182)

Signed-off-by: Binbin Lv
---
 .github/mergify.yml | 20 +-
 .github/workflows/publish-test-images.yaml | 4 +-
 DEVELOPMENT.md | 2 +-
 build/ci/jenkins/Jenkinsfile | 29 +-
 build/ci/jenkins/NightlyCI.groovy | 32 +-
 {tests20 => tests-deprecating}/OWNERS | 0
 tests-deprecating/README.md | 91 +
 tests-deprecating/docker/.env | 7 +
 tests-deprecating/docker/Dockerfile | 18 +
 tests-deprecating/docker/docker-compose.yml | 23 +
 .../go/insert_test.go | 0
 .../go/key2seg_test.go | 0
 .../milvus_benchmark/.gitignore | 0
 .../milvus_benchmark/Dockerfile | 0
 .../milvus_benchmark/README.md | 0
 .../milvus_benchmark/asserts/uml.jpg | Bin
 .../milvus_benchmark/ci/argo.yaml | 0
 .../ci/function/file_transfer.groovy | 0
 .../ci/jenkinsfile/cleanup.groovy | 0
 .../ci/jenkinsfile/deploy_test.groovy | 0
 .../ci/jenkinsfile/notify.groovy | 0
 .../milvus_benchmark/ci/main_jenkinsfile | 0
 .../pod_containers/milvus-testframework.yaml | 0
 .../milvus_benchmark/ci/publish_jenkinsfile | 0
 .../ci/scripts/yaml_processor.py | 0
 .../milvus_benchmark/__init__.py | 0
 .../milvus_benchmark/chaos/__init__.py | 0
 .../milvus_benchmark/chaos/chaos_mesh.py | 0
 .../milvus_benchmark/chaos/chaos_opt.py | 0
 .../milvus_benchmark/chaos/pod-new.yaml | 0
 .../milvus_benchmark/chaos/pod.yaml | 0
 .../chaos/template/PodChaos.yaml | 0
 .../milvus_benchmark/chaos/test.py | 0
 .../milvus_benchmark/chaos/utils.py | 0
 .../milvus_benchmark/client.py | 0
 .../milvus_benchmark/config.py | 0
 .../milvus_benchmark/env/__init__.py | 0
 .../milvus_benchmark/env/base.py | 0
 .../milvus_benchmark/env/docker.py | 0
 .../milvus_benchmark/env/helm.py | 0
 .../milvus_benchmark/env/helm_utils.py | 0
 .../milvus_benchmark/env/local.py | 0
 .../milvus_benchmark/logs/log.py | 0
 .../milvus_benchmark/logs/logging.yaml | 0
 .../milvus_benchmark/milvus_benchmark/main.py | 0
 .../milvus_benchmark/metrics/__init__.py | 0
 .../milvus_benchmark/metrics/api.py | 0
 .../milvus_benchmark/metrics/config.py | 0
 .../metrics/models/__init__.py | 0
 .../milvus_benchmark/metrics/models/env.py | 0
 .../metrics/models/hardware.py | 0
 .../milvus_benchmark/metrics/models/metric.py | 0
 .../milvus_benchmark/metrics/models/server.py | 0
 .../milvus_benchmark/parser.py | 0
 .../milvus_benchmark/runners/__init__.py | 0
 .../milvus_benchmark/runners/accuracy.py | 0
 .../milvus_benchmark/runners/base.py | 0
 .../milvus_benchmark/runners/build.py | 0
 .../milvus_benchmark/runners/chaos.py | 0
 .../milvus_benchmark/runners/docker_runner.py | 0
 .../milvus_benchmark/runners/docker_utils.py | 0
 .../milvus_benchmark/runners/get.py | 0
 .../milvus_benchmark/runners/insert.py | 0
 .../milvus_benchmark/runners/locust.py | 0
 .../milvus_benchmark/runners/locust_file.py | 0
 .../milvus_benchmark/runners/locust_task.py | 0
 .../milvus_benchmark/runners/locust_tasks.py | 0
 .../milvus_benchmark/runners/locust_user.py | 0
 .../milvus_benchmark/runners/search.py | 0
 .../milvus_benchmark/runners/test.py | 0
 .../milvus_benchmark/runners/utils.py | 0
 .../milvus_benchmark/scheduler.py | 0
 .../milvus_benchmark/scheduler/010_data.json | 0
 .../milvus_benchmark/scheduler/011_data.json | 0
 .../scheduler/011_data_acc_debug.json | 0
 .../scheduler/011_data_gpu_build.json | 0
 .../scheduler/011_data_insert.json | 0
 .../scheduler/011_delete.json | 0
 .../scheduler/2_cluster_data.json | 0
 .../milvus_benchmark/scheduler/2_data.json | 0
 .../milvus_benchmark/scheduler/acc.json | 0
 .../milvus_benchmark/scheduler/build.json | 0
 .../milvus_benchmark/scheduler/clean.json | 0
 .../milvus_benchmark/scheduler/cluster.json | 0
 .../milvus_benchmark/scheduler/debug.json | 0
 .../milvus_benchmark/scheduler/debug1.json | 0
 .../milvus_benchmark/scheduler/debug2.json | 0
 .../milvus_benchmark/scheduler/filter.json | 0
 .../milvus_benchmark/scheduler/idc.json | 0
 .../milvus_benchmark/scheduler/insert.json | 0
 .../milvus_benchmark/scheduler/insert2.json | 0
 .../milvus_benchmark/scheduler/jaccard.json | 0
 .../milvus_benchmark/scheduler/locust.json | 0
 .../scheduler/locust_insert.json | 0
 .../scheduler/locust_mix.json | 0
 .../scheduler/locust_mix_debug.json | 0
 .../scheduler/locust_search.json | 0
 .../milvus_benchmark/scheduler/loop.json | 0
 .../scheduler/loop_search.json | 0
 .../milvus_benchmark/scheduler/nlist.json | 0
 .../milvus_benchmark/scheduler/search.json | 0
 .../milvus_benchmark/scheduler/search2.json | 0
 .../scheduler/search_debug.json | 0
 .../scheduler/shards_ann.json | 0
 .../scheduler/shards_debug.json | 0
 .../scheduler/shards_stability.json | 0
 .../milvus_benchmark/scheduler/stability.json | 0
 .../suites/011_add_flush_performance.yaml | 0
 .../suites/011_build_debug.yaml | 0
 .../suites/011_cluster_cpu_accuracy_ann.yaml | 0
 .../suites/011_cpu_accuracy.yaml | 0
 .../suites/011_cpu_accuracy_ann.yaml | 0
 .../suites/011_cpu_accuracy_ann_debug.yaml | 0
 .../suites/011_cpu_accuracy_rhnsw.yaml | 0
 .../suites/011_cpu_build_binary.yaml | 0
 .../suites/011_cpu_build_hnsw.yaml | 0
 .../suites/011_cpu_build_rhnsw.yaml | 0
 .../suites/011_cpu_search.yaml | 0
 .../suites/011_cpu_search_binary.yaml | 0
 .../suites/011_cpu_search_debug.yaml | 0
 .../suites/011_cpu_search_sift10m.yaml | 0
 .../suites/011_cpu_search_sift10m_filter.yaml | 0
 .../suites/011_cpu_search_sift10m_hnsw.yaml | 0
 .../suites/011_cpu_search_sift10m_ivf.yaml | 0
 .../suites/011_cpu_search_sift1b.yaml | 0
 .../suites/011_cpu_search_sift50m.yaml | 0
 .../suites/011_delete_performance.yaml | 0
 .../suites/011_gpu_accuracy.yaml | 0
 .../suites/011_gpu_accuracy_ann.yaml | 0
 .../suites/011_gpu_accuracy_ann_debug.yaml | 0
 .../suites/011_gpu_accuracy_debug.yaml | 0
 .../suites/011_gpu_build.yaml | 0
 .../suites/011_gpu_build_debug.yaml | 0
 .../suites/011_gpu_build_sift10m.yaml | 0
 .../suites/011_gpu_build_sift1b.yaml | 0
 .../suites/011_gpu_build_sift50m.yaml | 0
 .../suites/011_gpu_search.yaml | 0
 .../suites/011_gpu_search_debug.yaml | 0
 .../suites/011_gpu_search_sift10m.yaml | 0
 .../suites/011_gpu_search_sift10m_100k.yaml | 0
 .../suites/011_gpu_search_sift10m_filter.yaml | 0
 .../suites/011_gpu_search_sift10m_ivf.yaml | 0
 .../suites/011_gpu_search_sift50m.yaml | 0
 .../suites/011_gpu_sift50m_ivf.yaml | 0
 .../suites/011_gpu_stability.yaml | 0
 .../suites/011_insert_debug.yaml | 0
 .../suites/011_insert_performance.yaml | 0
 .../suites/011_insert_performance_debug.yaml | 0
 .../suites/011_search_dsl.yaml | 0
 .../suites/011_search_stability.yaml | 0
 .../suites/011_search_threshold.yaml | 0
 .../suites/011_sift50m_acc.yaml | 0
 .../suites/2_accuracy_ann_debug.yaml | 0
 .../suites/2_cpu_accuracy.yaml | 0
 .../suites/2_cpu_ann_accuracy.yaml | 0
 .../milvus_benchmark/suites/2_cpu_build.yaml | 0
 .../milvus_benchmark/suites/2_cpu_search.yaml | 0
 .../suites/2_insert_build.yaml | 0
 .../suites/2_insert_cluster.yaml | 0
 .../suites/2_insert_data.yaml | 0
 .../milvus_benchmark/suites/2_insert_get.yaml | 0
 .../suites/2_insert_search.yaml | 0
 .../suites/2_insert_search_debug.yaml | 0
 .../suites/2_insert_search_sift10m.yaml | 0
 .../suites/2_insert_search_sift10m_1024.yaml | 0
 .../suites/2_insert_search_sift10m_2048.yaml | 0
 .../suites/2_insert_search_sift10m_4096.yaml | 0
 .../suites/2_insert_search_sift10m_512.yaml | 0
 .../suites/2_insert_search_sift50m_1024.yaml | 0
 .../suites/2_insert_search_sift50m_2048.yaml | 0
 .../suites/2_insert_search_sift50m_4096.yaml | 0
 .../suites/2_insert_search_sift50m_512.yaml | 0
 .../suites/2_locust_insert.yaml | 0
 .../suites/2_locust_insert_5h.yaml | 0
 .../suites/2_locust_insert_flush.yaml | 0
 .../suites/2_locust_load_insert.yaml | 0
 .../suites/2_locust_load_insert_flush.yaml | 0
 .../suites/2_locust_random.yaml | 0
 .../suites/2_locust_random_load_release.yaml | 0
 .../suites/2_locust_search.yaml | 0
 .../suites/2_locust_search_5h.yaml | 0
 .../suites/2_locust_search_index.yaml | 0
 .../suites/add_flush_performance.yaml | 0
 .../milvus_benchmark/suites/ann_debug.yaml | 0
 .../milvus_benchmark/suites/clean.yaml | 0
 .../suites/cluster_locust_mix.yaml | 0
 .../milvus_benchmark/suites/cpu_accuracy.yaml | 0
 .../suites/cpu_accuracy_ann.yaml | 0
 .../suites/cpu_accuracy_ann_debug.yaml | 0
 .../suites/cpu_accuracy_ann_hnsw.yaml | 0
 .../suites/cpu_accuracy_ann_pq.yaml | 0
 .../suites/cpu_accuracy_nsg.yaml | 0
 .../suites/cpu_build_performance.yaml | 0
 .../suites/cpu_search_binary.yaml | 0
 .../cpu_search_performance_jaccard.yaml | 0
 .../suites/cpu_search_performance_sift1b.yaml | 0
 .../cpu_search_performance_sift50m.yaml | 0
 .../suites/cpu_stability_sift50m.yaml | 0
 .../milvus_benchmark/suites/debug.yaml | 0
 .../milvus_benchmark/suites/debug_build.yaml | 0
 .../suites/debug_gpu_search.yaml | 0
 .../suites/flush_kill_query_pod.yaml | 0
 .../milvus_benchmark/suites/gpu_accuracy.yaml | 0
 .../suites/gpu_accuracy_ann.yaml | 0
 .../suites/gpu_accuracy_debug.yaml | 0
 .../suites/gpu_accuracy_sift1b.yaml | 0
 .../suites/gpu_accuracy_sift1m.yaml | 0
 .../gpu_build_performance_jaccard50m.yaml | 0
 .../suites/gpu_build_sift1b_sq8h.yaml | 0
 .../suites/gpu_search_performance.yaml | 0
 .../gpu_search_performance_jaccard50m.yaml | 0
 .../suites/gpu_search_performance_sift1b.yaml | 0
 .../gpu_search_performance_sift50m.yaml | 0
 .../suites/gpu_search_stability.yaml | 0
 .../suites/insert_binary.yaml | 0
 .../suites/insert_performance_deep1b.yaml | 0
 .../suites/locust_cluster_search.yaml | 0
 .../suites/locust_insert.yaml | 0
 .../suites/locust_search.yaml | 0
 .../suites/loop_stability.yaml | 0
 .../milvus_benchmark/suites/metric.yaml | 0
 .../milvus_benchmark/suites/pq.yaml | 0
 .../milvus_benchmark/suites/qps.yaml | 0
 .../milvus_benchmark/suites/search_debug.yaml | 0
 .../suites/shards_ann_debug.yaml | 0
 .../suites/shards_insert_performance.yaml | 0
 .../shards_insert_performance_sift1m.yaml | 0
 .../suites/shards_loop_stability.yaml | 0
 .../shards_search_performance_sift1m.yaml | 0
 .../milvus_benchmark/milvus_benchmark/test.py | 0
 .../tests/locust_user_test.py | 0
 .../milvus_benchmark/tests/test_scheduler.py | 0
 .../milvus_benchmark/update.py | 0
 .../milvus_benchmark/utils.py | 0
 .../milvus_benchmark/requirements.txt | 0
 .../python_test/.dockerignore | 0
 .../python_test/.gitignore | 0
 .../python_test/README.md | 0
 .../collection/test_collection_count.py | 0
 .../collection/test_collection_logic.py | 0
 .../collection/test_collection_stats.py | 0
 .../collection/test_create_collection.py | 0
 .../collection/test_describe_collection.py | 0
 .../collection/test_drop_collection.py | 0
 .../collection/test_has_collection.py | 0
 .../collection/test_list_collections.py | 0
 .../collection/test_load_collection.py | 0
 .../python_test/conftest.py | 0
 .../python_test/constants.py | 0
 .../python_test/entity/test_delete.py | 0
 .../entity/test_get_entity_by_id.py | 0
 .../python_test/entity/test_insert.py | 0
 .../entity/test_list_id_in_segment.py | 0
 .../python_test/entity/test_query.py | 0
 .../python_test/entity/test_search.py | 0
 .../python_test/pytest.ini | 2 +
 .../python_test}/requirements.txt | 4 +-
 .../python_test/run.sh | 0
 .../python_test/stability/test_mysql.py | 0
 .../python_test/stability/test_restart.py | 0
 .../python_test/test_compact.py | 0
 .../python_test/test_config.py | 0
 .../python_test/test_connect.py | 0
 .../python_test/test_flush.py | 0
 .../python_test/test_index.py | 0
 .../python_test/test_mix.py | 0
 .../python_test/test_partition.py | 0
 .../python_test/test_ping.py | 0
 .../python_test/utils.py | 0
 tests-deprecating/scripts/e2e-k8s.sh | 317 +++
 tests-deprecating/scripts/e2e.sh | 83 +
 tests-deprecating/scripts/export_logs.sh | 28 +
 tests-deprecating/scripts/install_milvus.sh | 57 +
 tests-deprecating/scripts/uninstall_milvus.sh | 23 +
 tests/benchmark/.gitignore | 13 +
 tests/benchmark/Dockerfile | 30 +
 tests/benchmark/README.md | 184 ++
 tests/benchmark/asserts/uml.jpg | Bin 0 -> 140944 bytes
 tests/benchmark/ci/argo.yaml | 232 ++
 .../ci/function/file_transfer.groovy | 10 +
 tests/benchmark/ci/jenkinsfile/cleanup.groovy | 13 +
 .../ci/jenkinsfile/deploy_test.groovy | 24 +
 tests/benchmark/ci/jenkinsfile/notify.groovy | 15 +
 tests/benchmark/ci/main_jenkinsfile | 151 ++
 .../pod_containers/milvus-testframework.yaml | 13 +
 tests/benchmark/ci/publish_jenkinsfile | 104 +
 tests/benchmark/ci/scripts/yaml_processor.py | 536 +++++
 tests/benchmark/milvus_benchmark/__init__.py | 2 +
 .../milvus_benchmark/chaos/__init__.py | 0
 .../milvus_benchmark/chaos/chaos_mesh.py | 71 +
 .../milvus_benchmark/chaos/chaos_opt.py | 65 +
 .../milvus_benchmark/chaos/pod-new.yaml | 17 +
 .../benchmark/milvus_benchmark/chaos/pod.yaml | 11 +
 .../chaos/template/PodChaos.yaml | 13 +
 .../benchmark/milvus_benchmark/chaos/test.py | 36 +
 .../benchmark/milvus_benchmark/chaos/utils.py | 38 +
 tests/benchmark/milvus_benchmark/client.py | 491 +++++
 tests/benchmark/milvus_benchmark/config.py | 42 +
 .../milvus_benchmark/env/__init__.py | 14 +
 tests/benchmark/milvus_benchmark/env/base.py | 46 +
 .../benchmark/milvus_benchmark/env/docker.py | 12 +
 tests/benchmark/milvus_benchmark/env/helm.py | 72 +
 .../milvus_benchmark/env/helm_utils.py | 473 +++++
 tests/benchmark/milvus_benchmark/env/local.py | 21 +
 tests/benchmark/milvus_benchmark/logs/log.py | 24 +
 .../milvus_benchmark/logs/logging.yaml | 37 +
 tests/benchmark/milvus_benchmark/main.py | 273 +++
 .../milvus_benchmark/metrics/__init__.py | 0
 .../benchmark/milvus_benchmark/metrics/api.py | 55 +
 .../milvus_benchmark/metrics/config.py | 3 +
 .../metrics/models/__init__.py | 4 +
 .../milvus_benchmark/metrics/models/env.py | 23 +
 .../metrics/models/hardware.py | 24 +
 .../milvus_benchmark/metrics/models/metric.py | 52 +
 .../milvus_benchmark/metrics/models/server.py | 27 +
 tests/benchmark/milvus_benchmark/parser.py | 87 +
 .../milvus_benchmark/runners/__init__.py | 26 +
 .../milvus_benchmark/runners/accuracy.py | 262 +++
 .../milvus_benchmark/runners/base.py | 152 ++
 .../milvus_benchmark/runners/build.py | 106 +
 .../milvus_benchmark/runners/chaos.py | 127 ++
 .../milvus_benchmark/runners/docker_runner.py | 366 ++++
 .../milvus_benchmark/runners/docker_utils.py | 126 ++
 .../benchmark/milvus_benchmark/runners/get.py | 121 ++
 .../milvus_benchmark/runners/insert.py | 243 +++
 .../milvus_benchmark/runners/locust.py | 399 ++++
 .../milvus_benchmark/runners/locust_file.py | 30 +
 .../milvus_benchmark/runners/locust_task.py | 38 +
 .../milvus_benchmark/runners/locust_tasks.py | 79 +
 .../milvus_benchmark/runners/locust_user.py | 109 +
 .../milvus_benchmark/runners/search.py | 290 +++
 .../milvus_benchmark/runners/test.py | 40 +
 .../milvus_benchmark/runners/utils.py | 265 +++
 tests/benchmark/milvus_benchmark/scheduler.py | 27 +
 .../milvus_benchmark/scheduler/010_data.json | 65 +
 .../milvus_benchmark/scheduler/011_data.json | 62 +
 .../scheduler/011_data_acc_debug.json | 11 +
 .../scheduler/011_data_gpu_build.json | 11 +
 .../scheduler/011_data_insert.json | 11 +
 .../scheduler/011_delete.json | 15 +
 .../scheduler/2_cluster_data.json | 41 +
 .../milvus_benchmark/scheduler/2_data.json | 34 +
 .../milvus_benchmark/scheduler/acc.json | 10 +
 .../milvus_benchmark/scheduler/build.json | 11 +
 .../milvus_benchmark/scheduler/clean.json | 11 +
 .../milvus_benchmark/scheduler/cluster.json | 11 +
 .../milvus_benchmark/scheduler/debug.json | 19 +
 .../milvus_benchmark/scheduler/debug1.json | 20 +
 .../milvus_benchmark/scheduler/debug2.json | 12 +
 .../milvus_benchmark/scheduler/filter.json | 11 +
 .../milvus_benchmark/scheduler/idc.json | 11 +
 .../milvus_benchmark/scheduler/insert.json | 11 +
 .../milvus_benchmark/scheduler/insert2.json | 11 +
 .../milvus_benchmark/scheduler/jaccard.json | 11 +
 .../milvus_benchmark/scheduler/locust.json | 15 +
 .../scheduler/locust_insert.json | 11 +
 .../scheduler/locust_mix.json | 11 +
 .../scheduler/locust_mix_debug.json | 10 +
 .../scheduler/locust_search.json | 11 +
 .../milvus_benchmark/scheduler/loop.json | 10 +
 .../scheduler/loop_search.json | 10 +
 .../milvus_benchmark/scheduler/nlist.json | 11 +
 .../milvus_benchmark/scheduler/search.json | 11 +
 .../milvus_benchmark/scheduler/search2.json | 11 +
 .../scheduler/search_debug.json | 11 +
 .../scheduler/shards_ann.json | 10 +
 .../scheduler/shards_debug.json | 15 +
 .../scheduler/shards_stability.json | 10 +
 .../milvus_benchmark/scheduler/stability.json | 11 +
 .../suites/011_add_flush_performance.yaml | 20 +
 .../suites/011_build_debug.yaml | 92 +
 .../suites/011_cluster_cpu_accuracy_ann.yaml | 336 +++
 .../suites/011_cpu_accuracy.yaml | 55 +
 .../suites/011_cpu_accuracy_ann.yaml | 260 +++
 .../suites/011_cpu_accuracy_ann_debug.yaml | 50 +
 .../suites/011_cpu_accuracy_rhnsw.yaml | 36 +
 .../suites/011_cpu_build_binary.yaml | 11 +
 .../suites/011_cpu_build_hnsw.yaml | 12 +
 .../suites/011_cpu_build_rhnsw.yaml | 23 +
 .../suites/011_cpu_search.yaml | 255 +++
 .../suites/011_cpu_search_binary.yaml | 49 +
 .../suites/011_cpu_search_debug.yaml | 26 +
 .../suites/011_cpu_search_sift10m.yaml | 123 ++
 .../suites/011_cpu_search_sift10m_filter.yaml | 97 +
 .../suites/011_cpu_search_sift10m_hnsw.yaml | 40 +
 .../suites/011_cpu_search_sift10m_ivf.yaml | 32 +
 .../suites/011_cpu_search_sift1b.yaml | 26 +
 .../suites/011_cpu_search_sift50m.yaml | 98 +
 .../suites/011_delete_performance.yaml | 17 +
 .../suites/011_gpu_accuracy.yaml | 61 +
 .../suites/011_gpu_accuracy_ann.yaml | 165 ++
 .../suites/011_gpu_accuracy_ann_debug.yaml | 24 +
 .../suites/011_gpu_accuracy_debug.yaml | 23 +
 .../suites/011_gpu_build.yaml | 21 +
 .../suites/011_gpu_build_debug.yaml | 151 ++
 .../suites/011_gpu_build_sift10m.yaml | 148 ++
 .../suites/011_gpu_build_sift1b.yaml | 42 +
 .../suites/011_gpu_build_sift50m.yaml | 75 +
 .../suites/011_gpu_search.yaml | 251 +++
 .../suites/011_gpu_search_debug.yaml | 79 +
 .../suites/011_gpu_search_sift10m.yaml | 145 ++
 .../suites/011_gpu_search_sift10m_100k.yaml | 121 ++
 .../suites/011_gpu_search_sift10m_filter.yaml | 126 ++
 .../suites/011_gpu_search_sift10m_ivf.yaml | 50 +
 .../suites/011_gpu_search_sift50m.yaml | 121 ++
 .../suites/011_gpu_sift50m_ivf.yaml | 26 +
 .../suites/011_gpu_stability.yaml | 39 +
 .../suites/011_insert_debug.yaml | 25 +
 .../suites/011_insert_performance.yaml | 113 +
 .../suites/011_insert_performance_debug.yaml | 131 ++
 .../suites/011_search_dsl.yaml | 76 +
 .../suites/011_search_stability.yaml | 20 +
 .../suites/011_search_threshold.yaml | 50 +
 .../suites/011_sift50m_acc.yaml | 19 +
 .../suites/2_accuracy_ann_debug.yaml | 32 +
 .../suites/2_cpu_accuracy.yaml | 21 +
 .../suites/2_cpu_ann_accuracy.yaml | 194 ++
 .../milvus_benchmark/suites/2_cpu_build.yaml | 22 +
 .../milvus_benchmark/suites/2_cpu_search.yaml | 29 +
 .../suites/2_insert_build.yaml | 22 +
 .../suites/2_insert_cluster.yaml | 24 +
 .../suites/2_insert_data.yaml | 13 +
 .../milvus_benchmark/suites/2_insert_get.yaml | 13 +
 .../suites/2_insert_search.yaml | 157 ++
 .../suites/2_insert_search_debug.yaml | 33 +
 .../suites/2_insert_search_sift10m.yaml | 33 +
 .../suites/2_insert_search_sift10m_1024.yaml | 33 +
 .../suites/2_insert_search_sift10m_2048.yaml | 89 +
 .../suites/2_insert_search_sift10m_4096.yaml | 33 +
 .../suites/2_insert_search_sift10m_512.yaml | 33 +
 .../suites/2_insert_search_sift50m_1024.yaml | 33 +
 .../suites/2_insert_search_sift50m_2048.yaml | 33 +
 .../suites/2_insert_search_sift50m_4096.yaml | 33 +
 .../suites/2_insert_search_sift50m_512.yaml | 33 +
 .../suites/2_locust_insert.yaml | 26 +
 .../suites/2_locust_insert_5h.yaml | 34 +
 .../suites/2_locust_insert_flush.yaml | 25 +
 .../suites/2_locust_load_insert.yaml | 25 +
 .../suites/2_locust_load_insert_flush.yaml | 25 +
 .../suites/2_locust_random.yaml | 48 +
 .../suites/2_locust_random_load_release.yaml | 32 +
 .../suites/2_locust_search.yaml | 43 +
 .../suites/2_locust_search_5h.yaml | 43 +
 .../suites/2_locust_search_index.yaml | 43 +
 .../suites/add_flush_performance.yaml | 20 +
 .../milvus_benchmark/suites/ann_debug.yaml | 26 +
 .../milvus_benchmark/suites/clean.yaml | 24 +
 .../suites/cluster_locust_mix.yaml | 47 +
 .../milvus_benchmark/suites/cpu_accuracy.yaml | 61 +
 .../suites/cpu_accuracy_ann.yaml | 212 ++
 .../suites/cpu_accuracy_ann_debug.yaml | 25 +
 .../suites/cpu_accuracy_ann_hnsw.yaml | 43 +
 .../suites/cpu_accuracy_ann_pq.yaml | 26 +
 .../suites/cpu_accuracy_nsg.yaml | 21 +
 .../suites/cpu_build_performance.yaml | 19 +
 .../suites/cpu_search_binary.yaml | 67 +
 .../cpu_search_performance_jaccard.yaml | 20 +
 .../suites/cpu_search_performance_sift1b.yaml | 22 +
 .../cpu_search_performance_sift50m.yaml | 20 +
 .../suites/cpu_stability_sift50m.yaml | 27 +
 .../milvus_benchmark/suites/debug.yaml | 90 +
 .../milvus_benchmark/suites/debug_build.yaml | 23 +
 .../suites/debug_gpu_search.yaml | 30 +
 .../suites/flush_kill_query_pod.yaml | 44 +
 .../milvus_benchmark/suites/gpu_accuracy.yaml | 41 +
 .../suites/gpu_accuracy_ann.yaml | 172 ++
 .../suites/gpu_accuracy_debug.yaml | 40 +
 .../suites/gpu_accuracy_sift1b.yaml | 59 +
 .../suites/gpu_accuracy_sift1m.yaml | 21 +
 .../gpu_build_performance_jaccard50m.yaml | 20 +
 .../suites/gpu_build_sift1b_sq8h.yaml | 20 +
 .../suites/gpu_search_performance.yaml | 247 +++
 .../gpu_search_performance_jaccard50m.yaml | 22 +
 .../suites/gpu_search_performance_sift1b.yaml | 62 +
 .../gpu_search_performance_sift50m.yaml | 146 ++
 .../suites/gpu_search_stability.yaml | 23 +
 .../suites/insert_binary.yaml | 39 +
 .../suites/insert_performance_deep1b.yaml | 87 +
 .../suites/locust_cluster_search.yaml | 45 +
 .../suites/locust_insert.yaml | 23 +
 .../suites/locust_search.yaml | 49 +
 .../suites/loop_stability.yaml | 17 +
 .../milvus_benchmark/suites/metric.yaml | 47 +
 .../benchmark/milvus_benchmark/suites/pq.yaml | 27 +
 .../milvus_benchmark/suites/qps.yaml | 27 +
 .../milvus_benchmark/suites/search_debug.yaml | 92 +
 .../suites/shards_ann_debug.yaml | 25 +
 .../suites/shards_insert_performance.yaml | 17 +
 .../shards_insert_performance_sift1m.yaml | 19 +
 .../suites/shards_loop_stability.yaml | 16 +
 .../shards_search_performance_sift1m.yaml | 12 +
 tests/benchmark/milvus_benchmark/test.py | 36 +
 .../tests/locust_user_test.py | 15 +
 .../milvus_benchmark/tests/test_scheduler.py | 11 +
 tests/benchmark/milvus_benchmark/update.py | 218 ++
 tests/benchmark/milvus_benchmark/utils.py | 134 ++
 tests/benchmark/requirements.txt | 23 +
 tests/docker/.env | 2 +-
 tests/docker/Dockerfile | 2 +-
 {tests20 => tests}/go_client/README.md | 0
 {tests20 => tests}/java_client/README.md | 0
 tests/python_client/.dockerignore | 14 +
 tests/python_client/.gitignore | 17 +
 {tests20 => tests}/python_client/README.md | 6 +-
 {tests20 => tests}/python_client/README_CN.md | 6 +-
 .../python_client/base/client_base.py | 2 +-
 .../python_client/base/collection_wrapper.py | 0
 .../python_client/base/connections_wrapper.py | 4 +-
 .../python_client/base/index_wrapper.py | 2 +-
 .../python_client/base/partition_wrapper.py | 0
 .../python_client/base/schema_wrapper.py | 2 +-
 .../python_client/base/utility_wrapper.py | 2 +-
 .../python_client/chaos/README.md | 2 +-
 .../python_client/chaos/chaos_commons.py | 2 +-
 .../chaos_datacoord_podkill.yaml | 0
 .../chaos_datanode_container_kill.yaml | 0
 .../chaos_datanode_pod_failure.yaml | 0
 .../chaos_objects/chaos_datanode_podkill.yaml | 0
 .../chaos_indexcoord_podkill.yaml | 0
 .../chaos_indexnode_podkill.yaml | 0
 .../chaos_objects/chaos_minio_podkill.yaml | 0
 .../chaos_objects/chaos_proxy_podkill.yaml | 0
 .../chaos_querycoord_podkill.yaml | 0
 .../chaos_querynode_pod_failure.yaml | 0
 .../chaos_querynode_podkill.yaml | 0
 .../chaos_rootcoord_podkill.yaml | 0
 .../chaos_standalone_container_kill.yaml | 0
 .../chaos_standalone_podkill.yaml | 0
 .../skip_chaos_etcd_podkill.yaml | 0
 .../skip_chaos_pulsar_podkill.yaml | 0
 .../chaos/chaos_objects/testcases.yaml | 0
 .../python_client/chaos/chaos_opt.py | 2 +-
 .../python_client/chaos/checker.py | 0
 .../python_client/chaos/constants.py | 0
 .../python_client/chaos/test_chaos.py | 4 +-
 .../chaos/test_chaos_data_consist.py | 4 +-
 .../python_client/check/func_check.py | 2 +-
 .../python_client/check/param_check.py | 0
 .../python_client/common/code_mapping.py | 2 +-
 .../python_client/common/common_func.py | 2 +-
 .../python_client/common/common_type.py | 0
 tests/python_client/common/constants.py | 22 +
 .../python_client/config/log_config.py | 0
 tests/python_client/conftest.py | 439 ++++
 .../graphs/module_call_diagram.jpg | Bin
 .../python_client/load/README.md | 0
 .../python_client/load/test_workload.py | 2 +-
 {tests20 => tests}/python_client/pytest.ini | 2 +-
 .../requirements.txt | 6 +-
 tests/python_client/run.sh | 4 +
 .../python_client/scale/README.md | 2 +-
 .../python_client/scale/constants.py | 0
 .../python_client/scale/helm_env.py | 0
 .../python_client/scale/scale_common.py | 2 +-
 .../scale/test_data_node_scale.py | 2 +-
 .../scale/test_index_node_scale.py | 2 +-
 .../python_client/scale/test_proxy_scale.py | 0
 .../scale/test_query_node_scale.py | 2 +-
 .../collection/test_collection_count.py | 503 +++++
 .../collection/test_collection_logic.py | 138 ++
 .../collection/test_collection_stats.py | 415 ++++
 .../collection/test_create_collection.py | 327 +++
 .../collection/test_describe_collection.py | 184 ++
 .../collection/test_drop_collection.py | 108 +
 .../collection/test_has_collection.py | 105 +
 .../collection/test_list_collections.py | 102 +
 .../collection/test_load_collection.py | 671 ++++++
 .../testcases/entity/test_delete.py | 473 +++++
 .../testcases/entity/test_get_entity_by_id.py | 666 ++++++
 .../testcases/entity/test_insert.py | 1187 +++++++++++
 .../entity/test_list_id_in_segment.py | 318 +++
 .../testcases/entity/test_query.py | 670 ++++++
 .../testcases/entity/test_search.py | 1859 +++++++++++++++++
 .../testcases/stability/test_mysql.py | 44 +
 .../testcases/stability/test_restart.py | 315 +++
 .../testcases/test_collection_20.py | 0
 tests/python_client/testcases/test_compact.py | 722 +++++++
 tests/python_client/testcases/test_config.py | 1402 +++++++++++++
 tests/python_client/testcases/test_connect.py | 233 +++
 .../testcases/test_connection_20.py | 2 +-
 .../python_client/testcases/test_e2e_20.py | 0
 tests/python_client/testcases/test_flush.py | 394 ++++
 tests/python_client/testcases/test_index.py | 922 ++++++++
 .../python_client/testcases/test_index_20.py | 2 +-
 .../python_client/testcases/test_insert_20.py | 2 +-
 .../python_client/testcases/test_load_20.py | 0
 tests/python_client/testcases/test_mix.py | 199 ++
 .../python_client/testcases/test_partition.py | 496 +++++
 .../testcases/test_partition_20.py | 2 +-
 tests/python_client/testcases/test_ping.py | 129 ++
 .../python_client/testcases/test_query_20.py | 2 +-
 .../python_client/testcases/test_search_20.py | 0
 .../testcases/test_utility_20.py | 8 +-
 .../python_client/utils/api_request.py | 0
 .../python_client/utils/util_log.py | 0
 tests/python_client/utils/utils.py | 1031 +++++++++
 tests/scripts/e2e.sh | 16 +-
 tests20/README.md | 5 -
 tests20/benchmark/README.md | 5 -
 tests20/python_client/.gitignore | 5 -
 tests20/python_client/conftest.py | 215 --
 601 files changed, 29324 insertions(+), 324 deletions(-)
 rename {tests20 => tests-deprecating}/OWNERS (100%)
 create mode 100644 tests-deprecating/README.md
 create mode 100644 tests-deprecating/docker/.env
 create mode 100644 tests-deprecating/docker/Dockerfile
 create mode 100644 tests-deprecating/docker/docker-compose.yml
 rename {tests => tests-deprecating}/go/insert_test.go (100%)
 rename {tests => tests-deprecating}/go/key2seg_test.go (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/.gitignore (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/Dockerfile (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/README.md (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/asserts/uml.jpg (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/ci/argo.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/ci/function/file_transfer.groovy (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/ci/jenkinsfile/cleanup.groovy (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/ci/jenkinsfile/deploy_test.groovy (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/ci/jenkinsfile/notify.groovy (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/ci/main_jenkinsfile (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/ci/pod_containers/milvus-testframework.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/ci/publish_jenkinsfile (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/ci/scripts/yaml_processor.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/__init__.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/chaos/__init__.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/chaos/chaos_mesh.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/chaos/chaos_opt.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/chaos/pod-new.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/chaos/pod.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/chaos/template/PodChaos.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/chaos/test.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/chaos/utils.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/client.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/config.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/env/__init__.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/env/base.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/env/docker.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/env/helm.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/env/helm_utils.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/env/local.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/logs/log.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/logs/logging.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/main.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/metrics/__init__.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/metrics/api.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/metrics/config.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/metrics/models/__init__.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/metrics/models/env.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/metrics/models/hardware.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/metrics/models/metric.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/metrics/models/server.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/parser.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/__init__.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/accuracy.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/base.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/build.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/chaos.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/docker_runner.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/docker_utils.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/get.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/insert.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/locust.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/locust_file.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/locust_task.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/locust_tasks.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/locust_user.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/search.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/test.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/runners/utils.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/010_data.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/011_data.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/011_data_acc_debug.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/011_data_gpu_build.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/011_data_insert.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/011_delete.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/2_cluster_data.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/2_data.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/acc.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/build.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/clean.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/cluster.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/debug.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/debug1.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/debug2.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/filter.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/idc.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/insert.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/insert2.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/jaccard.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/locust.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/locust_insert.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/locust_mix.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/locust_mix_debug.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/locust_search.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/loop.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/loop_search.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/nlist.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/search.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/search2.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/search_debug.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/shards_ann.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/shards_debug.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/shards_stability.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/scheduler/stability.json (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_add_flush_performance.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_build_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cluster_cpu_accuracy_ann.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_rhnsw.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_binary.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_hnsw.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_rhnsw.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_search.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_binary.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_filter.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_hnsw.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_ivf.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift1b.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift50m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_delete_performance.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_build.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift10m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift1b.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift50m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_search.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_100k.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_filter.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_ivf.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift50m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_sift50m_ivf.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_gpu_stability.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_insert_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_insert_performance.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_insert_performance_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_search_dsl.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_search_stability.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_search_threshold.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/011_sift50m_acc.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_accuracy_ann_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_cpu_accuracy.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_cpu_ann_accuracy.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_cpu_build.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_cpu_search.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_build.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_cluster.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_data.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_get.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_search.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_search_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_1024.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_2048.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_4096.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_512.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_1024.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_2048.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_4096.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_512.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_locust_insert.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_locust_insert_5h.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_locust_insert_flush.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_locust_load_insert.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_locust_load_insert_flush.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_locust_random.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_locust_random_load_release.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_locust_search.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_locust_search_5h.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/2_locust_search_index.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/add_flush_performance.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/ann_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/clean.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cluster_locust_mix.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_hnsw.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_pq.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_nsg.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_build_performance.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_search_binary.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_jaccard.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_sift1b.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_sift50m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/cpu_stability_sift50m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/debug_build.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/debug_gpu_search.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/flush_kill_query_pod.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_ann.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_sift1b.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_sift1m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_build_performance_jaccard50m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_build_sift1b_sq8h.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_jaccard50m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_sift1b.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_sift50m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/gpu_search_stability.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/insert_binary.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/insert_performance_deep1b.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/locust_cluster_search.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/locust_insert.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/locust_search.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/loop_stability.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/metric.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/pq.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/qps.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/search_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/shards_ann_debug.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/shards_insert_performance.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/shards_insert_performance_sift1m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/shards_loop_stability.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/suites/shards_search_performance_sift1m.yaml (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/test.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/tests/locust_user_test.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/tests/test_scheduler.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/update.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/milvus_benchmark/utils.py (100%)
 rename {tests => tests-deprecating}/milvus_benchmark/requirements.txt (100%)
 rename {tests => tests-deprecating}/python_test/.dockerignore (100%)
 rename {tests => tests-deprecating}/python_test/.gitignore (100%)
 rename {tests => tests-deprecating}/python_test/README.md (100%)
 rename {tests => tests-deprecating}/python_test/collection/test_collection_count.py (100%)
 rename {tests => tests-deprecating}/python_test/collection/test_collection_logic.py (100%)
 rename {tests => tests-deprecating}/python_test/collection/test_collection_stats.py (100%)
 rename {tests => tests-deprecating}/python_test/collection/test_create_collection.py (100%)
 rename {tests => tests-deprecating}/python_test/collection/test_describe_collection.py (100%)
 rename {tests => tests-deprecating}/python_test/collection/test_drop_collection.py (100%)
 rename {tests => tests-deprecating}/python_test/collection/test_has_collection.py (100%)
 rename {tests => tests-deprecating}/python_test/collection/test_list_collections.py (100%)
 rename {tests => tests-deprecating}/python_test/collection/test_load_collection.py (100%)
 rename {tests => tests-deprecating}/python_test/conftest.py (100%)
 rename {tests => tests-deprecating}/python_test/constants.py (100%)
 rename {tests => tests-deprecating}/python_test/entity/test_delete.py (100%)
 rename {tests => tests-deprecating}/python_test/entity/test_get_entity_by_id.py (100%)
 rename {tests => tests-deprecating}/python_test/entity/test_insert.py (100%)
 rename {tests => tests-deprecating}/python_test/entity/test_list_id_in_segment.py (100%)
 rename {tests => tests-deprecating}/python_test/entity/test_query.py (100%)
 rename {tests => tests-deprecating}/python_test/entity/test_search.py (100%)
 rename {tests => tests-deprecating}/python_test/pytest.ini (99%)
 rename {tests20/python_client => tests-deprecating/python_test}/requirements.txt (93%)
 rename {tests => tests-deprecating}/python_test/run.sh (100%)
 rename {tests => tests-deprecating}/python_test/stability/test_mysql.py (100%)
 rename {tests => tests-deprecating}/python_test/stability/test_restart.py (100%)
 rename {tests => tests-deprecating}/python_test/test_compact.py (100%)
 rename {tests => tests-deprecating}/python_test/test_config.py (100%)
 rename {tests => tests-deprecating}/python_test/test_connect.py (100%)
 rename {tests => tests-deprecating}/python_test/test_flush.py (100%)
 rename {tests => tests-deprecating}/python_test/test_index.py (100%)
 rename {tests => tests-deprecating}/python_test/test_mix.py (100%)
 rename {tests => tests-deprecating}/python_test/test_partition.py (100%)
 rename {tests => tests-deprecating}/python_test/test_ping.py (100%)
 rename {tests => tests-deprecating}/python_test/utils.py (100%)
 create mode 100755 tests-deprecating/scripts/e2e-k8s.sh
 create mode 100755 tests-deprecating/scripts/e2e.sh
 create mode 100755 tests-deprecating/scripts/export_logs.sh
 create mode 100755 tests-deprecating/scripts/install_milvus.sh
 create mode 100755 tests-deprecating/scripts/uninstall_milvus.sh
 create mode 100644 tests/benchmark/.gitignore
 create mode 100644 tests/benchmark/Dockerfile
 create mode 100644 tests/benchmark/README.md
 create mode 100644 tests/benchmark/asserts/uml.jpg
 create mode 100644 tests/benchmark/ci/argo.yaml
 create mode 100644 tests/benchmark/ci/function/file_transfer.groovy
 create mode 100644 tests/benchmark/ci/jenkinsfile/cleanup.groovy
 create mode 100644 tests/benchmark/ci/jenkinsfile/deploy_test.groovy
 create mode 100644 tests/benchmark/ci/jenkinsfile/notify.groovy
 create mode 100644 tests/benchmark/ci/main_jenkinsfile
 create mode 100644 tests/benchmark/ci/pod_containers/milvus-testframework.yaml
 create mode 100644 tests/benchmark/ci/publish_jenkinsfile
 create mode 100755 tests/benchmark/ci/scripts/yaml_processor.py
 create mode 100644 tests/benchmark/milvus_benchmark/__init__.py
 create mode 100644 tests/benchmark/milvus_benchmark/chaos/__init__.py
 create mode 100644 tests/benchmark/milvus_benchmark/chaos/chaos_mesh.py
 create mode 100644 tests/benchmark/milvus_benchmark/chaos/chaos_opt.py
 create mode 100644 tests/benchmark/milvus_benchmark/chaos/pod-new.yaml
 create mode 100644 tests/benchmark/milvus_benchmark/chaos/pod.yaml
 create mode 100644 tests/benchmark/milvus_benchmark/chaos/template/PodChaos.yaml
 create mode 100644 tests/benchmark/milvus_benchmark/chaos/test.py
 create mode 100644 tests/benchmark/milvus_benchmark/chaos/utils.py
 create mode 100644 tests/benchmark/milvus_benchmark/client.py
 create mode 100644 tests/benchmark/milvus_benchmark/config.py
 create mode 100644 tests/benchmark/milvus_benchmark/env/__init__.py
 create mode 100644 tests/benchmark/milvus_benchmark/env/base.py
 create mode 100644 tests/benchmark/milvus_benchmark/env/docker.py
 create mode 100644 tests/benchmark/milvus_benchmark/env/helm.py
 create mode 100644 tests/benchmark/milvus_benchmark/env/helm_utils.py
 create mode 100644 tests/benchmark/milvus_benchmark/env/local.py
 create mode 100644 tests/benchmark/milvus_benchmark/logs/log.py
 create mode 100644 tests/benchmark/milvus_benchmark/logs/logging.yaml
 create mode 100644 tests/benchmark/milvus_benchmark/main.py
 create mode 100644 tests/benchmark/milvus_benchmark/metrics/__init__.py
 create mode 100644 tests/benchmark/milvus_benchmark/metrics/api.py
 create mode 100644 tests/benchmark/milvus_benchmark/metrics/config.py
 create mode 100644 tests/benchmark/milvus_benchmark/metrics/models/__init__.py
 create mode 100644 tests/benchmark/milvus_benchmark/metrics/models/env.py
 create mode 100644 tests/benchmark/milvus_benchmark/metrics/models/hardware.py
 create mode 100644 tests/benchmark/milvus_benchmark/metrics/models/metric.py
 create mode 100644 tests/benchmark/milvus_benchmark/metrics/models/server.py
 create mode 100644 tests/benchmark/milvus_benchmark/parser.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/__init__.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/accuracy.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/base.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/build.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/chaos.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/docker_runner.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/docker_utils.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/get.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/insert.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/locust.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/locust_file.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/locust_task.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/locust_tasks.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/locust_user.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/search.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/test.py
 create mode 100644 tests/benchmark/milvus_benchmark/runners/utils.py
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler.py
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/010_data.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/011_data.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/011_data_acc_debug.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/011_data_gpu_build.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/011_data_insert.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/011_delete.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/2_cluster_data.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/2_data.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/acc.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/build.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/clean.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/cluster.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/debug.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/debug1.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/debug2.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/filter.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/idc.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/insert.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/insert2.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/jaccard.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/locust.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/locust_insert.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/locust_mix.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/locust_mix_debug.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/locust_search.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/loop.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/loop_search.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/nlist.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/search.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/search2.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/search_debug.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/shards_ann.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/shards_debug.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/shards_stability.json
 create mode 100644 tests/benchmark/milvus_benchmark/scheduler/stability.json
create mode 100644 tests/benchmark/milvus_benchmark/suites/011_add_flush_performance.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_build_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cluster_cpu_accuracy_ann.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_rhnsw.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_build_binary.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_build_hnsw.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_build_rhnsw.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_search.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_search_binary.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_search_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_filter.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_hnsw.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_ivf.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift1b.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift50m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_delete_performance.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_build.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_build_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift10m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift1b.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift50m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_search.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_search_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_100k.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_filter.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_ivf.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift50m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_sift50m_ivf.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_gpu_stability.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_insert_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_insert_performance.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_insert_performance_debug.yaml create mode 
100644 tests/benchmark/milvus_benchmark/suites/011_search_dsl.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_search_stability.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_search_threshold.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/011_sift50m_acc.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_accuracy_ann_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_cpu_accuracy.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_cpu_ann_accuracy.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_cpu_build.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_cpu_search.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_build.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_cluster.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_data.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_get.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_search.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_search_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_1024.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_2048.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_4096.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_512.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_1024.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_2048.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_4096.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_512.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_locust_insert.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_locust_insert_5h.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_locust_insert_flush.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_locust_load_insert.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_locust_load_insert_flush.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_locust_random.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_locust_random_load_release.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_locust_search.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_locust_search_5h.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/2_locust_search_index.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/add_flush_performance.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/ann_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/clean.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cluster_locust_mix.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cpu_accuracy.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_hnsw.yaml create mode 100644 
tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_pq.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cpu_accuracy_nsg.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cpu_build_performance.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cpu_search_binary.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cpu_search_performance_jaccard.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cpu_search_performance_sift1b.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cpu_search_performance_sift50m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/cpu_stability_sift50m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/debug_build.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/debug_gpu_search.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/flush_kill_query_pod.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_accuracy.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_accuracy_ann.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_accuracy_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_accuracy_sift1b.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_accuracy_sift1m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_build_performance_jaccard50m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_build_sift1b_sq8h.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_search_performance.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_search_performance_jaccard50m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_search_performance_sift1b.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_search_performance_sift50m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/gpu_search_stability.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/insert_binary.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/insert_performance_deep1b.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/locust_cluster_search.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/locust_insert.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/locust_search.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/loop_stability.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/metric.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/pq.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/qps.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/search_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/shards_ann_debug.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/shards_insert_performance.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/shards_insert_performance_sift1m.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/shards_loop_stability.yaml create mode 100644 tests/benchmark/milvus_benchmark/suites/shards_search_performance_sift1m.yaml create mode 100644 tests/benchmark/milvus_benchmark/test.py create mode 100644 tests/benchmark/milvus_benchmark/tests/locust_user_test.py create mode 100644 tests/benchmark/milvus_benchmark/tests/test_scheduler.py create mode 100644 tests/benchmark/milvus_benchmark/update.py create 
mode 100644 tests/benchmark/milvus_benchmark/utils.py create mode 100644 tests/benchmark/requirements.txt rename {tests20 => tests}/go_client/README.md (100%) rename {tests20 => tests}/java_client/README.md (100%) create mode 100644 tests/python_client/.dockerignore create mode 100644 tests/python_client/.gitignore rename {tests20 => tests}/python_client/README.md (98%) rename {tests20 => tests}/python_client/README_CN.md (97%) rename {tests20 => tests}/python_client/base/client_base.py (99%) rename {tests20 => tests}/python_client/base/collection_wrapper.py (100%) rename {tests20 => tests}/python_client/base/connections_wrapper.py (96%) rename {tests20 => tests}/python_client/base/index_wrapper.py (95%) rename {tests20 => tests}/python_client/base/partition_wrapper.py (100%) rename {tests20 => tests}/python_client/base/schema_wrapper.py (97%) rename {tests20 => tests}/python_client/base/utility_wrapper.py (99%) rename {tests20 => tests}/python_client/chaos/README.md (97%) rename {tests20 => tests}/python_client/chaos/chaos_commons.py (98%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_datacoord_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_datanode_container_kill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_datanode_pod_failure.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_datanode_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_indexcoord_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_indexnode_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_minio_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_proxy_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_querycoord_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_querynode_pod_failure.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_querynode_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_rootcoord_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_standalone_container_kill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/chaos_standalone_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/skip_chaos_etcd_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/skip_chaos_pulsar_podkill.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_objects/testcases.yaml (100%) rename {tests20 => tests}/python_client/chaos/chaos_opt.py (98%) rename {tests20 => tests}/python_client/chaos/checker.py (100%) rename {tests20 => tests}/python_client/chaos/constants.py (100%) rename {tests20 => tests}/python_client/chaos/test_chaos.py (99%) rename {tests20 => tests}/python_client/chaos/test_chaos_data_consist.py (98%) rename {tests20 => tests}/python_client/check/func_check.py (99%) rename {tests20 => tests}/python_client/check/param_check.py (100%) rename {tests20 => tests}/python_client/common/code_mapping.py (93%) rename {tests20 => tests}/python_client/common/common_func.py (99%) rename {tests20 => tests}/python_client/common/common_type.py (100%) create mode 100644 tests/python_client/common/constants.py rename {tests20 => tests}/python_client/config/log_config.py (100%) create mode 100644 tests/python_client/conftest.py 
rename {tests20 => tests}/python_client/graphs/module_call_diagram.jpg (100%) rename {tests20 => tests}/python_client/load/README.md (100%) rename {tests20 => tests}/python_client/load/test_workload.py (99%) rename {tests20 => tests}/python_client/pytest.ini (65%) rename tests/{python_test => python_client}/requirements.txt (87%) create mode 100644 tests/python_client/run.sh rename {tests20 => tests}/python_client/scale/README.md (96%) rename {tests20 => tests}/python_client/scale/constants.py (100%) rename {tests20 => tests}/python_client/scale/helm_env.py (100%) rename {tests20 => tests}/python_client/scale/scale_common.py (95%) rename {tests20 => tests}/python_client/scale/test_data_node_scale.py (98%) rename {tests20 => tests}/python_client/scale/test_index_node_scale.py (99%) rename {tests20 => tests}/python_client/scale/test_proxy_scale.py (100%) rename {tests20 => tests}/python_client/scale/test_query_node_scale.py (98%) create mode 100644 tests/python_client/testcases/collection/test_collection_count.py create mode 100644 tests/python_client/testcases/collection/test_collection_logic.py create mode 100644 tests/python_client/testcases/collection/test_collection_stats.py create mode 100644 tests/python_client/testcases/collection/test_create_collection.py create mode 100644 tests/python_client/testcases/collection/test_describe_collection.py create mode 100644 tests/python_client/testcases/collection/test_drop_collection.py create mode 100644 tests/python_client/testcases/collection/test_has_collection.py create mode 100644 tests/python_client/testcases/collection/test_list_collections.py create mode 100644 tests/python_client/testcases/collection/test_load_collection.py create mode 100644 tests/python_client/testcases/entity/test_delete.py create mode 100644 tests/python_client/testcases/entity/test_get_entity_by_id.py create mode 100644 tests/python_client/testcases/entity/test_insert.py create mode 100644 tests/python_client/testcases/entity/test_list_id_in_segment.py create mode 100644 tests/python_client/testcases/entity/test_query.py create mode 100644 tests/python_client/testcases/entity/test_search.py create mode 100644 tests/python_client/testcases/stability/test_mysql.py create mode 100644 tests/python_client/testcases/stability/test_restart.py rename tests20/python_client/testcases/test_collection.py => tests/python_client/testcases/test_collection_20.py (100%) create mode 100644 tests/python_client/testcases/test_compact.py create mode 100644 tests/python_client/testcases/test_config.py create mode 100644 tests/python_client/testcases/test_connect.py rename tests20/python_client/testcases/test_connection.py => tests/python_client/testcases/test_connection_20.py (99%) rename tests20/python_client/testcases/test_e2e.py => tests/python_client/testcases/test_e2e_20.py (100%) create mode 100644 tests/python_client/testcases/test_flush.py create mode 100644 tests/python_client/testcases/test_index.py rename tests20/python_client/testcases/test_index.py => tests/python_client/testcases/test_index_20.py (99%) rename tests20/python_client/testcases/test_insert.py => tests/python_client/testcases/test_insert_20.py (99%) rename tests20/python_client/testcases/test_load.py => tests/python_client/testcases/test_load_20.py (100%) create mode 100644 tests/python_client/testcases/test_mix.py create mode 100644 tests/python_client/testcases/test_partition.py rename tests20/python_client/testcases/test_partition.py => tests/python_client/testcases/test_partition_20.py (99%) create mode 
100644 tests/python_client/testcases/test_ping.py rename tests20/python_client/testcases/test_query.py => tests/python_client/testcases/test_query_20.py (99%) rename tests20/python_client/testcases/test_search.py => tests/python_client/testcases/test_search_20.py (100%) rename tests20/python_client/testcases/test_utility.py => tests/python_client/testcases/test_utility_20.py (99%) rename {tests20 => tests}/python_client/utils/api_request.py (100%) rename {tests20 => tests}/python_client/utils/util_log.py (100%) create mode 100644 tests/python_client/utils/utils.py delete mode 100644 tests20/README.md delete mode 100644 tests20/benchmark/README.md delete mode 100644 tests20/python_client/.gitignore delete mode 100644 tests20/python_client/conftest.py diff --git a/.github/mergify.yml b/.github/mergify.yml index 1ead25879c..eb34c28f8c 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -14,22 +14,22 @@ pull_request_rules: - name: Test passed for tests changed conditions: - base=master - - -files~=^(?!tests\/python_test).+ + - -files~=^(?!tests\/python_client).+ - "status-success=continuous-integration/jenkins/pr-merge" actions: label: add: - ci-passed - - name: Test passed for tests20 changed - conditions: - - base=master - - -files~=^(?!tests20\/python_client).+ - - "status-success=continuous-integration/jenkins/pr-merge" - actions: - label: - add: - - ci-passed +# - name: Test passed for tests20 changed +# conditions: +# - base=master +# - -files~=^(?!tests20\/python_client).+ +# - "status-success=continuous-integration/jenkins/pr-merge" +# actions: +# label: +# add: +# - ci-passed - name: Test passed for document changed conditions: diff --git a/.github/workflows/publish-test-images.yaml b/.github/workflows/publish-test-images.yaml index 759606bc72..0246638782 100644 --- a/.github/workflows/publish-test-images.yaml +++ b/.github/workflows/publish-test-images.yaml @@ -7,14 +7,14 @@ on: # file paths to consider in the event. Optional; defaults to all. paths: - 'tests/docker/Dockerfile' - - 'tests/python_test/requirements.txt' + - 'tests/python_client/requirements.txt' - '.github/workflows/publish-test-images.yaml' - '!**.md' pull_request: # file paths to consider in the event. Optional; defaults to all. 
paths: - 'tests/docker/Dockerfile' - - 'tests/python_test/requirements.txt' + - 'tests/python_client/requirements.txt' - '.github/workflows/publish-test-images.yaml' - '!**.md' diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 4db5bf3b49..711cd09929 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -165,7 +165,7 @@ cd ../../../ To run E2E tests, use these command: ```shell -cd tests20/python_client +cd tests/python_client pip install -r requirements.txt pytest --tags=L0 -n auto ``` diff --git a/build/ci/jenkins/Jenkinsfile b/build/ci/jenkins/Jenkinsfile index 8c189b87dc..947a5f5a5e 100644 --- a/build/ci/jenkins/Jenkinsfile +++ b/build/ci/jenkins/Jenkinsfile @@ -22,7 +22,8 @@ pipeline { } axis { name 'MILVUS_CLIENT' - values 'pymilvus', 'pymilvus-orm' + values 'pymilvus' +// 'pymilvus-orm' } } agent { @@ -69,21 +70,21 @@ pipeline { --install-extra-arg "--set etcd.enabled=false --set externalEtcd.enabled=true --set externalEtcd.endpoints={\$KRTE_POD_IP:2379}" \ --skip-export-logs \ --skip-cleanup \ - --test-extra-arg "--tags=smoke" \ - --test-timeout ${e2e_timeout_seconds} - """ - } else if ("${MILVUS_CLIENT}" == "pymilvus-orm") { - sh """ - MILVUS_CLUSTER_ENABLED=${clusterEnabled} \ - ./e2e-k8s.sh \ - --node-image registry.zilliz.com/kindest/node:v1.20.2 \ - --skip-export-logs \ - --skip-cleanup \ - --kind-config "${env.WORKSPACE}/build/config/topology/trustworthy-jwt-ci.yaml" \ - --install-extra-arg "--set etcd.enabled=false --set externalEtcd.enabled=true --set externalEtcd.endpoints={\$KRTE_POD_IP:2379}" \ - --test-extra-arg "--tags L0 L1" \ + --test-extra-arg "-x --tags smoke L0 L1" \ --test-timeout ${e2e_timeout_seconds} """ +// } else if ("${MILVUS_CLIENT}" == "pymilvus-orm") { +// sh """ +// MILVUS_CLUSTER_ENABLED=${clusterEnabled} \ +// ./e2e-k8s.sh \ +// --node-image registry.zilliz.com/kindest/node:v1.20.2 \ +// --skip-export-logs \ +// --skip-cleanup \ +// --kind-config "${env.WORKSPACE}/build/config/topology/trustworthy-jwt-ci.yaml" \ +// --install-extra-arg "--set etcd.enabled=false --set externalEtcd.enabled=true --set externalEtcd.endpoints={\$KRTE_POD_IP:2379}" \ +// --test-extra-arg "--tags L0 L1" \ +// --test-timeout ${e2e_timeout_seconds} +// """ } else { error "Error: Unsupported Milvus client: ${MILVUS_CLIENT}" } diff --git a/build/ci/jenkins/NightlyCI.groovy b/build/ci/jenkins/NightlyCI.groovy index b78014303f..15ffd0a8af 100644 --- a/build/ci/jenkins/NightlyCI.groovy +++ b/build/ci/jenkins/NightlyCI.groovy @@ -5,8 +5,8 @@ String cron_timezone = "TZ=Asia/Shanghai" String cron_string = BRANCH_NAME == "master" ? "50 22 * * * " : "" -int total_timeout_minutes = 120 -int e2e_timeout_seconds = 60 * 60 +int total_timeout_minutes = 240 +int e2e_timeout_seconds = 3 * 60 * 60 pipeline { agent none @@ -30,7 +30,8 @@ pipeline { } axis { name 'MILVUS_CLIENT' - values 'pymilvus', 'pymilvus-orm' + values 'pymilvus' +// 'pymilvus-orm' } } agent { @@ -77,20 +78,21 @@ pipeline { --install-extra-arg "--set etcd.enabled=false --set externalEtcd.enabled=true --set externalEtcd.endpoints={\$KRTE_POD_IP:2379}" \ --skip-export-logs \ --skip-cleanup \ + --test-extra-arg "--tags smoke L0 L1 L2" \ --test-timeout ${e2e_timeout_seconds} """ - } else if ("${MILVUS_CLIENT}" == "pymilvus-orm") { - sh """ - MILVUS_CLUSTER_ENABLED=${clusterEnabled} \ - ./e2e-k8s.sh \ - --kind-config "${env.WORKSPACE}/build/config/topology/trustworthy-jwt-ci.yaml" \ - --node-image registry.zilliz.com/kindest/node:v1.20.2 \ - --install-extra-arg "--set etcd.enabled=false --set externalEtcd.enabled=true --set externalEtcd.endpoints={\$KRTE_POD_IP:2379}" \ - --skip-export-logs \ - --skip-cleanup \ - --test-extra-arg "--tags L0 L1 L2" \ - --test-timeout ${e2e_timeout_seconds} - """ +// } else if ("${MILVUS_CLIENT}" == "pymilvus-orm") { +// sh """ +// MILVUS_CLUSTER_ENABLED=${clusterEnabled} \ +// ./e2e-k8s.sh \ +// --kind-config "${env.WORKSPACE}/build/config/topology/trustworthy-jwt-ci.yaml" \ +// --node-image registry.zilliz.com/kindest/node:v1.20.2 \ +// --install-extra-arg "--set etcd.enabled=false --set externalEtcd.enabled=true --set externalEtcd.endpoints={\$KRTE_POD_IP:2379}" \ +// --skip-export-logs \ +// --skip-cleanup \ +// --test-extra-arg "--tags L0 L1 L2" \ +// --test-timeout ${e2e_timeout_seconds} +// """ } else { error "Error: Unsupported Milvus client: ${MILVUS_CLIENT}" } diff --git a/tests20/OWNERS b/tests-deprecating/OWNERS similarity index 100% rename from tests20/OWNERS rename to tests-deprecating/OWNERS diff --git a/tests-deprecating/README.md b/tests-deprecating/README.md new file mode 100644 index 0000000000..cc49a9a5b2 --- /dev/null +++ b/tests-deprecating/README.md @@ -0,0 +1,91 @@
+## Tests
+
+### E2E Test
+
+#### Requirements
+
+##### Operating system
+
+| Operating system | Version |
+| ------ | --------- |
+| CentOS | 7.5 or later |
+| Ubuntu | 16.04 or later |
+| Mac | 10.14 or later |
+
+##### Hardware
+
+| Hardware | Recommended configuration |
+| ---- | --------------------------------------------------------------------------------------------------- |
+| CPU | x86_64 platform<br/>Intel CPU Sandy Bridge or later<br/>CPU instruction sets:<br/>_ SSE42<br/>_ AVX<br/>_ AVX2<br/>_ AVX512 |
+| Memory | 16 GB or more |
+
+##### Software
+
+| Software | Version |
+| -------------- | ---------- |
+| Docker | 19.05 or later |
+| Docker Compose | 1.25.5 or later |
+| jq | 1.3 or later |
+| kubectl | 1.14 or later |
+| helm | 3.0 or later |
+| kind | 0.10.0 or later |
+
+#### Install dependencies
+
+##### Check the status of Docker and Docker Compose
+
+ 1. Confirm that the Docker daemon is running:
+
+```shell
+$ docker info
+```
+
+- To install Docker, see the [official Docker CE/EE installation instructions](https://docs.docker.com/get-docker/).
+
+- If the Docker information is not printed correctly, start the Docker daemon.
+
+- To run Docker commands without `root` privileges, create a `docker` group and add your user to it by running `sudo usermod -aG docker $USER`, then log out of the terminal and log back in for the change to take effect. See [Manage Docker as a non-root user](https://docs.docker.com/install/linux/linux-postinstall/) for details.
+
+ 2. Confirm the Docker Compose version:
+
+```shell
+$ docker-compose version
+
+docker-compose version 1.25.5, build 8a1c60f6
+docker-py version: 4.1.0
+CPython version: 3.7.5
+OpenSSL version: OpenSSL 1.1.1f 31 Mar 2020
+```
+
+- To install Docker Compose, see [Install Docker Compose](https://docs.docker.com/compose/install/).
+
+##### Install jq
+
+- See the official installation instructions.
+
+##### Install kubectl
+
+- See the official installation instructions.
+
+##### Install helm
+
+- See the official installation instructions.
+
+##### Install kind
+
+- See the official installation instructions.
+
+#### Run the E2E tests
+
+```shell
+$ cd tests/scripts
+$ ./e2e-k8s.sh
+```
+
+> Getting help
+>
+> You can run the following command to get help:
+>
+> ```shell
+> $ ./e2e-k8s.sh --help
+> ```
diff --git a/tests-deprecating/docker/.env b/tests-deprecating/docker/.env new file mode 100644 index 0000000000..57057b97e5 --- /dev/null +++ b/tests-deprecating/docker/.env @@ -0,0 +1,7 @@ +MILVUS_SERVICE_IP=127.0.0.1 +MILVUS_SERVICE_PORT=19530 +MILVUS_PYTEST_WORKSPACE=/milvus/tests/python_test +MILVUS_PYTEST_LOG_PATH=/milvus/_artifacts/tests/pytest_logs +IMAGE_REPO=milvusdb +IMAGE_TAG=20210802-87c5a49 +LATEST_IMAGE_TAG=20210802-87c5a49 diff --git a/tests-deprecating/docker/Dockerfile b/tests-deprecating/docker/Dockerfile new file mode 100644 index 0000000000..7fd8808e3f --- /dev/null +++ b/tests-deprecating/docker/Dockerfile @@ -0,0 +1,18 @@ +# Copyright (C) 2019-2020 Zilliz. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +# or implied. See the License for the specific language governing permissions and limitations under the License. + +FROM python:3.6.8-jessie + +COPY ./tests/python_test/requirements.txt /requirements.txt + +RUN python3 -m pip install --no-cache-dir -r /requirements.txt + +CMD ["tail", "-f", "/dev/null"] diff --git a/tests-deprecating/docker/docker-compose.yml b/tests-deprecating/docker/docker-compose.yml new file mode 100644 index 0000000000..64d1e138c8 --- /dev/null +++ b/tests-deprecating/docker/docker-compose.yml @@ -0,0 +1,23 @@ +version: '3.5' + +services: + pytest: + image: ${IMAGE_REPO}/pytest:${IMAGE_TAG} + build: + context: ../..
+ dockerfile: tests/docker/Dockerfile + cache_from: + - ${IMAGE_REPO}/pytest:${LATEST_IMAGE_TAG} + shm_size: 2G + environment: + MILVUS_SERVICE_IP: ${MILVUS_SERVICE_IP} + MILVUS_SERVICE_PORT: ${MILVUS_SERVICE_PORT} + CI_LOG_PATH: ${MILVUS_PYTEST_LOG_PATH} + volumes: + - ../../:/milvus:delegated + working_dir: ${MILVUS_PYTEST_WORKSPACE} + +networks: + default: + external: ${PRE_EXIST_NETWORK:-false} + name: ${PYTEST_NETWORK:-milvus_dev} diff --git a/tests/go/insert_test.go b/tests-deprecating/go/insert_test.go similarity index 100% rename from tests/go/insert_test.go rename to tests-deprecating/go/insert_test.go diff --git a/tests/go/key2seg_test.go b/tests-deprecating/go/key2seg_test.go similarity index 100% rename from tests/go/key2seg_test.go rename to tests-deprecating/go/key2seg_test.go diff --git a/tests/milvus_benchmark/.gitignore b/tests-deprecating/milvus_benchmark/.gitignore similarity index 100% rename from tests/milvus_benchmark/.gitignore rename to tests-deprecating/milvus_benchmark/.gitignore diff --git a/tests/milvus_benchmark/Dockerfile b/tests-deprecating/milvus_benchmark/Dockerfile similarity index 100% rename from tests/milvus_benchmark/Dockerfile rename to tests-deprecating/milvus_benchmark/Dockerfile diff --git a/tests/milvus_benchmark/README.md b/tests-deprecating/milvus_benchmark/README.md similarity index 100% rename from tests/milvus_benchmark/README.md rename to tests-deprecating/milvus_benchmark/README.md diff --git a/tests/milvus_benchmark/asserts/uml.jpg b/tests-deprecating/milvus_benchmark/asserts/uml.jpg similarity index 100% rename from tests/milvus_benchmark/asserts/uml.jpg rename to tests-deprecating/milvus_benchmark/asserts/uml.jpg diff --git a/tests/milvus_benchmark/ci/argo.yaml b/tests-deprecating/milvus_benchmark/ci/argo.yaml similarity index 100% rename from tests/milvus_benchmark/ci/argo.yaml rename to tests-deprecating/milvus_benchmark/ci/argo.yaml diff --git a/tests/milvus_benchmark/ci/function/file_transfer.groovy b/tests-deprecating/milvus_benchmark/ci/function/file_transfer.groovy similarity index 100% rename from tests/milvus_benchmark/ci/function/file_transfer.groovy rename to tests-deprecating/milvus_benchmark/ci/function/file_transfer.groovy diff --git a/tests/milvus_benchmark/ci/jenkinsfile/cleanup.groovy b/tests-deprecating/milvus_benchmark/ci/jenkinsfile/cleanup.groovy similarity index 100% rename from tests/milvus_benchmark/ci/jenkinsfile/cleanup.groovy rename to tests-deprecating/milvus_benchmark/ci/jenkinsfile/cleanup.groovy diff --git a/tests/milvus_benchmark/ci/jenkinsfile/deploy_test.groovy b/tests-deprecating/milvus_benchmark/ci/jenkinsfile/deploy_test.groovy similarity index 100% rename from tests/milvus_benchmark/ci/jenkinsfile/deploy_test.groovy rename to tests-deprecating/milvus_benchmark/ci/jenkinsfile/deploy_test.groovy diff --git a/tests/milvus_benchmark/ci/jenkinsfile/notify.groovy b/tests-deprecating/milvus_benchmark/ci/jenkinsfile/notify.groovy similarity index 100% rename from tests/milvus_benchmark/ci/jenkinsfile/notify.groovy rename to tests-deprecating/milvus_benchmark/ci/jenkinsfile/notify.groovy diff --git a/tests/milvus_benchmark/ci/main_jenkinsfile b/tests-deprecating/milvus_benchmark/ci/main_jenkinsfile similarity index 100% rename from tests/milvus_benchmark/ci/main_jenkinsfile rename to tests-deprecating/milvus_benchmark/ci/main_jenkinsfile diff --git a/tests/milvus_benchmark/ci/pod_containers/milvus-testframework.yaml b/tests-deprecating/milvus_benchmark/ci/pod_containers/milvus-testframework.yaml similarity 
index 100% rename from tests/milvus_benchmark/ci/pod_containers/milvus-testframework.yaml rename to tests-deprecating/milvus_benchmark/ci/pod_containers/milvus-testframework.yaml diff --git a/tests/milvus_benchmark/ci/publish_jenkinsfile b/tests-deprecating/milvus_benchmark/ci/publish_jenkinsfile similarity index 100% rename from tests/milvus_benchmark/ci/publish_jenkinsfile rename to tests-deprecating/milvus_benchmark/ci/publish_jenkinsfile diff --git a/tests/milvus_benchmark/ci/scripts/yaml_processor.py b/tests-deprecating/milvus_benchmark/ci/scripts/yaml_processor.py similarity index 100% rename from tests/milvus_benchmark/ci/scripts/yaml_processor.py rename to tests-deprecating/milvus_benchmark/ci/scripts/yaml_processor.py diff --git a/tests/milvus_benchmark/milvus_benchmark/__init__.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/__init__.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/__init__.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/__init__.py diff --git a/tests/milvus_benchmark/milvus_benchmark/chaos/__init__.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/__init__.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/chaos/__init__.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/__init__.py diff --git a/tests/milvus_benchmark/milvus_benchmark/chaos/chaos_mesh.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/chaos_mesh.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/chaos/chaos_mesh.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/chaos_mesh.py diff --git a/tests/milvus_benchmark/milvus_benchmark/chaos/chaos_opt.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/chaos_opt.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/chaos/chaos_opt.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/chaos_opt.py diff --git a/tests/milvus_benchmark/milvus_benchmark/chaos/pod-new.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/pod-new.yaml similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/chaos/pod-new.yaml rename to tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/pod-new.yaml diff --git a/tests/milvus_benchmark/milvus_benchmark/chaos/pod.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/pod.yaml similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/chaos/pod.yaml rename to tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/pod.yaml diff --git a/tests/milvus_benchmark/milvus_benchmark/chaos/template/PodChaos.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/template/PodChaos.yaml similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/chaos/template/PodChaos.yaml rename to tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/template/PodChaos.yaml diff --git a/tests/milvus_benchmark/milvus_benchmark/chaos/test.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/test.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/chaos/test.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/test.py diff --git a/tests/milvus_benchmark/milvus_benchmark/chaos/utils.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/utils.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/chaos/utils.py rename to 
tests-deprecating/milvus_benchmark/milvus_benchmark/chaos/utils.py diff --git a/tests/milvus_benchmark/milvus_benchmark/client.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/client.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/client.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/client.py diff --git a/tests/milvus_benchmark/milvus_benchmark/config.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/config.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/config.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/config.py diff --git a/tests/milvus_benchmark/milvus_benchmark/env/__init__.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/env/__init__.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/env/__init__.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/env/__init__.py diff --git a/tests/milvus_benchmark/milvus_benchmark/env/base.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/env/base.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/env/base.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/env/base.py diff --git a/tests/milvus_benchmark/milvus_benchmark/env/docker.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/env/docker.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/env/docker.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/env/docker.py diff --git a/tests/milvus_benchmark/milvus_benchmark/env/helm.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/env/helm.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/env/helm.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/env/helm.py diff --git a/tests/milvus_benchmark/milvus_benchmark/env/helm_utils.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/env/helm_utils.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/env/helm_utils.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/env/helm_utils.py diff --git a/tests/milvus_benchmark/milvus_benchmark/env/local.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/env/local.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/env/local.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/env/local.py diff --git a/tests/milvus_benchmark/milvus_benchmark/logs/log.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/logs/log.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/logs/log.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/logs/log.py diff --git a/tests/milvus_benchmark/milvus_benchmark/logs/logging.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/logs/logging.yaml similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/logs/logging.yaml rename to tests-deprecating/milvus_benchmark/milvus_benchmark/logs/logging.yaml diff --git a/tests/milvus_benchmark/milvus_benchmark/main.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/main.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/main.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/main.py diff --git a/tests/milvus_benchmark/milvus_benchmark/metrics/__init__.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/__init__.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/metrics/__init__.py 
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/__init__.py diff --git a/tests/milvus_benchmark/milvus_benchmark/metrics/api.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/api.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/metrics/api.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/api.py diff --git a/tests/milvus_benchmark/milvus_benchmark/metrics/config.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/config.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/metrics/config.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/config.py diff --git a/tests/milvus_benchmark/milvus_benchmark/metrics/models/__init__.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/__init__.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/metrics/models/__init__.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/__init__.py diff --git a/tests/milvus_benchmark/milvus_benchmark/metrics/models/env.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/env.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/metrics/models/env.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/env.py diff --git a/tests/milvus_benchmark/milvus_benchmark/metrics/models/hardware.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/hardware.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/metrics/models/hardware.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/hardware.py diff --git a/tests/milvus_benchmark/milvus_benchmark/metrics/models/metric.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/metric.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/metrics/models/metric.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/metric.py diff --git a/tests/milvus_benchmark/milvus_benchmark/metrics/models/server.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/server.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/metrics/models/server.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/metrics/models/server.py diff --git a/tests/milvus_benchmark/milvus_benchmark/parser.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/parser.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/parser.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/parser.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/__init__.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/__init__.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/__init__.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/__init__.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/accuracy.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/accuracy.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/accuracy.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/accuracy.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/base.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/base.py similarity index 100% rename from 
tests/milvus_benchmark/milvus_benchmark/runners/base.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/base.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/build.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/build.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/build.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/build.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/chaos.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/chaos.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/chaos.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/chaos.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/docker_runner.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/docker_runner.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/docker_runner.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/docker_runner.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/docker_utils.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/docker_utils.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/docker_utils.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/docker_utils.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/get.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/get.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/get.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/get.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/insert.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/insert.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/insert.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/insert.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/locust.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/locust.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/locust.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/locust.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/locust_file.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/locust_file.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/locust_file.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/locust_file.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/locust_task.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/locust_task.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/locust_task.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/locust_task.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/locust_tasks.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/locust_tasks.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/locust_tasks.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/locust_tasks.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/locust_user.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/locust_user.py similarity index 100% 
rename from tests/milvus_benchmark/milvus_benchmark/runners/locust_user.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/locust_user.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/search.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/search.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/search.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/search.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/test.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/test.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/test.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/test.py diff --git a/tests/milvus_benchmark/milvus_benchmark/runners/utils.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/runners/utils.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/runners/utils.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/runners/utils.py diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler.py similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler.py rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler.py diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/010_data.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/010_data.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/010_data.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/010_data.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/011_data.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/011_data.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/011_data.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/011_data.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/011_data_acc_debug.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/011_data_acc_debug.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/011_data_acc_debug.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/011_data_acc_debug.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/011_data_gpu_build.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/011_data_gpu_build.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/011_data_gpu_build.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/011_data_gpu_build.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/011_data_insert.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/011_data_insert.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/011_data_insert.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/011_data_insert.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/011_delete.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/011_delete.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/011_delete.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/011_delete.json diff --git 
a/tests/milvus_benchmark/milvus_benchmark/scheduler/2_cluster_data.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/2_cluster_data.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/2_cluster_data.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/2_cluster_data.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/2_data.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/2_data.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/2_data.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/2_data.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/acc.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/acc.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/acc.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/acc.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/build.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/build.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/build.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/build.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/clean.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/clean.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/clean.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/clean.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/cluster.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/cluster.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/cluster.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/cluster.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/debug.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/debug.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/debug.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/debug.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/debug1.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/debug1.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/debug1.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/debug1.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/debug2.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/debug2.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/debug2.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/debug2.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/filter.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/filter.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/filter.json rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/filter.json diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/idc.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/idc.json similarity index 100% rename from tests/milvus_benchmark/milvus_benchmark/scheduler/idc.json 
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/idc.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/insert.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/insert.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/insert.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/insert.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/insert2.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/insert2.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/insert2.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/insert2.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/jaccard.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/jaccard.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/jaccard.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/jaccard.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/locust.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/locust.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/locust.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/locust.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/locust_insert.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/locust_insert.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/locust_insert.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/locust_insert.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/locust_mix.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/locust_mix.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/locust_mix.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/locust_mix.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/locust_mix_debug.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/locust_mix_debug.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/locust_mix_debug.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/locust_mix_debug.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/locust_search.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/locust_search.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/locust_search.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/locust_search.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/loop.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/loop.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/loop.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/loop.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/loop_search.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/loop_search.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/loop_search.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/loop_search.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/nlist.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/nlist.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/nlist.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/nlist.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/search.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/search.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/search.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/search.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/search2.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/search2.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/search2.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/search2.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/search_debug.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/search_debug.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/search_debug.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/search_debug.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/shards_ann.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/shards_ann.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/shards_ann.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/shards_ann.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/shards_debug.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/shards_debug.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/shards_debug.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/shards_debug.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/shards_stability.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/shards_stability.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/shards_stability.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/shards_stability.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/scheduler/stability.json b/tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/stability.json
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/scheduler/stability.json
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/scheduler/stability.json
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_add_flush_performance.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_add_flush_performance.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_add_flush_performance.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_add_flush_performance.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_build_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_build_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_build_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_build_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cluster_cpu_accuracy_ann.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cluster_cpu_accuracy_ann.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cluster_cpu_accuracy_ann.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cluster_cpu_accuracy_ann.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_rhnsw.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_rhnsw.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_rhnsw.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_accuracy_rhnsw.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_binary.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_binary.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_binary.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_binary.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_hnsw.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_hnsw.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_hnsw.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_hnsw.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_rhnsw.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_rhnsw.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_rhnsw.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_build_rhnsw.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_binary.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_binary.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_binary.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_binary.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_filter.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_filter.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_filter.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_filter.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_hnsw.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_hnsw.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_hnsw.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_hnsw.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_ivf.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_ivf.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_ivf.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_ivf.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift1b.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift1b.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift1b.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift1b.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift50m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift50m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift50m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_cpu_search_sift50m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_delete_performance.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_delete_performance.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_delete_performance.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_delete_performance.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_accuracy_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_build.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_build.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_build.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_build.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift10m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift10m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift10m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift10m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift1b.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift1b.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift1b.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift1b.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift50m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift50m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift50m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_build_sift50m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_100k.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_100k.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_100k.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_100k.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_filter.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_filter.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_filter.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_filter.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_ivf.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_ivf.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_ivf.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_ivf.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift50m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift50m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift50m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_search_sift50m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_sift50m_ivf.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_sift50m_ivf.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_sift50m_ivf.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_sift50m_ivf.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_stability.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_stability.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_gpu_stability.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_gpu_stability.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_insert_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_insert_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_insert_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_insert_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_insert_performance.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_insert_performance.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_insert_performance.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_insert_performance.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_insert_performance_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_insert_performance_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_insert_performance_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_insert_performance_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_search_dsl.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_search_dsl.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_search_dsl.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_search_dsl.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_search_stability.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_search_stability.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_search_stability.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_search_stability.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_search_threshold.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_search_threshold.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_search_threshold.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_search_threshold.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/011_sift50m_acc.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_sift50m_acc.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/011_sift50m_acc.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/011_sift50m_acc.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_accuracy_ann_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_accuracy_ann_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_accuracy_ann_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_accuracy_ann_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_cpu_accuracy.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_cpu_accuracy.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_cpu_accuracy.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_cpu_accuracy.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_cpu_ann_accuracy.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_cpu_ann_accuracy.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_cpu_ann_accuracy.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_cpu_ann_accuracy.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_cpu_build.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_cpu_build.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_cpu_build.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_cpu_build.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_cpu_search.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_cpu_search.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_cpu_search.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_cpu_search.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_build.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_build.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_build.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_build.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_cluster.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_cluster.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_cluster.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_cluster.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_data.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_data.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_data.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_data.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_get.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_get.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_get.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_get.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_1024.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_1024.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_1024.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_1024.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_2048.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_2048.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_2048.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_2048.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_4096.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_4096.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_4096.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_4096.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_512.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_512.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_512.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift10m_512.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_1024.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_1024.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_1024.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_1024.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_2048.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_2048.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_2048.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_2048.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_4096.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_4096.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_4096.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_4096.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_512.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_512.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_512.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_insert_search_sift50m_512.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_locust_insert.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_insert.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_locust_insert.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_insert.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_locust_insert_5h.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_insert_5h.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_locust_insert_5h.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_insert_5h.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_locust_insert_flush.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_insert_flush.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_locust_insert_flush.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_insert_flush.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_locust_load_insert.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_load_insert.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_locust_load_insert.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_load_insert.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_locust_load_insert_flush.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_load_insert_flush.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_locust_load_insert_flush.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_load_insert_flush.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_locust_random.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_random.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_locust_random.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_random.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_locust_random_load_release.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_random_load_release.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_locust_random_load_release.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_random_load_release.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_locust_search.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_search.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_locust_search.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_search.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_locust_search_5h.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_search_5h.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_locust_search_5h.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_search_5h.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/2_locust_search_index.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_search_index.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/2_locust_search_index.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/2_locust_search_index.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/add_flush_performance.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/add_flush_performance.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/add_flush_performance.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/add_flush_performance.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/ann_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/ann_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/ann_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/ann_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/clean.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/clean.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/clean.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/clean.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cluster_locust_mix.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cluster_locust_mix.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cluster_locust_mix.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cluster_locust_mix.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_hnsw.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_hnsw.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_hnsw.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_hnsw.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_pq.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_pq.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_pq.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_ann_pq.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_nsg.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_nsg.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_nsg.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_accuracy_nsg.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_build_performance.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_build_performance.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_build_performance.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_build_performance.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_search_binary.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_search_binary.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_search_binary.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_search_binary.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_jaccard.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_jaccard.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_jaccard.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_jaccard.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_sift1b.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_sift1b.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_sift1b.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_sift1b.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_sift50m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_sift50m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_sift50m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_search_performance_sift50m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/cpu_stability_sift50m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_stability_sift50m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/cpu_stability_sift50m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/cpu_stability_sift50m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/debug_build.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/debug_build.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/debug_build.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/debug_build.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/debug_gpu_search.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/debug_gpu_search.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/debug_gpu_search.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/debug_gpu_search.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/flush_kill_query_pod.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/flush_kill_query_pod.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/flush_kill_query_pod.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/flush_kill_query_pod.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_ann.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_ann.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_ann.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_ann.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_sift1b.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_sift1b.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_sift1b.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_sift1b.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_sift1m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_sift1m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_sift1m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_accuracy_sift1m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_build_performance_jaccard50m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_build_performance_jaccard50m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_build_performance_jaccard50m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_build_performance_jaccard50m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_build_sift1b_sq8h.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_build_sift1b_sq8h.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_build_sift1b_sq8h.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_build_sift1b_sq8h.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_jaccard50m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_jaccard50m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_jaccard50m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_jaccard50m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_sift1b.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_sift1b.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_sift1b.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_sift1b.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_sift50m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_sift50m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_sift50m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_search_performance_sift50m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/gpu_search_stability.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_search_stability.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/gpu_search_stability.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/gpu_search_stability.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/insert_binary.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/insert_binary.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/insert_binary.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/insert_binary.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/insert_performance_deep1b.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/insert_performance_deep1b.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/insert_performance_deep1b.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/insert_performance_deep1b.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/locust_cluster_search.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/locust_cluster_search.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/locust_cluster_search.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/locust_cluster_search.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/locust_insert.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/locust_insert.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/locust_insert.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/locust_insert.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/locust_search.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/locust_search.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/locust_search.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/locust_search.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/loop_stability.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/loop_stability.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/loop_stability.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/loop_stability.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/metric.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/metric.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/metric.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/metric.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/pq.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/pq.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/pq.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/pq.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/qps.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/qps.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/qps.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/qps.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/search_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/search_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/search_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/search_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/shards_ann_debug.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/shards_ann_debug.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/shards_ann_debug.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/shards_ann_debug.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/shards_insert_performance.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/shards_insert_performance.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/shards_insert_performance.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/shards_insert_performance.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/shards_insert_performance_sift1m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/shards_insert_performance_sift1m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/shards_insert_performance_sift1m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/shards_insert_performance_sift1m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/shards_loop_stability.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/shards_loop_stability.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/shards_loop_stability.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/shards_loop_stability.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/suites/shards_search_performance_sift1m.yaml b/tests-deprecating/milvus_benchmark/milvus_benchmark/suites/shards_search_performance_sift1m.yaml
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/suites/shards_search_performance_sift1m.yaml
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/suites/shards_search_performance_sift1m.yaml
diff --git a/tests/milvus_benchmark/milvus_benchmark/test.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/test.py
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/test.py
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/test.py
diff --git a/tests/milvus_benchmark/milvus_benchmark/tests/locust_user_test.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/tests/locust_user_test.py
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/tests/locust_user_test.py
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/tests/locust_user_test.py
diff --git a/tests/milvus_benchmark/milvus_benchmark/tests/test_scheduler.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/tests/test_scheduler.py
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/tests/test_scheduler.py
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/tests/test_scheduler.py
diff --git a/tests/milvus_benchmark/milvus_benchmark/update.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/update.py
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/update.py
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/update.py
diff --git a/tests/milvus_benchmark/milvus_benchmark/utils.py b/tests-deprecating/milvus_benchmark/milvus_benchmark/utils.py
similarity index 100%
rename from tests/milvus_benchmark/milvus_benchmark/utils.py
rename to tests-deprecating/milvus_benchmark/milvus_benchmark/utils.py
diff --git a/tests/milvus_benchmark/requirements.txt b/tests-deprecating/milvus_benchmark/requirements.txt
similarity index 100%
rename from tests/milvus_benchmark/requirements.txt
rename to tests-deprecating/milvus_benchmark/requirements.txt
diff --git a/tests/python_test/.dockerignore b/tests-deprecating/python_test/.dockerignore
similarity index 100%
rename from tests/python_test/.dockerignore
rename to tests-deprecating/python_test/.dockerignore
diff --git a/tests/python_test/.gitignore b/tests-deprecating/python_test/.gitignore
similarity index 100%
rename from tests/python_test/.gitignore
rename to tests-deprecating/python_test/.gitignore
diff --git a/tests/python_test/README.md b/tests-deprecating/python_test/README.md
similarity index 100%
rename from tests/python_test/README.md
rename to tests-deprecating/python_test/README.md
diff --git a/tests/python_test/collection/test_collection_count.py b/tests-deprecating/python_test/collection/test_collection_count.py
similarity index 100%
rename from tests/python_test/collection/test_collection_count.py
rename to tests-deprecating/python_test/collection/test_collection_count.py
diff --git a/tests/python_test/collection/test_collection_logic.py b/tests-deprecating/python_test/collection/test_collection_logic.py
similarity index 100%
rename from tests/python_test/collection/test_collection_logic.py
rename to tests-deprecating/python_test/collection/test_collection_logic.py
diff --git a/tests/python_test/collection/test_collection_stats.py b/tests-deprecating/python_test/collection/test_collection_stats.py
similarity index 100%
rename from tests/python_test/collection/test_collection_stats.py
rename to tests-deprecating/python_test/collection/test_collection_stats.py
diff --git a/tests/python_test/collection/test_create_collection.py b/tests-deprecating/python_test/collection/test_create_collection.py
similarity index 100%
rename from tests/python_test/collection/test_create_collection.py
rename to tests-deprecating/python_test/collection/test_create_collection.py
diff --git a/tests/python_test/collection/test_describe_collection.py b/tests-deprecating/python_test/collection/test_describe_collection.py
similarity index 100%
rename from tests/python_test/collection/test_describe_collection.py
rename to tests-deprecating/python_test/collection/test_describe_collection.py
diff --git a/tests/python_test/collection/test_drop_collection.py b/tests-deprecating/python_test/collection/test_drop_collection.py
similarity index 100%
rename from tests/python_test/collection/test_drop_collection.py
rename to tests-deprecating/python_test/collection/test_drop_collection.py
diff --git a/tests/python_test/collection/test_has_collection.py b/tests-deprecating/python_test/collection/test_has_collection.py
similarity index 100%
rename from tests/python_test/collection/test_has_collection.py
rename to tests-deprecating/python_test/collection/test_has_collection.py
diff --git a/tests/python_test/collection/test_list_collections.py b/tests-deprecating/python_test/collection/test_list_collections.py
similarity index 100%
rename from tests/python_test/collection/test_list_collections.py
rename to tests-deprecating/python_test/collection/test_list_collections.py
diff --git a/tests/python_test/collection/test_load_collection.py b/tests-deprecating/python_test/collection/test_load_collection.py
similarity index 100%
rename from tests/python_test/collection/test_load_collection.py
rename to tests-deprecating/python_test/collection/test_load_collection.py
diff --git a/tests/python_test/conftest.py b/tests-deprecating/python_test/conftest.py
similarity index 100%
rename from tests/python_test/conftest.py
rename to tests-deprecating/python_test/conftest.py
diff --git a/tests/python_test/constants.py b/tests-deprecating/python_test/constants.py
similarity index 100%
rename from tests/python_test/constants.py
rename to tests-deprecating/python_test/constants.py
diff --git a/tests/python_test/entity/test_delete.py b/tests-deprecating/python_test/entity/test_delete.py
similarity index 100%
rename from tests/python_test/entity/test_delete.py
rename to tests-deprecating/python_test/entity/test_delete.py
diff --git a/tests/python_test/entity/test_get_entity_by_id.py b/tests-deprecating/python_test/entity/test_get_entity_by_id.py
similarity index 100%
rename from tests/python_test/entity/test_get_entity_by_id.py
rename to tests-deprecating/python_test/entity/test_get_entity_by_id.py
diff --git a/tests/python_test/entity/test_insert.py b/tests-deprecating/python_test/entity/test_insert.py
similarity index 100%
rename from tests/python_test/entity/test_insert.py
rename to tests-deprecating/python_test/entity/test_insert.py
diff --git a/tests/python_test/entity/test_list_id_in_segment.py b/tests-deprecating/python_test/entity/test_list_id_in_segment.py
similarity index 100%
rename from tests/python_test/entity/test_list_id_in_segment.py
rename to tests-deprecating/python_test/entity/test_list_id_in_segment.py
diff --git a/tests/python_test/entity/test_query.py b/tests-deprecating/python_test/entity/test_query.py
similarity index 100%
rename from tests/python_test/entity/test_query.py
rename to tests-deprecating/python_test/entity/test_query.py
diff --git a/tests/python_test/entity/test_search.py b/tests-deprecating/python_test/entity/test_search.py
similarity index 100%
rename from tests/python_test/entity/test_search.py
rename to tests-deprecating/python_test/entity/test_search.py
diff --git a/tests/python_test/pytest.ini b/tests-deprecating/python_test/pytest.ini
similarity index 99%
rename from tests/python_test/pytest.ini
rename to tests-deprecating/python_test/pytest.ini
index 537ace219f..7460c51bbf 100644
--- a/tests/python_test/pytest.ini
+++ b/tests-deprecating/python_test/pytest.ini
@@ -1,4 +1,6 @@
 [pytest]
+
+
 log_format = [%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)
 log_date_format = %Y-%m-%d %H:%M:%S
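(Editorial note, illustrative only: with the log_format above, a captured pytest log line renders along these lines, where the timestamp shape comes from log_date_format and the logger name, message, file, and line number are made-up placeholders:

    [2021-08-20 11:00:56-INFO-ci_test]: create collection binary_collection (test_create_collection.py:35)
)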
diff --git a/tests20/python_client/requirements.txt b/tests-deprecating/python_test/requirements.txt
similarity index 93%
rename from tests20/python_client/requirements.txt
rename to tests-deprecating/python_test/requirements.txt
index 3d97ebf1db..f5099fb345 100644
--- a/tests20/python_client/requirements.txt
+++ b/tests-deprecating/python_test/requirements.txt
@@ -1,6 +1,6 @@
 --extra-index-url https://test.pypi.org/simple/
-grpcio==1.26.0
-grpcio-tools==1.26.0
+grpcio==1.37.1
+grpcio-tools==1.37.1
 numpy==1.19.5
 pytest-cov==2.8.1
 sklearn==0.0
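(Editorial note: grpcio and grpcio-tools are bumped in lockstep above; the two packages are conventionally pinned to the same version so that the code generator and the runtime agree. A hedged way to sanity-check the new pins in a throwaway environment, assuming the renamed file path:

    python3 -m venv /tmp/req-check && . /tmp/req-check/bin/activate
    pip install -r tests-deprecating/python_test/requirements.txt
    python -c "import grpc; print(grpc.__version__)"   # expect 1.37.1
)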
diff --git a/tests/python_test/run.sh b/tests-deprecating/python_test/run.sh
similarity index 100%
rename from tests/python_test/run.sh
rename to tests-deprecating/python_test/run.sh
diff --git a/tests/python_test/stability/test_mysql.py b/tests-deprecating/python_test/stability/test_mysql.py
similarity index 100%
rename from tests/python_test/stability/test_mysql.py
rename to tests-deprecating/python_test/stability/test_mysql.py
diff --git a/tests/python_test/stability/test_restart.py b/tests-deprecating/python_test/stability/test_restart.py
similarity index 100%
rename from tests/python_test/stability/test_restart.py
rename to tests-deprecating/python_test/stability/test_restart.py
diff --git a/tests/python_test/test_compact.py b/tests-deprecating/python_test/test_compact.py
similarity index 100%
rename from tests/python_test/test_compact.py
rename to tests-deprecating/python_test/test_compact.py
diff --git a/tests/python_test/test_config.py b/tests-deprecating/python_test/test_config.py
similarity index 100%
rename from tests/python_test/test_config.py
rename to tests-deprecating/python_test/test_config.py
diff --git a/tests/python_test/test_connect.py b/tests-deprecating/python_test/test_connect.py
similarity index 100%
rename from tests/python_test/test_connect.py
rename to tests-deprecating/python_test/test_connect.py
diff --git a/tests/python_test/test_flush.py b/tests-deprecating/python_test/test_flush.py
similarity index 100%
rename from tests/python_test/test_flush.py
rename to tests-deprecating/python_test/test_flush.py
diff --git a/tests/python_test/test_index.py b/tests-deprecating/python_test/test_index.py
similarity index 100%
rename from tests/python_test/test_index.py
rename to tests-deprecating/python_test/test_index.py
diff --git a/tests/python_test/test_mix.py b/tests-deprecating/python_test/test_mix.py
similarity index 100%
rename from tests/python_test/test_mix.py
rename to tests-deprecating/python_test/test_mix.py
diff --git a/tests/python_test/test_partition.py b/tests-deprecating/python_test/test_partition.py
similarity index 100%
rename from tests/python_test/test_partition.py
rename to tests-deprecating/python_test/test_partition.py
diff --git a/tests/python_test/test_ping.py b/tests-deprecating/python_test/test_ping.py
similarity index 100%
rename from tests/python_test/test_ping.py
rename to tests-deprecating/python_test/test_ping.py
diff --git a/tests/python_test/utils.py b/tests-deprecating/python_test/utils.py
similarity index 100%
rename from tests/python_test/utils.py
rename to tests-deprecating/python_test/utils.py
diff --git a/tests-deprecating/scripts/e2e-k8s.sh b/tests-deprecating/scripts/e2e-k8s.sh
new file mode 100755
index 0000000000..467e2d671c
--- /dev/null
+++ b/tests-deprecating/scripts/e2e-k8s.sh
@@ -0,0 +1,317 @@
+#!/bin/bash
+
+# Copyright (C) 2019-2020 Zilliz. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License
+# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+# or implied. See the License for the specific language governing permissions and limitations under the License.
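+
+# Example invocations (editorial sketch added for review, not part of the
+# original change; flag behavior is documented in the --help text below, and
+# paths are relative to the repository root):
+#   ./tests-deprecating/scripts/e2e-k8s.sh                                  # full run: KinD setup, build, install, e2e test
+#   ./tests-deprecating/scripts/e2e-k8s.sh --skip-build --skip-build-image  # reuse previously built images
+#   ./tests-deprecating/scripts/e2e-k8s.sh --manual                         # keep the cluster alive afterwards for debugging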
+
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
+  DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
+  SOURCE="$(readlink "$SOURCE")"
+  [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
+done
+ROOT="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )"
+
+# Exit immediately on non-zero status
+set -e
+# Error on unset variables
+set -u
+# Print commands as they run
+set -x
+
+# shellcheck source=build/lib.sh
+source "${ROOT}/build/lib.sh"
+setup_and_export_git_sha
+
+# shellcheck source=build/kind_provisioner.sh
+source "${ROOT}/build/kind_provisioner.sh"
+
+TOPOLOGY=SINGLE_CLUSTER
+NODE_IMAGE="kindest/node:v1.20.2"
+KIND_CONFIG=""
+INSTALL_EXTRA_ARG=""
+TEST_EXTRA_ARG=""
+CLUSTER_TOPOLOGY_CONFIG_FILE="${ROOT}/build/config/topology/multicluster.json"
+
+while (( "$#" )); do
+  case "$1" in
+    # Node images can be found at https://github.com/kubernetes-sigs/kind/releases
+    # For example, kindest/node:v1.14.0
+    --node-image)
+      NODE_IMAGE=$2
+      shift 2
+    ;;
+    # Config for enabling different Kubernetes features in KinD (see build/config/topology/trustworthy-jwt.yaml).
+    --kind-config)
+      KIND_CONFIG=$2
+      shift 2
+    ;;
+    --build-command)
+      BUILD_COMMAND=$2
+      shift 2
+    ;;
+    --install-extra-arg)
+      INSTALL_EXTRA_ARG=$2
+      shift 2
+    ;;
+    --test-extra-arg)
+      TEST_EXTRA_ARG=$2
+      shift 2
+    ;;
+    --test-timeout)
+      TEST_TIMEOUT=$2
+      shift 2
+    ;;
+    --skip-setup)
+      SKIP_SETUP=true
+      shift
+    ;;
+    --skip-install)
+      SKIP_INSTALL=true
+      shift
+    ;;
+    --skip-cleanup)
+      SKIP_CLEANUP=true
+      shift
+    ;;
+    --skip-build)
+      SKIP_BUILD=true
+      shift
+    ;;
+    --skip-build-image)
+      SKIP_BUILD_IMAGE=true
+      shift
+    ;;
+    --skip-test)
+      SKIP_TEST=true
+      shift
+    ;;
+    --skip-export-logs)
+      SKIP_EXPORT_LOGS=true
+      shift
+    ;;
+    --manual)
+      MANUAL=true
+      shift
+    ;;
+    --topology)
+      case $2 in
+        SINGLE_CLUSTER | MULTICLUSTER_SINGLE_NETWORK | MULTICLUSTER )
+          TOPOLOGY=$2
+          echo "Running with topology ${TOPOLOGY}"
+          ;;
+        *)
+          echo "Error: Unsupported topology $2" >&2
+          exit 1
+          ;;
+      esac
+      shift 2
+    ;;
+    --topology-config)
+      CLUSTER_TOPOLOGY_CONFIG_FILE="${ROOT}/${2}"
+      shift 2
+    ;;
+    -h|--help)
+      { set +x; } 2>/dev/null
+      HELP="
+Usage:
+  $0 [flags] [Arguments]
+
+    --node-image            Kubernetes in Docker (KinD) node image
+                            The image is a Docker image for running nested containers, systemd, and Kubernetes components.
+                            Node images can be found at https://github.com/kubernetes-sigs/kind/releases.
+                            Default: \"kindest/node:v1.20.2\"
+
+    --kind-config           Config for enabling different Kubernetes features in KinD
+
+    --build-command         Specify the command used to build Milvus
+
+    --install-extra-arg     Extra configuration for the Milvus Helm Chart install (see https://github.com/zilliztech/milvus-helm-charts/blob/main/charts/milvus-ha/values.yaml)
+                            To override values in a chart, use either the '--values' flag and pass in a file, or use the '--set' flag and pass configuration from the command line; to force a string value, use '--set-string'.
+                            See: https://helm.sh/docs/helm/helm_install/#helm-install
+
+    --test-extra-arg        Extra configuration for the e2e test run
+                            For example, \"--tag=smoke\"
+
+    --test-timeout          Timeout period of the e2e test, specified in seconds
+
+
+Use \"$0 --help\" for more information about a given command.
+"
+      echo -e "${HELP}" ; exit 0
+    ;;
+    -*)
+      echo "Error: Unsupported flag $1" >&2
+      exit 1
+    ;;
+    *) # preserve positional arguments
+      PARAMS+=("$1")
+      shift
+    ;;
+  esac
+done
+
+export BUILD_COMMAND="${BUILD_COMMAND:-make install}"
+
+export MANUAL="${MANUAL:-}"
+
+# Default IP family of the cluster is IPv4
+export IP_FAMILY="${IP_FAMILY:-ipv4}"
+
+# A plain KinD cluster has no LoadBalancer; metallb can provide one, but only
+# for non-IPv6 clusters, so choose the test environment accordingly.
+export TEST_ENV=kind
+if [ "${IP_FAMILY}" != "ipv6" ]; then
+  export TEST_ENV=kind-metallb
+fi
+
+# See https://kind.sigs.k8s.io/docs/user/quick-start/#loading-an-image-into-your-cluster
+export PULL_POLICY=IfNotPresent
+
+# We run a local registry in a docker container that KinD nodes pull from
+# These values must match what is in config/trustworthy-jwt.yaml
+export KIND_REGISTRY_NAME="kind-registry"
+export KIND_REGISTRY_PORT="5000"
+export KIND_REGISTRY="localhost:${KIND_REGISTRY_PORT}"
+
+export ARTIFACTS="${ARTIFACTS:-$(mktemp -d)}"
+export SINGLE_CLUSTER_NAME="${SINGLE_CLUSTER_NAME:-kind}"
+
+export HUB="${HUB:-milvusdb}"
+export TAG="${TAG:-latest}"
+
+export CI="true"
+
+if [[ ! -d "${ARTIFACTS}" ]];then
+  mkdir -p "${ARTIFACTS}"
+fi
+
+if [[ ! -x "$(command -v kind)" ]]; then
+  KIND_DIR="${KIND_DIR:-"${HOME}/tool_cache/kind"}"
+  KIND_VERSION="v0.11.1"
+
+  export PATH="${KIND_DIR}:${PATH}"
+  if [[ ! -x "$(command -v kind)" ]]; then
+    install_kind "${KIND_DIR}" "${KIND_VERSION}"
+  fi
+fi
+
+if [[ ! -x "$(command -v kubectl)" ]]; then
+  KUBECTL_DIR="${KUBECTL_DIR:-"${HOME}/tool_cache/kubectl"}"
+  KUBECTL_VERSION="v1.20.2"
+
+  export PATH="${KUBECTL_DIR}:${PATH}"
+  if [[ ! -x "$(command -v kubectl)" ]]; then
+    install_kubectl "${KUBECTL_DIR}" "${KUBECTL_VERSION}"
+  fi
+fi
+
+if [[ ! -x "$(command -v helm)" ]]; then
+  HELM_DIR="${HELM_DIR:-"${HOME}/tool_cache/helm"}"
+  HELM_VERSION="v3.5.4"
+
+  export PATH="${HELM_DIR}:${PATH}"
-x "$(command -v helm)" ]]; then + install_helm "${HELM_DIR}" "${HELM_VERSION}" + fi +fi + +if [[ -z "${SKIP_SETUP:-}" ]]; then + export DEFAULT_CLUSTER_YAML="${ROOT}/build/config/topology/trustworthy-jwt.yaml" + export METRICS_SERVER_CONFIG_DIR="${ROOT}/build/config/metrics" + + if [[ "${TOPOLOGY}" == "SINGLE_CLUSTER" ]]; then + trace "setup kind cluster" setup_kind_cluster "${SINGLE_CLUSTER_NAME}" "${NODE_IMAGE}" "${KIND_CONFIG}" + else + trace "load cluster topology" load_cluster_topology "${CLUSTER_TOPOLOGY_CONFIG_FILE}" + trace "setup kind clusters" setup_kind_clusters "${NODE_IMAGE}" "${IP_FAMILY}" + + TOPOLOGY_JSON=$(cat "${CLUSTER_TOPOLOGY_CONFIG_FILE}") + for i in $(seq 0 $((${#CLUSTER_NAMES[@]} - 1))); do + CLUSTER="${CLUSTER_NAMES[i]}" + KCONFIG="${KUBECONFIGS[i]}" + TOPOLOGY_JSON=$(set_topology_value "${TOPOLOGY_JSON}" "${CLUSTER}" "meta.kubeconfig" "${KCONFIG}") + done + RUNTIME_TOPOLOGY_CONFIG_FILE="${ARTIFACTS}/topology-config.json" + echo "${TOPOLOGY_JSON}" > "${RUNTIME_TOPOLOGY_CONFIG_FILE}" + + export INTEGRATION_TEST_TOPOLOGY_FILE + INTEGRATION_TEST_TOPOLOGY_FILE="${RUNTIME_TOPOLOGY_CONFIG_FILE}" + + export INTEGRATION_TEST_KUBECONFIG + INTEGRATION_TEST_KUBECONFIG=NONE + fi +fi + +if [[ -z "${SKIP_BUILD:-}" ]]; then + trace "setup kind registry" setup_kind_registry + pushd "${ROOT}" + trace "build milvus" "${ROOT}/build/builder.sh" /bin/bash -c "${BUILD_COMMAND}" + popd +fi + +if [[ -z "${SKIP_BUILD_IMAGE:-}" ]]; then + # If we're not intending to pull from an actual remote registry, use the local kind registry + running="$(docker inspect -f '{{.State.Running}}' "${KIND_REGISTRY_NAME}" 2>/dev/null || true)" + if [[ "${running}" == 'true' ]]; then + HUB="${KIND_REGISTRY}" + export HUB + fi + export MILVUS_IMAGE_REPO="${HUB}/milvus" + export MILVUS_IMAGE_TAG="${TAG}" + + pushd "${ROOT}" + trace "build milvus image" "${ROOT}/build/build_image.sh" + trace "push milvus image" docker push "${MILVUS_IMAGE_REPO}:${MILVUS_IMAGE_TAG}" + popd +fi + +if [[ -z "${SKIP_INSTALL:-}" ]]; then + trace "install milvus helm chart" "${ROOT}/tests/scripts/install_milvus.sh" "${INSTALL_EXTRA_ARG}" +fi + +if [[ -z "${SKIP_TEST:-}" ]]; then + if [[ -n "${TEST_TIMEOUT:-}" ]]; then + trace "test" "timeout" "-v" "${TEST_TIMEOUT}" "${ROOT}/tests/scripts/e2e.sh" "${TEST_EXTRA_ARG}" + else + trace "test" "${ROOT}/tests/scripts/e2e.sh" "${TEST_EXTRA_ARG}" + fi +fi + +# Check if the user is running the clusters in manual mode. +if [[ -n "${MANUAL:-}" ]]; then + echo "Running cluster(s) in manual mode. Press any key to shutdown and exit..." + read -rsn1 + exit 0 +fi diff --git a/tests-deprecating/scripts/e2e.sh b/tests-deprecating/scripts/e2e.sh new file mode 100755 index 0000000000..92b9d62262 --- /dev/null +++ b/tests-deprecating/scripts/e2e.sh @@ -0,0 +1,83 @@ +#!/bin/bash + +# Copyright (C) 2019-2020 Zilliz. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under the License +# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +# or implied. See the License for the specific language governing permissions and limitations under the License. 
+ +set -e +set -x + +MILVUS_HELM_RELEASE_NAME="${MILVUS_HELM_RELEASE_NAME:-milvus-testing}" +MILVUS_CLUSTER_ENABLED="${MILVUS_CLUSTER_ENABLED:-false}" +MILVUS_HELM_NAMESPACE="${MILVUS_HELM_NAMESPACE:-default}" +PARALLEL_NUM="${PARALLEL_NUM:-3}" +MILVUS_CLIENT="${MILVUS_CLIENT:-pymilvus}" + +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink + DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + SOURCE="$(readlink "$SOURCE")" + [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located +done +ROOT="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )" + +if [[ "${TEST_ENV:-}" =~ ^kind* ]]; then + if [[ "${MILVUS_CLUSTER_ENABLED}" == "false" ]]; then + MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME},component=standalone" + else + MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME},component=proxy" + fi + + SERVICE_TYPE=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.type}') + + if [[ "${SERVICE_TYPE}" == "LoadBalancer" ]]; then + MILVUS_SERVICE_IP=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].status.loadBalancer.ingress[0].ip}') + MILVUS_SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].port}') + elif [[ "${SERVICE_TYPE}" == "NodePort" ]]; then + MILVUS_SERVICE_IP=$(kubectl get nodes --namespace "${MILVUS_HELM_NAMESPACE}" -o jsonpath='{.items[0].status.addresses[0].address}') + MILVUS_SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].nodePort}') + else + MILVUS_SERVICE_IP="127.0.0.1" + POD_NAME=$(kubectl get pods --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].metadata.name}') + MILVUS_SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].port}') + kubectl --namespace "${MILVUS_HELM_NAMESPACE}" port-forward "${POD_NAME}" "${MILVUS_SERVICE_PORT}" & + PORT_FORWARD_PID=$! 
+    trap "kill -TERM ${PORT_FORWARD_PID}" EXIT
+  fi
+fi
+
+pushd "${ROOT}/tests/docker"
+  docker-compose pull --ignore-pull-failures pytest
+  if [[ -z "${SKIP_CHECK_PYTEST_ENV:-}" ]]; then
+    docker-compose build pytest
+  fi
+
+  if [[ "${TEST_ENV:-}" =~ ^kind* ]]; then
+    export PRE_EXIST_NETWORK="true"
+    export PYTEST_NETWORK="kind"
+  fi
+
+  export MILVUS_SERVICE_IP="${MILVUS_SERVICE_IP:-127.0.0.1}"
+  export MILVUS_SERVICE_PORT="${MILVUS_SERVICE_PORT:-19530}"
+
+  if [[ "${MANUAL:-}" == "true" ]]; then
+    docker-compose up -d
+  else
+    if [[ "${MILVUS_CLIENT}" == "pymilvus" ]]; then
+      export MILVUS_PYTEST_WORKSPACE="/milvus/tests/python_test"
+      docker-compose run --rm pytest /bin/bash -c "pytest -n ${PARALLEL_NUM} --ip ${MILVUS_SERVICE_IP} \
+        --port ${MILVUS_SERVICE_PORT} ${@:-} -x"
+    elif [[ "${MILVUS_CLIENT}" == "pymilvus-orm" ]]; then
+      export MILVUS_PYTEST_WORKSPACE="/milvus/tests20/python_client"
+      docker-compose run --rm pytest /bin/bash -c "pytest -n ${PARALLEL_NUM} --host ${MILVUS_SERVICE_IP} --port ${MILVUS_SERVICE_PORT} \
+        --html=\${CI_LOG_PATH}/report.html --self-contained-html ${@:-} -x"
+    fi
+  fi
+popd
diff --git a/tests-deprecating/scripts/export_logs.sh b/tests-deprecating/scripts/export_logs.sh
new file mode 100755
index 0000000000..a8fe65b014
--- /dev/null
+++ b/tests-deprecating/scripts/export_logs.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# Copyright (C) 2019-2020 Zilliz. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License
+# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+# or implied. See the License for the specific language governing permissions and limitations under the License.
+
+set -e
+set -x
+
+ARTIFACTS="${ARTIFACTS:-$(mktemp -d)}"
+
+KIND_NAME="${1:-kind}"
+
+mkdir -p "${ARTIFACTS}/${KIND_NAME}"
+
+for node in $(kind get nodes --name="${KIND_NAME}" | tr -s '\n' ' ')
+do
+  docker cp "${node}:/var/log" "${ARTIFACTS}/${KIND_NAME}"
+done
+
+echo "Exported logs for cluster \"${KIND_NAME}\" to: \"${ARTIFACTS}/${KIND_NAME}\""
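+
+# Example (illustrative): export the logs of the default "kind" cluster into a
+# local directory instead of a temp dir:
+#   ARTIFACTS=./artifacts ./export_logs.sh kind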
diff --git a/tests-deprecating/scripts/install_milvus.sh b/tests-deprecating/scripts/install_milvus.sh
new file mode 100755
index 0000000000..a36fe6c37a
--- /dev/null
+++ b/tests-deprecating/scripts/install_milvus.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+# Copyright (C) 2019-2020 Zilliz. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License
+# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+# or implied. See the License for the specific language governing permissions and limitations under the License.
+
+set -e
+set -x
+
+MILVUS_HELM_REPO="https://github.com/milvus-io/milvus-helm.git"
+MILVUS_HELM_RELEASE_NAME="${MILVUS_HELM_RELEASE_NAME:-milvus-testing}"
+MILVUS_CLUSTER_ENABLED="${MILVUS_CLUSTER_ENABLED:-false}"
+MILVUS_IMAGE_REPO="${MILVUS_IMAGE_REPO:-milvusdb/milvus}"
+MILVUS_IMAGE_TAG="${MILVUS_IMAGE_TAG:-latest}"
+MILVUS_HELM_NAMESPACE="${MILVUS_HELM_NAMESPACE:-default}"
+MILVUS_INSTALL_TIMEOUT="${MILVUS_INSTALL_TIMEOUT:-300s}"
+
+# Delete any previous Milvus cluster and its PVCs
+echo "Deleting previous Milvus cluster with name=${MILVUS_HELM_RELEASE_NAME}"
+if ! (helm uninstall -n "${MILVUS_HELM_NAMESPACE}" "${MILVUS_HELM_RELEASE_NAME}" > /dev/null 2>&1); then
+  echo "No existing Milvus cluster with name ${MILVUS_HELM_RELEASE_NAME}. Continuing..."
+else
+  MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME}"
+  kubectl delete pvc -n "${MILVUS_HELM_NAMESPACE}" $(kubectl get pvc -n "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{range .items[*]}{.metadata.name} {end}') || true
+fi
+
+if [[ "${TEST_ENV}" == "kind-metallb" ]]; then
+  MILVUS_SERVICE_TYPE="${MILVUS_SERVICE_TYPE:-LoadBalancer}"
+else
+  MILVUS_SERVICE_TYPE="${MILVUS_SERVICE_TYPE:-ClusterIP}"
+fi
+
+if [[ ! -d "${MILVUS_HELM_CHART_PATH:-}" ]]; then
+  TMP_DIR="$(mktemp -d)"
+  git clone --depth=1 -b "${MILVUS_HELM_BRANCH:-master}" "${MILVUS_HELM_REPO}" "${TMP_DIR}"
+  MILVUS_HELM_CHART_PATH="${TMP_DIR}/charts/milvus"
+fi
+
+kubectl create namespace "${MILVUS_HELM_NAMESPACE}" > /dev/null 2>&1 || true
+
+helm install --wait --timeout "${MILVUS_INSTALL_TIMEOUT}" \
+  --set image.all.repository="${MILVUS_IMAGE_REPO}" \
+  --set image.all.tag="${MILVUS_IMAGE_TAG}" \
+  --set image.all.pullPolicy="${MILVUS_PULL_POLICY:-Always}" \
+  --set cluster.enabled="${MILVUS_CLUSTER_ENABLED}" \
+  --set service.type="${MILVUS_SERVICE_TYPE}" \
+  --namespace "${MILVUS_HELM_NAMESPACE}" \
+  "${MILVUS_HELM_RELEASE_NAME}" \
+  ${@:-} "${MILVUS_HELM_CHART_PATH}"
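+
+# Example (illustrative): install a standalone Milvus using an image pushed to
+# the local kind registry; any extra arguments are passed through to `helm install`:
+#   MILVUS_IMAGE_REPO="localhost:5000/milvus" MILVUS_IMAGE_TAG="latest" \
+#     ./install_milvus.sh --set image.all.pullPolicy=IfNotPresent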
diff --git a/tests-deprecating/scripts/uninstall_milvus.sh b/tests-deprecating/scripts/uninstall_milvus.sh
new file mode 100755
index 0000000000..05f096cd32
--- /dev/null
+++ b/tests-deprecating/scripts/uninstall_milvus.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Copyright (C) 2019-2020 Zilliz. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License
+# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+# or implied. See the License for the specific language governing permissions and limitations under the License.
+
+set -e
+set -x
+
+MILVUS_HELM_RELEASE_NAME="${MILVUS_HELM_RELEASE_NAME:-milvus-testing}"
+MILVUS_HELM_NAMESPACE="${MILVUS_HELM_NAMESPACE:-default}"
+
+helm uninstall -n "${MILVUS_HELM_NAMESPACE}" "${MILVUS_HELM_RELEASE_NAME}"
+
+MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME}"
+kubectl delete pvc -n "${MILVUS_HELM_NAMESPACE}" $(kubectl get pvc -n "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{range .items[*]}{.metadata.name} {end}')
diff --git a/tests/benchmark/.gitignore b/tests/benchmark/.gitignore
new file mode 100644
index 0000000000..efef170b2f
--- /dev/null
+++ b/tests/benchmark/.gitignore
@@ -0,0 +1,13 @@
+random_data
+benchmark_logs/
+db/
+*idmap*.txt
+__pycache__/
+venv
+.idea
+nohup.out
+
+*.swp
+*.swo
+.DS_Store
+.vscode
diff --git a/tests/benchmark/Dockerfile b/tests/benchmark/Dockerfile
new file mode 100644
index 0000000000..08b14b96b1
--- /dev/null
+++ b/tests/benchmark/Dockerfile
@@ -0,0 +1,30 @@
+# Copyright (C) 2019-2020 Zilliz. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License
+# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+# or implied. See the License for the specific language governing permissions and limitations under the License.
+
+FROM python:3.6.8-jessie
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+RUN apt-get update && apt-get install -y --no-install-recommends wget apt-transport-https && \
+    wget -qO- "https://get.helm.sh/helm-v3.6.1-linux-amd64.tar.gz" | tar --strip-components=1 -xz -C /usr/local/bin linux-amd64/helm && \
+    wget -P /tmp https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg && \
+    apt-key add /tmp/apt-key.gpg && \
+    sh -c 'echo deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main > /etc/apt/sources.list.d/kubernetes.list' && \
+    apt-get update && apt-get install -y --no-install-recommends \
+    build-essential kubectl && \
+    apt-get remove --purge -y && \
+    rm -rf /var/lib/apt/lists/*
+
+COPY requirements.txt /requirements.txt
+
+RUN python3 -m pip install --no-cache-dir -r /requirements.txt
+
+WORKDIR /root
diff --git a/tests/benchmark/README.md b/tests/benchmark/README.md
new file mode 100644
index 0000000000..37c836b1e1
--- /dev/null
+++ b/tests/benchmark/README.md
@@ -0,0 +1,184 @@
+`milvus_benchmark` is a non-functional testing tool or service which allows users to run tests on a k8s cluster or locally; the primary use case is performance/load/stability testing, and the objective is to expose problems in the Milvus project.
+
+## Quick start
+
+### Description
+
+- Test cases in `milvus_benchmark` can be organized with `yaml`
+- Tests can run in local mode or helm mode
+   - local: install and start your local server, then pass the host/port params when starting the tests
+   - helm: install the server by helm, which will manage Milvus in a k8s cluster, and you can integrate the test stage into an argo workflow or jenkins pipeline
+
+### Usage

+- Using jenkins:
+  Use `ci/main_jenkinsfile` as the jenkins pipeline file
+- Using argo:
+  example argo workflow yaml configuration: `ci/argo.yaml`
+- Local test:
+
+  1. set PYTHONPATH:
+
+     `export PYTHONPATH=/your/project/path/milvus_benchmark`
+
+  2. prepare data:
+
+     if you need to use the sift/deep dataset as the raw data input, mount the NAS and update `RAW_DATA_DIR` in `config.py`; an example mount command:
+
+     `sudo mount -t cifs -o username=test,vers=1.0 //172.16.70.249/test /test`
+
+  3. install requirements:
+
+     `pip install -r requirements.txt`
+
+  4. write a test yaml and run with the yaml as the param:
+
+     `cd milvus-benchmark/ && python main.py --local --host=* --port=19530 --suite=suites/2_insert_data.yaml`
+
+### Test suite
+
+#### Description
+
+A test suite yaml defines the test process; users need to add a test suite yaml when adding a customized test to the current test framework.
+
+#### Example
+
+Take the test file `2_insert_data.yaml` as an example:
+```
+insert_performance:
+  collections:
+    -
+      milvus:
+        db_config.primary_path: /test/milvus/db_data_2/cluster/sift_1m_128_l2
+        wal_enable: true
+      collection_name: sift_1m_128_l2
+      ni_per: 50000
+      build_index: false
+      index_type: ivf_sq8
+      index_param:
+        nlist: 1024
+```
+- `insert_performance`
+
+   The top level is the runner type; the other test types include `search_performance/build_performance/insert_performance/accuracy/locust_insert/...`, and each test type corresponds to a different runner component defined in the directory `runners`
+
+- other fields under the runner type
+
+   The other parts of the test yaml are the params passed to the runner, such as:
+   - The field `collection_name` determines which kind of collection will be created in milvus
+   - The field `ni_per` specifies the batch size
+   - The field `build_index` indicates whether to create the index during inserting
+
+When using an argo workflow as the benchmark pipeline, the test suite is made of both a `server` and a `client` configmap; an example looks like this:
+
+`server`
+```
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: server-cluster-8c16m
+  namespace: qa
+  uid: 3752f85c-c840-40c6-a5db-ae44146ad8b5
+  resourceVersion: '42213135'
+  creationTimestamp: '2021-05-14T07:00:53Z'
+  managedFields:
+    - manager: dashboard
+      operation: Update
+      apiVersion: v1
+      time: '2021-05-14T07:00:53Z'
+      fieldsType: FieldsV1
+      fieldsV1:
+        'f:data':
+          .: {}
+          'f:config.yaml': {}
+data:
+  config.yaml: |
+    server:
+      server_tag: "8c16m"
+    milvus:
+      deploy_mode: "cluster"
+```
+`client`
+```
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: client-insert-batch-1000
+  namespace: qa
+  uid: 8604c277-f00f-47c7-8fcb-9b3bc97efa74
+  resourceVersion: '42988547'
+  creationTimestamp: '2021-07-09T08:33:02Z'
+  managedFields:
+    - manager: dashboard
+      operation: Update
+      apiVersion: v1
+      fieldsType: FieldsV1
+      fieldsV1:
+        'f:data':
+          .: {}
+          'f:config.yaml': {}
+data:
+  config.yaml: |
+    insert_performance:
+      collections:
+        -
+          milvus:
+            wal_enable: true
+          collection_name: sift_1m_128_l2
+          ni_per: 1000
+          build_index: false
+          index_type: ivf_sq8
+          index_param:
+            nlist: 1024
+```
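+
+For reference, such a configmap can also be created from a local suite file with plain `kubectl`; the name and namespace below match the `client` example above:
+
+```
+kubectl create configmap client-insert-batch-1000 --namespace=qa --from-file=config.yaml
+```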
+
+## Overview of the benchmark
+
+### Components
+
+- `main.py`
+
+   The entry file: parses the input params and initializes the other components: `metric`, `env`, `runner`
+
+- `metric`
+
+   The test results can be used to analyze regressions or improvements in the milvus system, so we upload the metrics of the test result when a test suite run finishes, and then use `redash` to make sense of our data
+
+- `db`
+
+   Currently we use `mongodb` to store the test results
+
+- `env`
+
+   The `env` component defines the server environment and environment management; an instance of `env` corresponds to the run mode of the benchmark
+
+   - `local`: Only defines the host and port for testing
+
+   - `helm/docker`: Installs and uninstalls the server in the benchmark stage
+
+- `runner`
+
+   The actual executor in the benchmark; each test type defined in a test suite generates a corresponding runner instance, and there are three stages in `runner`:
+
+   - `extract_cases`: There are several test cases defined in each test suite yaml, and each case shares the same server environment and the same `prepare` stage, but the `metric` for each case is different, so we need to extract the cases from the test suite before the cases run
+
+   - `prepare`: Prepare the data and operations; for example, before running a search, the index needs to be created and the data needs to be loaded
+
+   - `run_case`: Do the core operation and set the `metric` value
+
+- `suites`: There are two ways to take the content to be tested as input parameters:
+   - Test suite files under the `suites` directory
+   - Test suite configmap names, including `server_config_map` and `client_config_map`, if using an argo workflow
+
+- `update.py`: When using an argo workflow as the benchmark pipeline, there are two steps in the workflow template: `install-milvus` and `client-test`
+   - In stage `install-milvus`, `update.py` is used to generate a new `values.yaml` which will be a param of the `helm install` operation
+   - In stage `client-test`, it runs `main.py` and receives the milvus host and port as the cmd params, with the run mode `local`
+
+### Conceptual overview
+
+The following diagram shows the runtime execution graph of the benchmark (local mode based on argo workflow):
+
+![benchmark overview](asserts/uml.jpg)
diff --git a/tests/benchmark/asserts/uml.jpg b/tests/benchmark/asserts/uml.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d88937b6c557584af8d63be2491f991344c46b7f
GIT binary patch
literal 140944
[base85-encoded binary data for tests/benchmark/asserts/uml.jpg omitted]
zN(p1iV{UV#gg(9K$~!5l)@s9*eHJRo_J-sW+3 zuY|a=y_z+kM;Y>$xMP`NH(K)_r34DWvOBWn=$?pD)mNa* zJ7-P*iajo<1TD->7C5^wagtUQs!~C3-HQxKKq95f#7o`g{u?Hh(jgcpHwwfz^QAOp%*!J+`Ul(+o?ja1wC}G@V9;ArZ+l3OO-3*+h63SPo z`Zys?_^IRz)=aSQ@V|F?6n{lc5@4W#tUK_%YVF0hs>h+dRiKqd=@tpRmzk6Fl{xw( zbgMTtjzQD_!O*YSLz<}qoxbiAaijGnjN)JC{C@LWk0{e);z7K%bmQU2nGRwMq{S0M zc~!ir;FY+)kAW+>7c!H6 ziuh>8#7Dc|HFNEsM-v8*wsXEP=o=^a@T4UVL{l!Jqsqw$fOXFp{7qamMwJr|r~3zv zqVVa+3m|8I@{E)yp5dwazv_N*3I=a|v}ixN=natbXIm6zz31d727e9oMw16FblTld z0)CR&O_%b~9QHFQ;^f%}%*1SvlnEG5wAeq%#b+u8NQ@C=a}bxc??D_SDpw}Q0uvyL znXe6<=DB-558Z=&?~;n~5%_h60SN}F1K7vRR!4Dhh) zS&%3VQjQUJg6u2iO{KMrG&J| zXWc1(D7DW0V_BotO?t$fkZ7K%;4;J~0>*h-skH~_aBf=|mhG1$M+K7wa8CWfPKw~U zzj2w=>7w;CL;rE|=PWUYq=r-8B%*g`!6<+B?;)qjevrhTe1ZrdUSrV=T;r@CHQXEM zQv!7dTqAVm3gr*o*1~^X!);ve5-GzA1)?c^D^iX8D+?ts%CBFQS|`9=_^?*+)&%wq zQqmPcCCW!PunbTt;wv}9yY~oMWu@a>w`%=Io&)fE@^IUhA9<=#Nf(agVK4a=mZ0t2Tef*#k;m=P+nC7rcKtY0=Whf7ia^S;*~#1>yb02KhN zHQsrdL`*P&50d~`%lC7aglnJTB-OOUo`iV%q8b_8asg_@0;2wL5F}(yjQ_kcNjsO( zEeN(t1?HI*FtxRr6vw_Wr^XEeV;fGu2Vb)^(b!0G# z6F-2?j~zZmj7o7d1YbKzbol34Ohbv+K`39oEpP;;|2PCm?z6HIJ5GlLWN(+>( zwxR-Wy%_2)0~cm^clDohSYN580#SNdiRT2;0__sd0c9E~V(#h&Gif>xxX<2saGxfY zI~RbDzR`Mdiw9H|@9LOgtW3vzk;FbsUb0HuR5`C>}9)>tUCipKsKc9}{;X)ubi8d`;zCC#QxR`kIhmrvbYfmh? z@RJx0o&`ieqW8GV{sQ~uf(5+wCZ{_UT=>+xf9Z!cMk_T8$aSorbYla9e*&U_5_HFES@{{d^2A^z)o1w6O6(XOrP}K9G#^GPqFm1!3jZS@=nse-Jb~9XAIg z6F-(L@5>2{CIKfP%f$> zAW<2I@u$Z)QF;X4_ZB6b(ih;43fh+f-vhI*XoEBmJdsgp3~mMBM+!71-Xi|Ud#vC` z%3dKmKIdi*_&@Kzhggc5?X3LB4SFMKMrXf2E>L!cWtni7i+#9kU^Ic>h~IvDyd;jt z+4N5Jx9@MTEk`%Cv*dDcCb^nqsNr`n1;*@c^C%$_#|nzxfR2Jm)33WT*SO)A zqS!}O74X*YEC(ViST8;(>^^x}WpoFJ z=cHm$!ti?l6*Qq5(CIe4_PmpBZ-9)SuDx0!x-Kc8@G3P`UD{(H-T~3&Ok+IMFaIeG z5?u^ZBfip>-lubvHNBo+qom=l895;ky$A2+oh3aBpuX^)(sB0d{aw(d5uIMIoa8fo;3ZS%n!FP-QZaRLpZe768!8%~8+rQ~ zvlqo5COi%B>3q^GTWzDGkys*_B4+o}r<|XgN9xw0oYslB1eb1fSJKt-j6p<*%{uuosa=y=gThV(ayM}OoGW2=DqnBc^o-*_L(vUUOa$;BV z3>IzdIO(^^%4iGm;0i!O^yAefrFey;IuckdH3NW!NfP9>99v{TdC>x(c+qzJ2@%Dd z-OxjZV^9)P9=H;BF@Z5#nYT9yZ!y;S3m33;Lfo#02dAo)ctEdks$4aiL;j*G8lPTj z+HiTY-uA6k?C?~u)j*+EQP$z3A7)~+IG-vs#)CHqSMuuoUPI}=^UGg&<8zjYtO0b< ztnVzE{Z)9HhII;~&YdZ5qLvR+4Q*FcwjwIt^C& z>DOML&W8P*)nJ`{ermU~&%}F zTDz@JWNOE$mAX7UiP%h@NcB6NbNDCesZV@1KCdHcYuCmtEZ3%K4}>k6IM1Cg;VmB5 z4X3U#Y-K^p8O2y{ul{(Ob&AuUZVD1L^y4QzT2021DBMY`weEpAC*@v?KKnyJN2>J9 zfTqRpgFkoeCkrdR`*b$Zj2N9vWwMMG@*KF5hjcI*M z5c!$~s77M^N5hl`fDu0R1(C~{69jsfQ$SD;?0^)IjWZDF;8*2r$FY5W0|xhKM55I9+{kh0+uA1m zyzyH7MTZV;ufU@ZoNUrtTAwO016i3S{+Ueto`*J@YR>yTO0%d<>EXSsmGO!X3<|}4 ztmOn%voOPRPvEV(PYN1%b%O$*e%EQIji)Ey?jOtUY-)BPk*RX6{qWYXUR`WCxo6Z1 z56dRhN*h8*+dIJu_nrl7{Q6xm6OT2v5Bkb=13K-?%m8hy0w)s#+<5GJkgz2K_b&&e ze05AVKA8b=m`61#;BU+!hKNU7>-3P3l7#qo*!fQO zKEGsA=l;6-Ej9(cw8g@tI;TxtA$fBHuHhJ>mi!JV9`i38>@_Bip7&+#%}Dg7A5M~& ztyxGsa#%d&Ox06aq@#}OYzu-ZrqrsR*cepsCuyusMjML^W7BG^s}0MH1uXsev(x`< z>vv>w7a3cY9G0+;7Tu6G;`Q32KyViY7YT`+L1rcvV-nUYlWxxmn#?ZuwRJVhTlFsw zRLxdy3 zw4MYtZ$~%U+%B&(dA6jRGc~whLb8^4`Yo0g-r6d!1D$?5ML8O!1l)8hXI>~2TYlni zh-5o)jFb)#3pAt8?Ed3)K+@H}FwFsJXAl|~4V4%Ud=k7$Yygu@-8ct@a`;8#B>DzU}7!%Z+ zCTT?O>0A_|@aJjY%!l?YZMV9^zN_i$)Bb#SP{qMhrKkrsce9eW&_N`+Q_#DERWF-? 
zbejV&4~4ADPXjy$2zbvA9J!$~UZ69f-g5!9?ZN-ztb$)OzBZMs{?BsX&F3ZTcyl&S z#a3&W_5As#`M9p;>1ugkga^($EA~CeAwa=#7U*aO=}u1!Fe|W1}9(m`p-GeWOayit%R`9bu z_$}^FVksZst!9o>LQJ-VT-JH3zQxEPwS9g}^=1ydH7cYmVMlBEqM`F@;5(>aJZ*r$ z1w5(OTf$2FO{0&qtS5o=c)KHRz1^#ifn{L z6c7lhC>g)C$0=9b6S@fwsrc@;p0V46Zw7grT^!jM0IqO2jMM5QDFd;VB4;}R`eKB* zgtg7u6&ZJx^7+K~k{5$aZzzrVq%#TJh88|s=Jf*E5Zge`pz!D560~99c+^7+T}b&9WwLEWS5iGPWIXU#U>FBHTQ4K zs@$j2)qxhKYCcTcb_Qu>4UT#qWqlf0DT@fhLCH>dorvzy;CBQRiNQ%&VOeop1qZVX=G$x9msCY~or@C5u?S z{zO67kwX%GNn@y6zsgsyBTcb9Uwd_eL_&>z=02&@Wl%Qe8=Pf?Ca*mzNpJC0cD&iL zVsB#k&)AwM@1uid+YQZd2m~kf>PYl2Ta_Bh*kgVfjZ{8}l+wWd-uDQr z!PTwT&2(o-tH84Tvqxq1R&y3)3`Gz2=^Za=jaHT6%Xkaj0i+UvM$ZlgyzAb(y2io( zup?Ck|D*cal~1=oT7Dr*CsUYv>O33v;BjJSM;dnLwV5kgQm^m#1=1D3^_|WS2l^~r zsxo#}s(EEUvvP=8QJB#z;ech^CzOTgR#FAbEPEAgdApp`HQ{Ie`ex6Y3F4WI5I9yC z(fnSKALo~(-N36zq>@uZPf_$|WR=%KKFccGF1J;0bd%IRU%AL~CU|RBN59@wXtm)P zyj~af6VxHIi??dqPILweWPfa1w-S1N|56uEantQ?Xp0oVDv~~IyMIp6|FRxQz|zrF zbu-LBj6|0uKlF?0Qo`$a-st7ooiiJ+}q(g!pnI#Y{Zt)RZX)J_<5^|nWsX=kVvYi`3BZ#i=+a;h33!IkYv0xXt zfh+M`+dg92m~Ju|4KL_4ikolPNqn#D-*sG!7`>1Nsq$>}0sZIYi`5eINLYDCd1OnJ zPub4IuEx}B4vop9=UX1r;k$_)c~e6K%lFHnQhEIzfxEN3V(-iXa9A~uB(?3a<)x64 zKc#(98JgB^H0dJ(}8z9%y))YQOib zH0$Y*M_bHtfq6mk=>Cr%cTpyv8%eWyA;b9^0xo&W|CidQZnfF5CM`RZM<2<+`;iHM zwodG)MMYwot+5;36PoI)O)RGI=G9e5)BA%SdlV%tl``C$J(YHR09A+2A<;cwi+axj zPv45XU!ldd#)(f4={q}^c(?mDS-V9P%MaDePNk5*cmdTxzSzu6ikL6|{52^r`Qhg4 zuE(plN)3e*^omb)2%3n0wo=$zNO5FjUw*4Rw}qN5?6%*cFKhqYxn4ObpNx5#ZZdE? ztO`|4kdNDW?sZV@K>ib@=T0!Yq_wn%Eu5Y&bKD+vUx~=qiC1@E?h<4B%+|Pa;Px1A1J0_^!OaKuE8jGYB8dUCQY+t)6c>3Rr6x z%6z;p)+wYS_WY>$$mdtX%C%q>@AK#j^X22Qp)vz%C^@#7xsOk<;@pto_7q`?W#sWz z#R>siJkytH95Jgdf{C!DTlhT&VWe~vIj#j6bmKkxXi zH(v7X!;VX*#yjisQ&_QJBs85lv*eml7HT8$9+I1H>UBaL3Tv$CR4S2Gr#grCGA$pN!tvXS zLnJ{u@S!v|gf_nKtQXO%l`;XZ)|(l3jOB4q5xuEvF}glh<{YB2u0NX(hz2VB3*6Lt z$it~MXW*u}_zbI_o#H9REr0Yn3RrouGFGh>eSVymEHWu}BeCRpwVe2vJMKVIh*Hk~ z5t^CbRKG;F%RQAZea{Z$88yK26vA$`h^rUAO3Aen^&7f~vM8?^uRF=z+^{9qO=G^* zet}QF^xdy}!{rvq&5gUAzevJo$455f%9Skn^qXEV3p+Fk=jfI0aJ-6o`At|OSvVtK zi?uc%`=ps-X7By<<1xak%1RNd2EhQI)kN;_=oi$yU51wh*u-rqO}bxB^H$BDQ}Dex zHZ5xPB=BWJpTW@Dxkkz4fmwAmR7Jr>k5gF1oj=Th(+9Nm{+LG9J7KTbXaX)13&X^& zJ6ryReX-XE@?V}%OPH;8bpLQ;Ndk3AIjn9c{hFUT-jYx|SY~GCvo7~@3-!Ii@ld$l z4WG*_XsATZDo`p!IJ0y$V6Jp!v_@mF`I35a?C%2g1+Ux4tZ_Gx)7%!}0VulnDW8Qm z|NHzQi9(eKvyo53=HD(@ zTAGG5>FlKc3HCWUF~082!j~qpChPG_{nb`#z+Hu-FOB*L>~;`4YXUy%#7ivN?sUTf z$D+*qyWo+E;jXT3N#5B!D*{Hr;NE0dEx-K-lUaP)_Hx5`ybeE0=VcL1MTWlYdtX*I zz8QSU_q3dDTwIxD&)s>H*06y_xXfjP8^@5knqC7C2t`tg4Z__E(_#+ZMv%E4jU2!nl+Nrl;cLS<5u0 zu+g7O+WXt7yBujZDQN1X&Nt+f-EW!h-G%uickz7B5<)$1X=(pF+x8x(60W~7om43{ zTg$ZG8PZ~D$@Z=31yX5j_x6&{&51O|+Fo(Gg$Gi)c4SLMhzxb;v}dWu@pK&LGeF#Z zA~|MtVVfW^?~$H;NqN->R5)&ZC!q+MRarxYjJ*`P1OXjklRsmDZ1TQUfiglaLsi%C z3Jq!m3P#0Vf}UkiSxN9(7e`AhKc;vggm5rjF1FX>Guw~f>w>0&=T|rU(Zl%zJ&BCl zUj9h!2P3rsS>Dde+HVVP^(=|G%&1m8=%Uj*WazaJQutt~b=1ZBeIrr&YBme}wUlR>iL2 z8a3K9Cxa5_y3++*y@E8*!K2a!w7+IkjY8DNRL7R)!(%vv=@l5I*(ZGH+X3*q0;a>e z{=h!j%$Ao_ZI;xOC#=X+I1&hRv}px*k|%R-s1cO+y#foK&UD+*&_|z>=W+E~7C_@R z%$fo+oerOE`8VEaLR`_yn12@uiU-fTj3i1dPI3zOYVRfKW%HJp_?&1+cnu$!{+O89 z*r$N)q$4S{Qf}LQqcRk{HpXO=^R@}uI-req{J}|e*)}z z2=qpG--3N6`E^g^g3YB}**HdNmbMaq3R#|7Vd1`^j`-|wK*2bD2JLx}JFEkovHks$ z@{I_{99sBvZnmO8a`YrXc$B1d(V(#Frvi5AD*smMd+UY-KC4?N2;@7bky&Zz`ymd4#N)aHoj13u)~x>vZLBe$P-2HZqj45HUWcVbD z8ZQ^l3(1A7HOYNjP?Nt*v!yG^?$)`&=bz#rH7QPCJ7nAc{92~}4&S0YGC8=TATl}M zpljY{b*Y*QYv7e8=2(GLGCF6~_l7&DaT|k8x#m+mSh||gTxUCb#{TfGEbiz}OjyU$ z7zHG5l^x=jl?>?jVKdf&t`e5_2d2MdaxEJ*8++~h?l7xmDY%$42&L8B20-k1zR4Oo zd8anmg6+zBGpQW|&yN%-G_bzg6ql|*M-Hoe_E)|&Hu)o62v#V;iegnVT-=%!N8GQ- 
z7&ey?ab(1+{aGuv{rFvv>a%lgEs0e&99Z^dgPp9VGp?tSQ-zA6zmz8={;vPt6)8^t zwXVRua){8`s+P0I4@+PCoRc**SRPj+S9opEsj-&zbJl**I7tsO4f8%h!Q-3a zSM}mVfQdKcfxGwF>>q2;t?E}lv~Ph{h2zAVk=EZ8>)slQ<~c2xsSV%>{@Hk#_5CFR z_C;}S619=)V>CCPJG&?W*}XUYu&G5)T+X#K-I!ca|MvOvdq?eDFbgV(NqoGH{J5!L zMQm3eGvWxuK0wf1+Qv;{KN2)J=5uABBb#p`8~SO&pXy%IuiKux^Wxq%8zC_O34mg z_Q?_WVPANw-qTxMGxWUd+9hmZ{No$)gB3{*-g(!K>}D`cd4=PaMM+L~8}*VzuBeUC7Qfe z>J{q@O5qRvu3a?KRB<6oCIXcvp5E**u?2efb+mk z^X6HHo$u_sv&rTr1D!`vMFk_I53bgEDcLyFi+Pzho#B?I|8f6KL6L1Is(B}nzIw|p zW59$qt6*t{cGl>p_YThCU|%a-q(g`#3QHo%{q~EqOf_!Yu=PhsrqBLumWZ?Gy(3TU zk)D9C<>?m1EFFRc8@930UYnoIGWA%>mXM($)z4;XLWH|4u+Gvykh6S^1VU-UP}WL( zU+}IlPUhF*?|~X`7iU+QYC{`LZPYeet|si^ZT4Ebdyh$4%PQ$Q7qXhWa`}*f`wT@1fA^CP&C$3PzV#* zCNx#L^EJS!$^BF{mNDfC;MaQ(rTH$8h~>X_l}=AmRTP^n4|QSjEt-*y)0f6bZQ~C8 zC-|DJ0?{E<&D_msHaxBANqpa?Cx5LA3C_%RAJ*SmwT6G zvHZsQvfXzI8ak^Kb&s~`se%jOWC-U>S8k(Xc!{t+au+xOkQSAM`GewEW7mAnKe{zSIiT=?kPk}RDWqO2(Wc!07V z-GLj8%keSZID>N2GVxDRGzwU&4rprTS~v>E+~6~=c(Vwz&I`v5?>sd8kiS&Bp2z#O zZntgG%Xpq57+ zx~UQGrOGMqtYleS5`6n@3N=%>{F;UKHxFS$=&JW3dSgHnYZ>T3|IzhYK=L!gfDx9p zmTwHbC>suT!|wBukIsK!)WDtz_NQ6ldMm4t2iWY{ zYS*1oeC;Sjc{^fl9T=1ByY&b3YEfg3x4)zbI0R7pN3TfF_J@iPX0@yWn=3toC+fyg zgo=3T0kObX{NTLBhYhSIGHIk<}!!tMIeBb}CKSMiNw9zmuOq{ULJ+ zscO6VeU@Iu&lGnoVyAX}pPrnuR^ceOIwvN$zqoJS^ZQ`ACn8R!Sz321{R9hi(C2CJ z1e0O&N}Ljt>DF@Z$C%cMjx_pHuUMhto$f$k7nz4spA^&eDm&@u>POt72*Ck;!(9R# z@+_>7Kb<7Edq<7OXleTd!SXel&T`NeI^f^~y$YEWtAnmTKjQO>)q1<#3E9f$?v%H8 zu6qn`p^ZdQH`cE%xEvaU0>Uzar3a3f14hX9qh?l zWe4gFVlfH{pp`~mRv`PG$M&}r8<8!)2tQo0BTaPAyUw&Qf|8xM%4&hpv;Cn<>x z*bg345~UE@K0(Q;K3p=!P`vfL-|v$$RfOz&w=x>+@UENH&D&*Gjl#Qz<*t;hL0PEK zcGWE3o0*RepD_!YUe&WQ0hBZD1Nx#xgXbiNQBg1#b}O*`kD$?=sZTysntdzM1bI4~ zfJeJU_3SrZj>KD9?DmxlgIAo7B-n;R>Vg;Trh<#f!dlv+-^$K@3pkHabU3_W=9f3M zTu6?1()re`Aakv3N7j5}xR2_*^zsl=m1}o-J@OdEhqBCtwKe{S0rU>C)pV40f>BuD)3W+U!p` z_yn8JUuM9b#Q*9hF?#&LA@#(y`i4wrauJe#-ROv=ro) zw$bMH3U?hAp_RZ7s#P1>@t_fd;!pb(AwzL}Ot<0G^z%iEheu-A%y>{gUjn2@1L^>D zF6PGiB#&ROkQj!vJ4ueY4F(3d5||b^o_ryRA%nMGO>V?CnzaRAYeRSvR-=QTS0yL=GJbX*ABMG+!snGHi#PCrf+J5FLn* z@YAn&<<8TOZpSA!mQ3bZZT3H)?|A2~tVIhYyIKm@Kb{}#s`KA2HWZnoy?_Z0GnfyI z%U0VA6Z*Xxe|o7fdD2(U(s%VaY1Z=K2~c!xnKQy$eb59|{}3kW?-K9Bv6D`j0Y{(s z`sPVEgh8XaUr7}S{b~6oJKO%&j>+uOYD4l`2QP<$iG(F|wZ`VkVxZ5_hztJLD}q?w zQVM7rS$~M7o;zdIE9{`AK`iUDQ}6jMXTLq*#yIsnzy-6%nq~)fy0YY|#os%4 zeN`u?QbWE43kO{xyM~|ASbW$AQA;OWoS;RMq2ek!9YqL|bTAs;Irnd)3~pXaI3?;E zeFxjaD)Da9JpP&yzq4@?h<@uRHq(}_X9q)0IJ3ljhu50x{OtVDmQ{uRYWvW;`!%u3 zGRmWaahH^rd|}z96$U-yn~M63y=N7Iv?8|t2y37+-+t^pRXdwNtgBTLkK0HmYoAqI z(?0`T0r_T&t@(w`aSF^OM{?{&u>)I=;*Ni^V^5ZR2C}u3m^TjUOzzH7=$uLv-kpiG z%e9p)?*H!b4~^r~#D)N_eQffznS#B+M_v5u@5A};vKH5_8(lODlZeB5xi%WL`ad=_ zQd_PJ$?}g42{|Co$BRSpprHU#%eX6ZAGjdT<&Y1LSiyT+dw>|8=EHzd1_UL8TR!Mv zaW_V(b2AVpgI70R1XpP2b9Z>Cu!b19RLR0kzXG%ugTrQ3~Y_E8Yd z?yIi7{idX7MXWxL8v7o$C!MPZ=?lhfHaLV?Ih8t@5^4nr_9=l^RyEcwqCF0p`U*^Y z9Jty8Zvd*`be8LEMxxZcHsAV6d`6g^W#in$Cw{Lb3DbLH~^%v*VM_;K* zK6USXRbSz~ZHjA3CA+%VrN%c(;`VF&`9|A`iEX=L1zn9||7iN~;LCuf=_hEbLr53@ zD*S^~^!(m(>ow=0+-rtPdxq0Rvp?}qN2i`gFvp36?A}=TaOH-l_TL7Ot@jvvpNknA?)6CM zjKA(8lxadZZ2rkG1i{xmpdda)_wF5 zNhM~xcNf<*%BsCeUzGIQFeh$aw-Sz7QNm$sRXY>jbAVZ{yP_7wlV|)p>Yu?|pB~bw z=Mm(y6)VjP9_NOB={ETdf1VNyrbBE5Wt+yY7oXZDk8x-lZ;ONuvtyaD-}r(y>oajl-;fQ+RB#rNs-;5<-wzrxdGR^Rzfod z(|N;P%{HJ-cHmcCicEdJIP?^-G<`f=kRfQo^|wo#FS>`o7QZ18;|Y9GNA-kIIT`!% zdpv2QOxmloMbSN~!fJ`INk}iKu$OlK1N&!WAGrkI#UOU}Iv|PKKA$bAzbH%GDzrL?qZOVqP+%hJy%<5c% zyGHY22Py(Dn_E27afa2GWwl<7E~BA%n6UBVOxy<%PTIjneC3CT8BLK$OFjX>RmT>L z^{F-5AJX<^43#~r!dtmeF|VQA+zdB!N1b|u2o{M+@w=yO-HQt>rFYBod#W_-V`tH% zNCz_<^+`i%jaLWTpf<8kfDUiK!NsD>phc*3Z9tPbTosi@?oU>9E-4K6%HB26KBF 
z`}+!5b`be6q&0Thb|1x19-MbDTF|Ld z{l%kc&);rcFa~A2h#K*43Fl?rY*x((saR#&7>cc}(!hOE-Zm+dtG^f!y|TW9?7buj zZDm!XGIK(Ew%7*PZ&+^c57rwL*shLMW$&&Ba7ZVVFBvXJr&a`ZP%B%_ioCH3zKd&C zC=qpcJ#S|L21@hMM3RVC(<(?m6YuNV6pp&Y9Fx`PvS&^vGDdEgbVIs;sd{VvoN{ef zYS82*UdN=b7tp(k12g{v26U;4t_^(+2Tl7x@GSlae^Wy=f^74+NQTr!l z*Kgy4ug5`ACS{6rElow$+bG${-#vSTQyk!>I+X7Xc>hYG)pTbmiyyxOK6-CTuL8h3i_s7G&?bAP$ zO??7+K70WCXS%C0KeoyWukZfZWET6`j6cAKIMi!aJK5d{r~W)AoWkw5@nu!Ocm1Qv z;*W~>qwR2a83n>TGIl+_Trp|fuemZ2x4Ut|$XlO8X}i>CD)5d%mYn><_)P3hljkBn zF1Tkd$}Im&&}4}9$pu>$B;bB3#?2=h`>b!cXv=|_zWm|1(@=4doTN>8_ui+*T{F;& z`04Cjr2p8epl(F<3B15ap8tjEEeT!gX*BbrVG|0Qt-i^S!C5|!vDVH}1%Eq{so2z~ zwH?t9l9lOnBn`BBLIqySx=>BzMB-m(deANJK6^ml%6{ejfc2oZNyD^xf5SdSVsZ5O z#u;)%u_#6g(+1{WU0Knlz3+B&Q#*EM^5w-`r&CDLYZGT?l23w;!Ek}ZS9J&INA>k! zwnc-M;uopgsGZAO^dUI*JX;lEU(_q)`kV~N(Re^-L0{TvZ5*xLp0--7kTu>=CuhdH zhp+da2Bh#=tU2f|fb|n8Ox0k~XCWreu=1)XX=*WAUGuC8!{ zh`7-L2H>4e3&DPZu0!V*pZkYcJ1X6nub?S9`^#U8Rw=VU29xQF-8z#m#I+cLv@RW% zZJv2j7)5jV(W1rEq)=dkX^1mSCbIl5NeP>p?X8{_9s9G78of zgWYK9gJv!^aT|$AAMRU|tKxUfk_LBu>-?Df4(f{vzTk8gIOsVP&Cds3bg?&>!W)FR z%2i!AyC%_p<#2Ok0K1Kc_0fX8014-HwIOg#?f;fJa19x_qPeM6s zEM%cZ0`Og8#ky+GHI{w^{Epa454dZK^XvR`RtG{VGWC$}oUUYM+o2lDWxOy+IFQ#46do z7sSCfW7Po*sEp5_Bvk9 z?P8^{=@yw(I*6nzn0YzYbV$RqEt`8jQ(t7ffh19UTBOJ1Zn)k z_wG!@(hh=i0s2mvK6Z#66DPDho#ZQER2J6_CLHs$x}(@1&qnN8y|C*{S7}`9oYmOa zwJNF37Inqn2!m_mcFWI>qRq53t_sU+&cuhRL}+9Nw&CmtX_rQ%@4GzOE!0jl9mtOw zL#qg>G&nBb;Uq=AIFlG6;A)O8!zc^67ncd#sZV{_ z_Y3#ox&;S2yFjvt#h|N^*X=BYr}$cjqS1Mcb4wKdQyXPgXrAFR|J{_1*emfZu1WJF z1iZ&xup4thm2NE&{yakMWd%!t(y#kr%W}{66-2aBCE^s6$LZ%BLT+IctCu8W3lsxC zF=q1xPJzJ)@>fZa=<`pibR2Sm{1>l1UOK?yo6)^WkjP zC&Jg1?{*jOWRB~<(}jT{^ia@fr^mb;E`sJ7^F{m>h_%q%?{rWR!zHGQKvzNx?Czv z*JLt5{A5?zr5q;AKqke`%JDQt6#EtZ7JV5$z2ckw{r#H@Ozk?4ya{Jzc%Da|iW`{K z(1=d^owO1eMobVzc)6i1xy8|_yCXO(%ZUG01#yN(im6;b=T)4n#yDYzs@;9#qnD89 ztCuW7%=%mp8YCQw6tV7LYQag-+r%tF{~Z6r5?#(buDuhg6O5bPphIJSU1!Pgf>-Cm zM9@%9+Nasg**P;_u#LGS`o*ubI`x3ydSQ49#HhvLGQ59ZC!uB^>h2iub-I}_7m6x zN#dmZJtjLxjkukKe$AnynwyG63IX$<`U)n4+Dy*!52i^w7SpT@DOdvXTjnRu{;@|C z!G7M*g1Dy=Jh8fcrt?Qi*)M#4L6oN&8~^zh=SFj1OmQpC3%t?Xt>QsM&MeP!tDaGNdY@1F9e z1Y5^@%*18vt9QW;!iNl1%yDt+1m?C**Es@oT*u$VSC*O|JmvCr1S=e;9-`PDX`Bjn zri5@=GgMHc9OkyMkVSr0@11uqz#@i$RJqlIHz$7-E8kfh3d#4*2mx_xvs;0NnY4J* zq44J~+@$=2K|7y`DZ^WVM^QVP)yX-d-}`rUcg-8zCK(D*n9;2!r_)>o~o((ulohRlae(P)dxW(r2IUvd?!d}Y<>+a zN<7w4!L|f|<)7DlQ>FdFfa*5$SrfMKr~lU5ym^b_)5=X2n&29@C}t50OK+zU&bMcb zzjqnH#y%?2JA~BM6;;&CXU@M7J!IAEiYdlzEA0QVNcm%_h)XY?{+?kOKNVsjC{o>E zMQEBr>JS&dh+AnhdG}n|{um+)KUV)U#J@0qE99EhTGK8Un_^RW&CTL@-M&Tfz}$ST zH0`u6R}WUDAGYP|9qj)dI{)1Bk%k$PavHedfN|!m-(-oswkyZrdaKZ3Dtj%1M*MVE z(5#kUPGc=y4eq|F;T4ec{{b+gy&a*_8V2-r{TgVqh2=>$l-z_K-6JslSJm?H0n&^x z7?%dpwIL4woh`Nf2VeRQrX*d;dFwakhLie_>?iMt$z6QGj{rDY_c;G|7 zs3&21_ro8@C~h`wfjM1#dJ=81!{Lfwv?6_b=a&=n1(x-7xD%M1q&-88Aphvi`QEP+ zEG`3J+s3c>l043C)34M2EslF*IoFleHxuDGH?vE-YFaa${;)R=^G*=ipw~0nmHCMs zVObxjcE-4;yl=?h!(7FxU9}jP@*%GJx!{`bPUxT6Hi@-!1Y60Qty>{jHD0*D>Cy8`9LHShB=5r_0M)ko=&nMA#~$M?o1x^lECM3JnIjl0Fj( z1#%ZA({vtj?CXGbZ2u8&N@7DMV%gPKt+pkqF*^>7{D}-`9G>7nyeGU>wHcJJ9y(h` zeO^-SBA=^E*G+=3b)9%)_s<2P4bnKq+?v#fX-c0&676K0mI=a|ge;*QFR3t1y}jA3 zBdQ1TGHcTf_U;;{ijES`^m)Ui8nibTL~pwlM^tk>bgO3A@7#_(UQz=t)4giQ`saYp zG$yOn&khN2AoerOboT1Y;Y`DYbbZ?y@Q$$ZC2UB_F5}$>yccxf>WmaXr=2A*K%mo( z!Vm?nQz+m~_sxn^*N|21_dt%~*{&o2cOzjrz;-mCapgc|6FeL5DCgdtnzeJUd8U-s zw8EaMZlXAtK^2GcZ6Ls6CK}VKcj;YAzT4>4yJU$Bj^?hrGXh$WdBLgmW)uBL)^{FB zUwI!kUAMgWL0y|`;KXX{!9{p~%!y8CPujyy$o6>={--pTAa09u!_pzn!tfThdUy+) z^=S*6pFakwP%SkW$_AR)jF{lOi1l|Vf7xTPGeLF**mCB?$l7}R4O9>+@aH!P3Z%& 
zSKsd61*RJePy9`BAiH_ki4Xuam~R6C^KDk)ZVh0kV(>eri6CuCAih1g_gR_dtLvJu z(poKT;loe(P}^}P;qMIo%5UR%&N!~!uuqro>7;YMSl^~u)C|{Ta~Tv$7@@V#aT@aU zDww=;^{n=**S-YJe5V^(NIb5b*LKaVvGo=v{I)~3^U|q3mX)7Iur2<+OH(PXLZ|&w7?CY9)nnE}JhzUJ5L_U#^^5P(i z@eSwwX>Pktw4hD5bpoI&`KUn$Toy3C@Tmh|G{DVDBX3ry2^Ajg3E2Ui7?#q+k5CR+ zPvGixC9DTxb2#DMth_fUvCXDavi2%fI!b^EB`Hg!Q;u0=C4zX-<^3=RyTSG?Nia{) zYq+8)eJ0@*_xZA&Ydm+axmTv_4#Pc9%J{X7(bd-W5F_ z2gvIpWiMS|xFVWfZ({o6&)ltLpiDNJ;44?R4JEiMFuunBS+_hH9%9r8E*)xp<9Uj_ zdlaN#!zpj0%)m$q3WHjZiGsf4!-HQJ1PEG}VUlu@yU|Kk+K4EjRUTy`?7dpu{pQE{ z@c!49ah5MmDy8fiOau0P!I5Tr6oCc3&jT=8tpnG(5VukV`lax;tQV{OX+*rzbW?meqIk%0H`|O=V-w85&YJW z<{=3@E5O8GG!QCYA`oz}U0*pSf;drjR@i0JjSE|O3wR(Js38uELuun!B2wUcAZ$v80d}}HIe-}qs{&ua zXHP`}6R6c9{~htmEZ{dB%s%%pp@&im!RJKy-)~^X5{W?ULFK#)>FfU#1LPjz6ht&e zO@z~3|E7Y1DugPO9;mw*fbb)*Ayk|LH!x8?gD>JKU4l>9yrUw7VPbXX5mTcK7{RBs9Wf*B)+-^lC5=h(lu-v2wuNmNE4j6SVJIz(dDFj?28t&Mr*MTeq9HX=G&2B|;O!GyPZSR#6e z``b(j#K*IVgx^O}kr-2nXgw-8%h@h^FybXytDgBo^uOQ!FXjK`x_>3^Uv2ZRrv2B- z{QquYg)n@s0ME@wJW0Wc=?Eh*QSbtosz{M^cDgm@;(2dhK{^AtZ|Q?^Djf2U_}a+fg%f;FAQe&^L^~4X(*VXTQff(lk95*7lNkQ%1jsM*3By3lQ}1e~@~s38 zp0p49nLpS4`CU4|g`R(9m5K+#uV-M)r3lAj*q8|*RI{bv9tgAlCKLF(ZWaSDDYOmZ zzuPTxh-Xs{|2;{P!X*S(!bLcoq&%fBY9pVcpS03RFBFeg-cp)_e3D zhW~|Ok>3Cw%0TzYn-?Pml@brJ-azl&N)c%Ge5O4TGPY|^{@>T9z+Hh0Q`@uS=O`V?qanfH` z?=uQNa}jFSiafyv6{r}Fhl_X|KH!kzf8vlKO`$oyo7h6Ug4o+2Fdae-H~D7{!9%5n zT)pX!4mgAd9URzkn{I$74qrTH;~yF1n-l!ZIS_}#UXdfLAss;7ZYiAfMj+sp|0Li} zM1Zt1s9hcMsGcB|Tnd>X`9YPx^oY$92)2;;IjXxrz%RfLeD(Nc$%XVrJd2}$l<#s~ z_?a0YUr%n~38LO62Gj*SOK%JG^XNa3_Fs1Tmz{n|+JF4%Uv~OeJN<(m|5rQxYwZ3t zcK@L*;cn``zSFO+TT14k0qBR>@92jvYIH?(Xh?;s-C4UdEhSD#c?)W@{vHQa`P1|~BFcV3 zh>ZQNv7_U@FY)OBfjF+*%LXaok>9>9DH#>xDaK^`c8m9Lm6VDqU0R+?4_|2b8Wti1wq00;e)^LC}f5zCmF|0b0I?nBR)RMpjL*({P)2^$n$i6peuqK+MGPVqM?wo3SA3U` z9vSxQtLZ)LJD;j1%b|w$jyhe$^CB^P$p2{sq!8)T;ramI$JlHOyp7n1Jf-hn6!YE0 zapB-Fav#{;9Dk@cK~duC($iyDn@INC7h+vP#B;$WXHn1}F(<(xo+rwa03j&-?L#lr z4CFhmZ9H_P$*15N(Y~&y&Y2nGqQtM1?Usu(D2nkbFk3wh55IQecT*X-Bcj}!e?_@M znyApH(8;f{lvValQI2L6g88~Nr7q_0r0pKqRjkdNCWH|r2}d54PZfW&NEO_-sQ_&P!cThN-Meh>DTT$;Jg8X*NRT;5K4d_!ej6cf3J4O z*Asf^uj;+niKc2!J;{nXY8EWWR5x&1aDB6OL*v@+)@4`tEB^jQpkQ*83AWQFnEjfE zKq2>xiV3>XZ&1_?StL%7!1+J0JVic*dbOtBa99!DDx0}#K23s3kcw;;k+Wxt@UOFN zP$G}!;ZI*91o6ELhfWNaA4iS?DiNJyKvw=B#FpZW?~3N=r`Tw`hIq_fB6+RKTN0Kw z24rbq6F(3EZLIk?I$9Ld*OcjD0C<)}01U=|9)rM1Bs-;0aB#)fU7lUgx)rs?br49?SC&5Hi&hB1 z6T0IID|9BwKEc5``{-hL_#2~~GdW>OgS!Q|KLyiA=o2~u+R7Qyu;D_*(qKaH#*IID z!=_fq>TaHqkyS+7n9OI4ST$zZo?cSPl2r4+*8x61>4%<7fZz}|9{W77xjTdi4iWj2 zfOoVFvThhniiZ>){?79u*S|?1gva+!;rYgd;#ix=GbbOvH#d;nfuS;o zmSz9@DjjfW4u~MM^DN$VkVk-v;M>1ex#Fuk%u8cv{p=DEArD=~WhzM-iSX^11tqRD zJ%3IcvLA}j$43`Q@8Z^sUU~4Q3?o8=75u5emRW=26D@NNT3cF+z1d=@BywXO=A4m{ zlLVvH@-$Hn+^7?Z7^u@0Y0F&eZvE`&cZAU#3D$m?YNyx~9FALP5&IG z->{C?kQDNol-2ND6R*krrCzV>yvqBzCRQT@a?zZWdVpNw5+iKynd$I5AW38SOOOw- zmcNWpj^~`CXJ9ClPoFAI$HvBqGcHvo^qSP}P{_2)?^)qkp3 zsTrJ%2p@CNBrOt&Uux=o6!J!QN!{JROMaAy^G*I~DJcXn`bfOZ`xAM#g6s^?`=Wi! 
zIg@y(f)N#32>OG{BP2Y`vATT;JZK146I$5If6&QyAO8#E7eebu_1)>^m-&Ge8C)e< zgZT1U;#st+_KJN^Uj5J%L+VJCeJFsr4PMX{5rIRmf2sb)p!(gMmC5$D7l{`ts_7Dn z9n6A9>Su7CAYaD=i#67ra;)I^HZBUf6r3rF@qtGCPrP*CtkI0|E3RI~Vd(IH^RsK{ z*s0X%FX-B-GF5wne^Kj&GV+vD4#5jVfM@9d)i3M5jj3A@t3Z9M6s^F%K}`VyZ3$1yGavA2q`@XEg*d_C#5U&>r;~w2*TChA=Rh^*E0hL zh?%^X4pssDejt*8{fiODF@T>)LuH?BH2V(>V_obJ#M zB@H~Efm;Q9#s|wa8T;{aFHYAE7k3v!~`}B5hXx=cNFHrgjMIy7>=O z#*Mj|s)~-=MGbBvBUAdO>Qwlh2RI3_zY#@GD1I%y1FAfCtQg0f^k392rW%rLXuM9v zzy*zg+jUOoT%CSynm{{@_TPM@Qrw}48f9QRCGIP}Wj>ImV`K?_6MO?0&onEknpf&# z$U=F7kSAHcuHizGkk|cMm%WEyR^z^6zE_PYOc4T8Nk1ub=!O!6tqge{ zfOl>=T+T-6PyFSZ`o2Qmtm6>|PexaLnH;z}<(BY>j`i zb2oNIgqQiJL@lWJU7_bbe>=RsL&&tBzvmVBqp*`g@P%4z{xtpyr9M1U^0^1fCMSxx zUGzyE=qg}yB)@sizC))sVtK9{Se{7{#6rP;E`+Q3%jdOUW|>OJ66O-3LYUhF{O`CHB#_Wkb>uju{-nH zrAWld`1PgG?Wqm0wE)zY*w-DEMfftWbB1!N$$-AkB-!6w*x&E6}Zw8QU_(C z8lN=2Qfa99!7n~j$a`}*n!4wMbw?ZFKGGxI$3u+Ut1NQhqCl|FLZl#B|E`3wif|Ex zGn-gPQ8ld^JL(D=3JIg-M7~nQ5>hhkYG?zJnc54M-9Px7!a)7hv>sgQ0zoqyJb9T* z?TGLgN+rgnNI~#%*KSgO7?8e(4MImk9s1+|cM)?2D+j z%)vigY3kkZz$M>g`_oexyhI8|<0HUy4iD4>=7jLbWb?s}JN zCng@{HwFYFeMc~omG$9T{B_t&xvuX%cw14*m#1G;tFNdKvpB%#5WUYFG+4{$o}^G1 zh@<=$*W1mxnhNC$0eD2Vl*mSEvW?z$=-_^#?Od7h%SS_&Q;>O3$b3iaTky#B^U7BV zF8EU5mZ2_s2s(Bo@~j0t22WA;RroA=2sz48fbI^YzOpGo(!1wf5dW2t0cx07+J-hSpq@lED;$DBhb*)CV;!M$WS(^S$k(K)XO z3w=8!I0eG{-4vqq@ySJq!V_Sj)krH#R&;L%Pt7oQsG9SwlBTvlYQBm*G&%q zh+l5H;}wT%zJ=&Y+3!fQQItcWt*K0mAWTN?e-6q2^VM^HFK}%l5GaogBFHK$tvL}4 zxq3K8%5MvH&xg~OPuY_r$G7V*Kxv?lV*T*hqs|el3CKJ*>P;*Vu~H=5ij4Ry^qKhw zpBN&!(ej9*%Mln|_ZwLe2|W7CYE5@X0S`P?r>^=|UnUKYAcd%X!>L0Cu6f`J2|ce* zh~uAZ-;~n3n-{p4frc`Q4T5W05M%dc>9sm}rZ=3f?*sHLf-+6)sAGNrJ)nYnw#Evl zn?GJ63i)`wOiX?Zcp^b?vHeVbDTqHs9&1RPqK-|!+MUD5xx?zvAtvFv!b{YW>_H;= zb_Yp0nY>1|jGw{Bdp)Fgx%4^g8rUZWT%;Jg0&pf4fh9P17h{G9j9NC`R)@@pj@7q5?-5B{$+3=s391Cp(;MTNR{p|(tDQ=t6T=5 zV|d^?=B@T9Ez0~r2%Q!Bq_z*bfk)8An%Ar-B|Y-F`^t>48-PwVe;b>6dgT6iW-=)* zpzAOo!iTqT1=*Er83snTeIejN-C!0%Wvl)CeEd_;Zze}5=?HaEMyP~TAs+8nBoTM& zRS+a#JrJt@goq>95pl%Vk=xipWr2KvnI}LE#+19qeEbfoD@FxihZevUaDSWP8^l+u z-{K`*Kb2}&7B>ayJ-9l9OClH2(9X}&y&S@`@j-C%l~ZPrMeWL^0-VrT2Enf1zeet6 zl6x8tye}mv4)hVm;4@q30WOem3Ki22x%NAiYnKNv@W6w?Zm!i<|lQcdC@9DyI^t3%v zpe^xztggCFic!>JNI;tQ_PeUk7!@>BZjb<8Rp0GE8)eG{uh{zj79Z$5UOz<_>*Y@Q z$ZUFn5vX%`HBE8oAl$e3Z+vtit{wok0Gb2nEoPpg7I@uQnS%aiv&MN1x-=GsA2Q~{ zjO3=`Pr<#CAPR+h&ZcRF|8)JTzo9X0E!O^^+bv1vw_t&BOAAa>J=L5!r_Mr42s}W} zcn^;jp7JXD;(%Hk46JfaF@PE0{Addx3Y9EyZBjF7n|FwZB1>dTjoqfIqZ}@xIOCiK z4y)%9GJeqEpV5Nrb0x<=O zOMSCh*S(+EjW~!253w@rQTm}ii*aDS!6{{NSx=Q$Yeo-veDehy(YyKXf@x(ly{+kF zQj8rajAFtB&&`8X3E-<{n0Q{avA^}amMp49KDCH{tdY8@cOQ=$4q$qsHKRyB79Ir_TTx8f%aQ_TQI;k8P%|z zff>=kL70l-?eb-^c;u7X5EN%5x;YojIQtj7@+r_xPBwyE}d@&%3lW{;o z?O~)MG<7gBDlne6WuRSqW_M|gjm+cd>E~3%DKR(RZ#^UC>iNtGF$vDTTU_a%P4*W%NaZ9mjL$S^1lb4_w%IXe^a#FD^Mq0>x`vfC;6@#8afg9ixi2hAo-;>61 zu`i_5a#XCBdXOSX^_MC0i9khTsSzXct}7QI*7LA0bhuOH7YyNJOHrn(cve8Fa4h)~ zTwzc~M&K=1=oiHxz#9gdE@!(Vvd8C_S{nSK9Su1$oAQ#Bk#Qit)7`|R5Q3g0bre9t zAvTb!O}8|dA5?L1>;>>4%+nIJHqzCA=lufHH|(O|r?K6lor2t%X!XR%XA9@k%=@#% z_6Ck?^eW@W+m{!`&kBY%Xx)1KJdd~VqB%{H7TyPoLU7n!5c^{n-Sk&a-pmS1#(>JCgqmuFWHJMhbiL3ZRtr ziULCDM>c|+ehGb*SOR)dRnurkcO)Ik<(PeCTXMJgw>c+8TWAJFwR42iuee$n-R*XV za^@Tc7f|0~*B4BEZZY;`b#xmleVP9&?kip3xO$3W`R*f?4cp1**lPeN03HLprkxj_ zcZA#Y`g!s3D#tJ*6Jt^?@Jy2aVxI->#%xsQkFwTiqX}zqR=?So3Z5}K`c})Y~p~mohb(0pu90LUrr6Cx}E;?Fokd^p+vVj zn#5QyAvTs#+Dqd)*?@KU`6aFwBMil#@R+C9sk9`vsA8mZM@m|98k|4W=6E($STaWC zhr*nW=}MORLLV~6NLKZZNGjPWHM;eAl4#d7&2gId1#kQDHNjv-yX2Zj4V{Iy$70;y z!wFJFTW3S{6%0HgRnlJFvK%OGt35vKtefS}%;aqB%_v{(QVQ0muz=|>W~=9fw8N}! 
zm%ioO52m$Sj82zq*9w8OQ#l_{zwU!W7JTWsFA9V=LvTX4X1$A}L|} z>QGr*ZKUG|r!^-{=%JKm!K1WC!^NWJsRP09o?l}UFBZ5b$2ZehXvHNkQRE(!qgf#a zu9a%(M9ca}*RD~KIc)~~tGcA7#GmHY=vHd{D6FlGX=>|P!kJLDq9Gbaj@J3XwYn1) zZte=Zz5;Q{CBq>0(yepMP$)FCf=l@o&g*G^!;V{mwOb!}(?*ST1M)5U0eBgj^(L_P z_2NLbwz>Zzx6<*x$;ug^F@kAjW53waT{W?y&uWq)B($Hu9pzilpRKQMK1hF3WwS$` zZ9L4{>Thz-b1k^xLdb5sT-0K}AAupS^K$4+9ZM9UG%R?;(W15c^HtqsAQ{+l?g`XvK`bKPd${(jiwbZ zGqd!UJ$|;6;-=OoiU6%ip%W2`>Wxo-m@}V)vi`;X&ACKxsn$q9qHHxnc1jctN&zci zSh5k`rT?d&##u~o^SDD$QH>e)sr-9k_Olku?eU*=eY`6pR3$W zrNy2d?5yFQkPr)~uuOV%U}8DM^SNd}Fm7~ZQ;E0gYnA9KxmO9(!EDj{!Sh!+)hc(F zS-;9}Brb6{Z=8{|zQR|KO}qNA7FsA1yRFf9l5s{w$+m0VJ?&U?4zpyiZmGF&vg{My zVHW=LH4mux%g(_(P0GDmSXfdRGK?cOogpVhPXh%huWfo2%Ze;wSr zJzBo^GHQCVcChKnb22L?W)uyrN%`UT2c75TU+mm`_!H4ja}uLD5Qyg2PjLB$-8_WP zt>`jQewe!F!zh(2O%rkLGPl@lXEgb4WnZyM#mT@@8Z0=wtx@jV)3rLNuYav$@kXxU z$TceKc6Vhxj{{NH)t7Vatt#ZTaWGW{{W|@UE}6b37A||9ORFs{@(|uF!=f z={@4*DEdx0Y;(rqJjT3L8m2Ayi^>dLxP(JK02i`1eh4ObqJzw+E5J=n;9a;(!gt|l@HJyE!te~5Eus(Dg6lH$X ztciNTWi@(paIr2`{PXK}3lndSRtdT1L_0@com)NcA1dbG<*%^q3+S0i-kRP{;dxWW zqFG0M<0bKBZP?O5>$h0U)|!U3kbFVdI}!J+j&wZ_B{_j(BdWJ=-p z_Hn$=y-m!f-h16C6Qa4+_R2Kb%ZsIIlfB~p^m_{FiSrEy=Q$2DOBGl6lgh?y8{!N1 z57%L{ii0-@^g~N>Fga+rMfuFGN#>FsvwB9T(j}fEd_KIVklP%GNFRW95aIBppW*Q3 z<1bNkD0fz%pN%G|oJk|1A|T7dFgI6gw`rr~{O1e{E}t7ZNwP77Uu&&!F8E(bcVAo@c>oFFMyV01x*-$oVj-Jys(gQZ#rh_-X`Hgno%PiT3?;&#m z{ZD3?^X0vi5j91Wqj0Wkbo%5hC>m;$opT=+1?dDy20p9(iV-t-I6PaH zHizavCs*zsy{`O{(#db0mx@y@nd?R>U?Y+#DKfFcP_frDsw|bma84Fd%D-?YdSjF9 zuBS3~z1#W4)>JyW=m~F!$rQWpnGkR2gV|AznS}3qF@u7Ki$!*;Pv_p~XRGQnC>G9F zi5l}{P`4Fd)aOuBz=-TZPa0|IiE7){RR_M77Kzj0d*17`_V;*C#!c2D=To!-GOyGg z6z^Z@DSRd6GQRbtMk!5deZs_7vQYX^=eoY9AROs0`$b&SSM-JJD&Y}*>2lh1fPrpUTe*8&v>&& zYMj4QytcEs^4L0Hw12DSXmFzS$W>*E{d>QOC=Evnv!n8&Bq&ajl(_+`vro44TSCHs zLiNL^K*^ydB@g-)WS%whr%USZHO=~^1)-f~50pG%9~0CiBN`d( zx;{K_2_C)1wwAazzQpe40!uoO<1Q+?^0hR+U}c_F(2GDNsB*nB#L#G+`BSz=i!v&K z&)dEDfy~#%0(tRFhfsIL3%+U1Z`)^T^3`K*vOvExR^-iT&eEV+RV!OnFLl4yRn!i2 z;_IpVYVw(de>zj$Br<(8ulhC{%4N$*`MU)Noe@E;C}$7qmUTh2`S|1H%FR{=bBlVt zxgB;^Wl0}K%-sl}{}~S1i~MuBCK68M6dp{Di?4+U3~pm*@!W!@38;)JW)T{hziv#E zj*O7VE3uf6dshoH?^>L$M>j~V-K*M_TJV%5UvP5c+6tAMa}1qJ7bLzpM(LRs_Eet; z#-D#|14zAC)bw7Z?|}E)siCoaZ}X z#%myX;v`||$K(86-1xloP$wFJa_!l^#d4<=`N*~!_nD%KL`<3_l|sdx?kF4OJ@h}xLhfec(bMFzIUaOqJ45lANfm*1U_h$mNiwg5CcS=Yt2QFF!*l&9ne*~krw~3GHW+qm*i)Ol;tuZaNGeb?O@>Yq$sa5u# z(?ZBE-xLyXbB@v6Bir)6+41o#vvN~V2)#Ts%!NZhQ)$w_wyPFaZ0r3})%~!LeP(i( zxlJTpN+;0$yQLx{AzEvYzBdEc?h5{P&Du#z=py|m#;eK9-|>CS8GEtwJ2^Tewj*lX zPr@jpoX(8|Ztp%%4AVoiVq;O91CNE>Gc1~5uq87i>*^GAQ(vv_NQAgNYN@i58%*eS z8ZH^kL^p87_ZG;w#_uLET59Px#^bn1y;#?kyr>R)N|eV+Wc#>tP~Thkfd{YC^5qTJ z7Y@f}SLMPG`4(-D3y+G8Ck+YOSEyClO$XFDq;mjVKXpdeD>qt5aa5RPpVKA#bL^Qh zrn3SYum>N%bW@31CSESLd(_(`Yu7J&?59RubJOHmvE>}h^+ho=lk*nu?IIO zZ+2&tSr0NAFNCH&%n-~wxobYaBEZG@etD==xPz}W1LK5&Xz?< z==$pOHEf=x$9L3q(a*MWPpfV=Z_(%g$4Yk?VZSY$J22;A@!pWXaIbljSUo5Dg zrRL8KQIWbiJ69wIn?eCM~|xR#y3NBvy2q!|BYzV7Lz zVJc@GI`XzR51h6KFcTtc{lqWpuZ!tN7N#=$98j3!;4qkMAH^N5j?-VJ#3# zRd!+Bn)6_N%UUYxwV!%(IA;uyg(7ba#(<05IgPonqZ%z1-Yto(Lk4arbknlQH6V*m zaMdcBr@L;ERR>>1H$Bvw0ehaYfSRTCRqM;d&ge}ae}Jz@o9<0`*YU0U2;N`GR`F!j zA=A-kdLm!YFls*@X*hEAYH!iya`rswJr?$pwDgYyTOYK^Nlmh$Et7TL?CM~?HV5YJ z6(cXA@)pM8LPWr6w@q_j#ZHDMz=K8ybv<5w;-*^0Q=$kJSA%ev$@)v;Yf)!eH|Uf+ z2A}H@znLE@v5#`#hI$kUnp=Dt6o?~rI%^Wqrub4}BI|a^_2n>f!Q1L=&!i#aysLT( z9QEF{=1hLY-rjGwm&Z6v`v)-ft66gctyGgMyNr60$j-Fn?tNuoXm%2f8ZUeR#hJPm z(Rb+4Y(i6Nu%nt$idFL#7Vz+x;SAyH{3o;!)gBeg2GYDIc|1C`uA0Htq69wLw=Hj~ z!Jx0C*lRgpEBK>^S-4O^{U5aeox+K#J2ilh&B;_Eg3mH{3Ubq%l4**7-kj5hZtIeZ&vCWQ%n%xS*IZjA>|Pfz3b 
zw;$Rh_*af6VzF8fT55J$`Sm?(di5Lj9DvMhbic4a4>0ds1m1f0X9@Twv#z>;-<`J2GcuHQ4O6}3UFvmRbP*Ojy zhOCU|HqAxtl3o9N#al`vsk&On%v-~8i=&^sKfhi{4fTCShQ!F#WxUWlP~?P7hH&zp6C3(LNq!Y4tr*?OVL96DG$? z4v%j(_SwQEuM}t^vs7PKlbZH*DY9F?gXIj?qsy40+RWz)&%7Hwewk4pn7OqqCz$02 zWp|JnQo!%FHAyvQq98Ndn%c5W*J52AH6>~OlmX)ENCt3DSfSS>3-3P zM6==q20&VA=dXMMcdXBP$Y7eSuFr2jekie8-Cb9?@!faaVIdRh=bJj7-F*2u5Pz|k7N;ID zg$gp&mB9K(HFa63EGpYUNPZ6bH_;l*micr&`VuN%#;rbvx&?c0_PmQ@g+;}dH#{Sr z=Ov$&g+!)>#D+0j4CVxzG=W;M3kms+p^&l20!@}X9|YfKs*z=>K2MA2VwUnAS5Z-m z@)Niy`=;aI#9DFgP?X`Vc96i{tOs|=lKYV&?0uhVFdNhy|ouggWfpPDN=~E%sPRJG5Hy76#sCs)|7PY}zc7E_#mRg}e zzS%Dsq3XJF+!K6)@|HFGU(mhTY2!Y$m6^2gHymd-tK5^LnN`7-g=FQ@UF5Bv#xd2e zDZ5ZB3-AN$+eP!8|J%U0@Xj=I%|kXooS0f(~=Sn*$awj@m$46#UAM zEH<3#fX~{L4O8bLPExg#p7Ow~w3~Kc1W8CqIWCOlx~RtTh|AIJco-k#m?f(*h%qqS zT>M(uzONAvb6%&EmFnBrxVZI^)(vebZgW|Bb2;VY$>N%JWNfFf!KB2F=nlOSbUCHd zsIlba$qaFw2TK4c7dvadDe8K`A$imSD-oY9-e5dT?MwW;i;VRf$q zi;#`iknT$z|G_DtNR7m2@<(ri6-ZPWY*XoLV%JxZ$1+9jR^{~t@_8KMLe~Q(C8tb(L4Z>@9|_Kkpb0<+qdL4XW<@*Z~jy$Xkn=_^pYCT0mHl=hC? z@y-vGh2=y9ES=oytUkN)hzYx1pV*VpkCneSCku*~o2J>CBIUSm*C@ z$Tbw~XxZe3krGFlK=yft{QrC=^dz_8BM(Xy|Q;}X;$FYAQh5m}=~*idT$$;+kp4pI*^|hlBM-CDKJN?JQao`k=F`*RmEOIAv278? zi%Y~4agXBG(t$p)S!kM&ihD{l^gLhiQ0tkls>g07gZWwIl8DUwCwc42jPF#)Jw+{b zvr3MBDvBoS`~nLFN>e!eXA3~2au}H2ENy*AV3fx1{?-;lkIWKP!7?QKAYJW|l}4T7 z0IX(wpH+vR5yj<1R2MvX{Jm^@r=OUoiQEhN_KK}aYvHx?phHN$$KsvzgU8-cPuQ&| zQGZlbP=Jy+c$WPd^M6-8tn#SqC38*9_Ce#uU>f>t`=P6$f-oJYBwL-AM~l0i)8Kmc zWe?(~v@x~E9Lh@OsidRQS(db_70X#~?FP$~7gHKFl|y5@f7XFo0-p65^_iBRji9jvZDIH7U%O|Zg|vq-u9S_|3NCz+is z+{Bmid*JFcB0~vVqU3zWn#_l;RPk>_>EBDS_@~}*wE9;SyBz2b>aSi__6XF<5H%ZR z1zDo~Z}AsfV0v5Dg1V8Cm}jt|b!=>zrVM zX)SW)ke3bvqXY-hqiCy*@{E}5ZIXvjZ;_teDa!W}^O#A4$pWl>MEsEe4fm%zbF3v< z-Vv6!JZDIMUi<&X;=r#RYU|wE_wtjrWJCl|?IVx4i|qH5c=0P8XOg`)MdJww-^5i; zw|ubcoKlv`H31na2ksM!tr&oANgc_0e}DPH@rvs*ww=RhSb!y4zw6^#d}-A6_Nau& zjf-QX$B-u#h1ap`BgIQX$^`Va%n#p!2;@jvvcBHVGvD67KQs%SmXNs3Q{Rfbr?+}L z09q%JZFt1!%5bUttQ8A0%KX{7ViAgLi0*oDqvQ)vp8J}MBc*SAdUm#ju4uOPGlw8i z#6Eqlj`!$}-BiY;sXdt)$f4R<+e7{-NqO&t~ZKJr^=TR(!U#BtNOhA(bDC-+WK52;m)nsm-ebJ&NXK& zy$*}bLQmKtegizEMLqU|f^IzMk0=jXu}bLUD>>%dkQXv4cBo$Sd?}pgq7q2$m|@rV zJm`&pj1}94>DUdV-BN zyv7oo63FanQB~iONyt7ke4_l7EJotkOxTxNZ6kmE?5EKRX5kKtB|+(h-RHDY`4(2^ zDj%)7_s*PrKWXlOc^1fA5Zu(LaMvJ*I;d?7tDC#`I@cai9XtfrHP6u&WLNwJ9g^jKqEUL!4Nxg(Ygj#zuw-PdUs8D{v*439Mb?v% zJx1clA-U~j4)-q;7Oy(RR7d#*jyHQ}7Trr0`>DCPhs)FX7$Np{c4-uXf)BJMB_%;l zu5zPdG^=DROq(fGGG!Qg(Ww9cRd2Qv1(C4e0 ziOOb^~MXaR>BgLCf6ZZ@b3uod|8P-UTAM43ZD*W=*L|iF2KE zUMvlm0c`ov$L&mJwj!^S+<&&;F-(*e%^cjFeDI@sD<0`lt&J)-G3UM=6V*AY#!5C&{;0R1%y2+(^^^1Tm{pXxAjM4`k{HO5&G*Ef;^FUj76(&hiPMXD z>2DJ2cr4x)bJM#V=E32pqQEQ*Jr0LH4XU^9V$v`oJ+6tv^6fe=m2><~Ys@o}kc04h z1im?_4r^P6s?(qi^tH3~vuR>6If19U%!aSk^*kOHRy<8TIC{rx75cX9QsQ~C@|ydJ zC^a+VwjZ*CTL~vMbB-TV#Z_4DjFvrSp z95>SQfLC=D+M?aebIs@wHwg8m|T(CYc-!VIGtc$ zFx%YwppO~owY2ElX%p~$_RiI*Kdj~N#hTd<3nF&hePCw1otqLMQk1U|4N)0g%_8)# zwVR};Yk6%gdt2@OGe+Oh4{-mHwwuhYcKTz)gk*fBqyM`wf-rb904Sx=P52yPaa#KY z@y$S1(fGfLdkyA9>ml=>-M>SSgARhS04h(hZLnRDi&B5+MZ7z^GR|ywvT@Sf0IZL3 z?uF6n`|&ysTJ}Y`v1_g}2NxN~Z!liQI+g0mJb(rU5c*|hP>)nUwU^d?tM&k6;DAP{#%;r$aRA-G^!n7- z&T+hceSN6S2h3zOwWpCUg9PcqJ`?9ZQ=b9oN#wis7_lo65v}S{ogF9o=+U>gIJp=4 z!u*#&f}tN$gN)PE_hzRp9ANPb6>Bi9?N7V=1R;gPj!a) zGk+R|0@hF&^eCIu&eZONofY!kS!Eq!B~BRcFIHfL_2CGqf(j;LdGqx=96a7nm_L%{ zLg!A4@7N$?pdx((Svjoov*HZIrs~-7tb!+9)!(1*RD{OqaArI{#=u~oT(sJ3Q2&&T zrUKm3j64$l6V;0OR~8?gW(%sUgPqf%WzlXcDdaP6re3S39q;d?+ZpP^j0Rl!-CC)_ z#&NVe6B(l<6YYb}9x$X7 zF}6zSXay-jvDV!obCm}RaRi;5oxHrqIlaa$+N?2yO<+gampTPb9u*~kf|O`RRBiH6 zEtPw0I?(U23TU#{n(9}S`}cm- 
zk7-&1tJ5DHA$+K3z015leC~>U#D4AJY?bkD)u$@_CuG7qHKvBWwr33Hw}@S?J)GY< zfL5~_?bm;wElZJv?9TTKUyJ@Wv3l#hT#1xQ?&daOgvzz8FK8fbMcT~y@uYfP_J`vs zblM$RDfZM2OvcBDH!Mh_?!&|o-BPDftl&lnukpxMP&AiA)sea3rK6`s-VGcy2k zSqQs!G3yqv`jz(=osQ2uA;bb@9QtxAE_z)%+9$`K zhn!Ss2+&$<yPKyaXcf#ccFMpcc;b3F=VKyTBfqGESYcHGRNYvLs`!40teR$?w;BTZ|(d6%hBfT3PFs10&wr-314^E zT^)L0sh}Xyb>*N=t$F=Kw|$GSAM<-?TrJt=rrpe#A=xXpGxu8k2u!|UMCoZx9(trW zpj^ck^2d9xMZwRnTRlFxm#0O!H&*--4r}3m#o=dUVr!0yh^CoKbOV{wbE=Bk(pe%+ znI`+NT7?O9#;Dcu?{b<)o*TQpSx@p@UU8yiEaJ#^Ku5~0$2@!7>k08Ptx0W)nD&i) zq0o#&@s@t!{0ltFYPnb358iIDPt9<(E!`uK1x|;``C5+St0eYSur;lSJD$4{CXY*B z9ohFM+UX{~oyEE7I5F5-8+s<;j98+ux@3awF3DzJIGyw(3?A#*&+O(I-qQ2%IuO|5 zelA6jX_!&g*Dh5n{iDr_TjtAxRcgoDlS54>E2VU`x9@fcOeD6cI%W*9tWFDtIS&kS z%&e=OtQ4RfrsR=3Oi_?{&9%=;sjR8xXGEkcCwOKy+8fN^2=(U|jT{*Ar!~+w5hc~y z)2Nz6bdu#)(@(9E5%)Ro*uN{aJI};`mxZ6~JJW%=7M^{tW9G>2B+728|9`Rf)=^ci z+rRi?2`Zwbh)OB~N+S}I%K#LRkZuI&md*tx5+W*sk_r-1(nu>M-62Q_QsSaJ@AIxg z_xYZ2&fepW-~HV&?muU&v-e&v&pV$PpE>7e&Lu>oSK#*5=;D;-Zn^8}or*aE94R^O z5+-XtvhOlAD8v8=Mf28K+5h)mg->;R?7Q6usJpgGb*1@iu1{@rgz7Q_@3KAcqLGdS z&N!8@mpIA!0;QLv_on;ftNoF&+pIFVr25q~wkENeF&;<67FzlQ&d-+4nXM4A!phg3 z^1pf9m~)A+OjXH~8gI5av9P#mQ2&0c{63KiD!L;*$?0QZh`b4)t-K<2_Y6b-42P0<;2Hs|-=U z<81Ueq)aPE!!lu`Wna+FW5?AnKztV8QF!dro^bt60j4fxg5ZIn=u9t@$`$2Q9hjifXMx45D${GERD`bKj!V{$-cRv?zelPw} ze`&{HM(KJZiDEJD*-ObI!i!P-!8VFHE2i~4lG~?;klR<3#Gem&eKKeJP^AO6N5xvb z+dT2f44!JH#gezWRUosYPvzcV&q3u|e_<|7b~*2-D{#a0rE1>IAs%F=O5LvlajQ6-!Qc1TzSPy`% z7h7ZJLXPZ@;szwr+mH+8h~D#`>(V|OB#Or;X2R5L3l}NeeRStnY)pWIpYY@8Ki!oM z%D|2bpTpc|Tb{NF8K|ijBzR^LoS%s9X`BWWhM$qEgKqD<7QRQCbjMS=Sqj`54!Gw0 zyw69~=E_fw@R+vxZ-+WoOIuM={HrrBvkHzpBXS(#$8{=6R6%Lp%2|y=}I!F zLsicBN0<|rD6*LA#|r3a)1!tY)V%dy0nomi7#w-KszRsH@wl9$x`X8U$n#67E1ktrthPzdRizKT&dEsqD`IqEZ6EQ4&}<~cQrE)Ln{HjX6x|;3xS!^ zd(XGQH)@Z?y)Okk_T$$pvI90=xlg`&l+fe86HU@7&i+^w*~V6b@&g5prN#7muJoCi zf#QY(j9yIF35pND=%T?7l@YRImmx^W+}i|0dNtc3FpyOP93u? z@{9^?rR^?uyQ#?XRASA=xpfR(iDMA`?p#;b{7h!%_{n?)=iM)SE&d|+GIFywRt?5p z63|G=vyv8%p8Dv~OMj=kZMiX>U2@0d`2~49YxT%do`&8#7o2EShKMXlZiab@rfh4~ ztVHwxT;H1i78GTirN5)Dc>8qV$SXRM9Mf>+FT>)jO51(A13?e4>ku(_U z6pTWVrP|iK1?pv^woh*O5F*UY+k4O`9$A({cE#J1tVnDB(0Ogko^*rpdv94Mi}OX# zd_neolY%Nl4JF|uvVgh&BgH-dVQQKfbW%W;RWU}_@)}AshBbbM(IoACfGWu4Ezh*W zH5!QYQ=d`pd#G=dc3zcZ-<5_mS5w-F+!RXNz{>1KnIREvRmwJv&*}Pss*glu<_0NN zUuY80xRv&;I94ocJQ`qi9^DA8QBN+TEV^N$t{T>#5}`r zO0OtZVeX{D`iMpGo`JE(zQJqx?F{jjbl7HRC2pnl?g*Tc|y zMH1vQEAGYJf%P#vlB$j`#`+mMwj$$k#!Z=QO%Z44Tg3U3i&}qRsqrtwLNubCh3B&- zenj$=otr=GB9i$$s05i{y)j|%F(Y$#h#zUaoN22TEKS?*N>WDhJ}EaW(TvV9@l_D5 z;GKxj`-eHpca~B^OLTWvH$DXYw65=GEYy3Sq#6$L5VJG=-YhrabJ=C#^(Gqnrq;_R zxG6MqOmDa^waJ*w4S%}T@*Ff9e(aS5)iIQD_}PedM@fb(;fq`|4S;rv7n76 zht)vo_zv?%5sb&S*AfFpH>ITJk>s(`7!kTD%u6GZL-?7g*)}B@! 
z+McMl8OOgXn<^?Psko#4QP*eJIW%q4@f}_F!dYg5u54%8>k1j#>IIE0LK(Yq>Rb7m zomy^yrdq=;9p)=KzNT{bnVscwOp$HNjNZ)xpDXd{KhJ%7-ptNd;9e9F598&rO zms-*E=e`5ccE{|Vv7gAZsASbzuCTJ=>nBe%Y<%%sToy%qoc5+|v0?(XKzUj%-7`_Q_F` z)ofSAMC*0eoP=)qu#F=>Y3uma;LjV`n~BPOC}`dQ9#`En#_;2kqLI4;r=6#}Lct|| znTqa{Ly6?w+gJnS)u8G3swt4GsCy+rGm5BF>O6e!oLAbN>7|Bp) zW`Q}M{WWJB#$C&JTitn8(Sg_w;|LU^?|Z~6k(hS!t=>WK=Qgk{X8CUbhC z*FuA-vA`)7K`;}R_0b_*oCs`as*0Qn1M8Z;^20n(ntoD4T+jT(54@TWMoyiZYA==R zbraex*5aOVdhFgk%C#Ew)XMHoGtXxLvA}pb2bj1&!y;X9zB7yLkRNtE5JDF_bFXnl z&I}9)mOzY{F&mwUWiTR>MC>fZ28KyR*akF0VVztj^}Iy@xq-H~s_JU|cjWH6&PI%D ziF@~VU)tFhw6h);iWD5wG{Jerj@usn(%AF@VpQooovxXCe36u>^H2p9nP}awS0eFu z`W|J5^=3HtH0!lzZmM0H_|>%Tl7GSOs~Bl~K)M|0TQqFyac7ooy-!dZrSUfE_`!|6 z^<;UB2jOr`NCsQ<*uh?HlB{-79oc>OfHJubJrE*@+R44xhPIb>;(?Rit&pQjHUj(5X0`;rvqtbr25hN=$qwJ4;ad zZtBf}hGzQW+fyy&buE(Id>T6`B%Wl2muF~QV`nlnwG9m6_N{T378V+Kbov-i;)b~i zPh%we#Sv^K$wzNNqeQP6L{C0y^Dmr%r91S1N=h0B@9Q`5HT8MSQ36vI)#jnF+{@8| zqt_AXaA-xud%zV@KXlu~^hCe!&QAZVvU9xNrpx*DV44IE+2!Pnf(9v}rDEl>jV+G& zVzBA`yyX&@S%XlV>ml?yg~?_!|6~gLAT+ZCv$G58`2#VL$-^Xs9Uki1Pla_mRhguh z@tKr}iH{31Tjt%d1D?zH*uiiZ-SRNulyg(?v!_qV9ZSDyaw6bQ*$DiM2%Nd&L>QH# zsjgkBVD}R*Ui~u7)AZEhc@k0j63H*c(*4UnUf=5Z(sgA+E-Zmv-C51E7(KjiP~MRF z?4#qv$z?JIG0jBzS8q$X*zr5PoG{ae$;MZc0T$(?f(BDkVU>eppup|tW2 z=-dB@$ghns8|@SNJRH-5U%IB8ot=G#jjgtjiG@YVvNQSn3*N^#1Wm_^S({PGK>GPo zg$K z8_liz2I4NfQ`QMJEj>s5-;eTk3bL;cg z+SF0;OBK@ay2o`RDX+M}Du8_D%qezUR!oll@qm~M|3feNFLUKI25>1UDQL$Gy_V=m zs`1i(KVh2WSZxEaMG?~G3GsKH_$uTQrV1%N+Z+aF?;oM>>)`~xNAZ^l^Z9chc#yH! zQAocpm|ZhFoqM}xDlD)Bbkr0R1EDS}dQv|cPeA|cq<+Egv;K1=dn;#f^Vv}t5Fe)! zOF43x=@7y4`7F<`S;mi&cb^E49JJ_qA7)e8wr*6w1*Z?WKHS4~`avUE&F$J7U=srP zeMM;Bg6c(Q6%M#9g_0TkeV6agAc%tI?|`3d#BJu;tsRW^{i+;Byl;mx`xz{o300;%3wzLB7FQl@}G{$V?q{dpJ{yzZ(z_m zCa1v}EYp+rvubHfp2@T+f}UiY%m2oNwd_6cyNHAb$(dRy;D>za`5K>{Q7ZAJ_pPQC zI(cfMYS3JMLhZ`15ZTpl(`uKxj>~J2C;L~tq?NJWEetrrd<4#Q`h(N@vqH0KajtgS z0VZAnxK2|)`>{p?VS!IqPa%BKPtWy7G&>uK!gOvcIp2f%jT7Ec@NZgXYEe_GV-9(! 
z&U)H@x8C1iKr)mg3j9L4pAk_^k$cs+V_@(bXL$744|kU9HH*9~(%hWf*%EmsNGT(P ztb8~l%9+NSmw29&o^(l}WJ3R?T?3{Gkt}#ze*nxc|Jav{r(O~(hl$l+`1PY#QkOF8 zCr>mdlPjE1Tgq@nnnJWct&P3%4I?#mA#%(+>&F|GESG`40MoPS3e*u2HXfYcZr`$& za1B&oc`Aev5rXMX1{DXa=*>ZLb1T0nDx+8AYIX7Ql?qS1R|EWq#Cz-Dci0edYJ@Q* zT2|)y1o$Q$f<1_D9N9;t64e`9ad;fOpZBxJR#=k zrGI^T$acf#X$H514(cC@zKB=wkVvYmdpBrJNd0>0&wlXgTF}hiYRR%}dQaM@U#~_U zA#Uu#8MV6>A?fKu-5$CRdv?Qi_{E;b)G#)2x|2-MeuF5gw_^22$vv~d0j0No#rTI= zvGzZk2FKxbfb_@Z8@o}>(qn_^LvTV?9{bgR;?bEgIkusv#}3NBspwwJf091I6Y<&q zG6m-RgxWy*9tCj2>5u{jN~RA4XbH7K61tMb%&;w2l{pilIe+#DP&fJLiV`gf#Q`y+ zZfO-7l{0X{ZklVwunyabT#nXDabZO5w&l6gJg6NYh}t~~IyMP9JmK_&+Si#% zhj>aScuHqmE@AYNoJk3*F~vcDYRpm3go?`4U2Vb=q)AnuvlB3or;3Ifx&BQP(g`C& zHT;WtsVs291>!Wl0>PJJxo2h!+zra4^R60F!XsM-P9L+)=1o*wOq;yUTl4~BVUsYw zsOc(TWA>HTLJ?u;R}fJ{zXF5K@EbLXkhI6wW{Gu^j`ung)T4$bL;7u}Lc~{nZvD#A z(5M~b`7Q?UNZelD_Hi`5YCZDU#$74zvf&{@b+Mw7d|LalpJ6KGHx9z5NgAUs76A1c zOj~yY$Hti8NpiIGdSQhsK)v+PSEHql{^P8OxH5#Do!!#%veZUSQqln`NMoGhD#STv z9=~e{cQPEm$*p4eY3o5&S4n#Pue-gFy8w^%%csIT)TJ`BlJMYR8pzXVDvTB(csH{gv_BIQREGTX|fC^Cj)NyBE0=nmqbT{_${R>K*mFjIdWme zy%4eEa*W!PsXZAp_QX>Y$09Czn{xI7Ll!j4`F6t!{1`g=>8*U2hl`Hqho1GbL_hrK zd+ft~;c=d5{WwXbSzxQjsh4$TM$f^{m_|6TLKv>o+<*R9qt5Y;b~$5utRx8FH%NZy zvZ=$YJ&x;M^F?qRgihO-#P0sW;;bM6e3_^{qny7*EGM62?uClNC-6(K;2aML8T7a9 zZG;RrIxfYX@C!Dx)$Lyn-*Lj-*+F@Iv^yLAQG11i@ZgKHPaQ9F@&zaF$sp3U=-mGO z`;CUYSA)G*zG7~UTvL$=MJO){?`TFG@RKHNS18XNlqC4O%mYO9GU_FX2LLZ~i?zPfZz^1bh z&BP7?#S*7=)fUcnC|P6f5$H)B38&|6Eg;7W40<`13&9p|Zo+d+8=nv@9;-@p6iipu z2R}TFjTFzINI_Jgep1W(6vFT`bT;AxEpC_87QS42dk+6dN}`wC29&F5g-KNm2M1(v zL8n(%ubl?_A0b$X&<<67;aKQVzh(+ZJ5$uL+KGJyrp$`^E)L_Vg?CvaZDa1V-P-*J z8zTiILHr_jG;6krl$bt?{ZHefF^|h%jEQ7u8Vyc*zpNl|y48$?NVvTR~t~>^>-*E-M;Q0XjiB<6q9o~w@4xz1x)0Gos z(}vHW%fIeoax+|mCFJ_4!U&k(Wx|_nvuQca@2LqCCDwmjHZn)Qu!ss13NBwv(2;?V z@jR3;;A7Ea(6>E-^?e7)&tkm1;reLG0idXt!C8eL+sbdWzyHB!aqtN6?Rp3TUeT2O zP%Jilf$-ol^pG_4P>hQgCv4x`Zhum~RQ8+NPanh>d$dCr6`b%jba#Rj68$Uy&CvQk zJV^e9^{LBtG%(0KJzvJPiUZksshDf$)ip#FBDb(&+_AtP(vGQ)!~!=( zXS{!M$NHz)olrhMSz^eY+)vpvd|JB_KHIYO@m{;~ZKK)J)f$XPIYg~FKe^CnTM?G3 zaqOhbC>2eFsH3e-&rpfhBAZVZ2R&56F^#`WL%MXPKLP zt(N6f-LARvCA<5l=7D8Xyw?x;^j3O`y2loO11`U7jBI|M4mncc^P9zxf(ZQz20JwPU;h9=vFn8O($k1v9C)_DV zi>wnTvQC=9sl-SlLqn#O0oS2kvsiClG{EwENq$X~pyNqJE`KZEMZeq*9@Ltg=Q94Uh%GNp$8P9pX)MqlU9vV>roHN|1q8JQf5P!4qXpr#O3l-dBDXz^DLbf9c zr*9%oaNT;Rhn$K=lHy(M3Ju22cUJEmKIoK-KJAyi(_WJ*&uJD(RVNf%|M8p;dsHgu zo;8b%EX0aT)f$%5)kH5?)%?=rwYy|3V%DLKdh3G&jR!BB!W{cCfjuz+Je?U*MgG)3 zpWh-;tWLO9aX`WsbFaB+742hkBQeygxz@Yd?~Ji2M=$$88yWuGK^QUflLjRoB#-a_ zcQw+vx|8LPyDlY|6B%@`h)t3--O}?Qo}A=h&>P&TyWMOEuv3=d&RxiQ#$O7^;IEDy z^GqER)n$;!Btq(R!;$ z;t42S=_5hIA98bAKhU-9blE5IVPPkK%5mn2gze0Do*t4vvE|D^?h4-ewUsbhOhWpB zn}Vse5W3R&6~fZ6OPAW6_VakaSANsl&+8!}>p2sy^39Yi zI0N!WE6C)Nzan$Ig(c@#3~tu!N@<{el#0K1r}!*Pf#VlKx|zY`w}1 zasgmKvzvRc$SEI#J(|nHZ!5;3&@s~plVjlDWRlYl++WSJM=z_ueOmQb3B?vmL7Ad_ ztYE2)W2*yR{k$-$vKSor5(J=2(1|XuKDvx^WpQN{!9s(_g@(Kwe-#Y*WUUT*8e?K` z7}cH=&%a>{U`RS@BNAl1{YzRhcrO!xu}aM?x5q$*{UB?H7hkmPP^&Zkb~PH|Tk(P< z%4?y{Vdbk?Er`P>rmQfqE%`kT*XMe9kc{&LrH|WmEo^sQ!XG>Z^JL-WQMS_r@Dq?2 zP817#PkjHZ_CA#>0Rj90$ywMj1x~bw&3#%CZ>0|TPUEl44cf^XNK`$5bioj`OFNOH zlVK-<2~FS*T2ErF$5?*(F3gV=dh!jIQw=7-dPp8h1M6XrgGGd?7(?Nwl@(4Vv0%vQ zZGJsXzqn|Lp(RR!|Cn_)uV7@3)|7V}7L@^X*?kP-pZbi`T|t_Nb|6&6I4rwNbQCsl z0{z4k_;h!AAjmzcN;zh@f(;wob)dY%xG&e@-SQT4Fr8$%9;Pn2#LVGM(JqUg_tFS&W+afx6$V zjn-S!n>Y`Z%o7KjY(}SA3N&tR%awTpzEdqJBL5mw5OK;-oTFEPeY46e9qHDwqd#9Z ze3xY8K9z!br*JATfzbh;o&Y@fue1;O(}^`pZ2r0y zc>iAQr33UwHT0a-^4C6|{E0#Zmz9;h8NV62@G~`&VMBs?%eG{`=FP0+(i^gB3K_ak zjuIDbc}TrYE%y%obSK(vu}Mf~V|8?Lm()-i?NdaqV-k}`yGZ+)t&V@mOUNUVF&6mv 
zCym<}COpoZlrTP8n1hw(%lTE?kDg&RI$F`Qv81v$@# z-0W{@54kyzFDMTg@-r*73LNqc{?td%8=;aE6$Tt^M;cCJu;9l}n&_o?Hs{L>&66P+ zT3wSY#xX*_#jxuI?{I$?=GP3VAfz#NK8ukKAR9X}aIwW}oZom8RbZDQdY#}y*7s|( zced7BkP=P{cZo=h!u4EEYz#6ZXdTn)wV+6S<2^P~H9&d0rix zM><50&lXK*^tw+($ET)p(o6Z`>-L={` zTQ@gFf$6ob8dJlF@D=87Pl)x5T=+pIHZT=qPmf@t?6g@d|)fGuI)zVgLhurKh1cJ0QUIS7(mEUU= zc+IrMd&bdfU9<)&ay(&7FXRXJ<#1UW9~hXFf__gWj~v*mQR?pQveDXBJ;tMILTO@< zK+HtIhxI*CY8;3o3^cm-m!j-H6X)5f5X?A)cz!!LZm|7RBhN^W{X?+C%An`$8aOtC zB66dnXH$n&;Z}%C#sxc%bhBt3C|Ty&QGVDg_SL*+XW-n!;vQfA_F2));#K1!qa-X zgv#cF=cg2rMK3*FU0u^*f?L-_H_PT8@N@<|s0=Y>B-lt%^k(0gXWVtTssp(SI;Yl4 z%swx{BZh^xuDR3stbL#F>nRf~TYATHWSc#BY%JVBV&vO}735|CQgp=3V%36Om6U-) zJ!;6H$D%+gdFi>RxM=j2YBU^u{Dm&f}DU&3UkJ9$|PMzM5IW16AC*Gd8CU zNHbhPEl)GB&P*!}F56 zi<0wO6O!d``d4n)d8ni-33_iV5mUnyCAXI*r=~2<)*O(S^V1Hsm^&K}iwFocre;?u ze);ulBg~9+4ykim4rHvo@az#6SPCpv0uA zWe5D7+RqP2a;RZsz?ZFkn~>;f6&?io&dlJ@*D6KtbQ4OQ@^QGQljcvHw?y+6i}fpd z@g?Q4?dj;H!jF+XF568JWjj=^5qK0y{sjZ9?StTk%=?z@c312|54qoy@vt77q`x=S z7BZzAKgAakPjbtGkDIHitJv0UYw1gyYKdG(VA|^7=Off#)iZQYIF2`_2xv$Xe4wbE zZ+7qRA5`ymxaf942PWTBzO$T}iDDn(eEb2&1)vja=dM)!M!VzI+%`eyHtgrH+@;aO zU6-|w7sVuxIxN|KJkj%%c%ReKOq=)WfKdBLdVCog?~U%g_ci3HvnXLnQ1uuEDlUns zd?4Zo{KisW);=3UK2ZaRXW{;}CIav7h|Z+3mJvOFLyt*B_Ks z2hxxUgdPf-;(L`M93~OFR>d}g0faWeZ=K7^9KY?^m@0@Bt;8%|&^K<#`j zqvYF=I^!L$-5f2uzD7`hkmOR_sR#88cVaG3&(L9_-fk9O{nm%fN!WF9z=G&t;-%Hq zS_q1#PuS>r+C0EDe*lDR!+^8evoHp)xli*TRQXhCCaxA88Hx@Yqz@lF75;?Qw(ykx z)(B0Auq1U}yXeSG0uK5q<#4bdX1n#Gg{z?l;n;BKq5GAA#*q7G7H7XRC%o9mKU%Wb zOuMl|KkFu=>(nN7Wr)Y1%nkuh3X26kxyyw+Q6@SuesFWX-E*Jh zdYvNa*!(9lANx|G`aG*1QdE1SJb^uM+j;b_DoKLsc*O%Q&;ty;=rKztpRqXJl)5b; zLtwwOdA4KJw4;=7Euv$|r$QAbFZKP?D8OHVx2H5gEeKSq>~P5eRH-u9aJ6EGCzkt< zVoOFRsg7?UTb5{CJ9_YmFacZ&Q{<(Khpo--?##|V6e_s|WHjI#yV05S(8r~M;6Rl? zkFLt5#F6@;t)`()`pqRFq7F7{AArJRBQ83c4lLGiz@C&%`W;~KHvp1^Kp<A)taA+hOsw8XX_Mw?j#uLdlY;ho?k`ouFhBU_&7{%=BhY+h!9+>RNa#6 zIl=%%iP15DC;go|oE;~c)3>e96ID2E17Jp>q3=GXw3%YRnV0FEzOp@R+`b=qu}Jt8 zIR3a$`Nw=6$EDCiUQ^rh00g?CeB6Jk#A+|;-h8~`2o*_iiBDV^sE3PU6y*Y29IQ7g zG8LamVvS8$nqaPfXvalt*1d(6PZs>I=g8J#3**MuIk+MFvv>4&C3ogSgoLx9YI2ZS zUt1o)pCQ5pp>RWwt0wFp-VOLoIh+}#S5bq>KlnoL(tE2EZrDfQzCIQnGC4lCih5^| zpck4lC*aoEf8$jS%xyZopgSO|GGxiW?Sh~vD!0R*k8YrVj;KPF+9>J$XZ#FprHy|}pDbG1hF7^fzA!GMcSy1AfRkOr{TPx7Me?ZpJN;_q+5bU)rP zX?(?>^m664D~z=k!e%>jk0cNC>3c7CWi2*~h8$nKAdTD_WdzXZ#FdG$v#`XP_R(8V zY4hPjhqYo>+M>8hg?dmLzOJ!X8lK|mPN?-wqMx1euEc6*93KHuMNL$c6fSpvs)&KQ zHE_d|BdWTxQUwqm&65;z`c*tzHou&S^qzb%f5ENh_j=D(*iY{B94LIb;{6CH;_$T4ro=($~o8vEaY_;rqjLOcvE7gJC z?HsyC3tC2a$t!@jiF)0K$%lb!H^-DK4HMzR0n{8u!2Yj{hP$NRh?NkZ0nVi_UPR>- zx462~QkK{H!W-9-U=7$emwp?WU)r7%qC;97Oh;yvQdcv|fP!>RywG8{ed2t$EY@yG z6y_)nUka6Bc>FcTtmm_$%K<}6X7Tp|;xmqO*Jg+MoqP0Tj@nF%?{`_++|-{B(5TWV zTL`1z4A=Ltp!58d#q1-f36p;~H4t1G!j<29x?S|BN+=KY{YT|ycjii#zVw3BNqYRz z6Hzv(9-)tOIXLsVNYuXDkfi7#VNL9LoVHoS=SXH*h-#W>2}ccy#BC$VhIqc~~$yHu5=qH3>hmEhf%-0reviI4hLd zl->gDriqIyQb`-E7#W|F7TRQtOtl5!cbTR+%n=^s83;elTAFTEe! 
zm12qoIhgcLo~Qzp$k+g~A`2?aY>%fW6Pb*Q5VX-wkkL%6Rfqv2g3Qg=MGpuKx{da= zNYlVn+Su|U85TdbNz+yja~FTwzkldRxXX_BV&qUzK`9E{knH*8?*f_bH&D-1dF889 zzSkBxPd_Z9-_|ecTusd=vb?;TmJ0+5m}em%!WP;3SVSp^%~vh^xZ=X;$VA;aR80xU z5v-(&Kz-ezbb3FDZ-~UkWUE5Wr{|<%a}{ez$c?04(?}ajN!eZBqyhgFa$&b9_wiRp z%K7at?^E}C4tdTyIM0c*MO;P))fz=HA*4|iwzacbZs7Syx0G32|2&G4)|(C&b|-7& zT`5R+jN0BxIe-MRhe7KVgWg2|!CP-SIJjk*ugywser>5a^@#4~rm`rI&Ub73aJ>wA zUyvQdjyzWMG8*8M3;E%-@I1C;aMlL^>Y5x7kcnFuD~AY#yM^t{IXkwyDITO|+@6%N>dAE^um_3dm06KS;Otp9XUmaLd4yTM zqnO^dXeyB}TBerKd&CjcM6MAPf3KZ`4{xsyF-}E!NPYLXgxcw!le`J?<%c`pxh4|; zW+V?WB_@x!kMJnB_GtMPsK4~K7gRu4+n&kcOJz87BMg9wgixAJ19F+Xa91*hDdi`% z44&4g{;`Je9)_HYTWbJuGFnUw&Oy)<8Ra?i@rvZh?U-@^^_IRgsS^pPIzw)J)E7S<%A_xXy`gfCHMpl&W8u6Gp>J;u@pVj5ik9S@!sqS-4Ztk z6kq&3nR@rH*(O(pVqFK_^L&mlr1T1Jl|Bsq)(PdEqf`KBwx-kxkMVC^n>|lAxV!YF z@q)xXnGK)v?G;*wCLoU!W>FNNquv)W%3GA|Iu@o;Q#`aiQ#7y(5^s8_0wJ@-EndSf zUhi~8h8+&wKZCOMjfte#q*X$wX~fKpocnY@rlfK{qj)AjMd6~!{jNzjkkl9hW`h-| z4)ht{reDUi3_<4&P%I#dK*cb!JfSuqsrHd5UZD67+A1L2IMg16CEYpOlc7hOGPx2x z>&15Sjahe^R@T@{o$fz0jTZ^nb;{ zfec(I&^7@DDdMcwCr>)u{&-^FSZshV1amM=6ax5>Lx;7d!Hi2JfhYI<*@rQg(76NX>#e>46sK*+MBIG8XlPkfZ2xIni_BG{~kAvy3-cIsD zf0&;-x_SLd<+n5FA;I!&9~+wC>uk~YvE43nB?Q5TOJ|ety_MV8M!${b1<>d}fkr_a zlE-s!rLJY7jPjk0!sTwAQ5GKe>%Tru#9%tLFu(oac(nKLoWwbBgdheDeSIi(1#l72 zO^Ntj@GkEIZ{QuBp)~;!!WGXCs89voZdM%@^|OsC1O|rvf#IgnM;aP~BK2Jh(dfN^ z-*5;dCNdjNuC6^2{|dM{J#upa^XU!r{mY^v3Zu9>DV zLTFp~{UIp-mQnD4lJO`JkJ}{&zg&5|XmB5R&p~o@fhky?)s^=}d_~`h<8Q59$g?yFi_Cbkb)-MJ0hqJ+Q9NX7NAxB#uq+( z)QAL)E5j%!G1XvD8F{>v_qS~3EUbthEp&tY zi=haDR&mVT-Z^v@Hs0B}2hW)?7fnmyN(#DUpl1fUWOaL01aKX+BeX+n(j_1uJ)tQX zCi;7-eGlI|#p%|UbD%w;qN0Njj=@+Sz}0V{W1gD^xr)X!xFlomTdhMNesv5C=xzQh z%KI?c)-C~BDi8yS1T`rfjB~g+eZf8EH}GIRA$nKtpbUGA=32uaPe7mY zIq+Eu)(3_Y9)7L{E&{+l|ogOA;i&8S@ z45ot4?U|_Jcv~+c&58uQ`#F=B%Rx0G>3NiUvRVeJ3%hXzw?D(Yc0V9hVj}@VL)?y(VF9g>i_^wB5I>}@@=4HF)T!K$4wq1YQ#NYIY%yE z48~awQzYgo)|GEh!3pcp6r#4>PZ2nkNamRXnLn2;$OYV{)DA3<3e0OwDT5mCqO~>h}b0FZbLd~vV zDbDdUU`atw)B)NJ`a@ZHUnS}S)--VTBC>Ji<&uC9_!e|fii(Rx*x&iG4G?fDT?aIYhhLF3Ox#krA&jRq8_UDKHWC5ApC@Y74n}TYnx&u|r z14t4~a7}X@OoaJehk9H2`|NLC`ECTf5}JkoN)Cs>ev&|KE{8TLn(7BM`p0V-kz&{j z`OR$oj!}b1k_PL2doK+{+Y789l2~&Z@tFbQmXTAgtZ&6_hzB?l$df@$5CRgHx(j34 z=##W6L!O2Y1MJ&?zft}uhi?eQ(P3QnSkRkf!h^Rdf%7o^c~hkcV2#9Qt`txd2Bbxn zaNV06aL|0tdQj6YjDD}=XE=VeqM~9#<{s;pFaX0n&|EUNhz0CKC}(=$;uKYb0|80~ zz~w|^JmzaEC75+{lNvs7Heor#K6JX*eGw#RpDX_-XIPG>G#7A&BaEM5Il~7poCPIV zrWYM}2bK-VTOHOY$axYN@nI;mo!Dj!!Z}4i!-gmbocblgq{73#y`bxYA!$K5%UPJ< z!LbXtJM^>0IG&s%fq=w#_6iizq<5)d4XQMIwK>pyI^;Wp$LM}3Kms695$=j-V<3QQ zK@!js75r2K*dl}-E+Tt+@sfISb~a>bYGx+GdylmCA0_ywj-qAv-!O6ULIxkq%`o?l z@?x75C4i2PKh>WflJEP%4Ot#4QxYEh0Y%gCNyQu+pdF@*u7HSDO$-$(po&cG&gLv* z;4vI?y$((mks^0PKqXA;bbPB zs|sa=`_b=at^hKqi$WUs^z;G`CM2yp*s}l#&x8hy(R+aLTJa8d@vrPY?g~(|JBcB= zF}22lOS-3|0Op)9KnSpHp}@x5|G^xj%IDGCkR>Fq>7L8c^+e@mxy_FN{oiM_I^bH1 z4Z8oGEos0wRrK&=b^^FDpg$Gu)j$$~1i;kGuxdz*8}KTS1WZxAgtV!$3{>*{Q+tZ2 ztqw)!_N3zC;uTy&4*c8yBtab7``tgfU@1?{u6N6*n8-mWKYtPDa&Th7qc$+?^Cxv3 z9RC1bTmSeFM+vMW?6~xG00<%TkBFCS$_Lf?*G)stKs(s%QF%QzBh`V%pwS%yI0n@J zC=U<@3Om{^#}gdLQzqR0dxH%tEWd6noA0%8ng;Oy(BOkL@9k4b!2e&izX1Gy0_fGS zRZoBb17=DL{dRVCjvqhnpOKLPI9fA?#3^{pDj$Zy$SEfU9DuArAK|{{wF9))x$N#2 zA~Jv%jkpYu@VTmE=zNr&rM17G*4x{=t)qkL_uSj>ErzXsOkBGUOn50lUiJ+=ebW4% zvI07(8QLzuc-v7}q@yD>&Y-~{1$y!-lJS5BB~vRLo$`0!Qhxlj$-y5-m0*o2DO`AS zmByH>;OQAK6Qiw;b|lY!b0mMc4O}{P5fg~J3OtLf7^#5z%&IH1W$sHVb@;6h!%NSc&$L7X3ZZGcD_P&8&0k^}!7xat?!So2qSY(i| zZQ_A8M3zqj1yLC8v&swxz5iN*19aq|gXQ_paK7=Ai>#3EDqL!p8=Z^~srkn^-)DUc zB&EtS^aB4IasFoOBMDW2a@>D}{{H~w0G{PrfO7nqpH%!U&-^FK(RI85IurB`!+r;9 
zpu+awDvD;pG!u*V26+f;jsX>N#m71MJVf)dEm1`ZgZMneQw0vGu`i2Q5R4PCM&x(w zfdEhi+OvlLOD&*$ce8vHVyBk;GT{jvaOqDkMEpi{!v&ZKK-7kYdf5bya)SneHRKat zeZ&>+K*OL-hLJwpz{g$J<*SJU7<|V0C!AXX{^`Ff_ecP$X;*-+3{cnGukUc7ZGQ6X zU%Y#? z!S&A+yNW`l3b?EjP`?{u`(g`pI5-A5)Bi4qB}1-N0(=8?u>W^fabzSGLP3Y%LnSH4 zg_kv$)HsaL!_zZ9Gn2=|!{hXS?$hq!Q&2+zMT5g=citKWK(eE42EKJ+jv3N6TpS&i z1uu<2qjZrylnw^MyqDq`(@}7-rPeEdul91l7zcSlIPKCfpa+c;vyu_&On*I>uS9<< zAvPfRAn%;D2ULQ%z--A39-%~70`D;Z{DZj12nEwqXqq=7&zQ;@Ohw4^+9cugDROfy zDDGa11$kCqExr}-pW7k$WMuC{pBAQeAUZ_6q2P;v_})MMnqBMxcbMOwSuZ#~1OE7b zeDIS!cowE^be!dqFM9B}H zZWeFuIGy4}ZEOOvwt7F&T83+>)zvChFy=}NHpbJW&|{eea}gkEF80GEcZTmV@JeBM z^0ACBLLCxu_mdkRvH%#qN1Bb*6lz2JtWLXFpSH zv4kIz` z2q@URsBkNWp?0hSxO=d2^GXxCl@Uq=Kq})2dWQK;??{ zrYpEAqJ!4vG;sq+B44X#(3jZH{nL_D(9z>o(3574VgeH|3PWRsW_)1!f|;u29V#?` z4D+Le1j6EkJbN2g7nMHL*-vD?8e0tQ%>h+5ZqQgL4CBY&>H|vpf7V4rqWuM;Y{IQ|03c4nP7;^<;?~D%%*MgW~Rt+mw_OH z{A)jf<|o>#vV>rW6Zok_9h@5ByxB#fe}b-}>U+r~i%zSjYf)QuZ?gb5FBJBs8q#6E zbCeJzxG^GM8QMT17$aa8@b(Y@=N!N*=eL_r;w(9iA7j8SHR8;2@Gz(V|MIaalw5O) ztT0#X^gd}cLK$9}%b08TmqSW#$E zAAYVc8DmAQ;?lkd9rR4vee?pJV<$FS$hJSKW9Li#R0+MdSVIl~$Z-Im50?rixlpWH zv>IXi`PTN%zz08usu66*F3x@hW6zng>*?t`4L{c^@0Evur5!JaPKZOX)9)n(w0l6z zBFDyfT$*1eQBmYFSnCD?$X25E8QiV)en9IP!j+Z)ZiA-VP`!mzEJAd}00`5^aVXPj zb^|lu?$-vN8u0D6n}5-3-Y0Dj0(J7=uT(YCZ?2MUaWiNyE6teZq1Pf%Q}*6oVa-B~ z#^}BYO1O+O0rU|~uph|8GKuA)#K`7i-r~e zfx3UM>2n*S#oz+-rsgSp0Ua&KTx=yj|D;DW;J9J-Pz*p$;Z}36Wch?y=-@7P?= zl~JKzr2?9Kd&CvHA23w{{?hEL&VG(QIQlPg0O5Z4GU zs0{v44Br7Wlsckg4n&Yf`K_S0-fcDAHOseydjQm=_m7aGWA)DbGsZDXSy6PbyaaF2 zE>t;S!l4F_J_RDoiBV2D?7IU?#_Z8~7up4uHnBpN5oJFYL<96A=*&BTcQDP9r4`^Q z=2k%Q^BE+f*_1RuKRp!@?+--yJmZ+4LslKpLra2zbL2T%f401#-Y>fx&FlVQ`q{<7Qeo zW{oy*h)}PVBRH)L?9RId0_Dj4duAnZ?C!VEoJ&7PRM9?31pEnbAHZ=f&rON>75)7> z0W&<^YD9AM#XF| z4h5!=C&M&%uCc>jO78c7^v(rpSJf(anBgu2vW;Bmlvcry2Ep0hB3_es@D0wC%=<`2 ze4bl8S;`9b3JiK?()(Uy;?I2piy%;@0&g*i05?NQ=X7o_TLn#?9Pl7hr6x!Rpc1v` z*N?fLUPKixjF+}S?F?}dTtQF`ftFBB3@R)TlPH^KB83Uv;Z9gl#W7CA3Svj zq^~IGn>OKIfzBXrsosCp6|xLg@Ex1;N=4TV;ouWh#T}55LP`asdkljCwH{t&2LB`? z3eo)7`;NFn9&W?lYv|0k_us>G*D8@ zC@I_qI`p}JU(N=^4XATQc997>5P6?Vgjc0C-L-5d+M&M!m3Qqx*?5KUY(3c%|Dyg5 zmk{a(jZPnz`ipF}r*_@G99??9amr`AVW_IXU~b{f)y$2J@R1Xu;_)U_24J<|x8>U6 zhZ_ZR^CZUcCusAmk_V+lR?{a1Ns36QUI4T{W@B0IkG*;h8@k(O>OXFkc{7I(Zl@e9Uj zn$}keTbs(<(h;*On4rEF-<3}Z?lJXyJDzHSw&4Vf?!2TkiUzvV0qIV(X}G8XQN_2n zT^5Q2A0o0;VVpH;sVZR6iSY9HX`D-iUL86Pw68SP#q|q-P5FT4o>jntAaT|v`0!wV ze09-Vm}a9d*hDk>zAJ?WD7=Rw1@Td?KS{t+k2(Om8Y@aaG^uRzAwZXCwpFhVQ*{2! zU9hn9uz^QUpFwvd)J!Y3IFPDF$o2 zYob0nN=IpA!V0(Ej5(~fdp>OL_xPmFhkU+HdzH?+iK@W+a^q_Oj+f$voHL~pWqo<) zdR{DQ*1qnt8d)vhtZdCJK>l=@9+O;aP&M4FBlDrGGp^G^X5Ugr=!!x zzM1O|145bfInlG*EvqBJ2X%wn#$1#{@Oe=vryphy_UPz zFe|G~bfjq4Q;%-9nW{!iJa|ZXR{T7vN985v<`bE)wIUiuI?0_+Ayz#lJ6s29Ef~dY z8jmxUk5DiL~`3G$TTzAyiax3><9vg_K1hY=JEIz&Z4N4hO`qqpv;mMFv}ViYr3R(xZ;C$vm&9Ou-cd^0)eQjMaps^`0%Vyu)!==SK=#%ZK+ZEq|U#kt7P*K&G?o9FZwYotqxilf@8w)PJrJyHnt zigqeYy5-|x+k=+wJO?c-L$^pB?XWl&eBCzA5L|fu>7%0T<*EMK~575PC%;sZvib`3f0-ZHTBVP#5apVy*vv*reW#Oo`b1^sKI znL-D=<@Wsr_aZ`H>qKvl>N)nGyWB(yYtLKaUv*#ctZ>>;;MXf$T+6HV>Egdfu{h*d zvpd?fv}a1oZiK}l5Nk8?BK&3Xa%|`V#cpXb%cR8}o<-D{PR{mZ|I62rGx4nXeOEFM zySlqv=Z)zct83ZzlM2@(q{Sn`9LN{TmUMG|yrs0B8s&fKpoSlHt8ngyyHE&Zp@HSj zi=&BI8@2meLp)X7gVIm^Oa(RulwX_`p9-;&C`;aSYJ(gebP{Y&;uh#^4R8tW zB*?$LO4Ds>v|bo5?(MPBRlE4*UQHA44xr>~;U)p#btoX6`60KD5xe>5gkk3`zqjD! 
zAYq8Wok{Fg0~ycE=VvN-;hw*<`SAPBkBwOO7ktiIhvqBIJf)^(a?wtG-gK8!-VcRt z=69D1cx;YEpNn+eJsjeGRG|6^r}BrpQ}>OfxB5e#GCJQ0HX|qwQ9nGXP=l3QyZUK3 zTAk%BsxY@@uT%Oh70zE2-j2x$at~Q|@ORC%ObKr6+#Yi`I=jF3R6XJ%`{8b={Pal) ze#)((otza3ilo7imYtHRw@;>qg?5&#CwE&epnCiLoU|@^&aUJwC8fgNH!zf)SVjuy zatAjgRjgVDyl#v0iyLxTe+9Q+h`O^*{V2{SksuOPJA8O!Xs_P$e2&QS&BLwsl>wbY zl-w5=!KXs*Xar0)>2&%FWXAS8-z_o*vPq|qN!cijfQc`R{D6#9Ll(r(kYfufgkU7 ze~Q{q-}alUP~f*rJ&i2Vz5oukigZsIx#M)ndi1CDw)e~Ky#03S18~Ws;FIZ`yp-(F z%RzkLOb9He`l6(-w3hDG&uZ%HzCq{HSzd>O-sEGQ{NJ}6gEnX&H1}M@oeDk&gq!@D zg&W1N_m?BL6E2pYZ%-1+cQwgQnsI-khniOvp*mdZ&+=vxny8y&SRJF@yREx5`t+qo z)MYHby)wrp@RIz~>Nk!vyX?A$)GLifLbjsRZTb?!s4&k0co(?F5-{+ z>^FzXw>qeL406{q6Y_@53#vC)f~oSOThC>;k2zlE^K??o(xr=Y#PV31dZT~9xR#U- z948?FK61m7pV6Xla*pf8x>M)6Zw)xQz|YvV?IF`QuEU*Sk&44;yNe(zkmzxiyyt^bY1jSL)q$xz^YGJT~cay31Y9xeI-O zkF1V=|Webv%zI8$XD2k2AyAT&*|K9)2elF{Ua>TNd%b6ax|Z*}8FW}zzutc4=E7%VBKgDcr0(*l zR4R;{l-q6Xj}cVti?CLxQ|+bR-p#GO?5Rm`C?s?kZM}LU>FmCV$dbf-NjAIguIGVZ z!+8_>EKu_3%4hs)c7!M=2?nhlMycpQHL58`11>1Q5Q*!1k=Va4HG-|dyw+Sl*(%S^3G@7a>W4gpp21)M3K+~z0u znX#Isy^R|YPsQ;=!Eq!TL%lpvU#5rkHtVGID^6^t?K|Cx_Re3}8wJGf&8JSzUt}Kq zz$b<&i;@PoRzX`;0uXNIStFlZw3EZi<~nI?hqb)t7eNidnmvE_6Rc&5!`(Mds@-WX z`@ZkswsuUHISyaky;EYEydO8@s&dI;;A7sByue~d;p}jQ_AXyp3kO-lJ!+C`Fpses z%da0T@0cZfulp9c;FPMY?~NLjlXkmL6yLY$4!m)SXF+MV%04<;fym14%UW1}`GtM~ z`x3li#28s&o9o3>jX(Ix+r3?)slEtd8of6SQ*FMb<(#2508+TkDi+}^8%To;`Q#Z zJe}RVtrt2u57_Sq-p`?9CZ8{R^NPw^UpOcBpEB5N=QDgfNePK^Bf0Xyc?<5kcf796 zAJ(F$>PWsv-MqPESz}u?)znBcVu%$<;K0sb_;CqbAj)z2imnWdx#*@Jzw|$rr&zz* zzv&LCTdKHXZ@OP&SJcc+q(0OA=;%9Q{Jv34AML(@aJRqCJCLEy|b%x;hJV zSe*q{`DCrcGGTYNlb?zXcI)!Gwa^)KHEML#MpD+K(Gi6<6C+cHGP?U2BOVCYj1$>> z>tRtkAb5lQpx%zMA^`hAO`S0zRzFUCeSLgsskW4~wAlU!a4q=pJw3=lINvY{xGl}~ zb22r!=1KyAn0hMUHCY&c0^B>c`2Mo4EG)Itga2IrwTM05rB&OBq$SUH-MTJK{|xF^ z2#n`@7u1NAuO*g@5A`pe^W5CAjMl)*?P=@ipg)C;Pa1+e6Y?E<(@9@xjz>0O}=$(8Ft)=tBS7J8){H0{ei-h{inE(kMf2=!*ZV2A@=xU?V`!nP>JOS z8WM{S58N~JFJ){qUhVDejbw!R^-Vq5LA}Y!*ySE}u~lR_Hx{xJvc{0z+x*k-5ec$b zn^+QdZ__!$zALa>$VIkz+|Ib0(Sg5{ebS9~u*C6GCKJ~CRy(_IV)QVb8)xm{SQ&77 zSe}g%;!uj7Sm=nf`A9$x`O+c*E7r3HJwobM%N`mp1r=~ zYFR}>5^qotIeJBCqgB~v@cmqMw7^DRP;T$5rao=g;lf?Ry`B|CAwL;djm6WQ#ZYAi z78VwZ@b;(e6Tx(H=RC>xPy&?qZ*lyTj0t?n?0ueDSupI1NEg+>q(>g>WK{n=rQF-g zHjT7O9?}g`ubYXrzc#3tM#axpZL`xhkvsQlu!_5XskCh1aNYeUif>KQDZ8-M0W5t! zNjnTqNkN14-rDAZ=<&5lZtO8@0yppw!}3fpa25DDI3M-A#jDx|cX08a?~gSbxI%`S zGE3#7H<}xpL+b7HOgv0%tEbup{oDQc==MJFi7cpNAI(C8UrUV-Nd!HEIgns`P$L1~7Jh=1Ssn@VYC*71!+DgpACpB~UFnfO@ ztUn7cIv{DZaA|XQ{7VV->sePbxI(COr;uvy+VZ>dG@G^$O!YhJQPiFJv+YrbLoB6b zCLY4&Z#}p1ceZ>FeT(H7Ub6G9Deah{jDAL^k;R%MJV~~j4C+8y=Rh80K9OL9eTfJd z1DD_D3i2q-%E4uIQSY&1vXY3u&>@jp*uawpiG$R4h2gDKbW;=9F|#F72Yb9*Ems?Z zn)xJ~D@IElyyoWS3~PO~3J6pwQosIC>o+$XByr>KSu6{{t0d!MF&L)We52`Hs8g4p zJI2OeAM@p@sCh^Vv7E_7l$6Tz*HWaHUF&a1%ELm;RO<2+6y1j&zce?9dw8+F3g^s3 zCq<%Y-lzVdlKR3^wa-ZcBN+-GdkSdb_m3?nd!&pHy)w`Dudhg}!fWjDt!0GS7tiN3 zMJbM@EEx4k(RQT(wSdMWSe=ghNpUsf{T;8~3!;iGzXqg#iu zThBveyZAeH4C>*$NmVnmO5GPc`2q9Dx==GkBdm9N+H675o_XzBoy^q~Lms*=n~Rj? 
z+yU6#A|#jJX=V9(Iy^FnoARZka`2Od&Ds_He0_1;`&5(}$<^DkyQL&vE-!zaR)Q8aCsl$o|B+_UVJoCf?Mu{@10t=4V`H zIzzuNXS`@oo3o%gm@fO!9&eRbm7Hqic3Xt)&VFp+#xpjRw2kKCX&K(pXLRwV-IbYoQ_(~_Ij->P>w#G2So z2p#T<%L&et&INv%!++N;7}hpvL&aw68P1Y)*;zAOH41pLLbZwX{X86#`PZ$jQzoW8 z0ZG*C;!#!xP8$yll1MfrO_vKb-oWq>tZ!BRuqE)DZOtQ>iIT$h11>`Zxs0@o?!qZN zM3!%a^S+vveX9ADsiqH1`=>TT^G;>4e!EB}9U8%gcdEa&m1fv|Ez<4AnDmb6P;{3= z(5%dtye>-mr(GJ--2FB;XZo`YzlHhcny3bK4lh;g>%L>ty;8QjnbSv^J4ASGnzAgH zHDog^EG%8LkKmK1|MNA9`U_9Hf@$aQefexT0$l71lj%1sg$@}_Ocgt`I=i*YWQn;u zDC|AriZ^SQJaSQ)uQfjwHv8upmQ*?fhKGks6cp7=02k5`MA#7=-5QR*I-GDBiEQ5b zN)Hp@7_lS5!fvDhGv(217{Zl=_035yfJ}}|0Uwm5(ipvajML~(ax$U8y0X#dA}cKb zqlMGcZrRllQ82v9^}RxH+b7|28Zd2z?;Y!-dsUC(5rrW?$(Gv5cn~K5cLY8an2@uI zV*qzmdZS^@|26m)m5#zwe+?Buql8(NMpkfRW+e`k9@ube+jRgU&iss9gT|^4#!3~J z^bp&O1oz}qB}GMA1_lOVTx>Jbm#>LHqCL8p6c=1TD=!20cYY3oN0BQKh?3NUV{r|^ zX=l0MoRNl;7tJ2rm5B23!j3U|>FW!P-xu6PZ5JF}LzFERlb$Y9-_Vfo`Ljw#^tEHI z17_F53fP~;!&z{tXbbMM%detltoW(SEiB%pr(Y*VvYnTJ{)b%*P0>jKfYhl|K5TTY z-oQ$UxWB1EB8~*?v1Y_3m|`A0fIzg{fz!)32hM>Ne^%!%oI|Ab)>H5W=NZAXvauL0 zVT-1#dZxn+m}$_agZ^irgv5Tpm}h(j3tO8tX3cBbASUta&-jm-`I4~N!1&9M_4I(D z_PK^c=LkSLJ~(d_`zs3%&_fGr1S>W+m^lN-rH8)Zz1`RaOAZEnA9`O^F?j0@mkalR zE$=|{ZLb!eB6`IFt(_NmHf9w5##oU{-jcA#F6&Q#zkLS2L1VITKQQD2v3TIal2sdG zWacGuK45*^LS0~qxtFi-ulRpPaGWRIAj8E@X9w3E{+f@&nP)>J6ciM1P*O?$Fd1<_ zS8y@9HI_255t!BXv({EHgA0TyaLDC80%KC+^U?n8a$&%vCMOg4z#n8HuX*289$j9P zFldY2^ZJ(h|Ee}ncWF)yudlK?y zud{dtMurV9o?kRf<_Z)3<>Kf;aHs;~rqD@i`y81*YRvopr`D09E5cvH6w_ z3SQho)fg=~9*u%S7mJ0<`*&epq&WY4iBVBek&u-1#)f;5n*QWIiXG^Si2+PRy=O&` zI8I4n6Iy*lHlK%k6vEre|vOLxN0F$Mj&;-??I8qhi4#lN$$d|g{$fs^M_BRbQ!2@4Cqw0N!w4F(2EH2f@DF7`_d z%sbrzNv@3HiBKB(GhV2V!kt?7(p|AiQFOSwL*ZU6lVA48(8r$pf&Ln^v4usmLHp2Zw{`2QF-zCUER3bL+PW$@Pz*R10?UkreB z0$Na3_i5>t+kDPY7@$1Jp2wp(Sj;|}kLJwjLWETNJq#acB zqi4Sy1JMjKtl2Mkh)^PMKPh7B#1Bbzr3QMkoaH8DWTEkGw_qm)5)x@DvJFvibv0S3hW{Dmh;9hFkb}5hH9je zns-AqNJJp?1(Mtu8#ccgb_rU%?JdwUx z#``HR2>s*FAg}$W*kOS!TxNy%fEFn53C$OtFolFe@DC09_#6KE$qBF_s8-R9!dnOH zDGFIh?$}DlWBRqA=pnM7L35ebhMQjHz;o~rlYf2VFWdspLlVIwu%3M7AjXE{PJ?@% zFB0*gf=s9qzXq7>4ZMWG7b&i?uzcd9L@gd%ZWsA4(E)y~)B@{T#-+OsQk@wo2ZQxv zsY~4WT&ke%tcGi7Xs8i{g@;ou;$b50&nE+{1jr=cNFMXJnJD&dB@cQHWB*t~P`!1B z_Mti2028Qj5@KWTN=Zr46XF(TJzd8=b|ggQZ49-zz$1W*3vvmBch`&{yldkIDFYCZ zR$V<4^u6v@(a65dVvI1=3x5bstb-l(0`&ivxWHtcaZ=i=Xgu|rjt~2H(FH_E9>(9} z;m|?egvnAr90j@J8IFu(1YkH-Cu*Mq@ZtqN9d(hD4_%1*CxZC6g;j(OQuIR7pgEET z*85vRJYZ?UA%YP3w&4#zF!xG)-FMOlQ<@;DA)4_p*c-5Pj00GOVxnS@D}VhY7Iq*h zKopr=E>yG>>B58@$mHb6aIH>1|)=RqA#E)66(>MKP!d7WeFNWBvkqwC!c=shkbG~CxFk89WlKSXAvPH1dTITx4Lla zQ_zr+1vnk~-4hcNN}8JWK3DL04#Wm{JfR1PMqKQ-17;k;V%Fd(eOzc{6*4jlUXrQV>MP6WJ0}d_aOHy~9I`L^5IL z=H{k*aP#oE##Y!7|Mv+lhhe~^j2pEMIBF~!jpDf&@R`mTgJOQ3j0WpT7CXlM3+4av zWh=c?Grv+b5@~(p5Ed?kgzy72%~5LpjcQCz0=YQ(yDwf|MpIO1 z{qCK%IvEpCEu{dc#p|QW#qewqPy|Y80mNS%6Jd`&PPRTUOd(fH6!^>lFc3HI%g@<2ip@ zK|w1fB{p9;4&!1Yr$BsqoHu;*yLe{6_=uwqYnd?2iyr!dAT!N5K$$<1J+J>`?W`aL z;OX|QUrb|zUkCjr!{M9Wz+f0l*-N2$PeeKHRqZ^OMK3 zgB;uH!x0n9{gS{Bj2i)!L^iB*WqrZ7*HaZP{x@_ynkw~C#`te#29$PxMcki1K|6Pl z2;kz(*y@N)NC;KWyuU)s71}jQKY4d}!Q2Ov_Wv*rK*mEn-~fj;5wAcjP#wjBC^{A( zPaXk8qv>4WzY2y^0I&F7$+rVEWjjLC%wAzjfFnr&M1-;59 z;t)eS5XrxDAb<|P1RJa^d;i~HihG_3*r?{*H7r^rA{A4`&~;jm1NX41070?kMOefi zztm8mgmg@V{)SN9qg}mFBe*-LWqv?fLHqHf|8}t}IFsFtpejq-3aV`NUfY+Gs%bqSKLqZZWpqz6Mj%; zA*^BoZ{jU=h4wJX!zuy8Vsv=F+JWRx3?7A$`7Fzy2}C-6vt$x1>;Z^9P>mtj(I4Ra zyV5kse5eVu&vdl!NJ z5)Sb#^GKPPN6MoTG%9Rh9sIy{!TNON51{M-eJ%Je=6;REh7O1pDyr)B|AgUOB1F){LHv|*!AVB{edNI$}E^M1c*ivlkpi-o9zKuN}=KI z#I664KCK5!J##OhkxdyxJfWu-POuFM9?y zBrxps7juXN*lus4L;+7Zlm6uX!wi)2@J&^h@y%?t6RAr_awO+9G)1eF@x5dYzRLj` 
z|L;UR}9zy#W>C!m7RaZw7&*cUYmTzwld3M1hWZ4j3&|fKK$Vnk5)Extaw6l zq>#RBJT6VEH-@o3sQM*A9pR{dCX#PV9*oQAfLEvE1JZ;mhL6twsKQ5g%Y1^jF8+uG z0$Vm(BkS@-(Fe?+sq0@z;iUDzP`^y2Q>6}Q;+bdS)wgt&-s@e}u`Mpjw6sIZC6$-J zYLrOA!fvv^!p73WD8o;VOS3=m2I+`H-f`VObylkTl)`-l`V*_C3d^JI(B57degr3z zmtwkcX_gPs#a3+C@A4R^`5Nb*mG}2w_@g@B7K?domDtbmMy9 ze7adn4Wbq!ATbcS$ZKnBU!P@(1Y2fQ0*hc(Ymd0W7|B2Q0NTuftC+_nJVFm8^fdNU z;gsZL|G+?EJZw1I0>j{4FmY)U3azw*3*i^aC=sGrwz;G zd1U&i8?kEGgx~$48sJ6or2@+ToX`V!a5_wc@@C~T4%Va<(_oz@h&<*a??rF|ZSnnE z^6o20&OyYG z39wWT7sJx@WcdF1RcH||KL^A`@9a(CKKkrJhX56}tYEM!7OuH0m)EP&tJLvH0yVj@ zCYLmG4IQ2OkQG1SBv1$=r)F1*>6?4v>?-B+0)5%@(fO8Il|G*vpn` zS)u{NnZ!fk0C5*-9<<+bVm4eVW&AEL0BAyQxMWaC=W+sQ;xuDJi<-Za|8ij^h<$*X z3@jJt1TITX77ueJg_BNFNM$XSaduy2MHkafcR^Vxa6<+v`Z;Ma+=cVT2!eyYNn6`Wu%9l7*Y+_kmD)$-cB27OU)(PbZ`1)8Z$qiG4*f6PpVhK6b^K)x#2_E0bI$ec3e>I~v<9`-4C`lD6uoKnxQbIgQU2!_35P4TP)|V_@wc;@P zK>=n%tJOSC+7pE8H2`5><1ci%<$&$Q#~-2nx&Yv%6z5LrpojDQ)t`biKi%l9Q8g_4 znrO|9q~^UigL{ZbykwB6ZXN}}0WoeF6B`>F`9_EwE!KPA%Ud9}#*SiZ1yA@CsAjl9 zwCDd&x`DQzG$?(G?H)|4)nKb}cS!>xa6L`yT@JVLRn3^+2|^f6?nX$C3VMS!LPmHZ zIy)Gn_YMDrjtgvC^~7JBH~r`&lO64As7Q1L#dB2Ag7b(e-H8faNJ5`nX6~ccw7^mBMl@8pwcKvGbUM5bYULzU$&!f<^5;SZi zVNroV0Xhs&K+zkE&>oq86k?iT>U#iP2ECrEcV&;7%BVcUDQf3&qGkyxOI&on3Zl{w z0I3c)ja{vp7r>GfZQW~cOeuQ%@I#W=TYsgQ`E`~!^v*k0HzZNay?BNXV)cj}SEo25 zO2FgFk(xNkfgaMKY1dT?Li1bV&r|5S81R3At#JTmRn z(^rm5p}jz%F6ZEzcOhF|S8wbQ9StyXsOovFh1LcjOcD=)$~d^VIc_We}4)m`JN*K>Uk!Ub@M?-x(liJ&`eDhV+z zae?z1?g^|4j7$6?4DsxQC8s&KiINX8^HFbE(+{MFqb?ldrkBj=#;gzQYiVhmz4Ye2 zS2?m{6i!YOe9pm1Ma!w*gE4`4lgs2YaKC)uD>Qv=U$9xY7<{p-CqS`om4hf%MMm0( zc6vtQt?*)e0FX$0th_#mFr^({eT4G6_s{XJWpzjGtf{eBjk9$ROxe7qrrM z*~?cAtH#u;)6-=j<(DP0l@XCkNzFUTQ~QdI+o3 z35Q?{y@1xgm=p?|SO@3(tdRR;$1?~vM+EwwuRPCr(uW7KH^(1SOX!<*#JKyGw1=)!nbse9yz!1| z5^tSG)tWPBzf;pZ#}PXs4M;o;INy4BkMIOs5W;4=z#qpYX)8->FEc{zaAqSB(Z=+ZiVFkk+Iq{-<)nFpB^kLAl!9SUqodb#Etl@?0|;7ex&y7)5-V%>&7Eo(_k9=Q{&04Xay6I2)3ynONBYKk^Rn9 zyo0HQ!%WcLCScd}`)j+q^pC0_5>k59cHv?)05OcOphFO0N8EF4k8E+8A;=!R`mz~l zFIgl8XqLGW!()!4b}KzwZ9fInBT7Wm0sYmQ=aS2n(SbAomJ4ctc6@uS4Dd-(sLWBG z|L0x5jJL>{lObSv)36_@W96b+RI5dd&>t})1$}O@7TaZw}XfH+8bGA|FG@K9T?~-LC0xTP!k$eL%C+CsYnmZ%x2(;E9#1pcZREy>a z-nJxpYH+{9x#NgNBCfyzVM$0$lmeb1J#LdJOk?N;id4ym9v|(RX5QIQ4WPG&{L#Dq zV3)`JQX#0^wISv^ZE*V-@PLwULKegPUzrB27Y^5%_|#%zLTpavqhiM3v+M=*b|~Qw zs1JbVCeQ}%pgZH}pJ76y?#%TpiWE~)V>4o73%LW2#pTe-+yv=#r0d4xjp&Mdf;&$- zf85~hy)tMk>azM~tnP#jdQ%PtKV(<EWG4g%o>yrVg7XrTJU6G~VCl(oFhFT(N_@(15qa+@dOZW{0r~SN# zPO#W+41ZKUdf#NT7s;l>yX9ZIkV^uzapjk6fm}iaszd#~=+A;L(mU$%5Q^=1kW@)6 ztrduF%BjS|3cmxZ-Hvx9m{evR) zGfvJ2b$1WFfcP2VauMJK(ekh=ad184kwiaWmsDA=dZmJuJVj%s`QQf&_%lC*hkAhL z2H8yCH=!FOAX=Q%5s|K1y1Hx$2?-HibAu1pWm@KdOXQ-cQ!=wQWuj-2g>ZljC87Am zq>c4u+0Z`kiB}iEE)<%6vLgOXQw5bCfcC9+cz~vd*ZXUrsAdC;L3HhMtBo8w1gt)R z@$&fjYPc)#x!ybO1qj!>sW+*v{(+42$9eXnymd=Z4h}{6|3JuFkd;|)MBjB7#?XdA zygZO_x?RU$&_i;dG64eP699au5L+5SCV3FYQ#({7T#kj90U&v*M=3`D1FIc389Ts; z@zMPZr2ljtLAE&%_g*z>y*>-&139le*EeSy9gT*k$-wV10IxU+=7|-ic2jH*I-J4M z$`JmY(7e`xGjIq9Edn5YijD40cqy#h@Jiva>g$dy&MTz^+MVotPZ7oNUxigplSuKq0kGqAa8wW+L`jSKA*ax~ z%vX#q!>{?kI^()=2%_ZF%wPZx5ylZ}L{<_H5C^bp*B_Ledbw>3 za?qWl-CY8C>|y|qlM#_gYhRj65ePgtbq1yWS2)lLSa_9Q3i$YZagh%627(Ac(5QMD z+Bd`lECYl)NfT#_cEtG4{5ErY`VRP@za$2GAKQ#S0bRMDp>1-LC2e9dAQTYO+NLzY z_4ZlYI?Qb8RG>Zm_-p==47oeA#%fhZHB_NY%*Pbr)OYIjZN&*}bCiR^m6TpXgRasK zTdveKHQOhi_zWS9lLXCrRAFJ^{SI}$=itRPmjNKB`X1%IS|)5N+%s+~LL3lGSmM6D zUwv^u9@FxLOMfLh`|(GhCrR1ftndkaMbW)2PQ$_?eR}Opyr+lfAvUCIArT-4Koy$I zLs%h5Dgz)10!UWDK(aXKp*6S%++H0xri)?xG@8f*;g02k+WoYH4oA?`` z4mMlAVhS{cjI;5W?*gqVE`%i zKt`BG4G{^l=zhx0L(mZEUy9@-QOG!{VuY=PPP&>Ps{B7dwe4QTk<$05SRBcU7`#co 
zj*W%QM^1zF{Vd?91yI-c-1pnVSs{RYgGwFfv%JHUE=VL6ysUUDuxjt_Ojvsp@(C`~ zHz$GaLs5ez@{kXJO<;U>qOosXau;c>~um~Bjh%V(dlsEDa*coNnNaF1z+yqZa z&6F8e0(uG9bUuCNwE^M)Nxw8|1971180QP);(2uvYZ#3%-beBTrd!NO8SF*#)cWYs z9qnjOeAQ3?U2|>AYJ0=rl4r&zCM6#Ve;jvYl|boQK!7I$n#iEWI=q|zoF`n)K@-}y z7;gxI89*`{4=71pYzL;f;2uzq;Y-EEJ2k&+D|?R{>fn#Ky#P&)Pc{kwoy?|bL9Zbr zvmcd{XXxapfjX#zRtG;|)Imn9@0TDKhD7$s!XKvCdo;h_iWwF9ow*LqM7<)i@nfmTeK!CP$pUz|UN7-O9_Bz;i%A#S`xc&n1#H9x{s-Q2KxDlo-G?&G9xX@3DBY^*(4n&AWkB-DcllNyY(0sKKbnlIhO z@Fgl%@S*`F(5`eLbs!`lHX3wq{l#9EsgYlZy16?v(+*73)Gl6P_((fK?FE#AE|W?i z8(@I!K|00)K9P*rFOWiL3y}UB3oJiHXlQ&04h@wyHcm+vQf?$X$|H@qFRp?NAcaXJ zSMNeBd-*fwiv?9$|{0i~Cn+tM7_ zyEj_a@$H*t7@zMvZ%gCfO!VVgp?#2gw1SZgU!No8(c`E8J$@Q4uVK#v-OP&d&&y5S z!)tHPUe~0{t1mnQI0tBFN7k?cP$L3vNck|oEZ{}r$rMf9YtnP?`r5iqI117%$?sRD z?hf-?(8aM`BD0u+-Wtw5(@^bMF;O0#V3l`Q3niS;e7d8vvtc`DY+{gSC`4%Lz}P;~ z=}AuModt%umaQR=Xc+>zf`d|>2mHMIrCpw${Qb7JT1&qRG~1{2pz7yP`X3XP)k&Hl zT178Q!Du$7#R~WBRSM?v&@Xf5@23~EUn2T|)3-CmgL*n!5`dU&Hf6fQ!=}3_sVTve zlaKo~%huDSp%)VGZ!G0+W!lt=I6mtS+v%0zb*a#=@{C^nnZ{{QMw`DOrv9ydZmHq` zUtRsPW`JGYOl+FV*sMlgs-$n<)YT7MwrVP7;El2~a%G62h`o0kJ5h&&A{J$bYGaoM z&V%;CcR+WQJls35n7`}}y(l$$lzb#XAr*x#mWeUR2NV9UL;)@`GJmsoz@ra@Y3CYv z%Y@yMyytt#Se+-C2M;Rz-BIfSKdWCdSyO~w84-Ze;Kff0e(FcXqXPvA33AONn#t9? z#PR)NoqDwYPpy?kOG`HQ&6O$6)yn+Cdz9R6@9*qR-t005El&sRIb$f-Keoau7cjZp zZud#M5H{ho0$Q;%!9YqJ7#=4bG1+^8HDz9pX!EMjioG8#oKKcryobQvUOX$fw!dz(uDz;Dn7JUBdw%yE!SBFKmf-WwWez7B0uSxo|-nB}@gvcOY{W_pr z#84xeOtNq|8|d8#cS`2Q0T#U$V+~;I_7y^QEJ&gA)HRBSX-*(IqzzGFWCuM)eV z8qTV3_%(l}mw_t2`&?%V_I zjTXJt9r2|)y)}=Z`FS%9(@1;6h6RUr2hY>Q?6V!jeD;Y+ci%-Q!(lINX)-s5k0cm1^IP|`3m z*_1+MpOvdYOE)UJ1c_fF{e_;@18=`F^qF~rCPh65Ec`Uv7@9ZnjhU-gn*0&^~WOaVec8~J^O zPl(i1miA0>)tG#Oef?(Y)&2A(!_Dqt$&J;Cwi04kgAuO@(pSg;ad=Qyu zSJ$@e?!Ifkw?x$WvSMQ=g$KX_?VC%?}^dd^kc&||_aB{MUJjq?6& z#Az4u`<|I8nK_TbHaf4Tq@{)WaQ8~Qu*eNKd;LOBVzHrgXk8V~8_^8dCHuzY<*DvL z>kPAe!v;zuc$GgQ-%&*5>E3EJ`)c((_4rE-Aqjo`xU<)pgY&v{mGjS-vG1cC=i~XZ z!JDUz;WybR_2mRCry=@6fg_;3o9G~xZ4JY@crj@nzv|jaW+-*nQ=3x@=(oC8d zUi`?*F*nz{I_FpM9lVYDIlZgjN}#UEe0S;YM4m|}v$Inb-Rg)Afu?;j_J~$=x$%Ax zc)7Q0ST!XrBdD+Mo#)u(#DwHlDfpp=C1C%);-|Z)%{e;o-fRPSy}T+kttjC&>hOF3 z184sCl~f%?7I4ZyK;w?NFBd`AebaNjc5h@2%Fe|W9O!ixN_p(8P(VjCB&6Duo87iM zL0R0ZFozO-J=?7w?alz+0%r2uUkN2E2=jZMR`9l|_}g5*mQNT(XG29K0>t;&Fhs78OA6{sp@c-fMv0gG z#$SIkxet*+(aMS_5%&KHp8$}#7@rpL<7k8-`ll1KJ(mySpG-unH zT$`~OJ?MYwc`&*wB&y>$8E_atxm2O=?%~mEp1bQw5E{-VBt7IhHc{AQp5M92d9c!5 z?OkOcJ~}YA8A34Z-c-Es)wbq4ZEYm2dX#&*+lqBV*V7y^9os&fi8h8L`*4Pk>vtTo zSgS)99eX6MmF@q;FX7zGRa~vo-+Gqi{BhKkvUKQ0okR3o5b6DLGjEkU{WdkxQt%oU z`iMYL*|Ak$V$W2~z#QWd8_-DiYoLN;!F7#N(V7T>d(4UD!xpN$mp=PN*v{~p z6H8Muz>GY&m3@bn-djb2!zkJ;;O2ed?c;5q*gg?EtkiGvTe4Gx>U1zZBq`nuu9gp& zA*0svEQ{ZJ)N6YvFj0J?yRM^q*hedxuYGBVX$g<%bf;K>9*^AR!Zj~^UZMB4N}|W-Nty$Mrt;}WMnl?LgmmQi)Dx61-~ty zk4WCWOHu3FFz@-eE{$C#02~>SsB@^bSR%IB=h>3H$*pUC|D~D;4<&O;#ae`Y6HbyY zjisDN18QyP)I0;vu&eEWYp0VBuaV4v$9(P-n@w+CXnhcYAcNn)+AI4*4T{KXM%q^Q z3Nw3pI-J~>7f*F*zv3y_6f*NHLUq}gHnep!mC;f}dMFNfq};#k_7iKSvt%L^OaG13 zUfK5eo7GAk0*BlACPR@Pi^jRtZ2X^7#(ua`hlJN_YQH!~&FClZAycEvs}|+{`5Q-m z@O*xdHqQay%=Mh1pEUc2FP)2=mF_EC-OP#H`aEqCP z!S@z;F%%NbnqneVC_ccnclEw;nyqC_Y*0i`FK31`mS1hx^;8^k+eyLC;-7!^NtEHN zzGsPQ#{G--+ZJ8*Uqq!Uu%54uuu#{N_l_q`Na$3bqu}*c^un^=^9?AKVMCZ&$96%b(MZeuk9V6WFh$ zT3!EY9eXgKOZzhz+vGy1!@KP>s9;c9r-IzN=&b*{5ud{3E8BH7pD1gcKQ zZ|GXMPtWyPR1bZxbrMym|6$=Ni_pQXK>=-bBfQGdkKa~TKZmBzfs;&J4;%@KHV&rs zNL5(+7tiwxmIh{*!QQB`=c>`CcDkh|8_e`lM3a@@;m%j2@D0Q(keP0si~l4S7~6Sf z0@YwSz05b^ba->?n{28sI1puZRGBr0QK_SFzO=SkF*G$KxIo+e&BiC_R0ZL=vw;MO z?yV2)P2Mg!Kl&$xqeBsNRRqO?zOP8KvRozc!49a8fF3m2gMEui(as|9(j%nQ0aXb3 
zTNP@YfBQ-Xb}v7jSe}2OJURq-*Ud#?a(`2~gsPuA`(<(lvk+c+C*v3Hm4q$BHFJlz z#ajpc^%DnL4OK$Qby+8T`Fb_WLPJ7WJ)8Gc83i^FDywECI-9v_Q)2JH8BaMm3%T9G zZE5mC9{^q5L4A4`O8C)c7RS(Lb|*oSK-2)v5tY!_w?4Kt6dYY~$e6mg?{`Nsvov9Y zr;BOGBX7V_+H{xMg3PmYwO71-{B5ARl_&T5iuc{F3a91Mp37*SyN=~_d^%B1ii5*| z;VH#_M~{O#3TyRc`**On=?WR|wQLvi(pewsPYI9PGk-{&j9%T_NYaZ##jAaZa#&1J z6bfJYd6sv@KPcA2Vf;ItC^&@KWg{3TWG$o9&qV9Tcg{7xtk`@t5PvnzbX?8hXQ>(W z+_{xiTm!_K;;vDbUvLcXQn^gd+sg}N072(SOR;dNedx_sTsuaL?>2wlxdeO{y(@${ zFM-w^QZymo4TTM?RmP2v&Mlsf7D#HIDH-{CqN?Bcg-ddCQ^{mAlfie?MGYaP{wW#f z1H~fL9!a(ECC^C)o6Yw*io*LwpJJ_I6*_Mmc=J@TjNGM2{j}W7?@^J=Efm>(wO6P3 z*_;#uR&rpNzC|(nY-t6Lu8UIUof7*uEO}(Jwo6TNE;OSIFhBmWE0p##_KHTUDcBB0Z71~Xb*cG?p!gH( zGw13U4@i}lhRP-eY+Lfaat}G_^MY%+`lnMu*JfGrlPNY5_jx{rhF#Jo+r zhe!p$edPx0(?1k5XaV6jE-9Zk2;AFwlwA)%P5{x7yF=H)9FuJe06^Z|Hr%*FeQHK)W@3MhuO_?9f|-zbj<-ef=5ozQF$zIVW)m~Qf> zsVsjG9OcBsD8$BmTnft=v30XVwsou_g#F*{gBbM--| zgn8HH@}F}Z&UQ?f>>hQf#f?#yi#QCXZBW#9d?Trq54P?eemF?cqP$-khdolYbH*@9 zRw{PXX*}Vj;T!XN&!$FK6B-N~ryIn~$u4Su)5cPr?Z=+wtY&Q-l9g{Xhc5=YT9)vn zvKhwYGpc%5DF61g;1aZUg5kXUg}2vpj252@@;|9PSH*JDv3Kf0 z(aZrwRYB)+odBkxc(iBXzgUT@UvvAU~pRu zFCK|(8GgsrWwQS~p$B`!lVDKZqVVRyJKs28O0vAFrT#;Ak_}7lL9hn73Jeo>863zA z4Jt$Z%CDF(Jh|J|!k~yqgT&CGTNETj1f&t9yPH8JR7$#AVyI!Lq32!qjJS25bDs0O*Y$pS z_m|D(%-r`{|M>lTtxVxq*Tt{aA%>WfJX=g{5ibwA#kt?H{^7-Wo&5^!Em)P%xYC{U zA7@_abe63;x2E6hbiK38QFKpu(%EU|eU~9sN4QUr?8Z4x^^O_2LM4ley!c1Y%?lU0 zMguJ?T+G!z3F*TZGgXF7Z5NW#MsSF!FID};CsXn6Qripr(0zsTc;lNm63{IlS9pfd zbox-9SXU1}Dbxq@?9kC?M=a0=PeVXe?dj+f?foFx`3NQ&?*nL-Nv-tejpUkDJ4Fcj z1_t%wFOMqB`+9}m61g!4jZFFs$Mgwdlg<-JYUNDvsV9L8cklKddM@rlR21D)okgxjYH3`z`Iujj! zSK^X$BH~A7knyoJi`&DyWvkoFg=s383sfN#Efs9KnB&o&J7eQm9v)`0j~=4VXj-SL-+fP`GfLmjpJjFQQ*!M`h2{G{z64ol z(B;RM7@Z1s+x_sQQjZ6we)Ssr<*O4z$qPYDNS~o6k}KM7Zk2N-Sa1T^aTU?2W1>jK z2K`jZ};%ybgN*cMZsI{ZS%CPtyNYDO-lh66UdJIAYS`#r*jWjU88 z_l)MHG)6m4m|U!gPEysbWeI=oysX1geXsPlE9FI5Gv%067o!(?4zC~1++I~zbVI1c z3l6Q69UUI4%sw9J~IPUuB-X@EiUq_tS%A zg;~C;JSLs;#*g-)_Id%d)elRq-G)*P<3-8Ms1`e3Xh`T>b$|}XXl#B7XT2sVYTyf@9xNb>(7=jbh~V~uCdf(7Ry2RR=%Wh zd*xc`Xq#YsGl?Q6NlNo+wW*^;nn{&w@%LO!XGM9l-7c&6b{&7Kh63S1E=8pyol*(* ziPK}(*@kkQ$Gu0O5@gi6Ex*yS7&^S7F)?DO64p>RM62GUq#4EABI&BRZcHbAK4d$l z^39AO11H~W^E0mnVYJ0c6Y?R|x6?goZjq*a=)|D#G4g^0pXu?@?{51?{51#91rFjJ zORNyvyG}2R`%{-;Vf<#j-ged1yQ3k_sX!K-S9wM8Z>c`bqkg7$`wQ=)*54QT9E`qI zyj4}+0Kyz<1WP#VRBjWw(_DyLf97m77v@VTXDq|6#1s3d&HEtyk)>j2c+sxoAm4(Xy znpXb!CL`en<91z#soRP9qy`lsuY%^4sFlN110G8TT8|0h8XUalY0!6Hbb~{L`u>7O z1fWJNXqc$!;|ORfwg!h4?rZ&elQ<(701Cg!ERiCD*au+3U2RF|yMSHyp%LwR3lOeTQMgb|L#B1{Z52ySh|Djy?+$@7q-s zqAwqOTt-nnh!a<|EW!02J|MU@_tAK0=LSgT1usel^}k808F9&HcwW!pu%%*eIbMh? 
zv_!q_)&7y)MWaOe#_n5V#m!4cZ(hADIDUl(OuAIbsM-cY&G%q1tL>5Rz<$+e!KCI= zg~SgLunMbzor~^s>(;@h6Pg+hv%Sx-oAETr_ZQ9A1)SEJLJ{<;>=#LTTGwJ{`T~dX zTFO!x_NxYY6xAnKnqSjW`wZ#^R^%QK(uJeho+$+JvzLGx?1 zQnz^~+t7v=cE;!4s6&U3+}DZjV)OBnk+3=QiJm)KrRIBe>8ZxV!56(v`N*&|^0KvJ<|;N9w~`NmGz@Q=V!k8cCRlitB0xzW=g zom;e`=2I8&4Zph`J6AQ~@hR+{=$gFfgNL&x-uN_T_L%*OiaW6$uTndjpyR1z;^FUc zHMO<1@l|bHpgPhzt`GQokJyqWB6WINMhl$N_K5S^LwJt$YT=L77W~V8FMttBw1~HQ zYf|SH3c5_wC64UQgjT-%Ng|E`ZNgUUGj2mRT3#yL4 z>dpbfMbWDa+(^mQ(CzXMtFqPo!LcD1j0-rlF-{A5y{21w84ZTTa!k0MW4-sg-7bnd zxei6hlkSf_!8u;%ADpN#Zgoh$jmtLpL;iJEirz+lGB8CTQx6BDaEWPU@?b7bHL1@q zQgWiD)I$eXpOvJL)TT!385ZppuIRqo`7pw_%wcfM-e_ryx4$->CLE^LFAzF-ug`KY zToIl!XlQZ)g#>Q{~2lkXcZ(EsB&%@WmU4DpOoNqGe5&{J37r&-5$QTZj0B5>RUrqP<02sk!RM-1P`cTP1zV82e^~ zdv}qsXOh}!a99F##1~N|{k=xOcnmb<161+dJgR%oTR1BvNA`muc5;Fg}|1^`(WZMS|$U4AtlE#9)|T@$*Ly4@jjt>Nbm+ z1}+A+>5(`#nlNZhF*>L{aud3CV_^%k$WC3k6l7jzG}|Rz$Yc7jepMGAxP6~9ik$)B zl5D@zq#483BGs{EY_ztJLG~uHUnoELMc2cclV=n{&*#4J#L{@3jlL*-OH7{h&FiLZ z$M0vL{+k>ye4?ga9D|Sa>+G7fpp|OR&Zo0w>WnmnFE)-hzZvQO_^|Ahy@eb_b%2I# zs{DguWkm1o8#X zv{O|36eDUm-^5E z5+CO1C_$lg*RZu4S~>^y(5SF^)?bs6J|Ht+5Qf^AWkk0n?qrHEG$S5IzdFHf`bx8~ z(=0ym?->(zXR2;Ulc16b#DyE)em98q6zc2co9}K`f-|RXX50%t0jjX!Hirw}ph-vW zi(8#X?R4Zt2gyH{#7;7RYVhj#ex`D=B!~yAWBmx^w(gF~0Me|3Qm)W98#%mthdx&~ zcnE*>yO`nB+h-1x2;5>iO9O{)LDt#Exr0kcGf=XBeI2+nAA<93&t3+0tE(3qXAe%u z;m;|sOohgsypz2DdI?U5KjY-q#@JP8x*Dd#)p@5BSLE`!pK+l;*Jlp-rK)QJI5V+M zKeS6H+sr;GRSDZh9Lv&pcYAnT@9kGogS=?JdlpV#F1)z8!D}?H(F$s3nlT=UDVQ|B zQ}$2IE6}%nGOn>7QNNm)3QjL>D63VXsFt4GKJBtwo`^`3SRAyT{G ze`vnN$>S@tZt+6Ns2yD3a*$hh0`gn4%iO7Ad~W|ohftX7_;|O`{AsgdGwi(P@VBEb zOec0L5|$6M@qlK)5HOb8R_-#(uf!pqT*?f(QozguVNTH@;Fvu)0tFQQ!GSl(qG$X6 zfV7~mq$9o&roE{p#v;|%0$JF@?s*`~Ne~`iO_thdaWABCcj33~oo9-d6ID>_x=R_R z=78pXGP@C5k#wL3FIfXtM{ln;XNm%4Vkgrvk=eV27PB7$-F-Iae>66;dKA-x3=NHs zB5B(E6}f;@Xsh!t5~$hXQ_aylDEw~u=gRj^pdJyI>8Zn{4rYO=%;+7y366$RJi+nu z8h=V%8qjU|Ru^e;3Rg8V(b<)je*2E@tu$VCbr3xtcR7qOzj(HY&A!k+59hFTb7Ghm zbxH^v2LEJcx+}!aQW?g)JZw0Tu7b+jj7GfX$Fw*j%8-GmnHf-+2?{K2cNN&GxQU&EtI$C%g2tS)js;Oi_uijN`M#%6sPcppfqi~0_|`K#Ms zdVCxBxEbuF?x(m4VH7}wJ3ha?HH8cNw0&lGa2QSdhU$M`oe2YCs5ej9d%tqqIxs+N zLP8WZS5zVywKW^~WNt|JtMAOjXiGG}jrn&UN=p-^wYB4EY=S1D3~{08b{%&{1h^wW zQvX|Bq51F$-6UlveLl(goWIW3A&@>2mq$5fbqY=IhP-YF57X!8jB?X{#Ca(5IhmL| z%$x77Pc?`28)DvnC z8gMdfDE*jLvAofrSEs}4qLmDsuslZ}b5!f$QLzKr;#wRi>C!=7YL=2^lG2^BP=x;7 ziPY|xw@o+9>37Sc|2}vAZE|k{PKsMNWbjlKs?rwLiK0~zU%C{okIl8W`0=d~TzDY2 zP*%DcHe^{f#nHP}T{*sXt9PMz+{38#schF2@i1#w4%bY8ifO7rw=S*XQV|tw>Di^u zkv-6XDW#@D{$zb93csoA zz?kc@UN63omynWwx}Qh#iOXL5E9(dtH;2x|DRV18vw-CDIjB@}w(##3PeF0Ei@>!A zVk#Kle-Tq0o59+7IXn=BEj^Zb;T-pLOEQXcDmKSMZAnFM@oFvZqGRQu&)x3J8+M{L z_%2)R2h!5ZU+`Z*Ads6Sz24J4k(WDc0kY%2_S1?Dhv|uAgaWVA(nUY=1Tba?vAdHRM#K+%|(| zu=Ivr*YQgt`N7^@pWr@cVWY&8p;_rj3TbXAdfH zJzkk9fRD&Zm!H#LiVC|tuJF7?P#XDYWf-58y{>|I5kc8yC9>_$>y6IOqw?ge*r{0` z;FRk2+mWtX#5#7+F1RBMH%Git14rblR#I*v8pPJbcAwSP52L0Ro=$SA;MJnGvx0RO zoR;!qPO(TV|ESwKE0>+lhf2GnxZNP+VX|u(Pg{USv7u`aVjELuWIScmNuMmx^1w8V zI%>haz2JIkhMJ2$vv1_Wy*Dkvl}%BDCoc4Z>*-LlNYS z?Uyh0P3I%lC#MYY`_`;3cPf(z&^fgEWbyEo$M@=Qo~-4R*udIgw&s|G4-FlY-WqMu zQC3Q1Y>~uAmEd=AR<}68C3ZsF9dZ(xiPcZ4KU2Sq4#RYa6c&~sHRVvXbqSm1@m&5;TX@9Yhf*CQ?1>m#mcN|{T4>4m#iA&K3i_n z(V*W=qZ7ena?i2~J-NO??ZF(jyOxCvvvNw@j@isezNGvl3Ay7B?xbru`%F!3zzkD4 zR+gS^!PS@CqB^?0lZ>)TUuzfNZA(S1G)psp3yjjvzP6187wO8g@oT_Ii)g--vW9-u z4FeZXcOcU};Uja+FMIi5NjO;5;#caJoG-!=ZkuTh!7Wn!%97}JB`a?b1#zz9VcoLB zmgUgBhjcrZ+Cm~|1F40t9tZMtV~I7Iwg1pDF-MyR4%(={8j>ray7-t zsymuPJgP1RxKGUL`Xc&$$Nvhf;5@vMe66UA9Z9nI0U32HR&o|qG&A*S7;#*DzQX)s zTBejrvVf`!MslrE)p_leA-LsfQ$~z^Jt$a;#yED(hpRA1`FIts09SPEX#2BhE!aKV 
z{)n5Cy7VN)ikF&D4R}Y{?HOa_4qF-9klpoD8%b;t#Y&vTQ_R~Hlc^O9s2N$CMwdFg z{5HS)aF@8y!)&IA~grWE1ex0Dp^F9_hJ36 z#Z;-22t?aeZu+Jd_bxahXvI7ho=(rsze=RS@G+~LjA8_r=zYHePuQy2+1Uwl?<+k9 z>u+5?PXbS*>PWQK2$PVHJpzABc@EaR@bG3bwk@^7ZoU-hv5kA~aZ=c`uy~kWrDQmu zsYPlfHTycZzCW^Z8G$%CEP|6}WRYC2maFeY?A~v%5Z3pwsKC8r8+$Q)0*~)i=IkHZ zc&}1pSw5U@)6&ZCrqQsRKvq4BSXy@M6=Z4MHgCvoRc!HJ!>Zu7%N&=>rNvHLiaD)E zc$93-##cGDuI4uQmJbC6XZbS<*CZ@d=4{}0ST?_`|IoxZ*EdWHt&xt{>aTwj`SD$| zmJxxCvZ3-yt#<0Tz*%dHu+wUc6!vJlf*S2hAL~6ZKUl#p>9L!wS@-lg(t{WM!N25m zxhLGf;qDE2!KPLcYlnC_@i!RPi#^=r zTsQD;rP%Id$*olE$H3to-b1L$^RXx3I^Of8DKQ-s}~+@YI~u@IbdmpT4yF zq^+n&QHxq{Zv}x~oE~vP+jD zM7E!2TZ)psvwUYlVqJhm?5>1iL%XZ&)3ftV?QCznkm3(;T@Lcc;g#(496vtC)_oo> zXo|Ps_N$y+%|3yP_SFq&z`5@X_88Uj(XQdzrYbVDt;JVjsr@~~vGcsR0Ljv#;oYrT z(J2p`^rAX?-{RG(ua~@yW1l$WqguV+)GHizxmB@U$Zyt|x)KtHcJ419rso%(t8Lm% z3hwVgQdfb`0)<(S-FWHziPhm)Y5oAWo!lk_e(4*cCcYMl$4uEHGhTtq{d_3%(Q32h z(koex>j^AF?^O6*C*L>W1e0#m^T3i^+qdum?wzxo+ZZEmovJneY=5S(!5ut;7o*4Q zroa&+PJ1|ynkc`bIB=P=_+x|TS3Rp&$e!@MY{y6r)7pP!i+vf|2Mmq5W{y}3eqk)O zcEV%b;vRB!!asZEUT*&a_WLttmhxe<*wC~4ttzB;*ZW^RXsh>|Izu zn%K$L>C(djB{wokY?mk3zv57InmZfRHp?UaxeZykmoCFoyVz=!)5G(F_h;~M$-?EA zE`vLCGB23dHXT1#zYt?z&GNrSl4JHle++=pIVFR^B>M{`4g%hnvn+g0M}OLEPYkOS z(!9qMDLj0_WuZ{+!&t4OZ26g|D5FkZY_bCW7zFC$-e%c>(a2uWO$Xhh2bfoKv|rsf z*GG5gi@kTO|ITzlLgG~Uplxv`<#R!Mt&Q%F>*E;wap8MNxAEu~E^~!)<2=k^INsR1 z!Ksg(Ct|Z7M;Ry>UN&xyD#5tp{I>?IVtclvT&z}^Yq>z#OL)*}%KDxMxLLV9Nh#Mn z*(le1%BU&(g4^6amhSU$m6!rtU4G2)CiTuYc{aC1OKwGdrww&~9DPE{deyjc& z^JZLVwn3#^Q{Va^JIa4$xH#HjDD{q(imie9X(2iof601QHX7d~i!v#289&6sh>$LC zsdA}zryk;8D6N-_OkHb6(6}jVK4_J|=VWJKyZK4r8$_34l97S;DC_N~GR!MN!T#|EeK&nxPIYtd&k z7S76e!n`Wr>D_}gJXVMkpV>Y;F?OBwG9e#W3}ZYK9j~D2JT+RR%r@ls*&ka>?RGbR zRVB*BL1OFz(j&6|QrUcoE+;q4lIOdqr0s*+HJ%5(@zG*@EF|FGMS~!52H_(CuR?{CcbcN}=W(9ha5)-qFJ_2Lv)co*>Z}N{H|)2iy`rcdgp1jK zO;Eh%#siDkw|eA@HtbZ@a`R+&)b7Z3b{(Vs?i@#(9DbP*oANAHpSr-;_&rPuOL7dl zeBAU*j>G3ZttQL4N!zIWxU8s~e3LeFbmG>NnXs4GK9b_3_LwY9~zQ((&l zD@vFGm#@n+q$rGL@HxGeI!*fhm^J+S!<00cW^j!se&cnW%f5E|`H(Q7TWgj0XJfWg zh_)rZH)+nDr!C2<#r)z=0i3wa;uYy_=^NQ~xQp|2xLt*ebKs~gwY$DTRG>%O_}KkG zF@ezYXp0JlrTAFHfaCCJ&upah#tklv_jyV?ZnA1UXQYo_=n5U(QwIuQHIEV*EdJmo z*%i-fMF@2S{ZH510@Qi&{PGgGZ*|h9KZsxB6)mgAIsyF@(0CqHXmz;O)7@@ zE2sD?F)gw{xh#)^*Fg^`FyKvmspRr5$ z6C%g@&iQ)on`u2OYY7y&@L zP~ebqbQ|;>83tBK8bYe8+!AHVan=-2q49RLILw%Q3fv|;!+AUGleXSc%J9K-%NvxQ zolN3Ew9}WtGAtQwHh_N+7XiP<*nL@fUsiuXRs_Y%2pL&Y`X=2Dk=1T-k6W^5=sAFr zEK&X#;BtwkRM4&0#1=oy;28sef1HW{>}aei)$gdCXTd+;iQjP1+1DYru*Gx=W?uH zoRFA53h3CO$~q+luKARldYM6P=MN#ZVnNxDJ4x@W=wMCIKDgpwT1_^zLf~)m0d|3l z7iped;~AfP9~pFnRp5|UvCoc%77>b zN%bMv{a3gP^YS4h#JdqDq~vNktW!_5IIin0ool~6>{?i?u-6Na31`3yJ?M#fW^%<- z<^@$F+DF8o_%QWdM~JdA1`%*{v?uzKfwNrPE11{gigEtvO$E%hEifvc)kQyFVDZlv zWWvJ^-^l1!d=7RL&71Ssvc}ylnkiu#dL)EST^R`4XV;$P+AMpAUmg{OhtR*1MqXK2 zU8`+-uD}!hVgyK?K;%!GW&2}A)>Fm-!j!dJS@LR)%;>UNu5AWA4G~iD zlqrXc^9S^p)jQ~7B_PrqnKt+OSjc18`8}=q@LfC7i&WWH80g9Q<<~R@X<}R&;T#&dpaODogbD1r;t@C7! 
z@4RMBo~(Ewgfam6Yl&nkcRX_|T84>W*_}HGx?EXUt#s{Z_s|V*hz(phQ-_b1uQ)t1 zj|a&R@Gr8I=FCF_B8MDj#Sp!BKI9h}s~FVFHk?e}ooo&lcZr_y0bdgOsfq2k&hWwq z&p?pb4p}__nOL%{685pKRXb=E+Hi3`zn*&O!^Xe_JlJ7i`!(Z^``dGb*H zQ1M}kA4jwVkXJIa^32X_=bV+Z?E}&P^LlxFkaGcp30!G!PK5OY#28Z?Ij0xL_dFkP zWAm>g)f(`~7i@gGbh(y-ry!1aSI3tg-DzN(Y)1PzUMJ9s^c#JN$#hbQ$-F`D{pr~1 z0#XO`AiINhn{cM_00E$biFy?^(9Z-Okpr>2@e&bHVzdm#qmOE41sw0Y32k|Ta-})2 z$3?qld;XjMH4H%y;@U~uxT@_=3Pe-d9i4vYW!DGkK{Eax^yzB^5n6V!t!F^a<{ThW zRBcZ2Od63+U&*q{c^_CmUfLP}i}gA~y6Mce*7N>lXyGk_K+Ukd9-?|TME!1vcKXcs zTX6J_J6h5M?dAdC0l4SjYHSF%Y{owr0cg7d^jL&yRJli%sLHCeC5G*KvelilXT0T- z*8IxBdIVIxL?w?l(93=p8y<5Kw5WyT9Uyl(skfZB;lw=1v)ObME$PMP>(zc>E{{`c zWoX>+;lr~-j*T*5Mtp;4%9WLYRd>{LTec!x)9v;NTWNwmKBhcqaj#$GP>E%`7&oc> zQLRf>cL45Tw%HR`vQ;6*CJ;5!GV00`j@xVmAL2aGhoEJ>ogt%=HglbuKAoO{C(|CH z4CI-mSEs#K2eAt~Jv^WLd+|eApyX2NZZm#>kECaa@OS5*=tN3>j0b>dMs^ou+#nwG z)AW=$T|0RAP+i2Tnat61Y6j>}iLKrJ49ioS+%K-a7g7d{qagbF5s&`+^Hh(PSnOC_ zaNPRVbE(3FySV&C^;@#Zrf>$g{_K}b^a}zL#sfE@7jF;|bdrUImxfi~6=dkN54xmh z%@DGZ95QiQzzjcdD+KQ+5b<4_%%=R>$M+#N%io(+-D=!Oi@0)ZG0m;r?%<%7^A*AqXK0>NwBsv8R~wL$D|My20kKDIZ+*=uLbHfAqQnCsAzxW)LG~GjrK%p4FEKk@B3_ z>^$&Ud3@^#3?}Xx;G_I+cBs0DwC(rg9=Tl%QF59Y5l4?gwi;mVrLrb>GrgrRhZC;+ zT+L4k=2Zh819cR*Y&it+p@#{+JjM62YN$%9%-RHb_Phf4zM2|Q>ts2-awv|JC=isu zgs22W(fheF>#*vPzK~qg$8?9`hrA{#8xpL(Ql5mqa+`!`0O*cep9Z|mC0Lj#qJ`Nb zJK<5_xE1@zuG*2lkH`B;=oP{M{QQhFD`fS!SuLRDs1TR4T8F41CHEuxSj66frE9|R_FZ- z@CFy8Nz^96agkXis-1?1)E7?~O{yRFjx_w0p}=f(awZby^#FKUuU0$7eiGK8i0l{i ztE;j`uPG$&wxJ1GW%7<|M7~H){&k+*E|b9Gnmrbu(~u#_(L!YI-75c$<|c+r5pzHh zLt&?WjkR(Z(gn;GaYdCxaOpC${NgRsO#!W_2I$wnT~VB(*J#kFmebZZt;&Vai=V&a zj**7)_Hk*Lqs{<+hkqKh1fe?2D~*E4=)UP!VK6Ux0-zTB4XD-FdKMNoHns}_0=LcO zbT|n}2p9trLXXG$0JDKqhR{C?sAu>as$UWeN!@=*(e6nj0bRX{$TSXR`T}39-%@U$ z6X|AX#!H|l%a@WM|LVW_NxuXI`a{444ie@sKfeE{mG9m(XP=f#@0Caub^~FBXpAlQ zJP2{j&WDHveBqbc&feA}Z2HFDruVi8nGSca{;pyAXT{lmN}nT_gtKaGen)5-C=4Mj zQzY^?Oo7TG%YZupNmA3(kTXX6f#76V z!AX7Tjvqh%*iK;6IJeRy8d^PpSHWa$x2o;V2t*&WE988wyqQSrf&LNuitsrx)QJ6O zr~&*h4TBY)m*z{yW;1@7T6F-rB<&{ED*-ayjsF1PBR7dq-oynb<>n=$Z~)vpI1NhzzI9Bl%oyEsc%Z&mHlDg0iRO%PdU;rJTbP#G{_ zkQ0e)V(O(yi6+Q#)W4JCWzsH7Ek>U1q2r8~t)?qRD19IQhQ?2wzk^~7h^5tukJB^u zsURpEh^6!X-Q#S@EKinkS?Y-Ql#)F1HFv`B`KuD$T2=vCqIncNLDB-^hYN%lKRE5@ zUVln2fg57FtkG~q5HF`zm+AeUbRvY31Rt;2?wEue@IweDue*>VV#ING&m{i#7&SQq$A=;sLsb_w zvda_2u2>yK|8k&XL_?acj6h%hua3?1IWO?v4T%H~2Q}-b5LOfRo03rtLQ25N*~^*f ziiclK@|s3%&PJm?T)aT_7WzusX@N48f#G2mbaJ!;Ne+`_3k~b-G8sKeV<2`b6sKWu zQc7BYI3@*>7cbrg#&l1fk4)=(fuhj^>-ld12c~uN&V=IlFzX^@1Rt~$xBwt4rqv|M z3KCAcz|{bm0@FQc`lqW*dItDSN1)*S(N09bIOH=yar<^Dl^EY;DA(*MYizK2IiGVWJRcu z?nou67ZhX!&4JNq6mScKxJ48_!iu=4enVze_eb%JQ!RQ&KxQu2ZnUkazm53S z$C^atFHabC0%>rdI;^4LH}|;lx5lx4*V{v7L1kx5xax;N5_5$Y{YrM3UTNcmFk~lS zUcFFoGDkXM?)Od#_%6U;Kh@HDt@^lx&q?vLdhu`TfxbQWdwGSnNHW=2Iits2_PlO^ z>e{XlK^;JXhhP^QFPs7SiMUIUbeP<_{xfw3#f}g^rwvwG9=NpcIf>xWXsW$*N?E6_ zWW{*A5x_dzsm5sc&oM+g=M$R{tV8!J34`Z=F#F1Rj!&tDI$;t?p_w3ZPl=aNQBh%= z3knM2_U3=I2Vh=&G+O2NEi&%87@GG~oThKoJHit>cT$1=Jo*etD#2W;|8!{W>0bg5 z)|&wEZF`9PPe+ve2g6MWPKiG3krdzRX(<~I504MA4t!+Q4l5E0o-*j4Oljl%3kH!D zxUvE|hyGv>Bn}`o3l%$+xc{r&|6A}0g@Q?15|H&H#2$C{fVLJ?3g~Ck_dJeYl1Wfj znPnfCmpsu`Px+rM;+|C7#S+zgAyWDx$88>hnhWfy`R|hdb8T_oh`ULbpsi8%l<5kK z5B(Z$d!JvB+U;L9P7u)QMxuH(n;(<_Dasc_^OL&g`TS;n_LiikO;nx`KSpt4#54$>=BW|6m&+53xXU z)A&A<3DjQn2atSEB<{Z4j^Yfvh&TQhGc-P=#zJ2>ZwS=~w33+eo0MFGw#SLtq4BEap ziPlF7)`xJ?1p*qN0T_a&EJ7yY!x*CMpZS9S4+ZYOGL@1R=wlohfo!GIAUN&lx4yoWV^qR>mI<{tIG!$pplI~B{#uhO%t$~Mr$ zyYs_!&zM)@>V}1_hE7>IHRwKg`0&HV2IJ78caLG9r0KcSiI%`VvxcJ8_t^T+#RF+d 
zJ5LOazX{%d+N1gr^OTD3vAgq3YI0qW9CPpbR+;_m1MFBWugdP6vGl7K(%YjhHUDYTKoI^<6WABx@%%w7DO$u$g1UN>;e*1zA-TR zf`yr7tEWC`Q27q^kH*53RDQGfG?1YI($=fYSz0}hLovg&tYD^l#q_qRmqjar;ixxj7z;C*uTy2tKv+!f$2v4D?Z=R z(a}eo)Q;8r9<;{G>fZWrbpT9R(4e*kF=O9&vi&9+!s>^&n}%zyF)zK=Dl+dad4jGg zH0!=B-H#l0W3<4JF`yn-bYqetEq$4g0~gvbu`3@qYoC%)>GYLuOhz)cSZ%~qA$Ddo z{VUovp9L_?hJO6e)80bC8FhqZckx9e3ybsgdu&jk>sfF*Bdz#+LA!?NhjsUzSwy{+ z`*P&*Q>V;Dq*vZk`|sd}rRn+Zl{EF{J!*evA>t?I>DnW z)r_T!W4LF1rDE|pzxlC`b>+JYm|ebRk#WD0DM?K4k`4EfB>4EW{qDw8eZBN9R%9y5 zw#byDHde}IvpsfbeGZcLm2wNcdS(~%idj8Ogk48~$k@vpuKi`+-rkVL`0L)ia%s?` zcSd+c2wV8pd-xmkRCl-k3w;a1t#Put@x_~W=rj2mNdp(<>92jjs)?K-lLb2e*Z zLojxualNm*s%v`-Y$Em2TV;V5`z~#p&xeG~?c=Q*rUJzmO6pSUzh-q;F4)@u7_(e1 zYNho|#e4>rGv;wb#>ToXeZ|%ZSLI@HK9o)>j-P1H_>Vtl&@WLhN2lM?%+`hk5C6&y|&FMhi=>IvVRdLyC{gs@d0zcdvXME*x90fZr@KNDxwrHcss zDgp~ssf0amB_t9Od`8&>6}K5x-RP$ZwOMR|5_wp~%WWLRd%Xal<#;8lY^5+dK~wA_ za-nppj*VsG%Q_&-ahC0o1LLy@yt9INw;ob&Ib9s;id3~l&+=nzW4Eqvnzo|wTPQ21 z-RgbR<`pagdgyc!RWI|c`C_ZmskHPg(~5lp7E%G0GL~l3ZRND>D%{7bfSj8QMNqmW zaPDh&fvKKe(8rZ&Hr#5EliqB_qE$S^9QrMa>r7A`bw*)&UFL?EN?d@Q>4jQsc#A8C z{#ss`khFr$u;aLYQ#OSXMoO`EAXn2_W#r=lmMGqChor^n2W`rX(TnCM&B&^ik5gO# zi9Tbm6>94?{Ja7l*-B`*_`@E(M#zu|piJZn*K0VXE>r;t5Hge}OIF}3{g8NW?OnYD zhx=q!;-QmPr2yXHVOH|N9?j!nyYdVCnBo2h1`s0 zrdJdB-AvM-KFo7pe8w_R!RaP6Ef|CK3(fKX`d`P!jD1-rPEx!r-)7}H&WQcNDP6iD z^d7yec9yBKt(>m?cruT1R(s(soH&UHZQ+BMucbK*wC^?kAuoFd=_X|Q>_*r@hnpx# zPk{cFmMpE@_=!rVRL{52FoZm3gZpYaLdc`Afv>z~ITpWNH*Bl}+r|cZoI})MPf+roeBp%O@GEd?3lJ0d7!+^Al`-?Bw16B<3^v7)PCx)l>XvV21U`faC(^Z zR&Pt4g8tGQE}WpVVcONen6v$Pu=UnJ_e*VC>vj6*XfOexRIlI81^BxnvDRbA>9;(= z@8RUS6dY^rDy0o126OkQRay5PIVQT~s3-CtvN@DEFL9Q)Nq#*oDU4hgn4G8d4F0OL z>(uOV^n#OB?O+gyL;2)+K_T4OORYo!U&6jK+`1wZtIA87^aMYBWJ}$zjRi1~A&Tl1x|m=+w3{1etm~8U zqTX|`7{@UmtQCIEiq?Den`YI;X0h3fx~Xlwz9W_fQJodR}dS~}A;<-}6M z>rCgi80*cZ|DsiX2`$_|@ED9UF=oXZRE|GH(>Y!P`K;GB+d&u0)9Pui>RdewZSsM< zMPcXcc8`1knX-R+kEWpL5!*vieK?XEoh~rd7~W3Pd5bf;V=1O=Wwc&uEjJX|DY@uH z?WcBJp<9v4Q+fxjG9LHruqj%+5^q^PUlf_wW_}EY0b0~kVl)2Ng{Da1>*PfsZD6LA)DI<@g5XrF^7k>DY*x;ge0Pw7U!cl0;Da>4fhy?hBPISP7_n#a|^t z&#yo5@-kiZ0u;A=a;Mw@dG+HeiO2&gp(O>x=G}%Vw4_}ELQk|Ec(;oe0-Y4cSNeKpJ5*KtlO-m&Y}%EC zYdNK`J|bIl`MOfl6K_?=QW648x3f~<24dKE2k(Bg`1q7el{pxgfz)@-z?~EByM;L? 
zpS-*P3}-M46qMQasV^tZ&yUYg0{f;L^RjE(zU3E*Pe%7#GX& ze7nT#{YuG{^%2ajiyyJk7}sR%29X9vZzBe$nLLFUZ=->kE339sArafr+vQXw`_DBit%@x-3XT_MyUmvrQy)4cq^zE#8o=*5N|oIA zuwsOa|8WI|UoTZlc*un&xISL?RUOl3ySuh4;Zdc^qFxb$DKM>kl90nBaHi7&(W;)x zc>9{y6i@$w2`OG+(TDciwW9sQ)?ZbGS2fi4~_j%=TH8yKSJtb8@6wG6U2DW$SC1uM> zPQEj&lPr8^sSNr}yMvOZ#WF5ynoAP?_MVLl6q z!sJghbl}kq+lQBxHe!WQ2nRz=#u66bl zC54dTfBGvvyJ1iPsScfCrusk9Y;omePnaET;5p?}Z{{e}^nW&1ZcwlT*RO~M*aWaZ z4vL?C;4iQM9Swwi)%pbim+>7t$U%>te=cDuyq71s9aY^!c;!?E=u7++5JG=IuY?9| z9dP&FNP2yv`XNE1$8n?id1a%-3P>eOlHE`tNt(;5+MMYDsg>FAl#D-q4y}P5YEsvL zows4N2fx??)R|{A^#UJ&CbDE$yU^y8T8s_#KNsy;-2vW=Z4uI!c)i*3bbCAORhzVVS;K#ic_|-vVG> zGvwzZ7+X=>Svbk@|uB*0sDxi?foi0sGrb(_?eL zMoQdXLoxnW`I|^-qn~==a%mNGb?ZV)DT13UkCy#t<#<*vNHuWjn9aOJIAiB2K+200 zbEVg6_$tFswiB9v{^nMS7cq%_QjwhN$n+z^N&<*0vEp4U4>`)XaF(!=V_G?SHTyK} zezisal4T?EAFdN2aDx+UDJoDj(LsQ?yh9n@ee_zDd}H4_Erb&wejek_!R3JtJIeX1 zv(93=uoxh-i{mC)FvxELoh7%_l8j%JXXD`aE* zhd!|q`?xM8|Fcm?myD0ePpdeb60AZ@*PwOa<<6#<_>I;-LjbQ{FzyV}!y>u~Ni3j4 z0O}s3rx`iMn5n?5s@QFj&uWcDuzAZpde5c@mJxRd>K;BY*L7nbGy7gmfXEvm;)Apj zd`{EB2cbUD^X=J0VbkzJ49ffk6twfmdZddQoAMn(dG?pMd&QGIsaldJ$(e3#1EX5c zDT&miWHR?xQ2_IAI`=F=x9fG#?Mj0MDi0p?i;_(2#Po22w}t?aaZr9ny?kdmEQ)C< z&6)C3(Cq{xl^F{?K2b8opasyO|7Ani?>{HyAg=mx3?!1^1@hFfBUHygN$Q9uaPS|3 zUpIbB1>-fpiE>H~a5tVGP?;tr{{a=aC;iQxGeLiN(ZA0axWoo{=nEdMenUR$_cyZM2Gbr;PeQz$!WJY_5OFVk zzjZR3Q0vqsfhvVyz6VVGoD#nCM>!I7vJs$qqh9V62w37hk+9}7MOobJBcuk2-U>OS zWjY{xm;M5=^v4SYZpe^)w*!o=#sjiNP?FYx3H3{Y|9O32)vcfUIAtKA2D=I-c4L*R zgU9%RwQ+;Bsh{Ao`GfB=t#DE5ign8Gd)xXs9b=axB*hDOVPaDn4)dY`gjA^MCQRP! z(UIKlEs-DrCv`r8);~SS0@-afJ9&VnUX%FDHBfx&UtYch8tyscU?MJE&cvyPpL_Ib z{#g_0HRKfoy{SjPK&E-_m*yRVi5s844hD3Kz`L7{{K0)z>oCHrW%7NYlJhy3hVwcp zA`a}|t0aO$h7}wbV4$Jw|A&^zJO|I=k#M}qDZ`o#6~n9^wwq;w9t3_fBm*>A^dYMV zv~qyI-l?kU{=o=Pb^!cShG`JXfEjY}4L=%7bHc)3REf(0&e_&oJqtQ}bpf3Z{sSzL zKanB*Uy_p%4d`Ev?r(-H7O)RX*VpeN2UiFS#y5eI}?%Gv(q1)l8 z(c1S;b(O=lT|-qu$qyA%dc#YlJ%>Bv8d=CAhx^4VRk|i4uBlIYu=v+?25DCuElN)A zIMvs@&-QEhBr6Okk+W?YD9_FqVQvg_tx;S%kt+1zR;SZ_$*3Pyj?GJd6g^vsOSh{z zO{<)dezoq|el>tw(@kbE$cZWl#J*|N=^f9ljayyAGLg8^W8!^xw&LW#6WiRXY92DV zC3Db!a-?^a-{g4XQ|i9)-V{!miM~D~7%kA8X}0%%g_*Xer(6FYDY7cib~!YhBN1m& zrwF}y3|%TuM}~^snmD&8xtWFZG{0{u_8cxuwOo6#;#2Qvlgdh{Hx~MDR01Hg zVmb;|zd9Y*R&l%dh7|ajKhTrNNOj5All!~X5#EnfLDxD;!PCO8KxbI2acK43kld-)r zQ9;y+Tf>!C>aD_V?v;B|tO_G4Ok1)}JS^T@o-A#)%Si(|w?do5EojzS0hM})(5}{4 zKxiw1rA_`^|FzH1&3Vm*)|1c<-_6I&A?mA8ZCSo6q7~mDV(7GY&ETk0&Nw|Y5A{i7 zooj%?`mFwNGg-P6C3^Y&bT%6;pJ=OimT5L#&gHG`=w9tPdCNK9RomR9zW(Y!dt>1> zWmONiw{j1McpVC~dVfZCK{zJ%pPAX2AwnN%(wc-`2L5~3pX2nRJe0?dzVl42P3l4srX`hM{&%^C~Cz99zXzoTvc@}znek^_$7Ld*?P z>Jm1GxtG)xmb$|%p;x`H?8uyU%{A1lcc1*_tn~Qes85AH#Ci5c=EQwI`ov>IGf1v} zzx;{nqL6j9s{wsF`KQ06QK7~bn7Mdxa(*Aul*A>A9zchk=(n7x5H+z}?XVxxI!WOz zZbL(_YHQv!?a2IjGj}Vq!aj}(xlFU)t^K6!<3?y)cgmtDibtzGG=toUY(ErktT4* zX5Wg0T?76slAO~s~zoIN>~y~!`2R4W}(355nJN_4=M z-L7MSwna~MtP?})+K0$w!#9Pb^scu2rZ)j~DK5nZ+VV0Jz27xuCsh6;)xfZD&8s^@ z#{U?b6?u#K95U63_v5q$YF*Dtu-kf7TD7N0zWsf-)wn|fbWvGm4)RUPdh|vzKZxcV zFKE%(X2a=5-BalL*_9!{t@^qjS?B+=+_u4}fECm+W#@OsPa*I4CA!=1pw70aPe)+v zP_CSZ%Zb;uVmh6nHdQT+(bqC;-g&@P=by$rRdQB`E_Iun%MPt4;{xQCx;^V1x3@H_ ziDq~W@a2rqSAH6Edg#!CRjWuH$VXw^y=bFi=!sUeJO0$*@dItTp2VRnWfhmdwEVJ# zL?F^W+S0VC*yZgXyUa2sE9A!ny3O5GDw!ybRTnFiU8}f-=_$#>+nP_(Q~XyiAxpMB%tXEPNg@yu_We#I{i;M}TT zOm4}dCfRXsv3B`vlO_Pd`6Ib|q7eY7&+D&Jw3MAHv`@-0Oa#n^wP>VFxk!L_i$d5+ z#SUaPb}zqCZ`fa|A;qH7^T`$@OWw-l#o|eB9mR&Vg~iVgK%6}G?aJ{WPdAqik2v>@ zYcf=+U5(D44)Rrvk2e{OdMNb=23U-{rzM9fsHI86JczC}PWm)%0i_Hhb|JKM()45? 
zvks;gGIF@sr`w}1( zObwsBRi3@2TiNv8Ti05ju(De%EE0S-uJ~}sb%n=Tnibvb@&zAHG518-Ttvhl5B=5P z+z_RG5kT!L?v2iOdVcZy@roDc_`5RT;9ZpMl4NVwf88XMC8pwDHTIxZwVlt9qkFue za*J7dCpS6Ww6oK;3`Tf_D^RIo>yA9Z-M{3~q{_FVezI(GYm@3|s8FuYa!niKonyJFS?xu-IMo)H8*-DL6pNNM zU&q?wrd7lbj%ab}fJD=iV)=N%;6pX8dGGnyS}7UQAq$=Jbeu zSPZSq`OjT`L^-swv>)q^03^A@&Nz*pa;Ma+Xqf2MuWTK>HB+G8(IjNBE=8{E#Q{3W z4`Y}6$lTrXZR&pPez#Bf1fAzI3w1i#-#yfD&%n$?QQt@PKOSkTGzl)j0B{Q6d@$jS66MF)t|_&t6gFfzzwI{Z9#b*?Y+O9j z|D!i6g-@Z&J;1ZguF=8T(|YN|fJEWYkLh_`=jv2vKpY~@^25Q!;qmp8!$VFVaP3lP0UBC4enS*bv zJ6@I7(tkMae)p^P=0j0&gv^f5Msh!4NLpKK^+4FGcI6@8%VEQbdTH+y5_XhYli=m_ zjpt~s;O41-#Q`-5J+O)307$I=>r3JP;L-t~KDnmTLyu@S?-%9}g@RvazJCEDHb=e= z>46*a?y`_QI^5$pYoCT}W?9><-2Mbdr$AVsdfPb&Mq}DFiMHtBFk-Mf2*y}Fa^6Mo zk&FHp9~tTK_ya48`p*FYq+;A>tym`{v(Y$q2~o&A1J#k%^h7(Ph6Q$c#I9y1t*bG= zS2~i}1T9*BZm7UH_?+8YG0uw})JPJbOn6$4n)T!Lgvx+v~G4 z03%jIt@UA0>Mx`YG$ON@W@wDWuGTv5$BAA{OoW)^qp8yP9SjCdr7K`=BuCIga4z@5 zA?$QJK`X_Yn1kN&?9nO+P?jQEMLVTCWT;#)AY^0JIUTr1Sycy8Ca$pJ*Jd$7bI%nS zVS&1@p~_`{3J{G~paNJiEr7^a9e{x9@Qlp@g4#593S-dQ_*3K)cL@2d${PvIyRx`S z@Ja#9rHVmIdUf}KD4g?u_>2Z!@{@t*JPtxPy6%4Tb%cNA@b*!B*uAC$P?)fpa$GY! zdYf^0iSEHapcfVD*^iuloQ;hqoccAlU z1vY`Jpe(iB>>{40+c))sLv~5d59Jms;Fxb5t)YCcATU2t{oP96b&kG-HAakL;OK=a z-zj3m3#>H&>s42NLvct^U%GssN4O%;LiZ1%k#+W3#g(5DWN9P?5?NDyZl>CKmypyD z=)j#=pPTDBywmx#7mTTp4C=}I5@rk2c?f4VOFk8Xp={oV#toFLqhg%>XH^WMYwV$j z9*F>V-Iq?-plg6i-!4r0bf!~RkB12U!i%-};`76XCx*yK`*!aiLQ0SFrGmQb=`UXF zx=5~n_lB)n;(k|Y*Gxw@7QL?(!J>YyErA70Uv}`f?Af1BfT}Pg&5bjT28Xyfls zFT)m7(7ffxnY`r@P+oIdpEOt(?Th|R_sK#{ z!D#1aBzXNej6EF17oFVxn=53 zEZ7T?@-QY72o)pmyD&W?VgAFx29Y*n8uI1Oy{zzpC0@-7T!gIY%MPF<$9rKWWIz!d zj$O8NsVrcCy?sMSqHOM(G1HcI0y!6UuUeId-*h4x(Kynkf8DG?XGC`nX$vvFM?iX# zmHj3Z_SCi^dWq&$B{Hz&6VhK+K&*HE1SH@ok&@EVQbuFw8K@!7Pe?*UI~tJ(A)LiF zt;gyS23SK9Hl+-0NLsjk=U;SF06+L1c~7%Ygw)VAhxjPBQO!LE(EK_QVulT{_|{Z+ z(c0_c?CV?J8J<}?3aP9t*!cjjK`>300O%76J|Pl&m(Dq~5}ED>)=K_8Yl0F?w@olU z3zpJ3ama_;AB)tZmf@e*3T_MU%B~{aGm;V(^Oaa8@n7=E3 zUc~^b@KlIGKk}IoI24Gocum;)UIUcH($XRDELO21ZGt-$M$PDVaKJz-TAD^2z|KuX zrvDO1^&ZqrrD0}@2K&yek+!Sy+qm{ODFr+6j?>ML7Vb`By&sGj!eBoVq`X^M(N}Dm zmi+1G&NA$LEm_F!g;W&QKy6#h%UwYI1V;QkTZrw6$cbs!j>hX79PkdPNAP2&9hG+9 z(=3Pz?_bi}I|z*ObZBebf29LI(Z50mGaZftH|y}SY|vz8P<7J36~AWn>K`3M_4)P_ zt@WW=3E@PxG5xX6i*f?B~2SB3*!Jz$@J(h?%nkyrSLHX!6 zptU@Y3?P2QWfw{AQ zrnTBH`W+%c8>fYS{;ZQ375oqLWqyPE{&~(W_!=j{tX}za?$_>R%;!0KV1DKS865{M z?VM38|7Lm6cNg*LZEN_4!Kh!`(TAp-`m7NFQ+yKoEdi#3|JdIMb3hNTRfcZbn=m=$ z(tVkDbJ-}I{65K)g*7H>8bfg}E5F@bPO?H%QRYYg@dX$lz|_)Cq;;$4Ut>@xEBK$9 z&?EZj5lP+dKT$ct=7UXjm?16rO=5GJDYX|Env5PMG7~IWQ$ypwZ)?2)S=c#ag+P=i zCI`EeD`H<{u?*`Z#GgL-H6gcx>$0W(u6F?|!WMlGympYqoZ-IhJ89Cr1Ncpgq95X} zSFdD}%HHspZpZ4JKWu+2^)p?|d+X7Y=v=~%#&?e$ormdU63(k_VHBw3NWK#Iq)0*i z=+EzMb#}$n&ewy-#_Y_tt{v&hEE{z%doYj-t>jbw!KpBIW*idAdCrkn!@6fj;sH%9 zH5egK661x7H<%p=HiK29im%gqmBB=W!_23wyam@Ci;jGZ_z$f|h`5TF*)fGP+B&!; z8&7P9fe6p(rR1`2vBrGa|5>*lRDcLSu48uA(u{s171){3MGQb~-&cGvkBOG|kv3tp zIXG=qMV7m(?+%J(b+SWsVA(G|%c50zNLr~!e7-t)*~%HRPX8b>Ap5YwIZAGSuz_go z2xMyP(4gK{PMSG+itsoOb&Zm9eDwl@1Gt_Mn-yN+Y$AMiRT7S|Ax!h&|1GAuCYudq z|JkiNF{;5lt}%;q0Jo6o30~ANzR3A#wv7{InKJU)Lqy{elx6a!S?1sHTSm`%+?v+) z2V_Y{2*;R;Kp5^iJXt;uhGGX?a11?P`S!|EK016f@~$?@y-Gb12hrE(0~S=SUX%cO zRNn5b`bFNRt=$|>(tV8{O?>lRV?ca6o6Xh-eJiM`*i-gf@T!e1sdkMs3``DVLfh-lokxB+sSrtM`hUvDttUW9I2BqYg2>a0%5eSY|bNp-U2W-ROEE{|SCOwY_{bQrsYVr|)#^hnNUE*A>sm@XHJ=M`SO&pI6lh z!Gm>})S4Fg0ji8d*Bof@-zXtf_BR`LHs=^7EJyTQEab-4+$15eG(_1ovFhlG-`%oFmSGWPU9*>w!wmC5bCSK%$y!pyZO| z!NiCgli|~tACpNGg1vMsHg;`SQztAIya_7k4~r@CM;M@#6eiRpTNj0kgc8Tw$ z^pwm_OSBDgd`tN4#iYmv>;5`qB0{fgq~rJ;zMb;RMU7ByOm*|L23KByRIu#TDg9}M 
z`EswZJ+sZq(OSD=<)*9mmf!=HPG}T|Lo3~uQBSMMnDc8h<|EOiM=|cMad>rAF~UHk>o zND5BK@>3^S>j?~Wn|G^x8hUHVZm7sQM=Fp|7h?*1^D5}28d$zB67eNslqb(Gj9G75 zNB+gz6BWDZQYVHxURBLbVC>h!IPYS{p`|bapNamSD|1OFndzkR{HQ0Z-VSuXXQ8W7 zH;LGMRd2@s~OvAi4-1A&WHxs=ra`rYYYq~UQ zkXfuC4PD?qYbwDtA)^6OpbO$8xZc5lBqPH3RavMLYUlPM0sCNvMlPJ@t-MoU!z|SGd;hAfJig7#^F*Jq4@ZR?z z2?+FX6^jld4xo0#cgBve-SoUo&6>E$JViD=V#iZ~nY< z$B{^w9%e}MzOyf46>^A=wWbfTc8RYQoxjI&YD)E-$+}Z=%E}dniG_2!<_E%q6^K~8 z!^A>oolVOCo2#QnM>V@`!=p{yJ%8OK40cw_i92;xC8=#Th$&@!sj|_eQ;SW=(pR3k zsWhIIBlsT&mIVg?YWlM1uFgQO4h$M&PE)-|9H|hYF=o zd%}zHqo!u*%Sv;C%EV_loG6!4MOgm*P|C-L>kaL{c8r#Sb2N8Dt*-zs1DDz3EnH0hmh;Nw9==CUQ8lt>$U#h!H%< zJ|41Zmz@)F*n1@A>6!ICZ=V)E<_QIEH~xT+qt=U9ArY3j%pXCILIq=HksZqji^1&? zOxs|JA)ujM;I>76lw64?aI9j*9pk6H(-`xS5}(b}hRQlb+*vK~oPA(#_kyKpGy1K1 z{C)3yaPdtaKg0JeYNxPJ%Ilbc(}0A6K72n3dHyAgIOfK7+G>>14Hz4j7H|W(^AVRc zsMe*CIp#~?u0Nyi!YxNPw?dJ?89{zr$lXLpq0bTVh!)I43)4ydn~gs{6|rTX(@#7xdl?h0SHY@`ZR4jOUWxtqNkU8%46e&6aik1oBH6W;)xc(uDij1xT_s`MBxr^RQ(4C<&1!r@!H1;h1Fi`eIi0oyW91F<}gTNExNN>REB|w&+ORrJcPn@>u z5Nt1T3Lp@?lpicrgEol$baWB)=;-KK_Q`?MsKpa~Ae1?!2Jnz=!HYoL>Ik)#yiMX3pOpW7H1k zgso8al7d*^6oC^N1DFw}i;w{e@C1*i>=r75A^CH-hTi~vUZmJO0l9QLtCxz74({sQww`bZ)L>=- z8r?_w6O)8L4@TSJ2}d>qbH>_>inx&PZaZSqP+3SJUj84_V5MRA2xYTb~^rm{HYGkV15g`gXnFP`(GF*?t&c*~LWrAtF_q(DU8 z4kXTzH$Cu^Z0Ma0Ph`RWBWLR7P#r{SBWC;diLU%IJ2ggL43?07W6dy z#V?5gwPS}sLd8HrFODwF+5_|mN-InO&h)r3b~pkTZ`5~k>ntaOmeJh{Oy_Hu3xIqd z0t)pFFxa33j4$}=4@%e=-;H)FvV>#ZfU#2J{ll+%@0d1K2B@wpduh0;l;9OPIMJ0n z*2tVv2TlC-e4HV+Zc4qrO}UfG6KpRE8Ay?^)R=j319Yj5i%R~S2eMOeCEWM-HdOJd zdVBVUTL(<`)w-+pzKd=z&koP{`7*Wp?V{d%zAA+Ys>0JXoQid}Z);u~)wmcb^@SQe z92*OBQr%SNUg*@+8J=eMmQHE#v1wDWhl8n-Ybn$1WhDpSv$NusTNfQhKSeV`1ZE5^ zib>6j6c)4FEGoIBM{Kxll%^;?LGc%~6nA?!I$Tgb=pSyd8K?BK@&oq-)ksO8?q`Ok zL}d!YVqt#VnufSTz(6i9d-!Ay3rEJP)I&JuQ!7AUR%Rj`r4&_c}{HRMEh~-mxNpAOS zsd4_3FE;HIKT<%I*(5u^CM#7lm!W)m*5mO8qIr z)3nKK?ed{PK6$RVu{>Ve5A}62<#nye4KkBw_PDSLvKG8mb%(!&jugKTdX07QV zVbl)4w4vjIn(=FB;5K#|IuX3?WwYTU118qE6IvgIpYR+$*!yh2f@5pi8L&*8t>`7949a0G4ST;eLi?3!RrWoZEVgvS-F@`c{GON zruhBHB~Zc7E^bMZb*3}6kWTyaqPA;kPzyfObEM;7quON0OZ95k(p>VY$-mpWwO8Ui zM(V>kb=*37H7%_nQt)2T=tkz)T5i?8YI09&YDeQ0YrfcLyCljX>ip8F%Qu8ZlO2HN zM2B#LuYQJvz1T%eM$VDW*zVvq%k4wd9%4vOyQk_93};f+b_<(IsIAPyd{gL}P> zg0m4oY$kgP>mR0pWWtJf9(L7mwpf#Gtyqs{oiWYm_#q^*?U|0pk1oUz{reJI;Uoa`{ZTLb6(&56};{M)i#BTviQBkOGR z013*_C}#33QO(|JYExlPm%HnlBc2C9I`syqkKM3+sn}>s+ z;3*6fO}9-u1&&Bm)wM2eY~9${+9D^hO>Ck^JwQg4upH5si)q*gQSFH95}~fXkP2zL zxO|PTd~3cL1so+qS#Q^Ut4-n9iTTk1bKY?6N@vUxa_(B z!K2N|`bL~vJE?b4pFDVfxF|i0tKI*^a9N#jylekM@(R}><*gnb(YT(NVpZk3!ArSo zzK#hP)$qn;n6%V%$7P+e8*j3kB>gLDi+mDX2kC%Y4z1~$z=EGP&!cj(`C(c`r(F&%T3*U8590vxt*2@mX4JJOVUfqzHiC*`zo!xgvv{faZq^M{ig z9)&Y_*z+5@jl}fUXI(6=nHrZ!m|YfPI+KlEoP}S^8dXOoN=&!_?lS-x9(KgSL)4jZs;c+m~mmXyKe&D()*V&po>nU(>uXb~8G22F;YkWc!1w!T-j4FUP#qIa3_RNB9A^dP zAxia}^Pt6LPF%8Vq)4pUB4#TaoS$8lHpdQnbddOu#)bpxa&Kb_)&ITb$O6^YGjx2M66 zXbcl``u=ZuC*jwXN=e1-qEi8<4GoKp12(?bWn|Sm1pG+b!uB8ZFtiYefealx^B4Nz z)EU}Dn34*ZCGZq~p*?*_nih)z)fOd}1qt%wj&lTEgAM`!aiZRFv^}(~w=q9jN7_OZ z5|IXQ9>*P9u1i2{aQ*eDBj6ApkB%6ETGhLN>W_RLrS)0DFgMjzr4oWL6iApt?zbKF z!n$4Xc;cQ*IMOk&P)P-M=8bL0#bkbZxi?3G^T0Re+I|KS*^t`?kg*xOmm}M3uhM1+ zD&f9bJf)-Z(^@*b^eTuom~HEE2|YcL?~f@@E5sjL3ZsYB0UmJ7>+%P*66VDG8&Pjt zw{TubbPo#cQbjMafGaMChNLX#;3@(gK8cYLXA!)qiMFf5A9I#?b?!AIsdk`jhvQ?P zlsZ(miJ`gTXTSh@KeAWDu#Y)1atfpBYcovE@DFevXqi_VPlwN1RKm><7n}r#=PR*67|-Wvp54Xk4ANL)O}Z_op3(Z;Bj8i( z9O*W{(P;kO~Mg_Xw!sq$};Sv;!NencF!%hW9=b)K#HbM z&XR02tJ^lvZb(A_I=mPY8qG5}w))&UNg_hI4MtNw*i=AJ4_k}R1iwks6)a3ioMCEs 
z_-gD4zl)=7wE4~~FzDfWruGNY=a9cbZ}Pr(Pq?i2oGDzq(hVX<^ULAL?{~l;oU;r@9z1D9jqBVc`ecbB(Lrnt$_@}0mDt=EtUWtgW+cGWT}R{#{-&Dj&0{Y6Sa zT&1F~!biF7VONdz0(mYk?ryuVHSw1CY+(ChDK{JqC4r!w$=}!lts$PH35I>MAB#cR zkLwb;G#f?{4L<@mT3RB76&UF7+jOR{w@N*Ci9s1T^5q0^XY=gWbv4`HLp=fwnJgJk zd3?V+6KTin>09ya2TRE9&E7n!9Iu+%ys0c-wq7uWw21~uCRg)(nsDPdr)J0nE5B7- z(lnBQlxm`t3uj8BU`|0AYu-2~+X-Lm&$7~TgIZ(#YhWPAsJF7Bz+mTf`5sn8?|-9B zRw!7WLp~BbN4l0`p=x|~y<4xRa&0t^ojMvBOm-~{ znR2wfAb^PoAnp6n3rQQXct}O|EnNcX?BK4RF{7`ctu$%2!dX=&2x@qq zqkgQZu#l0on!Cf9+m~YHDe~X$K6b<}V?1SeUHs!rMswz>IX@&rF>8EsO;gMU{fnpyc5+ z9x8f9Txf&3Gq;Vd1MHBi#6=JLWku3r_e%fMepBhD3V!}X3k*c(C7Y&KasPj))R@io zOFAvG$-gJIW%OXX4)~ZaNVYTzZTc{b^Ds-gE^b2-`&nRZ>A*b~PN9Zk;VO5=y->ga zsp=4`C@1N2L-5&z8@1_9xl<)5Kitp$QrCx3ZxrHu9Yf<>@NGWn zmGX(}isJLN)41t->*#tu?(V?i zeWThMG4+!ZL$$a9uP=~rTVMy9SM~5trwkg-Litz1W!m=p8D9H>R)Hwu+8=rfKr_a6 z5{Q0oDhn3zv>$>^QgH$Y5^9fW8WM-BMx~Og*WpDXJP~?5kc0< zM_2VCT80ZxxC&wDS1ez3YZo9EMD=N^-yVRrsCc-2|2)h!we-bUmS|1MyZxK#@Kiu< z6=McZ0v9Bh7v&SdjUggwJD8dr0>R5~-?IyIS#nyy_~ZA2gC+G=NgFQAg6P7SidxRR zWFU4Q$qJ#WJK=1K-4mqVcJBt*m(G_**A{#%G2$pDREGOvkkMd=;B?)eU2UVuv`jM) zk*3D5#FyBdQ@8ZSjs+hq$&cj8BR(Dh61-=HkgaLhVcNXU6rD+t2<}OzI~$6mk)isJ z$Cc%;Erf-57QBckOlb!XuZ6l4A}4Y+XHzL{bZ~}q?UCXPQCx>(Y$8Io>ja29T;JCJ zo6`3F-4rL>;JIx}ks=QfV6oU5UFP2~13=lnj@3|>N#KCJ9phN=GOWNO0LMFc?0_<6 zVk%tU@{L*JaP&S%Ex>_WCui15^MeAFlSmzFzSod2ydw>27%I}aBOMo11;hwNOz52Y zkk6Vk!t`l4aF|!}o2%sjEXT_oJc*=I^B^PFEI%JMOsypei8_NlYTu05T%?eq;^C$J z^M*rC^E{T|Uz7LeemD#&AgYS76VZ@~pJCuRKPWp;o+DZypB6_fQH86n|2i59%m_3^ z3aQHZmyA#Fs>QxSB@M?{6mtC9g6GnD7h$3RT<6y>+^`!pL_n{dd$usHpsm6!1P`I3 zFhUra&iL!I7p=gmY|j2Id>2QWfKXEhHRf~Pg7QHGr3+3=6%dGMipNuOHXc&=^@;Eo zy(sWTgN;#suLnoZJ$zl${4!DW4M)mT#@)K;Rl;A8nQ^+#tM z7gc7=$jbE`gjwZ~n!|#YH{uK(AKkHk!||}zioUpuQ24M?UJ7?UUTI-F;YA_fQ)({k zIgdf8ZEs68J+n2vV!exzLZCc0`IWfl#Himuo@a+a{MrjvBSycO$rn#lU3iB?V+cP) zILu+ZUr&t^;uU~P;#&BKFk`zFA};68O5Fu-V4*`t-wm;iWef8!cF$R)jG^Sz=6U#> z=9b?hXzaBmHvyeI`gjPLQ2N{TqM791uQOsDzy`J3(NIqD+ebh!14;^h{}Q4auF0#%l1Vb`28vf0GyHF$nURM21!_+2Ya8;P;uT{K{?MWHlf`b c_ljXpoe0jsg8YIDIQXZfuDvsMhw-KV17K>`3jhEB literal 0 HcmV?d00001 diff --git a/tests/benchmark/ci/argo.yaml b/tests/benchmark/ci/argo.yaml new file mode 100644 index 0000000000..d9a43dda51 --- /dev/null +++ b/tests/benchmark/ci/argo.yaml @@ -0,0 +1,232 @@ +metadata: + name: benchmark + namespace: qa + uid: e8a51212-9b27-441d-b357-e73c63854ccf + resourceVersion: '63697833' + generation: 41 + creationTimestamp: '2021-05-25T11:04:40Z' + labels: + workflows.argoproj.io/creator: system-serviceaccount-argo-argo-server + managedFields: + - manager: argo + operation: Update + apiVersion: argoproj.io/v1alpha1 + fieldsType: FieldsV1 + fieldsV1: + 'f:metadata': + 'f:labels': + .: {} + 'f:workflows.argoproj.io/creator': {} + 'f:spec': + .: {} + 'f:arguments': + .: {} + 'f:parameters': {} + 'f:entrypoint': {} + 'f:nodeSelector': + .: {} + 'f:node-role.kubernetes.io/benchmark': {} + 'f:onExit': {} + 'f:serviceAccountName': {} + 'f:templates': {} + 'f:tolerations': {} + 'f:volumes': {} +spec: + templates: + - name: benchmark-loop + inputs: {} + outputs: {} + metadata: {} + steps: + - - name: call-benchmark-test + template: benchmark + arguments: + parameters: + - name: server-instance + value: '{{workflow.name}}-{{item.instanceId}}' + - name: server-configmap + value: '{{item.server-configmap}}' + - name: client-configmap + value: '{{item.client-configmap}}' + withParam: '{{workflow.parameters.configmaps}}' + - name: uninstall-all + inputs: {} + outputs: {} + metadata: {} + steps: + - - name: uninstall-milvus + template: uninstall-milvus + arguments: + 
parameters: + - name: server-instance + value: '{{workflow.name}}-{{item.instanceId}}' + withParam: '{{workflow.parameters.configmaps}}' + - name: benchmark + inputs: + parameters: + - name: server-instance + - name: server-configmap + - name: client-configmap + outputs: {} + metadata: {} + steps: + - - name: install-milvus + template: install-milvus + arguments: + parameters: + - name: server-instance + value: '{{inputs.parameters.server-instance}}' + - name: server-configmap + value: '{{inputs.parameters.server-configmap}}' + - - name: client-test + template: client-test + arguments: + parameters: + - name: server-instance + value: '{{inputs.parameters.server-instance}}' + - name: server-configmap + value: '{{inputs.parameters.server-configmap}}' + - name: client-configmap + value: '{{inputs.parameters.client-configmap}}' + - name: uninstall-milvus + inputs: + parameters: + - name: server-instance + outputs: {} + metadata: {} + container: + name: '' + image: 'registry.zilliz.com/milvus/milvus-test-env:v0.5' + command: + - /bin/sh + - '-c' + args: + - ' helm uninstall -n qa-milvus {{inputs.parameters.server-instance}} && kubectl delete pvc -l app.kubernetes.io/instance={{inputs.parameters.server-instance}} -n qa-milvus ' + resources: {} + volumeMounts: + - name: kube-config + mountPath: /root/.kube + - name: install-milvus + inputs: + parameters: + - name: server-instance + - name: server-configmap + artifacts: + - name: charts + path: /src/helm + git: + repo: 'git@github.com:milvus-io/milvus-helm.git' + revision: master + sshPrivateKeySecret: + name: github-key + key: ssh-private-key + - name: benchmark-src + path: /src/benchmark + git: + repo: 'git@github.com:zilliztech/milvus_benchmark.git' + revision: '{{workflow.parameters.test-client-branch}}' + sshPrivateKeySecret: + name: github-key + key: ssh-private-key + outputs: {} + metadata: {} + container: + name: '' + image: 'registry.zilliz.com/milvus/milvus-test-env:v0.5' + command: + - /bin/sh + - '-c' + args: + - ' cd /src/helm/charts/milvus && cp -r /src/benchmark/milvus_benchmark/* . && cp /configmap-server/config.yaml . && python update.py --src-values=values.yaml --deploy-params=config.yaml && cat values.yaml && helm install -n qa-milvus --set image.all.repository={{workflow.parameters.milvus-image-repository}} --set image.all.tag={{workflow.parameters.milvus-image-tag}} --set image.all.pullPolicy=Always --set etcd.persistence.enabled=false --set servicemonitor.enabled=true --wait --timeout 15m {{inputs.parameters.server-instance}} . 
&& kubectl get pods -n qa-milvus -l app.kubernetes.io/instance={{inputs.parameters.server-instance}} ' + resources: {} + volumeMounts: + - name: kube-config + readOnly: true + mountPath: /root/.kube + - name: benchmark-server-configmap + mountPath: /configmap-server + volumes: + - name: benchmark-server-configmap + configMap: + name: '{{inputs.parameters.server-configmap}}' + - name: client-test + inputs: + parameters: + - name: server-instance + - name: server-configmap + - name: client-configmap + artifacts: + - name: source + path: /src + git: + repo: 'git@github.com:zilliztech/milvus_benchmark.git' + revision: '{{workflow.parameters.test-client-branch}}' + sshPrivateKeySecret: + name: github-key + key: ssh-private-key + outputs: {} + metadata: {} + container: + name: '' + image: 'registry.zilliz.com/milvus/milvus-test-env:v0.5' + command: + - /bin/sh + - '-c' + args: + - ' cd /src && pip install -r requirements.txt -i https://pypi.doubanio.com/simple/ --trusted-host pypi.doubanio.com && pip install -i https://test.pypi.org/simple/ pymilvus=={{workflow.parameters.test-sdk-version}} && cd milvus_benchmark && export PYTHONPATH=/src && python main.py --host={{inputs.parameters.server-instance}}-milvus.qa-milvus.svc.cluster.local --local --suite=/configmap-client/config.yaml --server-config=/configmap-server/config.yaml' + resources: + limits: + cpu: '4' + memory: 4Gi + volumeMounts: + - name: kube-config + readOnly: true + mountPath: /root/.kube + - name: benchmark-server-configmap + mountPath: /configmap-server + - name: benchmark-client-configmap + mountPath: /configmap-client + - name: db-data-path + mountPath: /test + volumes: + - name: benchmark-server-configmap + configMap: + name: '{{inputs.parameters.server-configmap}}' + - name: benchmark-client-configmap + configMap: + name: '{{inputs.parameters.client-configmap}}' + - name: db-data-path + flexVolume: + driver: fstab/cifs + fsType: cifs + secretRef: + name: cifs-test-secret + options: + mountOptions: vers=1.0 + networkPath: //172.16.70.249/test + activeDeadlineSeconds: 21600 + entrypoint: benchmark-loop + arguments: + parameters: + - name: milvus-image-repository + value: harbor.zilliz.cc/dockerhub/milvusdb/milvus-dev + - name: milvus-image-tag + value: master-latest + - name: test-client-branch + value: master + - name: test-sdk-version + value: 2.0.0rc4.dev1 + - name: configmaps + value: ' [ {"instanceId":"1", "server-configmap": "server-single-8c16m", "client-configmap": "client-acc-sift-ivf-flat" } ]' + serviceAccountName: qa-admin + volumes: + - name: kube-config + secret: + secretName: qa-admin-config + nodeSelector: + node-role.kubernetes.io/benchmark: '' + tolerations: + - key: node-role.kubernetes.io/benchmark + operator: Exists + effect: NoSchedule + onExit: uninstall-all diff --git a/tests/benchmark/ci/function/file_transfer.groovy b/tests/benchmark/ci/function/file_transfer.groovy new file mode 100644 index 0000000000..bebae14832 --- /dev/null +++ b/tests/benchmark/ci/function/file_transfer.groovy @@ -0,0 +1,10 @@ +def FileTransfer (sourceFiles, remoteDirectory, remoteIP, protocol = "ftp", makeEmptyDirs = true) { + if (protocol == "ftp") { + ftpPublisher masterNodeName: '', paramPublish: [parameterName: ''], alwaysPublishFromMaster: false, continueOnError: false, failOnError: true, publishers: [ + [configName: "${remoteIP}", transfers: [ + [asciiMode: false, cleanRemote: false, excludes: '', flatten: false, makeEmptyDirs: "${makeEmptyDirs}", noDefaultExcludes: false, patternSeparator: '[, ]+', remoteDirectory: 
"${remoteDirectory}", remoteDirectorySDF: false, removePrefix: '', sourceFiles: "${sourceFiles}"]], usePromotionTimestamp: true, useWorkspaceInPromotion: false, verbose: true + ] + ] + } +} +return this diff --git a/tests/benchmark/ci/jenkinsfile/cleanup.groovy b/tests/benchmark/ci/jenkinsfile/cleanup.groovy new file mode 100644 index 0000000000..ec38b34618 --- /dev/null +++ b/tests/benchmark/ci/jenkinsfile/cleanup.groovy @@ -0,0 +1,13 @@ +try { + def result = sh script: "helm status -n milvus ${env.HELM_RELEASE_NAME}", returnStatus: true + if (!result) { + sh "helm uninstall -n milvus ${env.HELM_RELEASE_NAME}" + } +} catch (exc) { + def result = sh script: "helm status -n milvus ${env.HELM_RELEASE_NAME}", returnStatus: true + if (!result) { + sh "helm uninstall -n milvus ${env.HELM_RELEASE_NAME}" + } + throw exc +} + diff --git a/tests/benchmark/ci/jenkinsfile/deploy_test.groovy b/tests/benchmark/ci/jenkinsfile/deploy_test.groovy new file mode 100644 index 0000000000..a44127e7d6 --- /dev/null +++ b/tests/benchmark/ci/jenkinsfile/deploy_test.groovy @@ -0,0 +1,24 @@ +try { + dir ("milvus-helm-charts") { + // sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts' + // sh 'helm repo update' + checkout([$class: 'GitSCM', branches: [[name: "${HELM_BRANCH}"]], userRemoteConfigs: [[url: "${HELM_URL}", name: 'origin', refspec: "+refs/heads/${HELM_BRANCH}:refs/remotes/origin/${HELM_BRANCH}"]]]) + } + // dir ("milvus_benchmark") { + // print "Git clone url: ${TEST_URL}:${TEST_BRANCH}" + // checkout([$class: 'GitSCM', branches: [[name: "${TEST_BRANCH}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "${TEST_URL}", name: 'origin', refspec: "+refs/heads/${TEST_BRANCH}:refs/remotes/origin/${TEST_BRANCH}"]]]) + print "Install requirements" + + // sh "python3 -m pip install pymilvus-distributed==0.0.57" + // sh "python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com" + sh "python3 -m pip install -r requirements.txt" + if ("${params.CLUSTER_NAME}" == "idc-kubernetes") { + sh "export KUBECONFIG=/root/kube/.kube/config && cd milvus_benchmark && export PYTHONPATH=${env.WORKSPACE}/ && python3 main.py --image-version=${params.IMAGE_VERSION} --schedule-conf=scheduler/${params.CONFIG_FILE}" + } else { + sh "cd milvus_benchmark && export PYTHONPATH=${env.WORKSPACE}/ && python3 main.py --image-version=${params.IMAGE_VERSION} --schedule-conf=scheduler/${params.CONFIG_FILE}" + } + // } +} catch (exc) { + echo 'Deploy Test Failed !' 
+ throw exc +} \ No newline at end of file diff --git a/tests/benchmark/ci/jenkinsfile/notify.groovy b/tests/benchmark/ci/jenkinsfile/notify.groovy new file mode 100644 index 0000000000..0a257b8cd8 --- /dev/null +++ b/tests/benchmark/ci/jenkinsfile/notify.groovy @@ -0,0 +1,15 @@ +def notify() { + if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) { + // Send an email only if the build status has changed from green/unstable to red + emailext subject: '$DEFAULT_SUBJECT', + body: '$DEFAULT_CONTENT', + recipientProviders: [ + [$class: 'DevelopersRecipientProvider'], + [$class: 'RequesterRecipientProvider'] + ], + replyTo: '$DEFAULT_REPLYTO', + to: '$DEFAULT_RECIPIENTS' + } +} +return this + diff --git a/tests/benchmark/ci/main_jenkinsfile b/tests/benchmark/ci/main_jenkinsfile new file mode 100644 index 0000000000..f74445418b --- /dev/null +++ b/tests/benchmark/ci/main_jenkinsfile @@ -0,0 +1,151 @@ +pipeline { + agent none + + options { + timestamps() + } + + parameters{ + string defaultValue: 'master', description: 'server image version', name: 'IMAGE_VERSION', trim: true + choice choices: ['kubernetes', 'idc-kubernetes'], description: 'cluster name', name: 'CLUSTER_NAME' + string defaultValue: '2_data.json', description: 'schedule config file under scheduler/', name: 'CONFIG_FILE', trim: true + string defaultValue: 'd0928627-efb6-4cfd-8030-9bf635988d85', description: 'git credentials', name: 'GIT_USER', trim: true + } + + environment { + HELM_URL = "https://github.com/zilliztech/milvus-helm-charts.git" + HELM_BRANCH = "main" + TEST_URL = "https://github.com/zilliztech/milvus_benchmark.git" + TEST_BRANCH = "distributed" + HELM_RELEASE_NAME = "distributed-benchmark-test-${env.BUILD_NUMBER}" + } + + stages { + stage("Setup env") { + agent { + kubernetes { + cloud "${params.CLUSTER_NAME}" + label "test-benchmark-${env.JOB_NAME}-${env.BUILD_NUMBER}" + defaultContainer 'jnlp' + yaml """ + apiVersion: v1 + kind: Pod + metadata: + labels: + app: milvus + component: test + spec: + containers: + - name: milvus-test-env + image: registry.zilliz.com/milvus/milvus-test-env:v0.3 + command: + - cat + tty: true + volumeMounts: + - name: kubeconf + mountPath: /root/.kube/ + readOnly: true + - name: kubeconf2 + mountPath: /root/kube/.kube/ + readOnly: true + - name: db-data-path + mountPath: /test + readOnly: false + nodeSelector: + kubernetes.io/hostname: idc-sh002 + tolerations: + - key: worker + operator: Equal + value: performance + effect: NoSchedule + volumes: + - name: kubeconf + secret: + secretName: test-cluster-config + - name: kubeconf2 + secret: + secretName: idc-cluster-config + - name: db-data-path + flexVolume: + driver: "fstab/cifs" + fsType: "cifs" + secretRef: + name: "cifs-test-secret" + options: + networkPath: "//172.16.70.249/test" + mountOptions: "vers=1.0" + """ + } + } + + stages { + stage("Publish Daily Docker images") { + steps { + container('milvus-test-env') { + script { + boolean isNightlyTest = isTimeTriggeredBuild() + if (isNightlyTest) { + // build job: 'milvus-publish-daily-docker', parameters: [[$class: 'StringParameterValue', name: 'BRANCH', value: "${params.IMAGE_VERSION}"]], wait: false + build job: 'milvus-publish-daily-docker', parameters: [string(name: 'LOCAL_DOKCER_REGISTRY_URL', value: 'registry.zilliz.com'), string(name: 'REMOTE_DOKCER_REGISTRY_URL', value: 'registry-1.docker.io'), string(name: 'REMOTE_DOCKER_CREDENTIALS_ID', value: 'milvus-docker-access-token'), string(name: 'BRANCH', value: String.valueOf(IMAGE_VERSION))], wait: false + } else { + echo "Skip publish daily docker
images ..." + } + } + } + } + } + + stage("Deploy Test") { + steps { + container('milvus-test-env') { + script { + print "In Deploy Test Stage" + // use the idc context + // sh 'kubectl config use-context idc001' + if ("${params.CLUSTER_NAME}" == "idc-kubernetes") { + print "Use cluster name idc001" + sh 'export KUBECONFIG=/root/kube/.kube/config' + } + load "${env.WORKSPACE}/ci/jenkinsfile/deploy_test.groovy" + } + } + } + } + + stage ("Cleanup Env") { + steps { + container('milvus-test-env') { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/cleanup.groovy" + } + } + } + } + } + post { + success { + script { + echo "Milvus benchmark test success !" + } + } + aborted { + script { + echo "Milvus benchmark test aborted !" + } + } + failure { + script { + echo "Milvus benchmark test failed !" + } + } + } + } + } +} + +boolean isTimeTriggeredBuild() { + if (currentBuild.getBuildCauses('hudson.triggers.TimerTrigger$TimerTriggerCause').size() != 0) { + return true + } + return false +} diff --git a/tests/benchmark/ci/pod_containers/milvus-testframework.yaml b/tests/benchmark/ci/pod_containers/milvus-testframework.yaml new file mode 100644 index 0000000000..6b1d6c7dfd --- /dev/null +++ b/tests/benchmark/ci/pod_containers/milvus-testframework.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: milvus + componet: testframework +spec: + containers: + - name: milvus-testframework + image: registry.zilliz.com/milvus/milvus-test:v0.2 + command: + - cat + tty: true diff --git a/tests/benchmark/ci/publish_jenkinsfile b/tests/benchmark/ci/publish_jenkinsfile new file mode 100644 index 0000000000..9936030c0e --- /dev/null +++ b/tests/benchmark/ci/publish_jenkinsfile @@ -0,0 +1,104 @@ +pipeline { + agent none + + options { + timestamps() + } + + parameters{ + string defaultValue: 'registry.zilliz.com', description: 'Local Docker registry URL', name: 'LOCAL_DOKCER_REGISTRY_URL', trim: true + string defaultValue: 'registry-1.docker.io', description: 'Remote Docker registry URL', name: 'REMOTE_DOKCER_REGISTRY_URL', trim: true + string defaultValue: 'milvus-docker-access-token', description: 'Remote Docker credentials id', name: 'REMOTE_DOCKER_CREDENTIALS_ID', trim: true + string(defaultValue: "master", description: 'Milvus server version', name: 'BRANCH') + } + + environment { + DAILY_BUILD_VERSION = VersionNumber([ + versionNumberString : '${BUILD_DATE_FORMATTED, "yyyyMMdd"}' + ]); + } + + stages { + stage('Push Daily Docker Images') { + matrix { + agent none + axes { + axis { + name 'OS_NAME' + values 'ubuntu18.04', 'centos7' + } + + axis { + name 'CPU_ARCH' + values 'amd64' + } + + axis { + name 'BINARY_VERSION' + values 'gpu', 'cpu' + } + } + + stages { + stage("Publish Docker Images") { + environment { + DOCKER_VERSION = "${params.BRANCH}-${BINARY_VERSION}-${OS_NAME}-release" + REMOTE_DOCKER_VERSION = "${params.BRANCH}-${OS_NAME}-${BINARY_VERSION}-${DAILY_BUILD_VERSION}" + REMOTE_DOCKER_LATEST_VERSION = "${params.BRANCH}-${OS_NAME}-${BINARY_VERSION}-latest" + } + + agent { + kubernetes { + label "${OS_NAME}-${BINARY_VERSION}-publish-${env.BUILD_NUMBER}" + defaultContainer 'jnlp' + yaml """ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: publish + componet: docker +spec: + containers: + - name: publish-images + image: registry.zilliz.com/library/docker:v1.0.0 + securityContext: + privileged: true + command: + - cat + tty: true + resources: + limits: + memory: "4Gi" + cpu: "1.0" + requests: + memory: "2Gi" + cpu: "0.5" + volumeMounts: + - name: docker-sock + mountPath: 
/var/run/docker.sock + volumes: + - name: docker-sock + hostPath: + path: /var/run/docker.sock + """ + } + } + + stages { + stage('Publish') { + steps { + container('publish-images') { + script { + load "${env.WORKSPACE}/ci/jenkinsfile/publishDailyImages.groovy" + } + } + } + } + } + } + } + } + } +} diff --git a/tests/benchmark/ci/scripts/yaml_processor.py b/tests/benchmark/ci/scripts/yaml_processor.py new file mode 100755 index 0000000000..0e6d7dbbf4 --- /dev/null +++ b/tests/benchmark/ci/scripts/yaml_processor.py @@ -0,0 +1,536 @@ +#!/usr/bin/env python3 + +import sys +import argparse +from argparse import Namespace +import os, shutil +import getopt +from ruamel.yaml import YAML, yaml_object +from ruamel.yaml.comments import CommentedSeq, CommentedMap +from ruamel.yaml.tokens import CommentToken + +## +yaml = YAML(typ="rt") +## format yaml file +yaml.indent(mapping=2, sequence=4, offset=2) + + +############################################ +# Comment operation +# +############################################ +def _extract_comment(_comment): + """ + remove '#' at start of comment + """ + # if _comment is empty, do nothing + if not _comment: + return _comment + + # str_ = _comment.lstrip(" ") + str_ = _comment.strip() + str_ = str_.lstrip("#") + + return str_ + + +def _add_eol_comment(element, *args, **kwargs): + """ + add_eol_comment + args --> (comment, key) + """ + if element is None or \ + (not isinstance(element, CommentedMap) and + not isinstance(element, CommentedSeq)) or \ + args[0] is None or \ + len(args[0]) == 0: + return + + comment = args[0] + # comment is empty, do nothing + if not comment: + return + + key = args[1] + try: + element.yaml_add_eol_comment(*args, **kwargs) + except Exception: + element.ca.items.pop(key, None) + element.yaml_add_eol_comment(*args, **kwargs) + + +def _map_comment(_element, _key): + origin_comment = "" + token = _element.ca.items.get(_key, None) + if token is not None: + try: + origin_comment = token[2].value + except Exception: + try: + # comment is below element, add prefix "#\n" + col = _element.lc.col + 2 + space_list = [" " for i in range(col)] + space_str = "".join(space_list) + + origin_comment = "\n" + "".join([space_str + t.value for t in token[3]]) + except Exception: + pass + + return origin_comment + + +def _seq_comment(_element, _index): + # get target comment + _comment = "" + token = _element.ca.items.get(_index, None) + if token is not None: + _comment = token[0].value + + return _comment + + +def _start_comment(_element): + _comment = "" + cmt = _element.ca.comment + try: + _comment = cmt[1][0].value + except Exception: + pass + + return _comment + + +def _comment_counter(_comment): + """ + + count the comment's parts and split them into a list + """ + + x = lambda l: l.strip().strip("#").strip() + + _counter = [] + if _comment.startswith("\n"): + _counter.append("") + _counter.append(x(_comment[1:])) + + return _counter + elif _comment.startswith("#\n"): + _counter.append("") + _counter.append(x(_comment[2:])) + else: + index = _comment.find("\n") + _counter.append(x(_comment[:index])) + _counter.append(x(_comment[index + 1:])) + + return _counter + + +def _obtain_comment(_m_comment, _t_comment): + if not _m_comment or not _t_comment: + return _m_comment or _t_comment + + _m_counter = _comment_counter(_m_comment) + _t_counter = _comment_counter(_t_comment) + + if not _m_counter[0] and not _t_counter[1]: + comment = _t_comment + _m_comment + elif not _m_counter[1] and not _t_counter[0]: + comment = _m_comment + _t_comment + elif
_t_counter[0] and _t_counter[1]: + comment = _t_comment + elif not _t_counter[0] and not _t_counter[1]: + comment = _m_comment + elif not _m_counter[0] and not _m_counter[1]: + comment = _t_comment + else: + if _t_counter[0]: + comment = _m_comment.replace(_m_counter[0], _t_counter[0], 1) + else: + comment = _m_comment.replace(_m_counter[1], _t_counter[1], 1) + + i = comment.find("\n\n\n") + while i >= 0: + comment = comment.replace("\n\n\n", "\n\n", 1) + i = comment.find("\n\n\n") + + return comment + + +############################################ +# Utils +# +############################################ +def _get_update_par(_args): + _dict = _args.__dict__ + + # file path + _in_file = _dict.get("f", None) or _dict.get("file", None) + # tips + _tips = _dict.get('tips', None) or "Input \"-h\" for more information" + # update + _u = _dict.get("u", None) or _dict.get("update", None) + # append + _a = _dict.get('a', None) or _dict.get('append', None) + # out stream group + _i = _dict.get("i", None) or _dict.get("inplace", None) + _o = _dict.get("o", None) or _dict.get("out_file", None) + + return _in_file, _u, _a, _i, _o, _tips + + +############################################ +# Element operation +# +############################################ +def update_map_element(element, key, value, comment, _type): + """ + update the given key of a CommentedMap with value, preserving comments + """ + if element is None or not isinstance(element, CommentedMap): + print("Only key-value update is supported") + sys.exit(1) + + origin_comment = _map_comment(element, key) + + sub_element = element.get(key, None) + if isinstance(sub_element, CommentedMap) or isinstance(sub_element, CommentedSeq): + print("Only updating a single value is supported") + + element.update({key: value}) + + comment = _obtain_comment(origin_comment, comment) + _add_eol_comment(element, _extract_comment(comment), key) + + +def update_seq_element(element, value, comment, _type): + if element is None or not isinstance(element, CommentedSeq): + print("Param `-a` can only be used to append to a yaml list") + sys.exit(1) + element.append(str(value)) + + comment = _obtain_comment("", comment) + _add_eol_comment(element, _extract_comment(comment), len(element) - 1) + + +def run_update(code, keys, value, comment, _app): + key_list = keys.split(".") + + space_str = ":\n  " + key_str = "{}".format(key_list[0]) + for key in key_list[1:]: + key_str = key_str + space_str + key + space_str = space_str + "  " + if not _app: + yaml_str = """{}: {}""".format(key_str, value) + else: + yaml_str = "{}{}- {}".format(key_str, space_str, value) + + if comment: + yaml_str = "{} # {}".format(yaml_str, comment) + + mcode = yaml.load(yaml_str) + + _merge(code, mcode) + + +def _update(code, _update, _app, _tips): + if not _update: + return code + + _update_list = [l.strip() for l in _update.split(",")] + for l in _update_list: + try: + variant, comment = l.split("#") + except ValueError: + variant = l + comment = None + + try: + keys, value = variant.split("=") + run_update(code, keys, value, comment, _app) + except ValueError: + print("Invalid format.
Run with \"--help\" for more info.") + sys.exit(1) + + return code + + +def _backup(in_file_p): + backup_p = in_file_p + ".bak" + + if os.path.exists(backup_p): + os.remove(backup_p) + + if not os.path.exists(in_file_p): + print("File {} does not exist.".format(in_file_p)) + sys.exit(1) + + shutil.copyfile(in_file_p, backup_p) # copy the file + + +def _recovery(in_file_p): + backup_p = in_file_p + ".bak" + + if not os.path.exists(in_file_p): + print("File {} does not exist.".format(in_file_p)) + sys.exit(1) + elif not os.path.exists(backup_p): + print("Backup file does not exist") + sys.exit(0) + + os.remove(in_file_p) + + os.rename(backup_p, in_file_p) + + +# merge target into master +def _merge(master, target): + if type(master) != type(target): + print("yaml formats do not match:\n") + yaml.dump(master, sys.stdout) + print("\n&&\n") + yaml.dump(target, sys.stdout) + + sys.exit(1) + + ## item is a sequence + if isinstance(target, CommentedSeq): + for index in range(len(target)): + # get target comment + target_comment = _seq_comment(target, index) + + master_index = len(master) + + target_item = target[index] + + if isinstance(target_item, CommentedMap): + merge_flag = False + for idx in range(len(master)): + if isinstance(master[idx], CommentedMap): + if master[idx].keys() == target_item.keys(): + _merge(master[idx], target_item) + # nonlocal merge_flag + master_index = idx + merge_flag = True + break + + if merge_flag is False: + master.append(target_item) + elif target_item not in master: + master.append(target[index]) + else: + # merge(master[index], target[index]) + pass + + # remove the blank-line marker from the previous item + previous_comment = _seq_comment(master, master_index - 1) + _add_eol_comment(master, _extract_comment(previous_comment), master_index - 1) + + origin_comment = _seq_comment(master, master_index) + comment = _obtain_comment(origin_comment, target_comment) + if len(comment) > 0: + _add_eol_comment(master, _extract_comment(comment) + "\n\n", len(master) - 1) + + ## item is a map + elif isinstance(target, CommentedMap): + for item in target: + origin_comment = _map_comment(master, item) + target_comment = _map_comment(target, item) + + # get origin start comment + origin_start_comment = _start_comment(master) + + # get target start comment + target_start_comment = _start_comment(target) + + m = master.get(item, default=None) + if m is None or \ + (not (isinstance(m, CommentedMap) or + isinstance(m, CommentedSeq))): + master.update({item: target[item]}) + + else: + _merge(master[item], target[item]) + + comment = _obtain_comment(origin_comment, target_comment) + if len(comment) > 0: + _add_eol_comment(master, _extract_comment(comment), item) + + start_comment = _obtain_comment(origin_start_comment, target_start_comment) + if len(start_comment) > 0: + master.yaml_set_start_comment(_extract_comment(start_comment)) + + +def _save(_code, _file): + with open(_file, 'w') as wf: + yaml.dump(_code, wf) + + +def _load(_file): + with open(_file, 'r') as rf: + code = yaml.load(rf) + return code + + +############################################ +# sub parser process operation +# +############################################ +def merge_yaml(_args): + _dict = _args.__dict__ + + _m_file = _dict.get("merge_file", None) + _in_file, _u, _a, _i, _o, _tips = _get_update_par(_args) + + if not (_in_file and _m_file): + print(_tips) + sys.exit(1) + + code = _load(_in_file) + mcode = _load(_m_file) + + _merge(code, mcode) + + _update(code, _u, _a, _tips) + + if _i: + _backup(_in_file) +
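# in-place mode: overwrite the source file now that the backup exists +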
_save(code, _in_file) + elif _o: + _save(code, _o) + else: + print(_tips) + sys.exit(1) + + +def update_yaml(_args): + _in_file, _u, _a, _i, _o, _tips = _get_update_par(_args) + + if not _in_file or not _u: + print(_tips) + sys.exit(1) + + code = _load(_in_file) + + if _i and _o: + print(_tips) + sys.exit(1) + + _update(code, _u, _a, _tips) + + if _i: + _backup(_in_file) + _save(code, _in_file) + elif _o: + _save(code, _o) + + +def reset(_args): + _dict = _args.__dict__ + _f = _dict.get('f', None) or _dict.get('file', None) + + if _f: + _recovery(_f) + else: + _t = _dict.get('tips', None) or "Input \"-h\" for more information" + print(_t) + + +############################################ +# Cli operation +# +############################################ +def _set_merge_parser(_parsers): + """ + config merge parser + """ + + merge_parser = _parsers.add_parser("merge", help="merge with another yaml file") + + _set_merge_parser_arg(merge_parser) + _set_update_parser_arg(merge_parser) + + merge_parser.set_defaults( + function=merge_yaml, + tips=merge_parser.format_help() + ) + + +def _set_merge_parser_arg(_parser): + """ + config parser argument for merging + """ + + _parser.add_argument("-m", "--merge-file", help="indicate merge yaml file") + + +def _set_update_parser(_parsers): + """ + config update parser + """ + + update_parser = _parsers.add_parser("update", help="update a yaml file with key=value args") + _set_update_parser_arg(update_parser) + + update_parser.set_defaults( + function=update_yaml, + tips=update_parser.format_help() + ) + + +def _set_update_parser_arg(_parser): + """ + config parser argument for updating + """ + + _parser.add_argument("-f", "--file", help="source yaml file") + _parser.add_argument('-u', '--update', help="update with args, for instance \"a.b.c=d# d comment\"") + _parser.add_argument('-a', '--append', action="store_true", help="append to a seq") + + group = _parser.add_mutually_exclusive_group() + group.add_argument("-o", "--out-file", help="indicate output yaml file") + group.add_argument("-i", "--inplace", action="store_true", help="indicate whether the result is stored in the origin file") + + +def _set_reset_parser(_parsers): + """ + config reset parser + """ + + reset_parser = _parsers.add_parser("reset", help="reset yaml file") + + # indicate yaml file + reset_parser.add_argument('-f', '--file', help="indicate input yaml file") + + reset_parser.set_defaults( + function=reset, + tips=reset_parser.format_help() + ) + + +def main(): + parser = argparse.ArgumentParser() + sub_parsers = parser.add_subparsers() + + # set merge command + _set_merge_parser(sub_parsers) + + # set update command + _set_update_parser(sub_parsers) + + # set reset command + _set_reset_parser(sub_parsers) + + # parse argument and run func + args = parser.parse_args() + args.function(args) + + +if __name__ == '__main__': + main() diff --git a/tests/benchmark/milvus_benchmark/__init__.py b/tests/benchmark/milvus_benchmark/__init__.py new file mode 100644 index 0000000000..d53e40a3d5 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/__init__.py @@ -0,0 +1,2 @@ +from locust import User, events +import gevent \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/chaos/__init__.py b/tests/benchmark/milvus_benchmark/chaos/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/benchmark/milvus_benchmark/chaos/chaos_mesh.py b/tests/benchmark/milvus_benchmark/chaos/chaos_mesh.py new file mode 100644 index 0000000000..6c55d50a86 --- /dev/null +++
b/tests/benchmark/milvus_benchmark/chaos/chaos_mesh.py @@ -0,0 +1,71 @@ +import logging +import os +from yaml import full_load +from milvus_benchmark.chaos import utils + +logger = logging.getLogger("milvus_benchmark.chaos.base") + + +class BaseChaos(object): + cur_path = os.path.abspath(os.path.dirname(__file__)) + + def __init__(self, api_version, kind, metadata, spec): + self.api_version = api_version + self.kind = kind + self.metadata = metadata + self.spec = spec + + def gen_experiment_config(self): + """ + 1. load the dict from the default yaml template + 2. merge it with the fields held on self + """ + pass + + def check_config(self): + if not self.kind: + raise Exception("kind must be specified") + if not self.spec: + raise Exception("spec must be specified") + if "action" not in self.spec: + raise Exception("action must be specified in spec") + if "selector" not in self.spec: + raise Exception("selector must be specified in spec") + return True + + def replace_label_selector(self): + self.check_config() + label_selectors_dict = self.spec["selector"]["labelSelectors"] + label_selector = next(iter(label_selectors_dict.items())) + label_selector_value = label_selector[1] + # pods = utils.list_pod_for_namespace(label_selector[0] + "=" + label_selector_value) + pods = utils.list_pod_for_namespace() + real_label_selector_value = [pod for pod in pods if label_selector_value in pod][0] + self.spec["selector"]["labelSelectors"].update({label_selector[0]: real_label_selector_value}) + + +class PodChaos(BaseChaos): + default_yaml = BaseChaos.cur_path + '/template/PodChaos.yaml' + + def __init__(self, api_version, kind, metadata, spec): + super(PodChaos, self).__init__(api_version, kind, metadata, spec) + + def gen_experiment_config(self): + with open(self.default_yaml) as f: + default_config = full_load(f) + self.replace_label_selector() + experiment_config = default_config + experiment_config.update({"apiVersion": self.api_version}) + experiment_config.update({"kind": self.kind}) + experiment_config["metadata"].update(self.metadata) + experiment_config["spec"].update(self.spec) + return experiment_config + + +class NetworkChaos(BaseChaos): + def __init__(self, api_version, kind, metadata, spec): + super(NetworkChaos, self).__init__(api_version, kind, metadata, spec) + + def gen_experiment_config(self): + pass \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/chaos/chaos_opt.py b/tests/benchmark/milvus_benchmark/chaos/chaos_opt.py new file mode 100644 index 0000000000..a502f00ee4 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/chaos/chaos_opt.py @@ -0,0 +1,65 @@ +from __future__ import print_function +from utils import * +import logging +from pprint import pprint +from kubernetes import client, config +from kubernetes.client.rest import ApiException +from milvus_benchmark import config as cf + +logger = logging.getLogger("milvus_benchmark.chaos.chaosOpt") + +class ChaosOpt(object): + def __init__(self, kind, group=cf.DEFAULT_GROUP, version=cf.DEFAULT_VERSION, namespace=cf.CHAOS_NAMESPACE): + self.group = group + self.version = version + self.namespace = namespace + self.plural = kind.lower() + + # def get_metadata_name(self): + # return self.metadata_name + + def create_chaos_object(self, body): + # body = create_chaos_config(self.plural, self.metadata_name, spec_params) + # logger.info(body) + pretty = 'true' + config.load_kube_config() + api_instance = client.CustomObjectsApi() + try: + api_response =
api_instance.create_namespaced_custom_object(self.group, self.version, self.namespace, + plural=self.plural, body=body, pretty=pretty) + print(api_response) + logging.getLogger().info(api_instance) + except ApiException as e: + logger.error("Exception when calling CustomObjectsApi->create_namespaced_custom_object: %s\n" % e) + raise Exception(str(e)) + + def delete_chaos_object(self, metadata_name): + print(metadata_name) + try: + config.load_kube_config() + api_instance = client.CustomObjectsApi() + data = api_instance.delete_namespaced_custom_object(self.group, self.version, self.namespace, self.plural, + metadata_name) + logger.info(data) + except ApiException as e: + logger.error("Exception when calling CustomObjectsApi->delete_namespaced_custom_object: %s\n" % e) + raise Exception(str(e)) + + def list_chaos_object(self): + try: + config.load_kube_config() + api_instance = client.CustomObjectsApi() + data = api_instance.list_namespaced_custom_object(self.group, self.version, self.namespace, + plural=self.plural) + # pprint(data) + except ApiException as e: + logger.error("Exception when calling CustomObjectsApi->list_namespaced_custom_object: %s\n" % e) + raise Exception(str(e)) + return data + + def delete_all_chaos_object(self): + chaos_objects = self.list_chaos_object() + if len(chaos_objects["items"]) > 0: + for item in chaos_objects["items"]: + metadata_name = item["metadata"]["name"] + self.delete_chaos_object(metadata_name) diff --git a/tests/benchmark/milvus_benchmark/chaos/pod-new.yaml b/tests/benchmark/milvus_benchmark/chaos/pod-new.yaml new file mode 100644 index 0000000000..395bff6ef9 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/chaos/pod-new.yaml @@ -0,0 +1,17 @@ +apiVersion: chaos-mesh.org/v1alpha1 +kind: PodChaos +metadata: + name: milvus-podchaos + namespace: chaos-testing +spec: + action: pod-kill + duration: 30s + mode: one + scheduler: + cron: '@every 20s' + selector: + labelSelectors: + app.kubernetes.io/name: zong-single-etcd-0 + namespaces: + - milvus + value: '' diff --git a/tests/benchmark/milvus_benchmark/chaos/pod.yaml b/tests/benchmark/milvus_benchmark/chaos/pod.yaml new file mode 100644 index 0000000000..330082840b --- /dev/null +++ b/tests/benchmark/milvus_benchmark/chaos/pod.yaml @@ -0,0 +1,11 @@ +chaos: + kind: PodChaos + spec: + action: pod-kill + selector: + namespaces: + - milvus + labelSelectors: + "app.kubernetes.io/name": etcd + scheduler: + cron: "@every 20s" \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/chaos/template/PodChaos.yaml b/tests/benchmark/milvus_benchmark/chaos/template/PodChaos.yaml new file mode 100644 index 0000000000..9fe26aa2b2 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/chaos/template/PodChaos.yaml @@ -0,0 +1,13 @@ +apiVersion: chaos-mesh.org/v1alpha1 +kind: PodChaos +metadata: + name: pod-failure-example + namespace: chaos-testing +spec: + action: pod-failure + mode: one + selector: + labelSelectors: + 'app.kubernetes.io/component': 'tikv' + scheduler: + cron: '@every 2m' \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/chaos/test.py b/tests/benchmark/milvus_benchmark/chaos/test.py new file mode 100644 index 0000000000..497c03a0ed --- /dev/null +++ b/tests/benchmark/milvus_benchmark/chaos/test.py @@ -0,0 +1,36 @@ +from gevent import monkey +monkey.patch_all() +from yaml import full_load, dump +from chaos.chaos_opt import ChaosOpt +from milvus_benchmark.chaos.chaos_mesh import PodChaos, NetworkChaos +from milvus_benchmark import config + +kind_chaos_mapping = { 
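+ # maps the "kind" field in pod.yaml to the chaos class that renders it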
+ "PodChaos": PodChaos, + "NetworkChaos": NetworkChaos +} + + +if __name__ == '__main__': + with open('./pod.yaml') as f: + conf = full_load(f) + f.close() + chaos_config = conf["chaos"] + kind = chaos_config["kind"] + spec = chaos_config["spec"] + metadata_name = config.NAMESPACE + "-" + kind.lower() + metadata = {"name": metadata_name} + chaos_mesh = kind_chaos_mapping[kind](config.DEFAULT_API_VERSION, kind, metadata, spec) + experiment_params = chaos_mesh.gen_experiment_config() + # print(experiment_params) + # with open('./pod-new-chaos.yaml', "w") as f: + # dump(experiment_params, f) + # f.close() + chaos_opt = ChaosOpt(chaos_mesh.kind) + res = chaos_opt.list_chaos_object() + print(res) + if len(res["items"]) != 0: + # chaos_opt.delete_chaos_object("milvus-pod-chaos") + print(res["items"][0]["metadata"]["name"]) + chaos_opt.delete_all_chaos_object() + print(chaos_opt.list_chaos_object()) diff --git a/tests/benchmark/milvus_benchmark/chaos/utils.py b/tests/benchmark/milvus_benchmark/chaos/utils.py new file mode 100644 index 0000000000..6ab89fcc81 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/chaos/utils.py @@ -0,0 +1,38 @@ +import logging +from operator import methodcaller + +from kubernetes import client, config +from milvus_benchmark import config as cf + +logger = logging.getLogger("milvus_benchmark.chaos.utils") + + +def list_pod_for_namespace(label_selector="app.kubernetes.io/instance=zong-standalone"): + config.load_kube_config() + v1 = client.CoreV1Api() + ret = v1.list_namespaced_pod(namespace=cf.NAMESPACE, label_selector=label_selector) + pods = [] + # label_selector = 'release=zong-single' + for i in ret.items: + pods.append(i.metadata.name) + # print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name)) + return pods + + +def assert_fail(func, milvus_client, **params): + try: + methodcaller(func, **params)(milvus_client) + except Exception as e: + logger.debug("11111111111111111111111111") + logger.info(str(e)) + pass + else: + raise Exception("fail-assert failed") + + +def assert_pass(func, milvus_client, **params): + try: + methodcaller(func, **params)(milvus_client) + logger.debug("&&&&&&&&&&&&&&&&&&&&") + except Exception as e: + raise diff --git a/tests/benchmark/milvus_benchmark/client.py b/tests/benchmark/milvus_benchmark/client.py new file mode 100644 index 0000000000..41a66964ef --- /dev/null +++ b/tests/benchmark/milvus_benchmark/client.py @@ -0,0 +1,491 @@ +import sys +import pdb +import random +import logging +import json +import time, datetime +import traceback +from multiprocessing import Process +from pymilvus import Milvus, DataType +import numpy as np +import utils +import config +from milvus_benchmark.runners import utils + +logger = logging.getLogger("milvus_benchmark.client") + +INDEX_MAP = { + "flat": "FLAT", + "ivf_flat": "IVF_FLAT", + "ivf_sq8": "IVF_SQ8", + "nsg": "NSG", + "ivf_sq8h": "IVF_SQ8_HYBRID", + "ivf_pq": "IVF_PQ", + "hnsw": "HNSW", + "annoy": "ANNOY", + "bin_flat": "BIN_FLAT", + "bin_ivf_flat": "BIN_IVF_FLAT", + "rhnsw_pq": "RHNSW_PQ", + "rhnsw_sq": "RHNSW_SQ" +} +epsilon = 0.1 +DEFAULT_WARM_QUERY_TOPK = 1 +DEFAULT_WARM_QUERY_NQ = 1 + + +def time_wrapper(func): + """ + This decorator prints the execution time for the decorated function. 
+ """ + + def wrapper(*args, **kwargs): + start = time.time() + # logger.debug("Milvus {} start".format(func.__name__)) + log = kwargs.get("log", True) + kwargs.pop("log", None) + result = func(*args, **kwargs) + end = time.time() + if log: + logger.debug("Milvus {} run in {}s".format(func.__name__, round(end - start, 2))) + return result + + return wrapper + + +class MilvusClient(object): + def __init__(self, collection_name=None, host=None, port=None, timeout=300): + self._collection_name = collection_name + self._collection_info = None + start_time = time.time() + if not host: + host = config.SERVER_HOST_DEFAULT + if not port: + port = config.SERVER_PORT_DEFAULT + # retry connect remote server + i = 0 + while time.time() < start_time + timeout: + try: + self._milvus = Milvus( + host=host, + port=port, + try_connect=False, + pre_ping=False) + break + except Exception as e: + logger.error(str(e)) + logger.error("Milvus connect failed: %d times" % i) + i = i + 1 + time.sleep(30) + + if time.time() > start_time + timeout: + raise Exception("Server connect timeout") + # self._metric_type = None + + def __str__(self): + return 'Milvus collection %s' % self._collection_name + + def set_collection(self, collection_name): + self._collection_name = collection_name + + # TODO: server not support + # def check_status(self, status): + # if not status.OK(): + # logger.error(status.message) + # logger.error(self._milvus.server_status()) + # logger.error(self.count()) + # raise Exception("Status not ok") + + def check_result_ids(self, result): + for index, item in enumerate(result): + if item[0].distance >= epsilon: + logger.error(index) + logger.error(item[0].distance) + raise Exception("Distance wrong") + + @property + def collection_name(self): + return self._collection_name + + # only support the given field name + def create_collection(self, dimension, data_type=DataType.FLOAT_VECTOR, auto_id=False, + collection_name=None, other_fields=None): + self._dimension = dimension + if not collection_name: + collection_name = self._collection_name + vec_field_name = utils.get_default_field_name(data_type) + fields = [ + {"name": vec_field_name, "type": data_type, "params": {"dim": dimension}}, + {"name": "id", "type": DataType.INT64, "is_primary": True} + ] + if other_fields: + other_fields = other_fields.split(",") + for other_field_name in other_fields: + if other_field_name.startswith("int"): + field_type = DataType.INT64 + elif other_field_name.startswith("float"): + field_type = DataType.FLOAT + elif other_field_name.startswith("double"): + field_type = DataType.DOUBLE + else: + raise Exception("Field name not supported") + fields.append({"name": other_field_name, "type": field_type}) + create_param = { + "fields": fields, + "auto_id": auto_id} + try: + self._milvus.create_collection(collection_name, create_param) + logger.info("Create collection: <%s> successfully" % collection_name) + except Exception as e: + logger.error(str(e)) + raise + + def create_partition(self, tag, collection_name=None): + if not collection_name: + collection_name = self._collection_name + self._milvus.create_partition(collection_name, tag) + + @time_wrapper + def insert(self, entities, collection_name=None, timeout=None): + tmp_collection_name = self._collection_name if collection_name is None else collection_name + try: + insert_res = self._milvus.insert(tmp_collection_name, entities, timeout=timeout) + return insert_res.primary_keys + except Exception as e: + logger.error(str(e)) + + @time_wrapper + def insert_flush(self, 
entities, _async=False, collection_name=None): + tmp_collection_name = self._collection_name if collection_name is None else collection_name + try: + insert_res = self._milvus.insert(tmp_collection_name, entities) + self._milvus.flush([tmp_collection_name], _async=_async) + return insert_res.primary_keys + except Exception as e: + logger.error(str(e)) + + def get_dimension(self): + info = self.get_info() + for field in info["fields"]: + if field["type"] in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR]: + return field["params"]["dim"] + + def get_rand_ids(self, length): + segment_ids = [] + while True: + stats = self.get_stats() + segments = stats["partitions"][0]["segments"] + # randomly choose one segment + segment = random.choice(segments) + try: + segment_ids = self._milvus.list_id_in_segment(self._collection_name, segment["id"]) + except Exception as e: + logger.error(str(e)) + if not len(segment_ids): + continue + elif len(segment_ids) > length: + return random.sample(segment_ids, length) + else: + logger.debug("Reset length: %d" % len(segment_ids)) + return segment_ids + + # def get_rand_ids_each_segment(self, length): + # res = [] + # status, stats = self._milvus.get_collection_stats(self._collection_name) + # self.check_status(status) + # segments = stats["partitions"][0]["segments"] + # segments_num = len(segments) + # # random choice from each segment + # for segment in segments: + # status, segment_ids = self._milvus.list_id_in_segment(self._collection_name, segment["name"]) + # self.check_status(status) + # res.extend(segment_ids[:length]) + # return segments_num, res + + # def get_rand_entities(self, length): + # ids = self.get_rand_ids(length) + # status, get_res = self._milvus.get_entity_by_id(self._collection_name, ids) + # self.check_status(status) + # return ids, get_res + + + @time_wrapper + def get_entities(self, get_ids): + get_res = self._milvus.get_entity_by_id(self._collection_name, get_ids) + return get_res + + @time_wrapper + def delete(self, ids, collection_name=None): + tmp_collection_name = self._collection_name if collection_name is None else collection_name + self._milvus.delete_entity_by_id(tmp_collection_name, ids) + + def delete_rand(self): + delete_id_length = random.randint(1, 100) + count_before = self.count() + logger.debug("%s: length to delete: %d" % (self._collection_name, delete_id_length)) + delete_ids = self.get_rand_ids(delete_id_length) + self.delete(delete_ids) + self.flush() + logger.info("%s: count after delete: %d" % (self._collection_name, self.count())) + get_res = self._milvus.get_entity_by_id(self._collection_name, delete_ids) + for item in get_res: + assert not item + # if count_before - len(delete_ids) < self.count(): + # logger.error(delete_ids) + # raise Exception("Error occurred") + + @time_wrapper + def flush(self, _async=False, collection_name=None, timeout=None): + tmp_collection_name = self._collection_name if collection_name is None else collection_name + self._milvus.flush([tmp_collection_name], _async=_async, timeout=timeout) + + @time_wrapper + def compact(self, collection_name=None): + tmp_collection_name = self._collection_name if collection_name is None else collection_name + return self._milvus.compact(tmp_collection_name) + + # only support "in" in expr + @time_wrapper + def get(self, ids, collection_name=None, timeout=None): + tmp_collection_name = self._collection_name if collection_name is None else collection_name + # res = self._milvus.get(tmp_collection_name, ids, output_fields=None,
partition_names=None) + ids_expr = "id in %s" % (str(ids)) + res = self._milvus.query(tmp_collection_name, ids_expr, output_fields=None, partition_names=None, timeout=timeout) + return res + + @time_wrapper + def create_index(self, field_name, index_type, metric_type, _async=False, index_param=None): + index_type = INDEX_MAP[index_type] + metric_type = utils.metric_type_trans(metric_type) + logger.info("Building index start, collection_name: %s, index_type: %s, metric_type: %s" % ( + self._collection_name, index_type, metric_type)) + if index_param: + logger.info(index_param) + index_params = { + "index_type": index_type, + "metric_type": metric_type, + "params": index_param + } + self._milvus.create_index(self._collection_name, field_name, index_params, _async=_async) + + # TODO: need to check + def describe_index(self, field_name, collection_name=None): + # stats = self.get_stats() + tmp_collection_name = self._collection_name if collection_name is None else collection_name + info = self._milvus.describe_index(tmp_collection_name, field_name) + logger.info(info) + index_info = {"index_type": "flat", "metric_type": None, "index_param": None} + if info: + index_info = {"index_type": info["index_type"], "metric_type": info["metric_type"], "index_param": info["params"]} + # transfer index type name + for k, v in INDEX_MAP.items(): + if index_info['index_type'] == v: + index_info['index_type'] = k + return index_info + + def drop_index(self, field_name): + logger.info("Drop index: %s" % self._collection_name) + return self._milvus.drop_index(self._collection_name, field_name) + + @time_wrapper + def query(self, vector_query, filter_query=None, collection_name=None, timeout=300): + tmp_collection_name = self._collection_name if collection_name is None else collection_name + must_params = [vector_query] + if filter_query: + must_params.extend(filter_query) + query = { + "bool": {"must": must_params} + } + result = self._milvus.search(tmp_collection_name, query, timeout=timeout) + return result + + @time_wrapper + def warm_query(self, index_field_name, search_param, metric_type, times=2): + query_vectors = [[random.random() for _ in range(self._dimension)] for _ in range(DEFAULT_WARM_QUERY_NQ)] + # index_info = self.describe_index(index_field_name) + vector_query = {"vector": {index_field_name: { + "topk": DEFAULT_WARM_QUERY_TOPK, + "query": query_vectors, + "metric_type": metric_type, + "params": search_param} + }} + must_params = [vector_query] + query = { + "bool": {"must": must_params} + } + logger.debug("Start warm up query") + for i in range(times): + self._milvus.search(self._collection_name, query) + logger.debug("End warm up query") + + @time_wrapper + def load_and_query(self, vector_query, filter_query=None, collection_name=None, timeout=120): + tmp_collection_name = self._collection_name if collection_name is None else collection_name + must_params = [vector_query] + if filter_query: + must_params.extend(filter_query) + query = { + "bool": {"must": must_params} + } + self.load_collection(tmp_collection_name) + result = self._milvus.search(tmp_collection_name, query, timeout=timeout) + return result + + def get_ids(self, result): + # idss = result._entities.ids + ids = [] + # len_idss = len(idss) + # len_r = len(result) + # top_k = len_idss // len_r + # for offset in range(0, len_idss, top_k): + # ids.append(idss[offset: min(offset + top_k, len_idss)]) + for res in result: + ids.append(res.ids) + return ids + + def query_rand(self, nq_max=100, timeout=None): + # for ivf search + 
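# NOTE: assumes 128-dim vectors (the sift dataset dimension used by these suites) +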
dimension = 128 + top_k = random.randint(1, 100) + nq = random.randint(1, nq_max) + nprobe = random.randint(1, 100) + search_param = {"nprobe": nprobe} + query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)] + metric_type = random.choice(["l2", "ip"]) + logger.info("%s, Search nq: %d, top_k: %d, nprobe: %d" % (self._collection_name, nq, top_k, nprobe)) + vec_field_name = utils.get_default_field_name() + vector_query = {"vector": {vec_field_name: { + "topk": top_k, + "query": query_vectors, + "metric_type": utils.metric_type_trans(metric_type), + "params": search_param} + }} + self.query(vector_query, timeout=timeout) + + def load_query_rand(self, nq_max=100, timeout=None): + # for ivf search + dimension = 128 + top_k = random.randint(1, 100) + nq = random.randint(1, nq_max) + nprobe = random.randint(1, 100) + search_param = {"nprobe": nprobe} + query_vectors = [[random.random() for _ in range(dimension)] for _ in range(nq)] + metric_type = random.choice(["l2", "ip"]) + logger.info("%s, Search nq: %d, top_k: %d, nprobe: %d" % (self._collection_name, nq, top_k, nprobe)) + vec_field_name = utils.get_default_field_name() + vector_query = {"vector": {vec_field_name: { + "topk": top_k, + "query": query_vectors, + "metric_type": utils.metric_type_trans(metric_type), + "params": search_param} + }} + self.load_and_query(vector_query, timeout=timeout) + + # TODO: need to check + def count(self, collection_name=None): + if collection_name is None: + collection_name = self._collection_name + row_count = self._milvus.get_collection_stats(collection_name)["row_count"] + logger.debug("Row count: %d in collection: <%s>" % (row_count, collection_name)) + return row_count + + def drop(self, timeout=120, collection_name=None): + timeout = int(timeout) + if collection_name is None: + collection_name = self._collection_name + logger.info("Start delete collection: %s" % collection_name) + self._milvus.drop_collection(collection_name) + i = 0 + while i < timeout: + try: + row_count = self.count(collection_name=collection_name) + if row_count: + time.sleep(1) + i = i + 1 + continue + else: + break + except Exception as e: + logger.warning("Collection count failed: {}".format(str(e))) + break + if i >= timeout: + logger.error("Delete collection timeout") + + def get_stats(self): + return self._milvus.get_collection_stats(self._collection_name) + + def get_info(self, collection_name=None): + if collection_name is None: + collection_name = self._collection_name + return self._milvus.describe_collection(collection_name) + + def show_collections(self): + return self._milvus.list_collections() + + def exists_collection(self, collection_name=None): + if collection_name is None: + collection_name = self._collection_name + res = self._milvus.has_collection(collection_name) + return res + + def clean_db(self): + collection_names = self.show_collections() + for name in collection_names: + self.drop(collection_name=name) + + @time_wrapper + def load_collection(self, collection_name=None, timeout=3000): + if collection_name is None: + collection_name = self._collection_name + return self._milvus.load_collection(collection_name, timeout=timeout) + + @time_wrapper + def release_collection(self, collection_name=None, timeout=3000): + if collection_name is None: + collection_name = self._collection_name + return self._milvus.release_collection(collection_name, timeout=timeout) + + @time_wrapper + def load_partitions(self, tag_names, collection_name=None, timeout=3000): + if collection_name is None: + 
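# fall back to the collection bound to this client +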
collection_name = self._collection_name + return self._milvus.load_partitions(collection_name, tag_names, timeout=timeout) + + @time_wrapper + def release_partitions(self, tag_names, collection_name=None, timeout=3000): + if collection_name is None: + collection_name = self._collection_name + return self._milvus.release_partitions(collection_name, tag_names, timeout=timeout) + + # TODO: remove + # def get_server_version(self): + # return self._milvus.server_version() + + # def get_server_mode(self): + # return self.cmd("mode") + + # def get_server_commit(self): + # return self.cmd("build_commit_id") + + # def get_server_config(self): + # return json.loads(self.cmd("get_milvus_config")) + + # def get_mem_info(self): + # result = json.loads(self.cmd("get_system_info")) + # result_human = { + # # unit: Gb + # "memory_used": round(int(result["memory_used"]) / (1024 * 1024 * 1024), 2) + # } + # return result_human + + # def cmd(self, command): + # res = self._milvus._cmd(command) + # logger.info("Server command: %s, result: %s" % (command, res)) + # return res + + # @time_wrapper + # def set_config(self, parent_key, child_key, value): + # self._milvus.set_config(parent_key, child_key, value) + + # def get_config(self, key): + # return self._milvus.get_config(key) diff --git a/tests/benchmark/milvus_benchmark/config.py b/tests/benchmark/milvus_benchmark/config.py new file mode 100644 index 0000000000..21586cd1eb --- /dev/null +++ b/tests/benchmark/milvus_benchmark/config.py @@ -0,0 +1,42 @@ +MONGO_SERVER = 'mongodb://192.168.1.234:27017/' +# MONGO_SERVER = 'mongodb://mongodb.test:27017/' + +SCHEDULER_DB = "scheduler" +JOB_COLLECTION = "jobs" + +REGISTRY_URL = "registry.zilliz.com/milvus/milvus" +IDC_NAS_URL = "//172.16.70.249/test" +DEFAULT_IMAGE = "milvusdb/milvus:latest" + +SERVER_HOST_DEFAULT = "127.0.0.1" +SERVER_PORT_DEFAULT = 19530 +SERVER_VERSION = "2.0.0-RC3" +DEFUALT_DEPLOY_MODE = "single" + + +HELM_NAMESPACE = "milvus" +BRANCH = "master" + +DEFAULT_CPUS = 48 + +RAW_DATA_DIR = "/test/milvus/raw_data/" + +# nars log +LOG_PATH = "/test/milvus/benchmark/logs/{}/".format(BRANCH) + +DEFAULT_DEPLOY_MODE = "single" +SINGLE_DEPLOY_MODE = "single" +CLUSTER_DEPLOY_MODE = "cluster" + +NAMESPACE = "milvus" +CHAOS_NAMESPACE = "chaos-testing" +DEFAULT_API_VERSION = 'chaos-mesh.org/v1alpha1' +DEFAULT_GROUP = 'chaos-mesh.org' +DEFAULT_VERSION = 'v1alpha1' + +# minio config +MINIO_HOST = "milvus-test-minio.qa-milvus.svc.cluster.local" +MINIO_PORT = 9000 +MINIO_ACCESS_KEY = "minioadmin" +MINIO_SECRET_KEY = "minioadmin" +MINIO_BUCKET_NAME = "test" \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/env/__init__.py b/tests/benchmark/milvus_benchmark/env/__init__.py new file mode 100644 index 0000000000..d64a773c8c --- /dev/null +++ b/tests/benchmark/milvus_benchmark/env/__init__.py @@ -0,0 +1,14 @@ +import logging +from .helm import HelmEnv +from .docker import DockerEnv +from .local import LocalEnv + +logger = logging.getLogger("milvus_benchmark.env") + + +def get_env(env_mode, deploy_mode=None): + return { + "helm": HelmEnv(deploy_mode), + "docker": DockerEnv(None), + "local": LocalEnv(None), + }.get(env_mode) diff --git a/tests/benchmark/milvus_benchmark/env/base.py b/tests/benchmark/milvus_benchmark/env/base.py new file mode 100644 index 0000000000..84702658be --- /dev/null +++ b/tests/benchmark/milvus_benchmark/env/base.py @@ -0,0 +1,46 @@ +import logging +from milvus_benchmark import utils +from milvus_benchmark import config + +logger = 
logging.getLogger("milvus_benchmark.env.env") + + +class BaseEnv(object): + """docstring for Env""" + def __init__(self, deploy_mode="single"): + self.deploy_mode = deploy_mode + self._name = utils.get_unique_name() + self._hostname = None + self._port = config.SERVER_PORT_DEFAULT + + def start_up(self): + logger.debug("IN ENV CLASS") + pass + + def tear_down(self): + pass + + def restart(self): + pass + + def set_hostname(self, hostname): + self._hostname = hostname + + def set_port(self, port): + self._port = port + + def resources(self): + pass + + @property + def name(self): + return self._name + + @property + def hostname(self): + return self._hostname + + @property + def port(self): + return self._port + diff --git a/tests/benchmark/milvus_benchmark/env/docker.py b/tests/benchmark/milvus_benchmark/env/docker.py new file mode 100644 index 0000000000..22e032a19d --- /dev/null +++ b/tests/benchmark/milvus_benchmark/env/docker.py @@ -0,0 +1,12 @@ +import logging +from milvus_benchmark.env.base import BaseEnv + +logger = logging.getLogger("milvus_benchmark.env.docker") + + +class DockerEnv(BaseEnv): + """docker env class wrapper""" + env_mode = "docker" + + def __init__(self, deploy_mode=None): + super(DockerEnv, self).__init__(deploy_mode) diff --git a/tests/benchmark/milvus_benchmark/env/helm.py b/tests/benchmark/milvus_benchmark/env/helm.py new file mode 100644 index 0000000000..35469c284e --- /dev/null +++ b/tests/benchmark/milvus_benchmark/env/helm.py @@ -0,0 +1,72 @@ +import os +import time +import pdb +import logging +import traceback + +from milvus_benchmark.env import helm_utils +from milvus_benchmark.env.base import BaseEnv +from milvus_benchmark import config + +logger = logging.getLogger("milvus_benchmark.env.helm") +TIMEOUT = 5 + +class HelmEnv(BaseEnv): + """helm env class wrapper""" + env_mode = "helm" + + def __init__(self, deploy_mode="single"): + super(HelmEnv, self).__init__(deploy_mode) + self._name_space = config.HELM_NAMESPACE + + def start_up(self, helm_path, helm_install_params): + if "namespace" in helm_install_params: + self._name_space = helm_install_params["namespace"] + server_name = helm_install_params["server_name"] + server_tag = helm_install_params["server_tag"] if "server_tag" in helm_install_params else None + server_config = helm_install_params["server_config"] if "server_config" in helm_install_params else None + milvus_config = helm_install_params["milvus_config"] + image_tag = helm_install_params["image_tag"] + image_type = helm_install_params["image_type"] + + logger.debug(self.deploy_mode) + server_config = helm_utils.update_server_config(server_name, server_tag, server_config) + # update values + values_file_path = helm_path + "/values.yaml" + if not os.path.exists(values_file_path): + raise Exception("File {} not existed".format(values_file_path)) + lock_file_path = helm_path + "/values.yaml.lock" + start_time = time.time() + while os.path.exists(lock_file_path) and time.time() < start_time+TIMEOUT: + logger.debug("Waiting for the lock file to release") + time.sleep(1) + if not os.path.exists(lock_file_path): + # generate lock file + open(lock_file_path, 'a').close() + try: + if milvus_config: + helm_utils.update_values(values_file_path, self.deploy_mode, server_name, server_tag, milvus_config, server_config) + logger.debug("Config file has been updated, remove the lock file") + os.system("rm -rf %s" % lock_file_path) + logger.debug("Start install server") + hostname = helm_utils.helm_install_server(helm_path, self.deploy_mode, image_tag, 
image_type, self.name, + self._name_space) + status_cmd = 'kubectl get pods -n milvus -l release=zong-standalone -o=jsonpath=\'{range .items[*]}{.metadata.name}{"\t"}{.status.phase}{"\n"}{end}\'' + if not hostname: + logger.error("Helm install server failed") + return False + else: + self.set_hostname(hostname) + while not helm_utils.running_status(self.name, self._name_space): + pass + else: + return hostname + except Exception as e: + os.system("rm -rf %s" % lock_file_path) + logger.error("Helm install server failed: %s" % (str(e))) + logger.error(traceback.format_exc()) + return False + + def tear_down(self): + logger.debug("Start clean up: {}.{}".format(self.name, self._name_space)) + helm_utils.helm_del_server(self.name, self._name_space) diff --git a/tests/benchmark/milvus_benchmark/env/helm_utils.py b/tests/benchmark/milvus_benchmark/env/helm_utils.py new file mode 100644 index 0000000000..2d1caec8ec --- /dev/null +++ b/tests/benchmark/milvus_benchmark/env/helm_utils.py @@ -0,0 +1,473 @@ +import os +import pdb +import time +import logging +import hashlib +import traceback +from yaml import full_load, dump +from milvus_benchmark import utils +from milvus_benchmark import config + +logger = logging.getLogger("milvus_benchmark.env.helm_utils") +BOOKKEEPER_PULSAR_MEM = '\"-Xms512m -Xmx1024m -XX:MaxDirectMemorySize=1024m -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintHeapAtGC -verbosegc -XX:G1LogLevel=finest\"' +BROKER_PULSAR_MEM = '\"-Xms512m -Xmx1024m -XX:MaxDirectMemorySize=1024m -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem\"' + + +def get_host_cpus(hostname): + from kubernetes import client, config + config.load_kube_config() + client.rest.logger.setLevel(logging.WARNING) + try: + v1 = client.CoreV1Api() + cpus = v1.read_node(hostname).status.allocatable.get("cpu") + except Exception as e: + logger.error(traceback.format_exc()) + logger.error(str(e)) + cpus = 0 + finally: + return cpus + + +def update_server_config(server_name, server_tag, server_config): + cpus = config.DEFAULT_CPUS + if server_name: + try: + cpus = get_host_cpus(server_name) + if not cpus: + cpus = config.DEFAULT_CPUS + except Exception as e: + logger.error("Get cpus on host: {} failed".format(server_name)) + logger.error(str(e)) + if server_config: + if "cpus" in server_config.keys(): + cpus = server_config["cpus"] + # self.hardware = Hardware(name=self.hostname, cpus=cpus) + if server_tag: + cpus = int(server_tag.split("c")[0]) + kv = {"cpus": cpus} + logger.debug(kv) + if server_config: + server_config.update(kv) + else: + server_config = kv + return server_config + + +""" +description: update values.yaml +return: no return +""" + + +def update_values(file_path, deploy_mode, hostname, server_tag, milvus_config, server_config=None): + # bak values.yaml + file_name = os.path.basename(file_path) + 
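# restore values.yaml from the backup if it exists; otherwise create the backup first +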
bak_file_name = file_name + ".bak" + file_parent_path = os.path.dirname(file_path) + bak_file_path = file_parent_path + '/' + bak_file_name + if os.path.exists(bak_file_path): + os.system("cp %s %s" % (bak_file_path, file_path)) + else: + os.system("cp %s %s" % (file_path, bak_file_path)) + with open(file_path) as f: + values_dict = full_load(f) + f.close() + cluster = False + if deploy_mode == "cluster": + cluster = True + + # TODO: disable change config + # cluster = False + # if "cluster" in milvus_config and milvus_config["cluster"]: + # cluster = True + # for k, v in milvus_config.items(): + # if k.find("primary_path") != -1: + # suffix_path = milvus_config["suffix_path"] if "suffix_path" in milvus_config else None + # path_value = v + # if suffix_path: + # path_value = v + "_" + str(int(time.time())) + # values_dict["primaryPath"] = path_value + # values_dict['wal']['path'] = path_value + "/wal" + # values_dict['logs']['path'] = path_value + "/logs" + # # elif k.find("use_blas_threshold") != -1: + # # values_dict['useBLASThreshold'] = int(v) + # elif k.find("gpu_search_threshold") != -1: + # values_dict['gpu']['gpuSearchThreshold'] = int(v) + # if cluster: + # values_dict['readonly']['gpu']['gpuSearchThreshold'] = int(v) + # elif k.find("cpu_cache_capacity") != -1: + # values_dict['cache']['cacheSize'] = v + # if cluster: + # values_dict['readonly']['cache']['cacheSize'] = v + # # elif k.find("cache_insert_data") != -1: + # # values_dict['cache']['cacheInsertData'] = v + # elif k.find("insert_buffer_size") != -1: + # values_dict['cache']['insertBufferSize'] = v + # if cluster: + # values_dict['readonly']['cache']['insertBufferSize'] = v + # elif k.find("gpu_resource_config.enable") != -1: + # values_dict['gpu']['enabled'] = v + # if cluster: + # values_dict['readonly']['gpu']['enabled'] = v + # elif k.find("gpu_resource_config.cache_capacity") != -1: + # values_dict['gpu']['cacheSize'] = v + # if cluster: + # values_dict['readonly']['gpu']['cacheSize'] = v + # elif k.find("build_index_resources") != -1: + # values_dict['gpu']['buildIndexDevices'] = v + # if cluster: + # values_dict['readonly']['gpu']['buildIndexDevices'] = v + # elif k.find("search_resources") != -1: + # values_dict['gpu']['searchDevices'] = v + # if cluster: + # values_dict['readonly']['gpu']['searchDevices'] = v + # # wal + # elif k.find("auto_flush_interval") != -1: + # values_dict['storage']['autoFlushInterval'] = v + # if cluster: + # values_dict['readonly']['storage']['autoFlushInterval'] = v + # elif k.find("wal_enable") != -1: + # values_dict['wal']['enabled'] = v + + # # if values_dict['nodeSelector']: + # # logger.warning("nodeSelector has been set: %s" % str(values_dict['engine']['nodeSelector'])) + # # return + # values_dict["wal"]["recoveryErrorIgnore"] = True + # # enable monitor + # values_dict["metrics"]["enabled"] = True + # values_dict["metrics"]["address"] = "192.168.1.237" + # values_dict["metrics"]["port"] = 9091 + # # only test avx2 + # values_dict["extraConfiguration"].update({"engine": {"simd_type": "avx2"}}) + # # stat_optimizer_enable + # values_dict["extraConfiguration"]["engine"].update({"stat_optimizer_enable": False}) + + # # enable read-write mode + # if cluster: + # values_dict["cluster"]["enabled"] = True + # # update readonly log path + # values_dict["readonly"]['logs']['path'] = values_dict['logs']['path'] + "/readonly" + # if "readonly" in milvus_config: + # if "replicas" in milvus_config["readonly"]: + # values_dict["readonly"]["replicas"] = milvus_config["readonly"]["replicas"] + 
+ # use_external_mysql = False + # if "external_mysql" in milvus_config and milvus_config["external_mysql"]: + # use_external_mysql = True + # # meta mysql + # if use_external_mysql: + # values_dict["mysql"]["enabled"] = False + # # values_dict["mysql"]["persistence"]["enabled"] = True + # # values_dict["mysql"]["persistence"]["existingClaim"] = hashlib.md5(path_value.encode(encoding='UTF-8')).hexdigest() + # values_dict['externalMysql']['enabled'] = True + # if deploy_mode == "local": + # values_dict['externalMysql']["ip"] = "192.168.1.238" + # else: + # values_dict['externalMysql']["ip"] = "milvus-mysql.test" + # values_dict['externalMysql']["port"] = 3306 + # values_dict['externalMysql']["user"] = "root" + # values_dict['externalMysql']["password"] = "milvus" + # values_dict['externalMysql']["database"] = "db" + # else: + # values_dict["mysql"]["enabled"] = False + # # update values.yaml with the given host + node_config = None + perf_tolerations = [{ + "key": "worker", + "operator": "Equal", + "value": "performance", + "effect": "NoSchedule" + }] + if hostname: + node_config = {'kubernetes.io/hostname': hostname} + elif server_tag: + # server tag + node_config = {'instance-type': server_tag} + cpus = server_config["cpus"] + logger.debug(hostname) + if cluster is False: + if node_config: + values_dict['standalone']['nodeSelector'] = node_config + values_dict['minio']['nodeSelector'] = node_config + values_dict['etcd']['nodeSelector'] = node_config + # TODO: disable + # set limit/request cpus in resources + values_dict['standalone']['resources'] = { + "limits": { + # "cpu": str(int(cpus)) + ".0" + "cpu": str(int(cpus)) + ".0" + }, + "requests": { + "cpu": str(int(cpus) // 2 + 1) + ".0" + # "cpu": "4.0" + } + } + logger.debug("Add tolerations into standalone server") + values_dict['standalone']['tolerations'] = perf_tolerations + values_dict['minio']['tolerations'] = perf_tolerations + values_dict['etcd']['tolerations'] = perf_tolerations + else: + # values_dict['pulsar']["broker"]["configData"].update({"maxMessageSize": "52428800", "PULSAR_MEM": BOOKKEEPER_PULSAR_MEM}) + # values_dict['pulsar']["bookkeeper"]["configData"].update({"nettyMaxFrameSizeBytes": "52428800", "PULSAR_MEM": BROKER_PULSAR_MEM}) + values_dict['proxynode']['nodeSelector'] = node_config + values_dict['querynode']['nodeSelector'] = node_config + values_dict['indexnode']['nodeSelector'] = node_config + values_dict['datanode']['nodeSelector'] = node_config + values_dict['minio']['nodeSelector'] = node_config + + # values_dict['pulsar']["enabled"] = True + # values_dict['pulsar']['autoRecovery']['nodeSelector'] = node_config + # values_dict['pulsar']['proxy']['nodeSelector'] = node_config + # values_dict['pulsar']['broker']['nodeSelector'] = node_config + # values_dict['pulsar']['bookkeeper']['nodeSelector'] = node_config + # values_dict['pulsar']['zookeeper']['nodeSelector'] = node_config + values_dict['pulsarStandalone']['nodeSelector'] = node_config + if hostname: + logger.debug("Add tolerations into cluster server") + values_dict['proxynode']['tolerations'] = perf_tolerations + values_dict['querynode']['tolerations'] = perf_tolerations + values_dict['indexnode']['tolerations'] = perf_tolerations + values_dict['datanode']['tolerations'] = perf_tolerations + values_dict['etcd']['tolerations'] = perf_tolerations + values_dict['minio']['tolerations'] = perf_tolerations + values_dict['pulsarStandalone']['tolerations'] = perf_tolerations + # values_dict['pulsar']['autoRecovery']['tolerations'] = perf_tolerations + # 
values_dict['pulsar']['proxy']['tolerations'] = perf_tolerations
+            # values_dict['pulsar']['broker']['tolerations'] = perf_tolerations
+            # values_dict['pulsar']['bookkeeper']['tolerations'] = perf_tolerations
+            # values_dict['pulsar']['zookeeper']['tolerations'] = perf_tolerations
+
+    # add extra volumes
+    values_dict['extraVolumes'] = [{
+        'name': 'test',
+        'flexVolume': {
+            'driver': "fstab/cifs",
+            'fsType': "cifs",
+            'secretRef': {
+                'name': "cifs-test-secret"
+            },
+            'options': {
+                'networkPath': config.IDC_NAS_URL,
+                'mountOptions': "vers=1.0"
+            }
+        }
+    }]
+    values_dict['extraVolumeMounts'] = [{
+        'name': 'test',
+        'mountPath': '/test'
+    }]
+
+    with open(file_path, 'w') as f:
+        dump(values_dict, f, default_flow_style=False)
+    # DEBUG: echo the updated values.yaml into the log
+    with open(file_path) as f:
+        for line in f.readlines():
+            line = line.strip("\n")
+            logger.debug(line)
+
+
+# deploy server
+def helm_install_server(helm_path, deploy_mode, image_tag, image_type, name, namespace):
+    logger.debug("Server deploy mode: %s" % deploy_mode)
+    host = "%s-milvus-ha.%s.svc.cluster.local" % (name, namespace)
+    # TODO: update etcd config
+    etcd_config_map_cmd = "kubectl create configmap -n %s %s --from-literal=ETCD_QUOTA_BACKEND_BYTES=8589934592 --from-literal=ETCD_SNAPSHOT_COUNT=5000 --from-literal=ETCD_AUTO_COMPACTION_MODE=revision --from-literal=ETCD_AUTO_COMPACTION_RETENTION=1" % (
+        namespace, name)
+    if os.system(etcd_config_map_cmd):
+        raise Exception("Create configmap: {} failed".format(name))
+    logger.debug("Create configmap: {} successfully".format(name))
+    log_path = config.LOG_PATH + "install.log"
+    install_cmd = "helm install \
+            --set standalone.service.type=ClusterIP \
+            --set image.all.repository=%s \
+            --set image.all.tag=%s \
+            --set minio.persistence.enabled=false \
+            --set etcd.persistence.enabled=false \
+            --set etcd.envVarsConfigMap=%s \
+            --namespace %s \
+            %s . >> %s 2>&1" % (config.REGISTRY_URL, image_tag, name, namespace, name, log_path)
+    # --set image.all.pullPolicy=Always \
+    if deploy_mode == "cluster":
+        install_cmd = "helm install \
+                --set standalone.enabled=false \
+                --set image.all.repository=%s \
+                --set image.all.tag=%s \
+                --set minio.persistence.enabled=false \
+                --set etcd.persistence.enabled=false \
+                --set etcd.envVarsConfigMap=%s \
+                --namespace %s \
+                %s . >> %s 2>&1" % (config.REGISTRY_URL, image_tag, name, namespace, name, log_path)
+        # --set image.all.pullPolicy=Always \
+    elif deploy_mode != "single":
+        raise Exception("Deploy mode: {} not supported".format(deploy_mode))
+    logger.debug(install_cmd)
+    logger.debug(host)
+    if os.system("cd %s && %s" % (helm_path, install_cmd)):
+        logger.error("Helm install failed: %s" % name)
+        return None
+    logger.debug("Wait for 60s ..")
+    time.sleep(60)
+    # config.load_kube_config()
+    # v1 = client.CoreV1Api()
+    # pod_name = None
+    # pod_id = None
+    # pods = v1.list_namespaced_pod(namespace)
+    # for i in pods.items:
+    #     if i.metadata.name.find(name) != -1:
+    #         pod_name = i.metadata.name
+    #         pod_ip = i.status.pod_ip
+    # logger.debug(pod_name)
+    # logger.debug(pod_ip)
+    # return pod_name, pod_ip
+    return host
+
+
+# delete server
+@utils.retry(3)
+def helm_del_server(name, namespace):
+    # logger.debug("Sleep 600s before uninstall server")
+    # time.sleep(600)
+    delete_etcd_config_map_cmd = "kubectl delete configmap -n %s %s" % (namespace, name)
+    logger.info(delete_etcd_config_map_cmd)
+    if os.system(delete_etcd_config_map_cmd):
+        logger.error("Delete configmap %s:%s failed" % (namespace, name))
+    # uninstall the release from the namespace it was installed into
+    del_cmd = "helm uninstall -n %s %s" % (namespace, name)
+    logger.info(del_cmd)
+    if os.system(del_cmd):
+        logger.error("Helm delete name:%s failed" % name)
+        return False
+    return True
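
(For orientation, a minimal driver sketch for the two helpers above; the chart path, release name, and image tag are hypothetical, and a working kubeconfig plus a local milvus-helm checkout are assumed:)

    from milvus_benchmark.env import helm_utils

    helm_path = "/path/to/milvus-helm-charts/charts/milvus-ha"  # hypothetical checkout
    release, namespace = "benchmark-abc", "milvus"              # hypothetical names
    # install a standalone server, then verify that all of its pods report Running
    host = helm_utils.helm_install_server(helm_path, "single", "master-latest", "cpu",
                                          release, namespace)
    if host and helm_utils.running_status(release, namespace):
        print("server reachable at %s" % host)
    helm_utils.helm_del_server(release, namespace)
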
+
+
+def restart_server(helm_release_name, namespace):
+    from kubernetes import client, config
+    config.load_kube_config()
+    res = True
+    timeout = 120000
+    # service_name = "%s.%s.svc.cluster.local" % (helm_release_name, namespace)
+    v1 = client.CoreV1Api()
+    pod_name = None
+    # config_map_names = v1.list_namespaced_config_map(namespace, pretty='true')
+    # body = {"replicas": 0}
+    pods = v1.list_namespaced_pod(namespace)
+    for i in pods.items:
+        if i.metadata.name.find(helm_release_name) != -1 and i.metadata.name.find("mysql") == -1:
+            pod_name = i.metadata.name
+            break
+    # v1.patch_namespaced_config_map(config_map_name, namespace, body, pretty='true')
+    # status_res = v1.read_namespaced_service_status(helm_release_name, namespace, pretty='true')
+    logger.debug("Pod name: %s" % pod_name)
+    if pod_name is not None:
+        try:
+            v1.delete_namespaced_pod(pod_name, namespace)
+        except Exception as e:
+            logger.error(str(e))
+            logger.error("Exception when calling CoreV1Api->delete_namespaced_pod")
+            res = False
+            return res
+        logger.debug("Sleep 10s after pod deleted")
+        time.sleep(10)
+        # check if the pod restarted successfully
+        pods = v1.list_namespaced_pod(namespace)
+        for i in pods.items:
+            pod_name_tmp = i.metadata.name
+            logger.debug(pod_name_tmp)
+            if pod_name_tmp == pod_name:
+                continue
+            elif pod_name_tmp.find(helm_release_name) == -1 or pod_name_tmp.find("mysql") != -1:
+                continue
+            else:
+                status_res = v1.read_namespaced_pod_status(pod_name_tmp, namespace, pretty='true')
+                logger.debug(status_res.status.phase)
+                start_time = time.time()
+                ready_break = False
+                while time.time() - start_time <= timeout:
+                    logger.debug(time.time())
+                    status_res = v1.read_namespaced_pod_status(pod_name_tmp, namespace, pretty='true')
+                    if status_res.status.phase == "Running":
+                        logger.debug("Already running")
+                        ready_break = True
+                        break
+                    else:
+                        time.sleep(5)
+                if time.time() - start_time > timeout:
+                    logger.error("Restart pod: %s timeout" % pod_name_tmp)
+                    res = False
+                    return res
+                if ready_break:
+                    break
+    else:
+        raise Exception("Pod of release %s not found" % helm_release_name)
+    follow = True
+    pretty = True
+    previous = True  # bool | Return previous terminated container logs. Defaults to false. (optional)
+    since_seconds = 56  # int | A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified. (optional)
+    timestamps = True  # bool | If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false. (optional)
+    container = "milvus"
+    # start_time = time.time()
+    # while time.time() - start_time <= timeout:
+    #     try:
+    #         api_response = v1.read_namespaced_pod_log(pod_name_tmp, namespace, container=container, follow=follow,
+    #                                                   pretty=pretty, previous=previous, since_seconds=since_seconds,
+    #                                                   timestamps=timestamps)
+    #         logging.error(api_response)
+    #         return res
+    #     except Exception as e:
+    #         logging.error("Exception when calling CoreV1Api->read_namespaced_pod_log: %s\n" % e)
+    #         # waiting for server start
+    #         time.sleep(2)
+    #         # res = False
+    #         # return res
+    # if time.time() - start_time > timeout:
+    #     logging.error("Restart pod: %s timeout" % pod_name_tmp)
+    #     res = False
+    return res
+
+
+def get_pod_status(helm_release_name, namespace):
+    from kubernetes import client, config
+    config.load_kube_config()
+    v1 = client.CoreV1Api()
+    pod_status = []
+    label_selector = 'app.kubernetes.io/instance={}'.format(helm_release_name)
+    # pods = v1.list_namespaced_pod(namespace, label_selector=label_selector)
+    pods = v1.list_namespaced_pod(namespace)
+    for i in pods.items:
+        if i.metadata.name.find(helm_release_name) != -1:
+            pod_name = i.metadata.name
+            result = v1.read_namespaced_pod_status(pod_name, namespace)
+            pod_status.append({"pod": pod_name, "status": result.status.phase})
+    return pod_status
+
+
+if __name__ == '__main__':
+    # simple manual check: poll a release until every pod reports Running
+    namespace = 'milvus'
+    helm_release_name = 'zong-standalone'
+    while not running_status(helm_release_name, namespace):
+        print("retry")
+        time.sleep(1)
+    print(get_pod_status(helm_release_name, namespace))
+
+
+def running_status(helm_release_name, namespace):
+    pod_status = get_pod_status(helm_release_name, namespace)
+    for pod in pod_status:
+        if pod["status"] != "Running":
+            return False
+    return True
diff --git a/tests/benchmark/milvus_benchmark/env/local.py b/tests/benchmark/milvus_benchmark/env/local.py
new file mode 100644
index 0000000000..ea8e20c46e
--- /dev/null
+++ b/tests/benchmark/milvus_benchmark/env/local.py
@@ -0,0 +1,21 @@
+import logging
+from milvus_benchmark.env.base import BaseEnv
+
+logger = logging.getLogger("milvus_benchmark.env.local")
+
+
+class LocalEnv(BaseEnv):
+    """local env class wrapper"""
+    env_mode = "local"
+
+    def __init__(self, deploy_mode=None):
+        super(LocalEnv, self).__init__(deploy_mode)
+
+    def start_up(self, hostname, port):
+        res = True
+        try:
+            self.set_hostname(hostname)
+            self.set_port(port)
+        except Exception as e:
+            logger.error(str(e))
+            res = False
+        return res
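
(A usage sketch for the env wrappers above, assuming a Milvus server is already listening on the given address; the host and port values are hypothetical:)

    from milvus_benchmark.env.local import LocalEnv

    env = LocalEnv(deploy_mode="single")
    if env.start_up("127.0.0.1", 19530):
        print(env.name, env.hostname, env.port)
    env.tear_down()
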
diff --git a/tests/benchmark/milvus_benchmark/logs/log.py b/tests/benchmark/milvus_benchmark/logs/log.py
new file mode 100644
index 0000000000..d19088f9aa
--- /dev/null
+++ b/tests/benchmark/milvus_benchmark/logs/log.py
@@ -0,0 +1,24 @@
+import logging.config
+from datetime import datetime
+import os
+import yaml
+import config
+
+cur_path = os.path.abspath(os.path.dirname(__file__))
+LOG_CONFIG_PATH = cur_path + "/logging.yaml"
+FILE_NAME = config.LOG_PATH + 'benchmark-{:%Y-%m-%d}.log'.format(datetime.now())
+
+
+def setup_logging(config_path=LOG_CONFIG_PATH, default_level=logging.INFO):
+    """
+    Setup logging configuration
+    """
+    print(FILE_NAME)
+    try:
+        with open(config_path, 'rt') as f:
+            log_config = yaml.safe_load(f.read())
+        log_config["handlers"]["info_file_handler"].update({"filename": FILE_NAME})
+        logging.config.dictConfig(log_config)
+    except Exception:
+        logging.error('Failed to load logging config from %s' % config_path, exc_info=True)
+        raise
diff --git a/tests/benchmark/milvus_benchmark/logs/logging.yaml b/tests/benchmark/milvus_benchmark/logs/logging.yaml
new file mode 100644
index 0000000000..908133e590
--- /dev/null
+++ b/tests/benchmark/milvus_benchmark/logs/logging.yaml
@@ -0,0 +1,37 @@
+version: 1
+disable_existing_loggers: False
+formatters:
+  simple:
+    format: "[%(asctime)-15s] [%(levelname)8s] - %(message)s (%(name)s:%(lineno)s)"
+
+handlers:
+  console:
+    class: logging.StreamHandler
+    level: DEBUG
+    formatter: simple
+    stream: ext://sys.stdout
+
+  info_file_handler:
+    class: logging.FileHandler
+    formatter: simple
+    level: DEBUG
+    filename: info.log
+
+# error_file_handler:
+#   class: logging.handlers.RotatingFileHandler
+#   level: ERROR
+#   formatter: simple
+#   filename: errors.log
+#   maxBytes: 10485760 # 10MB
+#   backupCount: 20
+#   encoding: utf8
+
+loggers:
+  milvus_benchmark:
+    level: DEBUG
+    handlers: [console, info_file_handler]
+    propagate: no
+
+root:
+  level: DEBUG
+  handlers: [console, info_file_handler]
diff --git a/tests/benchmark/milvus_benchmark/main.py b/tests/benchmark/milvus_benchmark/main.py
new file mode 100644
index 0000000000..d97c3f8bee
--- /dev/null
+++ b/tests/benchmark/milvus_benchmark/main.py
@@ -0,0 +1,273 @@
+import os
+import sys
+import time
+import argparse
+import logging
+import traceback
+# from queue import Queue
+from yaml import full_load, dump
+from milvus_benchmark.metrics.models.server import Server
+from milvus_benchmark.metrics.models.hardware import Hardware
+from milvus_benchmark.metrics.models.env import Env
+
+from milvus_benchmark.env import get_env
+from milvus_benchmark.runners import get_runner
+from milvus_benchmark.metrics import api
+from milvus_benchmark import config, utils
+from milvus_benchmark import parser
+# from scheduler import back_scheduler
+from logs import log
+
+log.setup_logging()
+logger = logging.getLogger("milvus_benchmark.main")
+
+# q = Queue()
+
+
+def positive_int(s):
+    i = None
+    try:
+        i = int(s)
+    except ValueError:
+        pass
+    if not i or i < 1:
+        raise argparse.ArgumentTypeError("%r is not a positive integer" % s)
+    return i
+
+
+def get_image_tag(image_version):
+    return "%s-latest" % image_version
+
+
+# def shutdown(event):
+#     logger.info("Check if there is scheduled jobs in scheduler")
+#     if not back_scheduler.get_jobs():
+#         logger.info("No job in scheduler, will shutdown the scheduler")
+#         back_scheduler.shutdown(wait=False)
+
+
+def run_suite(run_type, suite, env_mode, env_params, timeout=None):
+    try:
+        start_status = False
+        metric = api.Metric()
+        deploy_mode = env_params["deploy_mode"]
+        deploy_opology = env_params["deploy_opology"] if "deploy_opology" in env_params else None
+        env = get_env(env_mode, deploy_mode)
+        metric.set_run_id()
+        metric.set_mode(env_mode)
+        metric.env = Env()
+        metric.server = Server(version=config.SERVER_VERSION, mode=deploy_mode, deploy_opology=deploy_opology)
+        logger.info(env_params)
+        if env_mode == "local":
+            metric.hardware = Hardware("")
+            if "server_tag" in env_params and env_params["server_tag"]:
+                metric.hardware = Hardware(env_params["server_tag"])
+            start_status = env.start_up(env_params["host"], env_params["port"])
+        elif env_mode == "helm":
+            helm_params = env_params["helm_params"]
+            helm_path = env_params["helm_path"]
+            server_name = helm_params["server_name"] if "server_name" in helm_params else None
+            server_tag = helm_params["server_tag"] if "server_tag" in helm_params else None
+            if not server_name and not server_tag:
+                metric.hardware = Hardware("")
+            else:
+                metric.hardware = Hardware(server_name) if server_name else Hardware(server_tag)
+            start_status = env.start_up(helm_path, helm_params)
+        if start_status:
+            metric.update_status(status="DEPLOYE_SUCC")
+            logger.debug("Get runner")
+            runner = get_runner(run_type, env, metric)
+            cases, case_metrics = runner.extract_cases(suite)
+            # TODO: only run when the as_group is equal to True
+            logger.info("Prepare to run cases")
+            runner.prepare(**cases[0])
+            logger.info("Start run case")
+            suite_status = True
+            for index, case in enumerate(cases):
+                case_metric = case_metrics[index]
+                result = None
+                err_message = ""
+                try:
+                    result = runner.run_case(case_metric, **case)
+                except Exception as e:
+                    err_message = str(e) + "\n" + traceback.format_exc()
+                    logger.error(traceback.format_exc())
+                logger.info(result)
+                if result:
+                    case_metric.update_status(status="RUN_SUCC")
+                    case_metric.update_result(result)
+                else:
+                    case_metric.update_status(status="RUN_FAILED")
+                    case_metric.update_message(err_message)
+                    suite_status = False
+                logger.debug(case_metric.metrics)
+                if deploy_mode:
+                    api.save(case_metric)
+            if suite_status:
+                metric.update_status(status="RUN_SUCC")
+            else:
+                metric.update_status(status="RUN_FAILED")
+        else:
+            logger.info("Deploy failed on server")
+            metric.update_status(status="DEPLOYE_FAILED")
+    except Exception as e:
+        logger.error(str(e))
+        logger.error(traceback.format_exc())
+        metric.update_status(status="RUN_FAILED")
+    finally:
+        if deploy_mode:
+            api.save(metric)
+        # time.sleep(10)
+        env.tear_down()
+    if metric.status != "RUN_SUCC":
+        return False
+    else:
+        return True
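
(The shape of the arguments run_suite() consumes in local mode, as a sketch with hypothetical values; in practice main() below assembles these dicts from the CLI arguments and the YAML suite file:)

    env_params = {
        "host": "127.0.0.1",      # hypothetical
        "port": "19530",
        "deploy_mode": "single",
        "server_tag": None,
        "deploy_opology": None,
    }
    suite = {"collection_name": "sift_1m_128_l2", "ni_per": 50000}  # hypothetical case
    # run_suite("insert_performance", suite, "local", env_params)
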
+
+
+def main():
+    arg_parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    # helm mode with scheduler
+    arg_parser.add_argument(
+        "--image-version",
+        default="",
+        help="image version")
+    arg_parser.add_argument(
+        "--schedule-conf",
+        metavar='FILE',
+        default='',
+        help="load test schedule from FILE")
+
+    # local mode
+    arg_parser.add_argument(
+        '--local',
+        action='store_true',
+        help='use local milvus server')
+    arg_parser.add_argument(
+        '--host',
+        help='server host ip param for local mode',
+        default='127.0.0.1')
+    arg_parser.add_argument(
+        '--port',
+        help='server port param for local mode',
+        default='19530')
+    arg_parser.add_argument(
+        '--suite',
+        metavar='FILE',
+        help='load test suite from FILE',
+        default='')
+    arg_parser.add_argument(
+        '--server-config',
+        metavar='FILE',
+        help='load server config from FILE',
+        default='')
+
+    args = arg_parser.parse_args()
+
+    if args.schedule_conf:
+        if args.local:
+            raise Exception("Helm mode with scheduler is incompatible with local mode")
+        if not args.image_version:
+            raise Exception("Image version not given")
+        env_mode = "helm"
+        image_version = args.image_version
+        with open(args.schedule_conf) as f:
+            schedule_config = full_load(f)
+        helm_path = os.path.join(os.getcwd(), "../milvus-helm-charts/charts/milvus-ha")
+        for item in schedule_config:
+            server_host = item["server"] if "server" in item else ""
+            server_tag = item["server_tag"] if "server_tag" in item else ""
+            deploy_mode = item["deploy_mode"] if "deploy_mode" in item else config.DEFAULT_DEPLOY_MODE
+            suite_params = item["suite_params"]
+            for suite_param in suite_params:
+                suite_file = "suites/" + suite_param["suite"]
+                with open(suite_file) as f:
+                    suite_dict = full_load(f)
+                logger.debug(suite_dict)
+                run_type, run_params = parser.operations_parser(suite_dict)
+                collections = run_params["collections"]
+                image_type = suite_param["image_type"]
+                image_tag = get_image_tag(image_version)
+                for suite in collections:
+                    # run test cases
+                    milvus_config = suite["milvus"] if "milvus" in suite else None
+                    server_config = suite["server"] if "server" in suite else None
+                    logger.debug(milvus_config)
+                    logger.debug(server_config)
+                    helm_params = {
+                        "server_name": server_host,
+                        "server_tag": server_tag,
+                        "server_config": server_config,
+                        "milvus_config": milvus_config,
+                        "image_tag": image_tag,
+                        "image_type": image_type
+                    }
+                    env_params = {
+                        "deploy_mode": deploy_mode,
+                        "helm_path": helm_path,
+                        "helm_params": helm_params
+                    }
+                    # job = back_scheduler.add_job(run_suite, args=[run_type, suite, env_mode, env_params],
+                    #                              misfire_grace_time=36000)
+                    # logger.info(job)
+                    # logger.info(job.id)
+
+    elif args.local:
+        # for local mode
+        deploy_params = args.server_config
+        deploy_params_dict = None
+        if deploy_params:
+            with open(deploy_params) as f:
+                deploy_params_dict = full_load(f)
+            logger.debug(deploy_params_dict)
+        deploy_mode = utils.get_deploy_mode(deploy_params_dict)
+        server_tag = utils.get_server_tag(deploy_params_dict)
+        env_params = {
+            "host": args.host,
+            "port": args.port,
+            "deploy_mode": deploy_mode,
+            "server_tag": server_tag,
+            "deploy_opology": deploy_params_dict
+        }
+        suite_file = args.suite
+        with open(suite_file) as f:
+            suite_dict = full_load(f)
+        logger.debug(suite_dict)
+        run_type, run_params = parser.operations_parser(suite_dict)
+        collections = run_params["collections"]
+        if len(collections) > 1:
+            raise Exception("Multiple collections are not supported in local mode")
+        # ensure there is only one case in the suite
+        # suite = {"run_type": run_type, "run_params": collections[0]}
+        suite = collections[0]
+        timeout = suite["timeout"] if "timeout" in suite else None
+        env_mode = "local"
+        return run_suite(run_type, suite, env_mode, env_params, timeout=timeout)
+        # job = back_scheduler.add_job(run_suite, args=[run_type, suite, env_mode, env_params], misfire_grace_time=36000)
+        # logger.info(job)
+        # logger.info(job.id)
+
+
+if __name__ == "__main__":
+    try:
+        if not main():
+            sys.exit(-1)
+        # from apscheduler.events import EVENT_JOB_MISSED
+        # back_scheduler.add_listener(listen_miss, EVENT_JOB_MISSED)
+        # back_scheduler.start()
+    # except (KeyboardInterrupt, SystemExit):
+    #     logger.error("Received interruption")
+    #     # back_scheduler.shutdown(wait=False)
+    #     sys.exit(0)
+    except Exception as e:
+        logger.error(traceback.format_exc())
+        # back_scheduler.shutdown(wait=False)
+        sys.exit(-2)
+    # block_scheduler.shutdown(wait=False)
+    logger.info("All tests run finished")
+    sys.exit(0)
\ No newline at end of file
diff --git a/tests/benchmark/milvus_benchmark/metrics/__init__.py b/tests/benchmark/milvus_benchmark/metrics/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/benchmark/milvus_benchmark/metrics/api.py b/tests/benchmark/milvus_benchmark/metrics/api.py
new file mode 100644
index 0000000000..365c67c914
--- /dev/null
+++
b/tests/benchmark/milvus_benchmark/metrics/api.py @@ -0,0 +1,55 @@ +import pdb +import logging +from pymongo import MongoClient + +from .models.env import Env +from .models.hardware import Hardware +from .models.metric import Metric +from .models.server import Server +from .config import DB, UNIQUE_ID_COLLECTION, DOC_COLLECTION +from milvus_benchmark import config + +_client = MongoClient(config.MONGO_SERVER) +logger = logging.getLogger("milvus_benchmark.metric.api") + + +def insert_or_get(md5): + collection = _client[DB][UNIQUE_ID_COLLECTION] + found = collection.find_one({'md5': md5}) + if not found: + return collection.insert_one({'md5': md5}).inserted_id + return found['_id'] + + +def save(obj): + if not isinstance(obj, Metric): + logger.error("obj is not instance of Metric") + return False + + logger.debug(vars(obj)) + if not isinstance(obj.server, Server): + logger.error("obj.server is not instance of Server") + return False + + if not isinstance(obj.hardware, Hardware): + logger.error("obj.hardware is not instance of Hardware") + return False + + if not isinstance(obj.env, Env): + logger.error("obj.env is not instance of Env") + return False + + md5 = obj.server.json_md5() + server_doc_id = insert_or_get(md5) + obj.server = {"id": server_doc_id, "value": vars(obj.server)} + + md5 = obj.hardware.json_md5() + hardware_doc_id = insert_or_get(md5) + obj.hardware = {"id": hardware_doc_id, "value": vars(obj.hardware)} + + md5 = obj.env.json_md5() + env_doc_id = insert_or_get(md5) + obj.env = {"id": env_doc_id, "value": vars(obj.env)} + + collection = _client[DB][DOC_COLLECTION] + collection.insert_one(vars(obj)) diff --git a/tests/benchmark/milvus_benchmark/metrics/config.py b/tests/benchmark/milvus_benchmark/metrics/config.py new file mode 100644 index 0000000000..369c17eb12 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/metrics/config.py @@ -0,0 +1,3 @@ +DB = 'test' +UNIQUE_ID_COLLECTION = 'unique_id' +DOC_COLLECTION = 'doc' diff --git a/tests/benchmark/milvus_benchmark/metrics/models/__init__.py b/tests/benchmark/milvus_benchmark/metrics/models/__init__.py new file mode 100644 index 0000000000..c7edd1373a --- /dev/null +++ b/tests/benchmark/milvus_benchmark/metrics/models/__init__.py @@ -0,0 +1,4 @@ +from .env import Env +from .hardware import Hardware +from .metric import Metric +from .server import Server diff --git a/tests/benchmark/milvus_benchmark/metrics/models/env.py b/tests/benchmark/milvus_benchmark/metrics/models/env.py new file mode 100644 index 0000000000..7c3740a3b6 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/metrics/models/env.py @@ -0,0 +1,23 @@ +import json +import hashlib + + +class Env: + """ + { + "_version": "0.1", + "_type": "env", + "server_config": dict, + "OMP_NUM_THREADS": string, + } + """ + + def __init__(self, server_config=None, omp_num_threads=None): + self._version = '0.1' + self._type = 'env' + self.server_config = server_config + self.OMP_NUM_THREADS = omp_num_threads + + def json_md5(self): + json_str = json.dumps(vars(self), sort_keys=True) + return hashlib.md5(json_str.encode('utf-8')).hexdigest() diff --git a/tests/benchmark/milvus_benchmark/metrics/models/hardware.py b/tests/benchmark/milvus_benchmark/metrics/models/hardware.py new file mode 100644 index 0000000000..c288e456ac --- /dev/null +++ b/tests/benchmark/milvus_benchmark/metrics/models/hardware.py @@ -0,0 +1,24 @@ +import json +import hashlib + + +class Hardware: + """ + { + "_version": "0.1", + "_type": "hardware", + "name": string, + "cpus": float + } + + """ + + def 
__init__(self, name=None, cpus=0.0): + self._version = '0.1' + self._type = 'hardware' + self.name = name + self.cpus = cpus + + def json_md5(self): + json_str = json.dumps(vars(self), sort_keys=True) + return hashlib.md5(json_str.encode('utf-8')).hexdigest() diff --git a/tests/benchmark/milvus_benchmark/metrics/models/metric.py b/tests/benchmark/milvus_benchmark/metrics/models/metric.py new file mode 100644 index 0000000000..ea38009bb0 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/metrics/models/metric.py @@ -0,0 +1,52 @@ +import time +import datetime +import json +import hashlib +from .env import Env +from .server import Server +from .hardware import Hardware + + +class Metric(object): + def __init__(self): + self._version = '0.1' + self._type = 'metric' + self.run_id = None + self.mode = None + self.server = Server() + self.hardware = Hardware() + self.env = Env() + self.status = "INIT" + self.err_message = "" + self.collection = {} + self.index = {} + self.search = {} + self.run_params = {} + self.metrics = { + "type": "", + "value": None, + } + self.datetime = str(datetime.datetime.now()) + + def set_run_id(self): + self.run_id = int(time.time()) + + def set_mode(self, mode): + self.mode = mode + + # including: metric, suite_metric + def set_case_metric_type(self): + self._type = "case" + + def json_md5(self): + json_str = json.dumps(vars(self), sort_keys=True) + return hashlib.md5(json_str.encode('utf-8')).hexdigest() + + def update_status(self, status): + self.status = status + + def update_result(self, result): + self.metrics["value"].update(result) + + def update_message(self, err_message): + self.err_message = err_message \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/metrics/models/server.py b/tests/benchmark/milvus_benchmark/metrics/models/server.py new file mode 100644 index 0000000000..c4aa9ea067 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/metrics/models/server.py @@ -0,0 +1,27 @@ +import json +import hashlib + + +class Server: + """ + { + "_version": "0.1", + "_type": "server", + "version": string, + "build_commit": string, + # "md5": string, + } + """ + + def __init__(self, version=None, mode=None, build_commit=None, deploy_opology=None): + self._version = '0.1' + self._type = 'server' + self.version = version + self.mode = mode + self.build_commit = build_commit + self.deploy_opology = deploy_opology + # self.md5 = md5 + + def json_md5(self): + json_str = json.dumps(vars(self), sort_keys=True) + return hashlib.md5(json_str.encode('utf-8')).hexdigest() diff --git a/tests/benchmark/milvus_benchmark/parser.py b/tests/benchmark/milvus_benchmark/parser.py new file mode 100644 index 0000000000..14abfc4702 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/parser.py @@ -0,0 +1,87 @@ +import pdb +import logging + +logger = logging.getLogger("milvus_benchmark.parser") + + +def operations_parser(operations): + if not operations: + raise Exception("No operations in suite defined") + for run_type, run_params in operations.items(): + logger.debug(run_type) + return (run_type, run_params) + + +def collection_parser(collection_name): + tmp = collection_name.split("_") + # if len(tmp) != 5: + # return None + data_type = tmp[0] + collection_size_unit = tmp[1][-1] + collection_size = tmp[1][0:-1] + if collection_size_unit == "w": + collection_size = int(collection_size) * 10000 + elif collection_size_unit == "m": + collection_size = int(collection_size) * 1000000 + elif collection_size_unit == "b": + collection_size = int(collection_size) * 
1000000000 + dimension = int(tmp[2]) + metric_type = str(tmp[3]) + return (data_type, collection_size, dimension, metric_type) + + +def parse_ann_collection_name(collection_name): + data_type = collection_name.split("_")[0] + dimension = int(collection_name.split("_")[1]) + metric = collection_name.split("_")[2] + # metric = collection_name.attrs['distance'] + # dimension = len(collection_name["train"][0]) + if metric == "euclidean": + metric_type = "l2" + elif metric == "angular": + metric_type = "ip" + elif metric == "jaccard": + metric_type = "jaccard" + elif metric == "hamming": + metric_type = "hamming" + return (data_type, dimension, metric_type) + + +def search_params_parser(param): + # parse top-k, set default value if top-k not in param + if "top_ks" not in param: + top_ks = [10] + else: + top_ks = param["top_ks"] + if isinstance(top_ks, int): + top_ks = [top_ks] + elif isinstance(top_ks, list): + top_ks = list(top_ks) + else: + logger.warning("Invalid format top-ks: %s" % str(top_ks)) + + # parse nqs, set default value if nq not in param + if "nqs" not in param: + nqs = [10] + else: + nqs = param["nqs"] + if isinstance(nqs, int): + nqs = [nqs] + elif isinstance(nqs, list): + nqs = list(nqs) + else: + logger.warning("Invalid format nqs: %s" % str(nqs)) + + # parse nprobes + if "nprobes" not in param: + nprobes = [1] + else: + nprobes = param["nprobes"] + if isinstance(nprobes, int): + nprobes = [nprobes] + elif isinstance(nprobes, list): + nprobes = list(nprobes) + else: + logger.warning("Invalid format nprobes: %s" % str(nprobes)) + + return top_ks, nqs, nprobes diff --git a/tests/benchmark/milvus_benchmark/runners/__init__.py b/tests/benchmark/milvus_benchmark/runners/__init__.py new file mode 100644 index 0000000000..eeb734d392 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/__init__.py @@ -0,0 +1,26 @@ +from .insert import InsertRunner, BPInsertRunner +from .locust import LocustInsertRunner, LocustSearchRunner, LocustRandomRunner +from .search import SearchRunner, InsertSearchRunner +from .build import BuildRunner, InsertBuildRunner +from .get import InsertGetRunner +from .accuracy import AccuracyRunner +from .accuracy import AccAccuracyRunner +from .chaos import SimpleChaosRunner + + +def get_runner(name, env, metric): + return { + "insert_performance": InsertRunner(env, metric), + "bp_insert_performance": BPInsertRunner(env, metric), + "search_performance": SearchRunner(env, metric), + "insert_search_performance": InsertSearchRunner(env, metric), + "locust_insert_performance": LocustInsertRunner(env, metric), + "locust_search_performance": LocustSearchRunner(env, metric), + "locust_random_performance": LocustRandomRunner(env, metric), + "insert_build_performance": InsertBuildRunner(env, metric), + "insert_get_performance": InsertGetRunner(env, metric), + "build_performance": BuildRunner(env, metric), + "accuracy": AccuracyRunner(env, metric), + "ann_accuracy": AccAccuracyRunner(env, metric), + "simple_chaos": SimpleChaosRunner(env, metric) + }.get(name) diff --git a/tests/benchmark/milvus_benchmark/runners/accuracy.py b/tests/benchmark/milvus_benchmark/runners/accuracy.py new file mode 100644 index 0000000000..0ec2a1aaae --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/accuracy.py @@ -0,0 +1,262 @@ +import json +import time +import copy +import logging +import numpy as np + +from milvus_benchmark import parser +from milvus_benchmark.runners import utils +from milvus_benchmark.runners.base import BaseRunner + +logger = 
logging.getLogger("milvus_benchmark.runners.accuracy") +INSERT_INTERVAL = 50000 + + +class AccuracyRunner(BaseRunner): + """run accuracy""" + name = "accuracy" + + def __init__(self, env, metric): + super(AccuracyRunner, self).__init__(env, metric) + + def extract_cases(self, collection): + collection_name = collection["collection_name"] if "collection_name" in collection else None + (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name) + vector_type = utils.get_vector_type(data_type) + index_field_name = utils.get_default_field_name(vector_type) + base_query_vectors = utils.get_vectors_from_binary(utils.MAX_NQ, dimension, data_type) + collection_info = { + "dimension": dimension, + "metric_type": metric_type, + "dataset_name": collection_name, + "collection_size": collection_size + } + index_info = self.milvus.describe_index(index_field_name, collection_name) + filters = collection["filters"] if "filters" in collection else [] + filter_query = [] + top_ks = collection["top_ks"] + nqs = collection["nqs"] + search_params = collection["search_params"] + search_params = utils.generate_combinations(search_params) + cases = list() + case_metrics = list() + self.init_metric(self.name, collection_info, index_info, search_info=None) + for search_param in search_params: + if not filters: + filters.append(None) + for filter in filters: + filter_param = [] + if isinstance(filter, dict) and "range" in filter: + filter_query.append(eval(filter["range"])) + filter_param.append(filter["range"]) + if isinstance(filter, dict) and "term" in filter: + filter_query.append(eval(filter["term"])) + filter_param.append(filter["term"]) + for nq in nqs: + query_vectors = base_query_vectors[0:nq] + for top_k in top_ks: + search_info = { + "topk": top_k, + "query": query_vectors, + "metric_type": utils.metric_type_trans(metric_type), + "params": search_param} + # TODO: only update search_info + case_metric = copy.deepcopy(self.metric) + case_metric.set_case_metric_type() + case_metric.search = { + "nq": nq, + "topk": top_k, + "search_param": search_param, + "filter": filter_param + } + vector_query = {"vector": {index_field_name: search_info}} + case = { + "collection_name": collection_name, + "index_field_name": index_field_name, + "dimension": dimension, + "data_type": data_type, + "metric_type": metric_type, + "vector_type": vector_type, + "collection_size": collection_size, + "filter_query": filter_query, + "vector_query": vector_query + } + cases.append(case) + case_metrics.append(case_metric) + return cases, case_metrics + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + self.milvus.set_collection(collection_name) + if not self.milvus.exists_collection(): + logger.info("collection not exist") + self.milvus.load_collection(timeout=600) + + def run_case(self, case_metric, **case_param): + collection_size = case_param["collection_size"] + nq = case_metric.search["nq"] + top_k = case_metric.search["topk"] + query_res = self.milvus.query(case_param["vector_query"], filter_query=case_param["filter_query"]) + true_ids = utils.get_ground_truth_ids(collection_size) + logger.debug({"true_ids": [len(true_ids[0]), len(true_ids[0])]}) + result_ids = self.milvus.get_ids(query_res) + logger.debug({"result_ids": len(result_ids[0])}) + acc_value = utils.get_recall_value(true_ids[:nq, :top_k].tolist(), result_ids) + tmp_result = {"acc": acc_value} + return tmp_result + + +class AccAccuracyRunner(AccuracyRunner): + """run ann accuracy""" + """ + 1. 
entities from hdf5 + 2. one collection test different index + """ + name = "ann_accuracy" + + def __init__(self, env, metric): + super(AccAccuracyRunner, self).__init__(env, metric) + + def extract_cases(self, collection): + collection_name = collection["collection_name"] if "collection_name" in collection else None + (data_type, dimension, metric_type) = parser.parse_ann_collection_name(collection_name) + hdf5_source_file = collection["source_file"] + index_types = collection["index_types"] + index_params = collection["index_params"] + top_ks = collection["top_ks"] + nqs = collection["nqs"] + search_params = collection["search_params"] + vector_type = utils.get_vector_type(data_type) + index_field_name = utils.get_default_field_name(vector_type) + dataset = utils.get_dataset(hdf5_source_file) + collection_info = { + "dimension": dimension, + "metric_type": metric_type, + "dataset_name": collection_name + } + filters = collection["filters"] if "filters" in collection else [] + filter_query = [] + search_params = utils.generate_combinations(search_params) + index_params = utils.generate_combinations(index_params) + cases = list() + case_metrics = list() + self.init_metric(self.name, collection_info, {}, search_info=None) + true_ids = np.array(dataset["neighbors"]) + for index_type in index_types: + for index_param in index_params: + index_info = { + "index_type": index_type, + "index_param": index_param + } + for search_param in search_params: + if not filters: + filters.append(None) + for filter in filters: + filter_param = [] + if isinstance(filter, dict) and "range" in filter: + filter_query.append(eval(filter["range"])) + filter_param.append(filter["range"]) + if isinstance(filter, dict) and "term" in filter: + filter_query.append(eval(filter["term"])) + filter_param.append(filter["term"]) + for nq in nqs: + query_vectors = utils.normalize(metric_type, np.array(dataset["test"][:nq])) + for top_k in top_ks: + search_info = { + "topk": top_k, + "query": query_vectors, + "metric_type": utils.metric_type_trans(metric_type), + "params": search_param} + # TODO: only update search_info + case_metric = copy.deepcopy(self.metric) + case_metric.set_case_metric_type() + case_metric.index = index_info + case_metric.search = { + "nq": nq, + "topk": top_k, + "search_param": search_param, + "filter": filter_param + } + vector_query = {"vector": {index_field_name: search_info}} + case = { + "collection_name": collection_name, + "dataset": dataset, + "index_field_name": index_field_name, + "dimension": dimension, + "data_type": data_type, + "metric_type": metric_type, + "vector_type": vector_type, + "index_type": index_type, + "index_param": index_param, + "filter_query": filter_query, + "vector_query": vector_query, + "true_ids": true_ids + } + cases.append(case) + case_metrics.append(case_metric) + return cases, case_metrics + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + metric_type = case_param["metric_type"] + dimension = case_param["dimension"] + vector_type = case_param["vector_type"] + index_type = case_param["index_type"] + index_param = case_param["index_param"] + index_field_name = case_param["index_field_name"] + + self.milvus.set_collection(collection_name) + if self.milvus.exists_collection(collection_name): + logger.info("Re-create collection: %s" % collection_name) + self.milvus.drop() + dataset = case_param["dataset"] + self.milvus.create_collection(dimension, data_type=vector_type) + insert_vectors = utils.normalize(metric_type, 
np.array(dataset["train"])) + if len(insert_vectors) != dataset["train"].shape[0]: + raise Exception("Row count of insert vectors: %d is not equal to dataset size: %d" % ( + len(insert_vectors), dataset["train"].shape[0])) + logger.debug("The row count of entities to be inserted: %d" % len(insert_vectors)) + # Insert batch once + # milvus_instance.insert(insert_vectors) + info = self.milvus.get_info(collection_name) + loops = len(insert_vectors) // INSERT_INTERVAL + 1 + for i in range(loops): + start = i * INSERT_INTERVAL + end = min((i + 1) * INSERT_INTERVAL, len(insert_vectors)) + if start < end: + tmp_vectors = insert_vectors[start:end] + ids = [i for i in range(start, end)] + if not isinstance(tmp_vectors, list): + entities = utils.generate_entities(info, tmp_vectors.tolist(), ids) + res_ids = self.milvus.insert(entities) + else: + entities = utils.generate_entities(tmp_vectors, ids) + res_ids = self.milvus.insert(entities) + assert res_ids == ids + logger.debug("End insert, start flush") + self.milvus.flush() + logger.debug("End flush") + res_count = self.milvus.count() + logger.info("Table: %s, row count: %d" % (collection_name, res_count)) + if res_count != len(insert_vectors): + raise Exception("Table row count is not equal to insert vectors") + if self.milvus.describe_index(index_field_name): + self.milvus.drop_index(index_field_name) + logger.info("Re-create index: %s" % collection_name) + self.milvus.create_index(index_field_name, index_type, metric_type, index_param=index_param) + logger.info(self.milvus.describe_index(index_field_name)) + logger.info("Start load collection: %s" % collection_name) + # self.milvus.release_collection() + self.milvus.load_collection(timeout=600) + logger.info("End load collection: %s" % collection_name) + + def run_case(self, case_metric, **case_param): + true_ids = case_param["true_ids"] + nq = case_metric.search["nq"] + top_k = case_metric.search["topk"] + query_res = self.milvus.query(case_param["vector_query"], filter_query=case_param["filter_query"]) + result_ids = self.milvus.get_ids(query_res) + acc_value = utils.get_recall_value(true_ids[:nq, :top_k].tolist(), result_ids) + tmp_result = {"acc": acc_value} + return tmp_result + diff --git a/tests/benchmark/milvus_benchmark/runners/base.py b/tests/benchmark/milvus_benchmark/runners/base.py new file mode 100644 index 0000000000..e0ac1176e4 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/base.py @@ -0,0 +1,152 @@ +import time +import pdb +import logging +import threading +import traceback +import grpc +import numpy as np + +from milvus_benchmark.env import get_env +from milvus_benchmark import config +from milvus_benchmark.client import MilvusClient +from . 
import utils + +logger = logging.getLogger("milvus_benchmark.runners.base") + + +class BaseRunner(object): + """runner is actually the executors""" + + def __init__(self, env, metric): + self._metric = metric + self._env = env + self._run_as_group = False + self._result = dict() + self._milvus = MilvusClient(host=self._env.hostname) + + def run(self, run_params): + pass + + def stop(self): + logger.debug("Stop runner...") + pass + + @property + def hostname(self): + return self._env.hostname + + @property + def port(self): + return self._env.port + + @property + def milvus(self): + return self._milvus + + @property + def metric(self): + return self._metric + + @property + def result(self): + return self._result + + @property + def run_as_group(self): + return self._run_as_group + + def init_metric(self, name, collection_info=None, index_info=None, search_info=None, run_params=None, t="metric"): + self._metric.collection = collection_info + self._metric.index = index_info + self._metric.search = search_info + self._metric.type = t + self._metric.run_params = run_params + self._metric.metrics = { + "type": name, + "value": self._result + } + + # TODO: need an easy method to change value in metric + def update_metric(self, key, value): + pass + + def insert_core(self, milvus, info, start_id, vectors): + # start insert vectors + end_id = start_id + len(vectors) + logger.debug("Start id: %s, end id: %s" % (start_id, end_id)) + ids = [k for k in range(start_id, end_id)] + entities = utils.generate_entities(info, vectors, ids) + ni_start_time = time.time() + try: + _res_ids = milvus.insert(entities) + except Exception as e: + logger.error("Insert failed") + logger.error(traceback.format_exc()) + raise e + # assert ids == res_ids + # milvus.flush() + ni_end_time = time.time() + logger.debug(milvus.count()) + return ni_end_time-ni_start_time + + # TODO: need to improve + def insert(self, milvus, collection_name, data_type, dimension, size, ni): + total_time = 0.0 + rps = 0.0 + ni_time = 0.0 + vectors_per_file = utils.get_len_vectors_per_file(data_type, dimension) + if size % vectors_per_file or size % ni: + logger.error("Not invalid collection size or ni") + return False + i = 0 + info = milvus.get_info(collection_name) + if data_type == "local" or not data_type: + # insert local + info = milvus.get_info(collection_name) + while i < (size // vectors_per_file): + vectors = [] + for j in range(vectors_per_file // ni): + # vectors = src_vectors[j * ni:(j + 1) * ni] + vectors = utils.generate_vectors(ni, dimension) + if vectors: + start_id = i * vectors_per_file + j * ni + ni_time = self.insert_core(milvus, info, start_id, vectors) + total_time = total_time+ni_time + i += 1 + else: + # insert from file + while i < (size // vectors_per_file): + vectors = [] + if vectors_per_file >= ni: + file_name = utils.gen_file_name(i, dimension, data_type) + # logger.info("Load npy file: %s start" % file_name) + data = np.load(file_name) + # logger.info("Load npy file: %s end" % file_name) + for j in range(vectors_per_file // ni): + vectors = data[j * ni:(j + 1) * ni].tolist() + if vectors: + start_id = i * vectors_per_file + j * ni + ni_time = self.insert_core(milvus, info, start_id, vectors) + total_time = total_time+ni_time + i += 1 + else: + vectors.clear() + loops = ni // vectors_per_file + for j in range(loops): + file_name = utils.gen_file_name(loops * i + j, dimension, data_type) + data = np.load(file_name) + vectors.extend(data.tolist()) + if vectors: + start_id = i * vectors_per_file + ni_time = 
self.insert_core(milvus, info, start_id, vectors) + total_time = total_time+ni_time + i += loops + rps = round(size / total_time, 2) + ni_time = round(total_time / (size / ni), 2) + result = { + "total_time": round(total_time, 2), + "rps": rps, + "ni_time": ni_time + } + logger.info(result) + return result diff --git a/tests/benchmark/milvus_benchmark/runners/build.py b/tests/benchmark/milvus_benchmark/runners/build.py new file mode 100644 index 0000000000..7d4fb8d804 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/build.py @@ -0,0 +1,106 @@ +import time +import copy +import logging +from milvus_benchmark import parser +from milvus_benchmark.runners import utils +from milvus_benchmark.runners.base import BaseRunner + +logger = logging.getLogger("milvus_benchmark.runners.build") + + +class BuildRunner(BaseRunner): + """run build""" + name = "build_performance" + + def __init__(self, env, metric): + super(BuildRunner, self).__init__(env, metric) + + def extract_cases(self, collection): + collection_name = collection["collection_name"] if "collection_name" in collection else None + (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name) + ni_per = collection["ni_per"] + vector_type = utils.get_vector_type(data_type) + other_fields = collection["other_fields"] if "other_fields" in collection else None + collection_info = { + "dimension": dimension, + "metric_type": metric_type, + "dataset_name": collection_name, + "collection_size": collection_size, + "other_fields": other_fields, + "ni_per": ni_per + } + index_field_name = utils.get_default_field_name(vector_type) + index_type = collection["index_type"] + index_param = collection["index_param"] + index_info = { + "index_type": index_type, + "index_param": index_param + } + flush = True + if "flush" in collection and collection["flush"] == "no": + flush = False + self.init_metric(self.name, collection_info, index_info, search_info=None) + case_metric = copy.deepcopy(self.metric) + case_metric.set_case_metric_type() + case_metrics = list() + case_params = list() + case_metrics.append(case_metric) + case_param = { + "collection_name": collection_name, + "data_type": data_type, + "dimension": dimension, + "collection_size": collection_size, + "ni_per": ni_per, + "metric_type": metric_type, + "vector_type": vector_type, + "other_fields": other_fields, + "flush_after_insert": flush, + "index_field_name": index_field_name, + "index_type": index_type, + "index_param": index_param, + } + case_params.append(case_param) + return case_params, case_metrics + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + self.milvus.set_collection(collection_name) + if not self.milvus.exists_collection(): + logger.info("collection not exist") + logger.debug({"collection count": self.milvus.count()}) + + def run_case(self, case_metric, **case_param): + index_field_name = case_param["index_field_name"] + start_time = time.time() + self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], + index_param=case_param["index_param"]) + build_time = round(time.time() - start_time, 2) + tmp_result = {"build_time": build_time} + return tmp_result + + +class InsertBuildRunner(BuildRunner): + """run insert and build""" + name = "insert_build_performance" + + def __init__(self, env, metric): + super(InsertBuildRunner, self).__init__(env, metric) + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + dimension = 
case_param["dimension"] + vector_type = case_param["vector_type"] + other_fields = case_param["other_fields"] + self.milvus.set_collection(collection_name) + if self.milvus.exists_collection(): + logger.debug("Start drop collection") + self.milvus.drop() + time.sleep(utils.DELETE_INTERVAL_TIME) + self.milvus.create_collection(dimension, data_type=vector_type, other_fields=other_fields) + self.insert(self.milvus, collection_name, case_param["data_type"], dimension, + case_param["collection_size"], case_param["ni_per"]) + start_time = time.time() + self.milvus.flush() + flush_time = round(time.time() - start_time, 2) + logger.debug({"collection count": self.milvus.count()}) + logger.debug({"flush_time": flush_time}) diff --git a/tests/benchmark/milvus_benchmark/runners/chaos.py b/tests/benchmark/milvus_benchmark/runners/chaos.py new file mode 100644 index 0000000000..f5224e2ffb --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/chaos.py @@ -0,0 +1,127 @@ +import copy +import logging +import pdb +import time +from operator import methodcaller +from yaml import full_load, dump +import threading +from milvus_benchmark import utils +from milvus_benchmark.runners import utils as runner_utils +from milvus_benchmark.chaos import utils as chaos_utils +from milvus_benchmark.runners.base import BaseRunner +from chaos.chaos_opt import ChaosOpt +from milvus_benchmark import config +from milvus_benchmark.chaos.chaos_mesh import PodChaos, NetworkChaos + +logger = logging.getLogger("milvus_benchmark.runners.chaos") + +kind_chaos_mapping = { + "PodChaos": PodChaos, + "NetworkChaos": NetworkChaos +} + +assert_func_mapping = { + "fail": chaos_utils.assert_fail, + "pass": chaos_utils.assert_pass +} + + +class SimpleChaosRunner(BaseRunner): + """run chaos""" + name = "simple_chaos" + + def __init__(self, env, metric): + super(SimpleChaosRunner, self).__init__(env, metric) + + async def async_call(self, func, **kwargs): + future = methodcaller(func, **kwargs)(self.milvus) + + def run_step(self, interface_name, interface_params): + if interface_name == "create_collection": + collection_name = utils.get_unique_name("chaos") + self.data_type = interface_params["data_type"] + self.dimension = interface_params["dimension"] + self.milvus.set_collection(collection_name) + vector_type = runner_utils.get_vector_type(self.data_type) + self.milvus.create_collection(self.dimension, data_type=vector_type) + elif interface_name == "insert": + batch_size = interface_params["batch_size"] + collection_size = interface_params["collection_size"] + self.insert(self.milvus, self.milvus.collection_name, self.data_type, self.dimension, collection_size, + batch_size) + elif interface_name == "create_index": + metric_type = interface_params["metric_type"] + index_type = interface_params["index_type"] + index_param = interface_params["index_param"] + vector_type = runner_utils.get_vector_type(self.data_type) + field_name = runner_utils.get_default_field_name(vector_type) + self.milvus.create_index(field_name, index_type, metric_type, index_param=index_param) + elif interface_name == "flush": + self.milvus.flush() + + def extract_cases(self, collection): + before_steps = collection["before"] + after = collection["after"] if "after" in collection else None + processing = collection["processing"] + case_metrics = [] + case_params = [{ + "before_steps": before_steps, + "after": after, + "processing": processing + }] + self.init_metric(self.name, {}, {}, None) + case_metric = copy.deepcopy(self.metric) + 
case_metric.set_case_metric_type() + case_metrics.append(case_metric) + return case_params, case_metrics + + def prepare(self, **case_param): + steps = case_param["before_steps"] + for step in steps: + interface_name = step["interface_name"] + params = step["params"] + self.run_step(interface_name, params) + + def run_case(self, case_metric, **case_param): + processing = case_param["processing"] + after = case_param["after"] + user_chaos = processing["chaos"] + kind = user_chaos["kind"] + spec = user_chaos["spec"] + metadata_name = config.NAMESPACE + "-" + kind.lower() + metadata = {"name": metadata_name} + process_assertion = processing["assertion"] + after_assertion = after["assertion"] + # load yaml from default template to generate stand chaos dict + chaos_mesh = kind_chaos_mapping[kind](config.DEFAULT_API_VERSION, kind, metadata, spec) + experiment_config = chaos_mesh.gen_experiment_config() + process_func = processing["interface_name"] + process_params = processing["params"] if "params" in processing else {} + after_func = after["interface_name"] + after_params = after["params"] if "params" in after else {} + logger.debug(chaos_mesh.kind) + chaos_opt = ChaosOpt(chaos_mesh.kind) + chaos_objects = chaos_opt.list_chaos_object() + if len(chaos_objects["items"]) != 0: + logger.debug(chaos_objects["items"]) + chaos_opt.delete_chaos_object(chaos_mesh.metadata["name"]) + # with open('./pod-newq.yaml', "w") as f: + # dump(experiment_config, f) + # f.close() + # concurrent inject chaos and run func + # logger.debug(experiment_config) + t_milvus = threading.Thread(target=assert_func_mapping[process_assertion], args=(process_func, self.milvus,), kwargs=process_params) + try: + t_milvus.start() + chaos_opt.create_chaos_object(experiment_config) + # processing assert exception + except Exception as e: + logger.info("exception {}".format(str(e))) + else: + chaos_opt.delete_chaos_object(chaos_mesh.metadata["name"]) + # TODO retry connect milvus + time.sleep(15) + assert_func_mapping[after_assertion](after_func, self.milvus, **after_params) + finally: + chaos_opt.delete_all_chaos_object() + logger.info(chaos_opt.list_chaos_object()) diff --git a/tests/benchmark/milvus_benchmark/runners/docker_runner.py b/tests/benchmark/milvus_benchmark/runners/docker_runner.py new file mode 100644 index 0000000000..406f3524d2 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/docker_runner.py @@ -0,0 +1,366 @@ +import os +import logging +import pdb +import time +import random +from multiprocessing import Process +import numpy as np +from client import MilvusClient +import utils +import parser +from runner import Runner + +logger = logging.getLogger("milvus_benchmark.docker") + + +class DockerRunner(Runner): + """run docker mode""" + def __init__(self, image): + super(DockerRunner, self).__init__() + self.image = image + + def run(self, definition, run_type=None): + if run_type == "performance": + for op_type, op_value in definition.items(): + # run docker mode + run_count = op_value["run_count"] + run_params = op_value["params"] + container = None + + if op_type == "insert": + if not run_params: + logger.debug("No run params") + continue + for index, param in enumerate(run_params): + logger.info("Definition param: %s" % str(param)) + collection_name = param["collection_name"] + volume_name = param["db_path_prefix"] + print(collection_name) + (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name) + for k, v in param.items(): + if 
k.startswith("server."): + # Update server config + utils.modify_config(k, v, type="server", db_slave=None) + container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None) + time.sleep(2) + milvus = MilvusClient(collection_name) + # Check has collection or not + if milvus.exists_collection(): + milvus.delete() + time.sleep(10) + milvus.create_collection(collection_name, dimension, index_file_size, metric_type) + # debug + # milvus.create_index("ivf_sq8", 16384) + res = self.do_insert(milvus, collection_name, data_type, dimension, collection_size, param["ni_per"]) + logger.info(res) + # wait for file merge + time.sleep(collection_size * dimension / 5000000) + # Clear up + utils.remove_container(container) + + elif op_type == "query": + for index, param in enumerate(run_params): + logger.info("Definition param: %s" % str(param)) + collection_name = param["dataset"] + volume_name = param["db_path_prefix"] + (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name) + for k, v in param.items(): + if k.startswith("server."): + utils.modify_config(k, v, type="server") + container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None) + time.sleep(2) + milvus = MilvusClient(collection_name) + logger.debug(milvus.show_collections()) + # Check has collection or not + if not milvus.exists_collection(): + logger.warning("Table %s not existed, continue exec next params ..." % collection_name) + continue + # parse index info + index_types = param["index.index_types"] + nlists = param["index.nlists"] + # parse top-k, nq, nprobe + top_ks, nqs, nprobes = parser.search_params_parser(param) + for index_type in index_types: + for nlist in nlists: + result = milvus.describe_index() + logger.info(result) + # milvus.drop_index() + # milvus.create_index(index_type, nlist) + result = milvus.describe_index() + logger.info(result) + logger.info(milvus.count()) + # preload index + milvus.preload_collection() + logger.info("Start warm up query") + res = self.do_query(milvus, collection_name, [1], [1], 1, 1) + logger.info("End warm up query") + # Run query test + for nprobe in nprobes: + logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe)) + res = self.do_query(milvus, collection_name, top_ks, nqs, nprobe, run_count) + headers = ["Nq/Top-k"] + headers.extend([str(top_k) for top_k in top_ks]) + utils.print_collection(headers, nqs, res) + utils.remove_container(container) + + elif run_type == "insert_performance": + for op_type, op_value in definition.items(): + # run docker mode + run_count = op_value["run_count"] + run_params = op_value["params"] + container = None + if not run_params: + logger.debug("No run params") + continue + for index, param in enumerate(run_params): + logger.info("Definition param: %s" % str(param)) + collection_name = param["collection_name"] + volume_name = param["db_path_prefix"] + print(collection_name) + (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name) + for k, v in param.items(): + if k.startswith("server."): + # Update server config + utils.modify_config(k, v, type="server", db_slave=None) + container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None) + time.sleep(2) + milvus = MilvusClient(collection_name) + # Check has collection or not + if milvus.exists_collection(): + milvus.delete() + 
time.sleep(10) + milvus.create_collection(collection_name, dimension, index_file_size, metric_type) + # debug + # milvus.create_index("ivf_sq8", 16384) + res = self.do_insert(milvus, collection_name, data_type, dimension, collection_size, param["ni_per"]) + logger.info(res) + # wait for file merge + time.sleep(collection_size * dimension / 5000000) + # Clear up + utils.remove_container(container) + + elif run_type == "search_performance": + for op_type, op_value in definition.items(): + # run docker mode + run_count = op_value["run_count"] + run_params = op_value["params"] + container = None + for index, param in enumerate(run_params): + logger.info("Definition param: %s" % str(param)) + collection_name = param["dataset"] + volume_name = param["db_path_prefix"] + (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name) + for k, v in param.items(): + if k.startswith("server."): + utils.modify_config(k, v, type="server") + container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None) + time.sleep(2) + milvus = MilvusClient(collection_name) + logger.debug(milvus.show_collections()) + # Check has collection or not + if not milvus.exists_collection(): + logger.warning("Table %s not existed, continue exec next params ..." % collection_name) + continue + # parse index info + index_types = param["index.index_types"] + nlists = param["index.nlists"] + # parse top-k, nq, nprobe + top_ks, nqs, nprobes = parser.search_params_parser(param) + for index_type in index_types: + for nlist in nlists: + result = milvus.describe_index() + logger.info(result) + # milvus.drop_index() + # milvus.create_index(index_type, nlist) + result = milvus.describe_index() + logger.info(result) + logger.info(milvus.count()) + # preload index + milvus.preload_collection() + logger.info("Start warm up query") + res = self.do_query(milvus, collection_name, [1], [1], 1, 1) + logger.info("End warm up query") + # Run query test + for nprobe in nprobes: + logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe)) + res = self.do_query(milvus, collection_name, top_ks, nqs, nprobe, run_count) + headers = ["Nq/Top-k"] + headers.extend([str(top_k) for top_k in top_ks]) + utils.print_collection(headers, nqs, res) + utils.remove_container(container) + + elif run_type == "accuracy": + """ + { + "dataset": "random_50m_1024_512", + "index.index_types": ["flat", ivf_flat", "ivf_sq8"], + "index.nlists": [16384], + "nprobes": [1, 32, 128], + "nqs": [100], + "top_ks": [1, 64], + "server.use_blas_threshold": 1100, + "server.cpu_cache_capacity": 256 + } + """ + for op_type, op_value in definition.items(): + if op_type != "query": + logger.warning("invalid operation: %s in accuracy test, only support query operation" % op_type) + break + run_count = op_value["run_count"] + run_params = op_value["params"] + container = None + + for index, param in enumerate(run_params): + logger.info("Definition param: %s" % str(param)) + collection_name = param["dataset"] + sift_acc = False + if "sift_acc" in param: + sift_acc = param["sift_acc"] + (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name) + for k, v in param.items(): + if k.startswith("server."): + utils.modify_config(k, v, type="server") + volume_name = param["db_path_prefix"] + container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None) + time.sleep(2) 
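+                        # fixed 2s pause to let the freshly started container open
+                        # its ports; polling the server until it answers would be
+                        # more robust, but this matches the rest of the runner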
+ milvus = MilvusClient(collection_name) + # Check has collection or not + if not milvus.exists_collection(): + logger.warning("Table %s not existed, continue exec next params ..." % collection_name) + continue + + # parse index info + index_types = param["index.index_types"] + nlists = param["index.nlists"] + # parse top-k, nq, nprobe + top_ks, nqs, nprobes = parser.search_params_parser(param) + if sift_acc is True: + # preload groundtruth data + true_ids_all = self.get_groundtruth_ids(collection_size) + acc_dict = {} + for index_type in index_types: + for nlist in nlists: + result = milvus.describe_index() + logger.info(result) + milvus.create_index(index_type, nlist) + # preload index + milvus.preload_collection() + # Run query test + for nprobe in nprobes: + logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe)) + for top_k in top_ks: + for nq in nqs: + result_ids = [] + id_prefix = "%s_index_%s_nlist_%s_metric_type_%s_nprobe_%s_top_k_%s_nq_%s" % \ + (collection_name, index_type, nlist, metric_type, nprobe, top_k, nq) + if sift_acc is False: + self.do_query_acc(milvus, collection_name, top_k, nq, nprobe, id_prefix) + if index_type != "flat": + # Compute accuracy + base_name = "%s_index_flat_nlist_%s_metric_type_%s_nprobe_%s_top_k_%s_nq_%s" % \ + (collection_name, nlist, metric_type, nprobe, top_k, nq) + avg_acc = self.compute_accuracy(base_name, id_prefix) + logger.info("Query: <%s> accuracy: %s" % (id_prefix, avg_acc)) + else: + result_ids, result_distances = self.do_query_ids(milvus, collection_name, top_k, nq, nprobe) + debug_file_ids = "0.5.3_result_ids" + debug_file_distances = "0.5.3_result_distances" + with open(debug_file_ids, "w+") as fd: + total = 0 + for index, item in enumerate(result_ids): + true_item = true_ids_all[:nq, :top_k].tolist()[index] + tmp = set(item).intersection(set(true_item)) + total = total + len(tmp) + fd.write("query: N-%d, intersection: %d, total: %d\n" % (index, len(tmp), total)) + fd.write("%s\n" % str(item)) + fd.write("%s\n" % str(true_item)) + acc_value = self.get_recall_value(true_ids_all[:nq, :top_k].tolist(), result_ids) + logger.info("Query: <%s> accuracy: %s" % (id_prefix, acc_value)) + # # print accuracy collection + # headers = [collection_name] + # headers.extend([str(top_k) for top_k in top_ks]) + # utils.print_collection(headers, nqs, res) + + # remove container, and run next definition + logger.info("remove container, and run next definition") + utils.remove_container(container) + + elif run_type == "stability": + for op_type, op_value in definition.items(): + if op_type != "query": + logger.warning("invalid operation: %s in accuracy test, only support query operation" % op_type) + break + run_count = op_value["run_count"] + run_params = op_value["params"] + container = None + for index, param in enumerate(run_params): + logger.info("Definition param: %s" % str(param)) + collection_name = param["dataset"] + index_type = param["index_type"] + volume_name = param["db_path_prefix"] + (data_type, collection_size, index_file_size, dimension, metric_type) = parser.collection_parser(collection_name) + + # set default test time + if "during_time" not in param: + during_time = 100 # seconds + else: + during_time = int(param["during_time"]) * 60 + # set default query process num + if "query_process_num" not in param: + query_process_num = 10 + else: + query_process_num = int(param["query_process_num"]) + + for k, v in param.items(): + if k.startswith("server."): + utils.modify_config(k, v, 
type="server") + + container = utils.run_server(self.image, test_type="remote", volume_name=volume_name, db_slave=None) + time.sleep(2) + milvus = MilvusClient(collection_name) + # Check has collection or not + if not milvus.exists_collection(): + logger.warning("Table %s not existed, continue exec next params ..." % collection_name) + continue + + start_time = time.time() + insert_vectors = [[random.random() for _ in range(dimension)] for _ in range(10000)] + i = 0 + while time.time() < start_time + during_time: + i = i + 1 + processes = [] + # do query + # for i in range(query_process_num): + # milvus_instance = MilvusClient(collection_name) + # top_k = random.choice([x for x in range(1, 100)]) + # nq = random.choice([x for x in range(1, 100)]) + # nprobe = random.choice([x for x in range(1, 1000)]) + # # logger.info("index_type: %s, nlist: %s, metric_type: %s, nprobe: %s" % (index_type, nlist, metric_type, nprobe)) + # p = Process(target=self.do_query, args=(milvus_instance, collection_name, [top_k], [nq], [nprobe], run_count, )) + # processes.append(p) + # p.start() + # time.sleep(0.1) + # for p in processes: + # p.join() + milvus_instance = MilvusClient(collection_name) + top_ks = random.sample([x for x in range(1, 100)], 3) + nqs = random.sample([x for x in range(1, 1000)], 3) + nprobe = random.choice([x for x in range(1, 500)]) + res = self.do_query(milvus, collection_name, top_ks, nqs, nprobe, run_count) + if i % 10 == 0: + status, res = milvus_instance.insert(insert_vectors, ids=[x for x in range(len(insert_vectors))]) + if not status.OK(): + logger.error(status) + # status = milvus_instance.drop_index() + # if not status.OK(): + # logger.error(status) + # index_type = random.choice(["flat", "ivf_flat", "ivf_sq8"]) + milvus_instance.create_index(index_type, 16384) + result = milvus.describe_index() + logger.info(result) + # milvus_instance.create_index("ivf_sq8", 16384) + utils.remove_container(container) + + else: + logger.warning("Run type: %s not supported" % run_type) + diff --git a/tests/benchmark/milvus_benchmark/runners/docker_utils.py b/tests/benchmark/milvus_benchmark/runners/docker_utils.py new file mode 100644 index 0000000000..504a7f51c9 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/docker_utils.py @@ -0,0 +1,126 @@ +# def pull_image(image): +# registry = image.split(":")[0] +# image_tag = image.split(":")[1] +# client = docker.APIClient(base_url='unix://var/run/docker.sock') +# logger.info("Start pulling image: %s" % image) +# return client.pull(registry, image_tag) + + +# def run_server(image, mem_limit=None, timeout=30, test_type="local", volume_name=None, db_slave=None): +# import colors + +# client = docker.from_env() +# # if mem_limit is None: +# # mem_limit = psutil.virtual_memory().available +# # logger.info('Memory limit:', mem_limit) +# # cpu_limit = "0-%d" % (multiprocessing.cpu_count() - 1) +# # logger.info('Running on CPUs:', cpu_limit) +# for dir_item in ['logs', 'db']: +# try: +# os.mkdir(os.path.abspath(dir_item)) +# except Exception as e: +# pass + +# if test_type == "local": +# volumes = { +# os.path.abspath('conf'): +# {'bind': '/opt/milvus/conf', 'mode': 'ro'}, +# os.path.abspath('logs'): +# {'bind': '/opt/milvus/logs', 'mode': 'rw'}, +# os.path.abspath('db'): +# {'bind': '/opt/milvus/db', 'mode': 'rw'}, +# } +# elif test_type == "remote": +# if volume_name is None: +# raise Exception("No volume name") +# remote_log_dir = volume_name+'/logs' +# remote_db_dir = volume_name+'/db' + +# for dir_item in [remote_log_dir, remote_db_dir]: +# 
if not os.path.isdir(dir_item): +# os.makedirs(dir_item, exist_ok=True) +# volumes = { +# os.path.abspath('conf'): +# {'bind': '/opt/milvus/conf', 'mode': 'ro'}, +# remote_log_dir: +# {'bind': '/opt/milvus/logs', 'mode': 'rw'}, +# remote_db_dir: +# {'bind': '/opt/milvus/db', 'mode': 'rw'} +# } +# # add volumes +# if db_slave and isinstance(db_slave, int): +# for i in range(2, db_slave+1): +# remote_db_dir = volume_name+'/data'+str(i) +# if not os.path.isdir(remote_db_dir): +# os.makedirs(remote_db_dir, exist_ok=True) +# volumes[remote_db_dir] = {'bind': '/opt/milvus/data'+str(i), 'mode': 'rw'} + +# container = client.containers.run( +# image, +# volumes=volumes, +# runtime="nvidia", +# ports={'19530/tcp': 19530, '8080/tcp': 8080}, +# # environment=["OMP_NUM_THREADS=48"], +# # cpuset_cpus=cpu_limit, +# # mem_limit=mem_limit, +# # environment=[""], +# detach=True) + +# def stream_logs(): +# for line in container.logs(stream=True): +# logger.info(colors.color(line.decode().rstrip(), fg='blue')) + +# if sys.version_info >= (3, 0): +# t = threading.Thread(target=stream_logs, daemon=True) +# else: +# t = threading.Thread(target=stream_logs) +# t.daemon = True +# t.start() + +# logger.info('Container: %s started' % container) +# return container +# # exit_code = container.wait(timeout=timeout) +# # # Exit if exit code +# # if exit_code == 0: +# # return container +# # elif exit_code is not None: +# # print(colors.color(container.logs().decode(), fg='red')) + +# def restart_server(container): +# client = docker.APIClient(base_url='unix://var/run/docker.sock') + +# client.restart(container.name) +# logger.info('Container: %s restarted' % container.name) +# return container + + +# def remove_container(container): +# container.remove(force=True) +# logger.info('Container: %s removed' % container) + + +# def remove_all_containers(image): +# client = docker.from_env() +# try: +# for container in client.containers.list(): +# if image in container.image.tags: +# container.stop(timeout=30) +# container.remove(force=True) +# except Exception as e: +# logger.error("Containers removed failed") + + +# def container_exists(image): +# ''' +# Check if container existed with the given image name +# @params: image name +# @return: container if exists +# ''' +# res = False +# client = docker.from_env() +# for container in client.containers.list(): +# if image in container.image.tags: +# # True +# res = container +# return res + diff --git a/tests/benchmark/milvus_benchmark/runners/get.py b/tests/benchmark/milvus_benchmark/runners/get.py new file mode 100644 index 0000000000..4a95a668db --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/get.py @@ -0,0 +1,121 @@ +import time +import copy +import logging +from milvus_benchmark import parser +from milvus_benchmark.runners import utils +from milvus_benchmark.runners.base import BaseRunner + +logger = logging.getLogger("milvus_benchmark.runners.get") + + +def get_ids(length, size): + ids_list = [] + step = size // length + for i in range(length): + ids_list.append(step * i) + return ids_list + + +class GetRunner(BaseRunner): + """run get""" + name = "get_performance" + + def __init__(self, env, metric): + super(GetRunner, self).__init__(env, metric) + + def extract_cases(self, collection): + collection_name = collection["collection_name"] if "collection_name" in collection else None + (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name) + ni_per = collection["ni_per"] + vector_type = utils.get_vector_type(data_type) + 
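+        # ids for each case come from get_ids() above: evenly spaced over the
+        # collection, e.g. get_ids(4, 1000000) -> [0, 250000, 500000, 750000];
+        # ids_length_list (read below) sets how many ids are fetched per case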
other_fields = collection["other_fields"] if "other_fields" in collection else None + ids_length_list = collection["ids_length_list"] + collection_info = { + "dimension": dimension, + "metric_type": metric_type, + "dataset_name": collection_name, + "collection_size": collection_size, + "other_fields": other_fields, + "ni_per": ni_per + } + index_field_name = utils.get_default_field_name(vector_type) + index_type = collection["index_type"] + index_param = collection["index_param"] + index_info = { + "index_type": index_type, + "index_param": index_param + } + flush = True + if "flush" in collection and collection["flush"] == "no": + flush = False + self.init_metric(self.name, collection_info, index_info, search_info=None) + case_metrics = list() + for ids_length in ids_length_list: + ids = get_ids(ids_length, collection_size) + case_metric = copy.deepcopy(self.metric) + case_metric.set_case_metric_type() + case_params = list() + case_metric.run_params = {"ids_length": ids_length} + case_metrics.append(case_metric) + case_param = { + "collection_name": collection_name, + "data_type": data_type, + "dimension": dimension, + "collection_size": collection_size, + "ni_per": ni_per, + "metric_type": metric_type, + "vector_type": vector_type, + "other_fields": other_fields, + "flush_after_insert": flush, + "index_field_name": index_field_name, + "index_type": index_type, + "index_param": index_param, + "ids": ids + } + case_params.append(case_param) + return case_params, case_metrics + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + self.milvus.set_collection(collection_name) + if not self.milvus.exists_collection(): + logger.info("collection not exist") + logger.debug({"collection count": self.milvus.count()}) + + def run_case(self, case_metric, **case_param): + ids = case_param["ids"] + start_time = time.time() + self.milvus.get(ids) + get_time = round(time.time() - start_time, 2) + tmp_result = {"get_time": get_time} + return tmp_result + + +class InsertGetRunner(GetRunner): + """run insert and get""" + name = "insert_get_performance" + + def __init__(self, env, metric): + super(InsertGetRunner, self).__init__(env, metric) + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + dimension = case_param["dimension"] + vector_type = case_param["vector_type"] + other_fields = case_param["other_fields"] + self.milvus.set_collection(collection_name) + if self.milvus.exists_collection(): + logger.debug("Start drop collection") + self.milvus.drop() + time.sleep(utils.DELETE_INTERVAL_TIME) + self.milvus.create_collection(dimension, data_type=vector_type, other_fields=other_fields) + self.insert(self.milvus, collection_name, case_param["data_type"], dimension, + case_param["collection_size"], case_param["ni_per"]) + start_time = time.time() + self.milvus.flush() + flush_time = round(time.time() - start_time, 2) + logger.debug({"collection count": self.milvus.count()}) + logger.debug({"flush_time": flush_time}) + logger.debug("Start load collection") + self.milvus.load_collection(timeout=1200) + logger.debug("Load collection end") diff --git a/tests/benchmark/milvus_benchmark/runners/insert.py b/tests/benchmark/milvus_benchmark/runners/insert.py new file mode 100644 index 0000000000..236fe68e19 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/insert.py @@ -0,0 +1,243 @@ +import time +import pdb +import copy +import logging +from milvus_benchmark import parser +from milvus_benchmark.runners import utils +from 
milvus_benchmark.runners.base import BaseRunner + +logger = logging.getLogger("milvus_benchmark.runners.insert") + + +class InsertRunner(BaseRunner): + """run insert""" + name = "insert_performance" + + def __init__(self, env, metric): + super(InsertRunner, self).__init__(env, metric) + + def extract_cases(self, collection): + collection_name = collection["collection_name"] if "collection_name" in collection else None + (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name) + ni_per = collection["ni_per"] + build_index = collection["build_index"] if "build_index" in collection else False + index_info = None + vector_type = utils.get_vector_type(data_type) + other_fields = collection["other_fields"] if "other_fields" in collection else None + collection_info = { + "dimension": dimension, + "metric_type": metric_type, + "dataset_name": collection_name, + "collection_size": collection_size, + "other_fields": other_fields, + "ni_per": ni_per + } + index_field_name = None + index_type = None + index_param = None + if build_index is True: + index_type = collection["index_type"] + index_param = collection["index_param"] + index_info = { + "index_type": index_type, + "index_param": index_param + } + index_field_name = utils.get_default_field_name(vector_type) + flush = True + if "flush" in collection and collection["flush"] == "no": + flush = False + self.init_metric(self.name, collection_info, index_info, None) + case_metric = copy.deepcopy(self.metric) + case_metric.set_case_metric_type() + case_metrics = list() + case_params = list() + case_metrics.append(case_metric) + case_param = { + "collection_name": collection_name, + "data_type": data_type, + "dimension": dimension, + "collection_size": collection_size, + "ni_per": ni_per, + "metric_type": metric_type, + "vector_type": vector_type, + "other_fields": other_fields, + "build_index": build_index, + "flush_after_insert": flush, + "index_field_name": index_field_name, + "index_type": index_type, + "index_param": index_param, + } + case_params.append(case_param) + return case_params, case_metrics + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + dimension = case_param["dimension"] + vector_type = case_param["vector_type"] + other_fields = case_param["other_fields"] + index_field_name = case_param["index_field_name"] + build_index = case_param["build_index"] + + self.milvus.set_collection(collection_name) + if self.milvus.exists_collection(): + logger.debug("Start drop collection") + self.milvus.drop() + time.sleep(utils.DELETE_INTERVAL_TIME) + self.milvus.create_collection(dimension, data_type=vector_type, + other_fields=other_fields) + # TODO: update fields in collection_info + # fields = self.get_fields(self.milvus, collection_name) + # collection_info = { + # "dimension": dimension, + # "metric_type": metric_type, + # "dataset_name": collection_name, + # "fields": fields + # } + if build_index is True: + if case_param["index_type"]: + self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"]) + logger.debug(self.milvus.describe_index(index_field_name)) + else: + build_index = False + logger.warning("Please specify the index_type") + + # TODO: error handler + def run_case(self, case_metric, **case_param): + collection_name = case_param["collection_name"] + dimension = case_param["dimension"] + index_field_name = case_param["index_field_name"] + build_index = case_param["build_index"] + + 
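+        # self.insert() on the base runner reports {"total_time", "rps", "ni_time"};
+        # flush_time and build_time are merged in below so one case metric covers
+        # the full insert -> flush -> (optional) build sequence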
tmp_result = self.insert(self.milvus, collection_name, case_param["data_type"], dimension, case_param["collection_size"], case_param["ni_per"]) + flush_time = 0.0 + build_time = 0.0 + if case_param["flush_after_insert"] is True: + start_time = time.time() + self.milvus.flush() + flush_time = round(time.time()-start_time, 2) + logger.debug(self.milvus.count()) + if build_index is True: + logger.debug("Start build index for last file") + start_time = time.time() + self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"]) + build_time = round(time.time()-start_time, 2) + tmp_result.update({"flush_time": flush_time, "build_time": build_time}) + return tmp_result + + +class BPInsertRunner(BaseRunner): + """run insert""" + name = "bp_insert_performance" + + def __init__(self, env, metric): + super(BPInsertRunner, self).__init__(env, metric) + + def extract_cases(self, collection): + collection_name = collection["collection_name"] if "collection_name" in collection else None + (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name) + ni_pers = collection["ni_pers"] + build_index = collection["build_index"] if "build_index" in collection else False + index_info = None + vector_type = utils.get_vector_type(data_type) + other_fields = collection["other_fields"] if "other_fields" in collection else None + index_field_name = None + index_type = None + index_param = None + if build_index is True: + index_type = collection["index_type"] + index_param = collection["index_param"] + index_info = { + "index_type": index_type, + "index_param": index_param + } + index_field_name = utils.get_default_field_name(vector_type) + flush = True + if "flush" in collection and collection["flush"] == "no": + flush = False + case_metrics = list() + case_params = list() + + for ni_per in ni_pers: + collection_info = { + "dimension": dimension, + "metric_type": metric_type, + "dataset_name": collection_name, + "collection_size": collection_size, + "other_fields": other_fields, + "ni_per": ni_per + } + self.init_metric(self.name, collection_info, index_info, None) + case_metric = copy.deepcopy(self.metric) + case_metric.set_case_metric_type() + case_metrics.append(case_metric) + case_param = { + "collection_name": collection_name, + "data_type": data_type, + "dimension": dimension, + "collection_size": collection_size, + "ni_per": ni_per, + "metric_type": metric_type, + "vector_type": vector_type, + "other_fields": other_fields, + "build_index": build_index, + "flush_after_insert": flush, + "index_field_name": index_field_name, + "index_type": index_type, + "index_param": index_param, + } + case_params.append(case_param) + return case_params, case_metrics + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + dimension = case_param["dimension"] + vector_type = case_param["vector_type"] + other_fields = case_param["other_fields"] + index_field_name = case_param["index_field_name"] + build_index = case_param["build_index"] + + self.milvus.set_collection(collection_name) + if self.milvus.exists_collection(): + logger.debug("Start drop collection") + self.milvus.drop() + time.sleep(utils.DELETE_INTERVAL_TIME) + self.milvus.create_collection(dimension, data_type=vector_type, + other_fields=other_fields) + # TODO: update fields in collection_info + # fields = self.get_fields(self.milvus, collection_name) + # collection_info = { + # "dimension": dimension, + # "metric_type": 
metric_type, + # "dataset_name": collection_name, + # "fields": fields + # } + if build_index is True: + if case_param["index_type"]: + self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"]) + logger.debug(self.milvus.describe_index(index_field_name)) + else: + build_index = False + logger.warning("Please specify the index_type") + + # TODO: error handler + def run_case(self, case_metric, **case_param): + collection_name = case_param["collection_name"] + dimension = case_param["dimension"] + index_field_name = case_param["index_field_name"] + build_index = case_param["build_index"] + # TODO: + tmp_result = self.insert(self.milvus, collection_name, case_param["data_type"], dimension, case_param["collection_size"], case_param["ni_per"]) + flush_time = 0.0 + build_time = 0.0 + if case_param["flush_after_insert"] is True: + start_time = time.time() + self.milvus.flush() + flush_time = round(time.time()-start_time, 2) + logger.debug(self.milvus.count()) + if build_index is True: + logger.debug("Start build index for last file") + start_time = time.time() + self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"]) + build_time = round(time.time()-start_time, 2) + tmp_result.update({"flush_time": flush_time, "build_time": build_time}) + return tmp_result diff --git a/tests/benchmark/milvus_benchmark/runners/locust.py b/tests/benchmark/milvus_benchmark/runners/locust.py new file mode 100644 index 0000000000..4bf268fa0f --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/locust.py @@ -0,0 +1,399 @@ +import pdb +import time +import copy +import logging +from . import locust_user +from .base import BaseRunner +from milvus_benchmark import parser +from milvus_benchmark import utils +from milvus_benchmark.runners import utils as runner_utils + +logger = logging.getLogger("milvus_benchmark.runners.locust") + + +class LocustRunner(BaseRunner): + def __init__(self, env, metric): + super(LocustRunner, self).__init__(env, metric) + + def run_case(self, case_metric, **case_param): + collection_name = case_param["collection_name"] + task = case_param["task"] + connection_type = case_param["connection_type"] + + # spawn locust requests + task["during_time"] = utils.timestr_to_int(task["during_time"]) + task_types = task["types"] + run_params = {"tasks": {}} + run_params.update(task) + info_in_params = { + "index_field_name": case_param["index_field_name"], + "vector_field_name": case_param["vector_field_name"], + "dimension": case_param["dimension"], + "collection_info": self.milvus.get_info(collection_name)} + logger.info(info_in_params) + run_params.update({"op_info": info_in_params}) + for task_type in task_types: + run_params["tasks"].update({ + task_type["type"]: { + "weight": task_type["weight"] if "weight" in task_type else 1, + "params": task_type["params"] if "params" in task_type else None, + } + }) + # collect stats + # pdb.set_trace() + logger.info(run_params) + locust_stats = locust_user.locust_executor(self.hostname, self.port, collection_name, + connection_type=connection_type, run_params=run_params) + return locust_stats + + +class LocustInsertRunner(LocustRunner): + """run insert""" + name = "locust_insert_performance" + + def __init__(self, env, metric): + super(LocustInsertRunner, self).__init__(env, metric) + + def extract_cases(self, collection): + collection_name = collection["collection_name"] if "collection_name" in 
collection else None + + (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name) + ni_per = collection["ni_per"] + build_index = collection["build_index"] if "build_index" in collection else False + vector_type = runner_utils.get_vector_type(data_type) + other_fields = collection["other_fields"] if "other_fields" in collection else None + collection_info = { + "dimension": dimension, + "metric_type": metric_type, + "dataset_name": collection_name, + "collection_size": collection_size, + "other_fields": other_fields, + "ni_per": ni_per + } + index_field_name = None + index_type = None + index_param = None + index_info = None + vector_field_name = runner_utils.get_default_field_name(vector_type) + if build_index is True: + index_type = collection["index_type"] + index_param = collection["index_param"] + index_info = { + "index_type": index_type, + "index_param": index_param + } + index_field_name = runner_utils.get_default_field_name(vector_type) + task = collection["task"] + connection_type = "single" + connection_num = task["connection_num"] + if connection_num > 1: + connection_type = "multi" + run_params = { + "task": collection["task"], + "connection_type": connection_type, + } + self.init_metric(self.name, collection_info, index_info, None, run_params) + case_metric = copy.deepcopy(self.metric) + case_metric.set_case_metric_type() + case_metrics = list() + case_params = list() + case_metrics.append(case_metric) + case_param = { + "collection_name": collection_name, + "data_type": data_type, + "dimension": dimension, + "collection_size": collection_size, + "ni_per": ni_per, + "metric_type": metric_type, + "vector_type": vector_type, + "other_fields": other_fields, + "build_index": build_index, + "index_field_name": index_field_name, + "vector_field_name": vector_field_name, + "index_type": index_type, + "index_param": index_param, + "task": collection["task"], + "connection_type": connection_type, + } + case_params.append(case_param) + return case_params, case_metrics + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + dimension = case_param["dimension"] + vector_type = case_param["vector_type"] + other_fields = case_param["other_fields"] + index_field_name = case_param["index_field_name"] + build_index = case_param["build_index"] + + self.milvus.set_collection(collection_name) + if self.milvus.exists_collection(): + logger.debug("Start drop collection") + self.milvus.drop() + time.sleep(runner_utils.DELETE_INTERVAL_TIME) + self.milvus.create_collection(dimension, data_type=vector_type, + other_fields=other_fields) + # TODO: update fields in collection_info + # fields = self.get_fields(self.milvus, collection_name) + # collection_info = { + # "dimension": dimension, + # "metric_type": metric_type, + # "dataset_name": collection_name, + # "fields": fields + # } + if build_index is True: + if case_param["index_type"]: + self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"]) + logger.debug(self.milvus.describe_index(index_field_name)) + else: + build_index = False + logger.warning("Please specify the index_type") + + +class LocustSearchRunner(LocustRunner): + """run search""" + name = "locust_search_performance" + + def __init__(self, env, metric): + super(LocustSearchRunner, self).__init__(env, metric) + + def extract_cases(self, collection): + collection_name = collection["collection_name"] if "collection_name" in collection else 
None + (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name) + ni_per = collection["ni_per"] + build_index = collection["build_index"] if "build_index" in collection else False + vector_type = runner_utils.get_vector_type(data_type) + other_fields = collection["other_fields"] if "other_fields" in collection else None + + collection_info = { + "dimension": dimension, + "metric_type": metric_type, + "dataset_name": collection_name, + "collection_size": collection_size, + "other_fields": other_fields, + "ni_per": ni_per + } + index_field_name = None + index_type = None + index_param = None + index_info = None + if build_index is True: + index_type = collection["index_type"] + index_param = collection["index_param"] + index_info = { + "index_type": index_type, + "index_param": index_param + } + index_field_name = runner_utils.get_default_field_name(vector_type) + vector_field_name = runner_utils.get_default_field_name(vector_type) + task = collection["task"] + connection_type = "single" + connection_num = task["connection_num"] + if connection_num > 1: + connection_type = "multi" + run_params = { + "task": collection["task"], + "connection_type": connection_type, + } + self.init_metric(self.name, collection_info, index_info, None, run_params) + case_metric = copy.deepcopy(self.metric) + case_metric.set_case_metric_type() + case_metrics = list() + case_params = list() + case_metrics.append(case_metric) + case_param = { + "collection_name": collection_name, + "data_type": data_type, + "dimension": dimension, + "collection_size": collection_size, + "ni_per": ni_per, + "metric_type": metric_type, + "vector_type": vector_type, + "other_fields": other_fields, + "build_index": build_index, + "index_field_name": index_field_name, + "vector_field_name": vector_field_name, + "index_type": index_type, + "index_param": index_param, + "task": collection["task"], + "connection_type": connection_type, + } + case_params.append(case_param) + return case_params, case_metrics + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + dimension = case_param["dimension"] + vector_type = case_param["vector_type"] + other_fields = case_param["other_fields"] + index_field_name = case_param["index_field_name"] + metric_type = case_param["metric_type"] + build_index = case_param["build_index"] + + self.milvus.set_collection(collection_name) + if self.milvus.exists_collection(): + logger.debug("Start drop collection") + self.milvus.drop() + time.sleep(runner_utils.DELETE_INTERVAL_TIME) + self.milvus.create_collection(dimension, data_type=vector_type, + other_fields=other_fields) + # TODO: update fields in collection_info + # fields = self.get_fields(self.milvus, collection_name) + # collection_info = { + # "dimension": dimension, + # "metric_type": metric_type, + # "dataset_name": collection_name, + # "fields": fields + # } + if build_index is True: + if case_param["index_type"]: + self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"]) + logger.debug(self.milvus.describe_index(index_field_name)) + else: + build_index = False + logger.warning("Please specify the index_type") + self.insert(self.milvus, collection_name, case_param["data_type"], dimension, case_param["collection_size"], case_param["ni_per"]) + build_time = 0.0 + start_time = time.time() + self.milvus.flush() + flush_time = round(time.time()-start_time, 2) + logger.debug(self.milvus.count()) + if build_index 
is True: + logger.debug("Start build index for last file") + start_time = time.time() + self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"]) + build_time = round(time.time()-start_time, 2) + logger.debug({"flush_time": flush_time, "build_time": build_time}) + logger.info(self.milvus.count()) + logger.info("Start load collection") + load_start_time = time.time() + self.milvus.load_collection() + logger.debug({"load_time": round(time.time()-load_start_time, 2)}) + # search_param = None + # for op in case_param["task"]["types"]: + # if op["type"] == "query": + # search_param = op["params"]["search_param"] + # break + # logger.info("index_field_name: {}".format(index_field_name)) + # TODO: enable warm query + # self.milvus.warm_query(index_field_name, search_param, metric_type, times=2) + + +class LocustRandomRunner(LocustRunner): + """run random interface""" + name = "locust_random_performance" + + def __init__(self, env, metric): + super(LocustRandomRunner, self).__init__(env, metric) + + def extract_cases(self, collection): + collection_name = collection["collection_name"] if "collection_name" in collection else None + (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name) + ni_per = collection["ni_per"] + build_index = collection["build_index"] if "build_index" in collection else False + vector_type = runner_utils.get_vector_type(data_type) + other_fields = collection["other_fields"] if "other_fields" in collection else None + + collection_info = { + "dimension": dimension, + "metric_type": metric_type, + "dataset_name": collection_name, + "collection_size": collection_size, + "other_fields": other_fields, + "ni_per": ni_per + } + index_field_name = None + index_type = None + index_param = None + index_info = None + vector_field_name = runner_utils.get_default_field_name(vector_type) + if build_index is True: + index_type = collection["index_type"] + index_param = collection["index_param"] + index_info = { + "index_type": index_type, + "index_param": index_param + } + index_field_name = runner_utils.get_default_field_name(vector_type) + task = collection["task"] + connection_type = "single" + connection_num = task["connection_num"] + if connection_num > 1: + connection_type = "multi" + run_params = { + "task": collection["task"], + "connection_type": connection_type, + } + self.init_metric(self.name, collection_info, index_info, None, run_params) + case_metric = copy.deepcopy(self.metric) + case_metric.set_case_metric_type() + case_metrics = list() + case_params = list() + case_metrics.append(case_metric) + case_param = { + "collection_name": collection_name, + "data_type": data_type, + "dimension": dimension, + "collection_size": collection_size, + "ni_per": ni_per, + "metric_type": metric_type, + "vector_type": vector_type, + "other_fields": other_fields, + "build_index": build_index, + "index_field_name": index_field_name, + "vector_field_name": vector_field_name, + "index_type": index_type, + "index_param": index_param, + "task": collection["task"], + "connection_type": connection_type, + } + case_params.append(case_param) + return case_params, case_metrics + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + dimension = case_param["dimension"] + vector_type = case_param["vector_type"] + other_fields = case_param["other_fields"] + index_field_name = case_param["index_field_name"] + build_index = case_param["build_index"] + + 
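+        # same preparation flow as LocustInsertRunner/LocustSearchRunner.prepare:
+        # drop any stale collection, recreate it, optionally create the index,
+        # bulk-insert collection_size vectors, flush, rebuild, then load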
self.milvus.set_collection(collection_name) + if self.milvus.exists_collection(): + logger.debug("Start drop collection") + self.milvus.drop() + time.sleep(runner_utils.DELETE_INTERVAL_TIME) + self.milvus.create_collection(dimension, data_type=vector_type, + other_fields=other_fields) + # TODO: update fields in collection_info + # fields = self.get_fields(self.milvus, collection_name) + # collection_info = { + # "dimension": dimension, + # "metric_type": metric_type, + # "dataset_name": collection_name, + # "fields": fields + # } + if build_index is True: + if case_param["index_type"]: + self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"]) + logger.debug(self.milvus.describe_index(index_field_name)) + else: + build_index = False + logger.warning("Please specify the index_type") + self.insert(self.milvus, collection_name, case_param["data_type"], dimension, case_param["collection_size"], case_param["ni_per"]) + build_time = 0.0 + start_time = time.time() + self.milvus.flush() + flush_time = round(time.time()-start_time, 2) + logger.debug(self.milvus.count()) + if build_index is True: + logger.debug("Start build index for last file") + start_time = time.time() + self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"]) + build_time = round(time.time()-start_time, 2) + logger.debug({"flush_time": flush_time, "build_time": build_time}) + logger.info(self.milvus.count()) + logger.info("Start load collection") + load_start_time = time.time() + self.milvus.load_collection() + logger.debug({"load_time": round(time.time()-load_start_time, 2)}) diff --git a/tests/benchmark/milvus_benchmark/runners/locust_file.py b/tests/benchmark/milvus_benchmark/runners/locust_file.py new file mode 100644 index 0000000000..97f1f65d42 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/locust_file.py @@ -0,0 +1,30 @@ + +import random +from locust import HttpUser, task, between + + +collection_name = "random_1m_2048_512_ip_sq8" +headers = {'Content-Type': "application/json"} +url = '/collections/%s/vectors' % collection_name +top_k = 2 +nq = 1 +dim = 512 +vectors = [[random.random() for _ in range(dim)] for _ in range(nq)] +data = { + "search":{ + "topk": top_k, + "vectors": vectors, + "params": { + "nprobe": 1 + } + } +} + +class MyUser(HttpUser): + wait_time = between(0, 0.1) + host = "http://192.168.1.112:19122" + + @task + def search(self): + response = self.client.put(url=url, json=data, headers=headers, timeout=2) + print(response) diff --git a/tests/benchmark/milvus_benchmark/runners/locust_task.py b/tests/benchmark/milvus_benchmark/runners/locust_task.py new file mode 100644 index 0000000000..4b79b24f0b --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/locust_task.py @@ -0,0 +1,38 @@ +import time +import pdb +import random +import logging +from locust import User, events +from milvus_benchmark.client import MilvusClient + +logger = logging.getLogger("milvus_benchmark.runners.locust_task") + + +class MilvusTask(object): + def __init__(self, *args, **kwargs): + self.request_type = "grpc" + connection_type = kwargs.get("connection_type") + if connection_type == "single": + self.m = kwargs.get("m") + elif connection_type == "multi": + host = kwargs.get("host") + port = kwargs.get("port") + collection_name = kwargs.get("collection_name") + self.m = MilvusClient(host=host, port=port, collection_name=collection_name) + + def 
__getattr__(self, name):
+        func = getattr(self.m, name)
+
+        def wrapper(*args, **kwargs):
+            start_time = time.time()
+            try:
+                result = func(*args, **kwargs)
+                total_time = int((time.time() - start_time) * 1000)
+                events.request_success.fire(request_type=self.request_type, name=name, response_time=total_time,
+                                            response_length=0)
+                # propagate the client's result so callers are not silently handed None
+                return result
+            except Exception as e:
+                total_time = int((time.time() - start_time) * 1000)
+                events.request_failure.fire(request_type=self.request_type, name=name, response_time=total_time,
+                                            exception=e, response_length=0)
+
+        return wrapper
diff --git a/tests/benchmark/milvus_benchmark/runners/locust_tasks.py b/tests/benchmark/milvus_benchmark/runners/locust_tasks.py
new file mode 100644
index 0000000000..b00cc538c1
--- /dev/null
+++ b/tests/benchmark/milvus_benchmark/runners/locust_tasks.py
@@ -0,0 +1,79 @@
+import pdb
+import random
+import time
+import logging
+import math
+from locust import TaskSet, task
+from . import utils
+
+logger = logging.getLogger("milvus_benchmark.runners.locust_tasks")
+
+
+class Tasks(TaskSet):
+    @task
+    def query(self):
+        op = "query"
+        # X = utils.generate_vectors(self.params[op]["nq"], self.op_info["dimension"])
+        vector_query = {"vector": {self.op_info["vector_field_name"]: {
+            "topk": self.params[op]["top_k"],
+            "query": self.values["X"][:self.params[op]["nq"]],
+            "metric_type": self.params[op]["metric_type"] if "metric_type" in self.params[op] else utils.DEFAULT_METRIC_TYPE,
+            "params": self.params[op]["search_param"]}
+        }}
+        filter_query = []
+        if "filters" in self.params[op]:
+            for filter in self.params[op]["filters"]:
+                if isinstance(filter, dict) and "range" in filter:
+                    filter_query.append(eval(filter["range"]))
+                if isinstance(filter, dict) and "term" in filter:
+                    filter_query.append(eval(filter["term"]))
+        # logger.debug(filter_query)
+        self.client.query(vector_query, filter_query=filter_query, log=False, timeout=30)
+
+    @task
+    def flush(self):
+        self.client.flush(log=False, timeout=30)
+
+    @task
+    def load(self):
+        self.client.load_collection(timeout=30)
+
+    @task
+    def release(self):
+        self.client.release_collection()
+        self.client.load_collection(timeout=30)
+
+    # @task
+    # def release_index(self):
+    #     self.client.release_index()
+
+    # @task
+    # def create_index(self):
+    #     self.client.release_index()
+
+    @task
+    def insert(self):
+        op = "insert"
+        # ids = [random.randint(1000000, 10000000) for _ in range(self.params[op]["ni_per"])]
+        # X = [[random.random() for _ in range(self.op_info["dimension"])] for _ in range(self.params[op]["ni_per"])]
+        entities = utils.generate_entities(self.op_info["collection_info"], self.values["X"][:self.params[op]["ni_per"]], self.values["ids"][:self.params[op]["ni_per"]])
+        self.client.insert(entities, log=False)
+
+    @task
+    def insert_flush(self):
+        op = "insert_flush"
+        # ids = [random.randint(1000000, 10000000) for _ in range(self.params[op]["ni_per"])]
+        # X = [[random.random() for _ in range(self.op_info["dimension"])] for _ in range(self.params[op]["ni_per"])]
+        entities = utils.generate_entities(self.op_info["collection_info"], self.values["X"][:self.params[op]["ni_per"]], self.values["ids"][:self.params[op]["ni_per"]])
+        self.client.insert(entities, log=False)
+        self.client.flush(log=False)
+
+    @task
+    def insert_rand(self):
+        self.client.insert_rand(log=False)
+
+    @task
+    def get(self):
+        op = "get"
+        # ids = [random.randint(1, 10000000) for _ in range(self.params[op]["ids_length"])]
+        self.client.get(self.values["get_ids"][:self.params[op]["ids_length"]])
diff --git 
a/tests/benchmark/milvus_benchmark/runners/locust_user.py b/tests/benchmark/milvus_benchmark/runners/locust_user.py new file mode 100644 index 0000000000..16ee4fa1e8 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/locust_user.py @@ -0,0 +1,109 @@ +import logging +import random +import pdb +import gevent +# import gevent.monkey +# gevent.monkey.patch_all() +from locust import User, between, events, stats +from locust.env import Environment +import locust.stats +import math +from locust import LoadTestShape +from locust.stats import stats_printer, print_stats +from locust.log import setup_logging, greenlet_exception_logger +from milvus_benchmark.client import MilvusClient +from .locust_task import MilvusTask +from .locust_tasks import Tasks +from . import utils + +locust.stats.CONSOLE_STATS_INTERVAL_SEC = 20 +logger = logging.getLogger("milvus_benchmark.runners.locust_user") +nq = 10000 +nb = 100000 + + +class StepLoadShape(LoadTestShape): + """ + A step load shape + Keyword arguments: + step_time -- Time between steps + step_load -- User increase amount at each step + spawn_rate -- Users to stop/start per second at every step + time_limit -- Time limit in seconds + """ + + def init(self, step_time, step_load, spawn_rate, time_limit): + self.step_time = step_time + self.step_load = step_load + self.spawn_rate = spawn_rate + self.time_limit = time_limit + + def tick(self): + run_time = self.get_run_time() + + if run_time > self.time_limit: + return None + + current_step = math.floor(run_time / self.step_time) + 1 + return (current_step * self.step_load, self.spawn_rate) + + +class MyUser(User): + # task_set = None + # wait_time = between(0.001, 0.002) + pass + + +def locust_executor(host, port, collection_name, connection_type="single", run_params=None): + m = MilvusClient(host=host, port=port, collection_name=collection_name) + MyUser.tasks = {} + MyUser.op_info = run_params["op_info"] + MyUser.params = {} + tasks = run_params["tasks"] + for op, value in tasks.items(): + task = {eval("Tasks." 
+ op): value["weight"]} + MyUser.tasks.update(task) + MyUser.params[op] = value["params"] if "params" in value else None + logger.info(MyUser.tasks) + MyUser.values = { + "ids": [random.randint(1000000, 10000000) for _ in range(nb)], + "get_ids": [random.randint(1, 10000000) for _ in range(nb)], + "X": utils.generate_vectors(nq, MyUser.op_info["dimension"]) + } + + # MyUser.tasks = {Tasks.query: 1, Tasks.flush: 1} + MyUser.client = MilvusTask(host=host, port=port, collection_name=collection_name, connection_type=connection_type, + m=m) + if "load_shape" in run_params and run_params["load_shape"]: + test = StepLoadShape() + test.init(run_params["step_time"], run_params["step_load"], run_params["spawn_rate"], run_params["during_time"]) + env = Environment(events=events, user_classes=[MyUser], shape_class=test) + runner = env.create_local_runner() + env.runner.start_shape() + else: + env = Environment(events=events, user_classes=[MyUser]) + runner = env.create_local_runner() + # setup logging + # setup_logging("WARNING", "/dev/null") + # greenlet_exception_logger(logger=logger) + gevent.spawn(stats_printer(env.stats)) + # env.create_web_ui("127.0.0.1", 8089) + # gevent.spawn(stats_printer(env.stats), env, "test", full_history=True) + # events.init.fire(environment=env, runner=runner) + clients_num = run_params["clients_num"] if "clients_num" in run_params else 0 + step_load = run_params["step_load"] if "step_load" in run_params else 0 + step_time = run_params["step_time"] if "step_time" in run_params else 0 + spawn_rate = run_params["spawn_rate"] + during_time = run_params["during_time"] + runner.start(clients_num, spawn_rate=spawn_rate) + gevent.spawn_later(during_time, lambda: runner.quit()) + runner.greenlet.join() + print_stats(env.stats) + result = { + "rps": round(env.stats.total.current_rps, 1), + "fail_ratio": env.stats.total.fail_ratio, + "max_response_time": round(env.stats.total.max_response_time, 1), + "avg_response_time": round(env.stats.total.avg_response_time, 1) + } + runner.stop() + return result diff --git a/tests/benchmark/milvus_benchmark/runners/search.py b/tests/benchmark/milvus_benchmark/runners/search.py new file mode 100644 index 0000000000..29b7d0eb22 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/search.py @@ -0,0 +1,290 @@ +import time +import pdb +import copy +import json +import logging +from milvus_benchmark import parser +from milvus_benchmark.runners import utils +from milvus_benchmark.runners.base import BaseRunner + +logger = logging.getLogger("milvus_benchmark.runners.search") + + +class SearchRunner(BaseRunner): + """run search""" + name = "search_performance" + + def __init__(self, env, metric): + super(SearchRunner, self).__init__(env, metric) + + def extract_cases(self, collection): + collection_name = collection["collection_name"] if "collection_name" in collection else None + (data_type, collection_size, dimension, metric_type) = parser.collection_parser(collection_name) + run_count = collection["run_count"] + top_ks = collection["top_ks"] + nqs = collection["nqs"] + filters = collection["filters"] if "filters" in collection else [] + + search_params = collection["search_params"] + # TODO: get fields by describe_index + # fields = self.get_fields(self.milvus, collection_name) + fields = None + collection_info = { + "dimension": dimension, + "metric_type": metric_type, + "dataset_name": collection_name, + "collection_size": collection_size, + "fields": fields + } + # TODO: need to get index_info + index_info = None + vector_type = 
+        for search_param in search_params:
+            logger.info("Search param: %s" % json.dumps(search_param))
+            for filter in filters:
+                filter_query = []
+                filter_param = []
+                if filter and isinstance(filter, dict):
+                    if "range" in filter:
+                        # filter expressions come from the suite YAML as python literals
+                        filter_query.append(eval(filter["range"]))
+                        filter_param.append(filter["range"])
+                    elif "term" in filter:
+                        filter_query.append(eval(filter["term"]))
+                        filter_param.append(filter["term"])
+                    else:
+                        raise Exception("%s not supported" % filter)
+                logger.info("filter param: %s" % json.dumps(filter_param))
+                for nq in nqs:
+                    query_vectors = base_query_vectors[0:nq]
+                    for top_k in top_ks:
+                        search_info = {
+                            "topk": top_k,
+                            "query": query_vectors,
+                            "metric_type": utils.metric_type_trans(metric_type),
+                            "params": search_param}
+                        # TODO: only update search_info
+                        case_metric = copy.deepcopy(self.metric)
+                        case_metric.set_case_metric_type()
+                        case_metric.search = {
+                            "nq": nq,
+                            "topk": top_k,
+                            "search_param": search_param,
+                            "filter": filter_param
+                        }
+                        vector_query = {"vector": {index_field_name: search_info}}
+                        case = {
+                            "collection_name": collection_name,
+                            "index_field_name": index_field_name,
+                            "run_count": run_count,
+                            "filter_query": filter_query,
+                            "vector_query": vector_query,
+                        }
+                        cases.append(case)
+                        case_metrics.append(case_metric)
+        return cases, case_metrics
+
+    def prepare(self, **case_param):
+        collection_name = case_param["collection_name"]
+        self.milvus.set_collection(collection_name)
+        if not self.milvus.exists_collection():
+            logger.error("collection name: {} does not exist".format(collection_name))
+            return False
+        logger.debug(self.milvus.count())
+        logger.info("Start load collection")
+        self.milvus.load_collection(timeout=1200)
+        # TODO: enable warm query
+        # self.milvus.warm_query(index_field_name, search_params[0], times=2)
+
+    def run_case(self, case_metric, **case_param):
+        # index_field_name = case_param["index_field_name"]
+        run_count = case_param["run_count"]
+        min_query_time = 0.0
+        total_query_time = 0.0
+        for i in range(run_count):
+            logger.debug("Start run query, run %d of %d" % (i + 1, run_count))
+            start_time = time.time()
+            _query_res = self.milvus.query(case_param["vector_query"], filter_query=case_param["filter_query"])
+            interval_time = time.time() - start_time
+            total_query_time += interval_time
+            if (i == 0) or (min_query_time > interval_time):
+                min_query_time = round(interval_time, 2)
+        avg_query_time = round(total_query_time / run_count, 2)
+        tmp_result = {"search_time": min_query_time, "avg_search_time": avg_query_time}
+        return tmp_result
+
+
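+# InsertSearchRunner differs from SearchRunner in that it prepares the
+# collection from scratch for every case: drop/create, bulk insert, flush and
+# an optional index build all happen before the same search matrix is run.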
collection["index_param"] if "index_param" in collection else None + run_count = collection["run_count"] + top_ks = collection["top_ks"] + nqs = collection["nqs"] + other_fields = collection["other_fields"] if "other_fields" in collection else None + filters = collection["filters"] if "filters" in collection else [] + filter_query = [] + search_params = collection["search_params"] + ni_per = collection["ni_per"] + + # TODO: get fields by describe_index + # fields = self.get_fields(self.milvus, collection_name) + fields = None + collection_info = { + "dimension": dimension, + "metric_type": metric_type, + "dataset_name": collection_name, + "fields": fields + } + index_info = { + "index_type": index_type, + "index_param": index_param + } + vector_type = utils.get_vector_type(data_type) + index_field_name = utils.get_default_field_name(vector_type) + base_query_vectors = utils.get_vectors_from_binary(utils.MAX_NQ, dimension, data_type) + cases = list() + case_metrics = list() + self.init_metric(self.name, collection_info, index_info, None) + + for search_param in search_params: + if not filters: + filters.append(None) + for filter in filters: + # filter_param = [] + filter_query = [] + if isinstance(filter, dict) and "range" in filter: + filter_query.append(eval(filter["range"])) + # filter_param.append(filter["range"]) + if isinstance(filter, dict) and "term" in filter: + filter_query.append(eval(filter["term"])) + # filter_param.append(filter["term"]) + # logger.info("filter param: %s" % json.dumps(filter_param)) + for nq in nqs: + query_vectors = base_query_vectors[0:nq] + for top_k in top_ks: + search_info = { + "topk": top_k, + "query": query_vectors, + "metric_type": utils.metric_type_trans(metric_type), + "params": search_param} + # TODO: only update search_info + case_metric = copy.deepcopy(self.metric) + case_metric.set_case_metric_type() + case_metric.search = { + "nq": nq, + "topk": top_k, + "search_param": search_param, + "filter": filter_query + } + vector_query = {"vector": {index_field_name: search_info}} + case = { + "collection_name": collection_name, + "index_field_name": index_field_name, + "other_fields": other_fields, + "dimension": dimension, + "data_type": data_type, + "vector_type": vector_type, + "collection_size": collection_size, + "ni_per": ni_per, + "build_index": build_index, + "index_type": index_type, + "index_param": index_param, + "metric_type": metric_type, + "run_count": run_count, + "filter_query": filter_query, + "vector_query": vector_query, + } + cases.append(case) + case_metrics.append(case_metric) + return cases, case_metrics + + def prepare(self, **case_param): + collection_name = case_param["collection_name"] + dimension = case_param["dimension"] + vector_type = case_param["vector_type"] + other_fields = case_param["other_fields"] + index_field_name = case_param["index_field_name"] + build_index = case_param["build_index"] + + self.milvus.set_collection(collection_name) + if self.milvus.exists_collection(): + logger.debug("Start drop collection") + self.milvus.drop() + time.sleep(utils.DELETE_INTERVAL_TIME) + self.milvus.create_collection(dimension, data_type=vector_type, + other_fields=other_fields) + # TODO: update fields in collection_info + # fields = self.get_fields(self.milvus, collection_name) + # collection_info = { + # "dimension": dimension, + # "metric_type": metric_type, + # "dataset_name": collection_name, + # "fields": fields + # } + if build_index is True: + if case_param["index_type"]: + self.milvus.create_index(index_field_name, 
case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"]) + logger.debug(self.milvus.describe_index(index_field_name)) + else: + build_index = False + logger.warning("Please specify the index_type") + insert_result = self.insert(self.milvus, collection_name, case_param["data_type"], dimension, case_param["collection_size"], case_param["ni_per"]) + self.insert_result = insert_result + build_time = 0.0 + start_time = time.time() + self.milvus.flush() + flush_time = round(time.time()-start_time, 2) + logger.debug(self.milvus.count()) + if build_index is True: + logger.debug("Start build index for last file") + start_time = time.time() + self.milvus.create_index(index_field_name, case_param["index_type"], case_param["metric_type"], index_param=case_param["index_param"]) + build_time = round(time.time()-start_time, 2) + logger.debug({"flush_time": flush_time, "build_time": build_time}) + self.build_time = build_time + logger.info(self.milvus.count()) + logger.info("Start load collection") + load_start_time = time.time() + self.milvus.load_collection(timeout=1200) + logger.debug({"load_time": round(time.time()-load_start_time, 2)}) + + def run_case(self, case_metric, **case_param): + run_count = case_param["run_count"] + avg_query_time = 0.0 + min_query_time = 0.0 + total_query_time = 0.0 + for i in range(run_count): + logger.debug("Start run query, run %d of %s" % (i+1, run_count)) + logger.info(case_metric.search) + start_time = time.time() + _query_res = self.milvus.query(case_param["vector_query"], filter_query=case_param["filter_query"]) + interval_time = time.time() - start_time + total_query_time += interval_time + if (i == 0) or (min_query_time > interval_time): + min_query_time = round(interval_time, 2) + avg_query_time = round(total_query_time/run_count, 2) + logger.info("Min query time: %.2f, avg query time: %.2f" % (min_query_time, avg_query_time)) + tmp_result = {"insert": self.insert_result, "build_time": self.build_time, "search_time": min_query_time, "avc_search_time": avg_query_time} + # + # logger.info("Start load collection") + # self.milvus.load_collection(timeout=1200) + # logger.info("Release load collection") + # self.milvus.release_collection() + return tmp_result \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/runners/test.py b/tests/benchmark/milvus_benchmark/runners/test.py new file mode 100644 index 0000000000..9ece3b042c --- /dev/null +++ b/tests/benchmark/milvus_benchmark/runners/test.py @@ -0,0 +1,40 @@ +import math +from locust import User, TaskSet, task, constant +from locust import LoadTestShape + + +class StepLoadShape(LoadTestShape): + """ + A step load shape + Keyword arguments: + step_time -- Time between steps + step_load -- User increase amount at each step + spawn_rate -- Users to stop/start per second at every step + time_limit -- Time limit in seconds + """ + + step_time = 30 + step_load = 10 + spawn_rate = 10 + time_limit = 600 + + def tick(self): + run_time = self.get_run_time() + + if run_time > self.time_limit: + return None + + current_step = math.floor(run_time / self.step_time) + 1 + return (current_step * self.step_load, self.spawn_rate) + + +class UserTasks(TaskSet): + @task + def get_root(self): + print("in usertasks") + + +class WebsiteUser(User): + wait_time = constant(0.5) + tasks = [UserTasks] + shape = StepLoadShape diff --git a/tests/benchmark/milvus_benchmark/runners/utils.py b/tests/benchmark/milvus_benchmark/runners/utils.py new file mode 100644 index 0000000000..a882d612c4 
--- /dev/null
+++ b/tests/benchmark/milvus_benchmark/runners/utils.py
@@ -0,0 +1,265 @@
+import os
+import logging
+import random
+from itertools import product
+
+import numpy as np
+import sklearn.preprocessing
+import h5py
+
+from pymilvus import DataType
+from milvus_benchmark import config
+
+logger = logging.getLogger("milvus_benchmark.runners.utils")
+
+DELETE_INTERVAL_TIME = 2
+
+VECTORS_PER_FILE = 1000000
+SIFT_VECTORS_PER_FILE = 100000
+BINARY_VECTORS_PER_FILE = 2000000
+
+MAX_NQ = 10001
+FILE_PREFIX = "binary_"
+
+WARM_TOP_K = 1
+WARM_NQ = 1
+DEFAULT_DIM = 512
+DEFAULT_METRIC_TYPE = "L2"
+
+RANDOM_SRC_DATA_DIR = config.RAW_DATA_DIR + 'random/'
+SIFT_SRC_DATA_DIR = config.RAW_DATA_DIR + 'sift1b/'
+DEEP_SRC_DATA_DIR = config.RAW_DATA_DIR + 'deep1b/'
+JACCARD_SRC_DATA_DIR = config.RAW_DATA_DIR + 'jaccard/'
+HAMMING_SRC_DATA_DIR = config.RAW_DATA_DIR + 'hamming/'
+STRUCTURE_SRC_DATA_DIR = config.RAW_DATA_DIR + 'structure/'
+BINARY_SRC_DATA_DIR = config.RAW_DATA_DIR + 'binary/'
+SIFT_SRC_GROUNDTRUTH_DATA_DIR = SIFT_SRC_DATA_DIR + 'gnd'
+
+DEFAULT_F_FIELD_NAME = 'float_vector'
+DEFAULT_B_FIELD_NAME = 'binary_vector'
+DEFAULT_INT_FIELD_NAME = 'int64'
+DEFAULT_FLOAT_FIELD_NAME = 'float'
+DEFAULT_DOUBLE_FIELD_NAME = "double"
+
+GROUNDTRUTH_MAP = {
+    "1000000": "idx_1M.ivecs",
+    "2000000": "idx_2M.ivecs",
+    "5000000": "idx_5M.ivecs",
+    "10000000": "idx_10M.ivecs",
+    "20000000": "idx_20M.ivecs",
+    "50000000": "idx_50M.ivecs",
+    "100000000": "idx_100M.ivecs",
+    "200000000": "idx_200M.ivecs",
+    "500000000": "idx_500M.ivecs",
+    "1000000000": "idx_1000M.ivecs",
+}
+
+METRIC_MAP = {
+    "l2": "L2",
+    "ip": "IP",
+    "jaccard": "JACCARD",
+    "hamming": "HAMMING",
+    "sub": "SUBSTRUCTURE",
+    "super": "SUPERSTRUCTURE"
+}
+
+
+def get_len_vectors_per_file(data_type, dimension):
+    if data_type == "random":
+        if dimension == 512:
+            vectors_per_file = VECTORS_PER_FILE
+        elif dimension == 4096:
+            vectors_per_file = 100000
+        elif dimension == 16384:
+            vectors_per_file = 10000
+        else:
+            raise Exception("dimension: %d not supported for random data" % dimension)
+    elif data_type == "sift":
+        vectors_per_file = SIFT_VECTORS_PER_FILE
+    elif data_type in ["binary"]:
+        vectors_per_file = BINARY_VECTORS_PER_FILE
+    elif data_type == "local":
+        vectors_per_file = SIFT_VECTORS_PER_FILE
+    else:
+        raise Exception("data_type: %s not supported" % data_type)
+    return vectors_per_file
+
+
+def get_vectors_from_binary(nq, dimension, data_type):
+    # use the first file, nq should be less than VECTORS_PER_FILE
+    if nq > MAX_NQ:
+        raise Exception("nq: %d is larger than MAX_NQ: %d" % (nq, MAX_NQ))
+    if data_type == "local":
+        return generate_vectors(nq, dimension)
+    elif data_type == "random":
+        file_name = RANDOM_SRC_DATA_DIR + 'query_%d.npy' % dimension
+    elif data_type == "sift":
+        file_name = SIFT_SRC_DATA_DIR + 'query.npy'
+    elif data_type == "deep":
+        file_name = DEEP_SRC_DATA_DIR + 'query.npy'
+    elif data_type == "binary":
+        file_name = BINARY_SRC_DATA_DIR + 'query.npy'
+    else:
+        raise Exception("data_type: %s not supported" % data_type)
+    data = np.load(file_name)
+    vectors = data[0:nq].tolist()
+    return vectors
+
+
+def generate_vectors(nb, dim):
+    return [[random.random() for _ in range(dim)] for _ in range(nb)]
+
+
+def generate_values(data_type, vectors, ids):
+    values = None
+    if data_type in [DataType.INT32, DataType.INT64]:
+        values = ids
+    elif data_type in [DataType.FLOAT, DataType.DOUBLE]:
+        values = [(i + 0.0) for i in ids]
+    elif data_type in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR]:
+        values = vectors
+    return values
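+
+
+# generate_entities builds the insert payload in the list-of-field-dicts form
+# used by the benchmark client: one {"name", "type", "values"} entry per field
+# in the schema, with values taken from ids for scalar fields and from the
+# generated vectors for vector fields.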
field["type"] + entities.append( + {"name": field["name"], "type": field_type, "values": generate_values(field_type, vectors, ids)}) + return entities + + +def metric_type_trans(metric_type): + if metric_type in METRIC_MAP.keys(): + return METRIC_MAP[metric_type] + else: + raise Exception("metric_type: %s not in METRIC_MAP" % metric_type) + + +def get_dataset(hdf5_file_path): + if not os.path.exists(hdf5_file_path): + raise Exception("%s not existed" % hdf5_file_path) + dataset = h5py.File(hdf5_file_path) + return dataset + + +def get_default_field_name(data_type=DataType.FLOAT_VECTOR): + if data_type == DataType.FLOAT_VECTOR: + field_name = DEFAULT_F_FIELD_NAME + elif data_type == DataType.BINARY_VECTOR: + field_name = DEFAULT_B_FIELD_NAME + elif data_type == DataType.INT64: + field_name = DEFAULT_INT_FIELD_NAME + elif data_type == DataType.FLOAT: + field_name = DEFAULT_FLOAT_FIELD_NAME + else: + logger.error(data_type) + raise Exception("Not supported data type") + return field_name + + +def get_vector_type(data_type): + vector_type = '' + if data_type in ["random", "sift", "deep", "glove", "local"]: + vector_type = DataType.FLOAT_VECTOR + elif data_type in ["binary"]: + vector_type = DataType.BINARY_VECTOR + else: + raise Exception("Data type: %s not defined" % data_type) + return vector_type + + +def get_vector_type_from_metric(metric_type): + vector_type = '' + if metric_type in ["hamming", "jaccard"]: + vector_type = DataType.BINARY_VECTOR + else: + vector_type = DataType.FLOAT_VECTOR + return vector_type + + +def normalize(metric_type, X): + if metric_type == "ip": + logger.info("Set normalize for metric_type: %s" % metric_type) + X = sklearn.preprocessing.normalize(X, axis=1, norm='l2') + X = X.astype(np.float32) + elif metric_type == "l2": + X = X.astype(np.float32) + elif metric_type in ["jaccard", "hamming", "sub", "super"]: + tmp = [] + for item in X: + new_vector = bytes(np.packbits(item, axis=-1).tolist()) + tmp.append(new_vector) + X = tmp + return X + + +def generate_combinations(args): + if isinstance(args, list): + args = [el if isinstance(el, list) else [el] for el in args] + return [list(x) for x in product(*args)] + elif isinstance(args, dict): + flat = [] + for k, v in args.items(): + if isinstance(v, list): + flat.append([(k, el) for el in v]) + else: + flat.append([(k, v)]) + return [dict(x) for x in product(*flat)] + else: + raise TypeError("No args handling exists for %s" % type(args).__name__) + + +def gen_file_name(idx, dimension, data_type): + s = "%05d" % idx + fname = FILE_PREFIX + str(dimension) + "d_" + s + ".npy" + if data_type == "random": + fname = RANDOM_SRC_DATA_DIR + fname + elif data_type == "sift": + fname = SIFT_SRC_DATA_DIR + fname + elif data_type == "deep": + fname = DEEP_SRC_DATA_DIR + fname + elif data_type == "jaccard": + fname = JACCARD_SRC_DATA_DIR + fname + elif data_type == "hamming": + fname = HAMMING_SRC_DATA_DIR + fname + elif data_type == "sub" or data_type == "super": + fname = STRUCTURE_SRC_DATA_DIR + fname + return fname + + +def get_recall_value(true_ids, result_ids): + """ + Use the intersection length + """ + sum_radio = 0.0 + for index, item in enumerate(result_ids): + # tmp = set(item).intersection(set(flat_id_list[index])) + tmp = set(true_ids[index]).intersection(set(item)) + sum_radio = sum_radio + len(tmp) / len(item) + # logger.debug(sum_radio) + return round(sum_radio / len(result_ids), 3) + + +def get_ground_truth_ids(collection_size): + fname = GROUNDTRUTH_MAP[str(collection_size)] + fname = 
+def get_ground_truth_ids(collection_size):
+    fname = GROUNDTRUTH_MAP[str(collection_size)]
+    fname = SIFT_SRC_GROUNDTRUTH_DATA_DIR + "/" + fname
+    a = np.fromfile(fname, dtype='int32')
+    d = a[0]
+    true_ids = a.reshape(-1, d + 1)[:, 1:].copy()
+    return true_ids
\ No newline at end of file
diff --git a/tests/benchmark/milvus_benchmark/scheduler.py b/tests/benchmark/milvus_benchmark/scheduler.py
new file mode 100644
index 0000000000..80409d193b
--- /dev/null
+++ b/tests/benchmark/milvus_benchmark/scheduler.py
@@ -0,0 +1,27 @@
+# import logging
+# from apscheduler.schedulers.background import BackgroundScheduler
+# from apscheduler.schedulers.blocking import BlockingScheduler
+
+# from apscheduler.jobstores.mongodb import MongoDBJobStore
+# from apscheduler.executors.pool import ProcessPoolExecutor, ThreadPoolExecutor
+# from apscheduler.executors.debug import DebugExecutor
+# import config
+# from pymongo import MongoClient
+
+# logger = logging.basicConfig()
+
+# mongo_client = MongoClient(config.MONGO_SERVER)
+# jobstores = {
+#     'default': MongoDBJobStore(database=config.SCHEDULER_DB, collection=config.JOB_COLLECTION, client=mongo_client)
+# }
+
+# executors = {
+#     'default': ThreadPoolExecutor(max_workers=100)
+# }
+
+# job_defaults = {
+#     'coalesce': True,
+#     'max_instances': 32
+# }
+# # TODO:
+# back_scheduler = BackgroundScheduler(executors=executors, job_defaults=job_defaults, logger=logger)
\ No newline at end of file
diff --git a/tests/benchmark/milvus_benchmark/scheduler/010_data.json b/tests/benchmark/milvus_benchmark/scheduler/010_data.json
new file mode 100644
index 0000000000..d7074b63f5
--- /dev/null
+++ b/tests/benchmark/milvus_benchmark/scheduler/010_data.json
@@ -0,0 +1,65 @@
+[
+    {
+        "server": "athena",
+        "suite_params": [
+            {
+                "suite": "080_gpu_accuracy.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "080_search_stability.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "gpu_accuracy_ann.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    },
+    {
+        "server": "poseidon",
+        "suite_params": [
+            {
+                "suite": "080_gpu_search.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "080_cpu_search.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "080_gpu_build.yaml",
+                "image_type": "gpu"
+            },
+            {
+                "suite": "080_cpu_accuracy.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "locust_search.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    },
+    {
+        "server": "apollo",
+        "suite_params": [
+            {
+                "suite": "cpu_accuracy_ann.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "080_cpu_build.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "080_insert_performance.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "add_flush_performance.yaml",
+                "image_type": "cpu"
+            }
+        ]
+    }
+]
diff --git a/tests/benchmark/milvus_benchmark/scheduler/011_data.json b/tests/benchmark/milvus_benchmark/scheduler/011_data.json
new file mode 100644
index 0000000000..7b40300c58
--- /dev/null
+++ b/tests/benchmark/milvus_benchmark/scheduler/011_data.json
@@ -0,0 +1,62 @@
+[
+    {
+        "server": "idc-sh002",
+        "suite_params": [
+            {
+                "suite": "011_cpu_accuracy_ann.yaml",
+                "image_type": "cpu"
+            },
+            {
+                "suite": "011_gpu_accuracy_ann.yaml",
+                "image_type": "gpu"
+            }
+        ]
+    },
+    {
+        "server": 
"idc-sh003", + "suite_params": [ + { + "suite": "locust_mix.yaml", + "image_type": "gpu" + } + ] + }, + { + "server": "idc-sh004", + "suite_params": [ + { + "suite": "011_insert_performance.yaml", + "image_type": "cpu" + }, + { + "suite": "011_gpu_accuracy.yaml", + "image_type": "gpu" + }, + { + "suite": "011_gpu_build.yaml", + "image_type": "gpu" + } + ] + }, + { + "server": "idc-sh005", + "suite_params": [ + { + "suite": "011_gpu_search.yaml", + "image_type": "gpu" + }, + { + "suite": "011_cpu_search.yaml", + "image_type": "cpu" + }, + { + "suite": "011_cpu_accuracy.yaml", + "image_type": "cpu" + }, + { + "suite": "011_locust_search.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/011_data_acc_debug.json b/tests/benchmark/milvus_benchmark/scheduler/011_data_acc_debug.json new file mode 100644 index 0000000000..3bb0df2259 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/011_data_acc_debug.json @@ -0,0 +1,11 @@ +[ + { + "server": "apollo", + "suite_params": [ + { + "suite": "011_cpu_accuracy_ann.yaml", + "image_type": "cpu" + } + ] + } +] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/scheduler/011_data_gpu_build.json b/tests/benchmark/milvus_benchmark/scheduler/011_data_gpu_build.json new file mode 100644 index 0000000000..ed9642fa91 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/011_data_gpu_build.json @@ -0,0 +1,11 @@ +[ + { + "server": "eros", + "suite_params": [ + { + "suite": "011_gpu_build_sift10m.yaml", + "image_type": "gpu" + } + ] + } +] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/scheduler/011_data_insert.json b/tests/benchmark/milvus_benchmark/scheduler/011_data_insert.json new file mode 100644 index 0000000000..c8bb875ac9 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/011_data_insert.json @@ -0,0 +1,11 @@ +[ + { + "server": "eros", + "suite_params": [ + { + "suite": "011_insert_data.yaml", + "image_type": "cpu" + } + ] + } + ] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/scheduler/011_delete.json b/tests/benchmark/milvus_benchmark/scheduler/011_delete.json new file mode 100644 index 0000000000..cc80004991 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/011_delete.json @@ -0,0 +1,15 @@ +[ + { + "server": "apollo", + "suite_params": [ + { + "suite": "011_insert_performance.yaml", + "image_type": "cpu" + }, + { + "suite": "011_delete_performance.yaml", + "image_type": "cpu" + } + ] + } +] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/scheduler/2_cluster_data.json b/tests/benchmark/milvus_benchmark/scheduler/2_cluster_data.json new file mode 100644 index 0000000000..2824c2b88d --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/2_cluster_data.json @@ -0,0 +1,41 @@ +[ + { + "server": "idc-sh002", + "deploy_mode": "cluster", + "suite_params": [ + { + "suite": "2_insert_search_sift10m_512.yaml", + "image_type": "cpu" + } + ] + }, + { + "deploy_mode": "cluster", + "suite_params": [ + { + "suite": "2_cpu_ann_accuracy.yaml", + "image_type": "cpu" + } + ] + }, + { + "server": "idc-sh003", + "deploy_mode": "cluster", + "suite_params": [ + { + "suite": "2_insert_search_sift10m_2048.yaml", + "image_type": "cpu" + } + ] + }, + { + "server": "idc-sh005", + "deploy_mode": "cluster", + "suite_params": [ + { + "suite": "2_insert_search_sift10m_4096.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/2_data.json 
b/tests/benchmark/milvus_benchmark/scheduler/2_data.json new file mode 100644 index 0000000000..e3175a0b2e --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/2_data.json @@ -0,0 +1,34 @@ +[ + { + "deploy_mode": "cluster", + "server": "idc-sh002", + "suite_params": [ + { + "suite": "2_insert_search_sift10m_2048.yaml", + "image_type": "cpu" + } + ] + }, + { + "server": "idc-sh005", + "suite_params": [ + { + "suite": "2_insert_search_sift50m_2048.yaml", + "image_type": "cpu" + } + ] + }, + { + "server": "idc-sh004", + "suite_params": [ + { + "suite": "2_locust_search.yaml", + "image_type": "cpu" + }, + { + "suite": "2_locust_random.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/acc.json b/tests/benchmark/milvus_benchmark/scheduler/acc.json new file mode 100644 index 0000000000..f4f94a84ab --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/acc.json @@ -0,0 +1,10 @@ +[ + { + "suite_params": [ + { + "suite": "2_accuracy_ann_debug.yaml", + "image_type": "cpu" + } + ] + } +] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/scheduler/build.json b/tests/benchmark/milvus_benchmark/scheduler/build.json new file mode 100644 index 0000000000..bf6cc18842 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/build.json @@ -0,0 +1,11 @@ +[ + { + "server": "idc-sh004", + "suite_params": [ + { + "suite": "2_insert_build.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/clean.json b/tests/benchmark/milvus_benchmark/scheduler/clean.json new file mode 100644 index 0000000000..be9cbbe231 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/clean.json @@ -0,0 +1,11 @@ +[ + { + "server": "poseidon", + "suite_params": [ + { + "suite": "clean.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/cluster.json b/tests/benchmark/milvus_benchmark/scheduler/cluster.json new file mode 100644 index 0000000000..ce5652521d --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/cluster.json @@ -0,0 +1,11 @@ +[ + { + "server": "idc-sh004", + "suite_params": [ + { + "suite": "2_insert_cluster.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/debug.json b/tests/benchmark/milvus_benchmark/scheduler/debug.json new file mode 100644 index 0000000000..aeca96947e --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/debug.json @@ -0,0 +1,19 @@ +[ + { + "deploy_mode": "cluster", + "suite_params": [ + { + "suite": "2_cpu_ann_accuracy.yaml", + "image_type": "cpu" + } + ] + }, + { + "suite_params": [ + { + "suite": "2_cpu_ann_accuracy.yaml", + "image_type": "cpu" + } + ] + } +] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/scheduler/debug1.json b/tests/benchmark/milvus_benchmark/scheduler/debug1.json new file mode 100644 index 0000000000..fa6199027d --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/debug1.json @@ -0,0 +1,20 @@ +[ + { + "server_tag": "8c16m1g", + "suite_params": [ + { + "suite": "080_gpu_search_debug.yaml", + "image_type": "gpu" + } + ] + }, + { + "server_tag": "16c32m1g", + "suite_params": [ + { + "suite": "080_gpu_search_debug.yaml", + "image_type": "gpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/debug2.json b/tests/benchmark/milvus_benchmark/scheduler/debug2.json new file mode 100644 index 0000000000..e5e9e9a747 --- /dev/null +++ 
b/tests/benchmark/milvus_benchmark/scheduler/debug2.json @@ -0,0 +1,12 @@ +[ + { + "server": "idc-sh005", + "deploy_mode": "cluster", + "suite_params": [ + { + "suite": "2_insert_search_sift10m_512.yaml", + "image_type": "cpu" + } + ] + } +] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/scheduler/filter.json b/tests/benchmark/milvus_benchmark/scheduler/filter.json new file mode 100644 index 0000000000..2a0baed660 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/filter.json @@ -0,0 +1,11 @@ +[ + { + "server": "poseidon", + "suite_params": [ + { + "suite": "011_search_dsl.yaml", + "image_type": "gpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/idc.json b/tests/benchmark/milvus_benchmark/scheduler/idc.json new file mode 100644 index 0000000000..598be6da15 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/idc.json @@ -0,0 +1,11 @@ +[ + { + "server": "idc-sh004", + "suite_params": [ + { + "suite": "011_cpu_search_debug.yaml", + "image_type": "gpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/insert.json b/tests/benchmark/milvus_benchmark/scheduler/insert.json new file mode 100644 index 0000000000..da7387b8aa --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/insert.json @@ -0,0 +1,11 @@ +[ + { + "server": "idc-sh002", + "suite_params": [ + { + "suite": "2_insert_data.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/insert2.json b/tests/benchmark/milvus_benchmark/scheduler/insert2.json new file mode 100644 index 0000000000..ad8556d39e --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/insert2.json @@ -0,0 +1,11 @@ +[ + { + "server": "idc-sh002", + "suite_params": [ + { + "suite": "2_insert_data.yaml", + "image_type": "cpu" + } + ] + } +] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/scheduler/jaccard.json b/tests/benchmark/milvus_benchmark/scheduler/jaccard.json new file mode 100644 index 0000000000..5d5ebd6d13 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/jaccard.json @@ -0,0 +1,11 @@ +[ + { + "server": "athena", + "suite_params": [ + { + "suite": "011_cpu_search_binary.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/locust.json b/tests/benchmark/milvus_benchmark/scheduler/locust.json new file mode 100644 index 0000000000..54d01a38ff --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/locust.json @@ -0,0 +1,15 @@ +[ + { + "server": "idc-sh002", + "suite_params": [ + { + "suite": "2_locust_search.yaml", + "image_type": "cpu" + }, + { + "suite": "2_locust_search_index.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/locust_insert.json b/tests/benchmark/milvus_benchmark/scheduler/locust_insert.json new file mode 100644 index 0000000000..30c421aacc --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/locust_insert.json @@ -0,0 +1,11 @@ +[ + { + "server": "idc-sh005", + "suite_params": [ + { + "suite": "2_locust_insert.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/locust_mix.json b/tests/benchmark/milvus_benchmark/scheduler/locust_mix.json new file mode 100644 index 0000000000..c06ae9ee83 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/locust_mix.json @@ -0,0 +1,11 @@ +[ + { + "server": "athena", + "suite_params": [ + { + "suite": "locust_mix.yaml", + "image_type": "gpu" + } + ] + } +] 
\ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/scheduler/locust_mix_debug.json b/tests/benchmark/milvus_benchmark/scheduler/locust_mix_debug.json new file mode 100644 index 0000000000..343d429338 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/locust_mix_debug.json @@ -0,0 +1,10 @@ +[ + { + "suite_params": [ + { + "suite": "locust_mix.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/locust_search.json b/tests/benchmark/milvus_benchmark/scheduler/locust_search.json new file mode 100644 index 0000000000..0510e043db --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/locust_search.json @@ -0,0 +1,11 @@ +[ + { + "server": "idc-sh002", + "suite_params": [ + { + "suite": "2_locust_search.yaml", + "image_type": "cpu" + } + ] + } +] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/scheduler/loop.json b/tests/benchmark/milvus_benchmark/scheduler/loop.json new file mode 100644 index 0000000000..2a93fca8d3 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/loop.json @@ -0,0 +1,10 @@ +[ + { + "suite_params": [ + { + "suite": "2_locust_insert_5h.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/loop_search.json b/tests/benchmark/milvus_benchmark/scheduler/loop_search.json new file mode 100644 index 0000000000..29c98dae55 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/loop_search.json @@ -0,0 +1,10 @@ +[ + { + "suite_params": [ + { + "suite": "2_locust_search_5h.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/nlist.json b/tests/benchmark/milvus_benchmark/scheduler/nlist.json new file mode 100644 index 0000000000..fdb9f9e083 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/nlist.json @@ -0,0 +1,11 @@ +[ + { + "server": "idc-sh003", + "suite_params": [ + { + "suite": "2_insert_search_sift50m_2048.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/search.json b/tests/benchmark/milvus_benchmark/scheduler/search.json new file mode 100644 index 0000000000..bebaa311a9 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/search.json @@ -0,0 +1,11 @@ +[ + { + "server": "idc-sh003", + "suite_params": [ + { + "suite": "2_insert_search.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/search2.json b/tests/benchmark/milvus_benchmark/scheduler/search2.json new file mode 100644 index 0000000000..f9e2388af0 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/search2.json @@ -0,0 +1,11 @@ +[ + { + "server_tag": "16c32m0g", + "suite_params": [ + { + "suite": "2_cpu_search.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/search_debug.json b/tests/benchmark/milvus_benchmark/scheduler/search_debug.json new file mode 100644 index 0000000000..cfba85c91f --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/search_debug.json @@ -0,0 +1,11 @@ +[ + { + "server": "idc-sh005", + "suite_params": [ + { + "suite": "2_insert_search_sift10m_4096.yaml", + "image_type": "cpu" + } + ] + } +] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/scheduler/shards_ann.json b/tests/benchmark/milvus_benchmark/scheduler/shards_ann.json new file mode 100644 index 0000000000..6282c234cc --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/shards_ann.json @@ -0,0 +1,10 @@ 
+[ + { + "suite_params": [ + { + "suite": "shards_ann_debug.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/shards_debug.json b/tests/benchmark/milvus_benchmark/scheduler/shards_debug.json new file mode 100644 index 0000000000..9e79ee7758 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/shards_debug.json @@ -0,0 +1,15 @@ +[ + { + "server": "apollo", + "suite_params": [ + { + "suite": "shards_insert_performance_sift1m.yaml", + "image_type": "cpu" + }, + { + "suite": "shards_search_performance_sift1m.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/shards_stability.json b/tests/benchmark/milvus_benchmark/scheduler/shards_stability.json new file mode 100644 index 0000000000..c2b71d9472 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/shards_stability.json @@ -0,0 +1,10 @@ +[ + { + "suite_params": [ + { + "suite": "shards_loop_stability.yaml", + "image_type": "cpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/scheduler/stability.json b/tests/benchmark/milvus_benchmark/scheduler/stability.json new file mode 100644 index 0000000000..497a161200 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/scheduler/stability.json @@ -0,0 +1,11 @@ +[ + { + "server": "eros", + "suite_params": [ + { + "suite": "gpu_search_stability.yaml", + "image_type": "gpu" + } + ] + } +] diff --git a/tests/benchmark/milvus_benchmark/suites/011_add_flush_performance.yaml b/tests/benchmark/milvus_benchmark/suites/011_add_flush_performance.yaml new file mode 100644 index 0000000000..b8a7090d9b --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_add_flush_performance.yaml @@ -0,0 +1,20 @@ +insert_flush_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_2m_128_128_l2_flush + cache_config.cpu_cache_capacity: 8 + cache_config.insert_buffer_size: 2 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + db_config.auto_flush_interval: 300 + collection_name: sift_2m_128_l2 + ni_per: 100000 diff --git a/tests/benchmark/milvus_benchmark/suites/011_build_debug.yaml b/tests/benchmark/milvus_benchmark/suites/011_build_debug.yaml new file mode 100644 index 0000000000..f9d1a0f4e9 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_build_debug.yaml @@ -0,0 +1,92 @@ +build_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_hnsw + cache_config.cpu_cache_capacity: 4GB + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_10m_128_l2 + index_type: hnsw + index_param: + M: 48 + efConstruction: 500 + # - + # server: + # db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_ivf_flat_16384 + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 100 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 6GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_128_l2 + # index_type: 
ivf_flat + # index_param: + # nlist: 16384 + # - + # server: + # db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8_16384 + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 100 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 6GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_128_l2 + # index_type: ivf_sq8 + # index_param: + # nlist: 16384 + # - + # server: + # db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8h_16384 + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 100 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 6GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_128_l2 + # index_type: ivf_sq8h + # index_param: + # nlist: 16384 + # - + # server: + # db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_pq_16384 + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 100 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 6GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_128_l2 + # index_type: ivf_pq + # index_param: + # nlist: 16384 + # m: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/011_cluster_cpu_accuracy_ann.yaml b/tests/benchmark/milvus_benchmark/suites/011_cluster_cpu_accuracy_ann.yaml new file mode 100644 index 0000000000..63f26c1307 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cluster_cpu_accuracy_ann.yaml @@ -0,0 +1,336 @@ +ann_accuracy: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/sift_128_euclidean + suffix_path: true + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + cluster: true + readonly: + replicas: 2 + external_mysql: true + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['flat'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 512, 16384] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/sift_128_euclidean + suffix_path: true + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + cluster: true + readonly: + replicas: 2 + external_mysql: true + + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['ivf_flat', 'ivf_sq8'] + index_params: + nlist: [1024, 16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + db_config.primary_path: 
/test/milvus/db_data_011/cluster/sift_128_euclidean + suffix_path: true + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + cluster: true + readonly: + replicas: 2 + external_mysql: true + + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['ivf_pq'] + index_params: + nlist: [16384] + m: [32] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/sift_128_euclidean + suffix_path: true + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + cluster: true + readonly: + replicas: 2 + external_mysql: true + + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['annoy'] + index_params: + n_trees: [8, 32] + top_ks: [10] + nqs: [10000] + search_params: + search_k: [50, 100, 500, 1000] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/sift_128_euclidean + suffix_path: true + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + cluster: true + readonly: + replicas: 2 + external_mysql: true + + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['hnsw'] + index_params: + M: [16] + efConstruction: [500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [16, 32, 64, 128, 256, 512] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular + suffix_path: true + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + cluster: true + readonly: + replicas: 2 + external_mysql: true + + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['flat'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 512, 16384] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular + suffix_path: true + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + cluster: true + readonly: + replicas: 2 + external_mysql: true + + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + 
index_types: ['ivf_flat', 'ivf_sq8'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular + suffix_path: true + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + cluster: true + readonly: + replicas: 2 + external_mysql: true + + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['ivf_pq'] + index_params: + nlist: [16384] + m: [20] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular + suffix_path: true + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + cluster: true + readonly: + replicas: 2 + external_mysql: true + + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['annoy'] + index_params: + n_trees: [8, 32] + top_ks: [10] + nqs: [10000] + search_params: + search_k: [50, 100, 500, 1000] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular + suffix_path: true + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + cluster: true + readonly: + replicas: 2 + external_mysql: true + + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['hnsw'] + index_params: + M: [36] + efConstruction: [500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [10, 16, 32, 64, 128, 256, 512] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/glove_200_angular + suffix_path: true + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + cluster: true + readonly: + replicas: 2 + external_mysql: true + + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['nsg'] + index_params: + search_length: 45 + out_degree: 50 + candidate_pool_size: 300 + knng: 100 + top_ks: [10] + nqs: [10000] + search_params: + search_length: [50] diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy.yaml new file mode 100644 index 0000000000..b254849708 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy.yaml @@ -0,0 +1,55 @@ +accuracy: + collections: + - + milvus: + 
db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_sq8 + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: sift_10m_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_sq8 + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: sift_10m_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_hnsw + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: sift_10m_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + ef: [64, 100, 200, 500, 700] diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann.yaml new file mode 100644 index 0000000000..d7b736dd1f --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann.yaml @@ -0,0 +1,260 @@ +ann_accuracy: + collections: + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['flat'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 512, 16384] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['ivf_flat', 'ivf_sq8'] + index_params: + nlist: [1024, 16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['ivf_pq'] + index_params: + nlist: [16384] + m: [32] + top_ks: [10] + nqs: 
[10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['annoy'] + index_params: + n_trees: [8, 32] + top_ks: [10] + nqs: [10000] + search_params: + search_k: [50, 100, 500, 1000] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['hnsw'] + index_params: + M: [16] + efConstruction: [500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['flat'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 512, 16384] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['ivf_flat', 'ivf_sq8'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['ivf_pq'] + index_params: + nlist: [16384] + m: [20] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['annoy'] + index_params: + n_trees: [8, 32] + top_ks: [10] + nqs: [10000] + search_params: + search_k: 
[50, 100, 500, 1000] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['hnsw'] + index_params: + M: [36] + efConstruction: [500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [10, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['nsg'] + index_params: + search_length: 45 + out_degree: 50 + candidate_pool_size: 300 + knng: 100 + top_ks: [10] + nqs: [10000] + search_params: + search_length: [50] diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann_debug.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann_debug.yaml new file mode 100644 index 0000000000..59fdf1f74c --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_ann_debug.yaml @@ -0,0 +1,50 @@ +ann_accuracy: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/sift_128_euclidean + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + cluster: true + readonly: + replicas: 2 + external_mysql: true + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['ivf_flat', 'ivf_sq8'] + index_params: + nlist: [1024, 16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 1024] + + # - + # milvus: + # cache_config.cpu_cache_capacity: 16GB + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 1 + # gpu_resource_config.enable: false + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # gpu_resource_config.build_index_resources: + # - gpu0 + # cluster: false + # external_mysql: false + # source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + # collection_name: sift_128_euclidean + # index_types: ['ivf_flat', 'ivf_sq8'] + # index_params: + # nlist: [1024, 16384] + # top_ks: [10] + # nqs: [10000] + # search_params: + # nprobe: [1, 2, 4, 8, 1024] diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_rhnsw.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_rhnsw.yaml new file mode 100644 index 0000000000..b9cb3b8d2f --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_accuracy_rhnsw.yaml @@ -0,0 +1,36 @@ +accuracy: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_rhnsw_pq + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + 
gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: sift_10m_128_l2 + top_ks: [32] + nqs: [1000] + search_params: + ef: [32, 64, 100] +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_rhnsw_sq +# cache_config.cpu_cache_capacity: 8GB +# engine_config.use_blas_threshold: 1100 +# engine_config.gpu_search_threshold: 1 +# gpu_resource_config.enable: false +# gpu_resource_config.cache_capacity: 4GB +# gpu_resource_config.search_resources: +# - gpu0 +# gpu_resource_config.build_index_resources: +# - gpu0 +# collection_name: sift_50m_128_l2 +# top_ks: [64] +# nqs: [1000] +# search_params: +# ef: [32, 64, 100, 200, 500] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_build_binary.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_build_binary.yaml new file mode 100644 index 0000000000..d3beb3bd5e --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_build_binary.yaml @@ -0,0 +1,11 @@ +build_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/binary_50m_512_jaccard_ivf + cache_config.cpu_cache_capacity: 8GB + gpu_resource_config.enable: false + collection_name: binary_50m_512_jaccard + index_type: bin_ivf_flat + index_param: + nlist: 2048 diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_build_hnsw.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_build_hnsw.yaml new file mode 100644 index 0000000000..6f49fc5cbd --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_build_hnsw.yaml @@ -0,0 +1,12 @@ +build_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_ip_hnsw + cache_config.cpu_cache_capacity: 8GB + gpu_resource_config.enable: false + collection_name: sift_10m_128_ip + index_type: hnsw + index_param: + M: 48 + efConstruction: 500 diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_build_rhnsw.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_build_rhnsw.yaml new file mode 100644 index 0000000000..bc5096e6e7 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_build_rhnsw.yaml @@ -0,0 +1,23 @@ +build_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_rhnsw_pq + cache_config.cpu_cache_capacity: 8GB + gpu_resource_config.enable: false + collection_name: sift_10m_128_l2 + index_type: rhnsw_pq + index_param: + M: 48 + efConstruction: 500 + PQM: 16 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_rhnsw_sq + cache_config.cpu_cache_capacity: 8GB + gpu_resource_config.enable: false + collection_name: sift_50m_128_l2 + index_type: rhnsw_sq + index_param: + M: 48 + efConstruction: 500 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_search.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_search.yaml new file mode 100644 index 0000000000..e576b913e3 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_search.yaml @@ -0,0 +1,255 @@ +search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8 + cache_config.cpu_cache_capacity: 150GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + 
gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_1b_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2 + cache_config.cpu_cache_capacity: 64GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + nprobe: 8 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2 + cache_config.cpu_cache_capacity: 64GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + nprobe: 8 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 64GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_pq + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_10m_128_l2 + 
run_count: 2 + top_ks: [1, 1000] + nqs: [1, 100, 1000] + search_params: + - + nprobe: 8 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_nsg + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_10m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + search_length: 50 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_annoy + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_10m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + search_k: 100 + - + search_k: 500 + - + search_k: 1000 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/binary_50m_512_jaccard + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: binary_50m_512_jaccard + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/binary_50m_512_jaccard_ivf + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: binary_50m_512_jaccard + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_search_binary.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_binary.yaml new file mode 100644 index 0000000000..6539999b4a --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_binary.yaml @@ -0,0 +1,49 @@ +search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/binary_50m_512_jaccard + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: binary_50m_512_jaccard + run_count: 2 + top_ks: [10, 1, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/binary_50m_512_jaccard_ivf + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + 
engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: binary_50m_512_jaccard + run_count: 2 + top_ks: [10, 1, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_search_debug.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_debug.yaml new file mode 100644 index 0000000000..eca76ee3e4 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_debug.yaml @@ -0,0 +1,26 @@ +search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8 + cache_config.cpu_cache_capacity: 150GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_1b_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m.yaml new file mode 100644 index 0000000000..6e12b3a61d --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m.yaml @@ -0,0 +1,123 @@ +search_performance: + collections: +# - +# milvus: +# db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2 +# cache_config.cpu_cache_capacity: 32GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 100 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_10m_128_l2 +# run_count: 2 +# top_ks: [1, 1000] +# nqs: [1, 100, 1200] +# search_params: +# - +# nprobe: 8 + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/sift_10m_128_l2 + cache_config.cpu_cache_capacity: 64GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + cluster: true + readonly: + replicas: 2 + external_mysql: true + collection_name: sift_10m_128_l2_011 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + nprobe: 8 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_ivf_flat_16384 +# cache_config.cpu_cache_capacity: 32GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 100 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_10m_128_l2 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 1000, 1200] +# search_params: +# - +# nprobe: 8 +# - +# nprobe: 32 +# +# - +# server: +# db_config.primary_path: 
/test/milvus/db_data_011/sift_10m_128_l2_sq8_16384 +# cache_config.cpu_cache_capacity: 32GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 100 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_10m_128_l2 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 1000, 1200] +# search_params: +# - +# nprobe: 8 +# - +# nprobe: 32 +# +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_pq_16384 +# cache_config.cpu_cache_capacity: 32GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 100 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_10m_128_l2 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 1000, 1200] +# search_params: +# - +# nprobe: 8 +# - +# nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_filter.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_filter.yaml new file mode 100644 index 0000000000..284aff9fdf --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_filter.yaml @@ -0,0 +1,97 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2/ + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 1000] + nqs: [1, 100, 1200] + search_params: + - + nprobe: 8 + - + server: + db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + server: + db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_pq + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + 
gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_hnsw.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_hnsw.yaml new file mode 100644 index 0000000000..49954ce262 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_hnsw.yaml @@ -0,0 +1,40 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_hnsw + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [100] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + ef: 100 + - + ef: 200 + - + ef: 500 + - + ef: 1000 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_ip_hnsw + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + wal_enable: true + collection_name: sift_10m_128_ip + run_count: 2 + top_ks: [100] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + ef: 100 + - + ef: 200 + - + ef: 500 + - + ef: 1000 diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_ivf.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_ivf.yaml new file mode 100644 index 0000000000..8d00bdbae7 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift10m_ivf.yaml @@ -0,0 +1,32 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_ivf_flat_16384 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_ip_ivf_flat_16384 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + wal_enable: true + collection_name: sift_10m_128_ip + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift1b.yaml b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift1b.yaml new file mode 100644 index 0000000000..559c275a81 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift1b.yaml @@ -0,0 +1,26 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8 + cache_config.cpu_cache_capacity: 150GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_1b_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift50m.yaml 
b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift50m.yaml new file mode 100644 index 0000000000..6a96e0e721 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_cpu_search_sift50m.yaml @@ -0,0 +1,98 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 1000] + nqs: [1, 100, 1200] + search_params: + - + nprobe: 8 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_ivf_flat_16384 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8_16384 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_pq_16384 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_delete_performance.yaml b/tests/benchmark/milvus_benchmark/suites/011_delete_performance.yaml new file mode 100644 index 0000000000..af902eae92 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_delete_performance.yaml @@ -0,0 +1,17 @@ +delete_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_1m_128_128_l2_delete + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: sift_1m_50000_128_l2_2 + ni_per: 50000 + auto_flush: false diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy.yaml 
b/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy.yaml new file mode 100644 index 0000000000..943337b75f --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy.yaml @@ -0,0 +1,61 @@ +accuracy: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_10m_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_10m_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1] + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_ip + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_10m_128_ip + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1] diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann.yaml new file mode 100644 index 0000000000..fb2fad25a4 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann.yaml @@ -0,0 +1,165 @@ +ann_accuracy: + collections: + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['ivf_pq'] + index_params: + nlist: [16384] + m: [32] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + 
gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['hnsw'] + index_params: + M: [16] + efConstruction: [500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['hnsw'] + index_params: + M: [36] + efConstruction: [500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [10, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/kosarak-27983-jaccard.hdf5 + collection_name: kosarak_27984_jaccard + index_types: ['bin_flat', 'bin_ivf_flat'] + index_params: + nlist: [2048] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-256-hamming.hdf5 + collection_name: sift_256_hamming + index_types: ['bin_flat', 'bin_ivf_flat'] + index_params: + nlist: [2048] + top_ks: [100] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann_debug.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann_debug.yaml new file mode 100644 index 0000000000..155694006d --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_ann_debug.yaml @@ -0,0 +1,24 @@ +ann_accuracy: + collections: + - + server: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + 
collection_name: sift_128_euclidean + index_types: ['ivf_sq8'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_debug.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_debug.yaml new file mode 100644 index 0000000000..aa592aac20 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_accuracy_debug.yaml @@ -0,0 +1,23 @@ +accuracy: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + - gpu2 + - gpu3 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [32, 64, 128] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_build.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_build.yaml new file mode 100644 index 0000000000..488dc16b67 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_build.yaml @@ -0,0 +1,21 @@ +build_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_sq8_4096 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_10m_100000_128_l2 + index_type: ivf_sq8 + index_param: + nlist: 4096 + diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_build_debug.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_build_debug.yaml new file mode 100644 index 0000000000..8e534adc92 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_build_debug.yaml @@ -0,0 +1,151 @@ +build_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_sq8_4096 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu2 + collection_name: sift_10m_100000_128_l2 + index_type: ivf_sq8 + index_param: + nlist: 4096 + + # - + # server: + # db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_ivf + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 1 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_10m_100000_128_l2 + # index_type: ivf_flat + # index_param: + # nlist: 1024 + # - + # server: + # db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_sq8h + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 1 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4GB + 
# gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_10m_100000_128_l2 + # index_type: ivf_sq8h + # index_param: + # nlist: 1024 + # - + # server: + # db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_sq8 + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 1 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_10m_100000_128_l2 + # index_type: ivf_sq8 + # index_param: + # nlist: 1024 + # - + # server: + # db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_pq + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 1 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_10m_100000_128_l2 + # index_type: ivf_pq + # index_param: + # nlist: 1024 + # m: 32 + # - + # server: + # db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_hnsw + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 1 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_10m_100000_128_l2 + # index_type: hnsw + # index_param: + # M: 48 + # efConstruction: 500 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_annoy +# cache_config.cpu_cache_capacity: 8GB +# engine_config.use_blas_threshold: 1100 +# engine_config.gpu_search_threshold: 1 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 4GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# collection_name: sift_10m_100000_128_l2 +# index_type: annoy +# index_param: +# n_trees: 8 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_nsg +# cache_config.cpu_cache_capacity: 8GB +# engine_config.use_blas_threshold: 1100 +# engine_config.gpu_search_threshold: 1 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 4GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# collection_name: sift_10m_100000_128_l2 +# index_type: nsg +# index_param: +# search_length: 50 +# out_degree: 40 +# candidate_pool_size: 100 +# knng: 50 diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift10m.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift10m.yaml new file mode 100644 index 0000000000..f5ffe104ef --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift10m.yaml @@ -0,0 +1,148 @@ +build_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 100 + 
gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + - gpu2 + - gpu3 + collection_name: sift_10m_128_l2 + index_type: ivf_flat + index_param: + nlist: 1024 + - + server: + db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_sq8 + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + - gpu2 + - gpu3 + collection_name: sift_10m_128_l2 + index_type: ivf_sq8 + index_param: + nlist: 1024 + - + server: + db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_sq8h + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + - gpu2 + - gpu3 + collection_name: sift_10m_128_l2 + index_type: ivf_sq8h + index_param: + nlist: 1024 + - + server: + db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_pq + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + - gpu2 + - gpu3 + collection_name: sift_10m_128_l2 + index_type: ivf_pq + index_param: + nlist: 1024 + m: 32 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_hnsw +# cache_config.cpu_cache_capacity: 8GB +# engine_config.use_blas_threshold: 1100 +# engine_config.gpu_search_threshold: 1 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 4GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# - gpu2 +# - gpu3 +# collection_name: sift_10m_100000_128_l2 +# index_type: hnsw +# index_param: +# M: 48 +# efConstruction: 500 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_annoy +# cache_config.cpu_cache_capacity: 8GB +# engine_config.use_blas_threshold: 1100 +# engine_config.gpu_search_threshold: 100 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 4GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# - gpu2 +# - gpu3 +# collection_name: sift_10m_100000_128_l2 +# index_type: annoy +# index_param: +# n_trees: 8 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_nsg +# cache_config.cpu_cache_capacity: 8GB +# engine_config.use_blas_threshold: 1100 +# engine_config.gpu_search_threshold: 100 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 4GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# - gpu2 +# - gpu3 +# collection_name: sift_10m_100000_128_l2 +# index_type: nsg +# index_param: +# search_length: 50 +# out_degree: 40 +# candidate_pool_size: 100 +# knng: 
50 + diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift1b.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift1b.yaml new file mode 100644 index 0000000000..d17b6a75f6 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift1b.yaml @@ -0,0 +1,42 @@ +build_performance: + collections: +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8h +# cache_config.cpu_cache_capacity: 8GB +# engine_config.use_blas_threshold: 1100 +# engine_config.gpu_search_threshold: 1 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 4GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# - gpu2 +# - gpu3 +# collection_name: sift_1b_128_l2 +# index_type: ivf_sq8h +# index_param: +# nlist: 16384 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8 + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + - gpu2 + - gpu3 + collection_name: sift_1b_128_l2 + index_type: ivf_sq8 + index_param: + nlist: 16384 diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift50m.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift50m.yaml new file mode 100644 index 0000000000..9e56da3c99 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_build_sift50m.yaml @@ -0,0 +1,75 @@ +build_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_pq + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_128_l2 + index_type: ivf_pq + index_param: + nlist: 16384 + m: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_128_l2 + index_type: ivf_flat + index_param: + nlist: 16384 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8 + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_128_l2 + index_type: ivf_sq8 + index_param: + nlist: 16384 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8h + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + 
gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_128_l2 + index_type: ivf_sq8h + index_param: + nlist: 16384 diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_search.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_search.yaml new file mode 100644 index 0000000000..e717c6e325 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_search.yaml @@ -0,0 +1,251 @@ +search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8h + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_pq + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_hnsw + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 0 + 
engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [100] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + ef: 100 + - + ef: 200 + - + ef: 500 + - + ef: 1000 + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_annoy + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + search_k: 100 + - + search_k: 500 + - + search_k: 1000 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_nsg + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + search_length: 50 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8h + cache_config.cpu_cache_capacity: 150GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_1b_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8 + cache_config.cpu_cache_capacity: 150GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_1b_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_search_debug.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_debug.yaml new file mode 100644 index 0000000000..fa723a6aef --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_debug.yaml @@ -0,0 +1,79 @@ +search_performance: + collections: + # - + # milvus: + # db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8h + # cache_config.cpu_cache_capacity: 150GB + # engine_config.use_blas_threshold: 0 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 6GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # 
gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # wal_enable: true + # collection_name: sift_1b_128_l2 + # run_count: 2 + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 1000, 1200] + # search_params: + # - + # nprobe: 8 + # - + # nprobe: 32 + + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_1b_128_l2_sq8 + cache_config.cpu_cache_capacity: 150GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_1b_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + # - + # milvus: + # db_config.primary_path: /test/milvus/db_data_011/cluster/sift_50m_128_l2 + # cache_config.cpu_cache_capacity: 32GB + # engine_config.use_blas_threshold: 0 + # engine_config.gpu_search_threshold: 100 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 6GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # wal_enable: true + # cluster: true + # external_mysql: true + # collection_name: sift_50m_128_l2_011 + # run_count: 2 + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 1000, 1200] + # search_params: + # - + # nprobe: 8 + # - + # nprobe: 32 + # diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m.yaml new file mode 100644 index 0000000000..b4bd91d225 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m.yaml @@ -0,0 +1,145 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 1000] + nqs: [1, 100, 1200] + search_params: + - + nprobe: 8 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + 
search_params: + - + nprobe: 8 + - + nprobe: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_sq8h + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_pq + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_pq_16384 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_100k.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_100k.yaml new file mode 100644 index 0000000000..e76d235406 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_100k.yaml @@ -0,0 +1,121 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_100000_128_l2 + run_count: 2 + top_ks: [1, 1000] + nqs: [1, 100, 1200] + search_params: + - + nprobe: 8 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + 
engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000, 8192] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + # - + # server: + # db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_sq8h + # cache_config.cpu_cache_capacity: 32GB + # engine_config.use_blas_threshold: 0 + # engine_config.gpu_search_threshold: 100 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 6GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # wal_enable: true + # collection_name: sift_10m_100000_128_l2 + # run_count: 2 + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 1000, 1200] + # search_params: + # - + # nprobe: 8 + # - + # nprobe: 32 + # - + # server: + # db_config.primary_path: /test/milvus/db_data_011/sift_10m_100000_128_l2_pq + # cache_config.cpu_cache_capacity: 32GB + # engine_config.use_blas_threshold: 0 + # engine_config.gpu_search_threshold: 100 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 6GB + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # wal_enable: true + # collection_name: sift_10m_100000_128_l2 + # run_count: 2 + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 1000, 1200] + # search_params: + # - + # nprobe: 8 + # - + # nprobe: 32 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_filter.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_filter.yaml new file mode 100644 index 0000000000..c251a73bed --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_filter.yaml @@ -0,0 +1,126 @@ +search_performance: + collections: +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2/ +# cache_config.cpu_cache_capacity: 32GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 100 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_10m_128_l2 +# run_count: 2 +# top_ks: [1, 1000] +# nqs: [1, 100, 1200] +# search_params: +# - +# nprobe: 8 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_ivf_flat +# cache_config.cpu_cache_capacity: 32GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 100 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_10m_128_l2 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 1000, 1200] +# search_params: +# - +# nprobe: 8 +# - +# nprobe: 32 + +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_sq8 +# cache_config.cpu_cache_capacity: 32GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 100 +# 
gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_10m_128_l2 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 1000, 1200] +# search_params: +# - +# nprobe: 8 +# - +# nprobe: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_sq8h + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + filters: + - + range: "{'range': {'float': {'GT': -1.0, 'LT': collection_size * 0.1}}}" + - + range: "{'range': {'float': {'GT': -1.0, 'LT': collection_size * 0.5}}}" + - + range: "{'range': {'float': {'GT': -1.0, 'LT': collection_size * 0.9}}}" + search_params: + - + nprobe: 32 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_pq +# cache_config.cpu_cache_capacity: 32GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 100 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_10m_128_l2 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 1000, 1200] +# search_params: +# - +# nprobe: 8 +# - +# nprobe: 32 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_ivf.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_ivf.yaml new file mode 100644 index 0000000000..a76983ed09 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift10m_ivf.yaml @@ -0,0 +1,50 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_ivf_flat_16384 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_ip_ivf_flat_16384 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_ip + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift50m.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift50m.yaml new 
file mode 100644 index 0000000000..7aae432ca2 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_search_sift50m.yaml @@ -0,0 +1,121 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_100000_128_l2 + run_count: 2 + top_ks: [1, 1000] + nqs: [1, 100, 1200] + search_params: + - + nprobe: 8 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2_sq8h + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2_pq + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_sift50m_ivf.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_sift50m_ivf.yaml new file mode 100644 index 0000000000..8bf700c870 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_sift50m_ivf.yaml @@ -0,0 +1,26 @@ +search_performance: + collections: + - + server: + db_config.primary_path: 
/test/milvus/db_data_011/sift_50m_100000_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_gpu_stability.yaml b/tests/benchmark/milvus_benchmark/suites/011_gpu_stability.yaml new file mode 100644 index 0000000000..9e7ec4f006 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_gpu_stability.yaml @@ -0,0 +1,39 @@ +stability: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_1024_128_l2_sq8_stability + cache_config.cpu_cache_capacity: 64GB + cache_config.cache_insert_data: true + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 50 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1m_50000_128_l2_2 + during_time: 5 + operations: + insert: + weight: 4 + xb: 100 + delete: + weight: 4 + xb: 100 + flush: + weight: 1 + # async: true + compact: + weight: 1 + # async: true + query: + weight: 2 + # async: true + top_ks: 1-100 + nqs: 1-100 + search_params: + nprobe: 1-100 diff --git a/tests/benchmark/milvus_benchmark/suites/011_insert_debug.yaml b/tests/benchmark/milvus_benchmark/suites/011_insert_debug.yaml new file mode 100644 index 0000000000..34a598dabe --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_insert_debug.yaml @@ -0,0 +1,25 @@ +insert_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/sift_1b_128_l2_sq8 + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + external_mysql: true + cluster: true + collection_name: sift_1b_128_l2_sq8_011 + ni_per: 50000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 16384 diff --git a/tests/benchmark/milvus_benchmark/suites/011_insert_performance.yaml b/tests/benchmark/milvus_benchmark/suites/011_insert_performance.yaml new file mode 100644 index 0000000000..0bd3643408 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_insert_performance.yaml @@ -0,0 +1,113 @@ +insert_performance: + collections: + - + milvus: + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_5m_128_l2 + ni_per: 50000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 + + - + milvus: + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + 
gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_5m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 1024 + + # - + # server: + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # gpu_resource_config.enable: false + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # gpu_resource_config.build_index_resources: + # - gpu0 + # wal_enable: true + # collection_name: sift_5m_100000_128_l2 + # ni_per: 50000 + # flush: no + # build_index: false + # index_type: ivf_sq8 + # index_param: + # nlist: 1024 + + # - + # server: + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # gpu_resource_config.enable: false + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # gpu_resource_config.build_index_resources: + # - gpu0 + # wal_enable: true + # collection_name: sift_5m_100000_128_l2 + # ni_per: 100000 + # flush: no + # build_index: false + # index_type: ivf_sq8 + # index_param: + # nlist: 1024 + + # - + # server: + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # gpu_resource_config.enable: false + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # gpu_resource_config.build_index_resources: + # - gpu0 + # wal_enable: true + # collection_name: sift_5m_100000_128_l2 + # ni_per: 200000 + # flush: no + # build_index: false + # index_type: ivf_sq8 + # index_param: + # nlist: 1024 + + # - + # server: + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # gpu_resource_config.enable: false + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # gpu_resource_config.build_index_resources: + # - gpu0 + # wal_enable: true + # collection_name: sift_5m_100000_128_l2 + # ni_per: 500000 + # flush: no + # build_index: false + # index_type: ivf_sq8 + # index_param: + # nlist: 1024 diff --git a/tests/benchmark/milvus_benchmark/suites/011_insert_performance_debug.yaml b/tests/benchmark/milvus_benchmark/suites/011_insert_performance_debug.yaml new file mode 100644 index 0000000000..34d8328dc2 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_insert_performance_debug.yaml @@ -0,0 +1,131 @@ +insert_performance: + collections: + - + milvus: + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_5m_128_l2 + ni_per: 10000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 + + - + milvus: + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_5m_128_l2 + ni_per: 1000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 + + - + milvus: + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 1100 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + 
gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_5m_128_l2 + ni_per: 1 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 + + # - + # server: + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # gpu_resource_config.enable: false + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # gpu_resource_config.build_index_resources: + # - gpu0 + # wal_enable: true + # collection_name: sift_5m_100000_128_l2 + # ni_per: 50000 + # flush: no + # build_index: false + # index_type: ivf_sq8 + # index_param: + # nlist: 1024 + + # - + # server: + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # gpu_resource_config.enable: false + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # gpu_resource_config.build_index_resources: + # - gpu0 + # wal_enable: true + # collection_name: sift_5m_100000_128_l2 + # ni_per: 100000 + # flush: no + # build_index: false + # index_type: ivf_sq8 + # index_param: + # nlist: 1024 + + # - + # server: + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # gpu_resource_config.enable: false + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # gpu_resource_config.build_index_resources: + # - gpu0 + # wal_enable: true + # collection_name: sift_5m_100000_128_l2 + # ni_per: 200000 + # flush: no + # build_index: false + # index_type: ivf_sq8 + # index_param: + # nlist: 1024 + + # - + # server: + # cache_config.cpu_cache_capacity: 8GB + # engine_config.use_blas_threshold: 1100 + # gpu_resource_config.enable: false + # gpu_resource_config.cache_capacity: 4GB + # gpu_resource_config.search_resources: + # - gpu0 + # gpu_resource_config.build_index_resources: + # - gpu0 + # wal_enable: true + # collection_name: sift_5m_100000_128_l2 + # ni_per: 500000 + # flush: no + # build_index: false + # index_type: ivf_sq8 + # index_param: + # nlist: 1024 diff --git a/tests/benchmark/milvus_benchmark/suites/011_search_dsl.yaml b/tests/benchmark/milvus_benchmark/suites/011_search_dsl.yaml new file mode 100644 index 0000000000..f5c8cdaf61 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_search_dsl.yaml @@ -0,0 +1,76 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2/ + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 1000] + nqs: [1, 100, 1200] + filters: + - + term: "{'term': {'float': {'values': [float(i) for i in range(collection_size // 2)]}}}" + - + range: "{'range': {'int64': {'LT': 0, 'GT':collection_size // 2}}}" + - + range: "{'range': {'int64': {'LT': 0, 'GT':collection_size}}}" + - + term: "{'term': {'float': {'values': [float(i) for i in range(collection_size)]}}}" + - + range: "{'range': {'int64': {'LT': 0, 'GT':collection_size // 100000}}}" + - + range: "{'range': {'int64': {'LT': collection_size // 2, 'GT': collection_size}}}" + term: "{'term': {'float': {'values': [float(i) for i in range(collection_size // 2)]}}}" + search_params: + - + nprobe: 8 + - 
+ server: + db_config.primary_path: /test/milvus/db_data_011/filter/sift_10m_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + filters: + - + term: "{'term': {'float': {'values': [float(i) for i in range(collection_size // 2)]}}}" + - + range: "{'range': {'int64': {'LT': 0, 'GT':collection_size // 2}}}" + - + range: "{'range': {'int64': {'LT': 0, 'GT':collection_size}}}" + - + term: "{'term': {'float': {'values': [float(i) for i in range(collection_size)]}}}" + - + range: "{'range': {'int64': {'LT': 0, 'GT':collection_size // 100000}}}" + - + range: "{'range': {'int64': {'LT': collection_size // 2, 'GT': collection_size}}}" + term: "{'term': {'float': {'values': [float(i) for i in range(collection_size // 2)]}}}" + search_params: + - + nprobe: 8 + - + nprobe: 32 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_search_stability.yaml b/tests/benchmark/milvus_benchmark/suites/011_search_stability.yaml new file mode 100644 index 0000000000..840b9378eb --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_search_stability.yaml @@ -0,0 +1,20 @@ +search_stability: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8_16384_stability + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: sift_50m_128_l2 + during_time: 180 + top_ks: 1-200 + nqs: 1-200 + search_params: + nprobe: 1-100 diff --git a/tests/benchmark/milvus_benchmark/suites/011_search_threshold.yaml b/tests/benchmark/milvus_benchmark/suites/011_search_threshold.yaml new file mode 100644 index 0000000000..d512f25ab5 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_search_threshold.yaml @@ -0,0 +1,50 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_100000_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 1300 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_100000_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 1000, 1200] + 
search_params: + - + nprobe: 8 + - + nprobe: 32 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/011_sift50m_acc.yaml b/tests/benchmark/milvus_benchmark/suites/011_sift50m_acc.yaml new file mode 100644 index 0000000000..5b9d10ee87 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/011_sift50m_acc.yaml @@ -0,0 +1,19 @@ +accuracy: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: sift_50m_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/2_accuracy_ann_debug.yaml b/tests/benchmark/milvus_benchmark/suites/2_accuracy_ann_debug.yaml new file mode 100644 index 0000000000..4e95d11915 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_accuracy_ann_debug.yaml @@ -0,0 +1,32 @@ +ann_accuracy: + collections: + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + server: + cpus: 12 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['flat'] + index_params: + nlist: [1024] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1] + + # - milvus: + # cache_config.cpu_cache_capacity: 16GB + # engine_config.use_blas_threshold: 1100 + # server: + # cpus: 12 + # source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + # collection_name: glove_200_angular + # index_types: ['annoy'] + # index_params: + # n_trees: [8, 32] + # top_ks: [10] + # nqs: [10000] + # search_params: + # search_k: [50, 100, 500, 1000] diff --git a/tests/benchmark/milvus_benchmark/suites/2_cpu_accuracy.yaml b/tests/benchmark/milvus_benchmark/suites/2_cpu_accuracy.yaml new file mode 100644 index 0000000000..d2bfc20150 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_cpu_accuracy.yaml @@ -0,0 +1,21 @@ +accuracy: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_sq8 + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1m_128_l2 + top_ks: [5] + nqs: [10] + search_params: + nprobe: [8, 32] diff --git a/tests/benchmark/milvus_benchmark/suites/2_cpu_ann_accuracy.yaml b/tests/benchmark/milvus_benchmark/suites/2_cpu_ann_accuracy.yaml new file mode 100644 index 0000000000..1fd818831f --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_cpu_ann_accuracy.yaml @@ -0,0 +1,194 @@ +ann_accuracy: + collections: + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + server: + cpus: 12 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['flat'] + index_params: + nlist: [1024] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + server: + cpus: 
12 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['ivf_flat'] + index_params: + nlist: [1024] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + server: + cpus: 12 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['ivf_sq8'] + index_params: + nlist: [1024] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + server: + cpus: 12 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['ivf_pq'] + index_params: + nlist: [1024] + m: [32] + nbits: [8] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + server: + cpus: 12 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['annoy'] + index_params: + n_trees: [8, 32] + top_ks: [10] + nqs: [10000] + search_params: + search_k: [50, 100, 500, 1000] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['hnsw'] + index_params: + M: [16] + efConstruction: [500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + server: + cpus: 12 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['flat'] + index_params: + nlist: [1024] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 512, 16384] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + server: + cpus: 12 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['ivf_flat'] + index_params: + nlist: [1024] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + server: + cpus: 12 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['ivf_sq8'] + index_params: + nlist: [1024] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['ivf_pq'] + index_params: + nlist: [1024] + m: [20] + nbits: [8] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + server: + cpus: 12 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['annoy'] + index_params: + n_trees: [8, 32] + top_ks: [10] + nqs: [10000] + search_params: + search_k: [50, 100, 500, 1000] + + - + 
milvus: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + server: + cpus: 12 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['hnsw'] + index_params: + M: [36] + efConstruction: [500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [10, 16, 32, 64, 128, 256, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/2_cpu_build.yaml b/tests/benchmark/milvus_benchmark/suites/2_cpu_build.yaml new file mode 100644 index 0000000000..8c121ce9c0 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_cpu_build.yaml @@ -0,0 +1,22 @@ +build_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_1m_128_128_l2_sq8 + cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_10m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 1024 diff --git a/tests/benchmark/milvus_benchmark/suites/2_cpu_search.yaml b/tests/benchmark/milvus_benchmark/suites/2_cpu_search.yaml new file mode 100644 index 0000000000..17851d3b8f --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_cpu_search.yaml @@ -0,0 +1,29 @@ +search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_10w_128_l2_sq8 + cache_config.cpu_cache_capacity: 150GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 + ni_per: 50000 + index_type: ivf_flat + index_param: + nlist: 16384 + + run_count: 2 + top_ks: [1] + nqs: [1, 10] + search_params: + - + nprobe: 8 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_build.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_build.yaml new file mode 100644 index 0000000000..4a16d35732 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_build.yaml @@ -0,0 +1,22 @@ +insert_build_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_1m_128_128_l2_pq + cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_10m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 16384 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_cluster.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_cluster.yaml new file mode 100644 index 0000000000..5b38698b50 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_cluster.yaml @@ -0,0 +1,24 @@ +insert_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/sift_1m_128_l2 + cache_config.cpu_cache_capacity: 4GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + 
gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_10m_128_l2 +# other_fields: int,float + ni_per: 50000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_data.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_data.yaml new file mode 100644 index 0000000000..c5ac542069 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_data.yaml @@ -0,0 +1,13 @@ +insert_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_2/cluster/sift_1m_128_l2 + wal_enable: true + collection_name: sift_1m_128_l2 +# other_fields: int,float + ni_per: 50000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_get.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_get.yaml new file mode 100644 index 0000000000..0d04899947 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_get.yaml @@ -0,0 +1,13 @@ +insert_get_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/sift_1m_128_128_l2 + collection_name: sift_1m_128_l2 + ni_per: 50000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 16384 + ids_length_list: + [1, 100, 100000] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_search.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_search.yaml new file mode 100644 index 0000000000..dae540b335 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_search.yaml @@ -0,0 +1,157 @@ +insert_search_performance: + collections: +# - +# milvus: +# db_config.primary_path: /test/milvus/distribued/sift_10m_128_l2_flat +# cache_config.cpu_cache_capacity: 8GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 200 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 4GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# server: +# cpus: 64 +# collection_name: sift_10m_128_l2 +# ni_per: 50000 +# build_index: true +# index_type: flat +# index_param: +# nlist: 2048 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 200, 500, 1000, 1200] +# search_params: +# - +# nprobe: 32 +# - +# nprobe: 64 +# - +# milvus: +# db_config.primary_path: /test/milvus/distribued/sift_10m_128_l2_ivf_flat +# cache_config.cpu_cache_capacity: 8GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 200 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 4GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# server: +# cpus: 64 +# collection_name: sift_10m_128_l2 +# ni_per: 50000 +# build_index: true +# index_type: ivf_flat +# index_param: +# nlist: 2048 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 200, 500, 1000, 1200] +# search_params: +# - +# nprobe: 32 +# - +# nprobe: 64 +# - +# milvus: +# db_config.primary_path: /test/milvus/distribued/sift_10m_128_l2_ivf_sq8 +# cache_config.cpu_cache_capacity: 8GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 200 +# gpu_resource_config.enable: true +# 
gpu_resource_config.cache_capacity: 4GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# server: +# cpus: 64 +# collection_name: sift_10m_128_l2 +# ni_per: 50000 +# build_index: true +# index_type: ivf_sq8 +# index_param: +# nlist: 2048 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 200, 500, 1000, 1200] +# search_params: +# - +# nprobe: 32 +# - +# nprobe: 64 + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10m_128_l2_ivf_pq + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + server: + cpus: 64 + collection_name: sift_10m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_pq + index_param: + m: 32 + nlist: 2048 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 32 + - + nprobe: 64 +# - +# milvus: +# db_config.primary_path: /test/milvus/distribued/sift_10m_128_l2_ivf_hnsw +# cache_config.cpu_cache_capacity: 8GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 200 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 4GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# server: +# cpus: 64 +# collection_name: sift_10m_128_l2 +# ni_per: 50000 +# build_index: true +# index_type: hnsw +# index_param: +# M: 32 +# efConstruction: 100 +# run_count: 2 +# top_ks: [1, 10, 100] +# nqs: [1, 10, 100, 200, 500, 1000, 1200] +# search_params: +# - +# ef: 120 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_search_debug.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_search_debug.yaml new file mode 100644 index 0000000000..82adef5a07 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_search_debug.yaml @@ -0,0 +1,33 @@ +insert_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + server: + cpus: 64 + collection_name: sift_10w_128_l2 + ni_per: 50000 + build_index: false + index_type: ivf_flat + index_param: + nlist: 1024 + run_count: 2 + top_ks: [1, 10] + nqs: [1, 10, 100] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m.yaml new file mode 100644 index 0000000000..d572ed66c6 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m.yaml @@ -0,0 +1,33 @@ +insert_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: 
true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + server: + cpus: 64 + collection_name: sift_10m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_flat + index_param: + nlist: 1024 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_1024.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_1024.yaml new file mode 100644 index 0000000000..bdd1d2cd95 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_1024.yaml @@ -0,0 +1,33 @@ +insert_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + server: + cpus: 64 + collection_name: sift_10m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_flat + index_param: + nlist: 1024 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_2048.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_2048.yaml new file mode 100644 index 0000000000..19feb0c57f --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_2048.yaml @@ -0,0 +1,89 @@ +insert_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + server: + cpus: 64 + collection_name: sift_10m_128_l2 + ni_per: 50000 + build_index: false + index_type: flat + index_param: + nlist: 2048 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + server: + cpus: 64 + collection_name: sift_10m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_flat + index_param: + nlist: 2048 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_sq8 + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + server: + cpus: 64 + collection_name: sift_10m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 2048 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 + + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_pq + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + server: + cpus: 64 + collection_name: sift_10m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_pq + index_param: + nlist: 
2048 + m: 32 + nbits: 8 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_4096.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_4096.yaml new file mode 100644 index 0000000000..cf3547fbdc --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_4096.yaml @@ -0,0 +1,33 @@ +insert_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + server: + cpus: 64 + collection_name: sift_10m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_flat + index_param: + nlist: 4096 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_512.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_512.yaml new file mode 100644 index 0000000000..25d0ceca38 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift10m_512.yaml @@ -0,0 +1,33 @@ +insert_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + server: + cpus: 64 + collection_name: sift_10m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_flat + index_param: + nlist: 512 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_1024.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_1024.yaml new file mode 100644 index 0000000000..54a2ae3265 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_1024.yaml @@ -0,0 +1,33 @@ +insert_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + server: + cpus: 64 + collection_name: sift_50m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_flat + index_param: + nlist: 1024 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_2048.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_2048.yaml new file mode 
100644 index 0000000000..651158ea76 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_2048.yaml @@ -0,0 +1,33 @@ +insert_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + server: + cpus: 64 + collection_name: sift_50m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_flat + index_param: + nlist: 2048 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_4096.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_4096.yaml new file mode 100644 index 0000000000..14c2a48f5e --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_4096.yaml @@ -0,0 +1,33 @@ +insert_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + server: + cpus: 64 + collection_name: sift_50m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_flat + index_param: + nlist: 4096 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_512.yaml b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_512.yaml new file mode 100644 index 0000000000..03079145af --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_insert_search_sift50m_512.yaml @@ -0,0 +1,33 @@ +insert_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/distribued/sift_10w_128_l2_ivf_flat + cache_config.cpu_cache_capacity: 8GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + server: + cpus: 64 + collection_name: sift_50m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_flat + index_param: + nlist: 512 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000, 1200] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/2_locust_insert.yaml b/tests/benchmark/milvus_benchmark/suites/2_locust_insert.yaml new file mode 100644 index 0000000000..744e9606e6 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_locust_insert.yaml @@ -0,0 +1,26 @@ +locust_insert_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/insert_sift_1m_128_l2_2 + collection_name: local_1m_128_l2 + ni_per: 50000 + build_index: false + index_type: ivf_sq8 + 
index_param: + nlist: 1024 + task: + load_shape: false + step_time: 100 + step_load: 50 + spawn_rate: 2 + connection_num: 1 + clients_num: 100 + during_time: 600 + types: + - + type: insert + weight: 1 + params: + ni_per: 1 + diff --git a/tests/benchmark/milvus_benchmark/suites/2_locust_insert_5h.yaml b/tests/benchmark/milvus_benchmark/suites/2_locust_insert_5h.yaml new file mode 100644 index 0000000000..ba06977876 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_locust_insert_5h.yaml @@ -0,0 +1,34 @@ +locust_insert_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/insert_sift_1m_128_l2_2 + cache_config.cpu_cache_capacity: 8GB + cache_config.insert_buffer_size: 2GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_1m_128_l2 + ni_per: 50000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 + task: + connection_num: 1 + clients_num: 10 + hatch_rate: 2 + during_time: 18000 + types: + - + type: insert + weight: 1 + params: + ni_per: 1 + diff --git a/tests/benchmark/milvus_benchmark/suites/2_locust_insert_flush.yaml b/tests/benchmark/milvus_benchmark/suites/2_locust_insert_flush.yaml new file mode 100644 index 0000000000..d798e85950 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_locust_insert_flush.yaml @@ -0,0 +1,25 @@ +locust_insert_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/insert_sift_1m_128_l2_2 + collection_name: local_1m_128_l2 + ni_per: 50000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 + task: + load_shape: false + step_time: 100 + step_load: 50 + spawn_rate: 2 + connection_num: 1 + clients_num: 100 + during_time: 600 + types: + - + type: insert_flush + weight: 1 + params: + ni_per: 1 diff --git a/tests/benchmark/milvus_benchmark/suites/2_locust_load_insert.yaml b/tests/benchmark/milvus_benchmark/suites/2_locust_load_insert.yaml new file mode 100644 index 0000000000..98f8991e31 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_locust_load_insert.yaml @@ -0,0 +1,25 @@ +locust_insert_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/insert_sift_1m_128_l2_2 + collection_name: local_1m_128_l2 + ni_per: 50000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 + task: + load_shape: true + step_time: 100 + step_load: 50 + spawn_rate: 50 + connection_num: 1 + clients_num: 100 + during_time: 600 + types: + - + type: insert + weight: 1 + params: + ni_per: 1 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/2_locust_load_insert_flush.yaml b/tests/benchmark/milvus_benchmark/suites/2_locust_load_insert_flush.yaml new file mode 100644 index 0000000000..c7314ea657 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_locust_load_insert_flush.yaml @@ -0,0 +1,25 @@ +locust_insert_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/insert_sift_1m_128_l2_2 + collection_name: local_1m_128_l2 + ni_per: 50000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 + task: + load_shape: true + step_time: 100 + step_load: 50 + spawn_rate: 50 + connection_num: 1 + clients_num: 100 + during_time: 600 + types: + - + type: 
insert_flush + weight: 1 + params: + ni_per: 1 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/2_locust_random.yaml b/tests/benchmark/milvus_benchmark/suites/2_locust_random.yaml new file mode 100644 index 0000000000..324214c4c5 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_locust_random.yaml @@ -0,0 +1,48 @@ +locust_random_performance: + collections: + - + milvus: + cache_config.insert_buffer_size: 2GB + engine_config.use_blas_threshold: 1100 + wal_enable: true + collection_name: sift_1m_128_l2 + # other_fields: int + ni_per: 50000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 + task: + types: + - type: flush + weight: 1 + - + type: query + weight: 20 + params: + top_k: 10 + nq: 100 + # filters: + # - range: + # int64: + # LT: 0 + # GT: 1000000 + search_param: + nprobe: 16 + - + type: insert + weight: 20 + params: + ni_per: 1 + - + type: load + weight: 1 + - + type: get + weight: 2 + params: + ids_length: 10 + connection_num: 1 + clients_num: 20 + hatch_rate: 2 + during_time: 600 diff --git a/tests/benchmark/milvus_benchmark/suites/2_locust_random_load_release.yaml b/tests/benchmark/milvus_benchmark/suites/2_locust_random_load_release.yaml new file mode 100644 index 0000000000..b46637a218 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_locust_random_load_release.yaml @@ -0,0 +1,32 @@ +locust_random_performance: + collections: + - + milvus: + cache_config.insert_buffer_size: 2GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + gpu0 + gpu_resource_config.build_index_resources: + gpu0 + wal_enable: true + collection_name: sift_10w_128_l2 + # other_fields: int + ni_per: 50000 + build_index: false + index_type: ivf_sq8 + index_param: + nlist: 1024 + task: + types: + - type: load + weight: 1 + - + type: release + weight: 2 + connection_num: 1 + clients_num: 100 + hatch_rate: 2 + during_time: 1800 diff --git a/tests/benchmark/milvus_benchmark/suites/2_locust_search.yaml b/tests/benchmark/milvus_benchmark/suites/2_locust_search.yaml new file mode 100644 index 0000000000..7725841c9a --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_locust_search.yaml @@ -0,0 +1,43 @@ +locust_search_performance: + collections: + - + milvus: + cache_config.cpu_cache_capacity: 8GB + cache_config.insert_buffer_size: 2GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_1m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 1024 + task: + connection_num: 1 + clients_num: 100 + hatch_rate: 2 + during_time: 600 + types: + - + type: query + weight: 1 + params: + top_k: 10 + nq: 1 + # filters: + # - + # range: + # int64: + # LT: 0 + # GT: 1000000 + search_param: + nprobe: 16 diff --git a/tests/benchmark/milvus_benchmark/suites/2_locust_search_5h.yaml b/tests/benchmark/milvus_benchmark/suites/2_locust_search_5h.yaml new file mode 100644 index 0000000000..64bb6c7f0c --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_locust_search_5h.yaml @@ -0,0 +1,43 @@ +locust_search_performance: + collections: + - + milvus: + cache_config.cpu_cache_capacity: 8GB + 
cache_config.insert_buffer_size: 2GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_1m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 1024 + task: + connection_num: 1 + clients_num: 10 + hatch_rate: 2 + during_time: 18000 + types: + - + type: query + weight: 1 + params: + top_k: 10 + nq: 1 + # filters: + # - + # range: + # int64: + # LT: 0 + # GT: 1000000 + search_param: + nprobe: 16 diff --git a/tests/benchmark/milvus_benchmark/suites/2_locust_search_index.yaml b/tests/benchmark/milvus_benchmark/suites/2_locust_search_index.yaml new file mode 100644 index 0000000000..7725841c9a --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/2_locust_search_index.yaml @@ -0,0 +1,43 @@ +locust_search_performance: + collections: + - + milvus: + cache_config.cpu_cache_capacity: 8GB + cache_config.insert_buffer_size: 2GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_1m_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 1024 + task: + connection_num: 1 + clients_num: 100 + hatch_rate: 2 + during_time: 600 + types: + - + type: query + weight: 1 + params: + top_k: 10 + nq: 1 + # filters: + # - + # range: + # int64: + # LT: 0 + # GT: 1000000 + search_param: + nprobe: 16 diff --git a/tests/benchmark/milvus_benchmark/suites/add_flush_performance.yaml b/tests/benchmark/milvus_benchmark/suites/add_flush_performance.yaml new file mode 100644 index 0000000000..d8c1308732 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/add_flush_performance.yaml @@ -0,0 +1,20 @@ +insert_flush_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_080/sift_2m_128_128_l2_flush + cache_config.cpu_cache_capacity: 8 + cache_config.insert_buffer_size: 2 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + db_config.auto_flush_interval: 300 + collection_name: sift_2m_128_128_l2 + ni_per: 100000 diff --git a/tests/benchmark/milvus_benchmark/suites/ann_debug.yaml b/tests/benchmark/milvus_benchmark/suites/ann_debug.yaml new file mode 100644 index 0000000000..7ed7c948c3 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/ann_debug.yaml @@ -0,0 +1,26 @@ +ann_accuracy: + collections: + - + server: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_file_sizes: [1024] + index_types: ['ivf_sq8'] + index_params: + nlist: [1024] + top_ks: [10] + nqs: [10000] + search_params: + 
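For orientation, the per-request work implied by `type: query` with `top_k: 10`, `nq: 1` and `search_param: nprobe: 16` is one small search batch per Locust task. The call shape below is illustrative only; `client.search` is a placeholder signature, not a specific SDK method.

```python
# Illustrative sketch of one query task; `client` is a hypothetical
# wrapper, not a named SDK. Parameter values come from the suites above.
import numpy as np

DIM, NQ, TOP_K, NPROBE = 128, 1, 10, 16

def one_query(client, collection="sift_1m_128_l2"):
    vectors = np.random.random((NQ, DIM)).astype(np.float32)
    return client.search(collection, vectors,
                         top_k=TOP_K, params={"nprobe": NPROBE})
```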
nprobe: [1, 8] + diff --git a/tests/benchmark/milvus_benchmark/suites/clean.yaml b/tests/benchmark/milvus_benchmark/suites/clean.yaml new file mode 100644 index 0000000000..18d722ed4b --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/clean.yaml @@ -0,0 +1,24 @@ +search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_7/not_exist + cache_config.cpu_cache_capacity: 64 + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_2048_128_l2 + run_count: 2 + top_ks: [1] + nqs: [1] + search_params: + - + nprobe: 8 diff --git a/tests/benchmark/milvus_benchmark/suites/cluster_locust_mix.yaml b/tests/benchmark/milvus_benchmark/suites/cluster_locust_mix.yaml new file mode 100644 index 0000000000..cb1b457644 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cluster_locust_mix.yaml @@ -0,0 +1,47 @@ +locust_mix_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/locust_mix + suffix_path: true + cache_config.insert_buffer_size: 2GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + gpu0 + gpu_resource_config.build_index_resources: + gpu0 + wal_enable: true + external_mysql: true + cluster: true + readonly: + replicas: 2 + collection_name: sift_1m_500000_128_l2 + ni_per: 50000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 1024 + task: + types: + - type: flush + weight: 1 + - + type: query + weight: 30 + params: + top_k: 10 + nq: 100 + search_param: + nprobe: 16 + - + type: insert + weight: 10 + params: + nb: 1 + connection_num: 1 + clients_num: 10 + hatch_rate: 2 + during_time: 600 diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_accuracy.yaml b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy.yaml new file mode 100644 index 0000000000..d780178089 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy.yaml @@ -0,0 +1,61 @@ +accuracy: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_50m_2048_128_ip_sq8_wal + cache_config.cpu_cache_capacity: 30 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_2048_128_ip + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_50m_2048_128_l2_hnsw_wal + cache_config.cpu_cache_capacity: 64 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_2048_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + ef: [64, 100, 200, 500, 700] + + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_1b_2048_128_l2_sq8_wal + cache_config.cpu_cache_capacity: 150 + 
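Note how an accuracy entry fans out: each run covers the cross product of its `top_ks`, `nqs` and `search_params` lists, so the cpu_accuracy sweep above (one top_k, one nq, ten nprobe values) is ten runs per collection.

```python
# Sketch of the cpu_accuracy sweep expansion: one run per
# (top_k, nq, nprobe) combination.
from itertools import product

top_ks, nqs = [64], [1000]
nprobes = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]

for top_k, nq, nprobe in product(top_ks, nqs, nprobes):
    print(f"top_k={top_k} nq={nq} nprobe={nprobe}")
```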
engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_2048_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann.yaml b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann.yaml new file mode 100644 index 0000000000..b980186ae0 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann.yaml @@ -0,0 +1,212 @@ +ann_accuracy: + collections: + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['flat'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 512, 16384] + + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['ivf_flat', 'ivf_sq8'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['ivf_pq'] + index_params: + nlist: [16384] + m: [32] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['annoy'] + index_params: + n_trees: [8, 32] + top_ks: [10] + nqs: [10000] + search_params: + search_k: [50, 100, 500, 1000] + + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_types: ['hnsw'] + index_params: + M: [16] + efConstruction: 
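The ann_accuracy suites point `source_file` at ann-benchmarks-style HDF5 files, which ship `train`, `test` and ground-truth `neighbors` datasets. A minimal loader, using the path from the suites:

```python
# Sketch: reading an ann-benchmarks HDF5 file as the ann_accuracy
# suites consume it. Path is the suite's test-host location.
import h5py

with h5py.File("/test/milvus/ann_hdf5/sift-128-euclidean.hdf5", "r") as f:
    train = f["train"][:]      # base vectors to insert
    test = f["test"][:]        # query vectors (nqs selects how many)
    truth = f["neighbors"][:]  # ground-truth ids used for recall
print(train.shape, test.shape, truth.shape)
```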
[500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [16, 32, 64, 128, 256, 512] + + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['flat'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 512, 16384] + + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['ivf_flat', 'ivf_sq8'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['ivf_pq'] + index_params: + nlist: [16384] + m: [20] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_types: ['hnsw'] + index_params: + M: [36] + efConstruction: [500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [10, 16, 32, 64, 128, 256, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_debug.yaml b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_debug.yaml new file mode 100644 index 0000000000..019a05c06d --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_debug.yaml @@ -0,0 +1,25 @@ +ann_accuracy: + collections: + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_file_sizes: [1024] + index_types: ['ivf_sq8', 'ivf_sq8h'] + index_params: + nlist: 2048 + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + top_ks: [100] + nqs: [1000] diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_hnsw.yaml b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_hnsw.yaml new file mode 100644 
index 0000000000..8f649e136d --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_hnsw.yaml @@ -0,0 +1,43 @@ +ann_accuracy: + collections: + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_file_sizes: [256] + index_types: ['hnsw'] + nlists: [16384] + search_params: + nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + top_ks: [10] + nqs: [10000] + + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_file_sizes: [256] + index_types: ['hnsw'] + nlists: [16384] + search_params: + nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + top_ks: [10] + nqs: [10000] diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_pq.yaml b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_pq.yaml new file mode 100644 index 0000000000..1e087d3fbb --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_ann_pq.yaml @@ -0,0 +1,26 @@ +ann_accuracy: + collections: + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_file_sizes: [1024] + index_types: ['ivf_pq'] + index_params: + nlist: [16384] + m: [20] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_nsg.yaml b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_nsg.yaml new file mode 100644 index 0000000000..c2e6194e10 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_accuracy_nsg.yaml @@ -0,0 +1,21 @@ +accuracy: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_10m_1024_128_l2_nsg_wal + cache_config.cpu_cache_capacity: 50 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_10m_1024_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + search_length: [50] diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_build_performance.yaml b/tests/benchmark/milvus_benchmark/suites/cpu_build_performance.yaml new file mode 100644 index 0000000000..660aff14f5 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_build_performance.yaml @@ -0,0 +1,19 @@ +build_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_070/sift_50m_1024_128_l2_sq8h_wal + 
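In these entries `index_params` maps a parameter name to a list of candidate values (e.g. `nlist: [16384]`, `m: [20]`); the natural expansion builds one index per combination. This is the obvious reading, not verified harness code:

```python
# Sketch: expand an index_params mapping of name -> candidate list
# into one dict per index build.
from itertools import product

index_params = {"nlist": [16384], "m": [20]}
keys = sorted(index_params)
for values in product(*(index_params[k] for k in keys)):
    print(dict(zip(keys, values)))  # {'m': 20, 'nlist': 16384}
```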
cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_1024_128_l2 + index_type: ivf_sq8h + nlist: 16384 diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_search_binary.yaml b/tests/benchmark/milvus_benchmark/suites/cpu_search_binary.yaml new file mode 100644 index 0000000000..37aa2d0a35 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_search_binary.yaml @@ -0,0 +1,67 @@ +search_performance: + collections: + # - + # server: + # db_config.primary_path: /test/milvus/db_data_7/sub_50m_512_512_sub_wal + # cache_config.cpu_cache_capacity: 32 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 1 + # gpu_resource_config.enable: false + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # gpu_resource_config.build_index_resources: + # - gpu0 + # collection_name: sub_50m_512_512_sub + # run_count: 2 + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + # search_params: + # - + # nprobe: 8 + # - + # nprobe: 32 + + # - + # server: + # db_config.primary_path: /test/milvus/db_data_7/super_50m_512_512_super_wal + # cache_config.cpu_cache_capacity: 32 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 1 + # gpu_resource_config.enable: false + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # gpu_resource_config.build_index_resources: + # - gpu0 + # collection_name: super_50m_512_512_super + # run_count: 2 + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + # search_params: + # - + # nprobe: 8 + # - + # nprobe: 32 + + - + server: + db_config.primary_path: /test/milvus/db_data_7/jaccard_50m_512_512_jaccard_wal + cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: jaccard_50m_512_512_jaccard + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_search_performance_jaccard.yaml b/tests/benchmark/milvus_benchmark/suites/cpu_search_performance_jaccard.yaml new file mode 100644 index 0000000000..0266cababe --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_search_performance_jaccard.yaml @@ -0,0 +1,20 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_070/jaccard_50m_512_512_jaccard_wal + cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: jaccard_50m_512_512_jaccard + run_count: 2 + search_params: + nprobes: [8, 32] + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_search_performance_sift1b.yaml 
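The jaccard (and later hamming) suites run on binary vectors, where a 512-bit vector packs into 64 bytes; shapes below mirror the `*_512_512_*` collections above.

```python
# Sketch: generating and packing binary vectors of the dimensionality
# used by the jaccard_50m_512_512_jaccard collection.
import numpy as np

dim = 512
bits = np.random.randint(0, 2, size=(10, dim), dtype=np.uint8)
packed = np.packbits(bits, axis=1)
assert packed.shape == (10, dim // 8)  # 8 bits per byte
```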
b/tests/benchmark/milvus_benchmark/suites/cpu_search_performance_sift1b.yaml new file mode 100644 index 0000000000..f4a48f9a15 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_search_performance_sift1b.yaml @@ -0,0 +1,22 @@ +search_performance: + collections: + + # sift_1b + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8 + cache_config.cpu_cache_capacity: 150 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: sift_1b_2048_128_l2 + run_count: 2 + search_params: + nprobes: [8, 32] + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_search_performance_sift50m.yaml b/tests/benchmark/milvus_benchmark/suites/cpu_search_performance_sift50m.yaml new file mode 100644 index 0000000000..b74bb9e56e --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_search_performance_sift50m.yaml @@ -0,0 +1,20 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8 + cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: sift_50m_1024_128_l2 + run_count: 2 + search_params: + nprobes: [8, 32] + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] diff --git a/tests/benchmark/milvus_benchmark/suites/cpu_stability_sift50m.yaml b/tests/benchmark/milvus_benchmark/suites/cpu_stability_sift50m.yaml new file mode 100644 index 0000000000..61b203173e --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/cpu_stability_sift50m.yaml @@ -0,0 +1,27 @@ +stability: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_8192_stability + cache_config.cpu_cache_capacity: 64 + cache_config.cache_insert_data: true + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_1024_128_l2 + during_time: 480 + search_params: + nprobes: 1-200 + top_ks: 1-200 + nqs: 1-200 + # length of insert vectors + insert_xb: 100000 + # insert after search 4 times + insert_interval: 4 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/debug.yaml b/tests/benchmark/milvus_benchmark/suites/debug.yaml new file mode 100644 index 0000000000..07c0297ef7 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/debug.yaml @@ -0,0 +1,90 @@ +search_performance: + collections: +# - +# server: +# db_config.primary_path: /test/milvus/db_data_8/sift_1b_2048_128_l2_sq8_wal +# cache_config.cpu_cache_capacity: 150GB +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 200 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6GB +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: 
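The stability suite's `nprobes: 1-200` style values are ranges rather than lists, and its trailing comments spell out the cadence: insert `insert_xb` vectors after every `insert_interval` searches. One plausible loop, inferred from the field names:

```python
# Hedged sketch of the cpu_stability_sift50m loop: random draws from
# the 1-200 ranges, with an insert interleaved every 4 searches.
import random
import time

DURING_TIME, INSERT_INTERVAL, INSERT_XB = 480, 4, 100000

def stability_loop(search_fn, insert_fn):
    searches = 0
    start = time.time()
    while time.time() - start < DURING_TIME:
        search_fn(nq=random.randint(1, 200),
                  top_k=random.randint(1, 200),
                  nprobe=random.randint(1, 200))
        searches += 1
        if searches % INSERT_INTERVAL == 0:
            insert_fn(INSERT_XB)  # "insert after search 4 times"
```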
sift_1b_2048_128_l2 +# run_count: 4 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 1000] +# search_params: +# - +# nprobe: 8 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_7/sift_1b_2048_128_l2_sq8h_wal +# cache_config.cpu_cache_capacity: 150 +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 200 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6 +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_1b_2048_128_l2 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 200, 500, 1000] +# search_params: +# - +# nprobe: 8 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_7/sift_50m_2048_128_l2_ivf_wal +# cache_config.cpu_cache_capacity: 64 +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 200 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6 +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_50m_2048_128_l2 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 200, 500, 1000] +# search_params: +# - +# nprobe: 8 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_7/sift_50m_2048_128_l2_sq8h_wal +# cache_config.cpu_cache_capacity: 64 +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 200 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6 +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_50m_2048_128_l2 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 200, 500, 1000] +# search_params: +# - +# nprobe: 8 diff --git a/tests/benchmark/milvus_benchmark/suites/debug_build.yaml b/tests/benchmark/milvus_benchmark/suites/debug_build.yaml new file mode 100644 index 0000000000..09566b5c9c --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/debug_build.yaml @@ -0,0 +1,23 @@ +build_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_10m_1024_128_l2_nsg_wal + cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_10m_1024_128_l2 + index_type: nsg + index_param: + search_length: 50 + out_degree: 40 + candidate_pool_size: 100 + knng: 50 diff --git a/tests/benchmark/milvus_benchmark/suites/debug_gpu_search.yaml b/tests/benchmark/milvus_benchmark/suites/debug_gpu_search.yaml new file mode 100644 index 0000000000..924d7fb684 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/debug_gpu_search.yaml @@ -0,0 +1,30 @@ +search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/sift_50m_128_l2_sq8 + cache_config.cpu_cache_capacity: 64GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + 
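Keys such as `db_config.primary_path` and `gpu_resource_config.enable` in the `milvus:`/`server:` blocks read as dotted paths into the server's nested YAML config. A helper that applies one such override, assuming that mapping:

```python
# Sketch: apply a dotted-key override to a nested config dict. That
# these keys land in the milvus server config this way is the obvious
# reading of the suites, not verified against the harness.
def apply_override(config, dotted_key, value):
    node = config
    *parents, leaf = dotted_key.split(".")
    for part in parents:
        node = node.setdefault(part, {})
    node[leaf] = value

cfg = {}
apply_override(cfg, "cache_config.cpu_cache_capacity", "64GB")
print(cfg)  # {'cache_config': {'cpu_cache_capacity': '64GB'}}
```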
external_mysql: true + cluster: true + readonly: + replicas: 3 + collection_name: sift_50m_128_l2_sq8_011 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + nprobe: 8 + - + nprobe: 32 diff --git a/tests/benchmark/milvus_benchmark/suites/flush_kill_query_pod.yaml b/tests/benchmark/milvus_benchmark/suites/flush_kill_query_pod.yaml new file mode 100644 index 0000000000..a29461238b --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/flush_kill_query_pod.yaml @@ -0,0 +1,44 @@ +simple_chaos: + collections: + - + milvus: null + before: + - + interface_name: create_collection + params: + data_type: local + dimension: 128 + - + interface_name: insert + params: + batch_size: 5000 + collection_size: 1000000 +# - interface_name: create_index +# params: +# metric_type: l2 +# index_type: ivf_flat +# index_param: +# nlist: 16384 + processing: + interface_name: create_index + params: + field_name: float_vector + metric_type: l2 + index_type: ivf_flat + index_param: + nlist: 16384 + chaos: + kind: PodChaos + spec: + action: pod-kill + selector: + labelSelectors: + "statefulset.kubernetes.io/pod-name": etcd + scheduler: + cron: "@every 60s" + assertion: fail + after: + interface_name: describe_index + params: + field_name: float_vector + assertion: pass \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_accuracy.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_accuracy.yaml new file mode 100644 index 0000000000..2f5a086210 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/gpu_accuracy.yaml @@ -0,0 +1,41 @@ +accuracy: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_50m_2048_128_l2_sq8_wal + cache_config.cpu_cache_capacity: 30 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_2048_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_1b_2048_128_l2_sq8h_wal + cache_config.cpu_cache_capacity: 150 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_2048_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_ann.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_ann.yaml new file mode 100644 index 0000000000..900abf73ed --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_ann.yaml @@ -0,0 +1,172 @@ +ann_accuracy: + collections: + - + server: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_file_sizes: [1024] + index_types: ['flat', 'ivf_flat', 
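The simple_chaos suite is effectively a four-stage script: `before` setup calls, a `processing` call racing the Chaos Mesh PodChaos fault, and an `after` check, each with a pass/fail assertion. A stub interpreter for one stage, with a hypothetical dispatch-by-`interface_name` client:

```python
# Illustrative stub only: run a list of suite steps against a client
# and compare the outcome with the stage's "assertion" field. The
# client object and its methods are hypothetical.
def run_steps(client, steps, expect="pass"):
    ok = True
    for step in steps:
        method = getattr(client, step["interface_name"])
        try:
            method(**step.get("params", {}))
        except Exception:
            ok = False
    if (expect == "pass") != ok:
        raise AssertionError(f"stage expected to {expect}")
```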
'ivf_sq8', 'ivf_sq8h'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 16384] + + - + server: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_file_sizes: [1024] + index_types: ['ivf_pq'] + index_params: + nlist: [16384] + m: [32] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + server: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_file_sizes: [256] + index_types: ['hnsw'] + index_params: + M: [16] + efConstruction: [500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [16, 32, 64, 128, 256, 512] + + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_file_sizes: [1024] + index_types: ['flat', 'ivf_flat', 'ivf_sq8', 'ivf_sq8h'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 16384] + + - + server: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_file_sizes: [256] + index_types: ['hnsw'] + index_params: + M: [36] + efConstruction: [500] + top_ks: [10] + nqs: [10000] + search_params: + ef: [10, 16, 32, 64, 128, 256, 512] + + - + server: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/kosarak-27983-jaccard.hdf5 + collection_name: kosarak_27984_jaccard + index_file_sizes: [1024] + index_types: ['flat', 'ivf_flat'] + index_params: + nlist: [2048] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + + - + server: + cache_config.cpu_cache_capacity: 16GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + 
gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-256-hamming.hdf5 + collection_name: sift_256_hamming + index_file_sizes: [1024] + index_types: ['flat', 'ivf_flat'] + index_params: + nlist: [2048] + top_ks: [100] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_debug.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_debug.yaml new file mode 100644 index 0000000000..4c4b75f725 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_debug.yaml @@ -0,0 +1,40 @@ +accuracy: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_1b_2048_128_l2_sq8h_wal + cache_config.cpu_cache_capacity: 150 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_2048_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_1b_2048_128_l2_sq8h_wal + cache_config.cpu_cache_capacity: 150 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_2048_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_sift1b.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_sift1b.yaml new file mode 100644 index 0000000000..dbfe2abe8c --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_sift1b.yaml @@ -0,0 +1,59 @@ +accuracy: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8 + cache_config.cpu_cache_capacity: 150 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_2048_128_l2 + search_params: + nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + top_ks: [64] + nqs: [1000] + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8h + cache_config.cpu_cache_capacity: 150 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_2048_128_l2 + search_params: + nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + top_ks: [64] + nqs: [1000] + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq + cache_config.cpu_cache_capacity: 150 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - 
gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_2048_128_l2 + search_params: + nprobes: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] + top_ks: [64] + nqs: [1000] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_sift1m.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_sift1m.yaml new file mode 100644 index 0000000000..0955622270 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/gpu_accuracy_sift1m.yaml @@ -0,0 +1,21 @@ +accuracy: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_1m_256_128_l2_sq8 + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1m_256_128_l2 + top_ks: [64] + nqs: [1000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_build_performance_jaccard50m.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_build_performance_jaccard50m.yaml new file mode 100644 index 0000000000..fdf9cccd26 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/gpu_build_performance_jaccard50m.yaml @@ -0,0 +1,20 @@ +build_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_7/jaccard_50m_512_512_jaccard_ivf_wal_debug + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: jaccard_50m_512_512_jaccard + index_type: ivf_flat + index_param: + nlist: 2048 diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_build_sift1b_sq8h.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_build_sift1b_sq8h.yaml new file mode 100644 index 0000000000..a4d823cafb --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/gpu_build_sift1b_sq8h.yaml @@ -0,0 +1,20 @@ +build_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_1b_2048_128_l2_sq8h_wal + cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_2048_128_l2 + index_type: ivf_sq8h + index_param: + nlist: 16384 diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_search_performance.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_search_performance.yaml new file mode 100644 index 0000000000..78f6334a7f --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/gpu_search_performance.yaml @@ -0,0 +1,247 @@ +search_performance: + collections: + # sift_50m + - + server: + db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_50m_1024_128_l2_ivf + cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + 
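The accuracy suites ultimately reduce to recall@k: the share of returned ids that also appear in the ground-truth top-k from the dataset's `neighbors` table.

```python
# Sketch: recall@k over a batch of queries.
import numpy as np

def recall_at_k(result_ids, truth_ids, k):
    hits = sum(len(set(r[:k]) & set(t[:k]))
               for r, t in zip(result_ids, truth_ids))
    return hits / (len(result_ids) * k)

truth = np.array([[1, 2, 3], [4, 5, 6]])
found = np.array([[1, 9, 3], [4, 5, 7]])
print(recall_at_k(found, truth, 3))  # 0.666...
```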
gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_1024_128_l2 + run_count: 2 + search_params: + nprobes: [8, 32] + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + - + server: + db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_50m_1024_128_l2_sq8 + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_1024_128_l2 + run_count: 2 + search_params: + nprobes: [8, 32] + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + - + server: + db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_50m_1024_128_l2_sq8h + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_1024_128_l2 + run_count: 2 + search_params: + nprobes: [8, 32] + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_pq + # cache_config.cpu_cache_capacity: 32 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_1024_128_l2 + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_50m_1024_128_l2_nsg + # cache_config.cpu_cache_capacity: 50 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_1024_128_l2 + # run_count: 2 + # search_params: + # nprobes: [8] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + + # random_50m + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu_crud/random_50m_1024_512_ip_ivf + # cache_config.cpu_cache_capacity: 110 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: random_50m_1024_512_ip + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8 + # cache_config.cpu_cache_capacity: 30 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - 
gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: random_50m_1024_512_ip + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_sq8h + # cache_config.cpu_cache_capacity: 30 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: random_50m_1024_512_ip + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu/random_50m_1024_512_ip_nsg + # cache_config.cpu_cache_capacity: 200 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 6 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: random_50m_1024_512_ip + # run_count: 2 + # search_params: + # nprobes: [8] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + + # sift_1b + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_1b_1024_128_l2_sq8 + # cache_config.cpu_cache_capacity: 150 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_1b_1024_128_l2 + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_1b_2048_128_l2_sq8h + # cache_config.cpu_cache_capacity: 150 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_1b_2048_128_l2 + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq + # cache_config.cpu_cache_capacity: 150 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_1b_2048_128_l2 + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_search_performance_jaccard50m.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_search_performance_jaccard50m.yaml new file mode 100644 index 0000000000..c9a1ed99be --- /dev/null +++ 
b/tests/benchmark/milvus_benchmark/suites/gpu_search_performance_jaccard50m.yaml @@ -0,0 +1,22 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/jaccard_50m_128_512_jaccard_ivf + cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: jaccard_50m_128_512_jaccard + run_count: 1 + search_params: + nprobes: [8, 32] + top_ks: [1, 16, 64, 128, 256, 512, 1000] + nqs: [1, 10, 100, 200, 500, 1000] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_search_performance_sift1b.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_search_performance_sift1b.yaml new file mode 100644 index 0000000000..2e317d9861 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/gpu_search_performance_sift1b.yaml @@ -0,0 +1,62 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8 + cache_config.cpu_cache_capacity: 150 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_2048_128_l2 + run_count: 2 + search_params: + nprobes: [8, 32] + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_sq8h + cache_config.cpu_cache_capacity: 150 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_2048_128_l2 + run_count: 2 + search_params: + nprobes: [8, 32] + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/sift_1b_2048_128_l2_pq + cache_config.cpu_cache_capacity: 150 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_1b_2048_128_l2 + run_count: 2 + search_params: + nprobes: [8, 32] + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_search_performance_sift50m.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_search_performance_sift50m.yaml new file mode 100644 index 0000000000..4f49bf4937 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/gpu_search_performance_sift50m.yaml @@ -0,0 +1,146 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_ivf + cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + 
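`run_count: 2` in these search_performance entries repeats each (top_k, nq, nprobe) cell; taking the minimum of the repeats damps warm-up noise, though whether the harness reports min or mean is an assumption here.

```python
# Sketch: time one search cell run_count times and report both
# the best and the average latency.
import time

def time_cell(search_fn, run_count=2):
    samples = []
    for _ in range(run_count):
        t0 = time.perf_counter()
        search_fn()
        samples.append(time.perf_counter() - t0)
    return min(samples), sum(samples) / len(samples)

best, avg = time_cell(lambda: sum(range(100000)))  # stand-in workload
print(f"best={best:.6f}s avg={avg:.6f}s")
```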
gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + collection_name: sift_50m_1024_128_l2 + run_count: 2 + search_params: + nprobes: [8, 32] + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8 + # cache_config.cpu_cache_capacity: 16 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_1024_128_l2 + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8h + # cache_config.cpu_cache_capacity: 16 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_1024_128_l2 + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + + # git issue num: #626 + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_pq + # cache_config.cpu_cache_capacity: 32 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_1024_128_l2 + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_nsg + # cache_config.cpu_cache_capacity: 50 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_1024_128_l2 + # run_count: 2 + # search_params: + # nprobes: [8] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_8192 + # cache_config.cpu_cache_capacity: 16 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_1024_128_l2 + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] + # - + # server: + # db_config.primary_path: /test/milvus/db_data_gpu/sift_50m_1024_128_l2_sq8_4096 + # cache_config.cpu_cache_capacity: 16 + # engine_config.use_blas_threshold: 1100 + # engine_config.gpu_search_threshold: 200 + # gpu_resource_config.enable: true + # 
gpu_resource_config.cache_capacity: 4 + # gpu_resource_config.search_resources: + # - gpu0 + # - gpu1 + # gpu_resource_config.build_index_resources: + # - gpu0 + # - gpu1 + # collection_name: sift_50m_1024_128_l2 + # run_count: 2 + # search_params: + # nprobes: [8, 32] + # top_ks: [1, 10, 100, 1000] + # nqs: [1, 10, 100, 200, 500, 1000] \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/gpu_search_stability.yaml b/tests/benchmark/milvus_benchmark/suites/gpu_search_stability.yaml new file mode 100644 index 0000000000..a41ea817cd --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/gpu_search_stability.yaml @@ -0,0 +1,23 @@ +search_stability: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_gpu_crud/sift_50m_1024_128_l2_sq8 + cache_config.cpu_cache_capacity: 50 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 100 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + - gpu2 + - gpu3 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: sift_50m_1024_128_l2 + during_time: 240 + search_params: + nprobes: 1-200 + top_ks: 1-200 + nqs: 1-200 diff --git a/tests/benchmark/milvus_benchmark/suites/insert_binary.yaml b/tests/benchmark/milvus_benchmark/suites/insert_binary.yaml new file mode 100644 index 0000000000..79fa2b3567 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/insert_binary.yaml @@ -0,0 +1,39 @@ +insert_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_7/sub_50m_512_512_sub_wal + cache_config.cpu_cache_capacity: 8 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: sub_50m_512_512_sub + ni_per: 100000 + build_index: false + index_type: flat + index_param: + nlist: 2048 + + - + server: + db_config.primary_path: /test/milvus/db_data_7/super_50m_512_512_super_wal + cache_config.cpu_cache_capacity: 8 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: super_50m_512_512_super + ni_per: 100000 + build_index: false + index_type: flat + index_param: + nlist: 2048 diff --git a/tests/benchmark/milvus_benchmark/suites/insert_performance_deep1b.yaml b/tests/benchmark/milvus_benchmark/suites/insert_performance_deep1b.yaml new file mode 100644 index 0000000000..27dc83c63e --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/insert_performance_deep1b.yaml @@ -0,0 +1,87 @@ +insert_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_ivf + cache_config.cpu_cache_capacity: 8 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: deep_1b_1024_96_ip + ni_per: 100000 + build_index: false + # index_type: ivf_flat + # nlist: 16384 + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_sq8 + 
cache_config.cpu_cache_capacity: 8 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: deep_1b_1024_96_ip + ni_per: 100000 + build_index: false + # index_type: ivf_sq8 + # nlist: 16384 + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_sq8h + cache_config.cpu_cache_capacity: 8 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: deep_1b_1024_96_ip + ni_per: 100000 + build_index: false + # index_type: ivf_sq8h + # nlist: 16384 + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_pq + cache_config.cpu_cache_capacity: 8 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: deep_1b_1024_96_ip + ni_per: 100000 + build_index: false + # index_type: ivf_pq + # nlist: 16384 + - + server: + db_config.primary_path: /test/milvus/db_data_gpu/deep_1b_1024_96_ip_nsg + cache_config.cpu_cache_capacity: 8 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + collection_name: deep_1b_1024_96_ip + ni_per: 100000 + build_index: false + # index_type: nsg + # nlist: 16384 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/locust_cluster_search.yaml b/tests/benchmark/milvus_benchmark/suites/locust_cluster_search.yaml new file mode 100644 index 0000000000..d7641819a2 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/locust_cluster_search.yaml @@ -0,0 +1,45 @@ +locust_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/sift_1m_128_l2_2 + suffix_path: true + cache_config.cpu_cache_capacity: 8GB + cache_config.insert_buffer_size: 2GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_1m_128_l2_2 + ni_per: 50000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 16384 + task: + connection_num: 1 + clients_num: 100 + hatch_rate: 10 + during_time: 10 + types: + - + type: query + weight: 1 + params: + top_k: 10 + nq: 1 + # filters: + # - + # range: + # int64: + # LT: 0 + # GT: 1000000 + search_param: + nprobe: 16 diff --git a/tests/benchmark/milvus_benchmark/suites/locust_insert.yaml b/tests/benchmark/milvus_benchmark/suites/locust_insert.yaml new file mode 100644 index 0000000000..cdbdeaca20 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/locust_insert.yaml @@ -0,0 +1,23 @@ +locust_insert_performance: + collections: + - + server: + cache_config.cpu_cache_capacity: 8GB + 
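# (Illustrative aside, not part of the original suite file.)
# Duration strings such as the "2m" used for during_time below can be parsed
# by timestr_to_int() from the utils.py added later in this patch:
#
#   timestr_to_int("2m")  # -> 120 (seconds)
#   timestr_to_int("1h")  # -> 3600 (seconds)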
cache_config.insert_buffer_size: 2GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + task: + type: insert + connection_num: 1 + clients_num: 10 + hatch_rate: 5 + during_time: 2m diff --git a/tests/benchmark/milvus_benchmark/suites/locust_search.yaml b/tests/benchmark/milvus_benchmark/suites/locust_search.yaml new file mode 100644 index 0000000000..eac80389c3 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/locust_search.yaml @@ -0,0 +1,49 @@ +locust_search_performance: + collections: + - + milvus: + db_config.primary_path: /test/milvus/db_data_011/cluster/sift_1m_128_l2_2 + suffix_path: true + cache_config.cpu_cache_capacity: 8GB + cache_config.insert_buffer_size: 2GB + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4GB + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + cluster: true + external_mysql: true + readonly: + replicas: 1 + collection_name: sift_1m_128_l2_2 + ni_per: 50000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 16384 + task: + connection_num: 2 + clients_num: 100 + hatch_rate: 10 + during_time: 3600 + types: + - + type: query + weight: 1 + params: + top_k: 10 + nq: 1 + # filters: + # - + # range: + # int64: + # LT: 0 + # GT: 1000000 + search_param: + nprobe: 16 diff --git a/tests/benchmark/milvus_benchmark/suites/loop_stability.yaml b/tests/benchmark/milvus_benchmark/suites/loop_stability.yaml new file mode 100644 index 0000000000..a304695feb --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/loop_stability.yaml @@ -0,0 +1,17 @@ +loop_stability: + collections: + - + server: + suffix_path: true + db_config.primary_path: /test/milvus/db_data_11/loop_stability + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 10 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 2GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + pull_interval: 20 + collection_num: 4 diff --git a/tests/benchmark/milvus_benchmark/suites/metric.yaml b/tests/benchmark/milvus_benchmark/suites/metric.yaml new file mode 100644 index 0000000000..263176b6e8 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/metric.yaml @@ -0,0 +1,47 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_7/sift_50m_2048_128_l2_sq8_wal + cache_config.cpu_cache_capacity: 32 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 0 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 6 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + wal_enable: true + collection_name: sift_50m_2048_128_l2 + run_count: 2 + top_ks: [1, 10, 100, 1000] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + nprobe: 64 + +# - +# server: +# db_config.primary_path: /test/milvus/db_data_7/sift_50m_2048_128_ip_sq8_wal +# cache_config.cpu_cache_capacity: 32 +# engine_config.use_blas_threshold: 1100 +# engine_config.gpu_search_threshold: 200 +# gpu_resource_config.enable: true +# 
gpu_resource_config.cache_capacity: 6 +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_50m_2048_128_ip +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 200, 500, 1000] +# search_params: +# - +# nprobe: 64 diff --git a/tests/benchmark/milvus_benchmark/suites/pq.yaml b/tests/benchmark/milvus_benchmark/suites/pq.yaml new file mode 100644 index 0000000000..6e69156ac0 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/pq.yaml @@ -0,0 +1,27 @@ +ann_accuracy: + collections: + + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_file_sizes: [1024] + index_types: ['ivf_pq'] + index_params: + nlist: [16384] + m: [20] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 2, 4, 8, 16, 32, 64, 128, 256, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/qps.yaml b/tests/benchmark/milvus_benchmark/suites/qps.yaml new file mode 100644 index 0000000000..6e0b94c527 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/qps.yaml @@ -0,0 +1,27 @@ +search_performance_concurrents: + collections: + - + server: + cache_config.cpu_cache_capacity: 16 + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 4 + gpu_resource_config.search_resources: + - gpu0 + - gpu1 + gpu_resource_config.build_index_resources: + - gpu0 + - gpu1 + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + concurrents: [2000] + use_single_connection: false + index_file_size: [1024] + index_types: ['ivf_sq8'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [1] + search_params: + nprobe: [16] diff --git a/tests/benchmark/milvus_benchmark/suites/search_debug.yaml b/tests/benchmark/milvus_benchmark/suites/search_debug.yaml new file mode 100644 index 0000000000..5c5c389bfe --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/search_debug.yaml @@ -0,0 +1,92 @@ +search_performance: + collections: +# - +# server: +# db_config.primary_path: /test/milvus/db_data_8/sift_1b_2048_128_l2_sq8 +# cache_config.cpu_cache_capacity: 150 +# engine_config.use_blas_threshold: 0 +# engine_config.gpu_search_threshold: 200 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6 +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: sift_1b_2048_128_l2 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 200, 500, 1000] +# search_params: +# - +# nprobe: 8 +# - +# nprobe: 32 +# - +# server: +# db_config.primary_path: /test/milvus/db_data_8/sift_1b_2048_128_l2_sq8 +# cache_config.cpu_cache_capacity: 150 +# engine_config.use_blas_threshold: 1100 +# engine_config.gpu_search_threshold: 200 +# gpu_resource_config.enable: true +# gpu_resource_config.cache_capacity: 6 +# gpu_resource_config.search_resources: +# - gpu0 +# - gpu1 +# gpu_resource_config.build_index_resources: +# - gpu0 +# - gpu1 +# wal_enable: true +# collection_name: 
sift_1b_2048_128_l2 +# run_count: 2 +# top_ks: [1, 10, 100, 1000] +# nqs: [1, 10, 100, 200, 500, 1000] +# search_params: +# - +# nprobe: 8 +# - +# nprobe: 32 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_10m_128_l2_rhnsw_pq + cache_config.cpu_cache_capacity: 32GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + wal_enable: true + collection_name: sift_10m_128_l2 + run_count: 2 + top_ks: [100] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + ef: 100 + - + ef: 200 + - + ef: 500 + - + ef: 1000 + - + server: + db_config.primary_path: /test/milvus/db_data_011/sift_50m_128_l2_rhnsw_sq + cache_config.cpu_cache_capacity: 64GB + engine_config.use_blas_threshold: 0 + engine_config.gpu_search_threshold: 200 + gpu_resource_config.enable: false + wal_enable: true + collection_name: sift_50m_128_l2 + run_count: 2 + top_ks: [100] + nqs: [1, 10, 100, 200, 500, 1000] + search_params: + - + ef: 100 + - + ef: 200 + - + ef: 500 + - + ef: 1000 \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/suites/shards_ann_debug.yaml b/tests/benchmark/milvus_benchmark/suites/shards_ann_debug.yaml new file mode 100644 index 0000000000..ba5db54eb4 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/shards_ann_debug.yaml @@ -0,0 +1,25 @@ +ann_accuracy: + collections: + - + source_file: /test/milvus/ann_hdf5/sift-128-euclidean.hdf5 + collection_name: sift_128_euclidean + index_file_sizes: [1024] + index_types: ['flat', 'ivf_sq8'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 32, 512] + + - + source_file: /test/milvus/ann_hdf5/glove-200-angular.hdf5 + collection_name: glove_200_angular + index_file_sizes: [1024] + index_types: ['flat', 'ivf_sq8'] + index_params: + nlist: [16384] + top_ks: [10] + nqs: [10000] + search_params: + nprobe: [1, 32, 512] diff --git a/tests/benchmark/milvus_benchmark/suites/shards_insert_performance.yaml b/tests/benchmark/milvus_benchmark/suites/shards_insert_performance.yaml new file mode 100644 index 0000000000..eada67a3b4 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/shards_insert_performance.yaml @@ -0,0 +1,17 @@ +insert_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_8/shards_sift_1m_128_128_l2_insert + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_1m_128_128_l2 + ni_per: 10000 + build_index: false + index_type: flat diff --git a/tests/benchmark/milvus_benchmark/suites/shards_insert_performance_sift1m.yaml b/tests/benchmark/milvus_benchmark/suites/shards_insert_performance_sift1m.yaml new file mode 100644 index 0000000000..dbc2929fda --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/shards_insert_performance_sift1m.yaml @@ -0,0 +1,19 @@ +insert_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_010/shards_sift_1m_128_128_l2_insert + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 1 + gpu_resource_config.enable: false + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + collection_name: sift_1m_1024_128_l2 + ni_per: 10000 + build_index: true + index_type: ivf_sq8 + index_param: + nlist: 16384 diff --git 
a/tests/benchmark/milvus_benchmark/suites/shards_loop_stability.yaml b/tests/benchmark/milvus_benchmark/suites/shards_loop_stability.yaml new file mode 100644 index 0000000000..4494b8f4b3 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/shards_loop_stability.yaml @@ -0,0 +1,16 @@ +loop_stability: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_8/shards_loop_stability + engine_config.use_blas_threshold: 1100 + engine_config.gpu_search_threshold: 10 + gpu_resource_config.enable: true + gpu_resource_config.cache_capacity: 2GB + gpu_resource_config.search_resources: + - gpu0 + gpu_resource_config.build_index_resources: + - gpu0 + wal_enable: true + pull_interval: 2 + collection_num: 2 diff --git a/tests/benchmark/milvus_benchmark/suites/shards_search_performance_sift1m.yaml b/tests/benchmark/milvus_benchmark/suites/shards_search_performance_sift1m.yaml new file mode 100644 index 0000000000..71ed51ce72 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/suites/shards_search_performance_sift1m.yaml @@ -0,0 +1,12 @@ +search_performance: + collections: + - + server: + db_config.primary_path: /test/milvus/db_data_010/shards_sift_1m_128_128_l2_insert + wal_enable: true + collection_name: sift_1m_1024_128_l2 + run_count: 2 + top_ks: [1, 10, 100] + nqs: [1, 10, 100] + search_params: + - nprobe: 8 diff --git a/tests/benchmark/milvus_benchmark/test.py b/tests/benchmark/milvus_benchmark/test.py new file mode 100644 index 0000000000..250b7cce50 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/test.py @@ -0,0 +1,36 @@ +import random +from pymilvus import Milvus, DataType + +dim = 128 +name = "sift_1m_128_l2" + +def generate_values(data_type, vectors, ids): + values = None + if data_type in [DataType.INT32, DataType.INT64]: + values = ids + elif data_type in [DataType.FLOAT, DataType.DOUBLE]: + values = [(i + 0.0) for i in ids] + elif data_type in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR]: + values = vectors + return values + + +def generate_entities(info, vectors, ids=None): + entities = [] + for field in info["fields"]: + if field["name"] == "_id": + continue + field_type = field["type"] + entities.append( + {"name": field["name"], "type": field_type, "values": generate_values(field_type, vectors, ids)}) + return entities + + +m = Milvus(host="127.0.0.1") +info = m.describe_collection(name) +print(info) +ids = [random.randint(1, 10000000)] +X = [[random.random() for _ in range(dim)] for _ in range(1)] +entities = generate_entities(info, X, ids) +print(entities) +m.insert(name, entities, ids=ids) diff --git a/tests/benchmark/milvus_benchmark/tests/locust_user_test.py b/tests/benchmark/milvus_benchmark/tests/locust_user_test.py new file mode 100644 index 0000000000..596ab4a33c --- /dev/null +++ b/tests/benchmark/milvus_benchmark/tests/locust_user_test.py @@ -0,0 +1,15 @@ +from pymilvus import DataType +from milvus_benchmark.runners.locust_user import locust_executor +from milvus_benchmark.client import MilvusClient + + +if __name__ == "__main__": + connection_type = "single" + host = "127.0.0.1" + port = 19530 + collection_name = "sift_1m_128_l2" + run_params = {"tasks": {"insert": 1}, "clients_num": 10, "spawn_rate": 2, "during_time": 3600} + dim = 128 + m = MilvusClient(host=host, port=port, collection_name=collection_name) + m.create_collection(dim, data_type=DataType.FLOAT_VECTOR, auto_id=False, other_fields=None) + locust_executor(host, port, collection_name, run_params=run_params) diff --git a/tests/benchmark/milvus_benchmark/tests/test_scheduler.py 
b/tests/benchmark/milvus_benchmark/tests/test_scheduler.py new file mode 100644 index 0000000000..83628c0dc0 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/tests/test_scheduler.py @@ -0,0 +1,11 @@ +from milvus_benchmark import back_scheduler + + +def job_runner(): + print("job_runner") + + + +for i in range(30): + back_scheduler.add_job(job_runner, args=[], misfire_grace_time=300) +back_scheduler.start() \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/update.py b/tests/benchmark/milvus_benchmark/update.py new file mode 100644 index 0000000000..db0b9474f5 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/update.py @@ -0,0 +1,218 @@ +import sys +import re +import logging +import traceback +import argparse +from yaml import full_load, dump +import config +import utils + + +def parse_server_tag(server_tag): + # tag format: "8c"/"8c16m"/"8c16m1g" + if server_tag[-1] == "c": + p = r"(\d+)c" + elif server_tag[-1] == "m": + p = r"(\d+)c(\d+)m" + elif server_tag[-1] == "g": + p = r"(\d+)c(\d+)m(\d+)g" + m = re.match(p, server_tag) + cpus = int(m.groups()[0]) + mems = None + gpus = None + if len(m.groups()) > 1: + mems = int(m.groups()[1]) + if len(m.groups()) > 2: + gpus = int(m.groups()[2]) + return {"cpus": cpus, "mems": mems, "gpus": gpus} + + +""" +description: update values.yaml +return: no return +""" +def update_values(src_values_file, deploy_params_file): + # deploy_mode, hostname, server_tag, milvus_config, server_config=None + try: + with open(src_values_file) as f: + values_dict = full_load(f) + f.close() + with open(deploy_params_file) as f: + deploy_params = full_load(f) + f.close() + except Exception as e: + logging.error(str(e)) + raise Exception("File not found") + deploy_mode = utils.get_deploy_mode(deploy_params) + print(deploy_mode) + cluster = False + values_dict["service"]["type"] = "ClusterIP" + if deploy_mode != config.DEFUALT_DEPLOY_MODE: + cluster = True + values_dict["cluster"]["enabled"] = True + server_tag = utils.get_server_tag(deploy_params) + print(server_tag) + # TODO: update milvus config + # # update values.yaml with the given host + # node_config = None + perf_tolerations = [{ + "key": "node-role.kubernetes.io/benchmark", + "operator": "Exists", + "effect": "NoSchedule" + }] + # if server_name: + # node_config = {'kubernetes.io/hostname': server_name} + # elif server_tag: + # # server tag + # node_config = {'instance-type': server_tag} + cpus = None + mems = None + gpus = None + if server_tag: + res = parse_server_tag(server_tag) + cpus = res["cpus"] + mems = res["mems"] + gpus = res["gpus"] + if cpus: + resources = { + "limits": { + "cpu": str(int(cpus)) + ".0" + }, + "requests": { + "cpu": str(int(cpus) // 2 + 1) + ".0" + # "cpu": "4.0" + # "cpu": str(int(cpus) - 1) + ".0" + } + } + # use external minio/s3 + + # TODO: disable temp + # values_dict['minio']['enabled'] = False + values_dict['minio']['enabled'] = True + # values_dict["externalS3"]["enabled"] = True + values_dict["externalS3"]["enabled"] = False + values_dict["externalS3"]["host"] = config.MINIO_HOST + values_dict["externalS3"]["port"] = config.MINIO_PORT + values_dict["externalS3"]["accessKey"] = config.MINIO_ACCESS_KEY + values_dict["externalS3"]["secretKey"] = config.MINIO_SECRET_KEY + values_dict["externalS3"]["bucketName"] = config.MINIO_BUCKET_NAME + logging.debug(values_dict["externalS3"]) + + if cluster is False: + # TODO: support pod affinity for standalone mode + if cpus: + # values_dict['standalone']['nodeSelector'] = node_config + # 
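# (Illustrative examples of parse_server_tag(), defined above.)
#   parse_server_tag("8c")      -> {"cpus": 8, "mems": None, "gpus": None}
#   parse_server_tag("8c16m")   -> {"cpus": 8, "mems": 16, "gpus": None}
#   parse_server_tag("8c16m1g") -> {"cpus": 8, "mems": 16, "gpus": 1}
# With cpus=8, the resources dict built above becomes
#   {"limits": {"cpu": "8.0"}, "requests": {"cpu": "5.0"}}   # 8 // 2 + 1 == 5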
values_dict['minio']['nodeSelector'] = node_config + # values_dict['etcd']['nodeSelector'] = node_config + # # set limit/request cpus in resources + values_dict['standalone']['resources'] = resources + if mems: + values_dict['standalone']['resources']["limits"].update({"memory": str(int(mems)) + "Gi"}) + values_dict['standalone']['resources']["requests"].update({"memory": str(int(mems) // 2 + 1) + "Gi"}) + if gpus: + logging.info("TODO: Need to schedule pod on GPU server") + logging.debug("Add tolerations into standalone server") + values_dict['standalone']['tolerations'] = perf_tolerations + values_dict['minio']['tolerations'] = perf_tolerations + values_dict['etcd']['tolerations'] = perf_tolerations + else: + # TODO: mem limits on distributed mode + # values_dict['pulsar']["broker"]["configData"].update({"maxMessageSize": "52428800", "PULSAR_MEM": BOOKKEEPER_PULSAR_MEM}) + # values_dict['pulsar']["bookkeeper"]["configData"].update({"nettyMaxFrameSizeBytes": "52428800", "PULSAR_MEM": BROKER_PULSAR_MEM}) + if cpus: + # values_dict['standalone']['nodeSelector'] = node_config + # values_dict['minio']['nodeSelector'] = node_config + # values_dict['etcd']['nodeSelector'] = node_config + # # set limit/request cpus in resources + # values_dict['proxy']['resources'] = resources + values_dict['queryNode']['resources'] = resources + values_dict['indexNode']['resources'] = resources + values_dict['dataNode']['resources'] = resources + # values_dict['minio']['resources'] = resources + # values_dict['pulsarStandalone']['resources'] = resources + if mems: + logging.debug("TODO: Update mem resources") + # # pulsar distributed mode + # values_dict['pulsar']["enabled"] = True + # values_dict['pulsar']['autoRecovery']['nodeSelector'] = node_config + # values_dict['pulsar']['proxy']['nodeSelector'] = node_config + # values_dict['pulsar']['broker']['nodeSelector'] = node_config + # values_dict['pulsar']['bookkeeper']['nodeSelector'] = node_config + # values_dict['pulsar']['zookeeper']['nodeSelector'] = node_config + + logging.debug("Add tolerations into cluster server") + values_dict['proxy']['tolerations'] = perf_tolerations + values_dict['queryNode']['tolerations'] = perf_tolerations + values_dict['indexNode']['tolerations'] = perf_tolerations + values_dict['dataNode']['tolerations'] = perf_tolerations + values_dict['etcd']['tolerations'] = perf_tolerations + values_dict['minio']['tolerations'] = perf_tolerations + values_dict['pulsarStandalone']['tolerations'] = perf_tolerations + # TODO: for distributed deployment + # values_dict['pulsar']['autoRecovery']['tolerations'] = perf_tolerations + # values_dict['pulsar']['proxy']['tolerations'] = perf_tolerations + # values_dict['pulsar']['broker']['tolerations'] = perf_tolerations + # values_dict['pulsar']['bookkeeper']['tolerations'] = perf_tolerations + # values_dict['pulsar']['zookeeper']['tolerations'] = perf_tolerations + milvus_params = deploy_params["milvus"] + if "datanode" in milvus_params: + if "replicas" in milvus_params["datanode"]: + values_dict['dataNode']["replicas"] = milvus_params["datanode"]["replicas"] + if "querynode"in milvus_params: + if "replicas" in milvus_params["querynode"]: + values_dict['queryNode']["replicas"] = milvus_params["querynode"]["replicas"] + if "indexnode"in milvus_params: + if "replicas" in milvus_params["indexnode"]: + values_dict['indexNode']["replicas"] = milvus_params["indexnode"]["replicas"] + if "proxy"in milvus_params: + if "replicas" in milvus_params["proxy"]: + values_dict['proxy']["replicas"] = 
milvus_params["proxy"]["replicas"] + # add extra volumes + values_dict['extraVolumes'] = [{ + 'name': 'test', + 'flexVolume': { + 'driver': "fstab/cifs", + 'fsType': "cifs", + 'secretRef': { + 'name': "cifs-test-secret" + }, + 'options': { + 'networkPath': config.IDC_NAS_URL, + 'mountOptions': "vers=1.0" + } + } + }] + values_dict['extraVolumeMounts'] = [{ + 'name': 'test', + 'mountPath': '/test' + }] + + with open(src_values_file, 'w') as f: + dump(values_dict, f, default_flow_style=False) + f.close() + + +if __name__ == "__main__": + arg_parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + arg_parser.add_argument( + '--src-values', + help='src values.yaml') + arg_parser.add_argument( + '--deploy-params', + help='deploy params') + + args = arg_parser.parse_args() + src_values_file = args.src_values + deploy_params_file = args.deploy_params + if not src_values_file or not deploy_params_file: + logging.error("No valid file input") + sys.exit(-1) + try: + update_values(src_values_file, deploy_params_file) + logging.info("Values.yaml updated") + except Exception as e: + logging.error(str(e)) + logging.error(traceback.format_exc()) + sys.exit(-1) \ No newline at end of file diff --git a/tests/benchmark/milvus_benchmark/utils.py b/tests/benchmark/milvus_benchmark/utils.py new file mode 100644 index 0000000000..8b0f20a0c9 --- /dev/null +++ b/tests/benchmark/milvus_benchmark/utils.py @@ -0,0 +1,134 @@ +# -*- coding: utf-8 -*- +import time +import logging +import string +import random +from yaml import full_load, dump +import yaml +import tableprint as tp +from pprint import pprint +import config + +logger = logging.getLogger("milvus_benchmark.utils") + + +def timestr_to_int(time_str): + time_int = 0 + if isinstance(time_str, int) or time_str.isdigit(): + time_int = int(time_str) + elif time_str.endswith("s"): + time_int = int(time_str.split("s")[0]) + elif time_str.endswith("m"): + time_int = int(time_str.split("m")[0]) * 60 + elif time_str.endswith("h"): + time_int = int(time_str.split("h")[0]) * 60 * 60 + else: + raise Exception("%s not support" % time_str) + return time_int + + +class literal_str(str): pass + + +def change_style(style, representer): + def new_representer(dumper, data): + scalar = representer(dumper, data) + scalar.style = style + return scalar + + return new_representer + + +from yaml.representer import SafeRepresenter + +# represent_str does handle some corner cases, so use that +# instead of calling represent_scalar directly +represent_literal_str = change_style('|', SafeRepresenter.represent_str) + +yaml.add_representer(literal_str, represent_literal_str) + + +def retry(times): + """ + This decorator prints the execution time for the decorated function. 
+ """ + def wrapper(func): + def newfn(*args, **kwargs): + attempt = 0 + while attempt < times: + try: + result = func(*args, **kwargs) + if result: + break + else: + raise Exception("Result false") + except Exception as e: + logger.info(str(e)) + time.sleep(3) + attempt += 1 + return result + return newfn + return wrapper + + +def convert_nested(dct): + def insert(dct, lst): + for x in lst[:-2]: + dct[x] = dct = dct.get(x, dict()) + dct.update({lst[-2]: lst[-1]}) + + # empty dict to store the result + + result = dict() + + # create an iterator of lists + # representing nested or hierarchial flow + lsts = ([*k.split("."), v] for k, v in dct.items()) + + # insert each list into the result + for lst in lsts: + insert(result, lst) + return result + + +def get_unique_name(prefix=None): + if prefix is None: + prefix = "distributed-benchmark-test-" + return prefix + "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8)).lower() + + +def get_current_time(): + return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + + +def print_table(headers, columns, data): + bodys = [] + for index, value in enumerate(columns): + tmp = [value] + tmp.extend(data[index]) + bodys.append(tmp) + tp.table(bodys, headers) + + +def get_deploy_mode(deploy_params): + deploy_mode = None + if deploy_params: + milvus_params = None + if "milvus" in deploy_params: + milvus_params = deploy_params["milvus"] + if not milvus_params: + deploy_mode = config.DEFUALT_DEPLOY_MODE + elif "deploy_mode" in milvus_params: + deploy_mode = milvus_params["deploy_mode"] + if deploy_mode not in [config.SINGLE_DEPLOY_MODE, config.CLUSTER_DEPLOY_MODE]: + raise Exception("Invalid deploy mode: %s" % deploy_mode) + return deploy_mode + + +def get_server_tag(deploy_params): + server_tag = "" + if deploy_params and "server" in deploy_params: + server = deploy_params["server"] + # server_name = server["server_name"] if "server_name" in server else "" + server_tag = server["server_tag"] if "server_tag" in server else "" + return server_tag \ No newline at end of file diff --git a/tests/benchmark/requirements.txt b/tests/benchmark/requirements.txt new file mode 100644 index 0000000000..18546eeec3 --- /dev/null +++ b/tests/benchmark/requirements.txt @@ -0,0 +1,23 @@ +# pymilvus==0.2.14 +# pymilvus-distributed>=0.0.61 + +# for local install +# --extra-index-url https://test.pypi.org/simple/ +# pymilvus==2.0.0rc3.dev8 + +grpcio==1.37.1 +grpcio-testing==1.37.1 +grpcio-tools==1.37.1 + +scipy==1.3.1 +scikit-learn==0.19.1 +h5py==2.7.1 +# influxdb==5.2.2 +pyyaml>=5.1 +tableprint==0.8.0 +ansicolors==1.1.8 +kubernetes==10.0.1 +# rq==1.2.0 +locust>=1.3.2 +pymongo==3.10.0 +apscheduler==3.7.0 \ No newline at end of file diff --git a/tests/docker/.env b/tests/docker/.env index 57057b97e5..68650c7bb2 100644 --- a/tests/docker/.env +++ b/tests/docker/.env @@ -1,6 +1,6 @@ MILVUS_SERVICE_IP=127.0.0.1 MILVUS_SERVICE_PORT=19530 -MILVUS_PYTEST_WORKSPACE=/milvus/tests/python_test +MILVUS_PYTEST_WORKSPACE=/milvus/tests/python_client MILVUS_PYTEST_LOG_PATH=/milvus/_artifacts/tests/pytest_logs IMAGE_REPO=milvusdb IMAGE_TAG=20210802-87c5a49 diff --git a/tests/docker/Dockerfile b/tests/docker/Dockerfile index 7fd8808e3f..24883e6d96 100644 --- a/tests/docker/Dockerfile +++ b/tests/docker/Dockerfile @@ -11,7 +11,7 @@ FROM python:3.6.8-jessie -COPY ./tests/python_test/requirements.txt /requirements.txt +COPY ./tests/python_client/requirements.txt /requirements.txt RUN python3 -m pip install --no-cache-dir -r /requirements.txt diff --git 
a/tests20/go_client/README.md b/tests/go_client/README.md similarity index 100% rename from tests20/go_client/README.md rename to tests/go_client/README.md diff --git a/tests20/java_client/README.md b/tests/java_client/README.md similarity index 100% rename from tests20/java_client/README.md rename to tests/java_client/README.md diff --git a/tests/python_client/.dockerignore b/tests/python_client/.dockerignore new file mode 100644 index 0000000000..c97d9d043c --- /dev/null +++ b/tests/python_client/.dockerignore @@ -0,0 +1,14 @@ +node_modules +npm-debug.log +Dockerfile* +docker-compose* +.dockerignore +.git +.gitignore +.env +*/bin +*/obj +README.md +LICENSE +.vscode +__pycache__ \ No newline at end of file diff --git a/tests/python_client/.gitignore b/tests/python_client/.gitignore new file mode 100644 index 0000000000..7b6533bf7c --- /dev/null +++ b/tests/python_client/.gitignore @@ -0,0 +1,17 @@ +# python files +.pytest_cache +**/.pytest_cache +.idea +*.html + +.python-version +__pycache__ +.vscode + +test_out/ +*.pyc + +db/ +logs/ + +.coverage \ No newline at end of file diff --git a/tests20/python_client/README.md b/tests/python_client/README.md similarity index 98% rename from tests20/python_client/README.md rename to tests/python_client/README.md index 2d2d402d7b..020caf3798 100644 --- a/tests20/python_client/README.md +++ b/tests/python_client/README.md @@ -2,7 +2,7 @@ This document guides you through the pytest-based PyMilvus ORM test framework. -> You can find the test code on [GitHub](https://github.com/milvus-io/milvus/tree/master/tests20/python_client). +> You can find the test code on [GitHub](https://github.com/milvus-io/milvus/tree/master/tests/python_client). @@ -77,7 +77,7 @@ We recommend using Python 3 (3.6 or higher), consistent with the version support > Note: Procedures listed below will be completed automatically if you deployed Milvus using KinD. -1. Install the Python package prerequisite for the test, enter ***/milvus/tests20/python_client/**, and execute: +1. Install the Python package prerequisite for the test, enter ***/milvus/tests/python_client/**, and execute: ```python pip install -r requirements.txt @@ -113,7 +113,7 @@ where `host` should be set as the IP address of the Milvus service, and `*.html` ### Module Overview -![Module Overview](https://github.com/milvus-io/milvus/blob/master/tests20/python_client/graphs/module_call_diagram.jpg) +![Module Overview](https://github.com/milvus-io/milvus/blob/master/tests/python_client/graphs/module_call_diagram.jpg) ### Working directories and files diff --git a/tests20/python_client/README_CN.md b/tests/python_client/README_CN.md similarity index 97% rename from tests20/python_client/README_CN.md rename to tests/python_client/README_CN.md index 550975bc31..178996c69f 100644 --- a/tests20/python_client/README_CN.md +++ b/tests/python_client/README_CN.md @@ -1,7 +1,7 @@

# Test Framework User Guide

## Introduction

A test framework for pymilvus-orm, written with pytest.

-Test code: https://github.com/milvus-io/milvus/tree/master/tests20/python_client
+Test code: https://github.com/milvus-io/milvus/tree/master/tests/python_client

## Quick Start

### Deploy Milvus
@@ -65,7 +65,7 @@
Python 3 (>= 3.6) is recommended, consistent with the Python versions supported by pymilvus_orm.

Note: if Milvus is deployed with KinD, the steps below are completed automatically.

-1. Install the Python packages required for testing: enter the */milvus/tests20/python_client/ directory and run:
+1. Install the Python packages required for testing: enter the */milvus/tests/python_client/ directory and run:

   pip install -r requirements.txt
@@ -90,7 +90,7 @@

## Module Overview

### Module call diagram

-![img]
+![img]

### Working directories and files

- base: holds the wrapped pymilvus-orm module files, plus the pytest setup and teardown handling
diff --git a/tests20/python_client/base/client_base.py b/tests/python_client/base/client_base.py similarity index 99% rename from tests20/python_client/base/client_base.py rename to tests/python_client/base/client_base.py index 2613c6b462..fad5726694 100644 --- a/tests20/python_client/base/client_base.py +++ b/tests/python_client/base/client_base.py @@ -1,6 +1,6 @@ import pytest import sys -from pymilvus_orm.default_config import DefaultConfig +from pymilvus import DefaultConfig sys.path.append("..") from base.connections_wrapper import ApiConnectionsWrapper diff --git a/tests20/python_client/base/collection_wrapper.py b/tests/python_client/base/collection_wrapper.py similarity index 100% rename from tests20/python_client/base/collection_wrapper.py rename to tests/python_client/base/collection_wrapper.py diff --git a/tests20/python_client/base/connections_wrapper.py b/tests/python_client/base/connections_wrapper.py similarity index 96% rename from tests20/python_client/base/connections_wrapper.py rename to tests/python_client/base/connections_wrapper.py index ce247ca40a..07a8f3de0f 100644 --- a/tests20/python_client/base/connections_wrapper.py +++ b/tests/python_client/base/connections_wrapper.py @@ -1,5 +1,5 @@ -from pymilvus_orm import Connections -from pymilvus_orm.default_config import DefaultConfig +from pymilvus import Connections +from pymilvus import DefaultConfig import sys sys.path.append("..") diff --git a/tests20/python_client/base/index_wrapper.py b/tests/python_client/base/index_wrapper.py similarity index 95% rename from tests20/python_client/base/index_wrapper.py rename to tests/python_client/base/index_wrapper.py index f04c7693b6..f8ba8d4724 100644 --- a/tests20/python_client/base/index_wrapper.py +++ b/tests/python_client/base/index_wrapper.py @@ -1,5 +1,5 @@ import sys -from pymilvus_orm import Index +from pymilvus import Index sys.path.append("..") from check.param_check import * diff --git a/tests20/python_client/base/partition_wrapper.py b/tests/python_client/base/partition_wrapper.py similarity index 100% rename from tests20/python_client/base/partition_wrapper.py rename to tests/python_client/base/partition_wrapper.py diff --git a/tests20/python_client/base/schema_wrapper.py b/tests/python_client/base/schema_wrapper.py similarity index 97% rename from tests20/python_client/base/schema_wrapper.py rename to tests/python_client/base/schema_wrapper.py index d8487c9672..05e778f37c 100644 --- a/tests20/python_client/base/schema_wrapper.py +++ b/tests/python_client/base/schema_wrapper.py @@ -3,7 +3,7 @@ import sys sys.path.append("..") from check.func_check import ResponseChecker from utils.api_request import api_request -from pymilvus_orm import CollectionSchema, FieldSchema +from pymilvus import CollectionSchema, FieldSchema class ApiCollectionSchemaWrapper: diff --git a/tests20/python_client/base/utility_wrapper.py b/tests/python_client/base/utility_wrapper.py similarity index 99% rename from tests20/python_client/base/utility_wrapper.py rename to tests/python_client/base/utility_wrapper.py index 33a58f3cc1..35c33d7009 100644 --- a/tests20/python_client/base/utility_wrapper.py +++ b/tests/python_client/base/utility_wrapper.py @@ -1,4 +1,4 @@ -from pymilvus_orm import utility +from pymilvus import utility import sys sys.path.append("..") diff --git a/tests20/python_client/chaos/README.md b/tests/python_client/chaos/README.md similarity index 97% rename from tests20/python_client/chaos/README.md rename to tests/python_client/chaos/README.md index
3be931e95c..f0f54c1e9d 100644 --- a/tests20/python_client/chaos/README.md +++ b/tests/python_client/chaos/README.md @@ -52,7 +52,7 @@ Run a single test scenario manually(take query node pod is killed as instance): 2. run the commands below: ```bash -cd /milvus/tests20/python_client/chaos +cd /milvus/tests/python_client/chaos pytest test_chaos.py --host x.x.x.x -v ``` diff --git a/tests20/python_client/chaos/chaos_commons.py b/tests/python_client/chaos/chaos_commons.py similarity index 98% rename from tests20/python_client/chaos/chaos_commons.py rename to tests/python_client/chaos/chaos_commons.py index f16241a082..c0e325e3ba 100644 --- a/tests20/python_client/chaos/chaos_commons.py +++ b/tests/python_client/chaos/chaos_commons.py @@ -2,7 +2,7 @@ import os import threading import glob import delayed_assert -import constants +from chaos import constants from yaml import full_load from utils.util_log import test_log as log diff --git a/tests20/python_client/chaos/chaos_objects/chaos_datacoord_podkill.yaml b/tests/python_client/chaos/chaos_objects/chaos_datacoord_podkill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_datacoord_podkill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_datacoord_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_datanode_container_kill.yaml b/tests/python_client/chaos/chaos_objects/chaos_datanode_container_kill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_datanode_container_kill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_datanode_container_kill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_datanode_pod_failure.yaml b/tests/python_client/chaos/chaos_objects/chaos_datanode_pod_failure.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_datanode_pod_failure.yaml rename to tests/python_client/chaos/chaos_objects/chaos_datanode_pod_failure.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_datanode_podkill.yaml b/tests/python_client/chaos/chaos_objects/chaos_datanode_podkill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_datanode_podkill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_datanode_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_indexcoord_podkill.yaml b/tests/python_client/chaos/chaos_objects/chaos_indexcoord_podkill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_indexcoord_podkill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_indexcoord_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_indexnode_podkill.yaml b/tests/python_client/chaos/chaos_objects/chaos_indexnode_podkill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_indexnode_podkill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_indexnode_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_minio_podkill.yaml b/tests/python_client/chaos/chaos_objects/chaos_minio_podkill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_minio_podkill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_minio_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_proxy_podkill.yaml b/tests/python_client/chaos/chaos_objects/chaos_proxy_podkill.yaml similarity index 100% rename from 
tests20/python_client/chaos/chaos_objects/chaos_proxy_podkill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_proxy_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_querycoord_podkill.yaml b/tests/python_client/chaos/chaos_objects/chaos_querycoord_podkill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_querycoord_podkill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_querycoord_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_querynode_pod_failure.yaml b/tests/python_client/chaos/chaos_objects/chaos_querynode_pod_failure.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_querynode_pod_failure.yaml rename to tests/python_client/chaos/chaos_objects/chaos_querynode_pod_failure.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_querynode_podkill.yaml b/tests/python_client/chaos/chaos_objects/chaos_querynode_podkill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_querynode_podkill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_querynode_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_rootcoord_podkill.yaml b/tests/python_client/chaos/chaos_objects/chaos_rootcoord_podkill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_rootcoord_podkill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_rootcoord_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_standalone_container_kill.yaml b/tests/python_client/chaos/chaos_objects/chaos_standalone_container_kill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_standalone_container_kill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_standalone_container_kill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/chaos_standalone_podkill.yaml b/tests/python_client/chaos/chaos_objects/chaos_standalone_podkill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/chaos_standalone_podkill.yaml rename to tests/python_client/chaos/chaos_objects/chaos_standalone_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/skip_chaos_etcd_podkill.yaml b/tests/python_client/chaos/chaos_objects/skip_chaos_etcd_podkill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/skip_chaos_etcd_podkill.yaml rename to tests/python_client/chaos/chaos_objects/skip_chaos_etcd_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/skip_chaos_pulsar_podkill.yaml b/tests/python_client/chaos/chaos_objects/skip_chaos_pulsar_podkill.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/skip_chaos_pulsar_podkill.yaml rename to tests/python_client/chaos/chaos_objects/skip_chaos_pulsar_podkill.yaml diff --git a/tests20/python_client/chaos/chaos_objects/testcases.yaml b/tests/python_client/chaos/chaos_objects/testcases.yaml similarity index 100% rename from tests20/python_client/chaos/chaos_objects/testcases.yaml rename to tests/python_client/chaos/chaos_objects/testcases.yaml diff --git a/tests20/python_client/chaos/chaos_opt.py b/tests/python_client/chaos/chaos_opt.py similarity index 98% rename from tests20/python_client/chaos/chaos_opt.py rename to tests/python_client/chaos/chaos_opt.py index 61429cf9a8..f04ae014f0 100644 --- a/tests20/python_client/chaos/chaos_opt.py +++ b/tests/python_client/chaos/chaos_opt.py @@ -1,7 
+1,7 @@ from __future__ import print_function from kubernetes import client, config from kubernetes.client.rest import ApiException -import constants as cf +from chaos import constants as cf from utils.util_log import test_log as log diff --git a/tests20/python_client/chaos/checker.py b/tests/python_client/chaos/checker.py similarity index 100% rename from tests20/python_client/chaos/checker.py rename to tests/python_client/chaos/checker.py diff --git a/tests20/python_client/chaos/constants.py b/tests/python_client/chaos/constants.py similarity index 100% rename from tests20/python_client/chaos/constants.py rename to tests/python_client/chaos/constants.py diff --git a/tests20/python_client/chaos/test_chaos.py b/tests/python_client/chaos/test_chaos.py similarity index 99% rename from tests20/python_client/chaos/test_chaos.py rename to tests/python_client/chaos/test_chaos.py index 4abd0521f6..ce9bd5ac87 100644 --- a/tests20/python_client/chaos/test_chaos.py +++ b/tests/python_client/chaos/test_chaos.py @@ -1,7 +1,7 @@ import pytest from time import sleep -from pymilvus_orm import connections +from pymilvus import connections from checker import CreateChecker, InsertFlushChecker, \ SearchChecker, QueryChecker, IndexChecker, Op from chaos_opt import ChaosOpt @@ -9,7 +9,7 @@ from utils.util_log import test_log as log from common import common_func as cf from chaos_commons import * from common.common_type import CaseLabel -import constants +from chaos import constants from delayed_assert import expect, assert_expectations diff --git a/tests20/python_client/chaos/test_chaos_data_consist.py b/tests/python_client/chaos/test_chaos_data_consist.py similarity index 98% rename from tests20/python_client/chaos/test_chaos_data_consist.py rename to tests/python_client/chaos/test_chaos_data_consist.py index e9f7c584df..65089b5e2a 100644 --- a/tests20/python_client/chaos/test_chaos_data_consist.py +++ b/tests/python_client/chaos/test_chaos_data_consist.py @@ -2,14 +2,14 @@ import pytest import datetime from time import sleep -from pymilvus_orm import connections, utility +from pymilvus import connections, utility from base.collection_wrapper import ApiCollectionWrapper from chaos_opt import ChaosOpt from common import common_func as cf from common import common_type as ct from chaos_commons import * from common.common_type import CaseLabel, CheckTasks -import constants +from chaos import constants def reboot_pod(chaos_yaml): diff --git a/tests20/python_client/check/func_check.py b/tests/python_client/check/func_check.py similarity index 99% rename from tests20/python_client/check/func_check.py rename to tests/python_client/check/func_check.py index 14494bea07..3bc2408cb9 100644 --- a/tests20/python_client/check/func_check.py +++ b/tests/python_client/check/func_check.py @@ -3,7 +3,7 @@ from common import common_type as ct from common import common_func as cf from common.common_type import CheckTasks, Connect_Object_Name # from common.code_mapping import ErrorCode, ErrorMessage -from pymilvus_orm import Collection, Partition +from pymilvus import Collection, Partition from utils.api_request import Error import check.param_check as pc diff --git a/tests20/python_client/check/param_check.py b/tests/python_client/check/param_check.py similarity index 100% rename from tests20/python_client/check/param_check.py rename to tests/python_client/check/param_check.py diff --git a/tests20/python_client/common/code_mapping.py b/tests/python_client/common/code_mapping.py similarity index 93% rename from 
tests20/python_client/common/code_mapping.py rename to tests/python_client/common/code_mapping.py index b75265f758..d9825c4be8 100644 --- a/tests20/python_client/common/code_mapping.py +++ b/tests/python_client/common/code_mapping.py @@ -1,5 +1,5 @@ from enum import Enum -from pymilvus_orm.exceptions import ExceptionsMessage +from pymilvus import ExceptionsMessage class ErrorCode(Enum): diff --git a/tests20/python_client/common/common_func.py b/tests/python_client/common/common_func.py similarity index 99% rename from tests20/python_client/common/common_func.py rename to tests/python_client/common/common_func.py index ba49de8bdc..65a9a79e6b 100644 --- a/tests20/python_client/common/common_func.py +++ b/tests/python_client/common/common_func.py @@ -6,7 +6,7 @@ import numpy as np import pandas as pd from sklearn import preprocessing -from pymilvus_orm.types import DataType +from pymilvus import DataType from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper from common import common_type as ct from utils.util_log import test_log as log diff --git a/tests20/python_client/common/common_type.py b/tests/python_client/common/common_type.py similarity index 100% rename from tests20/python_client/common/common_type.py rename to tests/python_client/common/common_type.py diff --git a/tests/python_client/common/constants.py b/tests/python_client/common/constants.py new file mode 100644 index 0000000000..36f29305e0 --- /dev/null +++ b/tests/python_client/common/constants.py @@ -0,0 +1,22 @@ +import utils.utils as utils + +default_fields = utils.gen_default_fields() +default_binary_fields = utils.gen_binary_default_fields() + +default_entity = utils.gen_entities(1) +default_raw_binary_vector, default_binary_entity = utils.gen_binary_entities(1) + +default_entity_row = utils.gen_entities_rows(1) +default_raw_binary_vector_row, default_binary_entity_row = utils.gen_binary_entities_rows(1) + + +default_entities = utils.gen_entities(utils.default_nb) +default_raw_binary_vectors, default_binary_entities = utils.gen_binary_entities(utils.default_nb) + + +default_entities_new = utils.gen_entities_new(utils.default_nb) +default_raw_binary_vectors_new, default_binary_entities_new = utils.gen_binary_entities_new(utils.default_nb) + + +default_entities_rows = utils.gen_entities_rows(utils.default_nb) +default_raw_binary_vectors_rows, default_binary_entities_rows = utils.gen_binary_entities_rows(utils.default_nb) \ No newline at end of file diff --git a/tests20/python_client/config/log_config.py b/tests/python_client/config/log_config.py similarity index 100% rename from tests20/python_client/config/log_config.py rename to tests/python_client/config/log_config.py diff --git a/tests/python_client/conftest.py b/tests/python_client/conftest.py new file mode 100644 index 0000000000..58738bbdab --- /dev/null +++ b/tests/python_client/conftest.py @@ -0,0 +1,439 @@ +import pytest +import functools +import socket + +import common.common_type as ct +import common.common_func as cf +from utils.util_log import test_log as log +from base.client_base import param_info +from check.param_check import ip_check, number_check +from config.log_config import log_config +from utils.utils import * + + + +timeout = 60 +dimension = 128 +delete_timeout = 60 + + +def pytest_addoption(parser): + parser.addoption("--ip", action="store", default="localhost", help="service's ip") + parser.addoption("--host", action="store", default="localhost", help="service's ip") + parser.addoption("--service", action="store", 
default="", help="service address") + parser.addoption("--port", action="store", default=19530, help="service's port") + parser.addoption("--http_port", action="store", default=19121, help="http's port") + parser.addoption("--handler", action="store", default="GRPC", help="handler of request") + parser.addoption("--tag", action="store", default="all", help="only run tests matching the tag.") + parser.addoption('--dry_run', action='store_true', default=False, help="") + parser.addoption('--partition_name', action='store', default="partition_name", help="name of partition") + parser.addoption('--connect_name', action='store', default="connect_name", help="name of connect") + parser.addoption('--descriptions', action='store', default="partition_des", help="descriptions of partition") + parser.addoption('--collection_name', action='store', default="collection_name", help="name of collection") + parser.addoption('--search_vectors', action='store', default="search_vectors", help="vectors of search") + parser.addoption('--index_param', action='store', default="index_param", help="index_param of index") + parser.addoption('--data', action='store', default="data", help="data of request") + parser.addoption('--clean_log', action='store_true', default=False, help="clean log before testing") + parser.addoption('--schema', action='store', default="schema", help="schema of test interface") + parser.addoption('--err_msg', action='store', default="err_msg", help="error message of test") + parser.addoption('--term_expr', action='store', default="term_expr", help="expr of query quest") + parser.addoption('--check_content', action='store', default="check_content", help="content of check") + parser.addoption('--field_name', action='store', default="field_name", help="field_name of index") + parser.addoption('--dry-run', action='store_true', default=False) + parser.addoption("--http-port", action="store", default=19121) + + +@pytest.fixture +def ip(request): + return request.config.getoption("--ip") + + +@pytest.fixture +def host(request): + return request.config.getoption("--host") + + +@pytest.fixture +def service(request): + return request.config.getoption("--service") + + +@pytest.fixture +def port(request): + return request.config.getoption("--port") + + +@pytest.fixture +def http_port(request): + return request.config.getoption("--http_port") + + +@pytest.fixture +def handler(request): + return request.config.getoption("--handler") + + +@pytest.fixture +def tag(request): + return request.config.getoption("--tag") + + +@pytest.fixture +def dry_run(request): + return request.config.getoption("--dry_run") + + +@pytest.fixture +def connect_name(request): + return request.config.getoption("--connect_name") + + +@pytest.fixture +def partition_name(request): + return request.config.getoption("--partition_name") + + +@pytest.fixture +def descriptions(request): + return request.config.getoption("--descriptions") + + +@pytest.fixture +def collection_name(request): + return request.config.getoption("--collection_name") + + +@pytest.fixture +def search_vectors(request): + return request.config.getoption("--search_vectors") + + +@pytest.fixture +def index_param(request): + return request.config.getoption("--index_param") + + +@pytest.fixture +def data(request): + return request.config.getoption("--data") + + +@pytest.fixture +def clean_log(request): + return request.config.getoption("--clean_log") + + +@pytest.fixture +def schema(request): + return request.config.getoption("--schema") + + +@pytest.fixture +def 
err_msg(request): + return request.config.getoption("--err_msg") + + +@pytest.fixture +def term_expr(request): + return request.config.getoption("--term_expr") + + +@pytest.fixture +def check_content(request): + log.error("^" * 50) + log.error("check_content") + return request.config.getoption("--check_content") + + +@pytest.fixture +def field_name(request): + return request.config.getoption("--field_name") + + +""" fixture func """ + + +@pytest.fixture(scope="session", autouse=True) +def initialize_env(request): + """ clean log before testing """ + host = request.config.getoption("--host") + port = request.config.getoption("--port") + handler = request.config.getoption("--handler") + clean_log = request.config.getoption("--clean_log") + + """ params check """ + assert ip_check(host) and number_check(port) + + """ modify log files """ + cf.modify_file(file_path_list=[log_config.log_debug, log_config.log_info, log_config.log_err], is_modify=clean_log) + + log.info("#" * 80) + log.info("[initialize_milvus] Log cleaned up, start testing...") + param_info.prepare_param_info(host, port, handler) + + +@pytest.fixture(params=ct.get_invalid_strs) +def get_invalid_string(request): + yield request.param + + +@pytest.fixture(params=cf.gen_simple_index()) +def get_index_param(request): + yield request.param + + +@pytest.fixture(params=ct.get_invalid_strs) +def get_invalid_collection_name(request): + yield request.param + + +@pytest.fixture(params=ct.get_invalid_strs) +def get_invalid_field_name(request): + yield request.param + + +@pytest.fixture(params=ct.get_invalid_strs) +def get_invalid_index_type(request): + yield request.param + + +# TODO: construct invalid index params for all index types +@pytest.fixture(params=[{"metric_type": "L3", "index_type": "IVF_FLAT"}, + {"metric_type": "L2", "index_type": "IVF_FLAT", "err_params": {"nlist": 10}}, + {"metric_type": "L2", "index_type": "IVF_FLAT", "params": {"nlist": -1}}]) +def get_invalid_index_params(request): + yield request.param + + +@pytest.fixture(params=ct.get_invalid_strs) +def get_invalid_partition_name(request): + yield request.param + + +@pytest.fixture(params=ct.get_invalid_dict) +def get_invalid_vector_dict(request): + yield request.param + +def pytest_configure(config): + # register an additional marker + config.addinivalue_line( + "markers", "tag(name): mark test to run only matching the tag" + ) + + +def pytest_runtest_setup(item): + tags = list() + for marker in item.iter_markers(name="tag"): + for tag in marker.args: + tags.append(tag) + if tags: + cmd_tag = item.config.getoption("--tag") + if cmd_tag != "all" and cmd_tag not in tags: + pytest.skip("test requires tag in {!r}".format(tags)) + + +def pytest_runtestloop(session): + if session.config.getoption('--dry-run'): + total_num = 0 + file_num = 0 + tags_num = 0 + res = {"total_num": total_num, "tags_num": tags_num} + for item in session.items: + print(item.nodeid) + if item.fspath.basename not in res: + res.update({item.fspath.basename: {"total": 1, "tags": 0}}) + else: + res[item.fspath.basename]["total"] += 1 + res["total_num"] += 1 + for marker in item.own_markers: + if marker.name == "tags" and "0331" in marker.args: + res["tags_num"] += 1 + res[item.fspath.basename]["tags"] += 1 + print(res) + return True + + +def check_server_connection(request): + ip = request.config.getoption("--ip") + port = request.config.getoption("--port") + + connected = True + if ip and (ip not in ['localhost', '127.0.0.1']): + try: + socket.getaddrinfo(ip, port, 0, 0, socket.IPPROTO_TCP) + except 
+ print("Socket connect failed: %s" % str(e)) + connected = False + return connected + + +# @pytest.fixture(scope="session", autouse=True) +# def change_mutation_result_to_primary_keys(): +# def insert_future_decorator(func): +# @functools.wraps(func) +# def change(*args, **kwargs): +# try: +# return func(*args, **kwargs).primary_keys +# except Exception as e: +# raise e +# return change +# +# from pymilvus import MutationFuture +# MutationFuture.result = insert_future_decorator(MutationFuture.result) +# +# def insert_decorator(func): +# @functools.wraps(func) +# def change(*args, **kwargs): +# if kwargs.get("_async", False): +# return func(*args, **kwargs) +# try: +# return func(*args, **kwargs).primary_keys +# except Exception as e: +# raise e +# return change +# Milvus.insert = insert_decorator(Milvus.insert) +# yield + + +@pytest.fixture(scope="module") +def connect(request): + ip = request.config.getoption("--ip") + service_name = request.config.getoption("--service") + port = request.config.getoption("--port") + http_port = request.config.getoption("--http-port") + handler = request.config.getoption("--handler") + if handler == "HTTP": + port = http_port + try: + milvus = get_milvus(host=ip, port=port, handler=handler) + # reset_build_index_threshold(milvus) + except Exception as e: + logging.getLogger().error(str(e)) + pytest.exit("Milvus server cannot be connected, exiting pytest ...") + def fin(): + try: + milvus.close() + except Exception as e: + logging.getLogger().info(str(e)) + request.addfinalizer(fin) + return milvus + + +@pytest.fixture(scope="module") +def dis_connect(request): + ip = request.config.getoption("--ip") + service_name = request.config.getoption("--service") + port = request.config.getoption("--port") + http_port = request.config.getoption("--http-port") + handler = request.config.getoption("--handler") + if handler == "HTTP": + port = http_port + milvus = get_milvus(host=ip, port=port, handler=handler) + milvus.close() + return milvus + + +@pytest.fixture(scope="module") +def args(request): + ip = request.config.getoption("--ip") + service_name = request.config.getoption("--service") + port = request.config.getoption("--port") + http_port = request.config.getoption("--http-port") + handler = request.config.getoption("--handler") + if handler == "HTTP": + port = http_port + args = {"ip": ip, "port": port, "handler": handler, "service_name": service_name} + return args + + +@pytest.fixture(scope="module") +def milvus(request): + ip = request.config.getoption("--ip") + port = request.config.getoption("--port") + http_port = request.config.getoption("--http-port") + handler = request.config.getoption("--handler") + if handler == "HTTP": + port = http_port + return get_milvus(host=ip, port=port, handler=handler)
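+ + +# Naming convention assumed by the collection fixtures below: a test module +# may define collection_id = "<prefix>", and gen_unique_str() appends a random +# suffix to it (hypothetical example: "collection_count_x7f3a"), so concurrent +# runs do not collide on collection names.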
"collection_id", "test") + collection_name = gen_unique_str(ori_collection_name) + try: + fields = gen_default_fields(auto_id=False) + connect.create_collection(collection_name, fields) + except Exception as e: + pytest.exit(str(e)) + def teardown(): + if connect.has_collection(collection_name): + connect.drop_collection(collection_name, timeout=delete_timeout) + request.addfinalizer(teardown) + assert connect.has_collection(collection_name) + return collection_name + + +@pytest.fixture(scope="function") +def binary_collection(request, connect): + ori_collection_name = getattr(request.module, "collection_id", "test") + collection_name = gen_unique_str(ori_collection_name) + try: + fields = gen_binary_default_fields() + connect.create_collection(collection_name, fields) + except Exception as e: + pytest.exit(str(e)) + def teardown(): + collection_names = connect.list_collections() + if connect.has_collection(collection_name): + connect.drop_collection(collection_name, timeout=delete_timeout) + request.addfinalizer(teardown) + assert connect.has_collection(collection_name) + return collection_name + + +# customised id +@pytest.fixture(scope="function") +def binary_id_collection(request, connect): + ori_collection_name = getattr(request.module, "collection_id", "test") + collection_name = gen_unique_str(ori_collection_name) + try: + fields = gen_binary_default_fields(auto_id=False) + connect.create_collection(collection_name, fields) + except Exception as e: + pytest.exit(str(e)) + def teardown(): + if connect.has_collection(collection_name): + connect.drop_collection(collection_name, timeout=delete_timeout) + request.addfinalizer(teardown) + assert connect.has_collection(collection_name) + return collection_name + +# for test exit in the future +# @pytest.hookimpl(hookwrapper=True, tryfirst=True) +# def pytest_runtest_makereport(): +# result = yield +# report = result.get_result() +# if report.outcome == "failed": +# msg = "The execution of the test case fails and the test exits..." 
+# log.error(msg) +# pytest.exit(msg) \ No newline at end of file diff --git a/tests20/python_client/graphs/module_call_diagram.jpg b/tests/python_client/graphs/module_call_diagram.jpg similarity index 100% rename from tests20/python_client/graphs/module_call_diagram.jpg rename to tests/python_client/graphs/module_call_diagram.jpg diff --git a/tests20/python_client/load/README.md b/tests/python_client/load/README.md similarity index 100% rename from tests20/python_client/load/README.md rename to tests/python_client/load/README.md diff --git a/tests20/python_client/load/test_workload.py b/tests/python_client/load/test_workload.py similarity index 99% rename from tests20/python_client/load/test_workload.py rename to tests/python_client/load/test_workload.py index f71ead8c9b..4644ed0fc3 100644 --- a/tests20/python_client/load/test_workload.py +++ b/tests/python_client/load/test_workload.py @@ -6,7 +6,7 @@ from common import common_func as cf from common import common_type as ct from common.common_type import CaseLabel from utils.util_log import test_log as log -from pymilvus_orm import utility +from pymilvus import utility rounds = 100 diff --git a/tests20/python_client/pytest.ini b/tests/python_client/pytest.ini similarity index 65% rename from tests20/python_client/pytest.ini rename to tests/python_client/pytest.ini index c9561202de..55aa80c97f 100644 --- a/tests20/python_client/pytest.ini +++ b/tests/python_client/pytest.ini @@ -1,7 +1,7 @@ [pytest] -addopts = --host localhost --html=/tmp/ci_logs/report.html --self-contained-html -v +addopts = --ip localhost --host localhost --html=/tmp/ci_logs/report.html --self-contained-html -v # -;addopts = --host 172.28.255.155 --html=/tmp/report.html # python3 -W ignore -m pytest diff --git a/tests/python_test/requirements.txt b/tests/python_client/requirements.txt similarity index 87% rename from tests/python_test/requirements.txt rename to tests/python_client/requirements.txt index 3d97ebf1db..dce800017c 100644 --- a/tests/python_test/requirements.txt +++ b/tests/python_client/requirements.txt @@ -1,6 +1,6 @@ --extra-index-url https://test.pypi.org/simple/ -grpcio==1.26.0 -grpcio-tools==1.26.0 +grpcio==1.37.1 +grpcio-tools==1.37.1 numpy==1.19.5 pytest-cov==2.8.1 sklearn==0.0 @@ -12,7 +12,7 @@ pytest-print==0.2.1 pytest-level==0.1.1 pytest-xdist==2.2.1 # pytest-parallel -pymilvus-orm==2.0.0rc3.dev15 +pymilvus==2.0.0rc5.dev18 pytest-rerunfailures==9.1.1 git+https://github.com/Projectplace/pytest-tags ndg-httpsclient diff --git a/tests/python_client/run.sh b/tests/python_client/run.sh new file mode 100644 index 0000000000..cee5b061f5 --- /dev/null +++ b/tests/python_client/run.sh @@ -0,0 +1,4 @@ +#/bin/bash + + +pytest . 
$@ \ No newline at end of file diff --git a/tests20/python_client/scale/README.md b/tests/python_client/scale/README.md similarity index 96% rename from tests20/python_client/scale/README.md rename to tests/python_client/scale/README.md index fa80bab415..e3d1597c76 100644 --- a/tests20/python_client/scale/README.md +++ b/tests/python_client/scale/README.md @@ -42,7 +42,7 @@ Run a single test scenario manually(take scale dataNode as instance): - run the commands below: ```bash - cd /milvus/tests20/python_client/scale + cd /milvus/tests/python_client/scale pytest test_data_node_scale.py::TestDataNodeScale::test_expand_data_node -v -s ``` diff --git a/tests20/python_client/scale/constants.py b/tests/python_client/scale/constants.py similarity index 100% rename from tests20/python_client/scale/constants.py rename to tests/python_client/scale/constants.py diff --git a/tests20/python_client/scale/helm_env.py b/tests/python_client/scale/helm_env.py similarity index 100% rename from tests20/python_client/scale/helm_env.py rename to tests/python_client/scale/helm_env.py diff --git a/tests20/python_client/scale/scale_common.py b/tests/python_client/scale/scale_common.py similarity index 95% rename from tests20/python_client/scale/scale_common.py rename to tests/python_client/scale/scale_common.py index d5b51c7955..acd7f555d7 100644 --- a/tests20/python_client/scale/scale_common.py +++ b/tests/python_client/scale/scale_common.py @@ -1,6 +1,6 @@ import os -from pymilvus_orm import connections, Index +from pymilvus import connections, Index from scale import constants from utils.util_log import test_log as log diff --git a/tests20/python_client/scale/test_data_node_scale.py b/tests/python_client/scale/test_data_node_scale.py similarity index 98% rename from tests20/python_client/scale/test_data_node_scale.py rename to tests/python_client/scale/test_data_node_scale.py index 43dad68c93..0b102e30b6 100644 --- a/tests20/python_client/scale/test_data_node_scale.py +++ b/tests/python_client/scale/test_data_node_scale.py @@ -7,7 +7,7 @@ from common import common_func as cf from common import common_type as ct from scale import constants from scale.helm_env import HelmEnv -from pymilvus_orm import connections, utility +from pymilvus import connections, utility prefix = "data_scale" default_schema = cf.gen_default_collection_schema() diff --git a/tests20/python_client/scale/test_index_node_scale.py b/tests/python_client/scale/test_index_node_scale.py similarity index 99% rename from tests20/python_client/scale/test_index_node_scale.py rename to tests/python_client/scale/test_index_node_scale.py index a36d8eaecb..4151e8525d 100644 --- a/tests20/python_client/scale/test_index_node_scale.py +++ b/tests/python_client/scale/test_index_node_scale.py @@ -1,7 +1,7 @@ import datetime # import pdb import pytest -from pymilvus_orm import connections +from pymilvus import connections from base.collection_wrapper import ApiCollectionWrapper from common.common_type import CaseLabel diff --git a/tests20/python_client/scale/test_proxy_scale.py b/tests/python_client/scale/test_proxy_scale.py similarity index 100% rename from tests20/python_client/scale/test_proxy_scale.py rename to tests/python_client/scale/test_proxy_scale.py diff --git a/tests20/python_client/scale/test_query_node_scale.py b/tests/python_client/scale/test_query_node_scale.py similarity index 98% rename from tests20/python_client/scale/test_query_node_scale.py rename to tests/python_client/scale/test_query_node_scale.py index 29d1e91516..62cba838b1 100644 --- 
a/tests20/python_client/scale/test_query_node_scale.py +++ b/tests/python_client/scale/test_query_node_scale.py @@ -10,7 +10,7 @@ from utils.util_log import test_log as log from common import common_func as cf from common import common_type as ct from scale import constants -from pymilvus_orm import Index, connections +from pymilvus import Index, connections prefix = "search_scale" nb = 5000 diff --git a/tests/python_client/testcases/collection/test_collection_count.py b/tests/python_client/testcases/collection/test_collection_count.py new file mode 100644 index 0000000000..fdab2f0469 --- /dev/null +++ b/tests/python_client/testcases/collection/test_collection_count.py @@ -0,0 +1,503 @@ +import pytest +from utils.utils import * +from common.constants import * + +uid = "collection_count" +tag = "collection_count_tag" + + +class TestCollectionCount: + """ + params means different nb, the nb value may trigger merge, or not + """ + + @pytest.fixture( + scope="function", + params=[ + 1, + 1000, + 2001 + ], + ) + def insert_count(self, request): + yield request.param + + """ + generate valid create_index params + """ + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + return request.param + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_collection_count(self, connect, collection, insert_count): + ''' + target: test collection rows_count is correct or not + method: create collection and add vectors in it, + assert the value returned by count_entities method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + entities = gen_entities(insert_count) + result = connect.insert(collection, entities) + assert len(result.primary_keys) == insert_count + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == insert_count + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_collection_count_partition(self, connect, collection, insert_count): + ''' + target: test collection rows_count is correct or not + method: create collection, create partition and add vectors in it, + assert the value returned by count_entities method is equal to length of vectors + expected: the count is equal to the length of vectors + ''' + entities = gen_entities(insert_count) + connect.create_partition(collection, tag) + result = connect.insert(collection, entities, partition_name=tag) + assert len(result.primary_keys) == insert_count + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == insert_count + + # def test_collection_count_multi_partitions_A(self, connect, collection, insert_count): + # ''' + # target: test collection rows_count is correct or not + # method: create collection, create partitions and add entities in it, + # assert the value returned by count_entities method is equal to length of entities + # expected: the count is equal to the length of entities + # ''' + # new_tag = "new_tag" + # entities = gen_entities(insert_count) + # connect.create_partition(collection, tag) + # connect.create_partition(collection, new_tag) + # res_ids = connect.insert(collection, entities) + # connect.flush([collection]) + # # res = connect.count_entities(collection) + # # assert res == insert_count + # stats = connect.get_collection_stats(collection) + # assert stats[row_count] == insert_count + + # def test_collection_count_multi_partitions_B(self, connect, collection, insert_count): + # ''' + # target: test 
collection rows_count is correct or not + # method: create collection, create partitions and add entities in one of the partitions, + # assert the value returned by count_entities method is equal to length of entities + # expected: the count is equal to the length of entities + # ''' + # new_tag = "new_tag" + # entities = gen_entities(insert_count) + # connect.create_partition(collection, tag) + # connect.create_partition(collection, new_tag) + # res_ids = connect.insert(collection, entities, partition_name=tag) + # connect.flush([collection]) + # # res = connect.count_entities(collection) + # # assert res == insert_count + # stats = connect.get_collection_stats(collection) + # assert stats[row_count] == insert_count + + # def test_collection_count_multi_partitions_C(self, connect, collection, insert_count): + # ''' + # target: test collection rows_count is correct or not + # method: create collection, create partitions and add entities in one of the partitions, + # assert the value returned by count_entities method is equal to length of entities + # expected: the count is equal to the length of vectors + # ''' + # new_tag = "new_tag" + # entities = gen_entities(insert_count) + # connect.create_partition(collection, tag) + # connect.create_partition(collection, new_tag) + # res_ids = connect.insert(collection, entities) + # res_ids_2 = connect.insert(collection, entities, partition_name=tag) + # connect.flush([collection]) + # # res = connect.count_entities(collection) + # # assert res == insert_count * 2 + # stats = connect.get_collection_stats(collection) + # assert stats[row_count] == insert_count * 2 + + # def test_collection_count_multi_partitions_D(self, connect, collection, insert_count): + # ''' + # target: test collection rows_count is correct or not + # method: create collection, create partitions and add entities in one of the partitions, + # assert the value returned by count_entities method is equal to length of entities + # expected: the collection count is equal to the length of entities + # ''' + # new_tag = "new_tag" + # entities = gen_entities(insert_count) + # connect.create_partition(collection, tag) + # connect.create_partition(collection, new_tag) + # res_ids = connect.insert(collection, entities, partition_name=tag) + # res_ids2 = connect.insert(collection, entities, partition_name=new_tag) + # connect.flush([collection]) + # # res = connect.count_entities(collection) + # # assert res == insert_count * 2 + # stats = connect.get_collection_stats(collection) + # assert stats[row_count] == insert_count * 2 + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_collection_count_after_index_created(self, connect, collection, get_simple_index, insert_count): + ''' + target: test count_entities, after index has been created + method: add vectors in db, and create index, then calling count_entities with correct params + expected: the count is equal to the length of entities + ''' + entities = gen_entities(insert_count) + connect.insert(collection, entities) + connect.flush([collection]) + connect.create_index(collection, default_float_vec_field_name, get_simple_index) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == insert_count
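+ + # Pattern note for this file: row counts are read via insert -> flush -> + # get_collection_stats; the flush is what seals the segments, so counting + # before it may lag (a behavioral assumption, not asserted by these tests).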
+ + @pytest.mark.tags(CaseLabel.L2) + def test_count_without_connection(self, collection, dis_connect): + ''' + target: test count_entities, without connection + method: calling count_entities with correct params, with a disconnected instance + expected: count_entities raise exception + ''' + with pytest.raises(Exception) as e: + dis_connect.count_entities(collection) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_collection_count_no_vectors(self, connect, collection): + ''' + target: test collection rows_count is correct or not, if collection is empty + method: create collection and no vectors in it, + assert the value returned by count_entities method is equal to 0 + expected: the count is equal to 0 + ''' + stats = connect.get_collection_stats(collection) + assert stats[row_count] == 0 + + +class TestCollectionCountIP: + """ + params means different nb, the nb value may trigger merge, or not + """ + + @pytest.fixture( + scope="function", + params=[ + 1, + 1000, + 2001 + ], + ) + def insert_count(self, request): + yield request.param + + """ + generate valid create_index params + """ + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + request.param.update({"metric_type": "IP"}) + return request.param + + @pytest.mark.tags(CaseLabel.L2) + def test_collection_count_after_index_created(self, connect, collection, get_simple_index, insert_count): + ''' + target: test count_entities, after index has been created + method: add vectors in db, and create index, then calling count_entities with correct params + expected: the count is equal to the length of entities + ''' + entities = gen_entities(insert_count) + connect.insert(collection, entities) + connect.flush([collection]) + connect.create_index(collection, default_float_vec_field_name, get_simple_index) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == insert_count + + +class TestCollectionCountBinary: + """ + params means different nb, the nb value may trigger merge, or not + """ + + @pytest.fixture( + scope="function", + params=[ + 1, + 1000, + 2001 + ], + ) + def insert_count(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_binary_index() + ) + def get_jaccard_index(self, request, connect): + request.param["metric_type"] = "JACCARD" + return request.param + + @pytest.fixture( + scope="function", + params=gen_binary_index() + ) + def get_hamming_index(self, request, connect): + request.param["metric_type"] = "HAMMING" + return request.param + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_substructure_index(self, request, connect): + request.param["metric_type"] = "SUBSTRUCTURE" + return request.param + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_superstructure_index(self, request, connect): + request.param["metric_type"] = "SUPERSTRUCTURE" + return request.param + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_collection_count(self, connect, binary_collection, insert_count): + ''' + target: test collection rows_count is correct or not + method: create collection and add entities in it, + assert the value returned by count_entities method is equal to length of entities + expected: the count is equal to the length of entities + ''' + raw_vectors, entities = gen_binary_entities(insert_count) + result = connect.insert(binary_collection, entities) + assert len(result.primary_keys) == insert_count + connect.flush([binary_collection]) + stats = connect.get_collection_stats(binary_collection) + assert stats[row_count] == insert_count + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_collection_count_partition(self, connect, binary_collection, insert_count): + ''' + target: test collection rows_count is correct or not + method: create collection, create 
partition and add entities in it, + assert the value returned by count_entities method is equal to length of entities + expected: the count is equal to the length of entities + ''' + raw_vectors, entities = gen_binary_entities(insert_count) + connect.create_partition(binary_collection, tag) + connect.insert(binary_collection, entities, partition_name=tag) + connect.flush([binary_collection]) + stats = connect.get_collection_stats(binary_collection) + assert stats[row_count] == insert_count + + # @pytest.mark.tags(CaseLabel.L2) + # def test_collection_count_multi_partitions_A(self, connect, binary_collection, insert_count): + # ''' + # target: test collection rows_count is correct or not + # method: create collection, create partitions and add entities in it, + # assert the value returned by count_entities method is equal to length of entities + # expected: the count is equal to the length of entities + # ''' + # new_tag = "new_tag" + # raw_vectors, entities = gen_binary_entities(insert_count) + # connect.create_partition(binary_collection, tag) + # connect.create_partition(binary_collection, new_tag) + # res_ids = connect.insert(binary_collection, entities) + # connect.flush([binary_collection]) + # # res = connect.count_entities(binary_collection) + # # assert res == insert_count + # stats = connect.get_collection_stats(binary_collection) + # assert stats[row_count] == insert_count + + # @pytest.mark.tags(CaseLabel.L2) + # def test_collection_count_multi_partitions_B(self, connect, binary_collection, insert_count): + # ''' + # target: test collection rows_count is correct or not + # method: create collection, create partitions and add entities in one of the partitions, + # assert the value returned by count_entities method is equal to length of entities + # expected: the count is equal to the length of entities + # ''' + # new_tag = "new_tag" + # raw_vectors, entities = gen_binary_entities(insert_count) + # connect.create_partition(binary_collection, tag) + # connect.create_partition(binary_collection, new_tag) + # res_ids = connect.insert(binary_collection, entities, partition_name=tag) + # connect.flush([binary_collection]) + # # res = connect.count_entities(binary_collection) + # # assert res == insert_count + # stats = connect.get_collection_stats(binary_collection) + # assert stats[row_count] == insert_count + + # def test_collection_count_multi_partitions_C(self, connect, binary_collection, insert_count): + # ''' + # target: test collection rows_count is correct or not + # method: create collection, create partitions and add entities in one of the partitions, + # assert the value returned by count_entities method is equal to length of entities + # expected: the count is equal to the length of entities + # ''' + # new_tag = "new_tag" + # raw_vectors, entities = gen_binary_entities(insert_count) + # connect.create_partition(binary_collection, tag) + # connect.create_partition(binary_collection, new_tag) + # res_ids = connect.insert(binary_collection, entities) + # res_ids_2 = connect.insert(binary_collection, entities, partition_name=tag) + # connect.flush([binary_collection]) + # # res = connect.count_entities(binary_collection) + # # assert res == insert_count * 2 + # stats = connect.get_collection_stats(binary_collection) + # assert stats[row_count] == insert_count * 2 + + # @pytest.mark.tags(CaseLabel.L2) + # def test_collection_count_multi_partitions_D(self, connect, binary_collection, insert_count): + # ''' + # target: test collection rows_count is correct or not + # method: 
create collection, create partitions and add entities in one of the partitions, + # assert the value returned by count_entities method is equal to length of entities + # expected: the collection count is equal to the length of entities + # ''' + # new_tag = "new_tag" + # raw_vectors, entities = gen_binary_entities(insert_count) + # connect.create_partition(binary_collection, tag) + # connect.create_partition(binary_collection, new_tag) + # res_ids = connect.insert(binary_collection, entities, partition_name=tag) + # res_ids2 = connect.insert(binary_collection, entities, partition_name=new_tag) + # connect.flush([binary_collection]) + # # res = connect.count_entities(binary_collection) + # # assert res == insert_count * 2 + # stats = connect.get_collection_stats(binary_collection) + # assert stats[row_count] == insert_count * 2 + + # TODO: need to update and enable + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_collection_count_after_index_created(self, connect, binary_collection, get_jaccard_index, insert_count): + ''' + target: test count_entities, after index has been created + method: add vectors in db, and create index, then calling count_entities with correct params + expected: the count is equal to the length of entities + ''' + raw_vectors, entities = gen_binary_entities(insert_count) + connect.insert(binary_collection, entities) + connect.flush([binary_collection]) + connect.create_index(binary_collection, default_binary_vec_field_name, get_jaccard_index) + stats = connect.get_collection_stats(binary_collection) + assert stats[row_count] == insert_count + + # TODO: need to update and enable + @pytest.mark.tags(CaseLabel.L2) + def test_collection_count_after_index_created_A(self, connect, binary_collection, get_hamming_index, insert_count): + ''' + target: test count_entities, after index has been created + method: add vectors in db, and create index, then calling count_entities with correct params + expected: the count is equal to the length of entities + ''' + raw_vectors, entities = gen_binary_entities(insert_count) + connect.insert(binary_collection, entities) + connect.flush([binary_collection]) + # connect.load_collection(binary_collection) + connect.create_index(binary_collection, default_binary_vec_field_name, get_hamming_index) + stats = connect.get_collection_stats(binary_collection) + assert stats[row_count] == insert_count + + @pytest.mark.tags(CaseLabel.L2) + def test_collection_count_no_entities(self, connect, binary_collection): + ''' + target: test collection rows_count is correct or not, if collection is empty + method: create collection and no vectors in it, + assert the value returned by count_entities method is equal to 0 + expected: the count is equal to 0 + ''' + stats = connect.get_collection_stats(binary_collection) + assert stats[row_count] == 0 + + +class TestCollectionMultiCollections: + """ + params means different nb, the nb value may trigger merge, or not + """ + + @pytest.fixture( + scope="function", + params=[ + 1, + 1000, + 2001 + ], + ) + def insert_count(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_collection_count_multi_collections_l2(self, connect, insert_count): + ''' + target: test collection rows_count is correct or not with multiple collections of L2 + method: create collection and add entities in it, + assert the value returned by count_entities method is equal to length of entities + expected: the count is equal to the length of entities + ''' + entities = gen_entities(insert_count) + collection_list = [] + 
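+ # All collections created below are flushed in a single + # connect.flush(collection_list) call, rather than one flush per + # collection, before the per-collection row-count checks.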
collection_num = 20 + for i in range(collection_num): + collection_name = gen_unique_str(uid) + collection_list.append(collection_name) + connect.create_collection(collection_name, default_fields) + connect.insert(collection_name, entities) + connect.flush(collection_list) + for i in range(collection_num): + stats = connect.get_collection_stats(collection_list[i]) + assert stats[row_count] == insert_count + connect.drop_collection(collection_list[i]) + + @pytest.mark.tags(CaseLabel.L2) + def test_collection_count_multi_collections_binary(self, connect, binary_collection, insert_count): + ''' + target: test collection rows_count is correct or not with multiple collections of JACCARD + method: create collection and add entities in it, + assert the value returned by count_entities method is equal to length of entities + expected: the count is equal to the length of entities + ''' + raw_vectors, entities = gen_binary_entities(insert_count) + connect.insert(binary_collection, entities) + collection_list = [] + collection_num = 20 + for i in range(collection_num): + collection_name = gen_unique_str(uid) + collection_list.append(collection_name) + connect.create_collection(collection_name, default_binary_fields) + connect.insert(collection_name, entities) + connect.flush(collection_list) + for i in range(collection_num): + stats = connect.get_collection_stats(collection_list[i]) + assert stats[row_count] == insert_count + connect.drop_collection(collection_list[i]) + + @pytest.mark.tags(CaseLabel.L2) + def test_collection_count_multi_collections_mix(self, connect): + ''' + target: test collection rows_count is correct or not with multiple collections of mixed metric types (L2 and JACCARD) + method: create collection and add entities in it, + assert the value returned by count_entities method is equal to length of entities + expected: the count is equal to the length of entities + ''' + collection_list = [] + collection_num = 20 + for i in range(0, int(collection_num / 2)): + collection_name = gen_unique_str(uid) + collection_list.append(collection_name) + connect.create_collection(collection_name, default_fields) + connect.insert(collection_name, default_entities) + for i in range(int(collection_num / 2), collection_num): + collection_name = gen_unique_str(uid) + collection_list.append(collection_name) + connect.create_collection(collection_name, default_binary_fields) + res = connect.insert(collection_name, default_binary_entities) + connect.flush(collection_list) + for i in range(collection_num): + stats = connect.get_collection_stats(collection_list[i]) + assert stats[row_count] == default_nb + connect.drop_collection(collection_list[i]) diff --git a/tests/python_client/testcases/collection/test_collection_logic.py b/tests/python_client/testcases/collection/test_collection_logic.py new file mode 100644 index 0000000000..02f5b42a36 --- /dev/null +++ b/tests/python_client/testcases/collection/test_collection_logic.py @@ -0,0 +1,138 @@ +import pdb +import pytest +import logging +import itertools +from time import sleep +from multiprocessing import Process +from utils.utils import * + +uid = "collection_logic" + +def create_collection(connect, **params): + connect.create_collection(params["collection_name"], const.default_fields) + +def search_collection(connect, **params): + status, result = connect.search( + params["collection_name"], + params["top_k"], + params["query_vectors"], + params={"nprobe": params["nprobe"]}) + return status + +def load_collection(connect, **params): + 
connect.load_collection(params["collection_name"]) + +def has(connect, **params): + status, result = connect.has_collection(params["collection_name"]) + return status + +def show(connect, **params): + status, result = connect.list_collections() + return status + +def delete(connect, **params): + status = connect.drop_collection(params["collection_name"]) + return status + +def describe(connect, **params): + status, result = connect.get_collection_info(params["collection_name"]) + return status + +def rowcount(connect, **params): + status, result = connect.count_entities(params["collection_name"]) + return status + +def create_index(connect, **params): + status = connect.create_index(params["collection_name"], params["index_type"], params["index_param"]) + return status + +func_map = { + # 0:has, + 1:show, + 10:create_collection, + 11:describe, + 12:rowcount, + 13:search_collection, + 14:load_collection, + 15:create_index, + 30:delete +} + +def gen_sequence(): + raw_seq = func_map.keys() + result = itertools.permutations(raw_seq) + for x in result: + yield x + + +class TestCollectionLogic(object): + @pytest.mark.parametrize("logic_seq", gen_sequence()) + @pytest.mark.tags(CaseLabel.L2) + def _test_logic(self, connect, logic_seq, args): + if args["handler"] == "HTTP": + pytest.skip("Skip in http mode") + if self.is_right(logic_seq): + self.execute(logic_seq, connect) + else: + self.execute_with_error(logic_seq, connect) + self.tear_down(connect) + + def is_right(self, seq): + if sorted(seq) == seq: + return True + + not_created = True + has_deleted = False + for i in range(len(seq)): + if seq[i] > 10 and not_created: + return False + elif seq [i] > 10 and has_deleted: + return False + elif seq[i] == 10: + not_created = False + elif seq[i] == 30: + has_deleted = True + + return True + + def execute(self, logic_seq, connect): + basic_params = self.gen_params() + for i in range(len(logic_seq)): + # logging.getLogger().info(logic_seq[i]) + f = func_map[logic_seq[i]] + status = f(connect, **basic_params) + assert status.OK() + + def execute_with_error(self, logic_seq, connect): + basic_params = self.gen_params() + + error_flag = False + for i in range(len(logic_seq)): + f = func_map[logic_seq[i]] + status = f(connect, **basic_params) + if not status.OK(): + # logging.getLogger().info(logic_seq[i]) + error_flag = True + break + assert error_flag == True + + def tear_down(self, connect): + names = connect.list_collections()[1] + for name in names: + connect.drop_collection(name) + + def gen_params(self): + collection_name = gen_unique_str(uid) + top_k = 1 + vectors = gen_vectors(2, dim) + param = {'collection_name': collection_name, + 'dimension': dim, + 'metric_type': "L2", + 'nprobe': 1, + 'top_k': top_k, + 'index_type': "IVF_SQ8", + 'index_param': { + 'nlist': 16384 + }, + 'query_vectors': vectors} + return param diff --git a/tests/python_client/testcases/collection/test_collection_stats.py b/tests/python_client/testcases/collection/test_collection_stats.py new file mode 100644 index 0000000000..8af4cfb4cf --- /dev/null +++ b/tests/python_client/testcases/collection/test_collection_stats.py @@ -0,0 +1,415 @@ +import time +import pdb +import threading +import logging +from multiprocessing import Pool, Process + +import pytest +from utils.utils import * +from common.constants import * + +uid = "get_collection_stats" + + +class TestGetCollectionStats: + """ + ****************************************************************** + The following cases are used to test `collection_stats` function + 
****************************************************************** + """ + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_invalid_collection_name(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + # if str(connect._cmd("mode")) == "CPU": + # if request.param["index_type"] in index_cpu_not_support(): + # pytest.skip("CPU not support index_type: ivf_sq8h") + return request.param + + @pytest.fixture( + scope="function", + params=gen_binary_index() + ) + def get_jaccard_index(self, request, connect): + logging.getLogger().info(request.param) + if request.param["index_type"] in binary_support(): + request.param["metric_type"] = "JACCARD" + return request.param + else: + pytest.skip("Skip index Temporary") + + @pytest.fixture( + scope="function", + params=[ + 1, + 1000, + 2001 + ], + ) + def insert_count(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_get_collection_stats_name_not_existed(self, connect, collection): + ''' + target: get collection stats where collection name does not exist + method: call collection_stats with a random collection_name, which is not in db + expected: status not ok + ''' + collection_name = gen_unique_str(uid) + with pytest.raises(Exception) as e: + connect.get_collection_stats(collection_name) + + @pytest.mark.tags(CaseLabel.L2) + def test_get_collection_stats_name_invalid(self, connect, get_invalid_collection_name): + ''' + target: get collection stats where collection name is invalid + method: call collection_stats with invalid collection_name + expected: status not ok + ''' + collection_name = get_invalid_collection_name + with pytest.raises(Exception) as e: + connect.get_collection_stats(collection_name) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_get_collection_stats_empty(self, connect, collection): + ''' + target: get collection stats where no entity in collection + method: call collection_stats in empty collection + expected: row count is 0 + ''' + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == 0 + + @pytest.mark.tags(CaseLabel.L2) + def test_get_collection_stats_without_connection(self, collection, dis_connect): + ''' + target: test get_collection_stats, without connection + method: calling get_collection_stats with correct params, with a disconnected instance + expected: get_collection_stats raise exception + ''' + with pytest.raises(Exception) as e: + dis_connect.get_collection_stats(collection) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_get_collection_stats_batch(self, connect, collection): + ''' + target: get row count with collection_stats + method: add entities, check count in collection info + expected: count as expected + ''' + result = connect.insert(collection, default_entities) + assert len(result.primary_keys) == default_nb + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert int(stats[row_count]) == default_nb + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_get_collection_stats_single(self, connect, collection): + ''' + target: get row count with collection_stats + method: add entity one by one, check count in collection info + expected: count as expected + ''' + nb = 10 + for i in range(nb): + connect.insert(collection, default_entity) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == nb + + 
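+ # The _test_* methods below are deliberately not collected by pytest + # (leading underscore): they exercise delete/compact statistics and are + # parked until those paths are re-enabled (see the TODO markers). + 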
@pytest.mark.tags(CaseLabel.L2) + def _test_get_collection_stats_after_delete(self, connect, collection): + ''' + target: get row count with collection_stats + method: add and delete entities, check count in collection info + expected: status ok, count as expected + ''' + ids = connect.insert(collection, default_entities) + status = connect.flush([collection]) + delete_ids = [ids[0], ids[-1]] + connect.delete_entity_by_id(collection, delete_ids) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats["row_count"] == default_nb - 2 + assert stats["partitions"][0]["row_count"] == default_nb - 2 + assert stats["partitions"][0]["segments"][0]["data_size"] > 0 + + # TODO: enable + @pytest.mark.tags(CaseLabel.L2) + def _test_get_collection_stats_after_compact_parts(self, connect, collection): + ''' + target: get row count with collection_stats + method: add and delete entities, and compact collection, check count in collection info + expected: status ok, count as expected + ''' + delete_length = 1000 + ids = connect.insert(collection, default_entities) + status = connect.flush([collection]) + delete_ids = ids[:delete_length] + connect.delete_entity_by_id(collection, delete_ids) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + logging.getLogger().info(stats) + assert stats["row_count"] == default_nb - delete_length + compact_before = stats["partitions"][0]["segments"][0]["data_size"] + connect.compact(collection) + stats = connect.get_collection_stats(collection) + logging.getLogger().info(stats) + compact_after = stats["partitions"][0]["segments"][0]["data_size"] + assert compact_before == compact_after + + @pytest.mark.tags(CaseLabel.L2) + def _test_get_collection_stats_after_compact_delete_one(self, connect, collection): + ''' + target: get row count with collection_stats + method: add and delete one entity, and compact collection, check count in collection info + expected: status ok, count as expected + ''' + ids = connect.insert(collection, default_entities) + status = connect.flush([collection]) + delete_ids = ids[:1] + connect.delete_entity_by_id(collection, delete_ids) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + logging.getLogger().info(stats) + compact_before = stats["partitions"][0]["row_count"] + connect.compact(collection) + stats = connect.get_collection_stats(collection) + logging.getLogger().info(stats) + compact_after = stats["partitions"][0]["row_count"] + # pdb.set_trace() + assert compact_before == compact_after + + @pytest.mark.tags(CaseLabel.L2) + def test_get_collection_stats_partition(self, connect, collection): + ''' + target: get partition info in a collection + method: call collection_stats after partition created and check partition_stats + expected: status ok, vectors added to partition + ''' + connect.create_partition(collection, default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + assert len(result.primary_keys) == default_nb + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == default_nb + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_get_collection_stats_partitions(self, connect, collection): + ''' + target: get partition info in a collection + method: create two partitions, add vectors in one of the partitions, call collection_stats and check + expected: status ok, vectors added to one partition but not the other + ''' + new_tag = "new_tag" + 
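+ # Count arithmetic in this test: default_nb rows go to default_tag, then + # default_nb more to new_tag, then default_nb to the default partition, + # so the collection-level row count steps 1x -> 2x -> 3x default_nb. + 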
connect.create_partition(collection, default_tag) + connect.create_partition(collection, new_tag) + connect.insert(collection, default_entities, partition_name=default_tag) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == default_nb + connect.insert(collection, default_entities, partition_name=new_tag) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == default_nb * 2 + connect.insert(collection, default_entities) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == default_nb * 3 + + @pytest.mark.tags(CaseLabel.L2) + def test_get_collection_stats_partitions_A(self, connect, collection, insert_count): + ''' + target: test collection rows_count is correct or not + method: create collection, create partitions and add entities in it, + assert the value returned by count_entities method is equal to length of entities + expected: the count is equal to the length of entities + ''' + new_tag = "new_tag" + entities = gen_entities(insert_count) + connect.create_partition(collection, default_tag) + connect.create_partition(collection, new_tag) + connect.insert(collection, entities) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == insert_count + + @pytest.mark.tags(CaseLabel.L2) + def test_get_collection_stats_partitions_B(self, connect, collection, insert_count): + ''' + target: test collection rows_count is correct or not + method: create collection, create partitions and add entities in one of the partitions, + assert the value returned by count_entities method is equal to length of entities + expected: the count is equal to the length of entities + ''' + new_tag = "new_tag" + entities = gen_entities(insert_count) + connect.create_partition(collection, default_tag) + connect.create_partition(collection, new_tag) + connect.insert(collection, entities, partition_name=default_tag) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == insert_count + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_get_collection_stats_partitions_C(self, connect, collection, insert_count): + ''' + target: test collection rows_count is correct or not + method: create collection, create partitions and add entities in one of the partitions, + assert the value returned by count_entities method is equal to length of entities + expected: the count is equal to the length of vectors + ''' + new_tag = "new_tag" + entities = gen_entities(insert_count) + connect.create_partition(collection, default_tag) + connect.create_partition(collection, new_tag) + connect.insert(collection, entities) + connect.insert(collection, entities, partition_name=default_tag) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == insert_count*2 + + @pytest.mark.tags(CaseLabel.L2) + def test_get_collection_stats_partitions_D(self, connect, collection, insert_count): + ''' + target: test collection rows_count is correct or not + method: create collection, create partitions and add entities in one of the partitions, + assert the value returned by count_entities method is equal to length of entities + expected: the collection count is equal to the length of entities + ''' + new_tag = "new_tag" + entities = gen_entities(insert_count) + connect.create_partition(collection, default_tag) + 
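+ # The same entity batch lands in two different partitions below; + # collection-level stats sum over partitions, hence insert_count * 2. + 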
connect.create_partition(collection, new_tag) + connect.insert(collection, entities, partition_name=default_tag) + connect.insert(collection, entities, partition_name=new_tag) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == insert_count*2 + + # TODO: assert metric type in stats response + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_get_collection_stats_after_index_created(self, connect, collection, get_simple_index): + ''' + target: test collection info after index created + method: create collection, add vectors, create index and call collection_stats + expected: status ok, index created and shown in segments + ''' + connect.insert(collection, default_entities) + connect.flush([collection]) + connect.create_index(collection, default_float_vec_field_name, get_simple_index) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == default_nb + + # TODO: assert metric type in stats response + @pytest.mark.tags(CaseLabel.L2) + def test_get_collection_stats_after_index_created_ip(self, connect, collection, get_simple_index): + ''' + target: test collection info after index created + method: create collection, add vectors, create index and call collection_stats + expected: status ok, index created and shown in segments + ''' + get_simple_index["metric_type"] = "IP" + result = connect.insert(collection, default_entities) + assert len(result.primary_keys) == default_nb + connect.flush([collection]) + connect.create_index(collection, default_float_vec_field_name, get_simple_index) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == default_nb + + # TODO: assert metric type in stats response + @pytest.mark.tags(CaseLabel.L2) + def test_get_collection_stats_after_index_created_jac(self, connect, binary_collection, get_jaccard_index): + ''' + target: test collection info after index created + method: create collection, add binary entities, create index and call collection_stats + expected: status ok, index created and shown in segments + ''' + ids = connect.insert(binary_collection, default_binary_entities) + connect.flush([binary_collection]) + connect.create_index(binary_collection, default_binary_vec_field_name, get_jaccard_index) + stats = connect.get_collection_stats(binary_collection) + assert stats[row_count] == default_nb + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_get_collection_stats_after_create_different_index(self, connect, collection): + ''' + target: test collection info after index created repeatedly + method: create collection, add vectors, create index and call collection_stats multiple times + expected: status ok, index info shown in segments + ''' + result = connect.insert(collection, default_entities) + connect.flush([collection]) + for index_type in ["IVF_FLAT", "IVF_SQ8"]: + connect.create_index(collection, default_float_vec_field_name, + {"index_type": index_type, "params": {"nlist": 1024}, "metric_type": "L2"}) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == default_nb + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_collection_count_multi_collections(self, connect): + ''' + target: test collection rows_count is correct or not with multiple collections of L2 + method: create collection and add entities in it, + assert the value returned by count_entities method is equal to length of entities + expected: row count in segments + ''' + collection_list = [] + 
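+ # Worked example under a hypothetical default_nb = 3000: each of the 10 + # collections below should report 3000 rows after the single shared + # flush(collection_list) call.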
collection_num = 10 + for i in range(collection_num): + collection_name = gen_unique_str(uid) + collection_list.append(collection_name) + connect.create_collection(collection_name, default_fields) + result = connect.insert(collection_name, default_entities) + connect.flush(collection_list) + for i in range(collection_num): + stats = connect.get_collection_stats(collection_list[i]) + assert stats[row_count] == default_nb + connect.drop_collection(collection_list[i]) + + @pytest.mark.tags(CaseLabel.L2) + def test_collection_count_multi_collections_indexed(self, connect): + ''' + target: test collection rows_count is correct or not with multiple collections of L2 + method: create collection and add entities in it, + assert the value returned by count_entities method is equal to length of entities + expected: row count in segments + ''' + collection_list = [] + collection_num = 10 + for i in range(collection_num): + collection_name = gen_unique_str(uid) + collection_list.append(collection_name) + connect.create_collection(collection_name, default_fields) + res = connect.insert(collection_name, default_entities) + connect.flush(collection_list) + index_1 = {"index_type": "IVF_SQ8", "params": {"nlist": 1024}, "metric_type": "L2"} + index_2 = {"index_type": "IVF_FLAT", "params": {"nlist": 1024}, "metric_type": "L2"} + if i % 2: + connect.create_index(collection_name, default_float_vec_field_name, index_1) + else: + connect.create_index(collection_name, default_float_vec_field_name, index_2) + for i in range(collection_num): + stats = connect.get_collection_stats(collection_list[i]) + assert stats[row_count] == default_nb + index = connect.describe_index(collection_list[i], "") + if i % 2: + create_target_index(index_1, default_float_vec_field_name) + assert index == index_1 + else: + create_target_index(index_2, default_float_vec_field_name) + assert index == index_2 + # break + connect.drop_collection(collection_list[i]) diff --git a/tests/python_client/testcases/collection/test_create_collection.py b/tests/python_client/testcases/collection/test_create_collection.py new file mode 100644 index 0000000000..d8f5d32a9f --- /dev/null +++ b/tests/python_client/testcases/collection/test_create_collection.py @@ -0,0 +1,327 @@ +import pdb +import copy +import logging +import itertools +import time +import threading +from multiprocessing import Process +import sklearn.preprocessing +import pytest +from utils.utils import * +from common.constants import * + +uid = "create_collection" + + +class TestCreateCollection: + """ + ****************************************************************** + The following cases are used to test `create_collection` function + ****************************************************************** + """ + + @pytest.fixture( + scope="function", + params=gen_single_filter_fields() + ) + def get_filter_field(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_single_vector_fields() + ) + def get_vector_field(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_segment_row_limits() + ) + def get_segment_row_limit(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_collection_fields(self, connect, get_filter_field, get_vector_field): + ''' + target: test create normal collection with different fields + method: create collection with diff fields: metric/field_type/... 
+ expected: no exception raised + ''' + filter_field = get_filter_field + logging.getLogger().info(filter_field) + vector_field = get_vector_field + collection_name = gen_unique_str(uid) + fields = { + "fields": [gen_primary_field(), filter_field, vector_field], + # "segment_row_limit": default_segment_row_limit + } + logging.getLogger().info(fields) + connect.create_collection(collection_name, fields) + assert connect.has_collection(collection_name) + + @pytest.mark.tags(CaseLabel.L2) + def _test_create_collection_segment_row_limit(self, connect, get_segment_row_limit): + ''' + target: test create normal collection with different fields + method: create collection with diff segment_row_limit + expected: no exception raised + ''' + collection_name = gen_unique_str(uid) + fields = copy.deepcopy(default_fields) + # fields["segment_row_limit"] = get_segment_row_limit + connect.create_collection(collection_name, fields) + assert connect.has_collection(collection_name) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_collection_after_insert(self, connect, collection): + ''' + target: test insert vector, then create collection again + method: insert vector and create collection + expected: error raised + ''' + # pdb.set_trace() + connect.insert(collection, default_entity) + + try: + connect.create_collection(collection, default_fields) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "Create collection failed: meta table add collection failed,error = collection %s exist" % collection + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_collection_after_insert_flush(self, connect, collection): + ''' + target: test insert vector, then create collection again + method: insert vector and create collection + expected: error raised + ''' + connect.insert(collection, default_entity) + # connect.flush([collection]) + try: + connect.create_collection(collection, default_fields) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "Create collection failed: meta table add collection failed,error = collection %s exist" % collection + + # TODO: assert exception + @pytest.mark.tags(CaseLabel.L2) + def test_create_collection_without_connection(self, dis_connect): + ''' + target: test create collection, without connection + method: create collection with correct params, with a disconnected instance + expected: error raised + ''' + collection_name = gen_unique_str(uid) + with pytest.raises(Exception) as e: + dis_connect.create_collection(collection_name, default_fields) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_collection_existed(self, connect): + ''' + target: test create collection but the collection name have already existed + method: create collection with the same collection_name + expected: error raised + ''' + collection_name = gen_unique_str(uid) + connect.create_collection(collection_name, default_fields) + try: + connect.create_collection(collection_name, default_fields) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message 
== "Create collection failed: meta table add collection failed,error = collection %s exist" % collection_name + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_after_drop_collection(self, connect, collection): + ''' + target: create with the same collection name after collection dropped + method: delete, then create + expected: create success + ''' + connect.drop_collection(collection) + time.sleep(2) + connect.create_collection(collection, default_fields) + + @pytest.mark.tags(CaseLabel.L2) + def test_create_collection_multithread(self, connect): + ''' + target: test create collection with multithread + method: create collection using multithread, + expected: collections are created + ''' + threads_num = 8 + threads = [] + collection_names = [] + + def create(): + collection_name = gen_unique_str(uid) + collection_names.append(collection_name) + connect.create_collection(collection_name, default_fields) + + for i in range(threads_num): + t = MyThread(target=create, args=()) + threads.append(t) + t.start() + time.sleep(0.2) + for t in threads: + t.join() + + for item in collection_names: + assert item in connect.list_collections() + connect.drop_collection(item) + + +class TestCreateCollectionInvalid(object): + """ + Test creating collections with invalid params + """ + + @pytest.fixture( + scope="function", + params=gen_invalid_metric_types() + ) + def get_metric_type(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_ints() + ) + def get_segment_row_limit(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_ints() + ) + def get_dim(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_invalid_string(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_field_types() + ) + def get_field_type(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.L2) + def _test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit): + collection_name = gen_unique_str() + fields = copy.deepcopy(default_fields) + fields["segment_row_limit"] = get_segment_row_limit + with pytest.raises(Exception) as e: + connect.create_collection(collection_name, fields) + + @pytest.mark.tags(CaseLabel.L2) + def test_create_collection_with_invalid_dimension(self, connect, get_dim): + dimension = get_dim + collection_name = gen_unique_str() + fields = copy.deepcopy(default_fields) + fields["fields"][-1]["params"]["dim"] = dimension + with pytest.raises(Exception) as e: + connect.create_collection(collection_name, fields) + + @pytest.mark.tags(CaseLabel.L2) + def test_create_collection_with_invalid_collection_name(self, connect, get_invalid_string): + collection_name = get_invalid_string + with pytest.raises(Exception) as e: + connect.create_collection(collection_name, default_fields) + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.parametrize("collection_name", ('', None)) + def test_create_collection_with_empty_or_None_collection_name(self, connect, collection_name): + # collection_name = '' + try: + connect.create_collection(collection_name, default_fields) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "Collection name should not be empty" + + 
+
+
+class TestCreateCollectionInvalid(object):
+    """
+    Test creating collections with invalid params
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_metric_types()
+    )
+    def get_metric_type(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_ints()
+    )
+    def get_segment_row_limit(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_ints()
+    )
+    def get_dim(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_invalid_string(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_field_types()
+    )
+    def get_field_type(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit):
+        collection_name = gen_unique_str()
+        fields = copy.deepcopy(default_fields)
+        fields["segment_row_limit"] = get_segment_row_limit
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, fields)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_create_collection_with_invalid_dimension(self, connect, get_dim):
+        dimension = get_dim
+        collection_name = gen_unique_str()
+        fields = copy.deepcopy(default_fields)
+        fields["fields"][-1]["params"]["dim"] = dimension
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, fields)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_create_collection_with_invalid_collection_name(self, connect, get_invalid_string):
+        collection_name = get_invalid_string
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, default_fields)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.parametrize("collection_name", ('', None))
+    def test_create_collection_with_empty_or_None_collection_name(self, connect, collection_name):
+        # collection_name = ''
+        try:
+            connect.create_collection(collection_name, default_fields)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "Collection name should not be empty"
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_create_collection_no_dimension(self, connect):
+        '''
+        target: test create collection with no dimension params
+        method: create collection with a vector field whose `dim` param is removed
+        expected: error raised
+        '''
+        collection_name = gen_unique_str(uid)
+        fields = copy.deepcopy(default_fields)
+        fields["fields"][-1]["params"].pop("dim")
+        try:
+            connect.create_collection(collection_name, fields)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "dimension is not defined in field type params"
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_create_collection_no_segment_row_limit(self, connect):
+        '''
+        target: test create collection with no segment_row_limit params
+        method: create collection with correct params
+        expected: use default default_segment_row_limit
+        '''
+        collection_name = gen_unique_str(uid)
+        fields = copy.deepcopy(default_fields)
+        fields.pop("segment_row_limit")
+        connect.create_collection(collection_name, fields)
+        res = connect.get_collection_info(collection_name)
+        logging.getLogger().info(res)
+        assert res["segment_row_limit"] == default_server_segment_row_limit
+
+    # TODO: assert exception
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_create_collection_limit_fields(self, connect):
+        collection_name = gen_unique_str(uid)
+        limit_num = 64
+        fields = copy.deepcopy(default_fields)
+        for i in range(limit_num):
+            field_name = gen_unique_str("field_name")
+            field = {"name": field_name, "type": DataType.INT64}
+            fields["fields"].append(field)
+
+        try:
+            connect.create_collection(collection_name, fields)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "maximum field's number should be limited to 64"
+
+    # TODO: assert exception
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_create_collection_invalid_field_name(self, connect, get_invalid_string):
+        collection_name = gen_unique_str(uid)
+        fields = copy.deepcopy(default_fields)
+        field_name = get_invalid_string
+        field = {"name": field_name, "type": DataType.INT64}
+        fields["fields"].append(field)
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, fields)
+
+    # TODO: assert exception
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_create_collection_invalid_field_type(self, connect, get_field_type):
+        collection_name = gen_unique_str(uid)
+        fields = copy.deepcopy(default_fields)
+        field_type = get_field_type
+        field = {"name": "test_field", "type": field_type}
+        fields["fields"].append(field)
+        with pytest.raises(Exception) as e:
+            connect.create_collection(collection_name, fields)
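Aside: the `try`/`except` blocks in these files all repeat the same `getattr(e, 'code', ...)` / `getattr(e, 'message', ...)` dance. If the pattern keeps spreading, it could be factored into one helper; a sketch under the assumption the tests already rely on (that pymilvus errors expose `code` and `message` attributes); the `assert_milvus_error` name is hypothetical, not part of this patch:

    def assert_milvus_error(e, expected_code, expected_message):
        """Assert that a caught Milvus exception carries the expected code/message."""
        code = getattr(e, 'code', "The exception does not contain the field of code.")
        assert code == expected_code
        message = getattr(e, 'message', "The exception does not contain the field of message.")
        assert message == expected_message

    # Usage inside a test body:
    # try:
    #     connect.create_collection(collection_name, default_fields)
    # except Exception as e:
    #     assert_milvus_error(e, 1, "Collection name should not be empty")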
diff --git a/tests/python_client/testcases/collection/test_describe_collection.py b/tests/python_client/testcases/collection/test_describe_collection.py
new file mode 100644
index 0000000000..243786ed81
--- /dev/null
+++ b/tests/python_client/testcases/collection/test_describe_collection.py
@@ -0,0 +1,184 @@
+import pytest
+import logging
+import time
+from utils.utils import *
+from common.constants import *
+
+uid = "describe_collection"
+
+
+class TestDescribeCollection:
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_single_filter_fields()
+    )
+    def get_filter_field(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_single_vector_fields()
+    )
+    def get_vector_field(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        logging.getLogger().info(request.param)
+        # if str(connect._cmd("mode")) == "CPU":
+        #     if request.param["index_type"] in index_cpu_not_support():
+        #         pytest.skip("sq8h not support in CPU mode")
+        return request.param
+
+    """
+    ******************************************************************
+      The following cases are used to test `describe_collection` function, no data in collection
+    ******************************************************************
+    """
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_collection_fields(self, connect, get_filter_field, get_vector_field):
+        '''
+        target: test create normal collection with different fields, check info returned
+        method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
+        expected: no exception raised, and value returned correct
+        '''
+        filter_field = get_filter_field
+        vector_field = get_vector_field
+        collection_name = gen_unique_str(uid)
+        fields = {
+            "fields": [gen_primary_field(), filter_field, vector_field],
+            # "segment_row_limit": default_segment_row_limit
+        }
+        connect.create_collection(collection_name, fields)
+        res = connect.describe_collection(collection_name)
+        # assert res['segment_row_limit'] == default_segment_row_limit
+        assert len(res["fields"]) == len(fields.get("fields"))
+        for field in res["fields"]:
+            if field["type"] == filter_field:
+                assert field["name"] == filter_field["name"]
+            elif field["type"] == vector_field:
+                assert field["name"] == vector_field["name"]
+                assert field["params"] == vector_field["params"]
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_describe_collection_after_index_created(self, connect, collection, get_simple_index):
+        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, "")
+            assert index["index_type"] == get_simple_index["index_type"]
+            assert index["metric_type"] == get_simple_index["metric_type"]
+            assert index["params"] == get_simple_index["params"]
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_describe_collection_without_connection(self, collection, dis_connect):
+        '''
+        target: test get collection info, without connection
+        method: calling get collection info with correct params, with a disconnected instance
+        expected: get collection info raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.describe_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_describe_collection_not_existed(self, connect):
+        '''
+        target: test describe collection when the collection no longer exists
+        method: generate a random collection name, create and describe the collection,
+                then drop it and describe it again
+        expected: error raised after the collection is dropped
+        '''
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+        connect.describe_collection(collection_name)
+        connect.drop_collection(collection_name)
+        try:
+            connect.describe_collection(collection_name)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "describe collection failed: can't find collection: %s" % collection_name
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_describe_collection_multithread(self, connect):
+        '''
+        target: test describe collection with multithread
+        method: describe the same collection from multiple threads
+        expected: no exception raised
+        '''
+        threads_num = 4
+        threads = []
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+
+        def get_info():
+            connect.describe_collection(collection_name)
+
+        for i in range(threads_num):
+            t = MyThread(target=get_info)
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
+    """
+    ******************************************************************
+      The following cases are used to test `describe_collection` function, and insert data in collection
+    ******************************************************************
+    """
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_describe_collection_fields_after_insert(self, connect, get_filter_field, get_vector_field):
+        '''
+        target: test create normal collection with different fields, check info returned
+        method: create collection with diff fields: metric/field_type/..., calling `describe_collection`
+        expected: no exception raised, and value returned correct
+        '''
+        filter_field = get_filter_field
+        vector_field = get_vector_field
+        collection_name = gen_unique_str(uid)
+        fields = {
+            "fields": [gen_primary_field(), filter_field, vector_field],
+            # "segment_row_limit": default_segment_row_limit
+        }
+        connect.create_collection(collection_name, fields)
+        entities = gen_entities_by_fields(fields["fields"], default_nb, vector_field["params"]["dim"])
+        res_ids = connect.insert(collection_name, entities)
+        connect.flush([collection_name])
+        res = connect.describe_collection(collection_name)
+        # assert res['segment_row_limit'] == default_segment_row_limit
+        assert len(res["fields"]) == len(fields.get("fields"))
+        for field in res["fields"]:
+            if field["type"] == filter_field:
+                assert field["name"] == filter_field["name"]
+            elif field["type"] == vector_field:
+                assert field["name"] == vector_field["name"]
+                assert field["params"] == vector_field["params"]
+
+
+class TestDescribeCollectionInvalid(object):
+    """
+    Test describe collection with invalid params
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_describe_collection_with_invalid_collection_name(self, connect, get_collection_name):
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            connect.describe_collection(collection_name)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.parametrize("collection_name", ('', None))
+    def test_describe_collection_with_empty_or_None_collection_name(self, connect, collection_name):
+        with pytest.raises(Exception) as e:
+            connect.describe_collection(collection_name)
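For reference, the assertions in the two field-checking cases above pin down the shape `describe_collection` returns: a dict with a "fields" list whose entries carry `name`, `type`, and (for vector fields) `params`. Roughly, with illustrative values only (the dim and field names are assumptions, not actual server output):

    # Illustrative shape of a describe_collection() result as the tests read it.
    example_schema = {
        "fields": [
            {"name": "int64", "type": DataType.INT64},
            {"name": "float_vector", "type": DataType.FLOAT_VECTOR,
             "params": {"dim": 128}},  # assumed dim for illustration
        ],
    }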
diff --git a/tests/python_client/testcases/collection/test_drop_collection.py b/tests/python_client/testcases/collection/test_drop_collection.py
new file mode 100644
index 0000000000..e4afd3354e
--- /dev/null
+++ b/tests/python_client/testcases/collection/test_drop_collection.py
@@ -0,0 +1,108 @@
+import pdb
+import pytest
+import logging
+import itertools
+from time import sleep
+import threading
+from multiprocessing import Process
+from utils.utils import *
+from common.constants import *
+
+uid = "drop_collection"
+
+
+class TestDropCollection:
+    """
+    ******************************************************************
+      The following cases are used to test `drop_collection` function
+    ******************************************************************
+    """
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_drop_collection_A(self, connect, collection):
+        '''
+        target: test drop collection created with correct params
+        method: create collection and then drop it,
+                check with has_collection afterwards
+        expected: status ok, and the collection no longer exists
+        '''
+        connect.drop_collection(collection)
+        time.sleep(2)
+        assert not connect.has_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_drop_collection_without_connection(self, collection, dis_connect):
+        '''
+        target: test drop collection, without connection
+        method: drop collection with correct params, with a disconnected instance
+        expected: drop raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.drop_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_drop_collection_not_existed(self, connect):
+        '''
+        target: test drop collection which was not created
+        method: generate a random collection name that does not exist in db,
+                assert the exception raised by drop_collection
+        expected: error raised
+        '''
+        collection_name = gen_unique_str(uid)
+        try:
+            connect.drop_collection(collection_name)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "describe collection failed: can't find collection: %s" % collection_name
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_create_drop_collection_multithread(self, connect):
+        '''
+        target: test create and drop collection with multithread
+        method: create and drop collections using multiple threads
+        expected: collections are created, and dropped
+        '''
+        threads_num = 8
+        threads = []
+        collection_names = []
+
+        def create():
+            collection_name = gen_unique_str(uid)
+            collection_names.append(collection_name)
+            connect.create_collection(collection_name, default_fields)
+            connect.drop_collection(collection_name)
+        for i in range(threads_num):
+            t = MyThread(target=create, args=())
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
+        for item in collection_names:
+            assert not connect.has_collection(item)
+
+
+class TestDropCollectionInvalid(object):
+    """
+    Test drop collection with invalid params
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_drop_collection_with_invalid_collection_name(self, connect, get_collection_name):
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            connect.drop_collection(collection_name)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.parametrize("collection_name", ('', None))
+    def test_drop_collection_with_empty_or_None_collection_name(self, connect, collection_name):
+        with pytest.raises(Exception) as e:
+            connect.drop_collection(collection_name)
diff --git a/tests/python_client/testcases/collection/test_has_collection.py b/tests/python_client/testcases/collection/test_has_collection.py
new file mode 100644
index 0000000000..291fc8b52a
--- /dev/null
+++ b/tests/python_client/testcases/collection/test_has_collection.py
@@ -0,0 +1,105 @@
+import pdb
+import pytest
+import logging
+import itertools
+import threading
+import time
+from multiprocessing import Process
+from utils.utils import *
+from common.constants import *
+
+uid = "has_collection"
+
+
+class TestHasCollection:
+    """
+    ******************************************************************
+      The following cases are used to test `has_collection` function
+    ******************************************************************
+    """
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_has_collection(self, connect, collection):
+        '''
+        target: test if the created collection existed
+        method: create collection, assert the value returned by has_collection method
+        expected: True
+        '''
+        assert connect.has_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_has_collection_without_connection(self, collection, dis_connect):
+        '''
+        target: test has collection, without connection
+        method: calling has collection with correct params, with a disconnected instance
+        expected: has collection raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            assert dis_connect.has_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_has_collection_not_existed(self, connect):
+        '''
+        target: test if collection not created
+        method: generate a random collection name, create then drop the collection,
+                assert the value returned by has_collection method
+        expected: False
+        '''
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+        assert connect.has_collection(collection_name)
+        connect.drop_collection(collection_name)
+        assert not connect.has_collection(collection_name)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_has_collection_multithread(self, connect):
+        '''
+        target: test has collection with multithread
+        method: check the same collection from multiple threads
+        expected: has_collection returns True in every thread
+        '''
+        threads_num = 4
+        threads = []
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+
+        def has():
+            assert connect.has_collection(collection_name)
+            # assert not assert_collection(connect, collection_name)
+        for i in range(threads_num):
+            t = MyThread(target=has, args=())
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
+
+class TestHasCollectionInvalid(object):
+    """
+    Test has collection with invalid params
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_has_collection_with_invalid_collection_name(self, connect, get_collection_name):
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            connect.has_collection(collection_name)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_has_collection_with_empty_collection_name(self, connect):
+        collection_name = ''
+        with pytest.raises(Exception) as e:
+            connect.has_collection(collection_name)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_has_collection_with_none_collection_name(self, connect):
+        collection_name = None
+        with pytest.raises(Exception) as e:
+            connect.has_collection(collection_name)
+
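The *Invalid suites in these files are all driven by the same device: a function-scoped fixture parametrized over a generator from `utils.utils`, so every test that takes the fixture runs once per bad value. A sketch of the pattern with made-up values (the real `gen_invalid_strs()` lives in utils/utils.py and is not shown in this patch):

    import pytest

    # Hypothetical stand-in for utils.utils.gen_invalid_strs().
    def gen_invalid_strs():
        return ["invalid name", "12-s", "n@me", " "]

    @pytest.fixture(scope="function", params=gen_invalid_strs())
    def get_collection_name(request):
        # Yields one invalid value per parametrized run.
        yield request.param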
diff --git a/tests/python_client/testcases/collection/test_list_collections.py b/tests/python_client/testcases/collection/test_list_collections.py
new file mode 100644
index 0000000000..536901d961
--- /dev/null
+++ b/tests/python_client/testcases/collection/test_list_collections.py
@@ -0,0 +1,102 @@
+import pytest
+import time
+from utils.utils import *
+from common.constants import *
+
+uid = "list_collections"
+
+
+class TestListCollections:
+    """
+    ******************************************************************
+      The following cases are used to test `list_collections` function
+    ******************************************************************
+    """
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_list_collections(self, connect, collection):
+        '''
+        target: test list collections
+        method: create collection, assert the collection appears in list_collections
+        expected: True
+        '''
+        assert collection in connect.list_collections()
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_list_collections_multi_collections(self, connect):
+        '''
+        target: test list collections
+        method: create multiple collections, assert each appears in list_collections
+        expected: True
+        '''
+        collection_num = 50
+        collection_names = []
+        for i in range(collection_num):
+            collection_name = gen_unique_str(uid)
+            collection_names.append(collection_name)
+            connect.create_collection(collection_name, default_fields)
+            assert collection_name in connect.list_collections()
+        for i in range(collection_num):
+            connect.drop_collection(collection_names[i])
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_list_collections_without_connection(self, dis_connect):
+        '''
+        target: test list collections, without connection
+        method: calling list collections with correct params, with a disconnected instance
+        expected: list collections raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.list_collections()
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_list_collections_not_existed(self, connect):
+        '''
+        target: test whether a dropped collection is still listed
+        method: generate a random collection name, create then drop the collection,
+                check the value returned by list_collections
+        expected: the name is listed after create, and gone after drop
+        '''
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+        assert collection_name in connect.list_collections()
+        connect.drop_collection(collection_name)
+        assert collection_name not in connect.list_collections()
+
+    # TODO: make sure to run this case in the end
+    @pytest.mark.skip("r0.3-test")
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_list_collections_no_collection(self, connect):
+        '''
+        target: test that list_collections only reports existing collections
+        method: check every name returned by list_collections with has_collection
        +        expected: has_collection is True for each listed name
+        '''
+        result = connect.list_collections()
+        if result:
+            for collection_name in result:
+                assert connect.has_collection(collection_name)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_list_collections_multithread(self, connect):
+        '''
+        target: test list collection with multithread
+        method: list collections from multiple threads
+        expected: the collection is listed correctly in every thread
+        '''
+        threads_num = 10
+        threads = []
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+
+        def _list():
+            assert collection_name in connect.list_collections()
+
+        for i in range(threads_num):
+            t = MyThread(target=_list)
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
diff --git a/tests/python_client/testcases/collection/test_load_collection.py b/tests/python_client/testcases/collection/test_load_collection.py
new file mode 100644
index 0000000000..ae83b37680
--- /dev/null
+++ b/tests/python_client/testcases/collection/test_load_collection.py
@@ -0,0 +1,671 @@
+import pdb
+import pytest
+from utils.utils import *
+from common.constants import *
+
+uid = "load_collection"
+field_name = default_float_vec_field_name
+default_single_query = {
+    "bool": {
+        "must": [
+            {"vector": {field_name: {"topk": default_top_k, "query": gen_vectors(1, default_dim), "metric_type": "L2",
+                                     "params": {"nprobe": 10}}}}
+        ]
+    }
+}
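`default_single_query` above follows the pre-2.0 pymilvus search DSL used throughout these suites: a boolean `must` clause wrapping a `vector` sub-query keyed by the vector field's name. Targeting another field or metric is just a matter of swapping values; for instance (illustrative field name and parameters, assuming the same helpers):

    # Illustrative: a top-5 inner-product query against a field named "embedding".
    query = {
        "bool": {
            "must": [
                {"vector": {
                    "embedding": {                             # vector field to search
                        "topk": 5,                             # neighbours to return
                        "query": gen_vectors(1, default_dim),  # query vector(s)
                        "metric_type": "IP",                   # inner product instead of L2
                        "params": {"nprobe": 10},              # index-dependent knobs
                    }
                }}
            ]
        }
    }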
+
+
+class TestLoadCollection:
+    """
+    ******************************************************************
+      The following cases are used to test `load_collection` function
+    ******************************************************************
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_binary_index()
+    )
+    def get_binary_index(self, request, connect):
+        return request.param
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_load_collection_after_index(self, connect, collection, get_simple_index):
+        '''
+        target: test load collection, after index created
+        method: insert and create index, load collection with correct params
+        expected: no error raised
+        '''
+        connect.insert(collection, default_entities)
+        connect.flush([collection])
+        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
+        connect.load_collection(collection)
+        connect.release_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_load_collection_after_index_binary(self, connect, binary_collection, get_binary_index):
+        '''
+        target: test load binary_collection, after index created
+        method: insert and create index, load binary_collection with correct params
+        expected: no error raised
+        '''
+        result = connect.insert(binary_collection, default_binary_entities)
+        assert len(result.primary_keys) == default_nb
+        connect.flush([binary_collection])
+        for metric_type in binary_metrics():
+            get_binary_index["metric_type"] = metric_type
+            connect.drop_index(binary_collection, default_binary_vec_field_name)
+            if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
+                with pytest.raises(Exception) as e:
+                    connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
+            else:
+                connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
+                index = connect.describe_index(binary_collection, "")
+                create_target_index(get_binary_index, default_binary_vec_field_name)
+                assert index == get_binary_index
+        connect.load_collection(binary_collection)
+        connect.release_collection(binary_collection)
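The `BIN_IVF_FLAT` branch above encodes a server-side compatibility rule: structure metrics only work with a flat binary index, so building `BIN_IVF_FLAT` with one of them is expected to fail. A sketch of the rule the loop exercises; the metric lists are assumptions inferred from the helper names `binary_metrics()` / `structure_metrics()`, whose definitions are not in this patch:

    # Assumed helper contents (defined in utils/utils.py, not shown here).
    def binary_metrics():
        return ["JACCARD", "HAMMING", "TANIMOTO", "SUBSTRUCTURE", "SUPERSTRUCTURE"]

    def structure_metrics():
        return ["SUBSTRUCTURE", "SUPERSTRUCTURE"]

    def index_supports_metric(index_type, metric_type):
        # BIN_IVF_FLAT cannot serve structure metrics; BIN_FLAT serves them all.
        return not (index_type == "BIN_IVF_FLAT" and metric_type in structure_metrics())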
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_load_empty_collection(self, connect, collection):
+        '''
+        target: test load collection
+        method: no entities in collection, load collection with correct params
+        expected: load success
+        '''
+        connect.load_collection(collection)
+        connect.release_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_load_collection_dis_connect(self, dis_connect, collection):
+        '''
+        target: test load collection, without connection
+        method: load collection with correct params, with a disconnected instance
+        expected: load raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.load_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_release_collection_dis_connect(self, dis_connect, collection):
+        '''
+        target: test release collection, without connection
+        method: release collection with correct params, with a disconnected instance
+        expected: release raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.release_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_load_collection_not_existed(self, connect, collection):
+        collection_name = gen_unique_str(uid)
+        try:
+            connect.load_collection(collection_name)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "describe collection failed: can't find collection: %s" % collection_name
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_release_collection_not_existed(self, connect, collection):
+        collection_name = gen_unique_str(uid)
+        try:
+            connect.release_collection(collection_name)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "describe collection failed: can't find collection: %s" % collection_name
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_release_collection_not_load(self, connect, collection):
+        """
+        target: test release collection without load
+        method: insert entities and flush, then release without loading first
+        expected: no error raised
+        """
+        result = connect.insert(collection, default_entities)
+        assert len(result.primary_keys) == default_nb
+        connect.flush([collection])
+        connect.release_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_load_collection_after_load_release(self, connect, collection):
+        result = connect.insert(collection, default_entities)
+        assert len(result.primary_keys) == default_nb
+        connect.flush([collection])
+        connect.load_collection(collection)
+        connect.release_collection(collection)
+        connect.load_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_load_collection_repeatedly(self, connect, collection):
+        result = connect.insert(collection, default_entities)
+        assert len(result.primary_keys) == default_nb
+        connect.flush([collection])
+        connect.load_collection(collection)
+        connect.load_collection(collection)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_load_release_collection(self, connect, collection):
+        collection_name = gen_unique_str(uid)
+        connect.create_collection(collection_name, default_fields)
+        connect.insert(collection_name, default_entities)
+        connect.flush([collection_name])
+        connect.load_collection(collection_name)
+        connect.release_collection(collection_name)
+        connect.drop_collection(collection_name)
+        try:
+            connect.load_collection(collection_name)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "describe collection failed: can't find collection: %s" % collection_name
+
+        try:
+            connect.release_collection(collection_name)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "describe collection failed: can't find collection: %s" % collection_name
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_release_collection_after_drop(self, connect, collection):
+        """
+        target: test release collection after drop
+        method: insert and flush, load the collection, drop it, then release it
+        expected: raise exception
+        """
+        result = connect.insert(collection, default_entities)
+        assert len(result.primary_keys) == default_nb
+        connect.flush([collection])
+        connect.load_collection(collection)
+        connect.drop_collection(collection)
+        try:
+            connect.release_collection(collection)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "describe collection failed: can't find collection: %s" % collection
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_load_collection_without_flush(self, connect, collection):
+        """
+        target: test load collection without flush
+        method: insert entities without flush, then load collection
+        expected: no error raised
+        """
+        result = connect.insert(collection, default_entities)
+        assert len(result.primary_keys) == default_nb
+        connect.load_collection(collection)
+
+    # TODO
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_load_collection_larger_than_memory(self):
+        """
+        target: test load collection when memory less than collection size
+        method: not determined yet
+        expected: raise exception
+        """
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_load_collection_release_part_partitions(self, connect, collection):
+        """
+        target: test release part partitions after load collection
+        method: load collection and release part partitions
+        expected: released partitions search empty
+        """
+        result = connect.insert(collection, default_entities)
+        assert len(result.primary_keys) == default_nb
+        connect.create_partition(collection, default_tag)
+        result = connect.insert(collection, default_entities, partition_name=default_tag)
+        assert len(result.primary_keys) == default_nb
+        connect.flush([collection])
+        connect.load_collection(collection)
+        connect.release_partitions(collection, [default_tag])
+        with pytest.raises(Exception) as e:
+            connect.search(collection, default_single_query, partition_names=[default_tag])
+        res = connect.search(collection, default_single_query, partition_names=[default_partition_name])
+        assert len(res[0]) == default_top_k
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_load_collection_release_all_partitions(self, connect, collection):
+        """
+        target: test release all partitions after load collection
+        method: load collection and release all partitions
+        expected: search empty
+        """
+        result = connect.insert(collection, default_entities)
+        assert len(result.primary_keys) == default_nb
+        connect.create_partition(collection, default_tag)
+        result = connect.insert(collection, default_entities, partition_name=default_tag)
+        assert len(result.primary_keys) == default_nb
+        connect.flush([collection])
+        connect.load_collection(collection)
+        connect.release_partitions(collection, [default_partition_name, default_tag])
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_load_partitions_release_collection(self, connect, collection):
+        """
+        target: test release collection after load partitions
+        method: insert entities into a partition, load the partition, then release the collection and search
+        expected: search raises an exception
+        """
+        connect.create_partition(collection, default_tag)
+        result = connect.insert(collection, default_entities, partition_name=default_tag)
+        assert len(result.primary_keys) == default_nb
+        connect.flush([collection])
+        connect.load_partitions(collection, [default_tag])
+        connect.release_collection(collection)
+        with pytest.raises(Exception):
+            connect.search(collection, default_single_query)
+        # assert len(res[0]) == 0
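Taken together, the partition cases above fix the load-state semantics: searching touches only loaded partitions, releasing a partition makes it unsearchable, and releasing the collection invalidates searches even when partitions were loaded individually. A self-contained toy model of that state machine, useful for reasoning about the expected asserts (not Milvus code):

    class LoadState:
        """Toy model of which partitions are currently searchable."""

        def __init__(self, partitions):
            self.partitions = set(partitions)
            self.loaded = set()

        def load_collection(self):
            self.loaded = set(self.partitions)

        def load_partitions(self, names):
            self.loaded.update(names)

        def release_partitions(self, names):
            self.loaded.difference_update(names)

        def release_collection(self):
            self.loaded.clear()

        def searchable(self, names):
            return set(names) <= self.loaded

    state = LoadState({"_default", "tag"})
    state.load_collection()
    state.release_partitions({"tag"})
    assert state.searchable({"_default"}) and not state.searchable({"tag"})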
+
+
+class TestReleaseAdvanced:
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_release_collection_during_searching(self, connect, collection):
+        """
+        target: test release collection during searching
+        method: insert entities into collection, flush and load collection, release collection during searching
+        expected: search raises an exception once the collection is released
+        """
+        nq = 1000
+        top_k = 1
+        connect.insert(collection, default_entities)
+        connect.flush([collection])
+        connect.load_collection(collection)
+        query, _ = gen_query_vectors(field_name, default_entities, top_k, nq)
+        future = connect.search(collection, query, _async=True)
+        connect.release_collection(collection)
+        with pytest.raises(Exception):
+            connect.search(collection, default_single_query)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_release_partition_during_searching(self, connect, collection):
+        """
+        target: test release partition during searching
+        method: insert entities into partition, flush and load partition, release partition during searching
+        expected: search result is empty once the partition is released
+        """
+        nq = 1000
+        top_k = 1
+        connect.create_partition(collection, default_tag)
+        query, _ = gen_query_vectors(field_name, default_entities, top_k, nq)
+        connect.insert(collection, default_entities, partition_name=default_tag)
+        connect.flush([collection])
+        connect.load_partitions(collection, [default_tag])
+        res = connect.search(collection, query, _async=True)
+        connect.release_partitions(collection, [default_tag])
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_release_collection_during_searching_A(self, connect, collection):
+        """
+        target: test release collection during searching
+        method: insert entities into partition, flush and load partition, release collection during searching
+        expected: search raises an exception once the collection is released
+        """
+        nq = 1000
+        top_k = 1
+        connect.create_partition(collection, default_tag)
+        query, _ = gen_query_vectors(field_name, default_entities, top_k, nq)
+        connect.insert(collection, default_entities, partition_name=default_tag)
+        connect.flush([collection])
+        connect.load_partitions(collection, [default_tag])
+        res = connect.search(collection, query, _async=True)
+        connect.release_collection(collection)
+        with pytest.raises(Exception):
+            connect.search(collection, default_single_query)
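The `_async=True` searches above return a future immediately instead of blocking, which is what lets `release_collection` race the in-flight request; the tests deliberately never collect the result. A runnable toy model of that race using only the standard library (not the pymilvus API):

    import time
    from concurrent.futures import ThreadPoolExecutor

    loaded = {"flag": True}

    def slow_search():
        time.sleep(0.1)                # request still in flight...
        if not loaded["flag"]:
            raise RuntimeError("collection released during search")
        return ["results"]

    with ThreadPoolExecutor() as pool:
        future = pool.submit(slow_search)  # analogue of search(..., _async=True)
        loaded["flag"] = False             # analogue of release_collection()
        try:
            print(future.result())
        except RuntimeError as e:
            print("raced:", e)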
+
+    def _test_release_collection_during_loading(self, connect, collection):
+        """
+        target: test release collection during loading
+        method: insert entities into collection, flush, release collection during loading
+        expected: search raises an exception after the release
+        """
+        connect.insert(collection, default_entities)
+        connect.flush([collection])
+
+        def load():
+            connect.load_collection(collection)
+
+        t = threading.Thread(target=load, args=())
+        t.start()
+        connect.release_collection(collection)
+        with pytest.raises(Exception):
+            connect.search(collection, default_single_query)
+
+    def _test_release_partition_during_loading(self, connect, collection):
+        """
+        target: test release partition during loading
+        method: insert entities into partition, flush, release partition during loading
+        expected: search result is empty after the release
+        """
+        connect.create_partition(collection, default_tag)
+        connect.insert(collection, default_entities, partition_name=default_tag)
+        connect.flush([collection])
+
+        def load():
+            connect.load_collection(collection)
+
+        t = threading.Thread(target=load, args=())
+        t.start()
+        connect.release_partitions(collection, [default_tag])
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    def _test_release_collection_during_inserting(self, connect, collection):
+        """
+        target: test release collection during inserting
+        method: load collection, do release collection during inserting
+        expected: search raises an exception after the release
+        """
+        connect.insert(collection, default_entities)
+        connect.flush([collection])
+        connect.load_collection(collection)
+
+        def insert():
+            connect.insert(collection, default_entities)
+
+        t = threading.Thread(target=insert, args=())
+        t.start()
+        connect.release_collection(collection)
+        with pytest.raises(Exception):
+            res = connect.search(collection, default_single_query)
+        # assert len(res[0]) == 0
+
+    def _test_release_collection_during_indexing(self, connect, collection):
+        """
+        target: test release collection during building index
+        method: insert and flush, load collection, do release collection during creating index
+        expected:
+        """
+        pass
+
+    def _test_release_collection_during_dropping_index(self, connect, collection):
+        """
+        target: test release collection during dropping index
+        method: insert, create index and flush, load collection, do release collection during dropping index
+        expected:
+        """
+        pass
+
+
+class TestLoadCollectionInvalid(object):
+    """
+    Test load collection with invalid params
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_load_collection_with_invalid_collection_name(self, connect, get_collection_name):
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            connect.load_collection(collection_name)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_release_collection_with_invalid_collection_name(self, connect, get_collection_name):
+        collection_name = get_collection_name
+        with pytest.raises(Exception) as e:
+            connect.release_collection(collection_name)
+
+
+class TestLoadPartition:
+    """
+    ******************************************************************
+      The following cases are used to test `load_partitions` function
+    ******************************************************************
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        # if str(connect._cmd("mode")) == "CPU":
+        #     if request.param["index_type"] in index_cpu_not_support():
+        #         pytest.skip("sq8h not support in cpu mode")
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_binary_index()
+    )
+    def get_binary_index(self, request, connect):
+        logging.getLogger().info(request.param)
+        if request.param["index_type"] in binary_support():
+            return request.param
+        else:
+            pytest.skip("Skip index Temporary")
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_load_partition_after_index(self, connect, collection, get_simple_index):
+        '''
+        target: test load partition, after index created
+        method: insert and create index, load partition with correct params
+        expected: no error raised, search returns top_k results
+        '''
+        connect.create_partition(collection, default_tag)
+        result = connect.insert(collection, default_entities, partition_name=default_tag)
+        assert len(result.primary_keys) == default_nb
+        connect.flush([collection])
+        connect.create_index(collection, default_float_vec_field_name, get_simple_index)
+        search_param = get_search_param(get_simple_index["index_type"])
+        query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq=1, search_params=search_param)
+        connect.load_partitions(collection, [default_tag])
+        res = connect.search(collection, query, partition_names=[default_tag])
+        assert len(res[0]) == default_top_k
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_load_partition_after_index_binary(self, connect, binary_collection, get_binary_index):
+        '''
+        target: test load binary_collection partition, after index created
+        method: insert and create index, load partition with correct params
+        expected: no error raised
+        '''
+        connect.create_partition(binary_collection, default_tag)
+        result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
+        assert len(result.primary_keys) == default_nb
+        connect.flush([binary_collection])
+        for metric_type in binary_metrics():
+            logging.getLogger().info(metric_type)
+            get_binary_index["metric_type"] = metric_type
+            if get_binary_index["index_type"] == "BIN_IVF_FLAT" and metric_type in structure_metrics():
+                with pytest.raises(Exception) as e:
+                    connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
+            else:
+                connect.create_index(binary_collection, default_binary_vec_field_name, get_binary_index)
+        connect.load_partitions(binary_collection, [default_tag])
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_load_empty_partition(self, connect, collection):
+        '''
+        target: test load an empty partition
+        method: no entities in partition, load the partition with correct params
+        expected: load success, search returns empty
+        '''
+        connect.create_partition(collection, default_tag)
+        connect.load_partitions(collection, [default_tag])
+        res = connect.search(collection, default_single_query)
+        assert len(res[0]) == 0
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_load_partition_dis_connect(self, connect, dis_connect, collection):
+        '''
+        target: test load partition, without connection
+        method: load partition with correct params, with a disconnected instance
+        expected: load raise exception
+        '''
+        connect.create_partition(collection, default_tag)
+        with pytest.raises(Exception) as e:
+            dis_connect.load_partitions(collection, [default_tag])
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_release_partition_dis_connect(self, connect, dis_connect, collection):
+        '''
+        target: test release partition, without connection
+        method: release partition with correct params, with a disconnected instance
+        expected: release raise exception
+        '''
+        connect.create_partition(collection, default_tag)
+        connect.load_partitions(collection, [default_tag])
+        with pytest.raises(Exception) as e:
+            dis_connect.release_partitions(collection, [default_tag])
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_load_partition_not_existed(self, connect, collection):
+        partition_name = gen_unique_str(uid)
+        try:
+            connect.load_partitions(collection, [partition_name])
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "partitionID of partitionName:%s can not be find" % partition_name
"The exception does not contain the field of message.") + assert message == "partitionID of partitionName:%s can not be find" % partition_name + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_release_partition_not_load(self, connect, collection): + """ + target: test release collection without load + method: + expected: raise exception + """ + connect.create_partition(collection, default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + assert len(result.primary_keys) == default_nb + connect.flush([collection]) + connect.release_partitions(collection, [default_tag]) + + @pytest.mark.tags(CaseLabel.L2) + def test_load_release_after_drop(self, connect, collection): + connect.create_partition(collection, default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + assert len(result.primary_keys) == default_nb + connect.flush([collection]) + connect.load_partitions(collection, [default_tag]) + connect.release_partitions(collection, [default_tag]) + connect.drop_partition(collection, default_tag) + try: + connect.load_partitions(collection, [default_tag]) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "partitionID of partitionName:%s can not be find" % default_tag + + try: + connect.release_partitions(collection, [default_tag]) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "partitionID of partitionName:%s can not be find" % default_tag + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_release_partition_after_drop(self, connect, collection): + """ + target: test release collection after drop + method: insert and flush, then release collection after load and drop + expected: raise exception + """ + connect.create_partition(collection, default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + assert len(result.primary_keys) == default_nb + connect.flush([collection]) + connect.load_partitions(collection, [default_tag]) + connect.drop_partition(collection, default_tag) + try: + connect.load_partitions(collection, [default_tag]) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "partitionID of partitionName:%s can not be find" % default_tag + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_load_release_after_collection_drop(self, connect, collection): + """ + target: test release collection after drop + method: insert and flush, then release collection after load and drop + expected: raise exception + """ + connect.create_partition(collection, default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + assert len(result.primary_keys) == default_nb + connect.flush([collection]) + connect.load_partitions(collection, [default_tag]) + connect.release_partitions(collection, [default_tag]) + connect.drop_collection(collection) + try: + connect.load_partitions(collection, [default_tag]) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of 
code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "describe collection failed: can't find collection: %s" % collection + + try: + connect.release_partitions(collection, [default_tag]) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "describe collection failed: can't find collection: %s" % collection + + +class TestLoadPartitionInvalid(object): + """ + Test load collection with invalid params + """ + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_partition_name(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.L2) + def test_load_partition_with_invalid_partition_name(self, connect, collection, get_partition_name): + partition_name = get_partition_name + with pytest.raises(Exception) as e: + connect.load_partitions(collection, [partition_name]) + + @pytest.mark.tags(CaseLabel.L2) + def test_release_partition_with_invalid_partition_name(self, connect, collection, get_partition_name): + partition_name = get_partition_name + with pytest.raises(Exception) as e: + connect.load_partitions(collection, [partition_name]) diff --git a/tests/python_client/testcases/entity/test_delete.py b/tests/python_client/testcases/entity/test_delete.py new file mode 100644 index 0000000000..0223a3667d --- /dev/null +++ b/tests/python_client/testcases/entity/test_delete.py @@ -0,0 +1,473 @@ +import time +import random +import pdb +import copy +import threading +import logging +from multiprocessing import Pool, Process +import pytest +from utils.utils import * +from common.constants import * + +field_name = default_float_vec_field_name +default_single_query = { + "bool": { + "must": [ + {"vector": {field_name: {"topk": 10, "metric_type":"L2", "query": gen_vectors(1, default_dim), "params": {"nprobe": 10}}}} + ] + } +} + + +# class TestDeleteBase: +# """ +# ****************************************************************** +# The following cases are used to test `delete_entity_by_id` function +# ****************************************************************** +# """ +# +# @pytest.fixture( +# scope="function", +# params=gen_simple_index() +# ) +# def get_simple_index(self, request, connect): +# if str(connect._cmd("mode")) == "GPU": +# if not request.param["index_type"] not in ivf(): +# pytest.skip("Only support index_type: idmap/ivf") +# if str(connect._cmd("mode")) == "CPU": +# if request.param["index_type"] in index_cpu_not_support(): +# pytest.skip("CPU not support index_type: ivf_sq8h") +# return request.param +# +# @pytest.fixture( +# scope="function", +# params=[ +# 1, +# 2000 +# ], +# ) +# def insert_count(self, request): +# yield request.param +# +# def test_delete_entity_id_not_exised(self, connect, collection): +# ''' +# target: test delete entity, params entity_id not existed +# method: add entity and delete +# expected: status DELETED +# ''' +# ids = connect.bulk_insert(collection, default_entity) +# connect.flush([collection]) +# status = connect.delete_entity_by_id(collection, [0]) +# assert status +# +# def test_delete_empty_collection(self, connect, collection): +# ''' +# target: test delete entity, params collection_name not existed +# method: add entity and delete +# expected: status DELETED +# ''' +# status = connect.delete_entity_by_id(collection, [0]) +# assert status +# +# 
+#     def test_delete_entity_collection_not_existed(self, connect, collection):
+#         '''
+#         target: test delete entity, params collection_name not existed
+#         method: add entity and delete
+#         expected: error raised
+#         '''
+#         collection_new = gen_unique_str()
+#         with pytest.raises(Exception) as e:
+#             status = connect.delete_entity_by_id(collection_new, [0])
+#
+#     def test_delete_entity_collection_not_existed_B(self, connect, collection):
+#         '''
+#         target: test delete entity, params collection_name not existed
+#         method: add entity and delete
+#         expected: error raised
+#         '''
+#         ids = connect.bulk_insert(collection, default_entity)
+#         connect.flush([collection])
+#         collection_new = gen_unique_str()
+#         with pytest.raises(Exception) as e:
+#             status = connect.delete_entity_by_id(collection_new, [0])
+#
+#     def test_insert_delete(self, connect, collection, insert_count):
+#         '''
+#         target: test delete entity
+#         method: add entities and delete
+#         expected: no error raised
+#         '''
+#         entities = gen_entities(insert_count)
+#         ids = connect.bulk_insert(collection, entities)
+#         connect.flush([collection])
+#         delete_ids = [ids[0]]
+#         status = connect.delete_entity_by_id(collection, delete_ids)
+#         assert status
+#         connect.flush([collection])
+#         res_count = connect.count_entities(collection)
+#         assert res_count == insert_count - 1
+#
+#     def test_insert_delete_A(self, connect, collection):
+#         '''
+#         target: test delete entity
+#         method: add entities and delete one in collection, and one not in collection
+#         expected: no error raised
+#         '''
+#         ids = connect.bulk_insert(collection, default_entities)
+#         connect.flush([collection])
+#         delete_ids = [ids[0], 1]
+#         status = connect.delete_entity_by_id(collection, delete_ids)
+#         assert status
+#         connect.flush([collection])
+#         res_count = connect.count_entities(collection)
+#         assert res_count == default_nb - 1
+#
+#     def test_insert_delete_B(self, connect, id_collection):
+#         '''
+#         target: test delete entity
+#         method: add entities with the same ids, and delete the id in collection
+#         expected: no error raised, all entities deleted
+#         '''
+#         ids = [1 for i in range(default_nb)]
+#         res_ids = connect.bulk_insert(id_collection, default_entities, ids)
+#         connect.flush([id_collection])
+#         delete_ids = [1]
+#         status = connect.delete_entity_by_id(id_collection, delete_ids)
+#         assert status
+#         connect.flush([id_collection])
+#         res_count = connect.count_entities(id_collection)
+#         assert res_count == 0
+#
+#     def test_delete_exceed_limit(self, connect, collection):
+#         '''
+#         target: test delete entity
+#         method: add one entity and delete two ids
+#         expected: error raised
+#         '''
+#         ids = connect.bulk_insert(collection, default_entity)
+#         connect.flush([collection])
+#         delete_ids = [ids[0], 1]
+#         status = connect.delete_entity_by_id(collection, delete_ids)
+#         connect.flush([collection])
+#         res_count = connect.count_entities(collection)
+#         assert res_count == 0
+#
+#     def test_flush_after_delete(self, connect, collection):
+#         '''
+#         target: test delete entity
+#         method: add entities and delete, then flush
+#         expected: entity deleted and no error raised
+#         '''
+#         ids = connect.bulk_insert(collection, default_entities)
+#         connect.flush([collection])
+#         delete_ids = [ids[0], ids[-1]]
+#         status = connect.delete_entity_by_id(collection, delete_ids)
+#         assert status
+#         connect.flush([collection])
+#         res_count = connect.count_entities(collection)
+#         assert res_count == default_nb - len(delete_ids)
+#
+#     def test_flush_after_delete_binary(self, connect, binary_collection):
+#         '''
+#         target: test delete entity
+#         method: add entities and delete, then flush
+#         expected: entity deleted and no error raised
+#         '''
+#         ids = connect.bulk_insert(binary_collection, default_binary_entities)
+#         connect.flush([binary_collection])
+#         delete_ids = [ids[0], ids[-1]]
+#         status = connect.delete_entity_by_id(binary_collection, delete_ids)
+#         assert status
+#         connect.flush([binary_collection])
+#         res_count = connect.count_entities(binary_collection)
+#         assert res_count == default_nb - len(delete_ids)
+#
+#     def test_insert_delete_binary(self, connect, binary_collection):
+#         '''
+#         method: add entities and delete
+#         expected: status DELETED
+#         '''
+#         ids = connect.bulk_insert(binary_collection, default_binary_entities)
+#         connect.flush([binary_collection])
+#         delete_ids = [ids[0], ids[-1]]
+#         status = connect.delete_entity_by_id(binary_collection, delete_ids)
+#
+#     def test_insert_same_ids_after_delete(self, connect, id_collection):
+#         '''
+#         method: add entities and delete
+#         expected: status DELETED
+#         note: Not flush after delete
+#         '''
+#         insert_ids = [i for i in range(default_nb)]
+#         ids = connect.bulk_insert(id_collection, default_entities, insert_ids)
+#         connect.flush([id_collection])
+#         delete_ids = [ids[0], ids[-1]]
+#         status = connect.delete_entity_by_id(id_collection, delete_ids)
+#         assert status
+#         new_ids = connect.bulk_insert(id_collection, default_entity, [ids[0]])
+#         assert new_ids == [ids[0]]
+#         connect.flush([id_collection])
+#         res_count = connect.count_entities(id_collection)
+#         assert res_count == default_nb - 1
+#
+#     def test_insert_same_ids_after_delete_binary(self, connect, binary_id_collection):
+#         '''
+#         method: add entities, with the same id and delete the ids
+#         expected: status DELETED, all id deleted
+#         '''
+#         insert_ids = [i for i in range(default_nb)]
+#         ids = connect.bulk_insert(binary_id_collection, default_binary_entities, insert_ids)
+#         connect.flush([binary_id_collection])
+#         delete_ids = [ids[0], ids[-1]]
+#         status = connect.delete_entity_by_id(binary_id_collection, delete_ids)
+#         assert status
+#         new_ids = connect.bulk_insert(binary_id_collection, default_binary_entity, [ids[0]])
+#         assert new_ids == [ids[0]]
+#         connect.flush([binary_id_collection])
+#         res_count = connect.count_entities(binary_id_collection)
+#         assert res_count == default_nb - 1
+#
+#     def test_search_after_delete(self, connect, collection):
+#         '''
+#         target: test delete entity
+#         method: add entities and delete, then search
+#         expected: entity deleted and no error raised
+#         '''
+#         ids = connect.bulk_insert(collection, default_entities)
+#         connect.flush([collection])
+#         delete_ids = [ids[0], ids[-1]]
+#         status = connect.delete_entity_by_id(collection, delete_ids)
+#         assert status
+#         connect.flush([collection])
+#         query = copy.deepcopy(default_single_query)
+#         query["bool"]["must"][0]["vector"][field_name]["query"] = \
+#             [default_entity[-1]["values"][0], default_entities[-1]["values"][1], default_entities[-1]["values"][-1]]
+#         res = connect.search(collection, query)
+#         logging.getLogger().debug(res)
+#         assert len(res) == len(query["bool"]["must"][0]["vector"][field_name]["query"])
+#         assert res[0]._distances[0] > epsilon
+#         assert res[1]._distances[0] < epsilon
+#         assert res[2]._distances[0] > epsilon
+#
[ids[0], ids[-1]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# connect.create_index(collection, field_name, get_simple_index) +# # assert index info +# +# def test_delete_multiable_times(self, connect, collection): +# ''' +# method: add entities and delete the same ids several times +# expected: entities deleted +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# delete_ids = [ids[0], ids[-1]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status +# connect.flush([collection]) +# for i in range(10): +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status +# +# def test_index_insert_batch_delete_get(self, connect, collection, get_simple_index): +# ''' +# method: create index, insert entities, and delete +# expected: entities deleted +# ''' +# connect.create_index(collection, field_name, get_simple_index) +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# delete_ids = [ids[0], ids[-1]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status +# connect.flush([collection]) +# res_count = connect.count_entities(collection) +# assert res_count == default_nb - len(delete_ids) +# res_get = connect.get_entity_by_id(collection, delete_ids) +# assert res_get[0] is None +# +# # TODO: disable +# @pytest.mark.tags(CaseLabel.L2) +# def _test_index_insert_single_delete_get(self, connect, id_collection): +# ''' +# method: insert entities, and delete +# expected: entities deleted +# ''' +# ids = [i for i in range(default_nb)] +# for i in range(default_nb): +# connect.bulk_insert(id_collection, default_entity, [ids[i]]) +# connect.flush([id_collection]) +# delete_ids = [ids[0], ids[-1]] +# status = connect.delete_entity_by_id(id_collection, delete_ids) +# assert status +# connect.flush([id_collection]) +# res_count = connect.count_entities(id_collection) +# assert res_count == default_nb - len(delete_ids) +# +# """ +# ****************************************************************** +# The following cases are used to test `delete_entity_by_id` function, with tags +# ****************************************************************** +# """ +# +# def test_insert_tag_delete(self, connect, collection): +# ''' +# method: add entities with the given tag, delete entities with the returned ids +# expected: entities deleted +# ''' +# connect.create_partition(collection, default_tag) +# ids = connect.bulk_insert(collection, default_entities, partition_name=default_tag) +# connect.flush([collection]) +# delete_ids = [ids[0], ids[-1]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status +# connect.flush([collection]) +# res_count = connect.count_entities(collection) +# assert res_count == default_nb - 2 +# +# def test_insert_default_tag_delete(self, connect, collection): +# ''' +# method: add entities, delete entities with the returned ids +# expected: entities deleted +# ''' +# connect.create_partition(collection, default_tag) +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# delete_ids = [ids[0], ids[-1]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status +# connect.flush([collection]) +# res_count = connect.count_entities(collection) +# assert res_count == default_nb - 2 +# +# def test_insert_tags_delete(self, connect, collection): +# ''' +# method: add entities with two given tags, delete entities with the returned ids +# expected: entities deleted +#
''' +# tag_new = "tag_new" +# connect.create_partition(collection, default_tag) +# connect.create_partition(collection, tag_new) +# ids = connect.bulk_insert(collection, default_entities, partition_name=default_tag) +# ids_new = connect.bulk_insert(collection, default_entities, partition_name=tag_new) +# connect.flush([collection]) +# delete_ids = [ids[0], ids_new[0]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status +# connect.flush([collection]) +# res_count = connect.count_entities(collection) +# assert res_count == 2 * (default_nb - 1) +# +# def test_insert_tags_index_delete(self, connect, collection, get_simple_index): +# """ +# method: add entities with the given tag, create index, delete entities with the returned ids +# expected: entities deleted +# """ +# tag_new = "tag_new" +# connect.create_partition(collection, default_tag) +# connect.create_partition(collection, tag_new) +# ids = connect.bulk_insert(collection, default_entities, partition_name=default_tag) +# ids_new = connect.bulk_insert(collection, default_entities, partition_name=tag_new) +# connect.flush([collection]) +# connect.create_index(collection, field_name, get_simple_index) +# delete_ids = [ids[0], ids_new[0]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status +# connect.flush([collection]) +# res_count = connect.count_entities(collection) +# assert res_count == 2 * (default_nb - 1) +# +# def test_insert_delete_loop(self, connect, collection): +# """ +# target: test loop insert and delete entities +# method: loop insert entities into two segments, and delete entities across segments. +# expected: count is correct +# """ +# loop = 2 +# for i in range(loop): +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# status = connect.delete_entity_by_id(collection, ids[100:default_nb - 100]) +# connect.flush([collection]) +# res_count = connect.count_entities(collection) +# assert res_count == loop * 200 +# +# def test_search_delete_loop(self, connect, collection): +# """ +# target: test loop search and delete entities +# method: loop search and delete across segments +# expected: ok +# """ +# loop = 2 +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# ni = default_nb // loop +# for i in range(loop): +# res = connect.search(collection, default_single_query) +# status = connect.delete_entity_by_id(collection, ids[i * ni:(i + 1) * ni]) +# assert status +# connect.flush([collection]) +# res_count = connect.count_entities(collection) +# assert res_count == 0 +# +# def test_count_delete_loop(self, connect, collection): +# """ +# target: test loop count and delete entities +# method: loop count and delete across segments +# expected: ok +# """ +# loop = 2 +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# ni = default_nb // loop +# for i in range(loop): +# connect.count_entities(collection) +# status = connect.delete_entity_by_id(collection, ids[i * ni:(i + 1) * ni]) +# assert status +# connect.flush([collection]) +# res_count = connect.count_entities(collection) +# assert res_count == 0 +# +# +# class TestDeleteInvalid(object): +# """ +# Test deleting entities with invalid params +# """ +# +# @pytest.fixture( +# scope="function", +# params=gen_invalid_ints() +# ) +# def gen_entity_id(self, request): +# yield request.param +# +# @pytest.fixture( +# scope="function", +# params=gen_invalid_strs() +# ) +# def get_collection_name(self, request): +# yield
request.param +# +# @pytest.mark.tags(CaseLabel.L1) +# def test_delete_entity_id_invalid(self, connect, collection, gen_entity_id): +# invalid_id = gen_entity_id +# with pytest.raises(Exception) as e: +# status = connect.delete_entity_by_id(collection, [invalid_id]) +# +# def test_delete_entity_ids_invalid(self, connect, collection, gen_entity_id): +# invalid_id = gen_entity_id +# with pytest.raises(Exception) as e: +# status = connect.delete_entity_by_id(collection, [1, invalid_id]) +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_delete_entity_with_invalid_collection_name(self, connect, get_collection_name): +# collection_name = get_collection_name +# with pytest.raises(Exception) as e: +# status = connect.delete_entity_by_id(collection_name, [1]) diff --git a/tests/python_client/testcases/entity/test_get_entity_by_id.py b/tests/python_client/testcases/entity/test_get_entity_by_id.py new file mode 100644 index 0000000000..f17b7ddacd --- /dev/null +++ b/tests/python_client/testcases/entity/test_get_entity_by_id.py @@ -0,0 +1,666 @@ +import time +import random +import pdb +import copy +import logging +from multiprocessing import Pool, Process +import concurrent.futures +from threading import current_thread +import pytest +from utils.utils import * +from common.constants import * + +default_single_query = { + "bool": { + "must": [ + {"vector": { + default_float_vec_field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "params": {"nprobe": 10}}}} + ] + } +} + +# class TestGetBase: +# """ +# ****************************************************************** +# The following cases are used to test `get_entity_by_id` function +# ****************************************************************** +# """ +# +# @pytest.fixture( +# scope="function", +# params=gen_simple_index() +# ) +# def get_simple_index(self, request, connect): +# if str(connect._cmd("mode")) == "CPU": +# if request.param["index_type"] in index_cpu_not_support(): +# pytest.skip("sq8h not support in CPU mode") +# return request.param +# +# @pytest.fixture( +# scope="function", +# params=[ +# 1, +# 500 +# ], +# ) +# def get_pos(self, request): +# yield request.param +# +# def test_get_entity(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id, get one +# method: add entity, and get +# expected: entity returned equals insert +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# res_count = connect.count_entities(collection) +# assert res_count == default_nb +# get_ids = [ids[get_pos]] +# res = connect.get_entity_by_id(collection, get_ids) +# assert_equal_vector(res[0].get(default_float_vec_field_name), default_entities[-1]["values"][get_pos]) +# +# def test_get_entity_multi_ids(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id, get one +# method: add entity, and get +# expected: entity returned equals insert +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# get_ids = ids[:get_pos] +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i]) +# +# def test_get_entity_parts_ids(self, connect, collection): +# ''' +# target: test.get_entity_by_id, some ids in collection, some ids not +# method: add entity, and get +# expected: entity returned equals insert +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# get_ids 
= [ids[0], 1, ids[-1]] +# res = connect.get_entity_by_id(collection, get_ids) +# assert_equal_vector(res[0].get(default_float_vec_field_name), default_entities[-1]["values"][0]) +# assert_equal_vector(res[-1].get(default_float_vec_field_name), default_entities[-1]["values"][-1]) +# assert res[1] is None +# +# def test_get_entity_limit(self, connect, collection, args): +# ''' +# target: test.get_entity_by_id +# method: add entity, and get, limit > 1000 +# expected: entity returned +# ''' +# if args["handler"] == "HTTP": +# pytest.skip("skip in http mode") +# +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# with pytest.raises(Exception) as e: +# res = connect.get_entity_by_id(collection, ids) +# +# def test_get_entity_same_ids(self, connect, id_collection): +# ''' +# target: test.get_entity_by_id, with the same ids +# method: add entity, and get one id +# expected: entity returned equals insert +# ''' +# ids = [1 for i in range(default_nb)] +# res_ids = connect.bulk_insert(id_collection, default_entities, ids) +# connect.flush([id_collection]) +# get_ids = [ids[0]] +# res = connect.get_entity_by_id(id_collection, get_ids) +# assert len(res) == 1 +# assert_equal_vector(res[0].get(default_float_vec_field_name), default_entities[-1]["values"][0]) +# +# def test_get_entity_params_same_ids(self, connect, id_collection): +# ''' +# target: test.get_entity_by_id, with the same ids +# method: add entity, and get entity with the same ids +# expected: entity returned equals insert +# ''' +# ids = [1] +# res_ids = connect.bulk_insert(id_collection, default_entity, ids) +# connect.flush([id_collection]) +# get_ids = [1, 1] +# res = connect.get_entity_by_id(id_collection, get_ids) +# assert len(res) == len(get_ids) +# for i in range(len(get_ids)): +# logging.getLogger().info(i) +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entity[-1]["values"][0]) +# +# def test_get_entities_params_same_ids(self, connect, collection): +# ''' +# target: test.get_entity_by_id, with the same ids +# method: add entities, and get entity with the same ids +# expected: entity returned equals insert +# ''' +# res_ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# get_ids = [res_ids[0], res_ids[0]] +# res = connect.get_entity_by_id(collection, get_ids) +# assert len(res) == len(get_ids) +# for i in range(len(get_ids)): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][0]) +# +# """ +# ****************************************************************** +# The following cases are used to test `get_entity_by_id` function, with different metric type +# ****************************************************************** +# """ +# +# def test_get_entity_parts_ids_binary(self, connect, binary_collection): +# ''' +# target: test.get_entity_by_id, some ids in jac_collection, some ids not +# method: add entity, and get +# expected: entity returned equals insert +# ''' +# ids = connect.bulk_insert(binary_collection, default_binary_entities) +# connect.flush([binary_collection]) +# get_ids = [ids[0], 1, ids[-1]] +# res = connect.get_entity_by_id(binary_collection, get_ids) +# assert_equal_vector(res[0].get("binary_vector"), default_binary_entities[-1]["values"][0]) +# assert_equal_vector(res[-1].get("binary_vector"), default_binary_entities[-1]["values"][-1]) +# assert res[1] is None +# +# """ +# ****************************************************************** +# The following cases are 
used to test `get_entity_by_id` function, with tags +# ****************************************************************** +# """ +# +# def test_get_entities_tag(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id +# method: add entities with tag, get +# expected: entity returned +# ''' +# connect.create_partition(collection, default_tag) +# ids = connect.bulk_insert(collection, default_entities, partition_name = default_tag) +# connect.flush([collection]) +# get_ids = ids[:get_pos] +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i]) +# +# def test_get_entities_tag_default(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id +# method: add entities with default tag, get +# expected: entity returned +# ''' +# connect.create_partition(collection, default_tag) +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# get_ids = ids[:get_pos] +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i]) +# +# def test_get_entities_tags_default(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id +# method: create partitions, add entities with default tag, get +# expected: entity returned +# ''' +# tag_new = "tag_new" +# connect.create_partition(collection, default_tag) +# connect.create_partition(collection, tag_new) +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# get_ids = ids[:get_pos] +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i]) +# +# def test_get_entities_tags_A(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id +# method: create partitions, add entities with default tag, get +# expected: entity returned +# ''' +# tag_new = "tag_new" +# connect.create_partition(collection, default_tag) +# connect.create_partition(collection, tag_new) +# ids = connect.bulk_insert(collection, default_entities, partition_name = default_tag) +# connect.flush([collection]) +# get_ids = ids[:get_pos] +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i]) +# +# def test_get_entities_tags_B(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id +# method: create partitions, add entities with default tag, get +# expected: entity returned +# ''' +# tag_new = "tag_new" +# connect.create_partition(collection, default_tag) +# connect.create_partition(collection, tag_new) +# new_entities = gen_entities(default_nb + 1) +# ids = connect.bulk_insert(collection, default_entities, partition_name = default_tag) +# ids_new = connect.bulk_insert(collection, new_entities, partition_name = tag_new) +# connect.flush([collection]) +# get_ids = ids[:get_pos] +# get_ids.extend(ids_new[:get_pos]) +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i]) +# for i in range(get_pos, get_pos * 2): +# assert_equal_vector(res[i].get(default_float_vec_field_name), new_entities[-1]["values"][i - get_pos]) +# +# 
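# NOTE: the commented-out cases above exercise the deprecated `get_entity_by_id`
# API from the old pymilvus interface. For reference, a minimal sketch of the
# equivalent primary-key lookup with the current pymilvus ORM API; the host,
# port, the "demo" collection name, and the output field are illustrative
# assumptions, not taken from this patch:
#
# from pymilvus import connections, Collection
#
# connections.connect(host="127.0.0.1", port="19530")
# demo = Collection("demo")   # assumes an existing collection with an "id" primary field
# demo.load()                 # entities must be loaded before querying
# # query() filters by a boolean expression; ids that were deleted or never
# # inserted simply do not appear in the result list.
# res = demo.query(expr="id in [0, 1]", output_fields=["float"])
#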
@pytest.mark.tags(CaseLabel.L2) +# def test_get_entities_indexed_tag(self, connect, collection, get_simple_index, get_pos): +# ''' +# target: test.get_entity_by_id +# method: add entities with tag, get +# expected: entity returned +# ''' +# connect.create_partition(collection, default_tag) +# ids = connect.bulk_insert(collection, default_entities, partition_name = default_tag) +# connect.flush([collection]) +# connect.create_index(collection, default_float_vec_field_name, get_simple_index) +# get_ids = ids[:get_pos] +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i]) +# +# """ +# ****************************************************************** +# The following cases are used to test `get_entity_by_id` function, with fields params +# ****************************************************************** +# """ +# +# def test_get_entity_field(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id, get one +# method: add entity, and get +# expected: entity returned equals insert +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# get_ids = [ids[get_pos]] +# fields = ["int64"] +# res = connect.get_entity_by_id(collection, get_ids, fields = fields) +# # assert fields +# res = res.dict() +# assert res[0]["field"] == fields[0] +# assert res[0]["values"] == [default_entities[0]["values"][get_pos]] +# assert res[0]["type"] == DataType.INT64 +# +# def test_get_entity_fields(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id, get one +# method: add entity, and get +# expected: entity returned equals insert +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# get_ids = [ids[get_pos]] +# fields = ["int64", "float", default_float_vec_field_name] +# res = connect.get_entity_by_id(collection, get_ids, fields = fields) +# # assert fields +# res = res.dict() +# assert len(res) == len(fields) +# for field in res: +# if field["field"] == fields[0]: +# assert field["values"] == [default_entities[0]["values"][get_pos]] +# elif field["field"] == fields[1]: +# assert field["values"] == [default_entities[1]["values"][get_pos]] +# else: +# assert_equal_vector(field["values"][0], default_entities[-1]["values"][get_pos]) +# +# # TODO: assert exception +# def test_get_entity_field_not_match(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id, get one +# method: add entity, and get +# expected: entity returned equals insert +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# get_ids = [ids[get_pos]] +# fields = ["int1288"] +# with pytest.raises(Exception) as e: +# res = connect.get_entity_by_id(collection, get_ids, fields = fields) +# +# # TODO: assert exception +# def test_get_entity_fields_not_match(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id, get one +# method: add entity, and get +# expected: entity returned equals insert +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# get_ids = [ids[get_pos]] +# fields = ["int1288"] +# with pytest.raises(Exception) as e: +# res = connect.get_entity_by_id(collection, get_ids, fields = fields) +# +# def test_get_entity_id_not_exised(self, connect, collection): +# ''' +# target: test get entity, params entity_id not existed +# method: add entity and get +# 
expected: empty result +# ''' +# ids = connect.bulk_insert(collection, default_entity) +# connect.flush([collection]) +# res = connect.get_entity_by_id(collection, [1]) +# assert res[0] is None +# +# def test_get_entity_collection_not_existed(self, connect, collection): +# ''' +# target: test get entity, params collection_name not existed +# method: add entity and get +# expected: error raised +# ''' +# ids = connect.bulk_insert(collection, default_entity) +# connect.flush([collection]) +# collection_new = gen_unique_str() +# with pytest.raises(Exception) as e: +# res = connect.get_entity_by_id(collection_new, [ids[0]]) +# +# """ +# ****************************************************************** +# The following cases are used to test `get_entity_by_id` function, after deleted +# ****************************************************************** +# """ +# +# def test_get_entity_after_delete(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id +# method: add entities, and delete, get entity by the given id +# expected: empty result +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# delete_ids = [ids[get_pos]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# connect.flush([collection]) +# get_ids = [ids[get_pos]] +# res = connect.get_entity_by_id(collection, get_ids) +# assert res[0] is None +# +# def test_get_entities_after_delete(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id +# method: add entities, and delete, get entity by the given id +# expected: empty result +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# delete_ids = ids[:get_pos] +# status = connect.delete_entity_by_id(collection, delete_ids) +# connect.flush([collection]) +# get_ids = delete_ids +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert res[i] is None +# +# def test_get_entities_after_delete_compact(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id +# method: add entities, and delete, get entity by the given id +# expected: empty result +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# delete_ids = ids[:get_pos] +# status = connect.delete_entity_by_id(collection, delete_ids) +# connect.flush([collection]) +# connect.compact(collection) +# get_ids = ids[:get_pos] +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert res[i] is None +# +# def test_get_entities_indexed_batch(self, connect, collection, get_simple_index, get_pos): +# ''' +# target: test.get_entity_by_id +# method: add entities batch, create index, get +# expected: entity returned +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# connect.create_index(collection, default_float_vec_field_name, get_simple_index) +# get_ids = ids[:get_pos] +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i]) +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_get_entities_indexed_single(self, connect, collection, get_simple_index, get_pos): +# ''' +# target: test.get_entity_by_id +# method: add entities 1 entity/per request, create index, get +# expected: entity returned +# ''' +# ids = [] +# for i in range(default_nb): +# ids.append(connect.bulk_insert(collection, 
default_entity)[0]) +# connect.flush([collection]) +# connect.create_index(collection, default_float_vec_field_name, get_simple_index) +# get_ids = ids[:get_pos] +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entity[-1]["values"][0]) +# +# def test_get_entities_with_deleted_ids(self, connect, id_collection): +# ''' +# target: test.get_entity_by_id +# method: add entities ids, and delete part, get entity include the deleted id +# expected: +# ''' +# ids = [i for i in range(default_nb)] +# res_ids = connect.bulk_insert(id_collection, default_entities, ids) +# connect.flush([id_collection]) +# status = connect.delete_entity_by_id(id_collection, [res_ids[1]]) +# connect.flush([id_collection]) +# get_ids = res_ids[:2] +# res = connect.get_entity_by_id(id_collection, get_ids) +# assert len(res) == len(get_ids) +# assert_equal_vector(res[0].get(default_float_vec_field_name), default_entities[-1]["values"][0]) +# assert res[1] is None +# +# # TODO: unable to set config +# def _test_get_entities_after_delete_disable_autoflush(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id +# method: disable autoflush, add entities, and delete, get entity by the given id +# expected: empty result +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# delete_ids = ids[:get_pos] +# try: +# disable_flush(connect) +# status = connect.delete_entity_by_id(collection, delete_ids) +# get_ids = ids[:get_pos] +# res = connect.get_entity_by_id(collection, get_ids) +# for i in range(get_pos): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i]) +# finally: +# enable_flush(connect) +# +# def test_get_entities_after_delete_same_ids(self, connect, id_collection): +# ''' +# target: test.get_entity_by_id +# method: add entities with the same ids, and delete, get entity by the given id +# expected: empty result +# ''' +# ids = [i for i in range(default_nb)] +# ids[0] = 1 +# res_ids = connect.bulk_insert(id_collection, default_entities, ids) +# connect.flush([id_collection]) +# status = connect.delete_entity_by_id(id_collection, [1]) +# connect.flush([id_collection]) +# get_ids = [1] +# res = connect.get_entity_by_id(id_collection, get_ids) +# assert res[0] is None +# +# def test_get_entity_after_delete_with_partition(self, connect, collection, get_pos): +# ''' +# target: test.get_entity_by_id +# method: add entities into partition, and delete, get entity by the given id +# expected: get one entity +# ''' +# connect.create_partition(collection, default_tag) +# ids = connect.bulk_insert(collection, default_entities, partition_name = default_tag) +# connect.flush([collection]) +# status = connect.delete_entity_by_id(collection, [ids[get_pos]]) +# connect.flush([collection]) +# res = connect.get_entity_by_id(collection, [ids[get_pos]]) +# assert res[0] is None +# +# def test_get_entity_by_id_multithreads(self, connect, collection): +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# get_id = ids[100:200] +# +# def get(): +# res = connect.get_entity_by_id(collection, get_id) +# assert len(res) == len(get_id) +# for i in range(len(res)): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][100 + i]) +# +# with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: +# future_results = {executor.submit( +# get): i for i 
in range(10)} +# for future in concurrent.futures.as_completed(future_results): +# future.result() +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_get_entity_by_id_insert_multi_threads(self, connect, collection): +# ''' +# target: test.get_entity_by_id +# method: thread do insert and get +# expected: +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# get_id = ids[:1000] +# +# def insert(): +# # logging.getLogger().info(current_thread().getName() + " insert") +# step = 1000 +# for i in range(default_nb // step): +# group_entities = gen_entities(step, False) +# connect.bulk_insert(collection, group_entities) +# connect.flush([collection]) +# +# def get(): +# # logging.getLogger().info(current_thread().getName() + " get") +# res = connect.get_entity_by_id(collection, get_id) +# assert len(res) == len(get_id) +# for i in range(len(res)): +# assert_equal_vector(res[i].get(default_float_vec_field_name), default_entities[-1]["values"][i]) +# +# with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor: +# for i in range(20): +# fun = random.choices([get, insert])[0] +# future = executor.submit(fun) +# future.result() +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_get_entity_by_id_insert_multi_threads_2(self, connect, collection): +# ''' +# target: test.get_entity_by_id +# method: thread do insert and get +# expected: +# ''' +# with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: +# def get(group_ids, group_entities): +# # logging.getLogger().info(current_thread().getName() + " get") +# res = connect.get_entity_by_id(collection, group_ids) +# assert len(res) == len(group_ids) +# for i in range(len(res)): +# assert_equal_vector(res[i].get(default_float_vec_field_name), group_entities[-1]["values"][i]) +# +# def insert(group_vectors): +# # logging.getLogger().info(current_thread().getName() + " insert") +# for group_vector in group_vectors: +# group_entities = [ +# {"name": "int64", "type": DataType.INT64, "values": [i for i in range(step)]}, +# {"name": "float", "type": DataType.FLOAT, "values": [float(i) for i in range(step)]}, +# {"name": default_float_vec_field_name, "type": DataType.FLOAT_VECTOR, "values": group_vector} +# ] +# group_ids = connect.bulk_insert(collection, group_entities) +# connect.flush([collection]) +# executor.submit(get, group_ids, group_entities) +# +# step = 100 +# vectors = gen_vectors(default_nb, default_dim, False) +# group_vectors = [vectors[i:i + step] for i in range(0, len(vectors), step)] +# task = executor.submit(insert, group_vectors) +# task.result() +# +# +# class TestGetInvalid(object): +# """ +# Test get entities with invalid params +# """ +# +# @pytest.fixture( +# scope="function", +# params=gen_invalid_strs() +# ) +# def get_collection_name(self, request): +# yield request.param +# +# @pytest.fixture( +# scope="function", +# params=gen_invalid_strs() +# ) +# def get_field_name(self, request): +# yield request.param +# +# @pytest.fixture( +# scope="function", +# params=gen_invalid_ints() +# ) +# def get_entity_id(self, request): +# yield request.param +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_insert_ids_invalid(self, connect, collection, get_entity_id): +# ''' +# target: test insert, with using customize ids, which are not int64 +# method: create collection and insert entities in it +# expected: raise an exception +# ''' +# entity_id = get_entity_id +# ids = [entity_id for _ in range(default_nb)] +# with pytest.raises(Exception): +# connect.get_entity_by_id(collection, 
ids) +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_insert_parts_ids_invalid(self, connect, collection, get_entity_id): +# ''' +# target: test insert, with using customize ids, which are not int64 +# method: create collection and insert entities in it +# expected: raise an exception +# ''' +# entity_id = get_entity_id +# ids = [i for i in range(default_nb)] +# ids[-1] = entity_id +# with pytest.raises(Exception): +# connect.get_entity_by_id(collection, ids) +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_get_entities_with_invalid_collection_name(self, connect, get_collection_name): +# collection_name = get_collection_name +# ids = [1] +# with pytest.raises(Exception): +# res = connect.get_entity_by_id(collection_name, ids) +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_get_entities_with_invalid_field_name(self, connect, collection, get_field_name): +# field_name = get_field_name +# ids = [1] +# fields = [field_name] +# with pytest.raises(Exception): +# res = connect.get_entity_by_id(collection, ids, fields = fields) diff --git a/tests/python_client/testcases/entity/test_insert.py b/tests/python_client/testcases/entity/test_insert.py new file mode 100644 index 0000000000..2c267859f0 --- /dev/null +++ b/tests/python_client/testcases/entity/test_insert.py @@ -0,0 +1,1187 @@ +import pytest +from pymilvus import DataType, ParamError, BaseException +from utils.utils import * +from common.constants import * + +ADD_TIMEOUT = 60 +uid = "test_insert" +field_name = default_float_vec_field_name +binary_field_name = default_binary_vec_field_name +default_single_query = { + "bool": { + "must": [ + {"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type": "L2", + "params": {"nprobe": 10}}}} + ] + } +} + + +class TestInsertBase: + """ + ****************************************************************** + The following cases are used to test `insert` function + ****************************************************************** + """ + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + # if str(connect._cmd("mode")) == "CPU": + if request.param["index_type"] in index_cpu_not_support(): + pytest.skip("CPU not support index_type: ivf_sq8h") + logging.getLogger().info(request.param) + return request.param + + @pytest.fixture( + scope="function", + params=gen_single_filter_fields() + ) + def get_filter_field(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_single_vector_fields() + ) + def get_vector_field(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_with_empty_entity(self, connect, collection): + ''' + target: test insert with empty entity list + method: set empty entity list as insert method params + expected: raises a ParamError exception + ''' + entities = [] + with pytest.raises(ParamError) as e: + connect.insert(collection, entities) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_with_None(self, connect, collection): + ''' + target: test insert with None + method: set None as insert method params + expected: raises a ParamError + ''' + entity = None + with pytest.raises(Exception) as e: + connect.insert(collection, entity) + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_collection_not_existed(self, connect): + ''' + target: test insert, with collection not existed + method: insert entity into a random named collection + expected: raise a 
BaseException + ''' + collection_name = gen_unique_str(uid) + with pytest.raises(BaseException) as e: + connect.insert(collection_name, default_entities) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_without_connect(self, dis_connect, collection): + ''' + target: test insert entities without connection + method: create collection and insert entities in it, check if inserted successfully + expected: raise exception + ''' + with pytest.raises(Exception) as e: + dis_connect.insert(collection, default_entities) + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(ADD_TIMEOUT) + def test_insert_drop_collection(self, connect, collection): + ''' + target: test drop collection after inserting entities + method: insert entities and drop collection + expected: has_collection false + ''' + result = connect.insert(collection, default_entity) + assert len(result.primary_keys) == 1 + connect.drop_collection(collection) + assert connect.has_collection(collection) == False + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_flush_drop_collection(self, connect, collection): + ''' + target: test drop collection after inserting and flushing entities + method: insert entities, flush, and drop collection + expected: has_collection false + ''' + result = connect.insert(collection, default_entity) + assert len(result.primary_keys) == 1 + connect.flush([collection]) + connect.drop_collection(collection) + assert connect.has_collection(collection) == False + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(ADD_TIMEOUT) + def test_insert_create_index(self, connect, collection, get_simple_index): + ''' + target: test build index after inserting entities + method: insert entities and build index + expected: no error raised + ''' + result = connect.insert(collection, default_entities) + assert len(result.primary_keys) == default_nb + connect.flush([collection]) + connect.create_index(collection, field_name, get_simple_index) + if get_simple_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(get_simple_index, field_name) + assert index == get_simple_index + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_after_create_index(self, connect, collection, get_simple_index): + ''' + target: test insert entities after building index + method: build index, then insert entities + expected: no error raised + ''' + connect.create_index(collection, field_name, get_simple_index) + result = connect.insert(collection, default_entities) + assert len(result.primary_keys) == default_nb + if get_simple_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(get_simple_index, field_name) + assert index == get_simple_index + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_search(self, connect, collection): + ''' + target: test search entities after insert + method: insert entities, flush, load, and search collection + expected: no error raised + ''' + result = connect.insert(collection, default_entities) + connect.flush([collection]) + connect.load_collection(collection) + res = connect.search(collection, default_single_query) + assert len(res[0]) == default_top_k + + @pytest.mark.tags(CaseLabel.L2) + def _test_insert_segment_row_count(self, connect, collection): + nb = default_segment_row_limit + 1 + result = connect.insert(collection, gen_entities(nb)) + connect.flush([collection]) +
assert len(result.primary_keys) == nb + stats = connect.get_collection_stats(collection) + assert len(stats['partitions'][0]['segments']) == 2 + for segment in stats['partitions'][0]['segments']: + assert segment['row_count'] in [default_segment_row_limit, 1] + + @pytest.fixture( + scope="function", + params=[ + 1, + 2000 + ], + ) + def insert_count(self, request): + yield request.param + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_ids(self, connect, id_collection, insert_count): + ''' + target: test insert entities in collection, use customized ids + method: create collection and insert entities in it, check the ids returned and the collection length after entities inserted + expected: the length of returned ids and the collection row count both equal nb + ''' + nb = insert_count + ids = [i for i in range(nb)] + entities = gen_entities(nb) + entities[0]["values"] = ids + result = connect.insert(id_collection, entities) + connect.flush([id_collection]) + assert len(result.primary_keys) == nb + assert result.primary_keys == ids + stats = connect.get_collection_stats(id_collection) + assert stats[row_count] == nb + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(ADD_TIMEOUT) + def test_insert_the_same_ids(self, connect, id_collection, insert_count): + ''' + target: test insert vectors in collection, use the same customized id for all entities + method: create collection and insert vectors in it, check the ids returned and the collection length after vectors inserted + expected: the length of returned ids and the collection row count both equal nb + ''' + nb = insert_count + ids = [1 for i in range(nb)] + entities = gen_entities(nb) + entities[0]["values"] = ids + result = connect.insert(id_collection, entities) + connect.flush([id_collection]) + assert len(result.primary_keys) == nb + assert result.primary_keys == ids + stats = connect.get_collection_stats(id_collection) + assert stats[row_count] == nb + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field): + ''' + target: test create normal collection with different fields, insert entities with customized ids + method: create collection with diff fields: metric/field_type/..., insert, and count + expected: row count correct + ''' + nb = 5 + filter_field = get_filter_field + vector_field = get_vector_field + collection_name = gen_unique_str("test_collection") + fields = { + "fields": [gen_primary_field(), filter_field, vector_field], + "auto_id": False + } + connect.create_collection(collection_name, fields) + ids = [i for i in range(nb)] + entities = gen_entities_by_fields(fields["fields"], nb, default_dim, ids) + logging.getLogger().info(entities) + result = connect.insert(collection_name, entities) + assert result.primary_keys == ids + connect.flush([collection_name]) + stats = connect.get_collection_stats(collection_name) + assert stats[row_count] == nb + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(ADD_TIMEOUT) + def test_insert_ids_not_match(self, connect, id_collection, insert_count): + ''' + target: test insert entities in collection without ids + method: create id_collection and insert entities without ids + expected: exception raised + ''' + nb = insert_count + with pytest.raises(Exception) as e: + entities = gen_entities(nb) + del entities[0] + connect.insert(id_collection, entities) + + # TODO + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_twice_ids_no_ids(self, connect, 
id_collection): + ''' + target: check the result of insert, with params ids and no ids + method: test insert vectors twice, use customize ids first, and then use no ids + expected: BaseException raised + ''' + ids = [i for i in range(default_nb)] + entities = copy.deepcopy(default_entities) + entities[0]["values"] = ids + connect.insert(id_collection, entities) + with pytest.raises(Exception) as e: + del entities[0] + connect.insert(id_collection, entities) + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(ADD_TIMEOUT) + def test_insert_not_ids(self, connect, id_collection): + ''' + target: check the result of insert, with params ids and no ids + method: test insert vectors twice, use not ids first, and then use customize ids + expected: error raised + ''' + entities = copy.deepcopy(default_entities) + del entities[0] + with pytest.raises(Exception) as e: + connect.insert(id_collection, entities) + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_ids_length_not_match_batch(self, connect, id_collection): + ''' + target: test insert vectors in collection, use customize ids, len(ids) != len(vectors) + method: create collection and insert vectors in it + expected: raise an exception + ''' + ids = [i for i in range(1, default_nb)] + logging.getLogger().info(len(ids)) + entities = copy.deepcopy(default_entities) + entities[0]["values"] = ids + with pytest.raises(Exception) as e: + connect.insert(id_collection, entities) + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(ADD_TIMEOUT) + def test_insert_ids_length_not_match_single(self, connect, id_collection): + ''' + target: test insert vectors in collection, use customize ids, len(ids) != len(vectors) + method: create collection and insert vectors in it + expected: raise an exception + ''' + ids = [i for i in range(1, default_nb)] + logging.getLogger().info(len(ids)) + entity = copy.deepcopy(default_entity) + entity[0]["values"] = ids + with pytest.raises(Exception) as e: + connect.insert(id_collection, entity) + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_partition(self, connect, collection): + ''' + target: test insert entities in collection created before + method: create collection and insert entities in it, with the partition_name param + expected: the collection row count equals to nq + ''' + connect.create_partition(collection, default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + assert len(result.primary_keys) == default_nb + assert connect.has_partition(collection, default_tag) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == default_nb + + # TODO + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_partition_with_ids(self, connect, id_collection): + ''' + target: test insert entities in collection created before, insert with ids + method: create collection and insert entities in it, with the partition_name param + expected: the collection row count equals to nq + ''' + connect.create_partition(id_collection, default_tag) + ids = [i for i in range(default_nb)] + entities = gen_entities(default_nb) + entities[0]["values"] = ids + result = connect.insert(id_collection, entities, partition_name=default_tag) + assert result.primary_keys == ids + logging.getLogger().info(connect.describe_collection(id_collection)) + + @pytest.mark.timeout(ADD_TIMEOUT) + 
@pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_default_partition(self, connect, collection): + ''' + target: test insert entities into default partition + method: create partition and insert into collection with the default partition_name + expected: the collection row count equals to nb + ''' + result = connect.insert(collection, default_entities, partition_name=default_partition_name) + assert len(result.primary_keys) == default_nb + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == default_nb + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.L2) + def test_insert_partition_not_existed(self, connect, collection): + ''' + target: test insert entities in collection created before + method: create collection and insert entities in it, with a non-existent partition_name param + expected: error raised + ''' + tag = gen_unique_str() + with pytest.raises(Exception) as e: + connect.insert(collection, default_entities, partition_name=tag) + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.L2) + def test_insert_partition_repeatedly(self, connect, collection): + ''' + target: test insert entities in collection created before + method: create collection and insert entities in it repeatedly, with the partition_name param + expected: the collection row count equals to 2 * nb + ''' + connect.create_partition(collection, default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + connect.flush([collection]) + res = connect.get_collection_stats(collection) + assert res[row_count] == 2 * default_nb + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_dim_not_matched(self, connect, collection): + ''' + target: test insert entities, the vector dimension is not equal to the collection dimension + method: the entities dimension is half of the collection dimension, check the status + expected: error raised + ''' + vectors = gen_vectors(default_nb, int(default_dim) // 2) + insert_entities = copy.deepcopy(default_entities) + insert_entities[-1]["values"] = vectors + with pytest.raises(Exception) as e: + connect.insert(collection, insert_entities) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_with_field_name_not_match(self, connect, collection): + ''' + target: test insert entities, with the entity field name updated + method: update entity field name + expected: error raised + ''' + tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", "int64new") + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_field_type_not_match(self, connect, collection): + ''' + target: test insert entities, with the entity field type updated + method: update entity field type + expected: error raised + ''' + tmp_entity = update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT) + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_field_value_not_match(self, connect, collection): + ''' + target: test insert entities, with the entity field value updated + method: update entity field value + expected: error raised + ''' + tmp_entity = update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's') + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + 
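# NOTE: the schema-mismatch cases above and below all mutate a deep copy of the
# column-based entity layout this framework uses (cf. gen_entities in
# utils.utils). A minimal sketch of that layout, with illustrative values;
# renaming, retyping, adding, or removing any field makes the entity disagree
# with the collection schema, which is why connect.insert() is expected to
# raise in each case:
#
# sketch_entity = [
#     {"name": "int64", "type": DataType.INT64, "values": [1]},
#     {"name": "float", "type": DataType.FLOAT, "values": [1.0]},
#     {"name": field_name, "type": DataType.FLOAT_VECTOR,
#      "values": [[0.1] * default_dim]},
# ]
# connect.insert(collection, sketch_entity)  # one schema-compatible row
#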
@pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_with_field_more(self, connect, collection): + ''' + target: test insert entities, with more fields than collection schema + method: add entity field + expected: error raised + ''' + tmp_entity = add_field(copy.deepcopy(default_entity)) + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_with_field_vector_more(self, connect, collection): + ''' + target: test insert entities, with more fields than collection schema + method: add entity vector field + expected: error raised + ''' + tmp_entity = add_vector_field(default_nb, default_dim) + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_with_field_less(self, connect, collection): + ''' + target: test insert entities, with less fields than collection schema + method: remove entity field + expected: error raised + ''' + tmp_entity = remove_field(copy.deepcopy(default_entity)) + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_with_field_vector_less(self, connect, collection): + ''' + target: test insert entities, with less fields than collection schema + method: remove entity vector field + expected: error raised + ''' + tmp_entity = remove_vector_field(copy.deepcopy(default_entity)) + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_with_no_field_vector_value(self, connect, collection): + ''' + target: test insert entities, with no vector field value + method: remove entity values of vector field + expected: error raised + ''' + tmp_entity = copy.deepcopy(default_entity) + del tmp_entity[-1]["values"] + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_with_no_field_vector_type(self, connect, collection): + ''' + target: test insert entities, with no vector field type + method: remove entity vector field + expected: error raised + ''' + tmp_entity = copy.deepcopy(default_entity) + del tmp_entity[-1]["type"] + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_with_no_field_vector_name(self, connect, collection): + ''' + target: test insert entities, with no vector field name + method: remove entity vector field + expected: error raised + ''' + tmp_entity = copy.deepcopy(default_entity) + del tmp_entity[-1]["name"] + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + # todo fix timeout + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(30) + def test_collection_insert_rows_count_multi_threading(self, args, collection): + ''' + target: test collection rows_count is correct or not with multi threading + method: create collection and insert entities in it(idmap), + assert the value returned by count_entities method is equal to length of entities + expected: the count is equal to the length of entities + ''' + if args["handler"] == "HTTP": + pytest.skip("Skip test in http mode") + thread_num = 8 + threads = [] + milvus = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False) + + def insert(thread_i): + logging.getLogger().info("In thread-%d" % thread_i) + result = milvus.insert(collection, default_entities) + 
milvus.flush([collection]) + + for i in range(thread_num): + x = threading.Thread(target=insert, args=(i,)) + threads.append(x) + x.start() + for th in threads: + th.join() + stats = milvus.get_collection_stats(collection) + assert stats[row_count] == thread_num * default_nb + + # TODO: unable to set config + @pytest.mark.tags(CaseLabel.L2) + def _test_insert_disable_auto_flush(self, connect, collection): + ''' + target: test insert entities, with disable autoflush + method: disable autoflush and insert, get entity + expected: the count is equal to 0 + ''' + delete_nums = 500 + disable_flush(connect) + result = connect.insert(collection, default_entities) + ids = result.primary_keys + res = connect.get_entity_by_id(collection, ids[:delete_nums]) + assert len(res) == delete_nums + assert res[0] is None + + +class TestInsertBinary: + @pytest.fixture( + scope="function", + params=gen_binary_index() + ) + def get_binary_index(self, request): + request.param["metric_type"] = "JACCARD" + return request.param + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_binary_entities(self, connect, binary_collection): + ''' + target: test insert entities in binary collection + method: create collection and insert binary entities in it + expected: the collection row count equals to nb + ''' + result = connect.insert(binary_collection, default_binary_entities) + assert len(result.primary_keys) == default_nb + connect.flush([binary_collection]) + stats = connect.get_collection_stats(binary_collection) + assert stats[row_count] == default_nb + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_binary_partition(self, connect, binary_collection): + ''' + target: test insert entities and create partition tag + method: create collection and insert binary entities in it, with the partition_name param + expected: the collection row count equals to nb + ''' + connect.create_partition(binary_collection, default_tag) + result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag) + assert len(result.primary_keys) == default_nb + assert connect.has_partition(binary_collection, default_tag) + connect.flush([binary_collection]) + stats = connect.get_collection_stats(binary_collection) + assert stats[row_count] == default_nb + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.skip(reason="issue 7027") + def test_insert_binary_multi_times(self, connect, binary_collection): + ''' + target: test insert entities multi times and final flush + method: create collection and insert binary entity multi and final flush + expected: the collection row count equals to nb + ''' + for i in range(default_nb): + result = connect.insert(binary_collection, default_binary_entity) + assert len(result.primary_keys) == 1 + connect.flush([binary_collection]) + stats = connect.get_collection_stats(binary_collection) + assert stats[row_count] == default_nb + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index): + ''' + target: test insert binary entities after build index + method: build index and insert entities + expected: no error raised + ''' + connect.create_index(binary_collection, binary_field_name, get_binary_index) + result = connect.insert(binary_collection, default_binary_entities) + assert len(result.primary_keys) == default_nb + connect.flush([binary_collection]) + index = connect.describe_index(binary_collection, "") + create_target_index(get_binary_index, binary_field_name) + assert index == 
get_binary_index + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.L2) + def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index): + ''' + target: test build index insert after vector + method: insert vector and build index + expected: no error raised + ''' + result = connect.insert(binary_collection, default_binary_entities) + assert len(result.primary_keys) == default_nb + connect.flush([binary_collection]) + connect.create_index(binary_collection, binary_field_name, get_binary_index) + index = connect.describe_index(binary_collection, "") + create_target_index(get_binary_index, binary_field_name) + assert index == get_binary_index + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_binary_search(self, connect, binary_collection): + ''' + target: test search vector after insert vector after a while + method: insert vector, sleep, and search collection + expected: no error raised + ''' + result = connect.insert(binary_collection, default_binary_entities) + connect.flush([binary_collection]) + query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, 1, + metric_type="JACCARD") + connect.load_collection(binary_collection) + res = connect.search(binary_collection, query) + logging.getLogger().debug(res) + assert len(res[0]) == default_top_k + + +class TestInsertAsync: + @pytest.fixture(scope="function", autouse=True) + def skip_http_check(self, args): + if args["handler"] == "HTTP": + pytest.skip("skip in http mode") + + @pytest.fixture( + scope="function", + params=[ + 1, + 1000 + ], + ) + def insert_count(self, request): + yield request.param + + def check_status(self, result): + logging.getLogger().info("In callback check status") + assert not result + + def check_result(self, result): + logging.getLogger().info("In callback check results") + assert result + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_async(self, connect, collection, insert_count): + ''' + target: test insert vectors with different length of vectors + method: set different vectors as insert method params + expected: length of ids is equal to the length of vectors + ''' + nb = insert_count + future = connect.insert(collection, gen_entities(nb), _async=True) + ids = future.result().primary_keys + connect.flush([collection]) + assert len(ids) == nb + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_async_false(self, connect, collection, insert_count): + ''' + target: test insert vectors with different length of vectors + method: set different vectors as insert method params + expected: length of ids is equal to the length of vectors + ''' + nb = insert_count + result = connect.insert(collection, gen_entities(nb), _async=False) + # ids = future.result() + connect.flush([collection]) + assert len(result.primary_keys) == nb + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_async_callback(self, connect, collection, insert_count): + ''' + target: test insert vectors with different length of vectors + method: set different vectors as insert method params + expected: length of ids is equal to the length of vectors + ''' + nb = insert_count + future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result) + future.done() + ids = future.result().primary_keys + assert len(ids) == nb + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_async_long(self, connect, collection): + ''' + target: test insert vectors with different length of vectors + method: set different vectors as 
insert method params + expected: length of ids is equal to the length of vectors + ''' + nb = 50000 + future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_result) + result = future.result() + assert len(result.primary_keys) == nb + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + logging.getLogger().info(stats) + assert stats[row_count] == nb + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_async_callback_timeout(self, connect, collection): + ''' + target: test insert vectors with different length of vectors + method: set different vectors as insert method params + expected: length of ids is equal to the length of vectors + ''' + nb = 100000 + future = connect.insert(collection, gen_entities(nb), _async=True, _callback=self.check_status, timeout=1) + with pytest.raises(Exception) as e: + result = future.result() + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_async_invalid_params(self, connect): + ''' + target: test insert vectors with different length of vectors + method: set different vectors as insert method params + expected: length of ids is equal to the length of vectors + ''' + collection_new = gen_unique_str() + future = connect.insert(collection_new, default_entities, _async=True) + future.done() + with pytest.raises(Exception) as e: + result = future.result() + + # 1339 + @pytest.mark.tags(CaseLabel.L2) + def test_insert_async_invalid_params_raise_exception(self, connect, collection): + ''' + target: test insert vectors with different length of vectors + method: set different vectors as insert method params + expected: length of ids is equal to the length of vectors + ''' + entities = [] + future = connect.insert(collection, entities, _async=True) + future.done() + with pytest.raises(Exception) as e: + future.result() + + +class TestInsertMultiCollections: + """ + ****************************************************************** + The following cases are used to test `insert` function + ****************************************************************** + """ + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + logging.getLogger().info(request.param) + # if str(connect._cmd("mode")) == "CPU": + # if request.param["index_type"] in index_cpu_not_support(): + # pytest.skip("sq8h not support in CPU mode") + return request.param + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_entity_multi_collections(self, connect): + ''' + target: test insert entities + method: create 10 collections and insert entities into them in turn + expected: row count + ''' + collection_num = 10 + collection_list = [] + for i in range(collection_num): + collection_name = gen_unique_str(uid) + collection_list.append(collection_name) + connect.create_collection(collection_name, default_fields) + result = connect.insert(collection_name, default_entities) + connect.flush([collection_name]) + assert len(result.primary_keys) == default_nb + stats = connect.get_collection_stats(collection_name) + assert stats[row_count] == default_nb + for i in range(collection_num): + connect.drop_collection(collection_list[i]) + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_drop_collection_insert_entity_another(self, connect, collection): + ''' + target: test insert vector to collection_1 after collection_2 deleted + method: delete collection_2 and insert vector to collection_1 + expected: row count equals the length of entities 
inserted + ''' + collection_name = gen_unique_str(uid) + connect.create_collection(collection_name, default_fields) + connect.drop_collection(collection) + result = connect.insert(collection_name, default_entity) + connect.flush([collection_name]) + assert len(result.primary_keys) == 1 + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_index_insert_entity_another(self, connect, collection, get_simple_index): + ''' + target: test insert vector to collection_2 after build index for collection_1 + method: build index and insert vector + expected: status ok + ''' + collection_name = gen_unique_str(uid) + connect.create_collection(collection_name, default_fields) + connect.create_index(collection, field_name, get_simple_index) + result = connect.insert(collection_name, default_entity) + assert len(result.primary_keys) == 1 + if get_simple_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(get_simple_index, field_name) + assert index == get_simple_index + connect.drop_collection(collection_name) + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_entity_create_index_another(self, connect, collection, get_simple_index): + ''' + target: test insert vector to collection_2 after build index for collection_1 + method: build index and insert vector + expected: status ok + ''' + collection_name = gen_unique_str(uid) + connect.create_collection(collection_name, default_fields) + result = connect.insert(collection, default_entity) + connect.flush([collection]) + connect.create_index(collection_name, field_name, get_simple_index) + if get_simple_index["index_type"] != "FLAT": + index = connect.describe_index(collection_name, "") + create_target_index(get_simple_index, field_name) + assert index == get_simple_index + stats = connect.get_collection_stats(collection) + assert stats[row_count] == 1 + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.L2) + def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index): + ''' + target: test insert vector to collection_2 after build index for collection_1 for a while + method: build index and insert vector + expected: status ok + ''' + collection_name = gen_unique_str(uid) + connect.create_collection(collection_name, default_fields) + result = connect.insert(collection, default_entity) + connect.flush([collection]) + connect.create_index(collection_name, field_name, get_simple_index) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == 1 + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.L2) + def test_search_entity_insert_entity_another(self, connect, collection): + ''' + target: test insert entity to collection_1 after search collection_2 + method: search collection and insert entity + expected: status ok + ''' + collection_name = gen_unique_str(uid) + connect.create_collection(collection_name, default_fields) + connect.load_collection(collection) + res = connect.search(collection, default_single_query) + assert len(res[0]) == 0 + connect.insert(collection_name, default_entity) + connect.flush([collection_name]) + stats = connect.get_collection_stats(collection_name) + assert stats[row_count] == 1 + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_insert_entity_search_entity_another(self, connect, collection): + ''' + target: test insert entity to collection_1 after search collection_2 + 
method: search collection and insert entity + expected: status ok + ''' + collection_name = gen_unique_str(uid) + connect.create_collection(collection_name, default_fields) + result = connect.insert(collection, default_entity) + connect.flush([collection]) + connect.load_collection(collection_name) + res = connect.search(collection_name, default_single_query) + stats = connect.get_collection_stats(collection) + assert stats[row_count] == 1 + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.L2) + def test_insert_entity_sleep_search_entity_another(self, connect, collection): + ''' + target: test insert entity to collection_1 after search collection_2 a while + method: search collection, sleep, and insert entity + expected: status ok + ''' + collection_name = gen_unique_str(uid) + connect.create_collection(collection_name, default_fields) + result = connect.insert(collection, default_entity) + connect.flush([collection]) + connect.load_collection(collection_name) + res = connect.search(collection_name, default_single_query) + assert len(res[0]) == 0 + + @pytest.mark.timeout(ADD_TIMEOUT) + @pytest.mark.tags(CaseLabel.L2) + def _test_insert_entity_during_release_collection(self, connect, collection): + ''' + target: test insert entity during release + method: release collection async, then do insert operation + expected: insert ok + ''' + for i in range(10): + connect.insert(collection, default_entities) + connect.flush([collection]) + connect.load_collection(collection) + + def release(): + connect.release_collection(collection) + + t = threading.Thread(target=release, args=(collection,)) + t.start() + result = connect.insert(collection, default_entities) + assert len(result.primary_keys) == default_nb + + +class TestInsertInvalid(object): + """ + Test inserting vectors with invalid collection names + """ + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_collection_name(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_tag_name(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_field_name(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_field_type(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_field_int_value(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_ints() + ) + def get_entity_id(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_vectors() + ) + def get_field_vectors_value(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_ids_invalid(self, connect, id_collection, get_entity_id): + ''' + target: test insert, with using customize ids, which are not int64 + method: create collection and insert entities in it + expected: raise an exception + ''' + entity_id = get_entity_id + ids = [entity_id for _ in range(default_nb)] + with pytest.raises(Exception): + connect.insert(id_collection, default_entities, ids) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_invalid_collection_name(self, connect, get_collection_name): + collection_name = get_collection_name + with pytest.raises(Exception): + connect.insert(collection_name, default_entity) + + @pytest.mark.tags(CaseLabel.L2) + def 
test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name): + tag_name = get_tag_name + connect.create_partition(collection, default_tag) + if tag_name is not None: + with pytest.raises(Exception): + connect.insert(collection, default_entity, partition_name=tag_name) + else: + connect.insert(collection, default_entity, partition_name=tag_name) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_invalid_field_name(self, connect, collection, get_field_name): + tmp_entity = update_field_name(copy.deepcopy(default_entity), "int64", get_field_name) + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_invalid_field_type(self, connect, collection, get_field_type): + field_type = get_field_type + tmp_entity = update_field_type(copy.deepcopy(default_entity), 'float', field_type) + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value): + field_value = get_field_int_value + tmp_entity = update_field_type(copy.deepcopy(default_entity), 'int64', field_value) + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value): + tmp_entity = copy.deepcopy(default_entity) + src_vector = tmp_entity[-1]["values"] + src_vector[0][1] = get_field_vectors_value + with pytest.raises(Exception): + connect.insert(collection, tmp_entity) + + +class TestInsertInvalidBinary(object): + """ + Test inserting vectors with invalid collection names + """ + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_collection_name(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_tag_name(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_field_name(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_field_type(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_field_int_value(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_ints() + ) + def get_entity_id(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_vectors() + ) + def get_field_vectors_value(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name): + tmp_entity = update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name) + with pytest.raises(Exception): + connect.insert(binary_collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value): + tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value) + with pytest.raises(Exception): + connect.insert(binary_collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value): + tmp_entity = copy.deepcopy(default_binary_entity) + src_vectors = tmp_entity[-1]["values"] + 
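# overwrite the first binary vector with an invalid value produced by the
+        # gen_invalid_vectors() fixture, so the insert below is expected to raise
+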
src_vectors[0] = get_field_vectors_value + with pytest.raises(Exception): + connect.insert(binary_collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id): + ''' + target: test insert, with using customize ids, which are not int64 + method: create collection and insert entities in it + expected: raise an exception + ''' + entity_id = get_entity_id + ids = [entity_id for _ in range(default_nb)] + with pytest.raises(Exception): + connect.insert(binary_id_collection, default_binary_entities, ids) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type): + field_type = get_field_type + tmp_entity = update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type) + with pytest.raises(Exception): + connect.insert(binary_collection, tmp_entity) + + @pytest.mark.tags(CaseLabel.L2) + def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value): + tmp_entities = copy.deepcopy(default_binary_entities) + src_vector = tmp_entities[-1]["values"] + src_vector[1] = get_field_vectors_value + with pytest.raises(Exception): + connect.insert(binary_collection, tmp_entities) diff --git a/tests/python_client/testcases/entity/test_list_id_in_segment.py b/tests/python_client/testcases/entity/test_list_id_in_segment.py new file mode 100644 index 0000000000..9eded9e01a --- /dev/null +++ b/tests/python_client/testcases/entity/test_list_id_in_segment.py @@ -0,0 +1,318 @@ +import time +import random +import pdb +import threading +import logging +from multiprocessing import Pool, Process +import pytest +from utils.utils import * +from common.constants import * + +uid = "list_id_in_segment" + +# def get_segment_id(connect, collection, nb=1, vec_type='float', index_params=None): +# if vec_type != "float": +# vectors, entities = gen_binary_entities(nb) +# else: +# entities = gen_entities(nb) +# ids = connect.bulk_insert(collection, entities) +# connect.flush([collection]) +# if index_params: +# if vec_type == 'float': +# connect.create_index(collection, default_float_vec_field_name, index_params) +# else: +# connect.create_index(collection, default_binary_vec_field_name, index_params) +# stats = connect.get_collection_stats(collection) +# return ids, stats["partitions"][0]["segments"][0]["id"] +# +# +# class TestListIdInSegmentBase: +# +# """ +# ****************************************************************** +# The following cases are used to test `list_id_in_segment` function +# ****************************************************************** +# """ +# def test_list_id_in_segment_collection_name_None(self, connect, collection): +# ''' +# target: get vector ids where collection name is None +# method: call list_id_in_segment with the collection_name: None +# expected: exception raised +# ''' +# collection_name = None +# ids, segment_id = get_segment_id(connect, collection) +# with pytest.raises(Exception) as e: +# connect.list_id_in_segment(collection_name, segment_id) +# +# def test_list_id_in_segment_collection_name_not_existed(self, connect, collection): +# ''' +# target: get vector ids where collection name does not exist +# method: call list_id_in_segment with a random collection_name, which is not in db +# expected: status not ok +# ''' +# collection_name = gen_unique_str(uid) +# ids, segment_id = get_segment_id(connect, collection) +# with pytest.raises(Exception) as e: +# vector_ids = 
connect.list_id_in_segment(collection_name, segment_id) +# +# @pytest.fixture( +# scope="function", +# params=gen_invalid_strs() +# ) +# def get_collection_name(self, request): +# yield request.param +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_list_id_in_segment_collection_name_invalid(self, connect, collection, get_collection_name): +# ''' +# target: get vector ids where collection name is invalid +# method: call list_id_in_segment with invalid collection_name +# expected: status not ok +# ''' +# collection_name = get_collection_name +# ids, segment_id = get_segment_id(connect, collection) +# with pytest.raises(Exception) as e: +# connect.list_id_in_segment(collection_name, segment_id) +# +# def test_list_id_in_segment_name_None(self, connect, collection): +# ''' +# target: get vector ids where segment name is None +# method: call list_id_in_segment with the name: None +# expected: exception raised +# ''' +# ids, segment_id = get_segment_id(connect, collection) +# segment = None +# with pytest.raises(Exception) as e: +# vector_ids = connect.list_id_in_segment(collection, segment) +# +# def test_list_id_in_segment_name_not_existed(self, connect, collection): +# ''' +# target: get vector ids where segment name does not exist +# method: call list_id_in_segment with a random segment name +# expected: status not ok +# ''' +# ids, seg_id = get_segment_id(connect, collection) +# # segment = gen_unique_str(uid) +# with pytest.raises(Exception) as e: +# vector_ids = connect.list_id_in_segment(collection, seg_id + 10000) +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_list_id_in_segment_without_index_A(self, connect, collection): +# ''' +# target: get vector ids when there is no index +# method: call list_id_in_segment and check if the segment contains vectors +# expected: status ok +# ''' +# nb = 1 +# ids, seg_id = get_segment_id(connect, collection, nb=nb) +# vector_ids = connect.list_id_in_segment(collection, seg_id) +# # vector_ids should match ids +# assert len(vector_ids) == nb +# assert vector_ids[0] == ids[0] +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_list_id_in_segment_without_index_B(self, connect, collection): +# ''' +# target: get vector ids when there is no index but with partition +# method: create partition, add vectors to it and call list_id_in_segment, check if the segment contains vectors +# expected: status ok +# ''' +# nb = 10 +# entities = gen_entities(nb) +# connect.create_partition(collection, default_tag) +# ids = connect.bulk_insert(collection, entities, partition_name=default_tag) +# connect.flush([collection]) +# stats = connect.get_collection_stats(collection) +# assert stats["partitions"][1]["tag"] == default_tag +# vector_ids = connect.list_id_in_segment(collection, stats["partitions"][1]["segments"][0]["id"]) +# # vector_ids should match ids +# assert len(vector_ids) == nb +# for i in range(nb): +# assert vector_ids[i] == ids[i] +# +# @pytest.fixture( +# scope="function", +# params=gen_simple_index() +# ) +# def get_simple_index(self, request, connect): +# if str(connect._cmd("mode")) == "CPU": +# if request.param["index_type"] in index_cpu_not_support(): +# pytest.skip("CPU not support index_type: ivf_sq8h") +# return request.param +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_list_id_in_segment_with_index_A(self, connect, collection, get_simple_index): +# ''' +# target: get vector ids when there is index +# method: call list_id_in_segment and check if the segment contains vectors +# expected: status ok +# ''' +# ids, seg_id = 
get_segment_id(connect, collection, nb=default_nb, index_params=get_simple_index) +# try: +# connect.list_id_in_segment(collection, seg_id) +# except Exception as e: +# assert False, str(e) +# # TODO: +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_list_id_in_segment_with_index_B(self, connect, collection, get_simple_index): +# ''' +# target: get vector ids when there is index and with partition +# method: create partition, add vectors to it and call list_id_in_segment, check if the segment contains vectors +# expected: status ok +# ''' +# connect.create_partition(collection, default_tag) +# ids = connect.bulk_insert(collection, default_entities, partition_name=default_tag) +# connect.flush([collection]) +# stats = connect.get_collection_stats(collection) +# assert stats["partitions"][1]["tag"] == default_tag +# try: +# connect.list_id_in_segment(collection, stats["partitions"][1]["segments"][0]["id"]) +# except Exception as e: +# assert False, str(e) +# # vector_ids should match ids +# # TODO +# +# def test_list_id_in_segment_after_delete_vectors(self, connect, collection): +# ''' +# target: get vector ids after vectors are deleted +# method: add vectors and delete a few, call list_id_in_segment +# expected: status ok, vector_ids decreased after vectors deleted +# ''' +# nb = 2 +# ids, seg_id = get_segment_id(connect, collection, nb=nb) +# delete_ids = [ids[0]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# connect.flush([collection]) +# stats = connect.get_collection_stats(collection) +# vector_ids = connect.list_id_in_segment(collection, stats["partitions"][0]["segments"][0]["id"]) +# assert len(vector_ids) == 1 +# assert vector_ids[0] == ids[1] +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_list_id_in_segment_after_delete_vectors(self, connect, collection): +# ''' +# target: get vector ids after vectors are deleted +# method: add vectors and delete a few, call list_id_in_segment +# expected: vector_ids decreased after vectors deleted +# ''' +# nb = 60 +# delete_length = 10 +# ids, seg_id = get_segment_id(connect, collection, nb=nb) +# delete_ids = ids[:delete_length] +# status = connect.delete_entity_by_id(collection, delete_ids) +# connect.flush([collection]) +# stats = connect.get_collection_stats(collection) +# vector_ids = connect.list_id_in_segment(collection, stats["partitions"][0]["segments"][0]["id"]) +# assert len(vector_ids) == nb - delete_length +# assert vector_ids[0] == ids[delete_length] +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_list_id_in_segment_with_index_ip(self, connect, collection, get_simple_index): +# ''' +# target: get vector ids when there is index +# method: call list_id_in_segment and check if the segment contains vectors +# expected: ids returned in ids inserted +# ''' +# get_simple_index["metric_type"] = "IP" +# ids, seg_id = get_segment_id(connect, collection, nb=default_nb, index_params=get_simple_index) +# vector_ids = connect.list_id_in_segment(collection, seg_id) +# # TODO: +# segment_row_limit = connect.get_collection_info(collection)["segment_row_limit"] +# assert vector_ids[0:segment_row_limit] == ids[0:segment_row_limit] +# +# class TestListIdInSegmentBinary: +# """ +# ****************************************************************** +# The following cases are used to test `list_id_in_segment` function +# ****************************************************************** +# """ +# @pytest.mark.tags(CaseLabel.L2) +# def test_list_id_in_segment_without_index_A(self, connect, binary_collection): +# ''' +# target: get 
vector ids when there is no index +# method: call list_id_in_segment and check if the segment contains vectors +# expected: status ok +# ''' +# nb = 10 +# vectors, entities = gen_binary_entities(nb) +# ids = connect.bulk_insert(binary_collection, entities) +# connect.flush([binary_collection]) +# stats = connect.get_collection_stats(binary_collection) +# vector_ids = connect.list_id_in_segment(binary_collection, stats["partitions"][0]["segments"][0]["id"]) +# # vector_ids should match ids +# assert len(vector_ids) == nb +# for i in range(nb): +# assert vector_ids[i] == ids[i] +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_list_id_in_segment_without_index_B(self, connect, binary_collection): +# ''' +# target: get vector ids when there is no index but with partition +# method: create partition, add vectors to it and call list_id_in_segment, check if the segment contains vectors +# expected: status ok +# ''' +# connect.create_partition(binary_collection, default_tag) +# nb = 10 +# vectors, entities = gen_binary_entities(nb) +# ids = connect.bulk_insert(binary_collection, entities, partition_name=default_tag) +# connect.flush([binary_collection]) +# stats = connect.get_collection_stats(binary_collection) +# vector_ids = connect.list_id_in_segment(binary_collection, stats["partitions"][1]["segments"][0]["id"]) +# # vector_ids should match ids +# assert len(vector_ids) == nb +# for i in range(nb): +# assert vector_ids[i] == ids[i] +# +# @pytest.fixture( +# scope="function", +# params=gen_binary_index() +# ) +# def get_jaccard_index(self, request, connect): +# logging.getLogger().info(request.param) +# if request.param["index_type"] in binary_support(): +# request.param["metric_type"] = "JACCARD" +# return request.param +# else: +# pytest.skip("not support") +# +# def test_list_id_in_segment_with_index_A(self, connect, binary_collection, get_jaccard_index): +# ''' +# target: get vector ids when there is index +# method: call list_id_in_segment and check if the segment contains vectors +# expected: status ok +# ''' +# ids, seg_id = get_segment_id(connect, binary_collection, nb=default_nb, index_params=get_jaccard_index, vec_type='binary') +# vector_ids = connect.list_id_in_segment(binary_collection, seg_id) +# # TODO: +# +# def test_list_id_in_segment_with_index_B(self, connect, binary_collection, get_jaccard_index): +# ''' +# target: get vector ids when there is index and with partition +# method: create partition, add vectors to it and call list_id_in_segment, check if the segment contains vectors +# expected: status ok +# ''' +# connect.create_partition(binary_collection, default_tag) +# ids = connect.bulk_insert(binary_collection, default_binary_entities, partition_name=default_tag) +# connect.flush([binary_collection]) +# stats = connect.get_collection_stats(binary_collection) +# assert stats["partitions"][1]["tag"] == default_tag +# vector_ids = connect.list_id_in_segment(binary_collection, stats["partitions"][1]["segments"][0]["id"]) +# # vector_ids should match ids +# # TODO +# +# def test_list_id_in_segment_after_delete_vectors(self, connect, binary_collection, get_jaccard_index): +# ''' +# target: get vector ids after vectors are deleted +# method: add vectors and delete a few, call list_id_in_segment +# expected: status ok, vector_ids decreased after vectors deleted +# ''' +# nb = 2 +# ids, seg_id = get_segment_id(connect, binary_collection, nb=nb, vec_type='binary', index_params=get_jaccard_index) +# delete_ids = [ids[0]] +# status = connect.delete_entity_by_id(binary_collection, 
delete_ids) +# connect.flush([binary_collection]) +# stats = connect.get_collection_stats(binary_collection) +# vector_ids = connect.list_id_in_segment(binary_collection, stats["partitions"][0]["segments"][0]["id"]) +# assert len(vector_ids) == 1 +# assert vector_ids[0] == ids[1] diff --git a/tests/python_client/testcases/entity/test_query.py b/tests/python_client/testcases/entity/test_query.py new file mode 100644 index 0000000000..5370bcec2c --- /dev/null +++ b/tests/python_client/testcases/entity/test_query.py @@ -0,0 +1,670 @@ +import pdb +import logging + +import pytest +from pymilvus import DataType + +import utils.utils as ut + +default_entities = ut.gen_entities(ut.default_nb, is_normal=True) +raw_vectors, default_binary_entities = ut.gen_binary_entities(ut.default_nb) +default_int_field_name = "int64" +default_float_field_name = "float" +default_pos = 5 +default_term_expr = f'{default_int_field_name} in {[i for i in range(default_pos)]}' + + +def init_data(connect, collection, nb=ut.default_nb, partition_names=None, auto_id=True): + """ + Generate entities and add it in collection + """ + if nb == 3000: + insert_entities = default_entities + else: + insert_entities = ut.gen_entities(nb, is_normal=True) + if partition_names is None: + if auto_id: + res = connect.insert(collection, insert_entities) + else: + res = connect.insert(collection, insert_entities, ids=[i for i in range(nb)]) + else: + if auto_id: + res = connect.insert(collection, insert_entities, partition_name=partition_names) + else: + res = connect.insert(collection, insert_entities, ids=[i for i in range(nb)], + partition_name=partition_names) + connect.flush([collection]) + ids = res.primary_keys + return insert_entities, ids + + +def init_binary_data(connect, collection, nb=3000, insert=True, partition_names=None): + """ + Generate entities and add it in collection + """ + ids = [] + # global binary_entities + global raw_vectors + if nb == 3000: + insert_entities = default_binary_entities + insert_raw_vectors = raw_vectors + else: + insert_raw_vectors, insert_entities = ut.gen_binary_entities(nb) + if insert is True: + if partition_names is None: + res = connect.insert(collection, insert_entities) + else: + res = connect.insert(collection, insert_entities, partition_name=partition_names) + connect.flush([collection]) + ids = res.primary_keys + return insert_raw_vectors, insert_entities, ids + + +class TestQueryBase: + """ + test Query interface + query(collection_name, expr, output_fields=None, partition_names=None, timeout=None) + """ + + @pytest.fixture( + scope="function", + params=ut.gen_invalid_strs() + ) + def get_collection_name(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=ut.gen_simple_index() + ) + def get_simple_index(self, request, connect): + return request.param + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_invalid(self, connect, collection): + """ + target: test query + method: query with term expr + expected: verify query result + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + connect.load_collection(collection) + term_expr = f'{default_int_field_name} in {entities[:default_pos]}' + with pytest.raises(Exception): + res = connect.query(collection, term_expr) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_valid(self, connect, collection): + """ + target: test query + method: query with term expr + expected: verify query result + """ + entities, ids = init_data(connect, collection) + 
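# NOTE: output_fields=["*", "%"] below is assumed to follow the old pymilvus
+        # wildcard convention, where "*" expands to all scalar fields and "%"
+        # to all vector fields
+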
assert len(ids) == ut.default_nb + connect.load_collection(collection) + term_expr = f'{default_int_field_name} in {ids[:default_pos]}' + res = connect.query(collection, term_expr, output_fields=["*", "%"]) + assert len(res) == default_pos + for _id, index in enumerate(ids[:default_pos]): + if res[index][default_int_field_name] == entities[0]["values"][index]: + assert res[index][default_float_field_name] == entities[1]["values"][index] + res = connect.query(collection, term_expr, output_fields=[ut.default_float_vec_field_name]) + assert len(res) == default_pos + for _id, index in enumerate(ids[:default_pos]): + if res[index][default_int_field_name] == entities[0]["values"][index]: + ut.assert_equal_vector(res[index][ut.default_float_vec_field_name], entities[2]["values"][index]) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_collection_not_existed(self, connect): + """ + target: test query not existed collection + method: query not existed collection + expected: raise exception + """ + collection = "not_exist" + with pytest.raises(Exception): + connect.query(collection, default_term_expr) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_without_connect(self, dis_connect, collection): + """ + target: test query without connection + method: close connect and query + expected: raise exception + """ + with pytest.raises(Exception): + dis_connect.query(collection, default_term_expr) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_invalid_collection_name(self, connect, get_collection_name): + """ + target: test query with invalid collection name + method: query with invalid collection name + expected: raise exception + """ + collection_name = get_collection_name + with pytest.raises(Exception): + connect.query(collection_name, default_term_expr) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_after_index(self, connect, collection, get_simple_index): + """ + target: test query after creating index + method: query after index + expected: query result is correct + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + connect.create_index(collection, ut.default_float_vec_field_name, get_simple_index) + connect.load_collection(collection) + term_expr = f'{default_int_field_name} in {ids[:default_pos]}' + res = connect.query(collection, term_expr, output_fields=["*", "%"]) + logging.getLogger().info(res) + assert len(res) == default_pos + for _id, index in enumerate(ids[:default_pos]): + if res[index][default_int_field_name] == entities[0]["values"][index]: + assert res[index][default_float_field_name] == entities[1]["values"][index] + ut.assert_equal_vector(res[index][ut.default_float_vec_field_name], entities[-1]["values"][index]) + + @pytest.mark.tags(ut.CaseLabel.L2) + def test_query_after_search(self, connect, collection): + """ + target: test query after search + method: query after search + expected: query result is correct + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + top_k = 10 + nq = 2 + query, _ = ut.gen_query_vectors(ut.default_float_vec_field_name, entities, top_k=top_k, nq=nq) + connect.load_collection(collection) + search_res = connect.search(collection, query) + assert len(search_res) == nq + assert len(search_res[0]) == top_k + term_expr = f'{default_int_field_name} in {ids[:default_pos]}' + res = connect.query(collection, term_expr, output_fields=["*", "%"]) + logging.getLogger().info(res) + assert len(res) == default_pos + for _id, index 
in enumerate(ids[:default_pos]): + if res[index][default_int_field_name] == entities[0]["values"][index]: + assert res[index][default_float_field_name] == entities[1]["values"][index] + ut.assert_equal_vector(res[index][ut.default_float_vec_field_name], entities[2]["values"][index]) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_empty_collection(self, connect, collection): + """ + target: test query empty collection + method: query on a empty collection + expected: todo + """ + connect.load_collection(collection) + res = connect.query(collection, default_term_expr) + logging.getLogger().info(res) + assert len(res) == 0 + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_without_loading(self, connect, collection): + """ + target: test query without loading + method: no loading before query + expected: raise exception + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + with pytest.raises(Exception): + connect.query(collection, default_term_expr) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_collection_not_primary_key(self, connect, collection): + """ + target: test query on collection that not on the primary field + method: 1.create collection with auto_id=True 2.query on the other field + expected: exception raised + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + connect.load_collection(collection) + term_expr = f'{default_float_field_name} in {entities[:default_pos]}' + with pytest.raises(Exception): + connect.query(collection, term_expr) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_expr_none(self, connect, collection): + """ + target: test query with none expr + method: query with expr None + expected: raise exception + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + connect.load_collection(collection) + with pytest.raises(Exception): + connect.query(collection, None) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + @pytest.mark.parametrize("expr", [1, "1", "12-s", "中文", [], {}, ()]) + def test_query_expr_invalid_string(self, connect, collection, expr): + """ + target: test query with non-string expr + method: query with non-string expr, eg 1, [] .. 
+ expected: raise exception + """ + # entities, ids = init_data(connect, collection) + # assert len(ids) == ut.default_nb + connect.load_collection(collection) + with pytest.raises(Exception): + connect.query(collection, expr) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_expr_not_existed_field(self, connect, collection): + """ + target: test query with not existed field + method: query by term expr with fake field + expected: raise exception + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + connect.load_collection(collection) + term_expr = 'field in [1, 2]' + with pytest.raises(Exception): + connect.query(collection, term_expr) + + @pytest.mark.parametrize("expr", [f'{default_int_field_name} inn [1, 2]', + f'{default_int_field_name} not in [1, 2]', + f'{default_int_field_name} in not [1, 2]']) + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_expr_wrong_term_keyword(self, connect, collection, expr): + """ + target: test query with wrong term expr keyword + method: query with wrong keyword term expr + expected: raise exception + """ + connect.load_collection(collection) + with pytest.raises(Exception): + connect.query(collection, expr) + + @pytest.mark.parametrize("expr", [f'{default_int_field_name} in 1', + f'{default_int_field_name} in "in"', + f'{default_int_field_name} in (mn)']) + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_expr_non_array_term(self, connect, collection, expr): + """ + target: test query with non-array term expr + method: query with non-array term expr + expected: raise exception + """ + connect.load_collection(collection) + with pytest.raises(Exception): + connect.query(collection, expr) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_expr_empty_term_array(self, connect, collection): + """ + target: test query with empty array term expr + method: query with empty term expr + expected: todo + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + connect.load_collection(collection) + term_expr = f'{default_int_field_name} in []' + res = connect.query(collection, term_expr) + assert len(res) == 0 + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_expr_single_term_array(self, connect, collection): + """ + target: test query with single array term expr + method: query with single array value + expected: query result is one entity + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + connect.load_collection(collection) + term_expr = f'{default_int_field_name} in [0]' + res = connect.query(collection, term_expr, output_fields=["*", "%"]) + assert len(res) == 1 + assert res[0][default_int_field_name] == entities[0]["values"][0] + assert res[0][default_float_field_name] == entities[1]["values"][0] + ut.assert_equal_vector(res[0][ut.default_float_vec_field_name], entities[2]["values"][0]) + + @pytest.mark.xfail(reason="#6072") + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_binary_expr_single_term_array(self, connect, binary_collection): + """ + target: test query with single array term expr + method: query with single array value + expected: query result is one entity + """ + _, binary_entities, ids = init_binary_data(connect, binary_collection) + assert len(ids) == ut.default_nb + connect.load_collection(binary_collection) + term_expr = f'{default_int_field_name} in [0]' + res = connect.query(binary_collection, term_expr, output_fields=["*", "%"]) + assert len(res) == 1 + 
assert res[0][default_int_field_name] == binary_entities[0]["values"][0]
+        assert res[0][default_float_field_name] == binary_entities[1]["values"][0]
+        assert res[0][ut.default_float_vec_field_name] == binary_entities[2]["values"][0]
+
+    @pytest.mark.tags(ut.CaseLabel.tags_smoke)
+    def test_query_expr_all_term_array(self, connect, collection):
+        """
+        target: test query with a term array that contains all ids
+        method: query with all primary key values in the term array
+        expected: verify query result
+        """
+        entities, ids = init_data(connect, collection)
+        assert len(ids) == ut.default_nb
+        connect.load_collection(collection)
+        term_expr = f'{default_int_field_name} in {ids}'
+        res = connect.query(collection, term_expr, output_fields=["*", "%"])
+        assert len(res) == ut.default_nb
+        for _id, index in enumerate(ids):
+            if res[index][default_int_field_name] == entities[0]["values"][index]:
+                assert res[index][default_float_field_name] == entities[1]["values"][index]
+                ut.assert_equal_vector(res[index][ut.default_float_vec_field_name], entities[2]["values"][index])
+
+    @pytest.mark.tags(ut.CaseLabel.tags_smoke)
+    def test_query_expr_repeated_term_array(self, connect, collection):
+        """
+        target: test query with repeated values in the term array on a unique primary field
+        method: query with a term array that repeats the same value
+        expected: verify query result
+        """
+        entities, ids = init_data(connect, collection)
+        assert len(ids) == ut.default_nb
+        connect.load_collection(collection)
+        int_values = [0, 0]
+        term_expr = f'{default_int_field_name} in {int_values}'
+        res = connect.query(collection, term_expr)
+        assert len(res) == 2
+
+    @pytest.mark.tags(ut.CaseLabel.tags_smoke)
+    def test_query_expr_inconstant_term_array(self, connect, collection):
+        """
+        target: test query with a term expr whose field type and array values are inconsistent
+        method: query an int field with float values
+        expected: raise exception
+        """
+        entities, ids = init_data(connect, collection)
+        assert len(ids) == ut.default_nb
+        connect.load_collection(collection)
+        expr = f'{default_int_field_name} in [1.0, 2.0]'
+        with pytest.raises(Exception):
+            connect.query(collection, expr)
+
+    @pytest.mark.tags(ut.CaseLabel.tags_smoke)
+    def test_query_expr_mix_term_array(self, connect, collection):
+        """
+        target: test query with mixed-type values in the term expr
+        method: query with a term expr that has both int and float values
+        expected: raise exception
+        """
+        entities, ids = init_data(connect, collection)
+        assert len(ids) == ut.default_nb
+        connect.load_collection(collection)
+        expr = f'{default_int_field_name} in [1, 2.]'
+        with pytest.raises(Exception):
+            connect.query(collection, expr)
+
+    @pytest.mark.parametrize("constant", [[1], (), {}])
+    @pytest.mark.tags(ut.CaseLabel.tags_smoke)
+    def test_query_expr_non_constant_array_term(self, connect, collection, constant):
+        """
+        target: test query with a non-constant array term expr
+        method: query with a non-constant value inside the term array
+        expected: raise exception
+        """
+        entities, ids = init_data(connect, collection)
+        assert len(ids) == ut.default_nb
+        connect.load_collection(collection)
+        expr = f'{default_int_field_name} in [{constant}]'
+        with pytest.raises(Exception):
+            connect.query(collection, expr)
+
+    @pytest.mark.tags(ut.CaseLabel.tags_smoke)
+    def test_query_output_field_empty(self, connect, collection):
+        """
+        target: test query with an empty output_fields list
+        method: query with output_fields=[]
+        expected: only the primary field is returned
+        """
+        entities, ids = init_data(connect, collection)
+        assert len(ids) == ut.default_nb
+        connect.load_collection(collection)
+        res = 
connect.query(collection, default_term_expr, output_fields=[]) + assert default_int_field_name in res[0].keys() + assert default_float_field_name not in res[0].keys() + assert ut.default_float_vec_field_name not in res[0].keys() + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_output_one_field(self, connect, collection): + """ + target: test query with output one field + method: query with output one field + expected: return one field + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + connect.load_collection(collection) + res = connect.query(collection, default_term_expr, output_fields=[default_int_field_name]) + assert default_int_field_name in res[0].keys() + assert len(res[0].keys()) == 1 + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_output_all_fields(self, connect, collection): + """ + target: test query with none output field + method: query with output field=None + expected: return all fields + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + connect.load_collection(collection) + # fields = [default_int_field_name, default_float_field_name, ut.default_float_vec_field_name] + fields = [default_int_field_name, default_float_field_name] + res = connect.query(collection, default_term_expr, output_fields=fields) + for field in fields: + assert field in res[0].keys() + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_output_not_existed_field(self, connect, collection): + """ + target: test query output not existed field + method: query with not existed output field + expected: raise exception + """ + entities, ids = init_data(connect, collection) + connect.load_collection(collection) + with pytest.raises(Exception): + connect.query(collection, default_term_expr, output_fields=["int"]) + + # @pytest.mark.xfail(reason="#6074") + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_output_part_not_existed_field(self, connect, collection): + """ + target: test query output part not existed field + method: query with part not existed field + expected: raise exception + """ + entities, ids = init_data(connect, collection) + connect.load_collection(collection) + with pytest.raises(Exception): + connect.query(collection, default_term_expr, output_fields=[default_int_field_name, "int"]) + + @pytest.mark.parametrize("fields", ut.gen_invalid_strs()) + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_invalid_output_fields(self, connect, collection, fields): + """ + target: test query with invalid output fields + method: query with invalid field fields + expected: raise exception + """ + entities, ids = init_data(connect, collection) + connect.load_collection(collection) + with pytest.raises(Exception): + connect.query(collection, default_term_expr, output_fields=[fields]) + + +class TestQueryPartition: + """ + test Query interface + query(collection_name, expr, output_fields=None, partition_names=None, timeout=None) + """ + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_partition(self, connect, collection): + """ + target: test query on partition + method: create a partition and query + expected: verify query result + """ + connect.create_partition(collection, ut.default_tag) + entities, ids = init_data(connect, collection, partition_names=ut.default_tag) + assert len(ids) == ut.default_nb + connect.load_partitions(collection, [ut.default_tag]) + res = connect.query(collection, default_term_expr, partition_names=[ut.default_tag], 
output_fields=["*", "%"]) + for _id, index in enumerate(ids[:default_pos]): + if res[index][default_int_field_name] == entities[0]["values"][index]: + assert res[index][default_float_field_name] == entities[1]["values"][index] + ut.assert_equal_vector(res[index][ut.default_float_vec_field_name], entities[2]["values"][index]) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_partition_without_loading(self, connect, collection): + """ + target: test query on partition without loading + method: query on partition and no loading + expected: raise exception + """ + connect.create_partition(collection, ut.default_tag) + entities, ids = init_data(connect, collection, partition_names=ut.default_tag) + assert len(ids) == ut.default_nb + with pytest.raises(Exception): + connect.query(collection, default_term_expr, partition_names=[ut.default_tag]) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_default_partition(self, connect, collection): + """ + target: test query on default partition + method: query on default partition + expected: verify query result + """ + entities, ids = init_data(connect, collection) + assert len(ids) == ut.default_nb + connect.load_collection(collection) + res = connect.query(collection, default_term_expr, partition_names=[ut.default_partition_name], output_fields=["*", "%"]) + for _id, index in enumerate(ids[:default_pos]): + if res[index][default_int_field_name] == entities[0]["values"][index]: + assert res[index][default_float_field_name] == entities[1]["values"][index] + ut.assert_equal_vector(res[index][ut.default_float_vec_field_name], entities[2]["values"][index]) + + @pytest.mark.tags(ut.CaseLabel.L2) + def test_query_empty_partition(self, connect, collection): + """ + target: test query on empty partition + method: query on a empty collection + expected: empty query result + """ + connect.create_partition(collection, ut.default_tag) + connect.load_partitions(collection, [ut.default_partition_name]) + res = connect.query(collection, default_term_expr, partition_names=[ut.default_partition_name]) + assert len(res) == 0 + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_not_existed_partition(self, connect, collection): + """ + target: test query on a not existed partition + method: query on not existed partition + expected: raise exception + """ + connect.load_partitions(collection, [ut.default_partition_name]) + tag = ut.gen_unique_str() + with pytest.raises(Exception): + connect.query(collection, default_term_expr, partition_names=[tag]) + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_partition_repeatedly(self, connect, collection): + """ + target: test query repeatedly on partition + method: query on partition twice + expected: verify query result + """ + connect.create_partition(collection, ut.default_tag) + entities, ids = init_data(connect, collection, partition_names=ut.default_tag) + assert len(ids) == ut.default_nb + connect.load_partitions(collection, [ut.default_tag]) + res_one = connect.query(collection, default_term_expr, partition_names=[ut.default_tag]) + res_two = connect.query(collection, default_term_expr, partition_names=[ut.default_tag]) + assert res_one == res_two + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_another_partition(self, connect, collection): + """ + target: test query another partition + method: 1. insert entities into two partitions + 2. 
query on one partition and query result empty + expected: query result is empty + """ + insert_entities_into_two_partitions_in_half(connect, collection) + half = ut.default_nb // 2 + term_expr = f'{default_int_field_name} in [{half}]' + res = connect.query(collection, term_expr, partition_names=[ut.default_tag]) + assert len(res) == 0 + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_multi_partitions_multi_results(self, connect, collection): + """ + target: test query on multi partitions and get multi results + method: 1.insert entities into two partitions + 2.query on two partitions and query multi result + expected: query results from two partitions + """ + entities, entities_2 = insert_entities_into_two_partitions_in_half(connect, collection) + half = ut.default_nb // 2 + term_expr = f'{default_int_field_name} in [{half - 1}]' + res = connect.query(collection, term_expr, partition_names=[ut.default_tag, ut.default_partition_name]) + assert len(res) == 1 + assert res[0][default_int_field_name] == entities[0]["values"][-1] + term_expr = f'{default_int_field_name} in [{half}]' + res = connect.query(collection, term_expr, partition_names=[ut.default_tag, ut.default_partition_name]) + assert len(res) == 1 + assert res[0][default_int_field_name] == entities_2[0]["values"][0] + + @pytest.mark.tags(ut.CaseLabel.tags_smoke) + def test_query_multi_partitions_single_result(self, connect, collection): + """ + target: test query on multi partitions and get single result + method: 1.insert into two partitions + 2.query on two partitions and query single result + expected: query from two partitions and get single result + """ + entities, entities_2 = insert_entities_into_two_partitions_in_half(connect, collection) + half = ut.default_nb // 2 + term_expr = f'{default_int_field_name} in [{half}]' + res = connect.query(collection, term_expr, partition_names=[ut.default_tag, ut.default_partition_name]) + assert len(res) == 1 + assert res[0][default_int_field_name] == entities_2[0]["values"][0] + + +def insert_entities_into_two_partitions_in_half(connect, collection): + """ + insert default entities into two partitions(default_tag and _default) in half(int64 and float fields values) + :param connect: milvus connect + :param collection: milvus created collection + :return: entities of default_tag and entities_2 of _default + """ + connect.create_partition(collection, ut.default_tag) + half = ut.default_nb // 2 + entities, _ = init_data(connect, collection, nb=half, partition_names=ut.default_tag) + vectors = ut.gen_vectors(half, ut.default_dim) + entities_2 = [ + {"name": "int64", "type": DataType.INT64, "values": [i for i in range(half, ut.default_nb)]}, + {"name": "float", "type": DataType.FLOAT, "values": [float(i) for i in range(half, ut.default_nb)]}, + {"name": ut.default_float_vec_field_name, "type": DataType.FLOAT_VECTOR, "values": vectors} + ] + connect.insert(collection, entities_2) + connect.flush([collection]) + connect.load_collection(collection) + return entities, entities_2 diff --git a/tests/python_client/testcases/entity/test_search.py b/tests/python_client/testcases/entity/test_search.py new file mode 100644 index 0000000000..3c017bd432 --- /dev/null +++ b/tests/python_client/testcases/entity/test_search.py @@ -0,0 +1,1859 @@ +import time +import pdb +import copy +import logging +from multiprocessing import Pool, Process +import pytest +import numpy as np + +from pymilvus import DataType +from utils.utils import * +from common.constants import * + +uid = "test_search" +nq = 1 
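+# nq is the number of query vectors sent per search request; epsilon (defined
+# just below) is the tolerance used when asserting that an inserted vector is
+# returned as its own nearest neighbour at distance ~0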
+epsilon = 0.001 +field_name = default_float_vec_field_name +binary_field_name = default_binary_vec_field_name +search_param = {"nprobe": 1} + +entity = gen_entities(1, is_normal=True) +entities = gen_entities(default_nb, is_normal=True) +raw_vectors, binary_entities = gen_binary_entities(default_nb) +default_query, default_query_vecs = gen_query_vectors(field_name, entities, default_top_k, nq) +default_binary_query, default_binary_query_vecs = gen_query_vectors(binary_field_name, binary_entities, default_top_k, + nq) + + +def init_data(connect, collection, nb=3000, partition_names=None, auto_id=True): + ''' + Generate entities and add it in collection + ''' + global entities + if nb == 3000: + insert_entities = entities + else: + insert_entities = gen_entities(nb, is_normal=True) + if partition_names is None: + res = connect.insert(collection, insert_entities) + else: + res = connect.insert(collection, insert_entities, partition_name=partition_names) + connect.flush([collection]) + ids = res.primary_keys + return insert_entities, ids + + +def init_binary_data(connect, collection, nb=3000, insert=True, partition_names=None): + ''' + Generate entities and add it in collection + ''' + ids = [] + global binary_entities + global raw_vectors + if nb == 3000: + insert_entities = binary_entities + insert_raw_vectors = raw_vectors + else: + insert_raw_vectors, insert_entities = gen_binary_entities(nb) + if insert is True: + if partition_names is None: + res = connect.insert(collection, insert_entities) + else: + res = connect.insert(collection, insert_entities, partition_name=partition_names) + connect.flush([collection]) + ids = res.primary_keys + return insert_raw_vectors, insert_entities, ids + + +class TestSearchBase: + """ + generate valid create_index params + """ + + @pytest.fixture( + scope="function", + params=gen_index() + ) + def get_index(self, request, connect): + # if str(connect._cmd("mode")) == "CPU": + # if request.param["index_type"] in index_cpu_not_support(): + # pytest.skip("sq8h not support in CPU mode") + return request.param + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + # if str(connect._cmd("mode")) == "CPU": + # if request.param["index_type"] in index_cpu_not_support(): + # pytest.skip("sq8h not support in CPU mode") + return copy.deepcopy(request.param) + + @pytest.fixture( + scope="function", + params=gen_binary_index() + ) + def get_jaccard_index(self, request, connect): + logging.getLogger().info(request.param) + if request.param["index_type"] in binary_support(): + return request.param + # else: + # pytest.skip("Skip index Temporary") + + @pytest.fixture( + scope="function", + params=gen_binary_index() + ) + def get_hamming_index(self, request, connect): + logging.getLogger().info(request.param) + if request.param["index_type"] in binary_support(): + return request.param + # else: + # pytest.skip("Skip index Temporary") + + @pytest.fixture( + scope="function", + params=gen_binary_index() + ) + def get_structure_index(self, request, connect): + logging.getLogger().info(request.param) + if request.param["index_type"] == "FLAT": + return request.param + # else: + # pytest.skip("Skip index Temporary") + + """ + generate top-k params + """ + + @pytest.fixture( + scope="function", + params=[1, 10] + ) + def get_top_k(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=[1, 10, 1100] + ) + def get_nq(self, request): + yield request.param + + 
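# The cases below share one pattern: build a request with
+    # gen_query_vectors(field, entities, top_k, nq) and pass it to connect.search().
+    # A sketch of the DSL shape the helper is assumed to build (illustrative only):
+    #
+    #     query = {"bool": {"must": [{"vector": {field_name: {
+    #         "topk": top_k, "query": vecs, "metric_type": "L2",
+    #         "params": {"nprobe": 1}}}}]}}
+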
@pytest.mark.tags(CaseLabel.tags_smoke) + def test_search_flat(self, connect, collection, get_top_k, get_nq): + ''' + target: test basic search function, all the search params is correct, change top-k value + method: search with the given vectors, check the result + expected: the length of the result is top_k + ''' + top_k = get_top_k + nq = get_nq + entities, ids = init_data(connect, collection) + query, vecs = gen_query_vectors(field_name, entities, top_k, nq) + if top_k <= max_top_k: + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res[0]) == top_k + assert res[0]._distances[0] <= epsilon + assert check_id_result(res[0], ids[0]) + else: + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.L2) + def test_search_flat_top_k(self, connect, collection, get_nq): + ''' + target: test basic search function, all the search params is correct, change top-k value + method: search with the given vectors, check the result + expected: the length of the result is top_k + ''' + top_k = 16385 + nq = get_nq + entities, ids = init_data(connect, collection) + query, vecs = gen_query_vectors(field_name, entities, top_k, nq) + if top_k <= max_top_k: + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res[0]) == top_k + assert res[0]._distances[0] <= epsilon + assert check_id_result(res[0], ids[0]) + else: + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.skip("r0.3-test") + def _test_search_field(self, connect, collection, get_top_k, get_nq): + ''' + target: test basic search function, all the search params is correct, change top-k value + method: search with the given vectors, check the result + expected: the length of the result is top_k + ''' + top_k = get_top_k + nq = get_nq + entities, ids = init_data(connect, collection) + query, vecs = gen_query_vectors(field_name, entities, top_k, nq) + if top_k <= max_top_k: + connect.load_collection(collection) + res = connect.search(collection, query, fields=["float_vector"]) + assert len(res[0]) == top_k + assert res[0]._distances[0] <= epsilon + assert check_id_result(res[0], ids[0]) + res = connect.search(collection, query, fields=["float"]) + for i in range(nq): + assert entities[1]["values"][:nq][i] in [r.entity.get('float') for r in res[i]] + else: + with pytest.raises(Exception): + connect.search(collection, query) + + def _test_search_after_delete(self, connect, collection, get_top_k, get_nq): + ''' + target: test basic search function before and after deletion, all the search params is + correct, change top-k value. + check issue #4200 + method: search with the given vectors, check the result + expected: the deleted entities do not exist in the result. 
+ ''' + top_k = get_top_k + nq = get_nq + + entities, ids = init_data(connect, collection, nb=10000) + first_int64_value = entities[0]["values"][0] + first_vector = entities[2]["values"][0] + + search_param = get_search_param("FLAT") + query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param) + vecs[:] = [] + vecs.append(first_vector) + + res = None + if top_k > max_top_k: + with pytest.raises(Exception): + connect.search(collection, query, fields=['int64']) + # pytest.skip("top_k value is larger than max_topp_k") + pass + else: + res = connect.search(collection, query, fields=['int64']) + assert len(res) == 1 + assert len(res[0]) >= top_k + assert res[0][0].id == ids[0] + assert res[0][0].entity.get("int64") == first_int64_value + assert res[0]._distances[0] < epsilon + assert check_id_result(res[0], ids[0]) + + connect.delete_entity_by_id(collection, ids[:1]) + connect.flush([collection]) + + res2 = connect.search(collection, query, fields=['int64']) + assert len(res2) == 1 + assert len(res2[0]) >= top_k + assert res2[0][0].id != ids[0] + if top_k > 1: + assert res2[0][0].id == res[0][1].id + assert res2[0][0].entity.get("int64") == res[0][1].entity.get("int64") + + @pytest.mark.tags(CaseLabel.L2) + def test_search_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq): + ''' + target: test basic search function, all the search params is correct, test all index params, and build + method: search with the given vectors, check the result + expected: the length of the result is top_k + ''' + top_k = get_top_k + nq = get_nq + + index_type = get_simple_index["index_type"] + if index_type in skip_pq(): + pytest.skip("Skip PQ") + entities, ids = init_data(connect, collection) + connect.create_index(collection, field_name, get_simple_index) + search_param = get_search_param(index_type) + query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param) + if top_k > max_top_k: + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + else: + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) >= top_k + assert res[0]._distances[0] < epsilon + assert check_id_result(res[0], ids[0]) + + @pytest.mark.tags(CaseLabel.L2) + def test_search_after_index_different_metric_type(self, connect, collection, get_simple_index): + ''' + target: test search with different metric_type + method: build index with L2, and search using IP + expected: search ok + ''' + search_metric_type = "IP" + index_type = get_simple_index["index_type"] + entities, ids = init_data(connect, collection) + connect.create_index(collection, field_name, get_simple_index) + search_param = get_search_param(index_type) + query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, metric_type=search_metric_type, + search_params=search_param) + connect.load_collection(collection) + if index_type == "FLAT": + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) == default_top_k + assert res[0]._distances[0] > res[0]._distances[default_top_k - 1] + else: + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.L2) + def test_search_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq): + ''' + target: test basic search function, all the search params is correct, test all index params, and build + method: add vectors into collection, search 
with the given vectors, check the result + expected: the length of the result is top_k, search collection with partition tag return empty + ''' + top_k = get_top_k + nq = get_nq + + index_type = get_simple_index["index_type"] + if index_type in skip_pq(): + pytest.skip("Skip PQ") + connect.create_partition(collection, default_tag) + entities, ids = init_data(connect, collection) + connect.create_index(collection, field_name, get_simple_index) + search_param = get_search_param(index_type) + query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param) + if top_k > max_top_k: + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + else: + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) >= top_k + assert res[0]._distances[0] < epsilon + assert check_id_result(res[0], ids[0]) + connect.release_collection(collection) + connect.load_partitions(collection, [default_tag]) + res = connect.search(collection, query, partition_names=[default_tag]) + assert len(res[0]) == 0 + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(600) + def test_search_index_partition(self, connect, collection, get_simple_index, get_top_k, get_nq): + ''' + target: test basic search function, all the search params is correct, test all index params, and build + method: search with the given vectors, check the result + expected: the length of the result is top_k + ''' + top_k = get_top_k + nq = get_nq + + index_type = get_simple_index["index_type"] + if index_type in skip_pq(): + pytest.skip("Skip PQ") + connect.create_partition(collection, default_tag) + entities, ids = init_data(connect, collection, partition_names=default_tag) + connect.create_index(collection, field_name, get_simple_index) + search_param = get_search_param(index_type) + query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param) + if top_k > max_top_k: + with pytest.raises(Exception) as e: + res = connect.search(collection, query, partition_names=[default_tag]) + else: + connect.load_partitions(collection, [default_tag]) + res = connect.search(collection, query, partition_names=[default_tag]) + assert len(res) == nq + assert len(res[0]) == top_k + assert res[0]._distances[0] < epsilon + assert check_id_result(res[0], ids[0]) + + + @pytest.mark.tags(CaseLabel.L2) + def test_search_index_partition_not_existed(self, connect, collection, get_top_k, get_nq, get_simple_index): + ''' + target: test basic search function, all the search params is correct, test all index params, and build + method: search with the given vectors and tag (tag name not existed in collection), check the result + expected: error raised + ''' + top_k = get_top_k + nq = get_nq + entities, ids = init_data(connect, collection) + connect.create_index(collection, field_name, get_simple_index) + query, vecs = gen_query_vectors(field_name, entities, top_k, nq) + if top_k > max_top_k: + with pytest.raises(Exception) as e: + res = connect.search(collection, query, partition_names=["new_tag"]) + else: + connect.load_collection(collection) + with pytest.raises(Exception) as e: + connect.search(collection, query, partition_names=["new_tag"]) + + @pytest.mark.tags(CaseLabel.L2) + def test_search_index_partitions(self, connect, collection, get_simple_index, get_top_k): + ''' + target: test basic search function, all the search params is correct, test all index params, and build + method: search collection with the given vectors and 
tags, check the result + expected: the length of the result is top_k + ''' + top_k = get_top_k + nq = 2 + new_tag = "new_tag" + index_type = get_simple_index["index_type"] + if index_type in skip_pq(): + pytest.skip("Skip PQ") + connect.create_partition(collection, default_tag) + connect.create_partition(collection, new_tag) + entities, ids = init_data(connect, collection, partition_names=default_tag) + new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag) + connect.create_index(collection, field_name, get_simple_index) + search_param = get_search_param(index_type) + query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param) + if top_k > max_top_k: + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + else: + connect.load_collection(collection) + res = connect.search(collection, query) + assert check_id_result(res[0], ids[0]) + assert not check_id_result(res[1], new_ids[0]) + assert res[0]._distances[0] < epsilon + assert res[1]._distances[0] < epsilon + res = connect.search(collection, query, partition_names=[new_tag]) + assert res[0]._distances[0] > epsilon + assert res[1]._distances[0] > epsilon + connect.release_collection(collection) + + @pytest.mark.tags(CaseLabel.L2) + def test_search_index_partitions_B(self, connect, collection, get_simple_index, get_top_k): + ''' + target: test basic search function, all the search params is correct, test all index params, and build + method: search collection with the given vectors and tags, check the result + expected: the length of the result is top_k + ''' + top_k = get_top_k + nq = 2 + tag = "tag" + new_tag = "new_tag" + index_type = get_simple_index["index_type"] + if index_type in skip_pq(): + pytest.skip("Skip PQ") + connect.create_partition(collection, tag) + connect.create_partition(collection, new_tag) + entities, ids = init_data(connect, collection, partition_names=tag) + new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag) + connect.create_index(collection, field_name, get_simple_index) + search_param = get_search_param(index_type) + print(f'entities[-1]["values"][:1]: {entities[-1]["values"][:1]}') + print(f'new_entities[-1]["values"][:1]: {new_entities[-1]["values"][:1]}') + query, vecs = gen_query_vectors(field_name, new_entities, top_k, nq, search_params=search_param, + replace_vecs=[entities[-1]["values"][:1][0], new_entities[-1]["values"][:1][0]]) + if top_k > max_top_k: + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + else: + connect.load_collection(collection) + res = connect.search(collection, query, partition_names=["(.*)tag"]) + assert check_id_result(res[0], ids[0]) + assert check_id_result(res[0], new_ids[0]) + assert res[0]._distances[0] < epsilon + assert res[1]._distances[0] < epsilon + res = connect.search(collection, query, partition_names=["new(.*)"]) + assert not check_id_result(res[0], ids[0]) + assert check_id_result(res[1], new_ids[0]) + assert res[0]._distances[0] > epsilon + assert res[1]._distances[0] < epsilon + connect.release_collection(collection) + + @pytest.mark.tags(CaseLabel.L2) + def test_search_ip_flat(self, connect, collection, get_simple_index, get_top_k, get_nq): + ''' + target: test basic search function, all the search params is correct, change top-k value + method: search with the given vectors, check the result + expected: the length of the result is top_k + ''' + top_k = get_top_k + nq = get_nq + entities, ids = 
init_data(connect, collection) + query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP") + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res[0]) == top_k + assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0]) + assert check_id_result(res[0], ids[0]) + + @pytest.mark.tags(CaseLabel.L2) + def test_search_ip_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq): + ''' + target: test basic search function, all the search params is correct, test all index params, and build + method: search with the given vectors, check the result + expected: the length of the result is top_k + ''' + top_k = get_top_k + nq = get_nq + + index_type = get_simple_index["index_type"] + if index_type in skip_pq(): + pytest.skip("Skip PQ") + entities, ids = init_data(connect, collection) + get_simple_index["metric_type"] = "IP" + connect.create_index(collection, field_name, get_simple_index) + search_param = get_search_param(index_type) + query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) >= top_k + assert check_id_result(res[0], ids[0]) + assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0]) + + @pytest.mark.tags(CaseLabel.L2) + def test_search_ip_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq): + ''' + target: test basic search function, all the search params is correct, test all index params, and build + method: add vectors into collection, search with the given vectors, check the result + expected: the length of the result is top_k, search collection with partition tag return empty + ''' + top_k = get_top_k + nq = get_nq + metric_type = "IP" + index_type = get_simple_index["index_type"] + if index_type in skip_pq(): + pytest.skip("Skip PQ") + connect.create_partition(collection, default_tag) + entities, ids = init_data(connect, collection) + get_simple_index["metric_type"] = metric_type + connect.create_index(collection, field_name, get_simple_index) + search_param = get_search_param(index_type) + query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type=metric_type, + search_params=search_param) + if top_k > max_top_k: + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + else: + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) >= top_k + assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0]) + assert check_id_result(res[0], ids[0]) + res = connect.search(collection, query, partition_names=[default_tag]) + assert len(res[0]) == 0 + + @pytest.mark.tags(CaseLabel.L2) + def test_search_ip_index_partitions(self, connect, collection, get_simple_index, get_top_k): + ''' + target: test basic search function, all the search params is correct, test all index params, and build + method: search collection with the given vectors and tags, check the result + expected: the length of the result is top_k + ''' + top_k = get_top_k + nq = 2 + metric_type = "IP" + new_tag = "new_tag" + index_type = get_simple_index["index_type"] + if index_type in skip_pq(): + pytest.skip("Skip PQ") + connect.create_partition(collection, default_tag) + connect.create_partition(collection, new_tag) + entities, ids = init_data(connect, collection, 
partition_names=default_tag)
+        new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
+        get_simple_index["metric_type"] = metric_type
+        connect.create_index(collection, field_name, get_simple_index)
+        search_param = get_search_param(index_type)
+        query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert check_id_result(res[0], ids[0])
+        assert not check_id_result(res[1], new_ids[0])
+        assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
+        assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
+        res = connect.search(collection, query, partition_names=["new_tag"])
+        assert res[0]._distances[0] < 1 - gen_inaccuracy(res[0]._distances[0])
+        # TODO:
+        # assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_without_connect(self, dis_connect, collection):
+        '''
+        target: test search vectors without connection
+        method: use a disconnected client instance and call the search method
+        expected: raise exception
+        '''
+        with pytest.raises(Exception) as e:
+            res = dis_connect.search(collection, default_query)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_collection_not_existed(self, connect):
+        '''
+        target: search a collection that does not exist
+        method: search with a random collection_name that is not in the db
+        expected: status not ok
+        '''
+        collection_name = gen_unique_str(uid)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection_name, default_query)
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_search_distance_l2(self, connect, collection):
+        '''
+        target: search collection, and check the result: distance
+        method: compare the returned distance value with the value computed with Euclidean (L2)
+        expected: the returned distance equals the computed value
+        '''
+        nq = 2
+        search_param = {"nprobe": 1}
+        entities, ids = init_data(connect, collection, nb=nq)
+        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
+                                        search_params=search_param)
+        inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
+                                                      search_params=search_param)
+        distance_0 = l2(vecs[0], inside_vecs[0])
+        distance_1 = l2(vecs[0], inside_vecs[1])
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
+        '''
+        target: search collection, and check the result: distance
+        method: compare the returned distance value with the value computed with Euclidean (L2)
+        expected: the returned distance equals the computed value
+        '''
+        index_type = get_simple_index["index_type"]
+        nq = 2
+        entities, ids = init_data(connect, id_collection, auto_id=False)
+        connect.create_index(id_collection, field_name, get_simple_index)
+        search_param = get_search_param(index_type)
+        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
+                                        search_params=search_param)
+        inside_vecs = entities[-1]["values"]
+        min_distance = 1.0
+        min_id = None
+        for i in range(default_nb):
+            tmp_dis = l2(vecs[0], inside_vecs[i])
+            if min_distance > tmp_dis:
+                min_distance = tmp_dis
+                min_id = ids[i]
+        connect.load_collection(id_collection)
+        res = connect.search(id_collection, query)
+        tmp_epsilon = epsilon
+        check_id_result(res[0], min_id)
+        # if index_type in ["ANNOY", "IVF_PQ"]:
+        #     tmp_epsilon = 0.1
+        # TODO:
+        # assert abs(np.sqrt(res[0]._distances[0]) - min_distance) <= tmp_epsilon
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_distance_ip(self, connect, collection):
+        '''
+        target: search collection, and check the result: distance
+        method: compare the returned distance value with the value computed with inner product
+        expected: the returned distance equals the computed value
+        '''
+        nq = 2
+        metric_type = "IP"
+        search_param = {"nprobe": 1}
+        entities, ids = init_data(connect, collection, nb=nq)
+        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
+                                        metric_type=metric_type,
+                                        search_params=search_param)
+        inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
+                                                      search_params=search_param)
+        distance_0 = ip(vecs[0], inside_vecs[0])
+        distance_1 = ip(vecs[0], inside_vecs[1])
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
+        '''
+        target: search collection, and check the result: distance
+        method: compare the returned distance value with the value computed with inner product
+        expected: the returned distance equals the computed value
+        '''
+        index_type = get_simple_index["index_type"]
+        nq = 2
+        metric_type = "IP"
+        entities, ids = init_data(connect, id_collection, auto_id=False)
+        get_simple_index["metric_type"] = metric_type
+        connect.create_index(id_collection, field_name, get_simple_index)
+        search_param = get_search_param(index_type)
+        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
+                                        metric_type=metric_type,
+                                        search_params=search_param)
+        inside_vecs = entities[-1]["values"]
+        max_distance = 0
+        max_id = None
+        for i in range(default_nb):
+            tmp_dis = ip(vecs[0], inside_vecs[i])
+            if max_distance < tmp_dis:
+                max_distance = tmp_dis
+                max_id = ids[i]
+        connect.load_collection(id_collection)
+        res = connect.search(id_collection, query)
+        tmp_epsilon = epsilon
+        check_id_result(res[0], max_id)
+        # if index_type in ["ANNOY", "IVF_PQ"]:
+        #     tmp_epsilon = 0.1
+        # TODO:
+        # assert abs(res[0]._distances[0] - max_distance) <= tmp_epsilon
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_search_distance_jaccard_flat_index(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: compare the returned distance value with the value computed with Jaccard
+        expected: the returned distance equals the computed value
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        distance_0 = jaccard(query_int_vectors[0], int_vectors[0])
+        distance_1 = jaccard(query_int_vectors[0], int_vectors[1])
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="JACCARD")
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_binary_flat_with_L2(self, connect, binary_collection):
+        '''
+        target: search binary_collection with an incompatible metric type
+        method: search binary vectors with the L2 metric type, which is invalid for binary data
+        expected: exception raised
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="L2")
+        with pytest.raises(Exception) as e:
+            connect.search(binary_collection, query)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_distance_hamming_flat_index(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: compare the returned distance value with the value computed with Hamming
+        expected: the returned distance equals the computed value
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        distance_0 = hamming(query_int_vectors[0], int_vectors[0])
+        distance_1 = hamming(query_int_vectors[0], int_vectors[1])
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="HAMMING")
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert abs(res[0][0].distance - min(distance_0, distance_1).astype(float)) <= epsilon
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_distance_substructure_flat_index(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: search with new random binary entities and the SUBSTRUCTURE metric type
+        expected: random query vectors are not substructures of the inserted vectors, so the result is empty
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        distance_0 = substructure(query_int_vectors[0], int_vectors[0])
+        distance_1 = substructure(query_int_vectors[0], int_vectors[1])
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
+                                        metric_type="SUBSTRUCTURE")
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert len(res[0]) == 0
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_distance_substructure_flat_index_B(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: search with entities that are substructures of the inserted entities
+        expected: each query's top hit is the entity it was generated from, with distance ~0
+        '''
+        top_k = 3
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_vecs = gen_binary_sub_vectors(int_vectors, 2)
+        query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUBSTRUCTURE",
+                                        replace_vecs=query_vecs)
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert res[0][0].distance <= epsilon
+        assert res[0][0].id == ids[0]
+        assert res[1][0].distance <= epsilon
+        assert res[1][0].id == ids[1]
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_distance_superstructure_flat_index(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: search with new random binary entities and the SUPERSTRUCTURE metric type
+        expected: random query vectors are not superstructures of the inserted vectors, so the result is empty
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        distance_0 = superstructure(query_int_vectors[0], int_vectors[0])
+        distance_1 = superstructure(query_int_vectors[0], int_vectors[1])
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
+                                        metric_type="SUPERSTRUCTURE")
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert len(res[0]) == 0
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_distance_superstructure_flat_index_B(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: search with entities that are superstructures of the inserted entities (SUPERSTRUCTURE metric)
+        expected: both inserted entities are returned for each query, with distance ~0
+        '''
+        top_k = 3
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_vecs = gen_binary_super_vectors(int_vectors, 2)
+        query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUPERSTRUCTURE",
+                                        replace_vecs=query_vecs)
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert len(res[0]) == 2
+        assert len(res[1]) == 2
+        assert res[0][0].id in ids
+        assert res[0][0].distance <= epsilon
+        assert res[1][0].id in ids
+        assert res[1][0].distance <= epsilon
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_distance_tanimoto_flat_index(self, connect, binary_collection):
+        '''
+        target: search binary_collection, and check the result: distance
+        method: compare the returned distance value with the value computed with Tanimoto
+        expected: the returned distance equals the computed value
+        '''
+        nq = 1
+        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
+        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
+        distance_0 = tanimoto(query_int_vectors[0], int_vectors[0])
+        distance_1 = tanimoto(query_int_vectors[0], int_vectors[1])
+        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="TANIMOTO")
+        connect.load_collection(binary_collection)
+        res = connect.search(binary_collection, query)
+        assert abs(res[0][0].distance - min(distance_0, distance_1)) <= epsilon
+
+    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.timeout(300)
+    def test_search_concurrent_multithreads(self, connect, args):
+        '''
+        target: test concurrent search with multiple threads
+        method: search with 4 threads, each thread using its own connection
+        expected: status ok, and the returned ids are among the inserted ids
+        '''
+        nb = 100
+        top_k = 10
+        threads_num = 4
+        threads = []
+        collection = gen_unique_str(uid)
+        uri = "tcp://%s:%s" % (args["ip"], args["port"])
+        # create collection
+        milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
+        milvus.create_collection(collection, default_fields)
+        entities, ids = init_data(milvus, collection)
+        connect.load_collection(collection)
+
+        def search(milvus):
+            res = milvus.search(collection, default_query)
+            assert len(res) == 1
+            assert res[0]._entities[0].id in ids
+            assert res[0]._distances[0] < epsilon
+
+        for i in range(threads_num):
+            milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
+            t = MyThread(target=search, args=(milvus,))
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
+    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.timeout(300)
+    def test_search_concurrent_multithreads_single_connection(self, connect, args):
+        '''
+        target: test concurrent search with multiple threads
+        method: search with 4 threads, all threads sharing a single connection
+        expected: status ok, and the returned ids are among the inserted ids
+        '''
+        nb = 100
+        top_k = 10
+        threads_num = 4
+        threads = []
+        collection = gen_unique_str(uid)
+        uri = "tcp://%s:%s" % (args["ip"], args["port"])
+        # create collection
+        milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
+        milvus.create_collection(collection, default_fields)
+        entities, ids = init_data(milvus, collection)
+        connect.load_collection(collection)
+
+        def search(milvus):
+            res = milvus.search(collection, default_query)
+            assert len(res) == 1
+            assert res[0]._entities[0].id in ids
+            assert res[0]._distances[0] < epsilon
+
+        for i in range(threads_num):
+            t = MyThread(target=search, args=(milvus,))
+            threads.append(t)
+            t.start()
+            time.sleep(0.2)
+        for t in threads:
+            t.join()
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_search_multi_collections(self, connect, args):
+        '''
+        target: test search on multiple collections with L2
+        method: add vectors into 10 collections, and search each of them
+        expected: search status ok, and the length of the result equals nq
+        '''
+        num = 10
+        top_k = 10
+        nq = 20
+        collection_names = []
+        for i in range(num):
+            collection = gen_unique_str(uid + str(i))
+            connect.create_collection(collection, default_fields)
+            collection_names.append(collection)
+            entities, ids = init_data(connect, collection)
+            assert len(ids) == default_nb
+            query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
+            connect.load_collection(collection)
+            res = connect.search(collection, query)
+            assert len(res) == nq
+            for i in range(nq):
+                assert check_id_result(res[i], ids[i])
+                assert res[i]._distances[0] < epsilon
+                assert res[i]._distances[1] > epsilon
+        for i in range(num):
+            connect.drop_collection(collection_names[i])
+
+    @pytest.mark.skip("r0.3-test")
+    def _test_query_entities_with_field_less_than_top_k(self, connect, id_collection):
+        """
+        target: test search with a field, where the result may contain fewer entities than top_k
+        method: insert entities, build an IVF_FLAT index, and search with a field and nprobe=1
+        expected:
+        """
+        entities, ids = init_data(connect, id_collection, auto_id=False)
+        simple_index = {"index_type": "IVF_FLAT", "params": {"nlist": 200}, "metric_type": "L2"}
+        connect.create_index(id_collection, field_name, simple_index)
+        # logging.getLogger().info(connect.get_collection_info(id_collection))
+        top_k = 300
+        default_query, default_query_vecs = gen_query_vectors(field_name, entities, top_k, nq,
+                                                              search_params={"nprobe": 1})
+        expr = {"must": [gen_default_vector_expr(default_query)]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(id_collection)
+        res = connect.search(id_collection, query, fields=["int64"])
+        assert len(res) == nq
+        for r in res[0]:
+            assert getattr(r.entity, "int64") == getattr(r.entity, "id")
+
+
+class TestSearchDSL(object):
+    """
+    ******************************************************************
+    # The following cases are used to build invalid query expr
+    ******************************************************************
+    """
+
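+    # The exprs built in this class combine a mandatory vector clause with
+    # term/range filters under boolean operators. A sketch of the nesting these
+    # tests exercise (cf. test_query_complex_dsl), assuming gen_default_term_expr
+    # and gen_default_range_expr produce {"term": {...}} / {"range": {...}} dicts:
+    #
+    #   expr = {"must": [
+    #       {"must": [{"should": [                      # term OR range filter
+    #           {"term": {"int64": {"values": [1]}}},
+    #           {"range": {"int64": {"GT": 1, "LT": default_nb // 2}}}]}]},
+    #       {"must": [gen_default_vector_expr(default_query)]},  # vector clause
+    #   ]}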
@pytest.mark.tags(CaseLabel.L2) + def test_query_no_must(self, connect, collection): + ''' + method: build query without must expr + expected: error raised + ''' + # entities, ids = init_data(connect, collection) + query = update_query_expr(default_query, keep_old=False) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_query_no_vector_term_only(self, connect, collection): + ''' + method: build query without vector only term + expected: error raised + ''' + # entities, ids = init_data(connect, collection) + expr = { + "must": [gen_default_term_expr] + } + query = update_query_expr(default_query, keep_old=False, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_query_no_vector_range_only(self, connect, collection): + ''' + method: build query without vector only range + expected: error raised + ''' + # entities, ids = init_data(connect, collection) + expr = { + "must": [gen_default_range_expr] + } + query = update_query_expr(default_query, keep_old=False, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_query_vector_only(self, connect, collection): + entities, ids = init_data(connect, collection) + connect.load_collection(collection) + res = connect.search(collection, default_query) + assert len(res) == nq + assert len(res[0]) == default_top_k + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_query_wrong_format(self, connect, collection): + ''' + method: build query without must expr, with wrong expr name + expected: error raised + ''' + # entities, ids = init_data(connect, collection) + expr = { + "must1": [gen_default_term_expr] + } + query = update_query_expr(default_query, keep_old=False, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_query_empty(self, connect, collection): + ''' + method: search with empty query + expected: error raised + ''' + query = {} + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + """ + ****************************************************************** + # The following cases are used to build valid query expr + ****************************************************************** + """ + @pytest.mark.tags(CaseLabel.L2) + def test_query_term_value_not_in(self, connect, collection): + ''' + method: build query with vector and term expr, with no term can be filtered + expected: filter pass + ''' + entities, ids = init_data(connect, collection) + expr = { + "must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[100000])]} + query = update_query_expr(default_query, expr=expr) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) == 0 + # TODO: + + @pytest.mark.tags(CaseLabel.L2) + def test_query_term_value_all_in(self, connect, collection): + ''' + method: build query with vector and term expr, with all term can be filtered + expected: filter pass + ''' + entities, ids = init_data(connect, collection) + expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[1])]} + query = update_query_expr(default_query, expr=expr) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert 
len(res[0]) == 1 + # TODO: + + @pytest.mark.tags(CaseLabel.L2) + def test_query_term_values_not_in(self, connect, collection): + ''' + method: build query with vector and term expr, with no term can be filtered + expected: filter pass + ''' + entities, ids = init_data(connect, collection) + expr = {"must": [gen_default_vector_expr(default_query), + gen_default_term_expr(values=[i for i in range(100000, 100010)])]} + query = update_query_expr(default_query, expr=expr) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) == 0 + # TODO: + + @pytest.mark.tags(CaseLabel.L2) + def test_query_term_values_all_in(self, connect, collection): + ''' + method: build query with vector and term expr, with all term can be filtered + expected: filter pass + ''' + entities, ids = init_data(connect, collection) + expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr()]} + query = update_query_expr(default_query, expr=expr) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) == default_top_k + limit = default_nb // 2 + for i in range(nq): + for result in res[i]: + logging.getLogger().info(result.id) + assert result.id in ids[:limit] + # TODO: + + @pytest.mark.tags(CaseLabel.L2) + def test_query_term_values_parts_in(self, connect, collection): + ''' + method: build query with vector and term expr, with parts of term can be filtered + expected: filter pass + ''' + entities, ids = init_data(connect, collection) + expr = {"must": [gen_default_vector_expr(default_query), + gen_default_term_expr( + values=[i for i in range(default_nb // 2, default_nb + default_nb // 2)])]} + query = update_query_expr(default_query, expr=expr) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) == default_top_k + # TODO: + + @pytest.mark.tags(CaseLabel.L2) + def test_query_term_values_repeat(self, connect, collection): + ''' + method: build query with vector and term expr, with the same values + expected: filter pass + ''' + entities, ids = init_data(connect, collection) + expr = { + "must": [gen_default_vector_expr(default_query), + gen_default_term_expr(values=[1 for i in range(1, default_nb)])]} + query = update_query_expr(default_query, expr=expr) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) == 1 + # TODO: + + @pytest.mark.tags(CaseLabel.L2) + def test_query_term_value_empty(self, connect, collection): + ''' + method: build query with term value empty + expected: return null + ''' + expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[])]} + query = update_query_expr(default_query, expr=expr) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) == 0 + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_query_complex_dsl(self, connect, collection): + ''' + method: query with complicated dsl + expected: no error raised + ''' + expr = {"must": [ + {"must": [{"should": [gen_default_term_expr(values=[1]), gen_default_range_expr()]}]}, + {"must": [gen_default_vector_expr(default_query)]} + ]} + logging.getLogger().info(expr) + query = update_query_expr(default_query, expr=expr) + logging.getLogger().info(query) + connect.load_collection(collection) + res = connect.search(collection, query) + 
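+        # Only successful execution of the nested should/must expr is asserted
+        # here; the hits are just logged below.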
logging.getLogger().info(res)
+
+    """
+    ******************************************************************
+    # The following cases are used to build invalid term query expr
+    ******************************************************************
+    """
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_term_key_error(self, connect, collection):
+        '''
+        method: build query with a misspelled term key
+        expected: Exception raised
+        '''
+        expr = {"must": [gen_default_vector_expr(default_query),
+                         gen_default_term_expr(keyword="terrm", values=[i for i in range(default_nb // 2)])]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_term()
+    )
+    def get_invalid_term(self, request):
+        return request.param
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_term_wrong_format(self, connect, collection, get_invalid_term):
+        '''
+        method: build query with a wrongly formatted term expr
+        expected: Exception raised
+        '''
+        entities, ids = init_data(connect, collection)
+        term = get_invalid_term
+        expr = {"must": [gen_default_vector_expr(default_query), term]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_term_field_named_term(self, connect, collection):
+        '''
+        method: build query on a collection with a field named "term"
+        expected: search passes, the field name does not conflict with the term keyword
+        '''
+        term_fields = add_field_default(default_fields, field_name="term")
+        collection_term = gen_unique_str("term")
+        connect.create_collection(collection_term, term_fields)
+        term_entities = add_field(entities, field_name="term")
+        ids = connect.insert(collection_term, term_entities)
+        assert len(ids) == default_nb
+        connect.flush([collection_term])
+        # count = connect.count_entities(collection_term)
+        # assert count == default_nb
+        stats = connect.get_collection_stats(collection_term)
+        assert stats["row_count"] == default_nb
+        term_param = {"term": {"term": {"values": [i for i in range(default_nb // 2)]}}}
+        expr = {"must": [gen_default_vector_expr(default_query),
+                         term_param]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection_term)
+        res = connect.search(collection_term, query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+        connect.drop_collection(collection_term)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_term_one_field_not_existed(self, connect, collection):
+        '''
+        method: build a term expr with two fields, one of which does not exist
+        expected: exception raised
+        '''
+        entities, ids = init_data(connect, collection)
+        term = gen_default_term_expr()
+        term["term"].update({"a": [0]})
+        expr = {"must": [gen_default_vector_expr(default_query), term]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    """
+    ******************************************************************
+    # The following cases are used to build range query expr
+    ******************************************************************
+    """
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_query_range_key_error(self, connect, collection):
+        '''
+        method: build query with a misspelled range key
+        expected: Exception raised
+        '''
+        range = gen_default_range_expr(keyword="ranges")
+        expr = {"must": [gen_default_vector_expr(default_query), range]}
+        query = update_query_expr(default_query, expr=expr)
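+        # "ranges" is a deliberate misspelling of the "range" keyword, so the
+        # expr is expected to be rejected: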
with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.fixture( + scope="function", + params=gen_invalid_range() + ) + def get_invalid_range(self, request): + return request.param + + @pytest.mark.tags(CaseLabel.L2) + def test_query_range_wrong_format(self, connect, collection, get_invalid_range): + ''' + method: build query with wrong format range + expected: Exception raised + ''' + entities, ids = init_data(connect, collection) + range = get_invalid_range + expr = {"must": [gen_default_vector_expr(default_query), range]} + query = update_query_expr(default_query, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.L2) + def test_query_range_string_ranges(self, connect, collection): + ''' + method: build query with invalid ranges + expected: raise Exception + ''' + entities, ids = init_data(connect, collection) + ranges = {"GT": "0", "LT": "1000"} + range = gen_default_range_expr(ranges=ranges) + expr = {"must": [gen_default_vector_expr(default_query), range]} + query = update_query_expr(default_query, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.L2) + def test_query_range_invalid_ranges(self, connect, collection): + ''' + method: build query with invalid ranges + expected: 0 + ''' + entities, ids = init_data(connect, collection) + ranges = {"GT": default_nb, "LT": 0} + range = gen_default_range_expr(ranges=ranges) + expr = {"must": [gen_default_vector_expr(default_query), range]} + query = update_query_expr(default_query, expr=expr) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res[0]) == 0 + + @pytest.fixture( + scope="function", + params=gen_valid_ranges() + ) + def get_valid_ranges(self, request): + return request.param + + @pytest.mark.tags(CaseLabel.L2) + def test_query_range_valid_ranges(self, connect, collection, get_valid_ranges): + ''' + method: build query with valid ranges + expected: pass + ''' + entities, ids = init_data(connect, collection) + ranges = get_valid_ranges + range = gen_default_range_expr(ranges=ranges) + expr = {"must": [gen_default_vector_expr(default_query), range]} + query = update_query_expr(default_query, expr=expr) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) == default_top_k + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_query_range_one_field_not_existed(self, connect, collection): + ''' + method: build query with two fields ranges, one of fields not existed + expected: exception raised + ''' + entities, ids = init_data(connect, collection) + range = gen_default_range_expr() + range["range"].update({"a": {"GT": 1, "LT": default_nb // 2}}) + expr = {"must": [gen_default_vector_expr(default_query), range]} + query = update_query_expr(default_query, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + """ + ************************************************************************ + # The following cases are used to build query expr multi range and term + ************************************************************************ + """ + @pytest.mark.tags(CaseLabel.L2) + def test_query_multi_term_has_common(self, connect, collection): + ''' + method: build query with multi term with same field, and values has common + expected: pass + ''' + entities, ids = init_data(connect, collection) + term_first = 
gen_default_term_expr()
+        term_second = gen_default_term_expr(values=[i for i in range(default_nb // 3)])
+        expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_multi_term_no_common(self, connect, collection):
+        '''
+        method: build query with multiple term exprs on the same field, with no common values
+        expected: pass, and the result is empty
+        '''
+        entities, ids = init_data(connect, collection)
+        term_first = gen_default_term_expr()
+        term_second = gen_default_term_expr(values=[i for i in range(default_nb // 2, default_nb + default_nb // 2)])
+        expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_multi_term_different_fields(self, connect, collection):
+        '''
+        method: build query with multiple term exprs on different fields, whose filtered sets do not intersect
+        expected: pass, and the result is empty
+        '''
+        entities, ids = init_data(connect, collection)
+        term_first = gen_default_term_expr()
+        term_second = gen_default_term_expr(field="float",
+                                            values=[float(i) for i in range(default_nb // 2, default_nb)])
+        expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_single_term_multi_fields(self, connect, collection):
+        '''
+        method: build a single term expr that contains multiple fields
+        expected: error raised
+        '''
+        entities, ids = init_data(connect, collection)
+        term_first = {"int64": {"values": [i for i in range(default_nb // 2)]}}
+        term_second = {"float": {"values": [float(i) for i in range(default_nb // 2, default_nb)]}}
+        term = update_term_expr({"term": {}}, [term_first, term_second])
+        expr = {"must": [gen_default_vector_expr(default_query), term]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_multi_range_has_common(self, connect, collection):
+        '''
+        method: build query with multiple range exprs on the same field, whose ranges overlap
+        expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        range_one = gen_default_range_expr()
+        range_two = gen_default_range_expr(ranges={"GT": 1, "LT": default_nb // 3})
+        expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_multi_range_no_common(self, connect, collection):
+        '''
+        method: build query with multiple range exprs on the same field, whose ranges do not overlap
+        expected: pass, and the result is empty
+        '''
+        entities, ids = init_data(connect, collection)
+        range_one = gen_default_range_expr()
+        range_two = gen_default_range_expr(ranges={"GT": default_nb // 2, "LT": default_nb})
+        expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_multi_range_different_fields(self, connect, collection):
+        '''
+        method: build query with multiple range exprs on different fields, whose filtered sets do not intersect
+        expected: pass, and the result is empty
+        '''
+        entities, ids = init_data(connect, collection)
+        range_first = gen_default_range_expr()
+        range_second = gen_default_range_expr(field="float", ranges={"GT": default_nb // 2, "LT": default_nb})
+        expr = {"must": [gen_default_vector_expr(default_query), range_first, range_second]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_single_range_multi_fields(self, connect, collection):
+        '''
+        method: build a single range expr that contains multiple fields
+        expected: error raised
+        '''
+        entities, ids = init_data(connect, collection)
+        range_first = {"int64": {"GT": 0, "LT": default_nb // 2}}
+        range_second = {"float": {"GT": default_nb / 2, "LT": float(default_nb)}}
+        range = update_range_expr({"range": {}}, [range_first, range_second])
+        expr = {"must": [gen_default_vector_expr(default_query), range]}
+        query = update_query_expr(default_query, expr=expr)
+        with pytest.raises(Exception) as e:
+            res = connect.search(collection, query)
+
+    """
+    ******************************************************************
+    # The following cases are used to build query expr with both term and range
+    ******************************************************************
+    """
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_single_term_range_has_common(self, connect, collection):
+        '''
+        method: build query with a single term and a single range whose filtered sets overlap
+        expected: pass
+        '''
+        entities, ids = init_data(connect, collection)
+        term = gen_default_term_expr()
+        range = gen_default_range_expr(ranges={"GT": -1, "LT": default_nb // 2})
+        expr = {"must": [gen_default_vector_expr(default_query), term, range]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == default_top_k
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_single_term_range_no_common(self, connect, collection):
+        '''
+        method: build query with a single term and a single range whose filtered sets do not overlap
+        expected: pass, and the result is empty
+        '''
+        entities, ids = init_data(connect, collection)
+        term = gen_default_term_expr()
+        range = gen_default_range_expr(ranges={"GT": default_nb // 2, "LT": default_nb})
+        expr = {"must": [gen_default_vector_expr(default_query), term, range]}
+        query = update_query_expr(default_query, expr=expr)
+        connect.load_collection(collection)
+        res = connect.search(collection, query)
+        assert len(res) == nq
+        assert len(res[0]) == 0
+
+    """
+    ******************************************************************
+    # The following cases are used to build multi vectors query expr
+    ******************************************************************
+    """
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_query_multi_vectors_same_field(self, connect, collection):
+        '''
+        method: build query with two vector exprs on the same field
+        expected: error raised
+        '''
+        entities, ids = init_data(connect, collection)
+        vector1 = default_query
+        vector2 = gen_query_vectors(field_name, entities, default_top_k, nq=2)
+        expr = {
"must": [vector1, vector2] + } + query = update_query_expr(default_query, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + +class TestSearchDSLBools(object): + """ + ****************************************************************** + # The following cases are used to build invalid query expr + ****************************************************************** + """ + + @pytest.mark.tags(CaseLabel.L2) + def test_query_no_bool(self, connect, collection): + ''' + method: build query without bool expr + expected: error raised + ''' + entities, ids = init_data(connect, collection) + expr = {"bool1": {}} + query = expr + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_query_should_only_term(self, connect, collection): + ''' + method: build query without must, with should.term instead + expected: error raised + ''' + expr = {"should": gen_default_term_expr} + query = update_query_expr(default_query, keep_old=False, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_query_should_only_vector(self, connect, collection): + ''' + method: build query without must, with should.vector instead + expected: error raised + ''' + expr = {"should": default_query["bool"]["must"]} + query = update_query_expr(default_query, keep_old=False, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.L2) + def test_query_must_not_only_term(self, connect, collection): + ''' + method: build query without must, with must_not.term instead + expected: error raised + ''' + expr = {"must_not": gen_default_term_expr} + query = update_query_expr(default_query, keep_old=False, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.L2) + def test_query_must_not_vector(self, connect, collection): + ''' + method: build query without must, with must_not.vector instead + expected: error raised + ''' + expr = {"must_not": default_query["bool"]["must"]} + query = update_query_expr(default_query, keep_old=False, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.L2) + def test_query_must_should(self, connect, collection): + ''' + method: build query must, and with should.term + expected: error raised + ''' + expr = {"should": gen_default_term_expr} + query = update_query_expr(default_query, keep_old=True, expr=expr) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + +""" +****************************************************************** +# The following cases are used to test `search` function +# with invalid collection_name, or invalid query expr +****************************************************************** +""" + +class TestSearchInvalid(object): + """ + Test search collection with invalid collection names + """ + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_collection_name(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_invalid_partition(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_invalid_field(self, request): + yield request.param + + @pytest.fixture( + scope="function", + 
params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + # if str(connect._cmd("mode")) == "CPU": + # if request.param["index_type"] in index_cpu_not_support(): + # pytest.skip("sq8h not support in CPU mode") + return request.param + + @pytest.mark.tags(CaseLabel.L2) + def test_search_with_invalid_collection(self, connect, get_collection_name): + collection_name = get_collection_name + with pytest.raises(Exception) as e: + res = connect.search(collection_name, default_query) + + @pytest.mark.tags(CaseLabel.L2) + def test_search_with_invalid_partition(self, connect, collection, get_invalid_partition): + # tag = " " + tag = get_invalid_partition + with pytest.raises(Exception) as e: + res = connect.search(collection, default_query, partition_names=tag) + + @pytest.mark.tags(CaseLabel.L2) + def test_search_with_invalid_field_name(self, connect, collection, get_invalid_field): + fields = [get_invalid_field] + with pytest.raises(Exception) as e: + res = connect.search(collection, default_query, fields=fields) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_search_with_not_existed_field(self, connect, collection): + fields = [gen_unique_str("field_name")] + with pytest.raises(Exception) as e: + res = connect.search(collection, default_query, fields=fields) + + """ + Test search collection with invalid query + """ + + @pytest.fixture( + scope="function", + params=gen_invalid_ints() + ) + def get_top_k(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_search_with_invalid_top_k(self, connect, collection, get_top_k): + ''' + target: test search function, with the wrong top_k + method: search with top_k + expected: raise an error, and the connection is normal + ''' + top_k = get_top_k + default_query["bool"]["must"][0]["vector"][field_name]["topk"] = top_k + with pytest.raises(Exception) as e: + res = connect.search(collection, default_query) + + """ + Test search collection with invalid search params + """ + + @pytest.fixture( + scope="function", + params=gen_invaild_search_params() + ) + def get_search_params(self, request): + yield request.param + + # 1463 + @pytest.mark.tags(CaseLabel.L2) + def test_search_with_invalid_params(self, connect, collection, get_simple_index, get_search_params): + ''' + target: test search function, with the wrong nprobe + method: search with nprobe + expected: raise an error, and the connection is normal + ''' + search_params = get_search_params + index_type = get_simple_index["index_type"] + if index_type in ["FLAT"]: + # pytest.skip("skip in FLAT index") + pass + if index_type != search_params["index_type"]: + # pytest.skip("skip if index_type not matched") + pass + entities, ids = init_data(connect, collection, nb=1200) + connect.create_index(collection, field_name, get_simple_index) + connect.load_collection(collection) + query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1, + search_params=search_params["search_params"]) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.L2) + def test_search_with_invalid_params_binary(self, connect, binary_collection): + ''' + target: test search function, with the wrong nprobe + method: search with nprobe + expected: raise an error, and the connection is normal + ''' + nq = 1 + index_type = "BIN_IVF_FLAT" + int_vectors, entities, ids = init_binary_data(connect, binary_collection) + query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, 
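+                                                    # insert=False (next line) only generates
+                                                    # the query entities; nothing is written
+                                                    # into binary_collection here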
insert=False) + connect.create_index(binary_collection, binary_field_name, + {"index_type": index_type, "metric_type": "JACCARD", "params": {"nlist": 128}}) + connect.load_collection(binary_collection) + query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, + search_params={"nprobe": 0}, metric_type="JACCARD") + with pytest.raises(Exception) as e: + res = connect.search(binary_collection, query) + + # #1464 + @pytest.mark.tags(CaseLabel.L2) + def test_search_with_empty_params(self, connect, collection, args, get_simple_index): + ''' + target: test search function, with empty search params + method: search with params + expected: raise an error, and the connection is normal + ''' + index_type = get_simple_index["index_type"] + if args["handler"] == "HTTP": + pytest.skip("skip in http mode") + if index_type == "FLAT": + # pytest.skip("skip in FLAT index") + pass + entities, ids = init_data(connect, collection) + connect.create_index(collection, field_name, get_simple_index) + connect.load_collection(collection) + query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1, search_params={}) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_search_with_empty_vectors(self, connect, collection): + """ + target: test search function, with empty search vectors + method: search + expected: raise an exception + """ + entities, ids = init_data(connect, collection) + assert len(ids) == default_nb + connect.load_collection(collection) + query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq=0) + with pytest.raises(Exception) as e: + res = connect.search(collection, query) + + +class TestSearchWithExpression(object): + @pytest.fixture( + scope="function", + params=[1, 10, 20], + ) + def limit(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=gen_normal_expressions(), + ) + def expression(self, request): + yield request.param + + @pytest.fixture( + scope="function", + params=[ + {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}, + ] + ) + def index_param(self, request): + return request.param + + @pytest.fixture( + scope="function", + ) + def search_params(self): + return {"metric_type": "L2", "params": {"nprobe": 10}} + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_search_with_expression(self, connect, collection, index_param, search_params, limit, expression): + entities, ids = init_data(connect, collection) + assert len(ids) == default_nb + connect.create_index(collection, default_float_vec_field_name, index_param) + connect.load_collection(collection) + nq = 10 + query_data = entities[2]["values"][:nq] + res = connect.search_with_expression(collection, query_data, default_float_vec_field_name, search_params, + limit, expression) + assert len(res) == nq + for topk_results in res: + assert len(topk_results) <= limit + + +def check_id_result(result, id): + limit_in = 5 + ids = [entity.id for entity in result] + if len(result) >= limit_in: + return id in ids[:limit_in] + else: + return id in ids diff --git a/tests/python_client/testcases/stability/test_mysql.py b/tests/python_client/testcases/stability/test_mysql.py new file mode 100644 index 0000000000..076f6f0ea6 --- /dev/null +++ b/tests/python_client/testcases/stability/test_mysql.py @@ -0,0 +1,44 @@ +import time +import random +import pdb +import threading +import logging +from multiprocessing import Pool, Process +import pytest +from 
utils.utils import * + +class TestMysql: + """ + ****************************************************************** + The following cases are used to test mysql failure + ****************************************************************** + """ + @pytest.fixture(scope="function", autouse=True) + def skip_check(self, connect, args): + if args["service_name"].find("shards") != -1: + reason = "Skip restart cases in shards mode" + logging.getLogger().info(reason) + pytest.skip(reason) + + @pytest.mark.tags(CaseLabel.L2) + def _test_kill_mysql_during_index(self, connect, collection, args): + big_nb = 20000 + index_param = {"nlist": 1024, "m": 16} + index_type = IndexType.IVF_PQ + vectors = gen_vectors(big_nb, default_dim) + status, ids = connect.bulk_insert(collection, vectors, ids=[i for i in range(big_nb)]) + status = connect.flush([collection]) + assert status.OK() + status, res_count = connect.count_entities(collection) + logging.getLogger().info(res_count) + assert status.OK() + assert res_count == big_nb + logging.getLogger().info("Start create index async") + status = connect.create_index(collection, index_type, index_param, _async=True) + time.sleep(2) + logging.getLogger().info("Start play mysql failure") + # pass + new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"]) + status, res_count = new_connect.count_entities(collection) + assert status.OK() + assert res_count == big_nb diff --git a/tests/python_client/testcases/stability/test_restart.py b/tests/python_client/testcases/stability/test_restart.py new file mode 100644 index 0000000000..d5b94dc86e --- /dev/null +++ b/tests/python_client/testcases/stability/test_restart.py @@ -0,0 +1,315 @@ +import time +import random +import pdb +import threading +import logging +import json +from multiprocessing import Pool, Process +import pytest +from utils.utils import * + + +uid = "wal" +TIMEOUT = 120 +insert_interval_time = 1.5 +big_nb = 100000 +field_name = "float_vector" +big_entities = gen_entities(big_nb) +default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"} + + +class TestRestartBase: + """ + ****************************************************************** + The following cases are used to test `create_partition` function + ****************************************************************** + """ + @pytest.fixture(scope="module", autouse=True) + def skip_check(self, args): + logging.getLogger().info(args) + if "service_name" not in args or not args["service_name"]: + reason = "Skip if service name not provided" + logging.getLogger().info(reason) + pytest.skip(reason) + if args["service_name"].find("shards") != -1: + reason = "Skip restart cases in shards mode" + logging.getLogger().info(reason) + pytest.skip(reason) + + @pytest.mark.tags(CaseLabel.L2) + def _test_insert_flush(self, connect, collection, args): + ''' + target: return the same row count after server restart + method: call function: create collection, then insert/flush, restart server and assert row count + expected: row count keep the same + ''' + ids = connect.bulk_insert(collection, default_entities) + connect.flush([collection]) + ids = connect.bulk_insert(collection, default_entities) + connect.flush([collection]) + res_count = connect.count_entities(collection) + logging.getLogger().info(res_count) + assert res_count == 2 * nb + # restart server + logging.getLogger().info("Start restart server") + assert restart_server(args["service_name"]) + # assert row count again + new_connect = get_milvus(args["ip"], 
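+                                 # reconnect after the simulated MySQL failure:
+                                 # the entity count should still be served correctly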
args["port"], handler=args["handler"]) + res_count = new_connect.count_entities(collection) + logging.getLogger().info(res_count) + assert res_count == 2 * nb + + @pytest.mark.tags(CaseLabel.L2) + def _test_insert_during_flushing(self, connect, collection, args): + ''' + target: flushing will recover + method: call function: create collection, then insert/flushing, restart server and assert row count + expected: row count equals 0 + ''' + # disable_autoflush() + ids = connect.bulk_insert(collection, big_entities) + connect.flush([collection], _async=True) + res_count = connect.count_entities(collection) + logging.getLogger().info(res_count) + if res_count < big_nb: + # restart server + assert restart_server(args["service_name"]) + # assert row count again + new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"]) + res_count_2 = new_connect.count_entities(collection) + logging.getLogger().info(res_count_2) + timeout = 300 + start_time = time.time() + while new_connect.count_entities(collection) != big_nb and (time.time() - start_time < timeout): + time.sleep(10) + logging.getLogger().info(new_connect.count_entities(collection)) + res_count_3 = new_connect.count_entities(collection) + logging.getLogger().info(res_count_3) + assert res_count_3 == big_nb + + @pytest.mark.tags(CaseLabel.L2) + def _test_delete_during_flushing(self, connect, collection, args): + ''' + target: flushing will recover + method: call function: create collection, then delete/flushing, restart server and assert row count + expected: row count equals (nb - delete_length) + ''' + # disable_autoflush() + ids = connect.bulk_insert(collection, big_entities) + connect.flush([collection]) + delete_length = 1000 + delete_ids = ids[big_nb//4:big_nb//4+delete_length] + delete_res = connect.delete_entity_by_id(collection, delete_ids) + connect.flush([collection], _async=True) + res_count = connect.count_entities(collection) + logging.getLogger().info(res_count) + # restart server + assert restart_server(args["service_name"]) + # assert row count again + new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"]) + res_count_2 = new_connect.count_entities(collection) + logging.getLogger().info(res_count_2) + timeout = 100 + start_time = time.time() + while new_connect.count_entities(collection) != big_nb - delete_length and (time.time() - start_time < timeout): + time.sleep(10) + logging.getLogger().info(new_connect.count_entities(collection)) + if new_connect.count_entities(collection) == big_nb - delete_length: + time.sleep(10) + res_count_3 = new_connect.count_entities(collection) + logging.getLogger().info(res_count_3) + assert res_count_3 == big_nb - delete_length + + @pytest.mark.tags(CaseLabel.L2) + def _test_during_indexed(self, connect, collection, args): + ''' + target: flushing will recover + method: call function: create collection, then indexed, restart server and assert row count + expected: row count equals nb + ''' + # disable_autoflush() + ids = connect.bulk_insert(collection, big_entities) + connect.flush([collection]) + connect.create_index(collection, field_name, default_index) + res_count = connect.count_entities(collection) + logging.getLogger().info(res_count) + stats = connect.get_collection_stats(collection) + # logging.getLogger().info(stats) + # pdb.set_trace() + # restart server + assert restart_server(args["service_name"]) + # assert row count again + new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"]) + assert 
+        assert new_connect.count_entities(collection) == big_nb
+        stats = connect.get_collection_stats(collection)
+        for file in stats["partitions"][0]["segments"][0]["files"]:
+            if file["field"] == field_name and file["name"] != "_raw":
+                assert file["data_size"] > 0
+                assert file["index_type"] == default_index["index_type"]
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_during_indexing(self, connect, collection, args):
+        '''
+        target: index building will recover after restart
+        method: call function: create collection, then start indexing, restart server and assert row count
+        expected: row count equals loop * big_nb, server continues to build index after restart
+        '''
+        # disable_autoflush()
+        loop = 5
+        for i in range(loop):
+            ids = connect.bulk_insert(collection, big_entities)
+            connect.flush([collection])
+        connect.create_index(collection, field_name, default_index, _async=True)
+        res_count = connect.count_entities(collection)
+        logging.getLogger().info(res_count)
+        stats = connect.get_collection_stats(collection)
+        # logging.getLogger().info(stats)
+        # restart server
+        assert restart_server(args["service_name"])
+        # assert row count again
+        new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
+        res_count_2 = new_connect.count_entities(collection)
+        logging.getLogger().info(res_count_2)
+        assert res_count_2 == loop * big_nb
+        status = new_connect._cmd("status")
+        assert json.loads(status)["indexing"] == True
+        # timeout = 100
+        # start_time = time.time()
+        # while time.time() - start_time < timeout:
+        #     time.sleep(5)
+        #     assert new_connect.count_entities(collection) == loop * big_nb
+        #     stats = connect.get_collection_stats(collection)
+        #     assert stats["row_count"] == loop * big_nb
+        #     for file in stats["partitions"][0]["segments"][0]["files"]:
+        #         # logging.getLogger().info(file)
+        #         if file["field"] == field_name and file["name"] != "_raw":
+        #             assert file["data_size"] > 0
+        #             if file["index_type"] != default_index["index_type"]:
+        #                 continue
+        #     for file in stats["partitions"][0]["segments"][0]["files"]:
+        #         if file["field"] == field_name and file["name"] != "_raw":
+        #             assert file["data_size"] > 0
+        #             if file["index_type"] != default_index["index_type"]:
+        #                 assert False
+        #             else:
+        #                 assert True
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_delete_flush_during_compacting(self, connect, collection, args):
+        '''
+        target: verify server work after restart during compaction
+        method: call function: create collection, then delete/flush/compacting, restart server and assert row count,
+                then call `compact` again and expect it to pass
+        expected: row count equals (big_nb - delete_length * loop)
+        '''
+        # disable_autoflush()
+        ids = connect.bulk_insert(collection, big_entities)
+        connect.flush([collection])
+        delete_length = 1000
+        loop = 10
+        for i in range(loop):
+            delete_ids = ids[i*delete_length:(i+1)*delete_length]
+            delete_res = connect.delete_entity_by_id(collection, delete_ids)
+            connect.flush([collection])
+        connect.compact(collection, _async=True)
+        res_count = connect.count_entities(collection)
+        logging.getLogger().info(res_count)
+        assert res_count == big_nb - delete_length*loop
+        info = connect.get_collection_stats(collection)
+        size_old = info["partitions"][0]["segments"][0]["data_size"]
+        logging.getLogger().info(size_old)
+        # restart server
+        assert restart_server(args["service_name"])
+        # assert row count again
+        new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
+        res_count_2 = new_connect.count_entities(collection)
+        logging.getLogger().info(res_count_2)
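+        # loop batches of delete_length ids were deleted and flushed above, so
+        # the recovered count must equal big_nb - delete_length * loop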
+        assert res_count_2 == big_nb - delete_length*loop
+        info = connect.get_collection_stats(collection)
+        size_before = info["partitions"][0]["segments"][0]["data_size"]
+        status = connect.compact(collection)
+        assert status.OK()
+        info = connect.get_collection_stats(collection)
+        size_after = info["partitions"][0]["segments"][0]["data_size"]
+        assert size_before > size_after
+
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_insert_during_flushing_multi_collections(self, connect, args):
+        '''
+        target: flushing will recover
+        method: call function: create collections, then insert/flushing, restart server and assert row count
+        expected: row count of every collection recovers to big_nb after restart
+        '''
+        # disable_autoflush()
+        collection_num = 2
+        collection_list = []
+        for i in range(collection_num):
+            collection_name = gen_unique_str(uid)
+            collection_list.append(collection_name)
+            connect.create_collection(collection_name, default_fields)
+            ids = connect.bulk_insert(collection_name, big_entities)
+        connect.flush(collection_list, _async=True)
+        res_count = connect.count_entities(collection_list[-1])
+        logging.getLogger().info(res_count)
+        if res_count < big_nb:
+            # restart server
+            assert restart_server(args["service_name"])
+            # assert row count again
+            new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
+            res_count_2 = new_connect.count_entities(collection_list[-1])
+            logging.getLogger().info(res_count_2)
+            timeout = 300
+            start_time = time.time()
+            while time.time() - start_time < timeout:
+                count_list = []
+                break_flag = True
+                for index, name in enumerate(collection_list):
+                    tmp_count = new_connect.count_entities(name)
+                    count_list.append(tmp_count)
+                    logging.getLogger().info(count_list)
+                    if tmp_count != big_nb:
+                        break_flag = False
+                        break
+                if break_flag:
+                    break
+                time.sleep(10)
+            for name in collection_list:
+                assert new_connect.count_entities(name) == big_nb
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_insert_during_flushing_multi_partitions(self, connect, collection, args):
+        '''
+        target: flushing will recover
+        method: call function: create collection/partition, then insert/flushing, restart server and assert row count
+        expected: row count recovers to big_nb * 2 after restart
+        '''
+        # disable_autoflush()
+        partitions_num = 2
+        partitions = []
+        for i in range(partitions_num):
+            tag_tmp = gen_unique_str()
+            partitions.append(tag_tmp)
+            connect.create_partition(collection, tag_tmp)
+            ids = connect.bulk_insert(collection, big_entities, partition_name=tag_tmp)
+        connect.flush([collection], _async=True)
+        res_count = connect.count_entities(collection)
+        logging.getLogger().info(res_count)
+        if res_count < big_nb:
+            # restart server
+            assert restart_server(args["service_name"])
+            # assert row count again
+            new_connect = get_milvus(args["ip"], args["port"], handler=args["handler"])
+            res_count_2 = new_connect.count_entities(collection)
+            logging.getLogger().info(res_count_2)
+            timeout = 300
+            start_time = time.time()
+            while new_connect.count_entities(collection) != big_nb * 2 and (time.time() - start_time < timeout):
+                time.sleep(10)
+                logging.getLogger().info(new_connect.count_entities(collection))
+            res_count_3 = new_connect.count_entities(collection)
+            logging.getLogger().info(res_count_3)
+            assert res_count_3 == big_nb * 2
\ No newline at end of file
diff --git a/tests20/python_client/testcases/test_collection.py b/tests/python_client/testcases/test_collection_20.py
similarity index 100%
rename from tests20/python_client/testcases/test_collection.py
rename to tests/python_client/testcases/test_collection_20.py
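The restart cases above all hand-roll the same "poll count_entities until the expected total or a timeout" loop. A minimal sketch of that pattern as a shared helper (the name wait_for_count and its defaults are illustrative, not part of this suite):

import time

def wait_for_count(conn, collection_name, expected, timeout=300, interval=10):
    # Poll the server until the row count reaches `expected` or `timeout`
    # seconds elapse; return the last observed count so callers can assert on it.
    start_time = time.time()
    count = conn.count_entities(collection_name)
    while count != expected and time.time() - start_time < timeout:
        time.sleep(interval)
        count = conn.count_entities(collection_name)
    return count

# usage in a restart case (sketch):
#     assert wait_for_count(new_connect, collection, big_nb) == big_nb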
diff --git a/tests/python_client/testcases/test_compact.py b/tests/python_client/testcases/test_compact.py new file mode 100644 index 0000000000..e21485facb --- /dev/null +++ b/tests/python_client/testcases/test_compact.py @@ -0,0 +1,722 @@ +import time +import pdb +import threading +import logging +from multiprocessing import Pool, Process +import pytest +from utils.utils import * +from common.constants import * + +# COMPACT_TIMEOUT = 180 +# field_name = default_float_vec_field_name +# binary_field_name = default_binary_vec_field_name +# default_single_query = { +# "bool": { +# "must": [ +# {"vector": {field_name: {"topk": 10, "query": gen_vectors(1, default_dim), "metric_type":"L2", +# "params": {"nprobe": 10}}}} +# ] +# } +# } +# default_binary_single_query = { +# "bool": { +# "must": [ +# {"vector": {binary_field_name: {"topk": 10, "query": gen_binary_vectors(1, default_dim), +# "metric_type":"JACCARD", "params": {"nprobe": 10}}}} +# ] +# } +# } +# default_query, default_query_vecs = gen_query_vectors(binary_field_name, default_binary_entities, 1, 2) +# +# +# def ip_query(): +# query = copy.deepcopy(default_single_query) +# query["bool"]["must"][0]["vector"][field_name].update({"metric_type": "IP"}) +# return query +# +# +# class TestCompactBase: +# """ +# ****************************************************************** +# The following cases are used to test `compact` function +# ****************************************************************** +# """ +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_compact_collection_name_None(self, connect, collection): +# ''' +# target: compact collection where collection name is None +# method: compact with the collection_name: None +# expected: exception raised +# ''' +# collection_name = None +# with pytest.raises(Exception) as e: +# status = connect.compact(collection_name) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_compact_collection_name_not_existed(self, connect, collection): +# ''' +# target: compact collection not existed +# method: compact with a random collection_name, which is not in db +# expected: exception raised +# ''' +# collection_name = gen_unique_str("not_existed") +# with pytest.raises(Exception) as e: +# status = connect.compact(collection_name) +# +# @pytest.fixture( +# scope="function", +# params=gen_invalid_strs() +# ) +# def get_collection_name(self, request): +# yield request.param +# +# @pytest.fixture( +# scope="function", +# params=gen_invalid_ints() +# ) +# def get_threshold(self, request): +# yield request.param +# +# @pytest.mark.tags(CaseLabel.L2) +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_compact_collection_name_invalid(self, connect, get_collection_name): +# ''' +# target: compact collection with invalid name +# method: compact with invalid collection_name +# expected: exception raised +# ''' +# collection_name = get_collection_name +# with pytest.raises(Exception) as e: +# status = connect.compact(collection_name) +# # assert not status.OK() +# +# @pytest.mark.tags(CaseLabel.L2) +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_compact_threshold_invalid(self, connect, collection, get_threshold): +# ''' +# target: compact collection with invalid name +# method: compact with invalid threshold +# expected: exception raised +# ''' +# threshold = get_threshold +# if threshold != None: +# with pytest.raises(Exception) as e: +# status = connect.compact(collection, threshold) +# +# @pytest.mark.tags(CaseLabel.L2) +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def 
test_add_entity_and_compact(self, connect, collection): +# ''' +# target: test add entity and compact +# method: add entity and compact collection +# expected: data_size before and after Compact +# ''' +# # vector = gen_single_vector(dim) +# ids = connect.bulk_insert(collection, default_entity) +# assert len(ids) == 1 +# connect.flush([collection]) +# # get collection info before compact +# info = connect.get_collection_stats(collection) +# logging.getLogger().info(info) +# size_before = info["partitions"][0]["segments"][0]["data_size"] +# status = connect.compact(collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(collection) +# size_after = info["partitions"][0]["segments"][0]["data_size"] +# assert(size_before == size_after) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_insert_and_compact(self, connect, collection): +# ''' +# target: test add entities and compact +# method: add entities and compact collection +# expected: data_size before and after Compact +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# # get collection info before compact +# info = connect.get_collection_stats(collection) +# # assert status.OK() +# size_before = info["partitions"][0]["segments"][0]["data_size"] +# status = connect.compact(collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(collection) +# # assert status.OK() +# size_after = info["partitions"][0]["segments"][0]["data_size"] +# assert(size_before == size_after) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_insert_delete_part_and_compact(self, connect, collection): +# ''' +# target: test add entities, delete part of them and compact +# method: add entities, delete a few and compact collection +# expected: status ok, data size maybe is smaller after compact +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# assert len(ids) == default_nb +# connect.flush([collection]) +# delete_ids = [ids[0], ids[-1]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status.OK() +# connect.flush([collection]) +# # get collection info before compact +# info = connect.get_collection_stats(collection) +# logging.getLogger().info(info["partitions"]) +# size_before = info["partitions"][0]["data_size"] +# logging.getLogger().info(size_before) +# status = connect.compact(collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(collection) +# logging.getLogger().info(info["partitions"]) +# size_after = info["partitions"][0]["data_size"] +# logging.getLogger().info(size_after) +# assert(size_before >= size_after) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_insert_delete_part_and_compact_threshold(self, connect, collection): +# ''' +# target: test add entities, delete part of them and compact +# method: add entities, delete a few and compact collection +# expected: status ok, data size maybe is smaller after compact +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# assert len(ids) == default_nb +# connect.flush([collection]) +# delete_ids = [ids[0], ids[-1]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status.OK() +# connect.flush([collection]) +# # get collection info before compact +# info = connect.get_collection_stats(collection) +# logging.getLogger().info(info["partitions"]) +# size_before = info["partitions"][0]["data_size"] +# 
logging.getLogger().info(size_before) +# status = connect.compact(collection, 0.1) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(collection) +# logging.getLogger().info(info["partitions"]) +# size_after = info["partitions"][0]["data_size"] +# logging.getLogger().info(size_after) +# assert(size_before >= size_after) +# +# @pytest.mark.tags(CaseLabel.L2) +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_insert_delete_all_and_compact(self, connect, collection): +# ''' +# target: test add entities, delete them and compact +# method: add entities, delete all and compact collection +# expected: status ok, no data size in collection info because collection is empty +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# assert len(ids) == default_nb +# connect.flush([collection]) +# status = connect.delete_entity_by_id(collection, ids) +# assert status.OK() +# connect.flush([collection]) +# # get collection info before compact +# info = connect.get_collection_stats(collection) +# status = connect.compact(collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(collection) +# logging.getLogger().info(info["partitions"]) +# assert not info["partitions"][0]["segments"] +# +# # TODO: enable +# @pytest.mark.tags(CaseLabel.L2) +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_insert_partition_delete_half_and_compact(self, connect, collection): +# ''' +# target: test add entities into partition, delete them and compact +# method: add entities, delete half of entities in partition and compact collection +# expected: status ok, data_size less than the older version +# ''' +# connect.create_partition(collection, default_tag) +# assert connect.has_partition(collection, default_tag) +# ids = connect.bulk_insert(collection, default_entities, partition_name=default_tag) +# connect.flush([collection]) +# info = connect.get_collection_stats(collection) +# logging.getLogger().info(info["partitions"]) +# delete_ids = ids[:default_nb//2] +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status.OK() +# connect.flush([collection]) +# # get collection info before compact +# info = connect.get_collection_stats(collection) +# logging.getLogger().info(info["partitions"]) +# status = connect.compact(collection) +# assert status.OK() +# # get collection info after compact +# info_after = connect.get_collection_stats(collection) +# logging.getLogger().info(info_after["partitions"]) +# assert info["partitions"][1]["segments"][0]["data_size"] >= info_after["partitions"][1]["segments"][0]["data_size"] +# +# @pytest.fixture( +# scope="function", +# params=gen_simple_index() +# ) +# def get_simple_index(self, request, connect): +# if str(connect._cmd("mode")) == "GPU": +# if not request.param["index_type"] not in ivf(): +# pytest.skip("Only support index_type: idmap/ivf") +# if str(connect._cmd("mode")) == "CPU": +# if request.param["index_type"] in index_cpu_not_support(): +# pytest.skip("CPU not support index_type: ivf_sq8h") +# return request.param +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_compact_after_index_created(self, connect, collection, get_simple_index): +# ''' +# target: test compact collection after index created +# method: add entities, create index, delete part of entities and compact +# expected: status ok, index description no change, data size smaller after compact +# ''' +# count = 10 +# ids = connect.bulk_insert(collection, default_entities) +# 
connect.flush([collection]) +# connect.create_index(collection, field_name, get_simple_index) +# connect.flush([collection]) +# # get collection info before compact +# info = connect.get_collection_stats(collection) +# size_before = info["partitions"][0]["segments"][0]["data_size"] +# delete_ids = ids[:default_nb//2] +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status.OK() +# connect.flush([collection]) +# status = connect.compact(collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(collection) +# size_after = info["partitions"][0]["segments"][0]["data_size"] +# assert(size_before >= size_after) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_add_entity_and_compact_twice(self, connect, collection): +# ''' +# target: test add entity and compact twice +# method: add entity and compact collection twice +# expected: status ok, data size no change +# ''' +# ids = connect.bulk_insert(collection, default_entity) +# connect.flush([collection]) +# # get collection info before compact +# info = connect.get_collection_stats(collection) +# size_before = info["partitions"][0]["segments"][0]["data_size"] +# status = connect.compact(collection) +# assert status.OK() +# connect.flush([collection]) +# # get collection info after compact +# info = connect.get_collection_stats(collection) +# size_after = info["partitions"][0]["segments"][0]["data_size"] +# assert(size_before == size_after) +# status = connect.compact(collection) +# assert status.OK() +# # get collection info after compact twice +# info = connect.get_collection_stats(collection) +# size_after_twice = info["partitions"][0]["segments"][0]["data_size"] +# assert(size_after == size_after_twice) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_insert_delete_part_and_compact_twice(self, connect, collection): +# ''' +# target: test add entities, delete part of them and compact twice +# method: add entities, delete part and compact collection twice +# expected: status ok, data size smaller after first compact, no change after second +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# delete_ids = [ids[0], ids[-1]] +# status = connect.delete_entity_by_id(collection, delete_ids) +# assert status.OK() +# connect.flush([collection]) +# # get collection info before compact +# info = connect.get_collection_stats(collection) +# size_before = info["partitions"][0]["data_size"] +# status = connect.compact(collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(collection) +# size_after = info["partitions"][0]["data_size"] +# assert(size_before >= size_after) +# status = connect.compact(collection) +# assert status.OK() +# # get collection info after compact twice +# info = connect.get_collection_stats(collection) +# size_after_twice = info["partitions"][0]["data_size"] +# assert(size_after == size_after_twice) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_compact_multi_collections(self, connect): +# ''' +# target: test compact works or not with multiple collections +# method: create 50 collections, add entities into them and compact in turn +# expected: status ok +# ''' +# nb = 100 +# num_collections = 20 +# entities = gen_entities(nb) +# collection_list = [] +# for i in range(num_collections): +# collection_name = gen_unique_str("test_compact_multi_collection_%d" % i) +# collection_list.append(collection_name) +# 
connect.create_collection(collection_name, default_fields) +# for i in range(num_collections): +# ids = connect.bulk_insert(collection_list[i], entities) +# connect.delete_entity_by_id(collection_list[i], ids[:nb//2]) +# status = connect.compact(collection_list[i]) +# assert status.OK() +# connect.drop_collection(collection_list[i]) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_add_entity_after_compact(self, connect, collection): +# ''' +# target: test add entity after compact +# method: after compact operation, add entity +# expected: status ok, entity added +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# assert len(ids) == default_nb +# connect.flush([collection]) +# # get collection info before compact +# info = connect.get_collection_stats(collection) +# size_before = info["partitions"][0]["segments"][0]["data_size"] +# status = connect.compact(collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(collection) +# size_after = info["partitions"][0]["segments"][0]["data_size"] +# assert(size_before == size_after) +# ids = connect.bulk_insert(collection, default_entity) +# connect.flush([collection]) +# res = connect.count_entities(collection) +# assert res == default_nb+1 +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_index_creation_after_compact(self, connect, collection, get_simple_index): +# ''' +# target: test index creation after compact +# method: after compact operation, create index +# expected: status ok, index description no change +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# connect.flush([collection]) +# status = connect.delete_entity_by_id(collection, ids[:10]) +# assert status.OK() +# connect.flush([collection]) +# status = connect.compact(collection) +# assert status.OK() +# status = connect.create_index(collection, field_name, get_simple_index) +# assert status.OK() +# # status, result = connect.get_index_info(collection) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_delete_entities_after_compact(self, connect, collection): +# ''' +# target: test delete entities after compact +# method: after compact operation, delete entities +# expected: status ok, entities deleted +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# assert len(ids) == default_nb +# connect.flush([collection]) +# status = connect.compact(collection) +# assert status.OK() +# connect.flush([collection]) +# status = connect.delete_entity_by_id(collection, ids) +# assert status.OK() +# connect.flush([collection]) +# assert connect.count_entities(collection) == 0 +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_search_after_compact(self, connect, collection): +# ''' +# target: test search after compact +# method: after compact operation, search vector +# expected: status ok +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# assert len(ids) == default_nb +# connect.flush([collection]) +# status = connect.compact(collection) +# assert status.OK() +# query = copy.deepcopy(default_single_query) +# query["bool"]["must"][0]["vector"][field_name]["query"] = [default_entity[-1]["values"][0], +# default_entities[-1]["values"][0], +# default_entities[-1]["values"][-1]] +# res = connect.search(collection, query) +# logging.getLogger().debug(res) +# assert len(res) == len(query["bool"]["must"][0]["vector"][field_name]["query"]) +# assert res[0]._distances[0] > epsilon +# assert res[1]._distances[0] < epsilon +# assert res[2]._distances[0] < epsilon +# 
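+# A condensed, hypothetical sketch of the before/after size check these
+# commented-out compact cases repeat (stats -> compact -> stats); the helper
+# name is illustrative and not part of the original suite:
+#
+# def compact_and_get_sizes(conn, coll):
+#     # Return (size_before, size_after) of the first segment around a compact call.
+#     size_before = conn.get_collection_stats(coll)["partitions"][0]["segments"][0]["data_size"]
+#     assert conn.compact(coll).OK()
+#     size_after = conn.get_collection_stats(coll)["partitions"][0]["segments"][0]["data_size"]
+#     return size_before, size_after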
+# +# class TestCompactBinary: +# """ +# ****************************************************************** +# The following cases are used to test `compact` function +# ****************************************************************** +# """ +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_add_entity_and_compact(self, connect, binary_collection): +# ''' +# target: test add binary vector and compact +# method: add vector and compact collection +# expected: status ok, vector added +# ''' +# ids = connect.bulk_insert(binary_collection, default_binary_entity) +# assert len(ids) == 1 +# connect.flush([binary_collection]) +# # get collection info before compact +# info = connect.get_collection_stats(binary_collection) +# size_before = info["partitions"][0]["segments"][0]["data_size"] +# status = connect.compact(binary_collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(binary_collection) +# size_after = info["partitions"][0]["segments"][0]["data_size"] +# assert(size_before == size_after) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_insert_and_compact(self, connect, binary_collection): +# ''' +# target: test add entities with binary vector and compact +# method: add entities and compact collection +# expected: status ok, entities added +# ''' +# ids = connect.bulk_insert(binary_collection, default_binary_entities) +# assert len(ids) == default_nb +# connect.flush([binary_collection]) +# # get collection info before compact +# info = connect.get_collection_stats(binary_collection) +# size_before = info["partitions"][0]["segments"][0]["data_size"] +# status = connect.compact(binary_collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(binary_collection) +# size_after = info["partitions"][0]["segments"][0]["data_size"] +# assert(size_before == size_after) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_insert_delete_part_and_compact(self, connect, binary_collection): +# ''' +# target: test add entities, delete part of them and compact +# method: add entities, delete a few and compact collection +# expected: status ok, data size is smaller after compact +# ''' +# ids = connect.bulk_insert(binary_collection, default_binary_entities) +# assert len(ids) == default_nb +# connect.flush([binary_collection]) +# delete_ids = [ids[0], ids[-1]] +# status = connect.delete_entity_by_id(binary_collection, delete_ids) +# assert status.OK() +# connect.flush([binary_collection]) +# # get collection info before compact +# info = connect.get_collection_stats(binary_collection) +# logging.getLogger().info(info["partitions"]) +# size_before = info["partitions"][0]["data_size"] +# logging.getLogger().info(size_before) +# status = connect.compact(binary_collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(binary_collection) +# logging.getLogger().info(info["partitions"]) +# size_after = info["partitions"][0]["data_size"] +# logging.getLogger().info(size_after) +# assert(size_before >= size_after) +# +# @pytest.mark.tags(CaseLabel.L2) +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_insert_delete_all_and_compact(self, connect, binary_collection): +# ''' +# target: test add entities, delete them and compact +# method: add entities, delete all and compact collection +# expected: status ok, no data size in collection info because collection is empty +# ''' +# ids = connect.bulk_insert(binary_collection, 
default_binary_entities) +# assert len(ids) == default_nb +# connect.flush([binary_collection]) +# status = connect.delete_entity_by_id(binary_collection, ids) +# assert status.OK() +# connect.flush([binary_collection]) +# # get collection info before compact +# info = connect.get_collection_stats(binary_collection) +# status = connect.compact(binary_collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(binary_collection) +# assert status.OK() +# logging.getLogger().info(info["partitions"]) +# assert not info["partitions"][0]["segments"] +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_add_entity_and_compact_twice(self, connect, binary_collection): +# ''' +# target: test add entity and compact twice +# method: add entity and compact collection twice +# expected: status ok +# ''' +# ids = connect.bulk_insert(binary_collection, default_binary_entity) +# assert len(ids) == 1 +# connect.flush([binary_collection]) +# # get collection info before compact +# info = connect.get_collection_stats(binary_collection) +# size_before = info["partitions"][0]["segments"][0]["data_size"] +# status = connect.compact(binary_collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(binary_collection) +# size_after = info["partitions"][0]["segments"][0]["data_size"] +# assert(size_before == size_after) +# status = connect.compact(binary_collection) +# assert status.OK() +# # get collection info after compact twice +# info = connect.get_collection_stats(binary_collection) +# size_after_twice = info["partitions"][0]["segments"][0]["data_size"] +# assert(size_after == size_after_twice) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_insert_delete_part_and_compact_twice(self, connect, binary_collection): +# ''' +# target: test add entities, delete part of them and compact twice +# method: add entities, delete part and compact collection twice +# expected: status ok, data size smaller after first compact, no change after second +# ''' +# ids = connect.bulk_insert(binary_collection, default_binary_entities) +# assert len(ids) == default_nb +# connect.flush([binary_collection]) +# delete_ids = [ids[0], ids[-1]] +# status = connect.delete_entity_by_id(binary_collection, delete_ids) +# assert status.OK() +# connect.flush([binary_collection]) +# # get collection info before compact +# info = connect.get_collection_stats(binary_collection) +# size_before = info["partitions"][0]["data_size"] +# status = connect.compact(binary_collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(binary_collection) +# size_after = info["partitions"][0]["data_size"] +# assert(size_before >= size_after) +# status = connect.compact(binary_collection) +# assert status.OK() +# # get collection info after compact twice +# info = connect.get_collection_stats(binary_collection) +# size_after_twice = info["partitions"][0]["data_size"] +# assert(size_after == size_after_twice) +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_compact_multi_collections(self, connect): +# ''' +# target: test compact works or not with multiple collections +# method: create 10 collections, add entities into them and compact in turn +# expected: status ok +# ''' +# nq = 100 +# num_collections = 10 +# tmp, entities = gen_binary_entities(nq) +# collection_list = [] +# for i in range(num_collections): +# collection_name = gen_unique_str("test_compact_multi_collection_%d" % i) +# 
collection_list.append(collection_name) +# connect.create_collection(collection_name, default_binary_fields) +# for i in range(num_collections): +# ids = connect.bulk_insert(collection_list[i], entities) +# assert len(ids) == nq +# status = connect.delete_entity_by_id(collection_list[i], [ids[0], ids[-1]]) +# assert status.OK() +# connect.flush([collection_list[i]]) +# status = connect.compact(collection_list[i]) +# assert status.OK() +# status = connect.drop_collection(collection_list[i]) +# assert status.OK() +# +# @pytest.mark.tags(CaseLabel.L2) +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_add_entity_after_compact(self, connect, binary_collection): +# ''' +# target: test add entity after compact +# method: after compact operation, add entity +# expected: status ok, entity added +# ''' +# ids = connect.bulk_insert(binary_collection, default_binary_entities) +# connect.flush([binary_collection]) +# # get collection info before compact +# info = connect.get_collection_stats(binary_collection) +# size_before = info["partitions"][0]["segments"][0]["data_size"] +# status = connect.compact(binary_collection) +# assert status.OK() +# # get collection info after compact +# info = connect.get_collection_stats(binary_collection) +# size_after = info["partitions"][0]["segments"][0]["data_size"] +# assert(size_before == size_after) +# ids = connect.bulk_insert(binary_collection, default_binary_entity) +# connect.flush([binary_collection]) +# res = connect.count_entities(binary_collection) +# assert res == default_nb + 1 +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_delete_entities_after_compact(self, connect, binary_collection): +# ''' +# target: test delete entities after compact +# method: after compact operation, delete entities +# expected: status ok, entities deleted +# ''' +# ids = connect.bulk_insert(binary_collection, default_binary_entities) +# connect.flush([binary_collection]) +# status = connect.compact(binary_collection) +# assert status.OK() +# connect.flush([binary_collection]) +# status = connect.delete_entity_by_id(binary_collection, ids) +# assert status.OK() +# connect.flush([binary_collection]) +# res = connect.count_entities(binary_collection) +# assert res == 0 +# +# @pytest.mark.tags(CaseLabel.L2) +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_search_after_compact(self, connect, binary_collection): +# ''' +# target: test search after compact +# method: after compact operation, search vector +# expected: status ok +# ''' +# ids = connect.bulk_insert(binary_collection, default_binary_entities) +# assert len(ids) == default_nb +# connect.flush([binary_collection]) +# status = connect.compact(binary_collection) +# assert status.OK() +# query_vecs = [default_raw_binary_vectors[0]] +# distance = jaccard(query_vecs[0], default_raw_binary_vectors[0]) +# query = copy.deepcopy(default_binary_single_query) +# query["bool"]["must"][0]["vector"][binary_field_name]["query"] = [default_binary_entities[-1]["values"][0], +# default_binary_entities[-1]["values"][-1]] +# +# res = connect.search(binary_collection, query) +# assert abs(res[0]._distances[0]-distance) <= epsilon +# +# @pytest.mark.timeout(COMPACT_TIMEOUT) +# def test_search_after_compact_ip(self, connect, collection): +# ''' +# target: test search after compact +# method: after compact operation, search vector +# expected: status ok +# ''' +# ids = connect.bulk_insert(collection, default_entities) +# assert len(ids) == default_nb +# connect.flush([collection]) +# status = connect.compact(collection) +# query 
= ip_query() +# query["bool"]["must"][0]["vector"][field_name]["query"] = [default_entity[-1]["values"][0], +# default_entities[-1]["values"][0], +# default_entities[-1]["values"][-1]] +# res = connect.search(collection, query) +# logging.getLogger().info(res) +# assert len(res) == len(query["bool"]["must"][0]["vector"][field_name]["query"]) +# assert res[0]._distances[0] < 1 - epsilon +# assert res[1]._distances[0] > 1 - epsilon +# assert res[2]._distances[0] > 1 - epsilon diff --git a/tests/python_client/testcases/test_config.py b/tests/python_client/testcases/test_config.py new file mode 100644 index 0000000000..6a268ee154 --- /dev/null +++ b/tests/python_client/testcases/test_config.py @@ -0,0 +1,1402 @@ +import time +import random +import pdb +import threading +import logging +from multiprocessing import Pool, Process +import pytest +from utils.utils import * +import ujson + +CONFIG_TIMEOUT = 80 + +# class TestCacheConfig: +# """ +# ****************************************************************** +# The following cases are used to test `get_config` function +# ****************************************************************** +# """ +# @pytest.fixture(scope="function", autouse=True) +# def skip_http_check(self, args): +# if args["handler"] == "HTTP": +# pytest.skip("skip in http mode") +# +# @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.") +# @pytest.mark.timeout(CONFIG_TIMEOUT) +# def reset_configs(self, connect): +# ''' +# reset configs so the tests are stable +# ''' +# relpy = connect.set_config("cache.cache_size", '4GB') +# config_value = connect.get_config("cache.cache_size") +# assert config_value == '4GB' +# #relpy = connect.set_config("cache", "insert_buffer_size", '2GB') +# #config_value = connect.get_config("cache", "insert_buffer_size") +# #assert config_value == '1073741824' +# +# @pytest.mark.timeout(CONFIG_TIMEOUT) +# def test_get_cache_size_invalid_parent_key(self, connect, collection): +# ''' +# target: get invalid parent key +# method: call get_config without parent_key: cache +# expected: status not ok +# ''' +# invalid_configs = ["Cache_config", "cache config", "cache_Config", "cacheconfig"] +# for config in invalid_configs: +# with pytest.raises(Exception) as e: +# config_value = connect.get_config(config+str(".cache_size")) +# +# @pytest.mark.timeout(CONFIG_TIMEOUT) +# def test_get_cache_size_invalid_child_key(self, connect, collection): +# ''' +# target: get invalid child key +# method: call get_config without child_key: cache_size +# expected: status not ok +# ''' +# invalid_configs = ["Cpu_cache_size", "cpu cache_size", "cpucachecapacity"] +# for config in invalid_configs: +# with pytest.raises(Exception) as e: +# config_value = connect.get_config("cache."+config) +# +# @pytest.mark.timeout(CONFIG_TIMEOUT) +# def test_get_cache_size_valid(self, connect, collection): +# ''' +# target: get cache_size +# method: call get_config correctly +# expected: status ok +# ''' +# config_value = connect.get_config("cache.cache_size") +# assert config_value +# +# @pytest.mark.tags(CaseLabel.L2) +# def test_get_insert_buffer_size_invalid_parent_key(self, connect, collection): +# ''' +# target: get invalid parent key +# method: call get_config without parent_key: cache +# expected: status not ok +# ''' +# invalid_configs = ["Cache_config", "cache config", "cache_Config", "cacheconfig"] +# for config in invalid_configs: +# with pytest.raises(Exception) as e: +# config_value = connect.get_config(config+".insert_buffer_size") +# +# 
@pytest.mark.tags(CaseLabel.L2)
+#     def test_get_insert_buffer_size_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: insert_buffer_size
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["Insert_buffer size", "insert buffer_size", "insertbuffersize"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("cache."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_insert_buffer_size_valid(self, connect, collection):
+#         '''
+#         target: get insert_buffer_size
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("cache.insert_buffer_size")
+#         assert config_value
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_preload_collection_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: preload_collection
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["preloadtable", "preload collection "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("cache."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_preload_collection_valid(self, connect, collection):
+#         '''
+#         target: get preload_collection
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("cache.preload_collection")
+#         assert config_value == ''
+#
+#     """
+#     ******************************************************************
+#     The following cases are used to test `set_config` function
+#     ******************************************************************
+#     """
+#     def get_memory_available(self, connect):
+#         info = connect._cmd("get_system_info")
+#         mem_info = ujson.loads(info)
+#         mem_total = int(mem_info["memory_total"])
+#         mem_used = int(mem_info["memory_used"])
+#         logging.getLogger().info(mem_total)
+#         logging.getLogger().info(mem_used)
+#         mem_available = mem_total - mem_used
+#         return int(mem_available / 1024 / 1024 / 1024)
+#
+#     def get_memory_total(self, connect):
+#         info = connect._cmd("get_system_info")
+#         mem_info = ujson.loads(info)
+#         mem_total = int(mem_info["memory_total"])
+#         return int(mem_total / 1024 / 1024 / 1024)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_cache_size_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: set invalid parent key
+#         method: call set_config without parent_key: cache
+#         expected: status not ok
+#         '''
+#         self.reset_configs(connect)
+#         invalid_configs = ["Cache_config", "cache config", "cache_Config", "cacheconfig"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config(config+".cache_size", '4294967296')
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_cache_invalid_child_key(self, connect, collection):
+#         '''
+#         target: set invalid child key
+#         method: call set_config with invalid child_key
+#         expected: status not ok
+#         '''
+#         self.reset_configs(connect)
+#         invalid_configs = ["abc", 1]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("cache."+config, '4294967296')
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_cache_size_valid(self, connect, collection):
+#         '''
+#         target: set cache_size
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         self.reset_configs(connect)
+#         reply = connect.set_config("cache.cache_size", '2147483648')
+#         config_value = connect.get_config("cache.cache_size")
+#         assert config_value == '2GB'
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_set_cache_size_valid_multiple_times(self, connect, collection):
+#         '''
+#         target: set cache_size
+#         method: call set_config correctly and repeatedly
+#         expected: status ok
+#         '''
+#         self.reset_configs(connect)
+#         for i in range(20):
+#             reply = connect.set_config("cache.cache_size", '4294967296')
+#             config_value = connect.get_config("cache.cache_size")
+#             assert config_value == '4294967296'
+#         for i in range(20):
+#             reply = connect.set_config("cache.cache_size", '2147483648')
+#             config_value = connect.get_config("cache.cache_size")
+#             assert config_value == '2147483648'
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_set_insert_buffer_size_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: set invalid parent key
+#         method: call set_config without parent_key: cache
+#         expected: status not ok
+#         '''
+#         self.reset_configs(connect)
+#         invalid_configs = ["Cache_config", "cache config", "cache_Config", "cacheconfig"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config(config+".insert_buffer_size", '1073741824')
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_insert_buffer_size_valid(self, connect, collection):
+#         '''
+#         target: set insert_buffer_size
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         self.reset_configs(connect)
+#         reply = connect.set_config("cache.insert_buffer_size", '2GB')
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_set_insert_buffer_size_valid_multiple_times(self, connect, collection):
+#         '''
+#         target: set insert_buffer_size
+#         method: call set_config correctly and repeatedly
+#         expected: status ok
+#         '''
+#         self.reset_configs(connect)
+#         for i in range(20):
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("cache.insert_buffer_size", '1GB')
+#         for i in range(20):
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("cache.insert_buffer_size", '2GB')
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_cache_out_of_memory_value_A(self, connect, collection):
+#         '''
+#         target: set cache_size / insert_buffer_size to be out-of-memory
+#         method: call set_config with child values bigger than current system memory
+#         expected: status not ok (cache_size + insert_buffer_size < system memory)
+#         '''
+#         self.reset_configs(connect)
+#         mem_total = self.get_memory_total(connect)
+#         logging.getLogger().info(mem_total)
+#         with pytest.raises(Exception) as e:
+#             reply = connect.set_config("cache.cache_size", str(int(mem_total) + 1))
+#
+#
+#
+# class TestGPUConfig:
+#     """
+#     ******************************************************************
+#     The following cases are used to test `get_config` function
+#     ******************************************************************
+#     """
+#     @pytest.fixture(scope="function", autouse=True)
+#     def skip_http_check(self, args):
+#         if args["handler"] == "HTTP":
+#             pytest.skip("skip in http mode")
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_gpu_search_threshold_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: get invalid parent key
+#         method: call get_config without parent_key: gpu
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Engine_config", "engine config"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config(config+".gpu_search_threshold")
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_gpu_search_threshold_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: gpu_search_threshold
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Gpu_search threshold", "gpusearchthreshold"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("gpu."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_gpu_search_threshold_valid(self, connect, collection):
+#         '''
+#         target: get gpu_search_threshold
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         config_value = connect.get_config("gpu.gpu_search_threshold")
+#         assert config_value
+#
+#     """
+#     ******************************************************************
+#     The following cases are used to test `set_config` function
+#     ******************************************************************
+#     """
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_gpu_invalid_child_key(self, connect, collection):
+#         '''
+#         target: set invalid child key
+#         method: call set_config with invalid child_key
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["abc", 1]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("gpu."+config, 1000)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_gpu_search_threshold_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: set invalid parent key
+#         method: call set_config without parent_key: gpu
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Engine_config", "engine config"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config(config+".gpu_search_threshold", 1000)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_gpu_search_threshold_valid(self, connect, collection):
+#         '''
+#         target: set gpu_search_threshold
+#         method: call set_config correctly
+#         expected: status ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         reply = connect.set_config("gpu.gpu_search_threshold", 2000)
+#         config_value = connect.get_config("gpu.gpu_search_threshold")
+#         assert config_value == '2000'
+#
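+#     # Illustrative sketch, not a test case: every set/get case above follows one
+#     # round-trip pattern against the legacy dotted "parent.child" config API,
+#     # where values are read back as strings. Assuming that API, a hypothetical
+#     # helper would look like:
+#     #
+#     #     def check_config_roundtrip(connect, key, value):
+#     #         # e.g. key = "gpu.gpu_search_threshold", value = 2000
+#     #         reply = connect.set_config(key, value)
+#     #         assert connect.get_config(key) == str(value)
+#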
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_gpu_invalid_values(self, connect, collection):
+#         '''
+#         target: set gpu
+#         method: call set_config with invalid child values
+#         expected: status not ok
+#         '''
+#         for i in [-1, "1000\n", "1000\t", "1000.0", 1000.35]:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("gpu.use_blas_threshold", i)
+#             if str(connect._cmd("mode")) == "GPU":
+#                 with pytest.raises(Exception) as e:
+#                     reply = connect.set_config("gpu.gpu_search_threshold", i)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def reset_configs(self, connect):
+#         '''
+#         reset configs so the tests are stable
+#         '''
+#         reply = connect.set_config("gpu.cache_size", 1)
+#         config_value = connect.get_config("gpu.cache_size")
+#         assert config_value == '1'
+#
+#         # the following settings cannot be changed
+#         # reply = connect.set_config("gpu", "enable", "true")
+#         # config_value = connect.get_config("gpu", "enable")
+#         # assert config_value == "true"
+#         # reply = connect.set_config("gpu", "search_devices", "gpu0")
+#         # config_value = connect.get_config("gpu", "search_devices")
+#         # assert config_value == 'gpu0'
+#         # reply = connect.set_config("gpu", "build_index_devices", "gpu0")
+#         # config_value = connect.get_config("gpu", "build_index_devices")
+#         # assert config_value == 'gpu0'
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_gpu_enable_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: get invalid parent key
+#         method: call get_config without parent_key: gpu
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+#                            "gpu_resource"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config(config+".enable")
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_gpu_enable_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: enable
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Enab_le", "enab_le ", "disable", "true"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("gpu."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_gpu_enable_valid(self, connect, collection):
+#         '''
+#         target: get enable status
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         config_value = connect.get_config("gpu.enable")
+#         assert config_value == "true" or config_value == "false"
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_cache_size_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: get invalid parent key
+#         method: call get_config without parent_key: gpu
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+#                            "gpu_resource"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config(config+".cache_size")
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_cache_size_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: cache_size
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Cache_capacity", "cachecapacity"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("gpu."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_cache_size_valid(self, connect, collection):
+#         '''
+#         target: get cache_size
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         config_value = connect.get_config("gpu.cache_size")
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_search_devices_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: get invalid parent key
+#         method: call get_config without parent_key: gpu
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+#                            "gpu_resource"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config(config+".search_devices")
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_search_devices_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: search_devices
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Search_resources"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("gpu."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_search_devices_valid(self, connect, collection):
+#         '''
+#         target: get search_devices
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         config_value = connect.get_config("gpu.search_devices")
+#         logging.getLogger().info(config_value)
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_build_index_devices_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: get invalid parent key
+#         method: call get_config without parent_key: gpu
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+#                            "gpu_resource"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config(config+".build_index_devices")
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_build_index_devices_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: build_index_devices
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Build_index_resources"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("gpu."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_build_index_devices_valid(self, connect, collection):
+#         '''
+#         target: get build_index_devices
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         config_value = connect.get_config("gpu.build_index_devices")
+#         logging.getLogger().info(config_value)
+#         assert config_value
+#
+#     """
+#     ******************************************************************
+#     The following cases are used to test `set_config` function
+#     ******************************************************************
+#     """
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_gpu_enable_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: set invalid parent key
+#         method: call set_config without parent_key: gpu
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+#                            "gpu_resource"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config(config+".enable", "true")
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_gpu_resource_invalid_child_key(self, connect, collection):
+#         '''
+#         target: set invalid child key
+#         method: call set_config with invalid child_key
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+#                            "gpu_resource"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("gpu."+config, "true")
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_gpu_enable_invalid_values(self, connect, collection):
+#         '''
+#         target: set "enable" param
+#         method: call set_config with invalid child values
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         for i in [-1, -2, 100]:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("gpu.enable", i)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_gpu_enable_valid(self, connect, collection):
+#         '''
+#         target: set "enable" param
+#         method: call set_config correctly
+#         expected: status ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         valid_configs = ["off", "False", "0", "nO", "on", "True", 1, "yES"]
+#         for config in valid_configs:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("gpu.enable", config)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_cache_size_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: set invalid parent key
+#         method: call set_config without parent_key: gpu
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+#                            "gpu_resource"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config(config+".cache_size", 2)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_cache_size_valid(self, connect, collection):
+#         '''
+#         target: set cache_size
+#         method: call set_config correctly
+#         expected: status ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         reply = connect.set_config("gpu.cache_size", 2)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_cache_size_invalid_values(self, connect, collection):
+#         '''
+#         target: set cache_size
+#         method: call set_config with invalid child values
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         self.reset_configs(connect)
+#         for i in [-1, "1\n", "1\t"]:
+#             logging.getLogger().info(i)
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("gpu.cache_size", i)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_search_devices_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: set invalid parent key
+#         method: call set_config without parent_key: gpu
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+#                            "gpu_resource"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config(config+".search_devices", "gpu0")
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_search_devices_valid(self, connect, collection):
+#         '''
+#         target: set search_devices
+#         method: call set_config correctly
+#         expected: status ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         with pytest.raises(Exception) as e:
+#             reply = connect.set_config("gpu.search_devices", "gpu0")
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_search_devices_invalid_values(self, connect, collection):
+#         '''
+#         target: set search_devices
+#         method: call set_config with invalid child values
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         for i in [-1, "10", "gpu-1", "gpu0, gpu1", "gpu22,gpu44", "gpu10000", "gpu 0", "-gpu0"]:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("gpu.search_devices", i)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_build_index_devices_invalid_parent_key(self, connect, collection):
+#         '''
+#         target: set invalid parent key
+#         method: call set_config without parent_key: gpu
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         invalid_configs = ["Gpu_resource_config", "gpu resource config", \
+#                            "gpu_resource"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config(config+".build_index_devices", "gpu0")
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_build_index_devices_valid(self, connect, collection):
+#         '''
+#         target: set build_index_devices
+#         method: call set_config correctly
+#         expected: status ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         with pytest.raises(Exception) as e:
+#             reply = connect.set_config("gpu.build_index_devices", "gpu0")
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_build_index_devices_invalid_values(self, connect, collection):
+#         '''
+#         target: set build_index_devices
+#         method: call set_config with invalid child values
+#         expected: status not ok
+#         '''
+#         if str(connect._cmd("mode")) == "CPU":
+#             pytest.skip("Only support GPU mode")
+#         for i in [-1, "10", "gpu-1", "gpu0, gpu1", "gpu22,gpu44", "gpu10000", "gpu 0", "-gpu0"]:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("gpu.build_index_devices", i)
+#         self.reset_configs(connect)
+#
+#
+# class TestNetworkConfig:
+#     """
+#     ******************************************************************
+#     The following cases are used to test `get_config` function
+#     ******************************************************************
+#     """
+#     @pytest.fixture(scope="function", autouse=True)
+#     def skip_http_check(self, args):
+#         if args["handler"] == "HTTP":
+#             pytest.skip("skip in http mode")
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_address_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: address
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["Address", "addresses", "address "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("network."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_address_valid(self, connect, collection):
+#         '''
+#         target: get address
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("network.bind.address")
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_port_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: port
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["Port", "PORT", "port "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("network."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_port_valid(self, connect, collection):
+#         '''
+#         target: get port
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("network.http.port")
+#         assert config_value
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_http_port_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: http.port
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["webport", "Web_port", "http port "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("network."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_http_port_valid(self, connect, collection):
+#         '''
+#         target: get http.port
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("network.http.port")
+#         assert config_value
+#
+#     """
+#     ******************************************************************
+#     The following cases are used to test `set_config` function
+#     ******************************************************************
+#     """
+#     def gen_valid_timezones(self):
+#         timezones = []
+#         for i in range(0, 13):
+#             timezones.append("UTC+" + str(i))
+#             timezones.append("UTC-" + str(i))
+#         timezones.extend(["UTC+13", "UTC+14"])
+#         return timezones
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_network_invalid_child_key(self, connect, collection):
+#         '''
+#         target: set invalid child key
+#         method: call set_config with invalid child_key
+#         expected: status not ok
+#         '''
+#         with pytest.raises(Exception) as e:
+#             reply = connect.set_config("network.child_key", 19530)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_address_valid(self, connect, collection):
+#         '''
+#         target: set address
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         reply = connect.set_config("network.bind.address", '0.0.0.0')
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_port_valid(self, connect, collection):
+#         '''
+#         target: set port
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         for valid_port in [1025, 65534, 12345, "19530"]:
+#             reply = connect.set_config("network.http.port", valid_port)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_port_invalid(self, connect, collection):
+#         '''
+#         target: set port
+#         method: call set_config with port number out of range(1024, 65535)
+#         expected: status not ok
+#         '''
+#         for invalid_port in [1024, 65535, "0", "True", "100000"]:
+#             logging.getLogger().info(invalid_port)
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("network.http.port", invalid_port)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_http_port_valid(self, connect, collection):
+#         '''
+#         target: set http.port
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         for valid_http_port in [1025, 65534, "12345", 19121]:
+#             reply = connect.set_config("network.http.port", valid_http_port)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_http_port_invalid(self, connect, collection):
+#         '''
+#         target: set http.port
+#         method: call set_config with http.port number out of range(1024, 65535)
+#         expected: status not ok
+#         '''
+#         for invalid_http_port in [1024, 65535, "0", "True", "1000000"]:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("network.http.port", invalid_http_port)
+#
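+#     # Illustrative sketch, not a test case: the port cases above assume the
+#     # server accepts only values strictly inside (1024, 65535), whether given
+#     # as int or string. A hypothetical validator for that rule:
+#     #
+#     #     def is_valid_port(value):
+#     #         try:
+#     #             port = int(value)
+#     #         except (TypeError, ValueError):
+#     #             return False            # e.g. "True" is rejected
+#     #         return 1024 < port < 65535  # 1024 and 65535 themselves are invalid
+#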
+#
+# class TestGeneralConfig:
+#     """
+#     ******************************************************************
+#     The following cases are used to test `get_config` function
+#     ******************************************************************
+#     """
+#     @pytest.fixture(scope="function", autouse=True)
+#     def skip_http_check(self, args):
+#         if args["handler"] == "HTTP":
+#             pytest.skip("skip in http mode")
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_meta_uri_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: meta_uri
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["backend_Url", "backend-url", "meta uri "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("general."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_meta_uri_valid(self, connect, collection):
+#         '''
+#         target: get meta_uri
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("general.meta_uri")
+#         assert config_value
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_timezone_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: timezone
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["time", "time_zone "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("general."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_timezone_valid(self, connect, collection):
+#         '''
+#         target: get timezone
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("general.timezone")
+#         assert "UTC" in config_value
+#
+#     """
+#     ******************************************************************
+#     The following cases are used to test `set_config` function
+#     ******************************************************************
+#     """
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_timezone_invalid(self, connect, collection):
+#         '''
+#         target: set timezone
+#         method: call set_config with invalid timezone
+#         expected: status not ok
+#         '''
+#         for invalid_timezone in ["utc++8", "UTC++8"]:
+#             logging.getLogger().info(invalid_timezone)
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("general.timezone", invalid_timezone)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_general_invalid_child_key(self, connect, collection):
+#         '''
+#         target: set invalid child key
+#         method: call set_config with invalid child_key
+#         expected: status not ok
+#         '''
+#         with pytest.raises(Exception) as e:
+#             reply = connect.set_config("general.child_key", 1)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_meta_uri_valid(self, connect, collection):
+#         '''
+#         target: set meta_uri
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         reply = connect.set_config("general.meta_uri", 'sqlite://:@:/')
+#
+#
+# class TestStorageConfig:
+#     """
+#     ******************************************************************
+#     The following cases are used to test `get_config` function
+#     ******************************************************************
+#     """
+#     @pytest.fixture(scope="function", autouse=True)
+#     def skip_http_check(self, args):
+#         if args["handler"] == "HTTP":
+#             pytest.skip("skip in http mode")
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_path_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: path
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["Primary_path", "primarypath", "pa_th "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("storage."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_path_valid(self, connect, collection):
+#         '''
+#         target: get path
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("storage.path")
+#         assert config_value
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_auto_flush_interval_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: auto_flush_interval
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["autoFlushInterval", "auto_flush", "auto_flush interval "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("storage."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_auto_flush_interval_valid(self, connect, collection):
+#         '''
+#         target: get auto_flush_interval
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("storage.auto_flush_interval")
+#
+#     """
+#     ******************************************************************
+#     The following cases are used to test `set_config` function
+#     ******************************************************************
+#     """
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_storage_invalid_child_key(self, connect, collection):
+#         '''
+#         target: set invalid child key
+#         method: call set_config with invalid child_key
+#         expected: status not ok
+#         '''
+#         with pytest.raises(Exception) as e:
+#             reply = connect.set_config("storage.child_key", "")
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_path_valid(self, connect, collection):
+#         '''
+#         target: set path
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         reply = connect.set_config("storage.path", '/var/lib/milvus')
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_auto_flush_interval_valid(self, connect, collection):
+#         '''
+#         target: set auto_flush_interval
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         for valid_auto_flush_interval in [2, 1]:
+#             logging.getLogger().info(valid_auto_flush_interval)
+#             reply = connect.set_config("storage.auto_flush_interval", valid_auto_flush_interval)
+#             config_value = connect.get_config("storage.auto_flush_interval")
+#             assert config_value == str(valid_auto_flush_interval)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_auto_flush_interval_invalid(self, connect, collection):
+#         '''
+#         target: set auto_flush_interval
+#         method: call set_config with invalid auto_flush_interval
+#         expected: status not ok
+#         '''
+#         for invalid_auto_flush_interval in [-1, "1.5", "invalid", "1+2"]:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("storage.auto_flush_interval", invalid_auto_flush_interval)
+#
+#
+# class TestMetricConfig:
+#     """
+#     ******************************************************************
+#     The following cases are used to test `get_config` function
+#     ******************************************************************
+#     """
+#     @pytest.fixture(scope="function", autouse=True)
+#     def skip_http_check(self, args):
+#         if args["handler"] == "HTTP":
+#             pytest.skip("skip in http mode")
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_enable_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: enable
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["enablemonitor", "Enable_monitor", "en able "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("metric."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_enable_valid(self, connect, collection):
+#         '''
+#         target: get enable
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("metric.enable")
+#         assert config_value
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_address_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: address
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["Add ress", "addresses", "add ress "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("metric."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_address_valid(self, connect, collection):
+#         '''
+#         target: get address
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("metric.address")
+#         assert config_value
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_port_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: port
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["Po_rt", "PO_RT", "po_rt "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("metric."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_port_valid(self, connect, collection):
+#         '''
+#         target: get port
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("metric.port")
+#         assert config_value
+#
+#     """
+#     ******************************************************************
+#     The following cases are used to test `set_config` function
+#     ******************************************************************
+#     """
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_metric_invalid_child_key(self, connect, collection):
+#         '''
+#         target: set invalid child key
+#         method: call set_config with invalid child_key
+#         expected: status not ok
+#         '''
+#         with pytest.raises(Exception) as e:
+#             reply = connect.set_config("metric.child_key", 19530)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_enable_valid(self, connect, collection):
+#         '''
+#         target: set enable
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         for valid_enable in ["false", "true"]:
+#             reply = connect.set_config("metric.enable", valid_enable)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_address_valid(self, connect, collection):
+#         '''
+#         target: set address
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         reply = connect.set_config("metric.address", '127.0.0.1')
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_port_valid(self, connect, collection):
+#         '''
+#         target: set port
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         for valid_port in [1025, 65534, "19530", "9091"]:
+#             reply = connect.set_config("metric.port", valid_port)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_port_invalid(self, connect, collection):
+#         '''
+#         target: set port
+#         method: call set_config with port number out of range(1024, 65535), or same as http.port number
+#         expected: status not ok
+#         '''
+#         for invalid_port in [1024, 65535, "0", "True", "100000"]:
+#             with pytest.raises(Exception) as e:
+#                 reply = connect.set_config("metric.port", invalid_port)
+#
+#
+# class TestWALConfig:
+#     """
+#     ******************************************************************
+#     The following cases are used to test `get_config` function
+#     ******************************************************************
+#     """
+#     @pytest.fixture(scope="function", autouse=True)
+#     def skip_http_check(self, args):
+#         if args["handler"] == "HTTP":
+#             pytest.skip("skip in http mode")
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_enable_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: enable
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["enabled", "Enab_le", "enable_"]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("wal."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_enable_valid(self, connect, collection):
+#         '''
+#         target: get enable
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("wal.enable")
+#         assert config_value
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_recovery_error_ignore_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: recovery_error_ignore
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["recovery-error-ignore", "Recovery error_ignore", "recoveryxerror_ignore "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("wal."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_recovery_error_ignore_valid(self, connect, collection):
+#         '''
+#         target: get recovery_error_ignore
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("wal.recovery_error_ignore")
+#         assert config_value
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_buffer_size_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: buffer_size
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["buffersize", "Buffer size", "buffer size "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("wal."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_buffer_size_valid(self, connect, collection):
+#         '''
+#         target: get buffer_size
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("wal.buffer_size")
+#         assert config_value
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_get_wal_path_invalid_child_key(self, connect, collection):
+#         '''
+#         target: get invalid child key
+#         method: call get_config without child_key: wal_path
+#         expected: status not ok
+#         '''
+#         invalid_configs = ["wal", "Wal_path", "wal_path "]
+#         for config in invalid_configs:
+#             with pytest.raises(Exception) as e:
+#                 config_value = connect.get_config("wal."+config)
+#
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_get_wal_path_valid(self, connect, collection):
+#         '''
+#         target: get wal_path
+#         method: call get_config correctly
+#         expected: status ok
+#         '''
+#         config_value = connect.get_config("wal.path")
+#         assert config_value
+#
+#     """
+#     ******************************************************************
+#     The following cases are used to test `set_config` function
+#     ******************************************************************
+#     """
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_wal_invalid_child_key(self, connect, collection):
+#         '''
+#         target: set invalid child key
+#         method: call set_config with invalid child_key
+#         expected: status not ok
+#         '''
+#         with pytest.raises(Exception) as e:
+#             reply = connect.set_config("wal.child_key", 256)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_enable_valid(self, connect, collection):
+#         '''
+#         target: set enable
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         for valid_enable in ["false", "true"]:
+#             reply = connect.set_config("wal.enable", valid_enable)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_recovery_error_ignore_valid(self, connect, collection):
+#         '''
+#         target: set recovery_error_ignore
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         for valid_recovery_error_ignore in ["false", "true"]:
+#             reply = connect.set_config("wal.recovery_error_ignore", valid_recovery_error_ignore)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     def test_set_buffer_size_valid_A(self, connect, collection):
+#         '''
+#         target: set buffer_size
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         for valid_buffer_size in ["64MB", "128MB", "4096MB", "1000MB", "256MB"]:
+#             reply = connect.set_config("wal.buffer_size", valid_buffer_size)
+#
+#     @pytest.mark.skip(reason="overwrite config file is not supported in ci yet.")
+#     @pytest.mark.timeout(CONFIG_TIMEOUT)
+#     def test_set_wal_path_valid(self, connect, collection, args):
+#         '''
+#         target: set wal_path
+#         method: call set_config correctly
+#         expected: status ok, set successfully
+#         '''
+#         reply = connect.set_config("wal.path", "/var/lib/milvus/wal")
+
diff --git a/tests/python_client/testcases/test_connect.py b/tests/python_client/testcases/test_connect.py
new file mode 100644
index 0000000000..5accdbc8c3
--- /dev/null
+++ b/tests/python_client/testcases/test_connect.py
@@ -0,0 +1,233 @@
+import pytest
+import pdb
+import threading
+from multiprocessing import Process
+import concurrent.futures
+from utils.utils import *
+
+CONNECT_TIMEOUT = 12
+
+
+class TestConnect:
+
+    def local_ip(self, args):
+        '''
+        check if the configured ip is localhost or not
+        '''
+        if not args["ip"] or args["ip"] == 'localhost' or args["ip"] == "127.0.0.1":
+            return True
+        else:
+            return False
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_close(self, connect):
+        '''
+        target: test disconnect
+        method: disconnect a connected client
+        expected: connect failed after disconnected
+        '''
+        connect.close()
+        with pytest.raises(Exception) as e:
+            connect.list_collections()
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_close_repeatedly(self, dis_connect, args):
+        '''
+        target: test disconnect repeatedly
+        method: disconnect a connected client, then disconnect again
+        expected: raise an error after disconnected
+        '''
+        with pytest.raises(Exception) as e:
+            dis_connect.close()
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_connect_correct_ip_port(self, args):
+        '''
+        target: test connect with correct ip and port value
+        method: set correct ip and port
+        expected: connected is True
+        '''
+        milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
+
+    # TODO: currently we only test with a remote IP; localhost testing needs to be added
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_connect_ip_localhost(self, args):
+        '''
+        target: test connect with ip value: localhost
+        method: set host localhost
+        expected: connected is True
+        '''
+        milvus = get_milvus(args["ip"], args["port"], args["handler"])
+        # milvus.connect(host='localhost', port=args["port"])
+        # assert milvus.connected()
+
+    @pytest.mark.timeout(CONNECT_TIMEOUT)
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_connect_wrong_ip_null(self, args):
+        '''
+        target: test connect with wrong ip value
+        method: set host null
+        expected: not use default ip, connected is False
+        '''
+        ip = ""
+        with pytest.raises(Exception) as e:
+            get_milvus(ip, args["port"], args["handler"])
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_connect_uri(self, args):
+        '''
+        target: test connect with correct uri
+        method: uri format and value are both correct
+        expected: connected is True
+        '''
+        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
+        milvus = get_milvus(args["ip"], args["port"], uri=uri_value, handler=args["handler"])
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_connect_uri_null(self, args):
+        '''
+        target: test connect with null uri
+        method: uri set null
+        expected: connected is True
+        '''
+        uri_value = ""
+        if self.local_ip(args):
+            milvus = get_milvus(None, None, uri=uri_value, handler=args["handler"])
+        else:
+            with pytest.raises(Exception) as e:
+                milvus = get_milvus(None, None, uri=uri_value, handler=args["handler"])
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_connect_with_multiprocess(self, args):
+        '''
+        target: test uri connect with multiprocess
+        method: set correct uri, test with multiprocessing connecting
+        expected: all connections are connected
+        '''
+        def connect():
+            milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
+            assert milvus
+
+        with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
+            future_results = {executor.submit(
+                connect): i for i in range(100)}
+            for future in concurrent.futures.as_completed(future_results):
+                future.result()
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_connect_repeatedly(self, args):
+        '''
+        target: test connect repeatedly
+        method: connect again
+        expected: status.code is 0, and status.message shows it is already connected
+        '''
+        uri_value = "tcp://%s:%s" % (args["ip"], args["port"])
+        milvus = Milvus(uri=uri_value, handler=args["handler"])
+        milvus = Milvus(uri=uri_value, handler=args["handler"])
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_add_vector_and_disconnect_concurrently(self):
+        '''
+        Target: test disconnect in the middle of add vectors
+        Method:
+            a. use coroutine or multi-processing, to simulate network crashing
+            b. data_set not too large, in case disconnection happens while data is still being prepared
+            c. data_set not too small, in case disconnection happens after data has already been transferred
+            d. make sure disconnection happens when data is in-transport
+        Expected: Failure, count_entities == 0
+
+        '''
+        pass
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_search_vector_and_disconnect_concurrently(self):
+        '''
+        Target: Test disconnect in the middle of search vectors (with large nq and topk) multiple times, and search/add vectors still work
+        Method:
+            a. coroutine or multi-processing, to simulate network crashing
+            b. connect, search and disconnect, repeating many times
+            c. connect and search, add vectors
+        Expected: Successfully searched back, successfully added
+
+        '''
+        pass
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_thread_safe_with_one_connection_shared_in_multi_threads(self):
+        '''
+        Target: test 1 connection thread safe
+        Method: 1 connection shared in multi-threads, all adding vectors, or other things
+        Expected: Functional as one thread
+
+        '''
+        pass
+
+
+class TestConnectIPInvalid(object):
+    """
+    Test connect server with invalid ip
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_ips()
+    )
+    def get_invalid_ip(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.timeout(CONNECT_TIMEOUT)
+    def test_connect_with_invalid_ip(self, args, get_invalid_ip):
+        ip = get_invalid_ip
+        with pytest.raises(Exception) as e:
+            milvus = get_milvus(ip, args["port"], args["handler"])
+
+
+class TestConnectPortInvalid(object):
+    """
+    Test connect server with invalid port
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_ints()
+    )
+    def get_invalid_port(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.timeout(CONNECT_TIMEOUT)
+    def test_connect_with_invalid_port(self, args, get_invalid_port):
+        '''
+        target: test ip:port connect with invalid port value
+        method: set port from gen_invalid_ints
+        expected: connected is False
+        '''
+        port = get_invalid_port
+        with pytest.raises(Exception) as e:
+            milvus = get_milvus(args["ip"], port, args["handler"])
+
+
+class TestConnectURIInvalid(object):
+    """
+    Test connect server with invalid uri
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_uris()
+    )
+    def get_invalid_uri(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.timeout(CONNECT_TIMEOUT)
+    def test_connect_with_invalid_uri(self, get_invalid_uri, args):
+        '''
+        target: test uri connect with invalid uri value
+        method: set uri from gen_invalid_uris
+        expected: connected is False
+        '''
+        uri_value = get_invalid_uri
+        with pytest.raises(Exception) as e:
+            milvus = get_milvus(uri=uri_value, handler=args["handler"])
diff --git a/tests20/python_client/testcases/test_connection.py b/tests/python_client/testcases/test_connection_20.py
similarity index 99%
rename from tests20/python_client/testcases/test_connection.py
rename to tests/python_client/testcases/test_connection_20.py
index 7c025bc6f5..e6ef6a0ebc 100644
--- a/tests20/python_client/testcases/test_connection.py
+++ b/tests/python_client/testcases/test_connection_20.py
@@ -1,5 +1,5 @@
 import pytest
-from pymilvus_orm.default_config import DefaultConfig
+from pymilvus import DefaultConfig
 
 from base.client_base import TestcaseBase
 from utils.util_log import test_log as log
diff --git a/tests20/python_client/testcases/test_e2e.py b/tests/python_client/testcases/test_e2e_20.py
similarity index 100%
rename from tests20/python_client/testcases/test_e2e.py
rename to tests/python_client/testcases/test_e2e_20.py
diff --git a/tests/python_client/testcases/test_flush.py b/tests/python_client/testcases/test_flush.py
new file mode 100644
index 0000000000..cc2b4448fc
--- /dev/null
+++ b/tests/python_client/testcases/test_flush.py
@@ -0,0 +1,394 @@
+import time
+import pdb
+import threading
+import logging
+from multiprocessing import Pool, Process
+import pytest
+from utils.utils import *
+from common.constants import *
+
+DELETE_TIMEOUT = 60
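+# Illustrative note: `default_single_query` below uses the legacy boolean query
+# DSL, where each vector clause is keyed by the vector field name and carries
+# `topk`, the query vectors, the metric type, and index-specific search params
+# (`nprobe` applies to IVF-family indexes). The same shape with different
+# values, reusing helpers from utils.utils, would be:
+#
+#     {"bool": {"must": [{"vector": {default_float_vec_field_name: {
+#         "topk": 5, "query": gen_vectors(2, default_dim),
+#         "metric_type": "L2", "params": {"nprobe": 16}}}}]}}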
+default_single_query = {
+    "bool": {
+        "must": [
+            {"vector": {default_float_vec_field_name: {"topk": 10, "query": gen_vectors(1, default_dim),
+                                                       "metric_type": "L2", "params": {"nprobe": 10}}}}
+        ]
+    }
+}
+
+
+class TestFlushBase:
+    """
+    ******************************************************************
+    The following cases are used to test `flush` function
+    ******************************************************************
+    """
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        # if str(connect._cmd("mode")[1]) == "GPU":
+        #     if request.param["index_type"] not in ivf():
+        #         pytest.skip("Only support index_type: idmap/flat")
+        return request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_single_filter_fields()
+    )
+    def get_filter_field(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_single_vector_fields()
+    )
+    def get_vector_field(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_flush_collection_not_existed(self, connect, collection):
+        '''
+        target: test flush, params collection_name not existed
+        method: flush with a collection that does not exist
+        expected: error raised
+        '''
+        collection_new = gen_unique_str("test_flush_1")
+        try:
+            connect.flush([collection_new])
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "describe collection failed: can't find collection: %s" % collection_new
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_flush_empty_collection(self, connect, collection):
+        '''
+        method: flush collection with no vectors
+        expected: no error raised
+        '''
+        connect.flush([collection])
+        results = connect.insert(collection, default_entities)
+        assert len(results.primary_keys) == default_nb
+        # status = connect.delete_entity_by_id(collection, ids)
+        # assert status.OK()
+        connect.flush([collection])
+        res = connect.get_collection_stats(collection)
+        assert res["row_count"] == default_nb
+        connect.flush([collection])
+        # with pytest.raises(Exception) as e:
+        #     connect.flush([collection])
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_add_partition_flush(self, connect, id_collection):
+        '''
+        method: add entities into partition in collection, flush several times
+        expected: the length of ids and the collection row count
+        '''
+        connect.create_partition(id_collection, default_tag)
+        result = connect.insert(id_collection, default_entities)
+        connect.flush([id_collection])
+        res_count = connect.get_collection_stats(id_collection)
+        assert res_count["row_count"] == default_nb
+        result = connect.insert(id_collection, default_entities, partition_name=default_tag)
+        assert len(result.primary_keys) == default_nb
+        connect.flush([id_collection])
+        res_count = connect.get_collection_stats(id_collection)
+        assert res_count["row_count"] == default_nb * 2
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_add_partitions_flush(self, connect, id_collection):
+        '''
+        method: add entities into partitions in collection, flush one
+        expected: the length of ids and the collection row count
+        '''
+        tag_new = gen_unique_str()
+        connect.create_partition(id_collection, default_tag)
+        connect.create_partition(id_collection, tag_new)
+        ids = [i for i in range(default_nb)]
+        connect.insert(id_collection, default_entities, partition_name=default_tag)
+        connect.flush([id_collection])
+        connect.insert(id_collection, default_entities, partition_name=tag_new)
+        connect.flush([id_collection])
+        res = connect.get_collection_stats(id_collection)
+        assert res["row_count"] == 2 * default_nb
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_add_collections_flush(self, connect, id_collection):
+        '''
+        method: add entities into collections, flush one
+        expected: the length of ids and the collection row count
+        '''
+        collection_new = gen_unique_str()
+        default_fields = gen_default_fields(False)
+        connect.create_collection(collection_new, default_fields)
+        connect.create_partition(id_collection, default_tag)
+        connect.create_partition(collection_new, default_tag)
+        ids = [i for i in range(default_nb)]
+        # ids = connect.insert(id_collection, default_entities, ids, partition_name=default_tag)
+        # ids = connect.insert(collection_new, default_entities, ids, partition_name=default_tag)
+        connect.insert(id_collection, default_entities, partition_name=default_tag)
+        connect.insert(collection_new, default_entities, partition_name=default_tag)
+        connect.flush([id_collection])
+        connect.flush([collection_new])
+        res = connect.get_collection_stats(id_collection)
+        assert res["row_count"] == default_nb
+        res = connect.get_collection_stats(collection_new)
+        assert res["row_count"] == default_nb
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_add_collections_fields_flush(self, connect, id_collection, get_filter_field, get_vector_field):
+        '''
+        method: create collection with different fields, and add entities into collections, flush one
+        expected: the length of ids and the collection row count
+        '''
+        nb_new = 5
+        filter_field = get_filter_field
+        vector_field = get_vector_field
+        collection_new = gen_unique_str("test_flush")
+        fields = {
+            "fields": [gen_primary_field(), filter_field, vector_field],
+            "segment_row_limit": default_segment_row_limit,
+            "auto_id": False
+        }
+        connect.create_collection(collection_new, fields)
+        connect.create_partition(id_collection, default_tag)
+        connect.create_partition(collection_new, default_tag)
+        entities_new = gen_entities_by_fields(fields["fields"], nb_new, default_dim)
+        connect.insert(id_collection, default_entities, partition_name=default_tag)
+        connect.insert(collection_new, entities_new, partition_name=default_tag)
+        connect.flush([id_collection])
+        connect.flush([collection_new])
+        res = connect.get_collection_stats(id_collection)
+        assert res["row_count"] == default_nb
+        res = connect.get_collection_stats(collection_new)
+        assert res["row_count"] == nb_new
+
+    # TODO ci failed
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_add_flush_multiable_times(self, connect, collection):
+        '''
+        method: add entities, flush several times
+        expected: no error raised
+        '''
+        result = connect.insert(collection, default_entities)
+        for i in range(10):
+            connect.flush([collection])
+        res = connect.get_collection_stats(collection)
+        assert res["row_count"] == len(result.primary_keys)
+        # query_vecs = [vectors[0], vectors[1], vectors[-1]]
+        connect.load_collection(collection)
+        res = connect.search(collection, default_single_query)
+        logging.getLogger().debug(res)
+        assert len(res) == 1
+        assert len(res[0].ids) == 10
+        assert len(res[0].distances) == 10
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_add_flush_auto(self, connect, id_collection):
+        '''
+        method: add entities
+        expected: no error raised
+        '''
+        ids = [i for i in range(default_nb)]
+        result = connect.insert(id_collection, default_entities)
+        # add flush
+        connect.flush([id_collection])
+        timeout = 20
+        start_time = time.time()
+        while (time.time() - start_time < timeout):
+            time.sleep(1)
+            res = connect.get_collection_stats(id_collection)
+            if res["row_count"] == default_nb:
+                break
+        if time.time() - start_time > timeout:
+            assert False
+
+    @pytest.fixture(
+        scope="function",
+        params=[
+            1,
+            100
+        ],
+    )
+    def same_ids(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_add_flush_same_ids(self, connect, id_collection, same_ids):
+        '''
+        method: add entities with same ids, count(same ids) < 15, > 15
+        expected: the length of ids and the collection row count
+        '''
+        ids = [i for i in range(default_nb)]
+        for i, item in enumerate(ids):
+            if item <= same_ids:
+                ids[i] = 0
+        result = connect.insert(id_collection, default_entities)
+        connect.flush([id_collection])
+        res = connect.get_collection_stats(id_collection)
+        assert res["row_count"] == default_nb
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_delete_flush_multiable_times(self, connect, collection):
+        '''
+        method: delete entities, flush several times
+        expected: no error raised
+        '''
+        result = connect.insert(collection, default_entities)
+        # status = connect.delete_entity_by_id(collection, [ids[-1]])
+        # assert status.OK()
+        for i in range(10):
+            connect.flush([collection])
+        # query_vecs = [vectors[0], vectors[1], vectors[-1]]
+        connect.load_collection(collection)
+        res = connect.search(collection, default_single_query)
+        assert len(res) == 1
+        assert len(res[0].ids) == 10
+        assert len(res[0].distances) == 10
+        logging.getLogger().debug(res)
+        # assert res
+
+    # TODO: unable to set config
+    @pytest.mark.tags(CaseLabel.L2)
+    def _test_collection_count_during_flush(self, connect, collection, args):
+        '''
+        method: flush collection at background, call `get_collection_stats`
+        expected: no timeout
+        '''
+        ids = []
+        for i in range(5):
+            tmp_ids = connect.insert(collection, default_entities)
+            connect.flush([collection])
+            ids.extend(tmp_ids)
+        disable_flush(connect)
+        # status = connect.delete_entity_by_id(collection, ids)
+
+        def flush():
+            milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
+            logging.error("start flush")
+            milvus.flush([collection])
+            logging.error("end flush")
+
+        p = MyThread(target=flush, args=())
+        p.start()
+        time.sleep(0.2)
+        logging.error("start count")
+        res = connect.get_collection_stats(collection, timeout=10)
+        p.join()
+        res = connect.get_collection_stats(collection)
+        assert res["row_count"] == 0
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_delete_flush_during_search(self, connect, collection, args):
+        '''
+        method: search at background, call `delete and flush`
+        expected: no timeout
+        '''
+        ids = []
+        loops = 5
+        for i in range(loops):
+            tmp = connect.insert(collection, default_entities)
+            connect.flush([collection])
+            ids.extend(tmp.primary_keys)
+        nq = 10000
+        query, query_vecs = gen_query_vectors(default_float_vec_field_name, default_entities, default_top_k, nq)
+        time.sleep(0.1)
+        connect.load_collection(collection)
+        future = connect.search(collection, query, _async=True)
+        res = future.result()
+        assert res
+        delete_ids = [ids[0], ids[-1]]
+        connect.flush([collection])
+        res_count = connect.get_collection_stats(collection, timeout=120)
+        assert res_count["row_count"] == loops * default_nb
+
+
+class TestFlushAsync:
+    @pytest.fixture(scope="function", autouse=True)
+    def skip_http_check(self, args):
+        if args["handler"] == "HTTP":
+            pytest.skip("skip in http mode")
+
+    """
+    ******************************************************************
+    The following cases are used to test `flush` function
+    ******************************************************************
+    """
+
+    def check_status(self):
+        logging.getLogger().info("In callback check status")
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_flush_empty_collection(self, connect, collection):
+        '''
+        method: flush collection with no vectors
+        expected: status ok
+        '''
+        future = connect.flush([collection], _async=True)
+        status = future.result()
+        assert status is None
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_flush_async_long(self, connect, collection):
+        result = connect.insert(collection, default_entities)
+        assert len(result.primary_keys) == default_nb
+        future = connect.flush([collection], _async=True)
+        status = future.result()
+        assert status is None
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_flush_async_long_drop_collection(self, connect, collection):
+        for i in range(5):
+            result = connect.insert(collection, default_entities)
+            assert len(result.primary_keys) == default_nb
+        future = connect.flush([collection], _async=True)
+        assert future.result() is None
+        logging.getLogger().info("DROP")
+        res = connect.drop_collection(collection)
+        assert res is None
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_flush_async(self, connect, collection):
+        connect.insert(collection, default_entities)
+        logging.getLogger().info("before")
+        future = connect.flush([collection], _async=True, _callback=self.check_status)
+        logging.getLogger().info("after")
+        future.done()
+        status = future.result()
+        assert status is None
+
+
+class TestCollectionNameInvalid(object):
+    """
+    Test flushing with invalid collection names
+    """
+
+    @pytest.fixture(
+        scope="function",
+        # params=gen_invalid_collection_names()
+        params=gen_invalid_strs()
+    )
+    def get_invalid_collection_name(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_flush_with_invalid_collection_name(self, connect, get_invalid_collection_name):
+        collection_name = get_invalid_collection_name
+        if collection_name is None or not collection_name:
+            pytest.skip("when collection_name is None, flush applies to all collections")
+        with pytest.raises(Exception) as e:
+            connect.flush(collection_name)
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_flush_empty(self, connect, collection):
+        result = connect.insert(collection, default_entities)
+        assert len(result.primary_keys) == default_nb
+        try:
+            connect.flush()
+        except Exception as e:
+            assert e.args[0] == "Collection name list can not be None or empty"
diff --git a/tests/python_client/testcases/test_index.py b/tests/python_client/testcases/test_index.py
new file mode 100644
index 0000000000..625feb86c9
--- /dev/null
+++ b/tests/python_client/testcases/test_index.py
@@ -0,0 +1,922 @@
+import logging
+import time
+import pdb
+import copy
+import threading
+from multiprocessing import Pool, Process
+import numpy
+import pytest
+import sklearn.preprocessing
+from utils.utils import *
+from common.constants import *
+
+uid = "test_index"
+BUILD_TIMEOUT = 300
+field_name = default_float_vec_field_name
+binary_field_name = default_binary_vec_field_name
+query, query_vecs = gen_query_vectors(field_name, default_entities, default_top_k, 1)
+default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
+
+
+class TestIndexBase:
+    @pytest.fixture(
+        scope="function",
+        params=gen_simple_index()
+    )
+    def get_simple_index(self, request, connect):
+        logging.getLogger().info(request.param)
+        # if str(connect._cmd("mode")) == "CPU":
+        #     if request.param["index_type"] in index_cpu_not_support():
+        #         pytest.skip("sq8h not support in CPU mode")
+        return copy.deepcopy(request.param)
+
+    @pytest.fixture(
+        scope="function",
+        params=[
+            1,
+            10,
+            1111
+        ],
+    )
+    def get_nq(self, request):
+        yield request.param
+
+    """
+    ******************************************************************
+    The following cases are used to test `create_index` function
+    ******************************************************************
+    """
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection and add entities in it, create index
+        expected: create index ok, and the index can be described
+        '''
+        result = connect.insert(collection, default_entities)
+        connect.create_index(collection, field_name, get_simple_index)
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, "")
+            create_target_index(get_simple_index, field_name)
+            assert index == get_simple_index
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_create_index_on_field_not_existed(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection and add entities in it, create index on a field that does not exist
+        expected: error raised
+        '''
+        tmp_field_name = gen_unique_str()
+        result = connect.insert(collection, default_entities)
+        with pytest.raises(Exception) as e:
+            connect.create_index(collection, tmp_field_name, get_simple_index)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_create_index_on_field(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection and add entities in it, create index on a non-vector field
+        expected: error raised
+        '''
+        tmp_field_name = "int64"
+        result = connect.insert(collection, default_entities)
+        with pytest.raises(Exception) as e:
+            connect.create_index(collection, tmp_field_name, get_simple_index)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_no_vectors(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection without adding entities, create index
+        expected: create index ok, and the index can be described
+        '''
+        connect.create_index(collection, field_name, get_simple_index)
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, "")
+            create_target_index(get_simple_index, field_name)
+            assert index == get_simple_index
+
+    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_partition(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection, create partition, and add entities in it, create index
+        expected: create index ok, and the index can be described
+        '''
+        connect.create_partition(collection, default_tag)
+        result = connect.insert(collection, default_entities, partition_name=default_tag)
+        connect.create_index(collection, field_name, get_simple_index)
+        if get_simple_index["index_type"] != "FLAT":
+            index = connect.describe_index(collection, "")
+            create_target_index(get_simple_index, field_name)
+            assert index == get_simple_index
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    @pytest.mark.timeout(BUILD_TIMEOUT)
+    def test_create_index_partition_flush(self, connect, collection, get_simple_index):
+        '''
+        target: test create index interface
+        method: create collection, create
partition, and add entities in it, create index + expected: return search success + ''' + connect.create_partition(collection, default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + connect.flush([collection]) + connect.create_index(collection, field_name, get_simple_index) + if get_simple_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(get_simple_index, field_name) + assert index == get_simple_index + + @pytest.mark.tags(CaseLabel.L2) + def test_create_index_without_connect(self, dis_connect, collection): + ''' + target: test create index without connection + method: call create_index with a disconnected client + expected: raise exception + ''' + with pytest.raises(Exception) as e: + dis_connect.create_index(collection, field_name, default_index) + + @pytest.mark.tags(CaseLabel.tags_smoke) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_search_with_query_vectors(self, connect, collection, get_simple_index, get_nq): + ''' + target: test create index interface, search with more query vectors + method: create collection and add entities in it, create index + expected: return search success + ''' + result = connect.insert(collection, default_entities) + connect.flush([collection]) + connect.create_index(collection, field_name, get_simple_index) + logging.getLogger().info(connect.describe_index(collection, "")) + nq = get_nq + index_type = get_simple_index["index_type"] + search_param = get_search_param(index_type) + query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq, search_params=search_param) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + + @pytest.mark.timeout(BUILD_TIMEOUT) + @pytest.mark.tags(CaseLabel.L2) + def test_create_index_multithread(self, connect, collection, args): + ''' + target: test create index interface with multiple threads + method: create collection and add entities in it, create index in each thread + expected: return search success + ''' + connect.insert(collection, default_entities) + + def build(connect): + connect.create_index(collection, field_name, default_index) + if default_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(default_index, field_name) + assert index == default_index + + threads_num = 8 + threads = [] + for i in range(threads_num): + m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"]) + t = MyThread(target=build, args=(m,)) + threads.append(t) + t.start() + time.sleep(0.2) + for t in threads: + t.join() + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_index_collection_not_existed(self, connect): + ''' + target: test create index interface when the collection does not exist + method: create index with a collection name that does not exist + expected: create index failed + ''' + collection_name = gen_unique_str(uid) + with pytest.raises(Exception) as e: + connect.create_index(collection_name, field_name, default_index)
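+ + # NOTE: the create-then-verify steps repeated in the cases above can be expressed as one helper. + # Illustrative sketch only (not used by the suite); it follows the suite's convention that + # describe_index returns an empty result for FLAT, so only non-FLAT indexes are compared. + def _build_and_verify_index(self, connect, collection_name, index_params): + connect.create_index(collection_name, field_name, index_params) + if index_params["index_type"] != "FLAT": + index = connect.describe_index(collection_name, "") + create_target_index(index_params, field_name) + assert index == index_params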
+ + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_insert_flush(self, connect, collection, get_simple_index): + ''' + target: test create index + method: create collection and create index, then add entities + expected: create index ok, and count correct + ''' + connect.create_index(collection, field_name, get_simple_index) + result = connect.insert(collection, default_entities) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats["row_count"] == default_nb + if get_simple_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(get_simple_index, field_name) + assert index == get_simple_index + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_same_index_repeatedly(self, connect, collection, get_simple_index): + ''' + target: check if index can be created repeatedly, with the same create_index params + method: create index again after the index has been built + expected: return code success, and search ok + ''' + connect.create_index(collection, field_name, get_simple_index) + connect.create_index(collection, field_name, get_simple_index) + if get_simple_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(get_simple_index, field_name) + assert index == get_simple_index + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_different_index_repeatedly(self, connect, collection): + ''' + target: check if index can be created repeatedly, with different create_index params + method: create another index with different index_params after the index has been built + expected: return code 0, and describe index result equals with the second index params + ''' + result = connect.insert(collection, default_entities) + connect.flush([collection]) + indexes = [default_index, {"metric_type": "L2", "index_type": "FLAT", "params": {"nlist": 1024}}] + for index in indexes: + connect.create_index(collection, field_name, index) + connect.release_collection(collection) + connect.load_collection(collection) + index = connect.describe_index(collection, "") + # assert index == indexes[-1] + assert not index # FLAT is created last, and creating it drops all previous indexes on the server + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_different_index_repeatedly_B(self, connect, collection): + ''' + target: check if index can be created repeatedly, with different create_index params + method: create another index with different index_params after the index has been built + expected: return code 0, and describe index result equals with the second index params + ''' + result = connect.insert(collection, default_entities) + connect.flush([collection]) + indexes = [default_index, {"metric_type": "L2", "index_type": "IVF_SQ8", "params": {"nlist": 1024}}] + for index in indexes: + connect.create_index(collection, field_name, index) + connect.release_collection(collection) + connect.load_collection(collection) + index = connect.describe_index(collection, "") + create_target_index(indexes[-1], field_name) + assert index == indexes[-1] + # unlike the FLAT case above, the IVF_SQ8 index remains + + @pytest.mark.tags(CaseLabel.tags_smoke) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_ip(self, connect, collection, get_simple_index): + ''' + target: test create index interface + method: create collection and add entities in it, create index + expected: return search success + ''' + result = connect.insert(collection, default_entities) + get_simple_index["metric_type"] = "IP" + connect.create_index(collection, field_name, get_simple_index) + if get_simple_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(get_simple_index, field_name) + assert index ==
get_simple_index + + @pytest.mark.tags(CaseLabel.tags_smoke) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_no_vectors_ip(self, connect, collection, get_simple_index): + ''' + target: test create index interface + method: create collection and add entities in it, create index + expected: return search success + ''' + get_simple_index["metric_type"] = "IP" + connect.create_index(collection, field_name, get_simple_index) + if get_simple_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(get_simple_index, field_name) + assert index == get_simple_index + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_partition_ip(self, connect, collection, get_simple_index): + ''' + target: test create index interface + method: create collection, create partition, and add entities in it, create index + expected: return search success + ''' + connect.create_partition(collection, default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + get_simple_index["metric_type"] = "IP" + connect.create_index(collection, field_name, get_simple_index) + if get_simple_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(get_simple_index, field_name) + assert index == get_simple_index + + @pytest.mark.tags(CaseLabel.tags_smoke) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_partition_flush_ip(self, connect, collection, get_simple_index): + ''' + target: test create index interface + method: create collection, create partition, and add entities in it, create index + expected: return search success + ''' + connect.create_partition(collection, default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + connect.flush([collection]) + get_simple_index["metric_type"] = "IP" + connect.create_index(collection, field_name, get_simple_index) + if get_simple_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(get_simple_index, field_name) + assert index == get_simple_index + + @pytest.mark.tags(CaseLabel.tags_smoke) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_search_with_query_vectors_ip(self, connect, collection, get_simple_index, get_nq): + ''' + target: test create index interface, search with more query vectors + method: create collection and add entities in it, create index + expected: return search success + ''' + metric_type = "IP" + result = connect.insert(collection, default_entities) + connect.flush([collection]) + get_simple_index["metric_type"] = metric_type + connect.create_index(collection, field_name, get_simple_index) + connect.load_collection(collection) + logging.getLogger().info(connect.describe_index(collection, "")) + nq = get_nq + index_type = get_simple_index["index_type"] + search_param = get_search_param(index_type) + query, vecs = gen_query_vectors(field_name, default_entities, default_top_k, nq, metric_type=metric_type, search_params=search_param) + res = connect.search(collection, query) + assert len(res) == nq + + @pytest.mark.timeout(BUILD_TIMEOUT) + @pytest.mark.tags(CaseLabel.L2) + def test_create_index_multithread_ip(self, connect, collection, args): + ''' + target: test create index interface with multiprocess + method: create collection and add entities in it, create index + expected: return search success + ''' + connect.insert(collection, default_entities) + + def build(connect): + 
default_index["metric_type"] = "IP" + connect.create_index(collection, field_name, default_index) + if default_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(default_index, field_name) + assert index == default_index + + threads_num = 8 + threads = [] + for i in range(threads_num): + m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"]) + t = MyThread(target=build, args=(m,)) + threads.append(t) + t.start() + time.sleep(0.2) + for t in threads: + t.join() + + @pytest.mark.tags(CaseLabel.L2) + def test_create_index_collection_not_existed_ip(self, connect, collection): + ''' + target: test create index interface when collection name not existed + method: create collection and add entities in it, create index + , make sure the collection name not in index + expected: return code not equals to 0, create index failed + ''' + collection_name = gen_unique_str(uid) + default_index["metric_type"] = "IP" + with pytest.raises(Exception) as e: + connect.create_index(collection_name, field_name, default_index) + + @pytest.mark.tags(CaseLabel.tags_smoke) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_no_vectors_insert_ip(self, connect, collection): + ''' + target: test create index interface when there is no vectors in collection, and does not affect the subsequent process + method: create collection and add no vectors in it, and then create index, add entities in it + expected: return code equals to 0 + ''' + default_index["metric_type"] = "IP" + connect.create_index(collection, field_name, default_index) + result = connect.insert(collection, default_entities) + connect.flush([collection]) + stats = connect.get_collection_stats(collection) + assert stats["row_count"] == default_nb + if default_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(default_index, field_name) + assert index == default_index + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_same_index_repeatedly_ip(self, connect, collection): + ''' + target: check if index can be created repeatedly, with the same create_index params + method: create index after index have been built + expected: return code success, and search ok + ''' + default_index["metric_type"] = "IP" + connect.create_index(collection, field_name, default_index) + connect.create_index(collection, field_name, default_index) + if default_index["index_type"] != "FLAT": + index = connect.describe_index(collection, "") + create_target_index(default_index, field_name) + assert index == default_index + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_different_index_repeatedly_ip(self, connect, collection): + ''' + target: check if index can be created repeatedly, with the different create_index params + method: create another index with different index_params after index have been built + expected: return code 0, and describe index result equals with the second index params + ''' + result = connect.insert(collection, default_entities) + connect.flush([collection]) + connect.load_collection(collection) + stats = connect.get_collection_stats(collection) + assert stats["row_count"] == default_nb + default_index["metric_type"] = "IP" + indexs = [default_index, {"index_type": "FLAT", "params": {"nlist": 1024}, "metric_type": "IP"}] + for index in indexs: + connect.create_index(collection, field_name, index) + connect.release_collection(collection) + 
connect.load_collection(collection) + index = connect.describe_index(collection, "") + # assert index == indexs[-1] + assert not index + + """ + ****************************************************************** + The following cases are used to test `drop_index` function + ****************************************************************** + """ + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_drop_index(self, connect, collection, get_simple_index): + ''' + target: test drop index interface + method: create collection and add entities in it, create index, call drop index + expected: return code 0, and default index param + ''' + # result = connect.insert(collection, entities) + connect.create_index(collection, field_name, get_simple_index) + connect.drop_index(collection, field_name) + index = connect.describe_index(collection, "") + assert not index + + @pytest.mark.tags(CaseLabel.L2) + def test_drop_index_repeatedly(self, connect, collection, get_simple_index): + ''' + target: test drop index repeatedly + method: create index, call drop index, and drop again + expected: return code 0 + ''' + connect.create_index(collection, field_name, get_simple_index) + connect.drop_index(collection, field_name) + connect.drop_index(collection, field_name) + index = connect.describe_index(collection, "") + assert not index + + @pytest.mark.tags(CaseLabel.L2) + def test_drop_index_without_connect(self, dis_connect, collection): + ''' + target: test drop index without connection + method: drop index, and check if drop successfully + expected: raise exception + ''' + with pytest.raises(Exception) as e: + dis_connect.drop_index(collection, field_name) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_drop_index_collection_not_existed(self, connect): + ''' + target: test drop index interface when collection name not existed + method: create collection and add entities in it, create index + , make sure the collection name not in index, and then drop it + expected: return code not equals to 0, drop index failed + ''' + collection_name = gen_unique_str(uid) + with pytest.raises(Exception) as e: + connect.drop_index(collection_name, field_name) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_drop_index_collection_not_create(self, connect, collection): + ''' + target: test drop index interface when index not created + method: create collection and add entities in it, create index + expected: return code not equals to 0, drop index failed + ''' + # no create index + connect.drop_index(collection, field_name) + + @pytest.mark.tags(CaseLabel.L2) + def test_create_drop_index_repeatedly(self, connect, collection, get_simple_index): + ''' + target: test create / drop index repeatedly, use the same index params + method: create index, drop index, four times + expected: return code 0 + ''' + for i in range(4): + connect.create_index(collection, field_name, get_simple_index) + connect.drop_index(collection, field_name) + + @pytest.mark.tags(CaseLabel.L2) + def test_drop_index_ip(self, connect, collection, get_simple_index): + ''' + target: test drop index interface + method: create collection and add entities in it, create index, call drop index + expected: return code 0, and default index param + ''' + # result = connect.insert(collection, entities) + get_simple_index["metric_type"] = "IP" + connect.create_index(collection, field_name, get_simple_index) + connect.drop_index(collection, field_name) + index = connect.describe_index(collection, "") + assert not index + + @pytest.mark.tags(CaseLabel.L2) + 
def test_drop_index_repeatedly_ip(self, connect, collection, get_simple_index): + ''' + target: test drop index repeatedly + method: create index, call drop index, and drop again + expected: return code 0 + ''' + get_simple_index["metric_type"] = "IP" + connect.create_index(collection, field_name, get_simple_index) + connect.drop_index(collection, field_name) + connect.drop_index(collection, field_name) + index = connect.describe_index(collection, "") + assert not index + + @pytest.mark.tags(CaseLabel.L2) + def test_drop_index_without_connect_ip(self, dis_connect, collection): + ''' + target: test drop index without connection + method: drop index, and check if drop successfully + expected: raise exception + ''' + with pytest.raises(Exception) as e: + dis_connect.drop_index(collection, field_name) + + @pytest.mark.tags(CaseLabel.L2) + def test_drop_index_collection_not_create_ip(self, connect, collection): + ''' + target: test drop index interface when index not created + method: create collection and add entities in it, create index + expected: return code not equals to 0, drop index failed + ''' + # result = connect.insert(collection, entities) + # no create index + connect.drop_index(collection, field_name) + + @pytest.mark.tags(CaseLabel.L2) + def test_create_drop_index_repeatedly_ip(self, connect, collection, get_simple_index): + ''' + target: test create / drop index repeatedly, use the same index params + method: create index, drop index, four times + expected: return code 0 + ''' + get_simple_index["metric_type"] = "IP" + for i in range(4): + connect.create_index(collection, field_name, get_simple_index) + connect.drop_index(collection, field_name) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_PQ_without_nbits(self, connect, collection): + PQ_index = {"index_type": "IVF_PQ", "params": {"nlist": 128, "m": 16}, "metric_type": "L2"} + result = connect.insert(collection, default_entities) + connect.create_index(collection, field_name, PQ_index) + index = connect.describe_index(collection, "") + create_target_index(PQ_index, field_name) + assert index == PQ_index + + +class TestIndexBinary: + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + # if str(connect._cmd("mode")) == "CPU": + # if request.param["index_type"] in index_cpu_not_support(): + # pytest.skip("sq8h not support in CPU mode") + return copy.deepcopy(request.param) + + @pytest.fixture( + scope="function", + params=gen_binary_index() + ) + def get_jaccard_index(self, request, connect): + if request.param["index_type"] in binary_support(): + request.param["metric_type"] = "JACCARD" + return request.param + else: + pytest.skip("Skip index") + + @pytest.fixture( + scope="function", + params=gen_binary_index() + ) + def get_l2_index(self, request, connect): + request.param["metric_type"] = "L2" + return request.param + + @pytest.fixture( + scope="function", + params=[ + 1, + 10, + 1111 + ], + ) + def get_nq(self, request): + yield request.param + + """ + ****************************************************************** + The following cases are used to test `create_index` function + ****************************************************************** + """ + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index(self, connect, binary_collection, get_jaccard_index): + ''' + target: test create index interface + method: create collection and add entities in it, create index + expected: return search success + 
''' + result = connect.insert(binary_collection, default_binary_entities) + connect.create_index(binary_collection, binary_field_name, get_jaccard_index) + binary_index = connect.describe_index(binary_collection, "") + create_target_index(get_jaccard_index, binary_field_name) + assert binary_index == get_jaccard_index + + @pytest.mark.tags(CaseLabel.tags_smoke) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_partition(self, connect, binary_collection, get_jaccard_index): + ''' + target: test create index interface + method: create collection, create partition, and add entities in it, create index + expected: return search success + ''' + connect.create_partition(binary_collection, default_tag) + result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag) + connect.create_index(binary_collection, binary_field_name, get_jaccard_index) + binary_index = connect.describe_index(binary_collection, "") + create_target_index(get_jaccard_index, binary_field_name) + assert binary_index == get_jaccard_index + + @pytest.mark.tags(CaseLabel.tags_smoke) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_search_with_query_vectors(self, connect, binary_collection, get_jaccard_index, get_nq): + ''' + target: test create index interface, search with more query vectors + method: create collection and add entities in it, create index + expected: return search success + ''' + nq = get_nq + result = connect.insert(binary_collection, default_binary_entities) + connect.flush([binary_collection]) + connect.create_index(binary_collection, binary_field_name, get_jaccard_index) + connect.load_collection(binary_collection) + query, vecs = gen_query_vectors(binary_field_name, default_binary_entities, default_top_k, nq, metric_type="JACCARD") + search_param = get_search_param(get_jaccard_index["index_type"], metric_type="JACCARD") + logging.getLogger().info(search_param) + res = connect.search(binary_collection, query, search_params=search_param) + assert len(res) == nq
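+ + # NOTE: for reference, get_jaccard_index yields binary index params shaped like default_index; + # the value below is an illustrative example only (assuming BIN_IVF_FLAT is among the + # binary_support() index types), not something the tests depend on. + example_jaccard_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}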
+ + @pytest.mark.timeout(BUILD_TIMEOUT) + @pytest.mark.tags(CaseLabel.L2) + def test_create_index_invalid_metric_type_binary(self, connect, binary_collection, get_l2_index): + ''' + target: test create index interface with invalid metric type + method: add entities into a binary collection, flush, then create an index with the L2 metric type + expected: return create_index failure + ''' + # insert binary entities + result = connect.insert(binary_collection, default_binary_entities) + connect.flush([binary_collection]) + with pytest.raises(Exception) as e: + res = connect.create_index(binary_collection, binary_field_name, get_l2_index) + + """ + ****************************************************************** + The following cases are used to test `describe_index` function + ****************************************************************** + """ + @pytest.mark.skip("repeat with test_create_index binary") + def _test_get_index_info(self, connect, binary_collection, get_jaccard_index): + ''' + target: test describe index interface + method: create collection and add entities in it, create index, call describe index + expected: return code 0, and index structure + ''' + result = connect.insert(binary_collection, default_binary_entities) + connect.flush([binary_collection]) + connect.create_index(binary_collection, binary_field_name, get_jaccard_index) + stats = connect.get_collection_stats(binary_collection) + assert stats["row_count"] == default_nb + for partition in stats["partitions"]: + segments = partition["segments"] + if segments: + for segment in segments: + for file in segment["files"]: + if "index_type" in file: + assert file["index_type"] == get_jaccard_index["index_type"] + + @pytest.mark.skip("repeat with test_create_index_partition binary") + def _test_get_index_info_partition(self, connect, binary_collection, get_jaccard_index): + ''' + target: test describe index interface + method: create collection, create partition and add entities in it, create index, call describe index + expected: return code 0, and index structure + ''' + connect.create_partition(binary_collection, default_tag) + result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag) + connect.flush([binary_collection]) + connect.create_index(binary_collection, binary_field_name, get_jaccard_index) + stats = connect.get_collection_stats(binary_collection) + logging.getLogger().info(stats) + assert stats["row_count"] == default_nb + assert len(stats["partitions"]) == 2 + for partition in stats["partitions"]: + segments = partition["segments"] + if segments: + for segment in segments: + for file in segment["files"]: + if "index_type" in file: + assert file["index_type"] == get_jaccard_index["index_type"] + + """ + ****************************************************************** + The following cases are used to test `drop_index` function + ****************************************************************** + """ + @pytest.mark.tags(CaseLabel.L2) + def test_drop_index(self, connect, binary_collection, get_jaccard_index): + ''' + target: test drop index interface + method: create collection and add entities in it, create index, call drop index + expected: return code 0, and default index param + ''' + connect.create_index(binary_collection, binary_field_name, get_jaccard_index) + stats = connect.get_collection_stats(binary_collection) + logging.getLogger().info(stats) + connect.drop_index(binary_collection, binary_field_name) + binary_index = connect.describe_index(binary_collection, "") + assert not binary_index + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_drop_index_partition(self, connect, binary_collection, get_jaccard_index): + ''' + target: test drop index interface + method: create collection, create partition and add entities in it, create index on collection, call drop collection index + expected:
return code 0, and default index param + ''' + connect.create_partition(binary_collection, default_tag) + result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag) + connect.flush([binary_collection]) + connect.create_index(binary_collection, binary_field_name, get_jaccard_index) + connect.drop_index(binary_collection, binary_field_name) + binary_index = connect.describe_index(binary_collection, "") + assert not binary_index + + +class TestIndexInvalid(object): + """ + Test create / describe / drop index interfaces with invalid collection names + """ + + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_collection_name(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_index_with_invalid_collection_name(self, connect, get_collection_name): + collection_name = get_collection_name + with pytest.raises(Exception) as e: + connect.create_index(collection_name, field_name, default_index) + + @pytest.mark.tags(CaseLabel.L2) + def test_drop_index_with_invalid_collection_name(self, connect, get_collection_name): + collection_name = get_collection_name + with pytest.raises(Exception) as e: + connect.drop_index(collection_name) + + @pytest.fixture( + scope="function", + params=gen_invalid_index() + ) + def get_index(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.L2) + def test_create_index_with_invalid_index_params(self, connect, collection, get_index): + logging.getLogger().info(get_index) + with pytest.raises(Exception) as e: + connect.create_index(collection, field_name, get_index) + + +class TestIndexAsync: + @pytest.fixture(scope="function", autouse=True) + def skip_http_check(self, args): + if args["handler"] == "HTTP": + pytest.skip("skip in http mode") + + """ + ****************************************************************** + The following cases are used to test `create_index` function + ****************************************************************** + """ + + @pytest.fixture( + scope="function", + params=gen_simple_index() + ) + def get_simple_index(self, request, connect): + # if str(connect._cmd("mode")) == "CPU": + # if request.param["index_type"] in index_cpu_not_support(): + # pytest.skip("sq8h not support in CPU mode") + return copy.deepcopy(request.param) + + def check_result(self, res): + logging.getLogger().info("In callback check search result") + logging.getLogger().info(res) + + """ + ****************************************************************** + The following cases are used to test `create_index` function + ****************************************************************** + """ + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index(self, connect, collection, get_simple_index): + ''' + target: test create index interface + method: create collection and add entities in it, create index + expected: return search success + ''' + result = connect.insert(collection, default_entities) + logging.getLogger().info("start index") + future = connect.create_index(collection, field_name, get_simple_index, _async=True) + logging.getLogger().info("before result") + res = future.result() + # TODO: + logging.getLogger().info(res) + + @pytest.mark.tags(CaseLabel.tags_smoke) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_drop(self, connect, collection, get_simple_index): + ''' + target: test create index interface + method: create collection and add entities in it, create index + expected: return search success + ''' + result 
= connect.insert(collection, default_entities) + logging.getLogger().info("start index") + future = connect.create_index(collection, field_name, get_simple_index, _async=True) + logging.getLogger().info("DROP") + connect.drop_collection(collection) + + @pytest.mark.tags(CaseLabel.L2) + def test_create_index_with_invalid_collection_name(self, connect): + collection_name = " " + with pytest.raises(Exception) as e: + future = connect.create_index(collection_name, field_name, default_index, _async=True) + res = future.result() + + @pytest.mark.tags(CaseLabel.tags_smoke) + @pytest.mark.timeout(BUILD_TIMEOUT) + def test_create_index_callback(self, connect, collection, get_simple_index): + ''' + target: test create index interface + method: create collection and add entities in it, create index + expected: return search success + ''' + result = connect.insert(collection, default_entities) + logging.getLogger().info("start index") + future = connect.create_index(collection, field_name, get_simple_index, _async=True, + _callback=self.check_result) + logging.getLogger().info("before result") + res = future.result() + # TODO: + logging.getLogger().info(res) diff --git a/tests20/python_client/testcases/test_index.py b/tests/python_client/testcases/test_index_20.py similarity index 99% rename from tests20/python_client/testcases/test_index.py rename to tests/python_client/testcases/test_index_20.py index 036bf956db..20d49b727e 100644 --- a/tests20/python_client/testcases/test_index.py +++ b/tests/python_client/testcases/test_index_20.py @@ -1,7 +1,7 @@ import copy import pdb import pytest -from pymilvus_orm import FieldSchema +from pymilvus import FieldSchema from base.client_base import TestcaseBase from base.index_wrapper import ApiIndexWrapper diff --git a/tests20/python_client/testcases/test_insert.py b/tests/python_client/testcases/test_insert_20.py similarity index 99% rename from tests20/python_client/testcases/test_insert.py rename to tests/python_client/testcases/test_insert_20.py index 27d9f4606a..f5e9703a94 100644 --- a/tests20/python_client/testcases/test_insert.py +++ b/tests/python_client/testcases/test_insert_20.py @@ -3,7 +3,7 @@ import threading import numpy as np import pandas as pd import pytest -from pymilvus_orm import Index +from pymilvus import Index from base.client_base import TestcaseBase from utils.util_log import test_log as log diff --git a/tests20/python_client/testcases/test_load.py b/tests/python_client/testcases/test_load_20.py similarity index 100% rename from tests20/python_client/testcases/test_load.py rename to tests/python_client/testcases/test_load_20.py diff --git a/tests/python_client/testcases/test_mix.py b/tests/python_client/testcases/test_mix.py new file mode 100644 index 0000000000..d0f89d8574 --- /dev/null +++ b/tests/python_client/testcases/test_mix.py @@ -0,0 +1,199 @@ +import pdb +import copy +import pytest +import threading +import datetime +import logging +from time import sleep +from multiprocessing import Process +import sklearn.preprocessing +from utils.utils import * + +index_file_size = 10 +vectors = gen_vectors(10000, default_dim) +vectors = sklearn.preprocessing.normalize(vectors, axis=1, norm='l2') +vectors = vectors.tolist() +top_k = 1 +nprobe = 1 +epsilon = 0.001 +nlist = 128 +# index_params = {'index_type': IndexType.IVFLAT, 'nlist': 16384} +default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 16384}, "metric_type": "L2"} + + +class TestMixBase: + # TODO + @pytest.mark.tags(CaseLabel.L2) + def _test_mix_base(self, connect, 
collection): + nb = 200000 + nq = 5 + entities = gen_entities(nb=nb) + ids = connect.insert(collection, entities) + assert len(ids) == nb + connect.flush([collection]) + connect.create_index(collection, default_float_vec_field_name, default_index) + index = connect.describe_index(collection, "") + create_target_index(default_index, default_float_vec_field_name) + assert index == default_index + query, vecs = gen_query_vectors(default_float_vec_field_name, entities, default_top_k, nq) + connect.load_collection(collection) + res = connect.search(collection, query) + assert len(res) == nq + assert len(res[0]) == default_top_k + assert res[0]._distances[0] <= epsilon + assert check_id_result(res[0], ids[0]) + + # disable + @pytest.mark.tags(CaseLabel.L2) + def _test_search_during_createIndex(self, args): + loops = 10000 + collection = gen_unique_str() + query_vecs = [vectors[0], vectors[1]] + uri = "tcp://%s:%s" % (args["ip"], args["port"]) + id_0 = 0; + id_1 = 0 + milvus_instance = get_milvus(args["handler"]) + # milvus_instance.connect(uri=uri) + milvus_instance.create_collection({'collection_name': collection, + 'dimension': default_dim, + 'index_file_size': index_file_size, + 'metric_type': "L2"}) + for i in range(10): + status, ids = milvus_instance.bulk_insert(collection, vectors) + # logging.getLogger().info(ids) + if i == 0: + id_0 = ids[0]; + id_1 = ids[1] + + # def create_index(milvus_instance): + # logging.getLogger().info("In create index") + # status = milvus_instance.create_index(collection, index_params) + # logging.getLogger().info(status) + # status, result = milvus_instance.get_index_info(collection) + # logging.getLogger().info(result) + def insert(milvus_instance): + logging.getLogger().info("In add vectors") + status, ids = milvus_instance.bulk_insert(collection, vectors) + logging.getLogger().info(status) + + def search(milvus_instance): + logging.getLogger().info("In search vectors") + for i in range(loops): + status, result = milvus_instance.search(collection, top_k, nprobe, query_vecs) + logging.getLogger().info(status) + assert result[0][0].id == id_0 + assert result[1][0].id == id_1 + + milvus_instance = get_milvus(args["handler"]) + # milvus_instance.connect(uri=uri) + p_search = Process(target=search, args=(milvus_instance,)) + p_search.start() + milvus_instance = get_milvus(args["handler"]) + # milvus_instance.connect(uri=uri) + p_create = Process(target=insert, args=(milvus_instance,)) + p_create.start() + p_create.join() + + @pytest.mark.tags(CaseLabel.L2) + def _test_mix_multi_collections(self, connect): + ''' + target: test functions with multiple collections of different metric_types and index_types + method: create 60 collections which 30 are L2 and the other are IP, add vectors into them + and test describe index and search + expected: status ok + ''' + nq = 10000 + collection_list = [] + idx = [] + index_param = {'nlist': nlist} + + # create collection and add vectors + for i in range(30): + collection_name = gen_unique_str('test_mix_multi_collections') + collection_list.append(collection_name) + param = {'collection_name': collection_name, + 'dimension': default_dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.L2} + connect.create_collection(param) + status, ids = connect.bulk_insert(collection_name=collection_name, records=vectors) + idx.append(ids[0]) + idx.append(ids[10]) + idx.append(ids[20]) + assert status.OK() + for i in range(30): + collection_name = gen_unique_str('test_mix_multi_collections') + 
collection_list.append(collection_name) + param = {'collection_name': collection_name, + 'dimension': default_dim, + 'index_file_size': index_file_size, + 'metric_type': MetricType.IP} + connect.create_collection(param) + status, ids = connect.bulk_insert(collection_name=collection_name, records=vectors) + assert status.OK() + status = connect.flush([collection_name]) + assert status.OK() + idx.append(ids[0]) + idx.append(ids[10]) + idx.append(ids[20]) + assert status.OK() + for i in range(10): + status = connect.create_index(collection_list[i], IndexType.FLAT, index_param) + assert status.OK() + status = connect.create_index(collection_list[30 + i], IndexType.FLAT, index_param) + assert status.OK() + status = connect.create_index(collection_list[10 + i], IndexType.IVFLAT, index_param) + assert status.OK() + status = connect.create_index(collection_list[40 + i], IndexType.IVFLAT, index_param) + assert status.OK() + status = connect.create_index(collection_list[20 + i], IndexType.IVF_SQ8, index_param) + assert status.OK() + status = connect.create_index(collection_list[50 + i], IndexType.IVF_SQ8, index_param) + assert status.OK() + + # describe index + for i in range(10): + status, result = connect.get_index_info(collection_list[i]) + assert result._index_type == IndexType.FLAT + status, result = connect.get_index_info(collection_list[10 + i]) + assert result._index_type == IndexType.IVFLAT + status, result = connect.get_index_info(collection_list[20 + i]) + assert result._index_type == IndexType.IVF_SQ8 + status, result = connect.get_index_info(collection_list[30 + i]) + assert result._index_type == IndexType.FLAT + status, result = connect.get_index_info(collection_list[40 + i]) + assert result._index_type == IndexType.IVFLAT + status, result = connect.get_index_info(collection_list[50 + i]) + assert result._index_type == IndexType.IVF_SQ8 + + # search + query_vecs = [vectors[0], vectors[10], vectors[20]] + for i in range(60): + collection = collection_list[i] + status, result = connect.search(collection, top_k, query_records=query_vecs, params={"nprobe": 1}) + assert status.OK() + assert len(result) == len(query_vecs) + logging.getLogger().info(i) + for j in range(len(query_vecs)): + assert len(result[j]) == top_k + for j in range(len(query_vecs)): + if not check_result(result[j], idx[3 * i + j]): + logging.getLogger().info(result[j]._id_list) + logging.getLogger().info(idx[3 * i + j]) + assert check_result(result[j], idx[3 * i + j]) + + +def check_result(result, id): + if len(result) >= 5: + return id in [result[0].id, result[1].id, result[2].id, result[3].id, result[4].id] + else: + return id in (i.id for i in result) + + +def check_id_result(result, id): + limit_in = 5 + ids = [entity.id for entity in result] + if len(result) >= limit_in: + return id in ids[:limit_in] + else: + return id in ids diff --git a/tests/python_client/testcases/test_partition.py b/tests/python_client/testcases/test_partition.py new file mode 100644 index 0000000000..ec7e6619ec --- /dev/null +++ b/tests/python_client/testcases/test_partition.py @@ -0,0 +1,496 @@ +import time +import random +import pdb +import threading +import logging +from multiprocessing import Pool, Process +import pytest +from utils.utils import * +from common.constants import * + +TIMEOUT = 120 + + +class TestCreateBase: + """ + ****************************************************************** + The following cases are used to test `create_partition` function + ****************************************************************** + """ + 
@pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_partition_a(self, connect, collection): + ''' + target: test create partition, check status returned + method: call function: create_partition + expected: status ok + ''' + connect.create_partition(collection, default_tag) + + @pytest.mark.tags(CaseLabel.L2) + @pytest.mark.timeout(600) + def test_create_partition_limit(self, connect, collection, args): + ''' + target: test create partitions, check status returned + method: call create_partition concurrently until max_partition_num is exceeded + expected: exception raised + ''' + threads_num = 8 + threads = [] + if args["handler"] == "HTTP": + pytest.skip("skip in http mode") + + def create(connect, threads_num): + for i in range(max_partition_num // threads_num): + tag_tmp = gen_unique_str() + connect.create_partition(collection, tag_tmp) + + for i in range(threads_num): + m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"]) + t = threading.Thread(target=create, args=(m, threads_num)) + threads.append(t) + t.start() + for t in threads: + t.join() + tag_tmp = gen_unique_str() + with pytest.raises(Exception) as e: + connect.create_partition(collection, tag_tmp) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_partition_repeat(self, connect, collection): + ''' + target: test creating the same partition twice, check the error returned + method: call create_partition twice with the same name + expected: the second call fails with an "already exists" error + ''' + connect.create_partition(collection, default_tag) + try: + connect.create_partition(collection, default_tag) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "create partition failed: partition name = %s already exists" % default_tag + assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default']) + + @pytest.mark.tags(CaseLabel.L2) + def test_create_partition_collection_not_existed(self, connect): + ''' + target: test create partition whose owner collection does not exist in db, check status returned + method: call function: create_partition + expected: status not ok + ''' + collection_name = gen_unique_str() + try: + connect.create_partition(collection_name, default_tag) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "create partition failed: can't find collection: %s" % collection_name + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_partition_name_None(self, connect, collection): + ''' + target: test create partition with the partition name set to None, check status returned + method: call function: create_partition + expected: an "illegal partition name" error is raised + ''' + tag_name = None + try: + connect.create_partition(collection, tag_name) + except Exception as e: + assert e.args[0] == "`partition_name` value None is illegal" + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_different_partition_names(self, connect, collection): + ''' + target: test create partition twice with different names + method: call function: create_partition, and again + expected: status ok + ''' + connect.create_partition(collection, default_tag) + tag_name = gen_unique_str() + connect.create_partition(collection, tag_name) + assert compare_list_elements(connect.list_partitions(collection), [default_tag, tag_name, '_default'])
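+ + # NOTE: the error-introspection pattern used by test_create_partition_repeat above (and by several + # cases below) can be shared through one helper; illustrative sketch only, not used by the suite. + def _assert_partition_error(self, e, expected_message): + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == expected_message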
+ + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_partition_insert_default(self, connect, id_collection): + ''' + target: test create partition, and insert vectors, check status returned + method: call function: create_partition + expected: status ok + ''' + connect.create_partition(id_collection, default_tag) + ids = [i for i in range(default_nb)] + result = connect.insert(id_collection, default_entities) + assert len(result.primary_keys) == len(ids) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_partition_insert_with_tag(self, connect, id_collection): + ''' + target: test create partition, and insert vectors, check status returned + method: call function: create_partition + expected: status ok + ''' + connect.create_partition(id_collection, default_tag) + ids = [i for i in range(default_nb)] + result = connect.insert(id_collection, default_entities, partition_name=default_tag) + assert len(result.primary_keys) == len(ids) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_partition_insert_with_tag_not_existed(self, connect, collection): + ''' + target: test create partition, and insert vectors, check status returned + method: call function: create_partition + expected: status not ok + ''' + tag_new = "tag_new" + connect.create_partition(collection, default_tag) + ids = [i for i in range(default_nb)] + try: + connect.insert(collection, default_entities, partition_name=tag_new) + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "partitionID of partitionName:%s can not be find" % tag_new + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_create_partition_insert_same_tags(self, connect, id_collection): + ''' + target: test create partition, and insert vectors, check status returned + method: call function: create_partition + expected: status ok + ''' + connect.create_partition(id_collection, default_tag) + ids = [i for i in range(default_nb)] + result = connect.insert(id_collection, default_entities, partition_name=default_tag) + assert len(result.primary_keys) == default_nb + ids = [(i+default_nb) for i in range(default_nb)] + new_result = connect.insert(id_collection, default_entities, partition_name=default_tag) + assert len(new_result.primary_keys) == default_nb + connect.flush([id_collection]) + res = connect.get_collection_stats(id_collection) + assert res["row_count"] == default_nb * 2 + + @pytest.mark.tags(CaseLabel.L2) + def test_create_partition_insert_same_tags_two_collections(self, connect, collection): + ''' + target: test create two partitions, and insert vectors with the same tag to each collection, check status returned + method: call function: create_partition + expected: status ok, collection length is correct + ''' + connect.create_partition(collection, default_tag) + collection_new = gen_unique_str() + connect.create_collection(collection_new, default_fields) + connect.create_partition(collection_new, default_tag) + result = connect.insert(collection, default_entities, partition_name=default_tag) + assert len(result.primary_keys) == default_nb + new_result = connect.insert(collection_new, default_entities, partition_name=default_tag) + assert len(new_result.primary_keys) == default_nb + connect.flush([collection, collection_new]) + res = connect.get_collection_stats(collection) + assert res["row_count"] == default_nb + res = 
connect.get_collection_stats(collection_new) + assert res["row_count"] == default_nb + + +class TestShowBase: + + """ + ****************************************************************** + The following cases are used to test `list_partitions` function + ****************************************************************** + """ + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_list_partitions(self, connect, collection): + ''' + target: test show partitions, check status and partitions returned + method: create partition first, then call function: list_partitions + expected: status ok, partition correct + ''' + connect.create_partition(collection, default_tag) + assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default']) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_list_partitions_no_partition(self, connect, collection): + ''' + target: test show partitions with collection name, check status and partitions returned + method: call function: list_partitions + expected: status ok, partitions correct + ''' + res = connect.list_partitions(collection) + assert compare_list_elements(res, ['_default']) + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_show_multi_partitions(self, connect, collection): + ''' + target: test show partitions, check status and partitions returned + method: create partitions first, then call function: list_partitions + expected: status ok, partitions correct + ''' + tag_new = gen_unique_str() + connect.create_partition(collection, default_tag) + connect.create_partition(collection, tag_new) + res = connect.list_partitions(collection) + assert compare_list_elements(res, [default_tag, tag_new, '_default']) + + +class TestHasBase: + + """ + ****************************************************************** + The following cases are used to test `has_partition` function + ****************************************************************** + """ + @pytest.fixture( + scope="function", + params=gen_invalid_strs() + ) + def get_tag_name(self, request): + yield request.param + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_has_partition_a(self, connect, collection): + ''' + target: test has_partition, check status and result + method: create partition first, then call function: has_partition + expected: status ok, result true + ''' + connect.create_partition(collection, default_tag) + res = connect.has_partition(collection, default_tag) + logging.getLogger().info(res) + assert res + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_has_partition_multi_partitions(self, connect, collection): + ''' + target: test has_partition, check status and result + method: create partition first, then call function: has_partition + expected: status ok, result true + ''' + for tag_name in [default_tag, "tag_new", "tag_new_new"]: + connect.create_partition(collection, tag_name) + for tag_name in [default_tag, "tag_new", "tag_new_new"]: + res = connect.has_partition(collection, tag_name) + assert res + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_has_partition_name_not_existed(self, connect, collection): + ''' + target: test has_partition, check status and result + method: then call function: has_partition, with tag not existed + expected: status ok, result empty + ''' + res = connect.has_partition(collection, default_tag) + logging.getLogger().info(res) + assert not res + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_has_partition_collection_not_existed(self, connect, collection): + ''' + target: test has_partition, check 
+
+
+class TestHasBase:
+
+    """
+    ******************************************************************
+    The following cases are used to test `has_partition` function
+    ******************************************************************
+    """
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_tag_name(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_has_partition_a(self, connect, collection):
+        '''
+        target: test has_partition, check the status and result
+        method: create a partition first, then call function: has_partition
+        expected: status ok, result true
+        '''
+        connect.create_partition(collection, default_tag)
+        res = connect.has_partition(collection, default_tag)
+        logging.getLogger().info(res)
+        assert res
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_has_partition_multi_partitions(self, connect, collection):
+        '''
+        target: test has_partition, check the status and result
+        method: create partitions first, then call function: has_partition
+        expected: status ok, result true
+        '''
+        for tag_name in [default_tag, "tag_new", "tag_new_new"]:
+            connect.create_partition(collection, tag_name)
+        for tag_name in [default_tag, "tag_new", "tag_new_new"]:
+            res = connect.has_partition(collection, tag_name)
+            assert res
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_has_partition_name_not_existed(self, connect, collection):
+        '''
+        target: test has_partition, check the status and result
+        method: call function: has_partition with a tag that does not exist
+        expected: status ok, result false
+        '''
+        res = connect.has_partition(collection, default_tag)
+        logging.getLogger().info(res)
+        assert not res
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_has_partition_collection_not_existed(self, connect, collection):
+        '''
+        target: test has_partition, check the status and result
+        method: call function: has_partition with a collection that does not exist
+        expected: status not ok
+        '''
+        collection_name = "not_existed_collection"
+        try:
+            connect.has_partition(collection_name, default_tag)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "HasPartition failed: can't find collection: %s" % collection_name
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_has_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
+        '''
+        target: test has_partition with an invalid tag name, check the status returned
+        method: call function: has_partition
+        expected: status not ok
+        '''
+        tag_name = get_tag_name
+        connect.create_partition(collection, default_tag)
+        with pytest.raises(Exception) as e:
+            connect.has_partition(collection, tag_name)
+
+
+class TestDropBase:
+
+    """
+    ******************************************************************
+    The following cases are used to test `drop_partition` function
+    ******************************************************************
+    """
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_drop_partition_a(self, connect, collection):
+        '''
+        target: test drop partition, check the status and whether the partition still exists
+        method: create a partition first, then call function: drop_partition
+        expected: status ok, no custom partitions left in the collection
+        '''
+        connect.create_partition(collection, default_tag)
+        res1 = connect.list_partitions(collection)
+        assert default_tag in res1
+        connect.drop_partition(collection, default_tag)
+        res2 = connect.list_partitions(collection)
+        assert default_tag not in res2
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_drop_partition_name_not_existed(self, connect, collection):
+        '''
+        target: test drop partition when the tag does not exist
+        method: create a partition first, then call function: drop_partition with another tag
+        expected: status not ok
+        '''
+        connect.create_partition(collection, default_tag)
+        new_tag = "new_tag"
+        try:
+            connect.drop_partition(collection, new_tag)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "DropPartition failed: partition %s does not exist" % new_tag
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_drop_partition_name_not_existed_A(self, connect, collection):
+        '''
+        target: test drop partition when the collection does not exist
+        method: create a partition first, then call function: drop_partition on another collection
+        expected: status not ok
+        '''
+        connect.create_partition(collection, default_tag)
+        new_collection = gen_unique_str()
+        try:
+            connect.drop_partition(new_collection, default_tag)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "DropPartition failed: can't find collection: %s" % new_collection
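The error cases above all repeat one pattern: call, catch, then compare `e.code` and `e.message` via getattr. If the suite grows, the pattern could be factored into a small helper; a sketch under that assumption (assert_milvus_error is hypothetical, not part of this patch):

```python
import pytest

# Hypothetical helper (not part of this patch): asserts that calling
# func(*args, **kwargs) raises a Milvus error with the given code/message.
def assert_milvus_error(func, expected_message, expected_code=1, *args, **kwargs):
    with pytest.raises(Exception) as exc_info:
        func(*args, **kwargs)
    e = exc_info.value
    assert getattr(e, 'code', None) == expected_code
    assert getattr(e, 'message', None) == expected_message

# Usage against the same API as test_drop_partition_name_not_existed:
# assert_milvus_error(connect.drop_partition,
#                     "DropPartition failed: partition new_tag does not exist",
#                     1, collection, "new_tag")
```

Note that unlike the bare try/except in the tests, which passes silently if nothing is raised, pytest.raises fails the test when the expected exception never occurs.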
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_drop_partition_repeatedly(self, connect, collection):
+        '''
+        target: test drop the same partition twice, check the status and whether the partition still exists
+        method: create a partition first, then call function: drop_partition twice
+        expected: status not ok on the second drop, no custom partitions left in the collection
+        '''
+        connect.create_partition(collection, default_tag)
+        connect.drop_partition(collection, default_tag)
+        time.sleep(2)
+        try:
+            connect.drop_partition(collection, default_tag)
+        except Exception as e:
+            code = getattr(e, 'code', "The exception does not contain the field of code.")
+            assert code == 1
+            message = getattr(e, 'message', "The exception does not contain the field of message.")
+            assert message == "DropPartition failed: partition %s does not exist" % default_tag
+        tag_list = connect.list_partitions(collection)
+        assert default_tag not in tag_list
+
+    @pytest.mark.tags(CaseLabel.tags_smoke)
+    def test_drop_partition_create(self, connect, collection):
+        '''
+        target: test drop partition and create it again, check the status
+        method: create a partition first, then call functions: drop_partition, create_partition
+        expected: status ok, partition is recreated in the collection
+        '''
+        connect.create_partition(collection, default_tag)
+        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
+        connect.drop_partition(collection, default_tag)
+        assert compare_list_elements(connect.list_partitions(collection), ['_default'])
+        time.sleep(2)
+        connect.create_partition(collection, default_tag)
+        assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
+
+
+class TestNameInvalid(object):
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_tag_name(self, request):
+        yield request.param
+
+    @pytest.fixture(
+        scope="function",
+        params=gen_invalid_strs()
+    )
+    def get_collection_name(self, request):
+        yield request.param
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_drop_partition_with_invalid_collection_name(self, connect, collection, get_collection_name):
+        '''
+        target: test drop partition with an invalid collection name, check the status returned
+        method: call function: drop_partition
+        expected: status not ok
+        '''
+        collection_name = get_collection_name
+        connect.create_partition(collection, default_tag)
+        with pytest.raises(Exception) as e:
+            connect.drop_partition(collection_name, default_tag)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_drop_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
+        '''
+        target: test drop partition with an invalid tag name, check the status returned
+        method: call function: drop_partition
+        expected: status not ok
+        '''
+        tag_name = get_tag_name
+        connect.create_partition(collection, default_tag)
+        with pytest.raises(Exception) as e:
+            connect.drop_partition(collection, tag_name)
+
+    @pytest.mark.tags(CaseLabel.L2)
+    def test_list_partitions_with_invalid_collection_name(self, connect, collection, get_collection_name):
+        '''
+        target: test show partitions with an invalid collection name, check the status returned
+        method: call function: list_partitions
+        expected: status not ok
+        '''
+        collection_name = get_collection_name
+        connect.create_partition(collection, default_tag)
+        with pytest.raises(Exception) as e:
+            connect.list_partitions(collection_name)
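TestNameInvalid drives each case once per value of gen_invalid_strs() through parametrized fixtures. A stripped-down sketch of that mechanism, with the value list inlined for illustration:

```python
import pytest

# Stand-in for gen_invalid_strs() from utils.py, inlined for illustration.
INVALID_NAMES = [1, [1], None, "12-s", "12 s", "(mn)", "中文"]

@pytest.fixture(scope="function", params=INVALID_NAMES)
def get_collection_name(request):
    # Each test using this fixture runs once per param value.
    yield request.param

def test_invalid_name_runs_once_per_param(get_collection_name):
    # The real tests wrap a client call in pytest.raises(Exception); this
    # placeholder only demonstrates that the fixture fans out per value.
    assert get_collection_name in INVALID_NAMES
```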
"DropPartition failed: default partition cannot be deleted" + list_partition = connect.list_partitions(collection) + assert '_default' in list_partition + + @pytest.mark.tags(CaseLabel.tags_smoke) + def test_drop_default_partition_B(self, connect, collection): + ''' + target: test drop partition of default, check status returned + method: call function: drop_partition + expected: status not ok + ''' + connect.create_partition(collection, default_tag) + try: + connect.drop_partition(collection, partition_name='_default') + except Exception as e: + code = getattr(e, 'code', "The exception does not contain the field of code.") + assert code == 1 + message = getattr(e, 'message', "The exception does not contain the field of message.") + assert message == "DropPartition failed: default partition cannot be deleted" + list_partition = connect.list_partitions(collection) + assert '_default' in list_partition diff --git a/tests20/python_client/testcases/test_partition.py b/tests/python_client/testcases/test_partition_20.py similarity index 99% rename from tests20/python_client/testcases/test_partition.py rename to tests/python_client/testcases/test_partition_20.py index 56bd7d94e8..7a2cda1246 100644 --- a/tests20/python_client/testcases/test_partition.py +++ b/tests/python_client/testcases/test_partition_20.py @@ -218,7 +218,7 @@ class TestPartitionParams(TestcaseBase): self.partition_wrap.init_partition(collection=None, name=partition_name, check_task=CheckTasks.err_res, check_items={ct.err_code: 1, - ct.err_msg: "must be pymilvus_orm.Collection"}) + ct.err_msg: "must be pymilvus.Collection"}) @pytest.mark.tags(CaseLabel.L1) # @pytest.mark.parametrize("partition_name", [cf.gen_unique_str(prefix)]) diff --git a/tests/python_client/testcases/test_ping.py b/tests/python_client/testcases/test_ping.py new file mode 100644 index 0000000000..80cb238a84 --- /dev/null +++ b/tests/python_client/testcases/test_ping.py @@ -0,0 +1,129 @@ +import logging +import pytest + +__version__ = '0.11.1' + + +# class TestPing: +# def test_server_version(self, connect): +# ''' +# target: test get the server version +# method: call the server_version method after connected +# expected: version should be the milvus version +# ''' +# res = connect.server_version() +# assert res == __version__ +# +# def test_server_status(self, connect): +# ''' +# target: test get the server status +# method: call the server_status method after connected +# expected: status returned should be ok +# ''' +# msg = connect.server_status() +# assert msg +# +# def test_server_cmd_with_params_version(self, connect): +# ''' +# target: test cmd: version +# method: cmd = "version" ... +# expected: when cmd = 'version', return version of server; +# ''' +# cmd = "version" +# msg = connect._cmd(cmd) +# logging.getLogger().info(msg) +# assert msg == __version__ +# +# def test_server_cmd_with_params_others(self, connect): +# ''' +# target: test cmd: lalala +# method: cmd = "lalala" ... 
diff --git a/tests/python_client/testcases/test_ping.py b/tests/python_client/testcases/test_ping.py
new file mode 100644
index 0000000000..80cb238a84
--- /dev/null
+++ b/tests/python_client/testcases/test_ping.py
@@ -0,0 +1,129 @@
+import logging
+import pytest
+
+__version__ = '0.11.1'
+
+
+# class TestPing:
+#     def test_server_version(self, connect):
+#         '''
+#         target: test get the server version
+#         method: call the server_version method after connected
+#         expected: version should be the milvus version
+#         '''
+#         res = connect.server_version()
+#         assert res == __version__
+#
+#     def test_server_status(self, connect):
+#         '''
+#         target: test get the server status
+#         method: call the server_status method after connected
+#         expected: status returned should be ok
+#         '''
+#         msg = connect.server_status()
+#         assert msg
+#
+#     def test_server_cmd_with_params_version(self, connect):
+#         '''
+#         target: test cmd: version
+#         method: cmd = "version"
+#         expected: when cmd = 'version', return version of server
+#         '''
+#         cmd = "version"
+#         msg = connect._cmd(cmd)
+#         logging.getLogger().info(msg)
+#         assert msg == __version__
+#
+#     def test_server_cmd_with_params_others(self, connect):
+#         '''
+#         target: test cmd: lalala
+#         method: cmd = "lalala"
+#         expected: when cmd is not supported, the server returns an error
+#         '''
+#         cmd = "rm -rf test"
+#         msg = connect._cmd(cmd)
+#
+#     def test_connected(self, connect):
+#         # assert connect.connected()
+#         assert connect
+#
+#
+# class TestPingWithTimeout:
+#     def test_server_version_legal_timeout(self, connect):
+#         '''
+#         target: test get the server version with legal timeout
+#         method: call the server_version method after connected with altering timeout
+#         expected: version should be the milvus version
+#         '''
+#         res = connect.server_version(20)
+#         assert res == __version__
+#
+#     def test_server_version_negative_timeout(self, connect):
+#         '''
+#         target: test get the server version with negative timeout
+#         method: call the server_version method after connected with altering timeout
+#         expected: when timeout is illegal, raises an error
+#         '''
+#         with pytest.raises(Exception) as e:
+#             res = connect.server_version(-1)
+#
+#     def test_server_cmd_with_params_version_with_legal_timeout(self, connect):
+#         '''
+#         target: test cmd: version and timeout
+#         method: cmd = "version", timeout=10
+#         expected: when cmd = 'version', return version of server
+#         '''
+#         cmd = "version"
+#         msg = connect._cmd(cmd, 10)
+#         logging.getLogger().info(msg)
+#         assert msg == __version__
+#
+#     def test_server_cmd_with_params_version_with_illegal_timeout(self, connect):
+#         '''
+#         target: test cmd: version and timeout
+#         method: cmd = "version", timeout=-1
+#         expected: when timeout is illegal, raises an error
+#         '''
+#         with pytest.raises(Exception) as e:
+#             res = connect.server_version(-1)
+#
+#     def test_server_cmd_with_params_others_with_illegal_timeout(self, connect):
+#         '''
+#         target: test cmd: lalala, timeout = -1
+#         method: cmd = "lalala", timeout = -1
+#         expected: when timeout is illegal, raises an error
+#         '''
+#         cmd = "rm -rf test"
+#         with pytest.raises(Exception) as e:
+#             res = connect.server_version(-1)
+#
+#
+# class TestPingDisconnect:
+#     def test_server_version(self, dis_connect):
+#         '''
+#         target: test get the server version, after disconnect
+#         method: call the server_version method after disconnected
+#         expected: raises an error
+#         '''
+#         with pytest.raises(Exception) as e:
+#             res = dis_connect.server_version()
+#
+#     def test_server_status(self, dis_connect):
+#         '''
+#         target: test get the server status, after disconnect
+#         method: call the server_status method after disconnected
+#         expected: status returned should be not ok
+#         '''
+#         with pytest.raises(Exception) as e:
+#             res = dis_connect.server_status()
+#
+#     @pytest.mark.tags(CaseLabel.L2)
+#     def test_server_version_with_timeout(self, dis_connect):
+#         '''
+#         target: test get the server status with timeout settings after disconnect
+#         method: call the server_status method after disconnected
+#         expected: status returned should be not ok
+#         '''
+#         status = None
+#         with pytest.raises(Exception) as e:
+#             res = dis_connect.server_status(100)
diff --git a/tests20/python_client/testcases/test_query.py b/tests/python_client/testcases/test_query_20.py
similarity index 99%
rename from tests20/python_client/testcases/test_query.py
rename to tests/python_client/testcases/test_query_20.py
index 71d96e30e5..2f5fad9b07 100644
--- a/tests20/python_client/testcases/test_query.py
+++ b/tests/python_client/testcases/test_query_20.py
@@ -1,6 +1,6 @@
 import pytest
 import random
-from pymilvus_orm.default_config import DefaultConfig
+from pymilvus import DefaultConfig
 from base.client_base import TestcaseBase
 from common.code_mapping import ConnectionErrorMessage as cem
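Another instance of the pymilvus_orm consolidation: DefaultConfig is now imported from the top-level pymilvus package. A quick sketch of what the tests get from it (attribute names per pymilvus 2.x; treat the exact values as an assumption):

```python
from pymilvus import DefaultConfig, connections

# DefaultConfig centralizes client defaults; DEFAULT_USING ("default") names
# the default connection alias in pymilvus 2.x (host/port values assumed).
connections.connect(
    alias=DefaultConfig.DEFAULT_USING,
    host="localhost",
    port="19530",
)
```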
diff --git a/tests20/python_client/testcases/test_search.py b/tests/python_client/testcases/test_search_20.py
similarity index 100%
rename from tests20/python_client/testcases/test_search.py
rename to tests/python_client/testcases/test_search_20.py
diff --git a/tests20/python_client/testcases/test_utility.py b/tests/python_client/testcases/test_utility_20.py
similarity index 99%
rename from tests20/python_client/testcases/test_utility.py
rename to tests/python_client/testcases/test_utility_20.py
index 5d89c64eec..9924ff5214 100644
--- a/tests20/python_client/testcases/test_utility.py
+++ b/tests/python_client/testcases/test_utility_20.py
@@ -165,7 +165,8 @@ class TestUtilityParams(TestcaseBase):
         self.utility_wrap.calc_distance(invalid_vector, invalid_vector,
                                         check_task=CheckTasks.err_res,
                                         check_items={"err_code": 1,
-                                                     "err_msg": "Left vectors array is invalid"})
+                                                     "err_msg": "vectors_left value {} "
+                                                                "is illegal".format(invalid_vector)})
 
     @pytest.mark.tags(CaseLabel.L1)
     @pytest.mark.xfail(reason="issue 7038")
@@ -198,7 +199,8 @@ class TestUtilityParams(TestcaseBase):
         self.utility_wrap.calc_distance(op_l, invalid_vector,
                                         check_task=CheckTasks.err_res,
                                         check_items={"err_code": 1,
-                                                     "err_msg": "Right vectors array is invalid"})
+                                                     "err_msg": "vectors_right value {} "
+                                                                "is illegal".format(invalid_vector)})
 
     @pytest.mark.tags(CaseLabel.L1)
     def test_calc_distance_right_vector_invalid_value(self, get_invalid_vector_dict):
@@ -396,7 +398,7 @@ class TestUtilityBase(TestcaseBase):
         res, _ = self.utility_wrap.has_partition(c_name, p_name)
         assert res is False
 
-    @pytest.mark.xfail(reason="issue #5667")
+    #@pytest.mark.xfail(reason="issue #5667")
     @pytest.mark.tags(CaseLabel.L1)
     def test_list_collections(self):
         """
diff --git a/tests20/python_client/utils/api_request.py b/tests/python_client/utils/api_request.py
similarity index 100%
rename from tests20/python_client/utils/api_request.py
rename to tests/python_client/utils/api_request.py
diff --git a/tests20/python_client/utils/util_log.py b/tests/python_client/utils/util_log.py
similarity index 100%
rename from tests20/python_client/utils/util_log.py
rename to tests/python_client/utils/util_log.py
diff --git a/tests/python_client/utils/utils.py b/tests/python_client/utils/utils.py
new file mode 100644
index 0000000000..c1e60f701d
--- /dev/null
+++ b/tests/python_client/utils/utils.py
@@ -0,0 +1,1031 @@
+import os
+import sys
+import random
+import pdb
+import string
+import struct
+import logging
+import threading
+import traceback
+import time
+import copy
+import numpy as np
+from sklearn import preprocessing
+from pymilvus import Milvus, DataType
+
+port = 19530
+epsilon = 0.000001
+namespace = "milvus"
+
+default_flush_interval = 1
+big_flush_interval = 1000
+default_drop_interval = 3
+default_dim = 128
+default_nb = 3000
+default_top_k = 10
+max_top_k = 16384
+# max_partition_num = 256
+max_partition_num = 4096
+default_segment_row_limit = 1000
+default_server_segment_row_limit = 1024 * 512
+default_float_vec_field_name = "float_vector"
+default_binary_vec_field_name = "binary_vector"
+default_partition_name = "_default"
+default_tag = "1970_01_01"
+row_count = "row_count"
+
+# TODO: disable RHNSW_SQ/PQ in 0.11.0
+all_index_types = [
+    "FLAT",
+    "IVF_FLAT",
+    "IVF_SQ8",
+    # "IVF_SQ8_HYBRID",
+    "IVF_PQ",
+    "HNSW",
+    # "NSG",
+    "ANNOY",
+    "RHNSW_FLAT",
+    "RHNSW_PQ",
+    "RHNSW_SQ",
+    "BIN_FLAT",
+    "BIN_IVF_FLAT"
+]
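all_index_types is kept position-aligned with default_index_params (defined next), which is why gen_simple_index and gen_binary_index later in this file iterate by index. A small sketch of how the two lists pair up:

```python
# Sketch: the i-th entry of default_index_params configures the i-th entry
# of all_index_types; zip() makes the pairing explicit.
for index_type, params in zip(all_index_types, default_index_params):
    print(index_type, "->", params)
# e.g. "IVF_FLAT -> {'nlist': 128}" and "HNSW -> {'M': 48, 'efConstruction': 500}"
```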
"nbits": 8}, + {"M": 48, "efConstruction": 500}, + # {"search_length": 50, "out_degree": 40, "candidate_pool_size": 100, "knng": 50}, + {"n_trees": 50}, + {"M": 48, "efConstruction": 500}, + {"M": 48, "efConstruction": 500, "PQM": 64}, + {"M": 48, "efConstruction": 500}, + {"nlist": 128}, + {"nlist": 128} +] + +def create_target_index(index,field_name): + index["field_name"]=field_name + +def index_cpu_not_support(): + return ["IVF_SQ8_HYBRID"] + + +def binary_support(): + return ["BIN_FLAT", "BIN_IVF_FLAT"] + + +def delete_support(): + return ["FLAT", "IVF_FLAT", "IVF_SQ8", "IVF_SQ8_HYBRID", "IVF_PQ"] + + +def ivf(): + return ["FLAT", "IVF_FLAT", "IVF_SQ8", "IVF_SQ8_HYBRID", "IVF_PQ"] + + +def skip_pq(): + return ["IVF_PQ", "RHNSW_PQ", "RHNSW_SQ"] + + +def binary_metrics(): + return ["JACCARD", "HAMMING", "TANIMOTO", "SUBSTRUCTURE", "SUPERSTRUCTURE"] + + +def structure_metrics(): + return ["SUBSTRUCTURE", "SUPERSTRUCTURE"] + + +def l2(x, y): + return np.linalg.norm(np.array(x) - np.array(y)) + + +def ip(x, y): + return np.inner(np.array(x), np.array(y)) + + +def jaccard(x, y): + x = np.asarray(x, np.bool) + y = np.asarray(y, np.bool) + return 1 - np.double(np.bitwise_and(x, y).sum()) / np.double(np.bitwise_or(x, y).sum()) + + +def hamming(x, y): + x = np.asarray(x, np.bool) + y = np.asarray(y, np.bool) + return np.bitwise_xor(x, y).sum() + + +def tanimoto(x, y): + x = np.asarray(x, np.bool) + y = np.asarray(y, np.bool) + return -np.log2(np.double(np.bitwise_and(x, y).sum()) / np.double(np.bitwise_or(x, y).sum())) + + +def substructure(x, y): + x = np.asarray(x, np.bool) + y = np.asarray(y, np.bool) + return 1 - np.double(np.bitwise_and(x, y).sum()) / np.count_nonzero(y) + + +def superstructure(x, y): + x = np.asarray(x, np.bool) + y = np.asarray(y, np.bool) + return 1 - np.double(np.bitwise_and(x, y).sum()) / np.count_nonzero(x) + + +def get_milvus(host, port, uri=None, handler=None, **kwargs): + if handler is None: + handler = "GRPC" + try_connect = kwargs.get("try_connect", True) + if uri is not None: + milvus = Milvus(uri=uri, handler=handler, try_connect=try_connect) + else: + milvus = Milvus(host=host, port=port, handler=handler, try_connect=try_connect) + return milvus + + +def reset_build_index_threshold(connect): + connect.set_config("engine", "build_index_threshold", 1024) + + +def disable_flush(connect): + connect.set_config("storage", "auto_flush_interval", big_flush_interval) + + +def enable_flush(connect): + # reset auto_flush_interval=1 + connect.set_config("storage", "auto_flush_interval", default_flush_interval) + config_value = connect.get_config("storage", "auto_flush_interval") + assert config_value == str(default_flush_interval) + + +def gen_inaccuracy(num): + return num / 255.0 + + +def gen_vectors(num, dim, is_normal=True): + vectors = [[random.random() for _ in range(dim)] for _ in range(num)] + vectors = preprocessing.normalize(vectors, axis=1, norm='l2') + return vectors.tolist() + + +# def gen_vectors(num, dim, seed=np.random.RandomState(1234), is_normal=False): +# xb = seed.rand(num, dim).astype("float32") +# xb = preprocessing.normalize(xb, axis=1, norm='l2') +# return xb.tolist() + + +def gen_binary_vectors(num, dim): + raw_vectors = [] + binary_vectors = [] + for i in range(num): + raw_vector = [random.randint(0, 1) for i in range(dim)] + raw_vectors.append(raw_vector) + binary_vectors.append(bytes(np.packbits(raw_vector, axis=-1).tolist())) + return raw_vectors, binary_vectors + + +def gen_binary_sub_vectors(vectors, length): + raw_vectors = [] + 
+
+
+def gen_binary_sub_vectors(vectors, length):
+    raw_vectors = []
+    binary_vectors = []
+    dim = len(vectors[0])
+    for i in range(length):
+        raw_vector = [0 for i in range(dim)]
+        vector = vectors[i]
+        for index, j in enumerate(vector):
+            if j == 1:
+                raw_vector[index] = 1
+        raw_vectors.append(raw_vector)
+        binary_vectors.append(bytes(np.packbits(raw_vector, axis=-1).tolist()))
+    return raw_vectors, binary_vectors
+
+
+def gen_binary_super_vectors(vectors, length):
+    raw_vectors = []
+    binary_vectors = []
+    dim = len(vectors[0])
+    for i in range(length):
+        cnt_1 = np.count_nonzero(vectors[i])
+        # raw_vector = [0 for i in range(dim)] ???
+        raw_vector = [1 for i in range(dim)]
+        raw_vectors.append(raw_vector)
+        binary_vectors.append(bytes(np.packbits(raw_vector, axis=-1).tolist()))
+    return raw_vectors, binary_vectors
+
+
+def gen_int_attr(row_num):
+    return [random.randint(0, 255) for _ in range(row_num)]
+
+
+def gen_float_attr(row_num):
+    return [random.uniform(0, 255) for _ in range(row_num)]
+
+
+def gen_unique_str(str_value=None):
+    prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
+    return "test_" + prefix if str_value is None else str_value + "_" + prefix
+
+
+def gen_primary_field():
+    return {"name": gen_unique_str(), "type": DataType.INT64, "is_primary": True}
+
+
+def gen_single_filter_fields():
+    fields = []
+    for data_type in DataType:
+        if data_type in [DataType.INT32, DataType.INT64, DataType.FLOAT, DataType.DOUBLE]:
+            fields.append({"name": data_type.name, "type": data_type})
+    return fields
+
+
+def gen_single_vector_fields():
+    fields = []
+    for data_type in [DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR]:
+        field = {"name": data_type.name, "type": data_type, "params": {"dim": default_dim}}
+        fields.append(field)
+    return fields
+
+
+def gen_default_fields(auto_id=True):
+    default_fields = {
+        "fields": [
+            {"name": "int64", "type": DataType.INT64, "is_primary": True},
+            {"name": "float", "type": DataType.FLOAT},
+            {"name": default_float_vec_field_name, "type": DataType.FLOAT_VECTOR, "params": {"dim": default_dim}},
+        ],
+        "segment_row_limit": default_segment_row_limit,
+    }
+    return default_fields
+
+
+def gen_binary_default_fields(auto_id=True):
+    default_fields = {
+        "fields": [
+            {"name": "int64", "type": DataType.INT64, "is_primary": True},
+            {"name": "float", "type": DataType.FLOAT},
+            {"name": default_binary_vec_field_name, "type": DataType.BINARY_VECTOR, "params": {"dim": default_dim}}
+        ],
+        "segment_row_limit": default_segment_row_limit,
+        "auto_id": auto_id
+    }
+    return default_fields
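gen_default_fields produces the schema dict consumed by the legacy Milvus client's create_collection. A sketch of the round trip, assuming a server reachable on localhost:19530 and using only helpers from this module (gen_entities is defined just below):

```python
# Sketch: build a collection from the default schema and insert one batch.
milvus = get_milvus("localhost", port)          # `port` is the 19530 module constant
collection_name = gen_unique_str("demo")
milvus.create_collection(collection_name, gen_default_fields())
entities = gen_entities(default_nb)             # column-style entities, defined below
result = milvus.insert(collection_name, entities)
milvus.flush([collection_name])
assert milvus.get_collection_stats(collection_name)[row_count] == default_nb
```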
+
+
+def gen_entities(nb, is_normal=False):
+    vectors = gen_vectors(nb, default_dim, is_normal)
+    entities = [
+        {"name": "int64", "type": DataType.INT64, "values": [i for i in range(nb)]},
+        {"name": "float", "type": DataType.FLOAT, "values": [float(i) for i in range(nb)]},
+        {"name": default_float_vec_field_name, "type": DataType.FLOAT_VECTOR, "values": vectors}
+    ]
+    return entities
+
+
+def gen_entities_new(nb, is_normal=False):
+    vectors = gen_vectors(nb, default_dim, is_normal)
+    entities = [
+        {"name": "int64", "values": [i for i in range(nb)]},
+        {"name": "float", "values": [float(i) for i in range(nb)]},
+        {"name": default_float_vec_field_name, "values": vectors}
+    ]
+    return entities
+
+
+def gen_entities_rows(nb, is_normal=False, _id=True):
+    vectors = gen_vectors(nb, default_dim, is_normal)
+    entities = []
+    if not _id:
+        for i in range(nb):
+            entity = {
+                "_id": i,
+                "int64": i,
+                "float": float(i),
+                default_float_vec_field_name: vectors[i]
+            }
+            entities.append(entity)
+    else:
+        for i in range(nb):
+            entity = {
+                "int64": i,
+                "float": float(i),
+                default_float_vec_field_name: vectors[i]
+            }
+            entities.append(entity)
+    return entities
+
+
+def gen_binary_entities(nb):
+    raw_vectors, vectors = gen_binary_vectors(nb, default_dim)
+    entities = [
+        {"name": "int64", "type": DataType.INT64, "values": [i for i in range(nb)]},
+        {"name": "float", "type": DataType.FLOAT, "values": [float(i) for i in range(nb)]},
+        {"name": default_binary_vec_field_name, "type": DataType.BINARY_VECTOR, "values": vectors}
+    ]
+    return raw_vectors, entities
+
+
+def gen_binary_entities_new(nb):
+    raw_vectors, vectors = gen_binary_vectors(nb, default_dim)
+    entities = [
+        {"name": "int64", "values": [i for i in range(nb)]},
+        {"name": "float", "values": [float(i) for i in range(nb)]},
+        {"name": default_binary_vec_field_name, "values": vectors}
+    ]
+    return raw_vectors, entities
+
+
+def gen_binary_entities_rows(nb, _id=True):
+    raw_vectors, vectors = gen_binary_vectors(nb, default_dim)
+    entities = []
+    if not _id:
+        for i in range(nb):
+            entity = {
+                "_id": i,
+                "int64": i,
+                "float": float(i),
+                default_binary_vec_field_name: vectors[i]
+            }
+            entities.append(entity)
+    else:
+        for i in range(nb):
+            entity = {
+                "int64": i,
+                "float": float(i),
+                default_binary_vec_field_name: vectors[i]
+            }
+            entities.append(entity)
+    return raw_vectors, entities
+
+
+def gen_entities_by_fields(fields, nb, dim, ids=None):
+    entities = []
+    for field in fields:
+        if field.get("is_primary", False) and ids:
+            field_value = ids
+        elif field["type"] in [DataType.INT32, DataType.INT64]:
+            field_value = [1 for i in range(nb)]
+        elif field["type"] in [DataType.FLOAT, DataType.DOUBLE]:
+            field_value = [3.0 for i in range(nb)]
+        elif field["type"] == DataType.BINARY_VECTOR:
+            field_value = gen_binary_vectors(nb, dim)[1]
+        elif field["type"] == DataType.FLOAT_VECTOR:
+            field_value = gen_vectors(nb, dim)
+        field.update({"values": field_value})
+        entities.append(field)
+    return entities
+
+
+def assert_equal_entity(a, b):
+    pass
+
+
+def gen_query_vectors(field_name, entities, top_k, nq, search_params={"nprobe": 10}, rand_vector=False,
+                      metric_type="L2", replace_vecs=None):
+    if rand_vector is True:
+        dimension = len(entities[-1]["values"][0])
+        query_vectors = gen_vectors(nq, dimension)
+    else:
+        query_vectors = entities[-1]["values"][:nq]
+    if replace_vecs:
+        query_vectors = replace_vecs
+    must_param = {"vector": {field_name: {"topk": top_k, "query": query_vectors, "params": search_params}}}
+    must_param["vector"][field_name]["metric_type"] = metric_type
+    query = {
+        "bool": {
+            "must": [must_param]
+        }
+    }
+    return query, query_vectors
+
+
+def update_query_expr(src_query, keep_old=True, expr=None):
+    tmp_query = copy.deepcopy(src_query)
+    if expr is not None:
+        tmp_query["bool"].update(expr)
+    if keep_old is not True:
+        tmp_query["bool"].pop("must")
+    return tmp_query
+
+
+def gen_default_vector_expr(default_query):
+    return default_query["bool"]["must"][0]
+
+
+def gen_default_term_expr(keyword="term", field="int64", values=None):
+    if values is None:
+        values = [i for i in range(default_nb // 2)]
+    expr = {keyword: {field: {"values": values}}}
+    return expr
+
+
+def update_term_expr(src_term, terms):
+    tmp_term = copy.deepcopy(src_term)
+    for term in terms:
+        tmp_term["term"].update(term)
+    return tmp_term
+
+
+def gen_default_range_expr(keyword="range", field="int64", ranges=None):
+    if ranges is None:
+        ranges = {"GT": 1, "LT": default_nb // 2}
+    expr = {keyword: {field: ranges}}
+    return expr
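gen_query_vectors builds the legacy bool/must search DSL, and update_query_expr grafts term or range clauses onto it. A sketch of composing a filtered vector query from these helpers only:

```python
# Sketch: a top-10 vector query over the default float field, narrowed by a
# term filter on int64, using only helpers defined in this module.
entities = gen_entities(default_nb)
query, query_vectors = gen_query_vectors(default_float_vec_field_name,
                                         entities, top_k=10, nq=2)
term = gen_default_term_expr(values=[1, 2, 3])
# Replace the must-list with the vector clause plus the term filter.
filtered_query = update_query_expr(
    query, keep_old=True,
    expr={"must": [gen_default_vector_expr(query), term]})
```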
+
+
+def update_range_expr(src_range, ranges):
+    tmp_range = copy.deepcopy(src_range)
+    for range in ranges:
+        tmp_range["range"].update(range)
+    return tmp_range
+
+
+def gen_invalid_range():
+    range = [
+        {"range": 1},
+        {"range": {}},
+        {"range": []},
+        {"range": {"range": {"int64": {"GT": 0, "LT": default_nb // 2}}}}
+    ]
+    return range
+
+
+def gen_valid_ranges():
+    ranges = [
+        {"GT": 0, "LT": default_nb // 2},
+        {"GT": default_nb // 2, "LT": default_nb * 2},
+        {"GT": 0},
+        {"LT": default_nb},
+        {"GT": -1, "LT": default_top_k},
+    ]
+    return ranges
+
+
+def gen_invalid_term():
+    terms = [
+        {"term": 1},
+        {"term": []},
+        {"term": {}},
+        {"term": {"term": {"int64": {"values": [i for i in range(default_nb // 2)]}}}}
+    ]
+    return terms
+
+
+def add_field_default(default_fields, type=DataType.INT64, field_name=None):
+    tmp_fields = copy.deepcopy(default_fields)
+    if field_name is None:
+        field_name = gen_unique_str()
+    field = {
+        "name": field_name,
+        "type": type
+    }
+    tmp_fields["fields"].append(field)
+    return tmp_fields
+
+
+def add_field(entities, field_name=None):
+    nb = len(entities[0]["values"])
+    tmp_entities = copy.deepcopy(entities)
+    if field_name is None:
+        field_name = gen_unique_str()
+    field = {
+        "name": field_name,
+        "type": DataType.INT64,
+        "values": [i for i in range(nb)]
+    }
+    tmp_entities.append(field)
+    return tmp_entities
+
+
+def add_vector_field(entities, is_normal=False):
+    nb = len(entities[0]["values"])
+    vectors = gen_vectors(nb, default_dim, is_normal)
+    field = {
+        "name": gen_unique_str(),
+        "type": DataType.FLOAT_VECTOR,
+        "values": vectors
+    }
+    entities.append(field)
+    return entities
+
+
+# def update_fields_metric_type(fields, metric_type):
+#     tmp_fields = copy.deepcopy(fields)
+#     if metric_type in ["L2", "IP"]:
+#         tmp_fields["fields"][-1]["type"] = DataType.FLOAT_VECTOR
+#     else:
+#         tmp_fields["fields"][-1]["type"] = DataType.BINARY_VECTOR
+#     tmp_fields["fields"][-1]["params"]["metric_type"] = metric_type
+#     return tmp_fields
+
+
+def remove_field(entities):
+    del entities[0]
+    return entities
+
+
+def remove_vector_field(entities):
+    del entities[-1]
+    return entities
+
+
+def update_field_name(entities, old_name, new_name):
+    tmp_entities = copy.deepcopy(entities)
+    for item in tmp_entities:
+        if item["name"] == old_name:
+            item["name"] = new_name
+    return tmp_entities
+
+
+def update_field_type(entities, old_name, new_name):
+    tmp_entities = copy.deepcopy(entities)
+    for item in tmp_entities:
+        if item["name"] == old_name:
+            item["type"] = new_name
+    return tmp_entities
+
+
+def update_field_value(entities, old_type, new_value):
+    tmp_entities = copy.deepcopy(entities)
+    for item in tmp_entities:
+        if item["type"] == old_type:
+            for index, value in enumerate(item["values"]):
+                item["values"][index] = new_value
+    return tmp_entities
+
+
+def update_field_name_row(entities, old_name, new_name):
+    tmp_entities = copy.deepcopy(entities)
+    for item in tmp_entities:
+        if old_name in item:
+            item[new_name] = item[old_name]
+            item.pop(old_name)
+        else:
+            raise Exception("Field %s not in entities" % old_name)
+    return tmp_entities
+
+
+def update_field_type_row(entities, old_name, new_name):
+    tmp_entities = copy.deepcopy(entities)
+    for item in tmp_entities:
+        if old_name in item:
+            item["type"] = new_name
+    return tmp_entities
+
+
+# NB: this redefines the add_vector_field(entities, is_normal) helper above;
+# this variant generates a standalone field and returns only its name.
+def add_vector_field(nb, dimension=default_dim):
+    field_name = gen_unique_str()
+    field = {
+        "name": field_name,
+        "type": DataType.FLOAT_VECTOR,
+        "values": gen_vectors(nb, dimension)
+    }
+    return field_name
+
+
+def gen_segment_row_limits():
+    sizes = [
+        1024,
+        4096
+    ]
+    return sizes
+
+
+def gen_invalid_ips():
+    ips = [
+        # "255.0.0.0",
+        # "255.255.0.0",
+        # "255.255.255.0",
+        # "255.255.255.255",
+        "127.0.0",
+        # "123.0.0.2",
+        "12-s",
+        " ",
+        "12 s",
+        "BB。A",
+        " siede ",
+        "(mn)",
+        "中文",
+        "a".join("a" for _ in range(256))
+    ]
+    return ips
+
+
+def gen_invalid_uris():
+    ip = None
+    uris = [
+        " ",
+        "中文",
+        # invalid protocol
+        # "tc://%s:%s" % (ip, port),
+        # "tcp%s:%s" % (ip, port),
+
+        # invalid port
+        # "tcp://%s:100000" % ip,
+        # "tcp://%s: " % ip,
+        # "tcp://%s:19540" % ip,
+        # "tcp://%s:-1" % ip,
+        # "tcp://%s:string" % ip,
+
+        # invalid ip
+        "tcp:// :19530",
+        # "tcp://123.0.0.1:%s" % port,
+        "tcp://127.0.0:19530",
+        # "tcp://255.0.0.0:%s" % port,
+        # "tcp://255.255.0.0:%s" % port,
+        # "tcp://255.255.255.0:%s" % port,
+        # "tcp://255.255.255.255:%s" % port,
+        "tcp://\n:19530",
+    ]
+    return uris
+
+
+def gen_invalid_strs():
+    strings = [
+        1,
+        [1],
+        None,
+        "12-s",
+        # " ",
+        # "",
+        # None,
+        "12 s",
+        "(mn)",
+        "中文",
+        "a".join("a" for i in range(256))
+    ]
+    return strings
+
+
+def gen_invalid_field_types():
+    field_types = [
+        # 1,
+        "=c",
+        # 0,
+        None,
+        "",
+        "a".join("a" for i in range(256))
+    ]
+    return field_types
+
+
+def gen_invalid_metric_types():
+    metric_types = [
+        1,
+        "=c",
+        0,
+        None,
+        "",
+        "a".join("a" for i in range(256))
+    ]
+    return metric_types
+
+
+# TODO:
+def gen_invalid_ints():
+    int_values = [
+        # 1.0,
+        None,
+        [1, 2, 3],
+        " ",
+        "",
+        -1,
+        "String",
+        "=c",
+        "中文",
+        "a".join("a" for i in range(256))
+    ]
+    return int_values
+
+
+def gen_invalid_params():
+    params = [
+        9999999999,
+        -1,
+        # None,
+        [1, 2, 3],
+        " ",
+        "",
+        "String",
+        "中文"
+    ]
+    return params
+
+
+def gen_invalid_vectors():
+    invalid_vectors = [
+        "1*2",
+        [],
+        [1],
+        [1, 2],
+        [" "],
+        ['a'],
+        [None],
+        None,
+        (1, 2),
+        {"a": 1},
+        " ",
+        "",
+        "String",
+        " siede ",
+        "中文",
+        "a".join("a" for i in range(256))
+    ]
+    return invalid_vectors
+
+
+def gen_invaild_search_params():
+    invalid_search_key = 100
+    search_params = []
+    for index_type in all_index_types:
+        if index_type == "FLAT":
+            continue
+        search_params.append({"index_type": index_type, "search_params": {"invalid_key": invalid_search_key}})
+        if index_type in delete_support():
+            for nprobe in gen_invalid_params():
+                ivf_search_params = {"index_type": index_type, "search_params": {"nprobe": nprobe}}
+                search_params.append(ivf_search_params)
+        elif index_type in ["HNSW", "RHNSW_PQ", "RHNSW_SQ"]:
+            for ef in gen_invalid_params():
+                hnsw_search_param = {"index_type": index_type, "search_params": {"ef": ef}}
+                search_params.append(hnsw_search_param)
+        elif index_type == "NSG":
+            for search_length in gen_invalid_params():
+                nsg_search_param = {"index_type": index_type, "search_params": {"search_length": search_length}}
+                search_params.append(nsg_search_param)
+            search_params.append({"index_type": index_type, "search_params": {"invalid_key": 100}})
+        elif index_type == "ANNOY":
+            for search_k in gen_invalid_params():
+                if isinstance(search_k, int):
+                    continue
+                annoy_search_param = {"index_type": index_type, "search_params": {"search_k": search_k}}
+                search_params.append(annoy_search_param)
+    return search_params
+
+
+def gen_invalid_index():
+    index_params = []
+    for index_type in gen_invalid_strs():
+        index_param = {"index_type": index_type, "params": {"nlist": 1024}}
+        index_params.append(index_param)
+    for nlist in gen_invalid_params():
+        index_param = {"index_type": "IVF_FLAT", "params": {"nlist": nlist}}
+        index_params.append(index_param)
+    for M in gen_invalid_params():
+        index_params.append({"index_type": "HNSW", "params": {"M": M, "efConstruction": 100}})
+        index_params.append({"index_type": "RHNSW_PQ", "params": {"M": M, "efConstruction": 100}})
+        index_params.append({"index_type": "RHNSW_SQ", "params": {"M": M, "efConstruction": 100}})
+    for efConstruction in gen_invalid_params():
+        index_params.append({"index_type": "HNSW", "params": {"M": 16, "efConstruction": efConstruction}})
+        index_params.append({"index_type": "RHNSW_PQ", "params": {"M": 16, "efConstruction": efConstruction}})
+        index_params.append({"index_type": "RHNSW_SQ", "params": {"M": 16, "efConstruction": efConstruction}})
+    for search_length in gen_invalid_params():
+        index_param = {"index_type": "NSG",
+                       "params": {"search_length": search_length, "out_degree": 40, "candidate_pool_size": 50,
+                                  "knng": 100}}
+        index_params.append(index_param)
+    for out_degree in gen_invalid_params():
+        index_param = {"index_type": "NSG",
+                       "params": {"search_length": 100, "out_degree": out_degree, "candidate_pool_size": 50,
+                                  "knng": 100}}
+        index_params.append(index_param)
+    for candidate_pool_size in gen_invalid_params():
+        index_param = {"index_type": "NSG", "params": {"search_length": 100, "out_degree": 40,
+                                                       "candidate_pool_size": candidate_pool_size,
+                                                       "knng": 100}}
+        index_params.append(index_param)
+    index_params.append({"index_type": "IVF_FLAT", "params": {"invalid_key": 1024}})
+    index_params.append({"index_type": "HNSW", "params": {"invalid_key": 16, "efConstruction": 100}})
+    index_params.append({"index_type": "RHNSW_PQ", "params": {"invalid_key": 16, "efConstruction": 100}})
+    index_params.append({"index_type": "RHNSW_SQ", "params": {"invalid_key": 16, "efConstruction": 100}})
+    index_params.append({"index_type": "NSG",
+                         "params": {"invalid_key": 100, "out_degree": 40, "candidate_pool_size": 300,
+                                    "knng": 100}})
+    for invalid_n_trees in gen_invalid_params():
+        index_params.append({"index_type": "ANNOY", "params": {"n_trees": invalid_n_trees}})
+
+    return index_params
+
+
+def gen_index():
+    nlists = [1, 1024, 16384]
+    pq_ms = [128, 64, 32, 16, 8, 4]
+    Ms = [5, 24, 48]
+    efConstructions = [100, 300, 500]
+    search_lengths = [10, 100, 300]
+    out_degrees = [5, 40, 300]
+    candidate_pool_sizes = [50, 100, 300]
+    knngs = [5, 100, 300]
+
+    index_params = []
+    for index_type in all_index_types:
+        if index_type in ["FLAT", "BIN_FLAT", "BIN_IVF_FLAT"]:
+            index_params.append({"index_type": index_type, "index_param": {"nlist": 1024}})
+        elif index_type in ["IVF_FLAT", "IVF_SQ8", "IVF_SQ8_HYBRID"]:
+            ivf_params = [{"index_type": index_type, "index_param": {"nlist": nlist}}
+                          for nlist in nlists]
+            index_params.extend(ivf_params)
+        elif index_type == "IVF_PQ":
+            IVFPQ_params = [{"index_type": index_type, "index_param": {"nlist": nlist, "m": m}}
+                            for nlist in nlists
+                            for m in pq_ms]
+            index_params.extend(IVFPQ_params)
+        elif index_type in ["HNSW", "RHNSW_SQ", "RHNSW_PQ"]:
+            hnsw_params = [{"index_type": index_type, "index_param": {"M": M, "efConstruction": efConstruction}}
+                           for M in Ms
+                           for efConstruction in efConstructions]
+            index_params.extend(hnsw_params)
+        elif index_type == "NSG":
+            nsg_params = [{"index_type": index_type,
+                           "index_param": {"search_length": search_length, "out_degree": out_degree,
+                                           "candidate_pool_size": candidate_pool_size, "knng": knng}}
+                          for search_length in search_lengths
+                          for out_degree in out_degrees
+                          for candidate_pool_size in candidate_pool_sizes
+                          for knng in knngs]
+            index_params.extend(nsg_params)
+
+    return index_params
+
+
+def gen_simple_index():
+    index_params = []
+    for i in range(len(all_index_types)):
+        if all_index_types[i] in binary_support():
+            continue
+        dic = {"index_type": all_index_types[i], "metric_type": "L2"}
+        dic.update({"params": default_index_params[i]})
+        index_params.append(dic)
+    return index_params
+
+
+def gen_binary_index():
+    index_params = []
+    for i in range(len(all_index_types)):
+        if all_index_types[i] in binary_support():
+            dic = {"index_type": all_index_types[i]}
+            dic.update({"params": default_index_params[i]})
+            index_params.append(dic)
+    return index_params
+
+
+def gen_normal_expressions():
+    expressions = [
+        "int64 > 0",
+        "int64 > 0 && int64 < 2021",  # range
+        "int64 == 0 || int64 == 1 || int64 == 2 || int64 == 3",  # term
+    ]
+    return expressions
+
+
+def get_search_param(index_type, metric_type="L2"):
+    search_params = {"metric_type": metric_type}
+    if index_type in ivf() or index_type in binary_support():
+        search_params.update({"nprobe": 64})
+    elif index_type in ["HNSW", "RHNSW_FLAT", "RHNSW_SQ", "RHNSW_PQ"]:
+        search_params.update({"ef": 64})
+    elif index_type == "NSG":
+        search_params.update({"search_length": 100})
+    elif index_type == "ANNOY":
+        search_params.update({"search_k": 1000})
+    else:
+        logging.getLogger().error("Invalid index_type.")
+        raise Exception("Invalid index_type.")
+    return search_params
+
+
+def assert_equal_vector(v1, v2):
+    if len(v1) != len(v2):
+        assert False
+    for i in range(len(v1)):
+        assert abs(v1[i] - v2[i]) < epsilon
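get_search_param pairs naturally with gen_simple_index: one supplies the build-time params per index type, the other the matching query-time params. A sketch of the combined flow against the legacy client (the connected client and populated "demo_collection" are assumptions):

```python
# Sketch: build each simple index, then search with matching runtime params.
# Assumes `milvus` is a connected client and "demo_collection" exists with data.
for index in gen_simple_index():
    milvus.create_index("demo_collection", default_float_vec_field_name, index)
    search_param = get_search_param(index["index_type"])
    query, _ = gen_query_vectors(default_float_vec_field_name,
                                 gen_entities(default_nb), top_k=default_top_k,
                                 nq=1, search_params=search_param)
    milvus.search("demo_collection", query)
```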
logging.error("Restart pod: %s timeout" % pod_name_tmp) + res = False + return res + if ready_break: + break + else: + raise Exception("Pod: %s not found" % pod_name) + follow = True + pretty = True + previous = True # bool | Return previous terminated container logs. Defaults to false. (optional) + since_seconds = 56 # int | A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified. (optional) + timestamps = True # bool | If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false. (optional) + container = "milvus" + # start_time = time.time() + # while time.time() - start_time <= timeout: + # try: + # api_response = v1.read_namespaced_pod_log(pod_name_tmp, namespace, container=container, follow=follow, + # pretty=pretty, previous=previous, since_seconds=since_seconds, + # timestamps=timestamps) + # logging.error(api_response) + # return res + # except Exception as e: + # logging.error("Exception when calling CoreV1Api->read_namespaced_pod_log: %s\n" % e) + # # waiting for server start + # time.sleep(5) + # # res = False + # # return res + # if time.time() - start_time > timeout: + # logging.error("Restart pod: %s timeout" % pod_name_tmp) + # res = False + return res + + +def compare_list_elements(_first, _second): + if not isinstance(_first, list) or not isinstance(_second, list) or len(_first) != len(_second): + return False + + for ele in _first: + if ele not in _second: + return False + + return True + + +class MyThread(threading.Thread): + def __init__(self, target, args=()): + threading.Thread.__init__(self, target=target, args=args) + + def run(self): + self.exc = None + try: + super(MyThread, self).run() + except BaseException as e: + self.exc = e + logging.error(traceback.format_exc()) + + def join(self): + super(MyThread, self).join() + if self.exc: + raise self.exc + + +class CaseLabel: + tags_smoke = "smoke" + L1 = "L1" + L2 = "L2" + L3 = "L3" + diff --git a/tests/scripts/e2e.sh b/tests/scripts/e2e.sh index 92b9d62262..b4e7850aba 100755 --- a/tests/scripts/e2e.sh +++ b/tests/scripts/e2e.sh @@ -17,7 +17,7 @@ set -x MILVUS_HELM_RELEASE_NAME="${MILVUS_HELM_RELEASE_NAME:-milvus-testing}" MILVUS_CLUSTER_ENABLED="${MILVUS_CLUSTER_ENABLED:-false}" MILVUS_HELM_NAMESPACE="${MILVUS_HELM_NAMESPACE:-default}" -PARALLEL_NUM="${PARALLEL_NUM:-3}" +PARALLEL_NUM="${PARALLEL_NUM:-4}" MILVUS_CLIENT="${MILVUS_CLIENT:-pymilvus}" SOURCE="${BASH_SOURCE[0]}" @@ -71,13 +71,13 @@ pushd "${ROOT}/tests/docker" docker-compose up -d else if [[ "${MILVUS_CLIENT}" == "pymilvus" ]]; then - export MILVUS_PYTEST_WORKSPACE="/milvus/tests/python_test" - docker-compose run --rm pytest /bin/bash -c "pytest -n ${PARALLEL_NUM} --ip ${MILVUS_SERVICE_IP} \ - --port ${MILVUS_SERVICE_PORT} ${@:-} -x" - elif [[ "${MILVUS_CLIENT}" == "pymilvus-orm" ]]; then - export MILVUS_PYTEST_WORKSPACE="/milvus/tests20/python_client" - docker-compose run --rm pytest /bin/bash -c "pytest -n ${PARALLEL_NUM} --host ${MILVUS_SERVICE_IP} --port ${MILVUS_SERVICE_PORT} \ - --html=\${CI_LOG_PATH}/report.html --self-contained-html ${@:-} -x" + export MILVUS_PYTEST_WORKSPACE="/milvus/tests/python_client" + docker-compose run --rm pytest /bin/bash -c "pytest -n ${PARALLEL_NUM} --ip ${MILVUS_SERVICE_IP} --host ${MILVUS_SERVICE_IP}\ + --port ${MILVUS_SERVICE_PORT} 
diff --git a/tests/scripts/e2e.sh b/tests/scripts/e2e.sh
index 92b9d62262..b4e7850aba 100755
--- a/tests/scripts/e2e.sh
+++ b/tests/scripts/e2e.sh
@@ -17,7 +17,7 @@ set -x
 MILVUS_HELM_RELEASE_NAME="${MILVUS_HELM_RELEASE_NAME:-milvus-testing}"
 MILVUS_CLUSTER_ENABLED="${MILVUS_CLUSTER_ENABLED:-false}"
 MILVUS_HELM_NAMESPACE="${MILVUS_HELM_NAMESPACE:-default}"
-PARALLEL_NUM="${PARALLEL_NUM:-3}"
+PARALLEL_NUM="${PARALLEL_NUM:-4}"
 MILVUS_CLIENT="${MILVUS_CLIENT:-pymilvus}"
 
 SOURCE="${BASH_SOURCE[0]}"
@@ -71,13 +71,13 @@ pushd "${ROOT}/tests/docker"
         docker-compose up -d
     else
         if [[ "${MILVUS_CLIENT}" == "pymilvus" ]]; then
-            export MILVUS_PYTEST_WORKSPACE="/milvus/tests/python_test"
-            docker-compose run --rm pytest /bin/bash -c "pytest -n ${PARALLEL_NUM} --ip ${MILVUS_SERVICE_IP} \
-                --port ${MILVUS_SERVICE_PORT} ${@:-} -x"
-        elif [[ "${MILVUS_CLIENT}" == "pymilvus-orm" ]]; then
-            export MILVUS_PYTEST_WORKSPACE="/milvus/tests20/python_client"
-            docker-compose run --rm pytest /bin/bash -c "pytest -n ${PARALLEL_NUM} --host ${MILVUS_SERVICE_IP} --port ${MILVUS_SERVICE_PORT} \
-                --html=\${CI_LOG_PATH}/report.html --self-contained-html ${@:-} -x"
+            export MILVUS_PYTEST_WORKSPACE="/milvus/tests/python_client"
+            docker-compose run --rm pytest /bin/bash -c "pytest -n ${PARALLEL_NUM} --ip ${MILVUS_SERVICE_IP} --host ${MILVUS_SERVICE_IP}\
+                --port ${MILVUS_SERVICE_PORT} --html=\${CI_LOG_PATH}/report.html --self-contained-html ${@:-}"
+#        elif [[ "${MILVUS_CLIENT}" == "pymilvus-orm" ]]; then
+#            export MILVUS_PYTEST_WORKSPACE="/milvus/tests20/python_client"
+#            docker-compose run --rm pytest /bin/bash -c "pytest -n ${PARALLEL_NUM} --host ${MILVUS_SERVICE_IP} --port ${MILVUS_SERVICE_PORT} \
+#                --html=\${CI_LOG_PATH}/report.html --self-contained-html ${@:-}"
         fi
     fi
 popd
diff --git a/tests20/README.md b/tests20/README.md
deleted file mode 100644
index 455c7ee1d8..0000000000
--- a/tests20/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-## Tests
-### benchmark
-### go_client
-### java_client
-### python_client
\ No newline at end of file
diff --git a/tests20/benchmark/README.md b/tests20/benchmark/README.md
deleted file mode 100644
index b7362ec7a9..0000000000
--- a/tests20/benchmark/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-## benchmark
-
-### Run benchmark
-
-### Contribute in benchmark
\ No newline at end of file
diff --git a/tests20/python_client/.gitignore b/tests20/python_client/.gitignore
deleted file mode 100644
index 89f88234f0..0000000000
--- a/tests20/python_client/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-# python files
-.pytest_cache
-**/.pytest_cache
-.idea
-*.html
\ No newline at end of file
query quest") - parser.addoption('--check_content', action='store', default="check_content", help="content of check") - parser.addoption('--field_name', action='store', default="field_name", help="field_name of index") - - -@pytest.fixture -def ip(request): - return request.config.getoption("--ip") - - -@pytest.fixture -def host(request): - return request.config.getoption("--host") - - -@pytest.fixture -def service(request): - return request.config.getoption("--service") - - -@pytest.fixture -def port(request): - return request.config.getoption("--port") - - -@pytest.fixture -def http_port(request): - return request.config.getoption("--http_port") - - -@pytest.fixture -def handler(request): - return request.config.getoption("--handler") - - -@pytest.fixture -def tag(request): - return request.config.getoption("--tag") - - -@pytest.fixture -def dry_run(request): - return request.config.getoption("--dry_run") - - -@pytest.fixture -def connect_name(request): - return request.config.getoption("--connect_name") - - -@pytest.fixture -def partition_name(request): - return request.config.getoption("--partition_name") - - -@pytest.fixture -def descriptions(request): - return request.config.getoption("--descriptions") - - -@pytest.fixture -def collection_name(request): - return request.config.getoption("--collection_name") - - -@pytest.fixture -def search_vectors(request): - return request.config.getoption("--search_vectors") - - -@pytest.fixture -def index_param(request): - return request.config.getoption("--index_param") - - -@pytest.fixture -def data(request): - return request.config.getoption("--data") - - -@pytest.fixture -def clean_log(request): - return request.config.getoption("--clean_log") - - -@pytest.fixture -def schema(request): - return request.config.getoption("--schema") - - -@pytest.fixture -def err_msg(request): - return request.config.getoption("--err_msg") - - -@pytest.fixture -def term_expr(request): - return request.config.getoption("--term_expr") - - -@pytest.fixture -def check_content(request): - log.error("^" * 50) - log.error("check_content") - return request.config.getoption("--check_content") - - -@pytest.fixture -def field_name(request): - return request.config.getoption("--field_name") - - -""" fixture func """ - - -@pytest.fixture(scope="session", autouse=True) -def initialize_env(request): - """ clean log before testing """ - host = request.config.getoption("--host") - port = request.config.getoption("--port") - handler = request.config.getoption("--handler") - clean_log = request.config.getoption("--clean_log") - - """ params check """ - assert ip_check(host) and number_check(port) - - """ modify log files """ - cf.modify_file(file_path_list=[log_config.log_debug, log_config.log_info, log_config.log_err], is_modify=clean_log) - - log.info("#" * 80) - log.info("[initialize_milvus] Log cleaned up, start testing...") - param_info.prepare_param_info(host, port, handler) - - -@pytest.fixture(params=ct.get_invalid_strs) -def get_invalid_string(request): - yield request.param - - -@pytest.fixture(params=cf.gen_simple_index()) -def get_index_param(request): - yield request.param - - -@pytest.fixture(params=ct.get_invalid_strs) -def get_invalid_collection_name(request): - yield request.param - - -@pytest.fixture(params=ct.get_invalid_strs) -def get_invalid_field_name(request): - yield request.param - - -@pytest.fixture(params=ct.get_invalid_strs) -def get_invalid_index_type(request): - yield request.param - - -# TODO: construct invalid index params for all index types 
-@pytest.fixture(params=[{"metric_type": "L3", "index_type": "IVF_FLAT"}, - {"metric_type": "L2", "index_type": "IVF_FLAT", "err_params": {"nlist": 10}}, - {"metric_type": "L2", "index_type": "IVF_FLAT", "params": {"nlist": -1}}]) -def get_invalid_index_params(request): - yield request.param - - -@pytest.fixture(params=ct.get_invalid_strs) -def get_invalid_partition_name(request): - yield request.param - - -@pytest.fixture(params=ct.get_invalid_dict) -def get_invalid_vector_dict(request): - yield request.param - - -# for test exit in the future -# @pytest.hookimpl(hookwrapper=True, tryfirst=True) -# def pytest_runtest_makereport(): -# result = yield -# report = result.get_result() -# if report.outcome == "failed": -# msg = "The execution of the test case fails and the test exits..." -# log.error(msg) -# pytest.exit(msg) \ No newline at end of file