[skip e2e] Add a new case label for cluster mode (#16800)

Signed-off-by: Binbin Lv <binbin.lv@zilliz.com>
binbin 2022-05-06 21:01:51 +08:00 committed by GitHub
parent a0110998e8
commit c55c793da3
4 changed files with 15 additions and 7 deletions


@@ -173,12 +173,15 @@ pipeline {
         def clusterEnabled = "false"
         def mqMode='pulsar'
         int e2e_timeout_seconds = 2 * 60 * 60
+        def tag="L0 L1 L2"
         if ("${MILVUS_SERVER_TYPE}" == "distributed-pulsar") {
             clusterEnabled = "true"
+            tag="L0 L1 L2 ClusterOnly"
         } else if("${MILVUS_SERVER_TYPE}" == "distributed-kafka" ) {
             clusterEnabled = "true"
             mqMode='kafka'
+            tag="L0 L1 L2 ClusterOnly"
         }
         if ("${MILVUS_CLIENT}" == "pymilvus") {
             sh """
@@ -187,7 +190,7 @@ pipeline {
             MILVUS_CLUSTER_ENABLED="${clusterEnabled}" \
             TEST_TIMEOUT="${e2e_timeout_seconds}" \
             MQ_MODE="${mqMode}" \
-            ./ci_e2e.sh "-n 6 --tags L0 L1 L2"
+            ./ci_e2e.sh "-n 6 --tags ${tag}"
             """
         } else {
             error "Error: Unsupported Milvus client: ${MILVUS_CLIENT}"


@@ -209,9 +209,14 @@ class CaseLabel:
         Stability/Performance/reliability, etc. special tests
         Triggered by cron job or manually
         run duration depends on test configuration
+    Loadbalance:
+        loadbalance testcases which need to be run in multi query nodes
+    ClusterOnly:
+        For functions only suitable to cluster mode
     """
     L0 = "L0"
     L1 = "L1"
     L2 = "L2"
     L3 = "L3"
-    Loadbalance = "Loadbalance"  # loadbalance testcases which need to be run in multi querynodes
+    Loadbalance = "Loadbalance"  # loadbalance testcases which need to be run in multi query nodes
+    ClusterOnly = "ClusterOnly"  # For functions only suitable to cluster mode
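For illustration, a test carrying the new label would look like the following (hypothetical module and body; only the CaseLabel.ClusterOnly usage mirrors this commit, and the import path is assumed):

import pytest
from common.common_type import CaseLabel  # import path assumed


class TestClusterOnlyExample:
    @pytest.mark.tags(CaseLabel.ClusterOnly)
    def test_visible_only_in_cluster_runs(self):
        # Collected only when the runner passes --tags ... ClusterOnly,
        # i.e. the distributed-pulsar/distributed-kafka pipelines above.
        assert True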


@@ -2225,7 +2225,7 @@ class TestLoadCollection(TestcaseBase):
         error = {ct.err_code: 1, ct.err_msg: f"no enough nodes to create replicas"}
         collection_w.load(replica_number=3, check_task=CheckTasks.err_res, check_items=error)
 
-    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.tags(CaseLabel.ClusterOnly)
     def test_load_replica_change(self):
         """
         target: test load replica change
@@ -2264,7 +2264,7 @@
         assert reduce(lambda x, y: x ^ y, seg_ids) == 0
         assert reduce(lambda x, y: x + y, num_entities) == ct.default_nb * 2
 
-    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.tags(CaseLabel.ClusterOnly)
     def test_load_replica_multi(self):
         """
         target: test load with multiple replicas
@@ -2292,7 +2292,7 @@
         search_res, _ = collection_w.search(vectors, default_search_field, default_search_params, default_limit)
         assert len(search_res[0]) == ct.default_limit
 
-    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.tags(CaseLabel.ClusterOnly)
     def test_load_replica_partitions(self):
         """
         target: test load replica with partitions


@@ -354,7 +354,7 @@ class TestPartitionParams(TestcaseBase):
         error = {ct.err_code: 1, ct.err_msg: f"no enough nodes to create replicas"}
         partition_w.load(replica_number=3, check_task=CheckTasks.err_res, check_items=error)
 
-    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.tags(CaseLabel.ClusterOnly)
     def test_load_replica_change(self):
         """
         target: test load replica change
@@ -392,7 +392,7 @@
         assert reduce(lambda x, y: x ^ y, seg_ids) == 0
         assert reduce(lambda x, y: x + y, num_entities) == ct.default_nb * 2
 
-    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.tags(CaseLabel.ClusterOnly)
     def test_partition_replicas_change_cross_partitions(self):
         """
         target: test load with different replicas between partitions
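Net effect of the re-tagging above: the multi-replica tests in TestLoadCollection and TestPartitionParams move from the general L2 sweep to cluster-mode runs only. A hypothetical local reproduction of that selection, assuming a --tags option like the sketch earlier is registered in conftest.py:

# run_cluster_only.py -- hypothetical helper, not part of this commit.
import pytest

if __name__ == "__main__":
    # Mirrors ./ci_e2e.sh "-n 6 --tags L0 L1 L2 ClusterOnly", minus xdist.
    raise SystemExit(pytest.main(["--tags", "L0", "L1", "L2", "ClusterOnly"]))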