mirror of https://github.com/milvus-io/milvus.git
[skip e2e] Add a new case label for cluster mode (#16800)
Signed-off-by: Binbin Lv <binbin.lv@zilliz.com>
commit c55c793da3
parent a0110998e8
@@ -173,12 +173,15 @@ pipeline {
             def clusterEnabled = "false"
             def mqMode='pulsar'
             int e2e_timeout_seconds = 2 * 60 * 60
+            def tag="L0 L1 L2"
             if ("${MILVUS_SERVER_TYPE}" == "distributed-pulsar") {
                 clusterEnabled = "true"
+                tag="L0 L1 L2 ClusterOnly"

             } else if("${MILVUS_SERVER_TYPE}" == "distributed-kafka" ) {
                 clusterEnabled = "true"
                 mqMode='kafka'
+                tag="L0 L1 L2 ClusterOnly"
             }
             if ("${MILVUS_CLIENT}" == "pymilvus") {
                 sh """
@@ -187,7 +190,7 @@ pipeline {
                 MILVUS_CLUSTER_ENABLED="${clusterEnabled}" \
                 TEST_TIMEOUT="${e2e_timeout_seconds}" \
                 MQ_MODE="${mqMode}" \
-                ./ci_e2e.sh "-n 6 --tags L0 L1 L2"
+                ./ci_e2e.sh "-n 6 --tags ${tag}"
                 """
             } else {
                 error "Error: Unsupported Milvus client: ${MILVUS_CLIENT}"
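Taken together, the two pipeline hunks thread a new tag variable through to the test invocation: every deployment runs the L0/L1/L2 labels, and the two cluster deployments (pulsar- and kafka-backed) additionally run ClusterOnly. A minimal Python sketch of that mapping, assuming ci_e2e.sh forwards the quoted "--tags" value to the test runner unchanged; e2e_tags is illustrative, not part of this commit:

def e2e_tags(milvus_server_type: str) -> str:
    """Mirror the Jenkinsfile selection: which case labels run per server type."""
    tags = ["L0", "L1", "L2"]
    if milvus_server_type in ("distributed-pulsar", "distributed-kafka"):
        # Only cluster deployments pick up the new cluster-only cases.
        tags.append("ClusterOnly")
    return " ".join(tags)

assert e2e_tags("standalone") == "L0 L1 L2"
assert e2e_tags("distributed-pulsar") == "L0 L1 L2 ClusterOnly"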
@@ -209,9 +209,14 @@ class CaseLabel:
         Stability/Performance/reliability, etc. special tests
         Triggered by cron job or manually
         run duration depends on test configuration
+    Loadbalance:
+        loadbalance testcases which need to be run in multi query nodes
+    ClusterOnly:
+        For functions only suitable to cluster mode
     """
     L0 = "L0"
     L1 = "L1"
     L2 = "L2"
     L3 = "L3"
-    Loadbalance = "Loadbalance"  # loadbalance testcases which need to be run in multi querynodes
+    Loadbalance = "Loadbalance"  # loadbalance testcases which need to be run in multi query nodes
+    ClusterOnly = "ClusterOnly"  # For functions only suitable to cluster mode
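With the label defined alongside L0-L3 and Loadbalance, opting a case into cluster-only runs is a single decorator. An illustrative example; the class and test names are hypothetical, and the import path is assumed from the suite's usual layout rather than shown in this diff:

import pytest
from common.common_type import CaseLabel  # assumed import path

class TestReplicaExample:
    @pytest.mark.tags(CaseLabel.ClusterOnly)
    def test_cluster_only_example(self):
        """Selected only when the CI tag set includes ClusterOnly."""
        ...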
@@ -2225,7 +2225,7 @@ class TestLoadCollection(TestcaseBase):
         error = {ct.err_code: 1, ct.err_msg: f"no enough nodes to create replicas"}
         collection_w.load(replica_number=3, check_task=CheckTasks.err_res, check_items=error)

-    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.tags(CaseLabel.ClusterOnly)
     def test_load_replica_change(self):
         """
         target: test load replica change
@@ -2264,7 +2264,7 @@ class TestLoadCollection(TestcaseBase):
         assert reduce(lambda x, y: x ^ y, seg_ids) == 0
         assert reduce(lambda x, y: x + y, num_entities) == ct.default_nb * 2

-    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.tags(CaseLabel.ClusterOnly)
     def test_load_replica_multi(self):
         """
         target: test load with multiple replicas
@@ -2292,7 +2292,7 @@ class TestLoadCollection(TestcaseBase):
         search_res, _ = collection_w.search(vectors, default_search_field, default_search_params, default_limit)
         assert len(search_res[0]) == ct.default_limit

-    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.tags(CaseLabel.ClusterOnly)
     def test_load_replica_partitions(self):
         """
         target: test load replica with partitions
@@ -354,7 +354,7 @@ class TestPartitionParams(TestcaseBase):
         error = {ct.err_code: 1, ct.err_msg: f"no enough nodes to create replicas"}
         partition_w.load(replica_number=3, check_task=CheckTasks.err_res, check_items=error)

-    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.tags(CaseLabel.ClusterOnly)
     def test_load_replica_change(self):
         """
         target: test load replica change
@@ -392,7 +392,7 @@ class TestPartitionParams(TestcaseBase):
         assert reduce(lambda x, y: x ^ y, seg_ids) == 0
         assert reduce(lambda x, y: x + y, num_entities) == ct.default_nb * 2

-    @pytest.mark.tags(CaseLabel.L2)
+    @pytest.mark.tags(CaseLabel.ClusterOnly)
     def test_partition_replicas_change_cross_partitions(self):
         """
         target: test load with different replicas between partitions
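All five retagged cases load with replica_number greater than one, which requires multiple query nodes; a standalone deployment has exactly one, so under the old L2 label these cases could be scheduled into standalone runs and hit "no enough nodes to create replicas". Moving them to ClusterOnly keeps them out of the standalone tag set entirely. The sketch below shows one way a "--tags" option can turn the marks into a collection filter; it is a hypothetical conftest.py, not the plugin this suite actually ships:

import pytest

def pytest_addoption(parser):
    parser.addoption("--tags", action="store", default="L0 L1 L2",
                     help="space-separated case labels to run")

def pytest_collection_modifyitems(config, items):
    wanted = set(config.getoption("--tags").split())
    skip = pytest.mark.skip(reason="case label not selected via --tags")
    for item in items:
        marker = item.get_closest_marker("tags")
        labels = set(marker.args) if marker else set()
        # Untagged items always run; tagged items need at least one selected label.
        if labels and not labels & wanted:
            item.add_marker(skip)

Under a filter like this, the pipeline's cluster invocation (./ci_e2e.sh "-n 6 --tags L0 L1 L2 ClusterOnly") collects these cases, while the standalone invocation leaves them skipped.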