[test] Upgrade milvus scale test and Change k8s client config (#18404)

Signed-off-by: ThreadDao <yufen.zong@zilliz.com>
ThreadDao 2022-07-26 09:00:33 +08:00 committed by GitHub
parent 4b819ee192
commit 19f2971e11
13 changed files with 57 additions and 53 deletions

View File

@@ -43,7 +43,6 @@ pipeline {
pip install --upgrade setuptools
pip install --upgrade pip
pip install -r requirements.txt
pip install --upgrade protobuf
"""
}
}

View File

@@ -2,26 +2,18 @@ apiVersion: v1
kind: Pod
metadata:
name: milvus-test-pod
labels:
app: milvus-test
namespace: qa
spec:
containers:
- name: milvus-test
image: milvusdb/pytest:20220525-de0ba6d
image: harbor.milvus.io/qa/krte:dev-4
# image: dockerhub-mirror-sh.zilliz.cc/milvusdb/pytest:20211209-cef343f
command:
- cat
tty: true
resources:
limits:
memory: 16Gi
cpu: 8.0
requests:
memory: 12Gi
cpu: 8.0
volumeMounts:
- name: qa-kubeconfig
mountPath: /root/.kube/
env:
- name: IN_CLUSTER
value: "true"
- name: jnlp
image: jenkins/inbound-agent:4.11-1-jdk8
resources:
@@ -29,8 +21,4 @@ spec:
requests:
memory: 256Mi
cpu: 100m
volumes:
- name: qa-kubeconfig
secret:
secretName: qa-kubeconfig
serviceAccountName: account-milvus-test
serviceAccountName: qa-admin
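The kubeconfig secret mount is replaced by the qa-admin service account plus an IN_CLUSTER env flag, so the test pod authenticates with its own mounted token. A minimal sketch of what that looks like from inside this pod (assuming the kubernetes Python client is installed and qa-admin has list-pod RBAC; the namespace is illustrative):

from kubernetes import client, config

# Inside the pod above, the mounted service-account token for qa-admin
# authenticates the client; no kubeconfig secret is needed.
config.load_incluster_config()
for pod in client.CoreV1Api().list_namespaced_pod(namespace="qa").items:
    print(pod.metadata.name)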

View File

@@ -63,6 +63,7 @@ value_content = "value_content"
err_code = "err_code"
err_msg = "err_msg"
in_cluster_env = "IN_CLUSTER"
"""" List of parameters used to pass """
get_invalid_strs = [

View File

@@ -1,7 +1,11 @@
from __future__ import print_function
import os
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from utils.util_log import test_log as log
from common.common_type import in_cluster_env
_GROUP = 'milvus.io'
_VERSION = 'v1alpha1'
@@ -18,10 +22,17 @@ class CustomResourceOperations(object):
else:
self.plural = kind.lower()
# init k8s client config
in_cluster = os.getenv(in_cluster_env, default='False')
log.debug(f"env variable IN_CLUSTER: {in_cluster}")
if in_cluster.lower() == 'true':
config.load_incluster_config()
else:
config.load_kube_config()
def create(self, body):
"""create or apply a custom resource in k8s"""
pretty = 'true'
config.load_kube_config()
api_instance = client.CustomObjectsApi()
try:
api_response = api_instance.create_namespaced_custom_object(self.group, self.version, self.namespace,
@@ -36,7 +47,6 @@ class CustomResourceOperations(object):
"""delete or uninstall a custom resource in k8s"""
print(metadata_name)
try:
config.load_kube_config()
api_instance = client.CustomObjectsApi()
api_response = api_instance.delete_namespaced_custom_object(self.group, self.version, self.namespace,
self.plural,
@@ -49,7 +59,6 @@ class CustomResourceOperations(object):
def patch(self, metadata_name, body):
"""patch a custom resource in k8s"""
config.load_kube_config()
api_instance = client.CustomObjectsApi()
try:
api_response = api_instance.patch_namespaced_custom_object(self.group, self.version, self.namespace,
@@ -66,7 +75,6 @@ class CustomResourceOperations(object):
"""list all the customer resources in k8s"""
pretty = 'true'
try:
config.load_kube_config()
api_instance = client.CustomObjectsApi()
api_response = api_instance.list_namespaced_custom_object(self.group, self.version, self.namespace,
plural=self.plural, pretty=pretty)
@@ -79,7 +87,6 @@ class CustomResourceOperations(object):
def get(self, metadata_name):
"""get a customer resources by name in k8s"""
try:
config.load_kube_config()
api_instance = client.CustomObjectsApi()
api_response = api_instance.get_namespaced_custom_object(self.group, self.version,
self.namespace, self.plural,
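The constructor now selects the kubernetes client config once, based on the IN_CLUSTER env var, which is why the per-method config.load_kube_config() calls are dropped throughout this file. A self-contained sketch of that selection pattern (the function name here is illustrative, not from the diff):

import os
from kubernetes import client, config

def load_k8s_config(env_var='IN_CLUSTER'):
    # In-cluster: service-account token + cluster CA; otherwise ~/.kube/config.
    if os.getenv(env_var, 'False').lower() == 'true':
        config.load_incluster_config()
    else:
        config.load_kube_config()

load_k8s_config()
api = client.CustomObjectsApi()  # ready for custom-resource CRUD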

View File

@@ -7,9 +7,12 @@ from common.cus_resource_opts import CustomResourceOperations as CusResource
template_yaml = os.path.join(os.path.dirname(__file__), 'template/default.yaml')
MILVUS_GRP = 'milvus.io'
MILVUS_VER = 'v1alpha1'
MILVUS_PLURAL = 'milvusclusters'
MILVUS_KIND = 'MilvusCluster'
# MILVUS_VER = 'v1alpha1'
MILVUS_VER = 'v1beta1'
# MILVUS_PLURAL = 'milvusclusters'
MILVUS_PLURAL = 'milvuses'
# MILVUS_KIND = 'MilvusCluster'
MILVUS_KIND = 'Milvus'
class MilvusOperator(object):
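The CRD coordinates move from v1alpha1/milvusclusters to v1beta1/milvuses, and this group/version/plural triple feeds every custom-objects call MilvusOperator makes. An illustrative listing call under the new coordinates (the qa namespace is an assumption, not part of this file):

from kubernetes import client

api = client.CustomObjectsApi()
# List Milvus custom resources using the constants defined above.
milvus_crs = api.list_namespaced_custom_object(
    group='milvus.io',    # MILVUS_GRP
    version='v1beta1',    # MILVUS_VER
    namespace='qa',
    plural='milvuses',    # MILVUS_PLURAL
)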

View File

@@ -18,24 +18,6 @@ spec:
proxy:
configData:
httpNumThreads: "100"
image:
broker:
tag: 2.8.2
zookeeper:
tag: 2.8.2
bookkeeper:
tag: 2.8.2
proxy:
tag: 2.8.2
bastion:
tag: 2.8.2
# zookeeper:
# configData:
# PULSAR_MEM: "-Xms1024m -Xmx1024m -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no"
#
# bookkeeper:
# configData:
# BOOKIE_MEM: "-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+AggressiveOpts -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -verbosegc"
storage:
inCluster:
deletionPolicy: Delete

View File

@@ -8,7 +8,7 @@ pytest-repeat==0.8.0
allure-pytest==2.7.0
pytest-print==0.2.1
pytest-level==0.1.1
pytest-xdist==2.2.1
pytest-xdist==2.5.0
pymilvus==2.1.0.dev103
pytest-rerunfailures==9.1.1
git+https://github.com/Projectplace/pytest-tags
@@ -19,7 +19,7 @@ pytest-html==3.1.1
delayed-assert
kubernetes==17.17.0
PyYAML==5.4.1
pytest-sugar==0.9.4
pytest-sugar==0.9.5
pytest-reportportal==5.0.10
pytest-parallel
pytest-random-order

View File

@@ -2,6 +2,7 @@
# IMAGE_REPOSITORY = "registry.milvus.io/milvus/milvus" # repository of milvus image
IMAGE_REPOSITORY = "milvusdb/milvus"
IMAGE_TAG = "master-20211227-b022615" # tag of milvus image
NAMESPACE = "chaos-testing" # namespace
# NAMESPACE = "chaos-testing" # namespace
NAMESPACE = "qa"
IF_NOT_PRESENT = "IfNotPresent" # image pullPolicy IfNotPresent
ALWAYS = "Always" # image pullPolicy Always

View File

@@ -37,6 +37,7 @@ class TestDataNodeScale:
data_config = {
'metadata.namespace': constants.NAMESPACE,
'spec.mode': 'cluster',
'metadata.name': release_name,
'spec.components.image': image,
'spec.components.proxy.serviceType': 'LoadBalancer',
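The dotted keys in data_config read as flattened paths into the Milvus CR spec; how they are expanded is not shown in this diff. A hypothetical helper that turns such a dict into a nested body might look like:

def expand_dotted(cfg):
    # {'spec.mode': 'cluster'} -> {'spec': {'mode': 'cluster'}}
    body = {}
    for dotted, value in cfg.items():
        node = body
        *parents, leaf = dotted.split('.')
        for key in parents:
            node = node.setdefault(key, {})
        node[leaf] = value
    return body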

View File

@@ -36,6 +36,7 @@ class TestIndexNodeScale:
expand_replicas = 2
data_config = {
'metadata.namespace': constants.NAMESPACE,
'spec.mode': 'cluster',
'metadata.name': release_name,
'spec.components.image': image,
'spec.components.proxy.serviceType': 'LoadBalancer',
@@ -129,6 +130,7 @@ class TestIndexNodeScale:
data_config = {
'metadata.namespace': constants.NAMESPACE,
'metadata.name': release_name,
'spec.mode': 'cluster',
'spec.components.image': image,
'spec.components.proxy.serviceType': 'LoadBalancer',
'spec.components.indexNode.replicas': 2,

View File

@@ -47,6 +47,7 @@ class TestProxyScale:
data_config = {
'metadata.namespace': constants.NAMESPACE,
'metadata.name': release_name,
'spec.mode': 'cluster',
'spec.components.image': image,
'spec.components.proxy.serviceType': 'LoadBalancer',
'spec.components.proxy.replicas': 1,

View File

@@ -41,6 +41,7 @@ class TestQueryNodeScale:
image = f'{constants.IMAGE_REPOSITORY}:{image_tag}'
query_config = {
'metadata.namespace': constants.NAMESPACE,
'spec.mode': 'cluster',
'metadata.name': release_name,
'spec.components.image': image,
'spec.components.proxy.serviceType': 'LoadBalancer',
@@ -155,6 +156,7 @@ class TestQueryNodeScale:
query_config = {
'metadata.namespace': constants.NAMESPACE,
'metadata.name': release_name,
'spec.mode': 'cluster',
'spec.components.image': image,
'spec.components.proxy.serviceType': 'LoadBalancer',
'spec.components.queryNode.replicas': 5,
@@ -177,7 +179,7 @@ class TestQueryNodeScale:
# insert 10 sealed segments
for i in range(5):
df = cf.gen_default_dataframe_data(start=i * nb)
df = cf.gen_default_dataframe_data(nb=nb, start=i * nb)
collection_w.insert(df)
assert collection_w.num_entities == (i + 1) * nb
@@ -234,6 +236,7 @@ class TestQueryNodeScale:
query_config = {
'metadata.namespace': constants.NAMESPACE,
'metadata.name': release_name,
'spec.mode': 'cluster',
'spec.components.image': image,
'spec.components.proxy.serviceType': 'LoadBalancer',
'spec.components.queryNode.replicas': 2,

View File

@@ -8,6 +8,22 @@ from kubernetes import client, config
from kubernetes.client.rest import ApiException
from common.milvus_sys import MilvusSys
from utils.util_log import test_log as log
from common.common_type import in_cluster_env
def init_k8s_client_config():
"""
init kubernetes client config
"""
try:
in_cluster = os.getenv(in_cluster_env, default='False')
# log.debug(f"env variable IN_CLUSTER: {in_cluster}")
if in_cluster.lower() == 'true':
config.load_incluster_config()
else:
config.load_kube_config()
except Exception as e:
raise Exception(e)
def wait_pods_ready(namespace, label_selector, expected_num=None, timeout=360):
@@ -29,7 +45,7 @@ def wait_pods_ready(namespace, label_selector, expected_num=None, timeout=360):
:example:
>>> wait_pods_ready("default", "app.kubernetes.io/instance=scale-query", expected_num=9)
"""
config.load_kube_config()
init_k8s_client_config()
api_instance = client.CoreV1Api()
try:
all_pos_ready_flag = False
@@ -77,7 +93,7 @@ def get_pod_list(namespace, label_selector):
:example:
>>> get_pod_list("chaos-testing", "app.kubernetes.io/instance=test-proxy-pod-failure, component=proxy")
"""
config.load_kube_config()
init_k8s_client_config()
api_instance = client.CoreV1Api()
try:
api_response = api_instance.list_namespaced_pod(namespace=namespace, label_selector=label_selector)
@@ -169,7 +185,7 @@ def get_milvus_instance_name(namespace, host, port="19530"):
# get all pods which label is app.kubernetes.io/name=milvus and component=querynode
ip_name_pairs = get_pod_ip_name_pairs(namespace, "app.kubernetes.io/name=milvus, component=querynode")
pod_name = ip_name_pairs[query_node_ip]
config.load_kube_config()
init_k8s_client_config()
api_instance = client.CoreV1Api()
try:
api_response = api_instance.read_namespaced_pod(namespace=namespace, name=pod_name)
@@ -218,7 +234,7 @@ def export_pod_logs(namespace, label_selector, release_name=None):
def read_pod_log(namespace, label_selector, release_name):
config.load_kube_config()
init_k8s_client_config()
items = get_pod_list(namespace, label_selector=label_selector)
try:
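Taken together, callers of these helpers no longer touch kubernetes.config directly; each helper calls init_k8s_client_config() itself. A typical sequence (namespace and label selector are illustrative, and this assumes get_pod_list returns the V1Pod items, as its docstring example suggests):

wait_pods_ready('qa', 'app.kubernetes.io/instance=scale-query', expected_num=9)
for pod in get_pod_list('qa', 'app.kubernetes.io/name=milvus'):
    print(pod.metadata.name, pod.status.phase)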