Add pytest pipeline for pymilvus-orm (#6278)

Signed-off-by: quicksilver <zhifeng.zhang@zilliz.com>
quicksilver 2021-07-03 14:52:28 +08:00 committed by GitHub
parent 111a24a49f
commit d23383b5c2
7 changed files with 93 additions and 42 deletions

.gitignore

@@ -23,6 +23,8 @@ cmake_build
# Docker generated cache file
.docker/
**/_artifacts/**
# proxy
proxy/milvus
proxy/cmake_build


@@ -86,12 +86,12 @@ To run E2E tests, use these commands:
```shell
MILVUS_SERVICE_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $(docker-compose ps -q))
cd tests/docker
docker-compose run --rm pytest /bin/bash -c "cd tests/python_test/ && pytest --ip ${MILVUS_SERVICE_IP}"
docker-compose run --rm pytest /bin/bash -c "pytest --ip ${MILVUS_SERVICE_IP}"
```
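For the pymilvus-orm suite added by this change, an equivalent manual run is sketched below. It is an assumption based on the compose and test-script hunks later in this commit: the working directory comes from `MILVUS_PYTEST_WORKSPACE` instead of a `cd`, and the ORM tests live under `tests20/python_client` and take `--host` rather than `--ip`.
```shell
# Sketch only: reuse the service IP from above and point the pytest container
# at the pymilvus-orm workspace via MILVUS_PYTEST_WORKSPACE.
export MILVUS_PYTEST_WORKSPACE=/milvus/tests20/python_client
docker-compose run --rm pytest /bin/bash -c "pytest --host ${MILVUS_SERVICE_IP}"
```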
## Basic Flow
The scripts directly under [`build/`](.) are used to build and test. They will ensure that the `builder` Docker image is built (based on [`build/docker/builder`] ) and then execute the appropriate command in that container. These scripts will both ensure that the right data is cached from run to run for incremental builds and will copy the results back out of the container. You can specify a different registry/name for `builder` by setting `IMAGE_REPO` which defaults to `milvusdb/milvus-env`.
The scripts directly under [`build/`](.) are used to build and test. They will ensure that the `builder` Docker image is built (based on [`build/docker/builder`] ) and then execute the appropriate command in that container. These scripts will both ensure that the right data is cached from run to run for incremental builds and will copy the results back out of the container. You can specify a different registry/name for `builder` by setting `IMAGE_REPO` which defaults to `milvusdb`.
The `builder.sh` script works by first creating a “docker volume” directory in `.docker/`. The `.docker/` directory is used to cache third-party packages and compiler cache data; this speeds up recompilation by caching previous compilations and detecting when the same compilation is being done again.
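As a concrete illustration of that flow, the sketch below shows a hypothetical invocation; the exact target is an assumption, since `builder.sh` forwards its arguments as the command to run inside the `builder` container, with `.docker/` providing the caches that later runs reuse.
```shell
# Hypothetical builder.sh usage: the argument is the command run inside the container.
./build/builder.sh make        # first run populates the .docker/ caches
./build/builder.sh make        # subsequent runs hit the caches for an incremental build
```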


@@ -17,6 +17,10 @@ pipeline {
name 'MILVUS_SERVER_TYPE'
values 'standalone', 'distributed'
}
axis {
name 'MILVUS_CLIENT'
values 'pymilvus', 'pymilvus-orm'
}
}
agent {
kubernetes {
@@ -34,7 +38,7 @@ pipeline {
SEMVER = "${BRANCH_NAME.contains('/') ? BRANCH_NAME.substring(BRANCH_NAME.lastIndexOf('/') + 1) : BRANCH_NAME}"
IMAGE_REPO = "dockerhub-mirror-sh.zilliz.cc/milvusdb"
DOCKER_BUILDKIT = 1
ARTIFACTS = "${env.WORKSPACE}/artifacts"
ARTIFACTS = "${env.WORKSPACE}/_artifacts"
}
stages {
stage('Test') {
@@ -47,13 +51,25 @@ pipeline {
clusterEnabled = "true"
}
sh """
MILVUS_CLUSTER_ENABLED=${clusterEnabled} \
./e2e-k8s.sh \
--node-image registry.zilliz.com/kindest/node:v1.20.2 \
--kind-config "${env.WORKSPACE}/build/config/topology/trustworthy-jwt-ci.yaml" \
--test-extra-arg "--tags=smoke"
"""
if ("${MILVUS_CLIENT}" == "pymilvus") {
sh """
MILVUS_CLUSTER_ENABLED=${clusterEnabled} \
./e2e-k8s.sh \
--node-image registry.zilliz.com/kindest/node:v1.20.2 \
--kind-config "${env.WORKSPACE}/build/config/topology/trustworthy-jwt-ci.yaml" \
--test-extra-arg "--tags=smoke"
"""
} else if ("${MILVUS_CLIENT}" == "pymilvus-orm") {
sh """
MILVUS_CLUSTER_ENABLED=${clusterEnabled} \
./e2e-k8s.sh \
--node-image registry.zilliz.com/kindest/node:v1.20.2 \
--kind-config "${env.WORKSPACE}/build/config/topology/trustworthy-jwt-ci.yaml" \
--test-extra-arg "--tags L0 L1"
"""
} else {
error "Error: Unsupported Milvus client: ${MILVUS_CLIENT}"
}
}
}
}
@@ -77,7 +93,10 @@ pipeline {
container('main') {
script {
dir("${env.ARTIFACTS}") {
sh "find ./kind -path '*/history/*' -type f | xargs tar -zcvf artifacts-${PROJECT_NAME}-${MILVUS_SERVER_TYPE}-${SEMVER}-${env.BUILD_NUMBER}-e2e-logs.tar.gz --transform='s:^[^/]*/[^/]*/[^/]*/[^/]*/::g' || true"
sh "find ./kind -path '*/history/*' -type f | xargs tar -zcvf artifacts-${PROJECT_NAME}-${MILVUS_SERVER_TYPE}-${SEMVER}-${env.BUILD_NUMBER}-${MILVUS_CLIENT}-e2e-logs.tar.gz --transform='s:^[^/]*/[^/]*/[^/]*/[^/]*/::g' || true"
if ("${MILVUS_CLIENT}" == "pymilvus-orm") {
sh "tar -zcvf artifacts-${PROJECT_NAME}-${MILVUS_SERVER_TYPE}-${MILVUS_CLIENT}-pytest-logs.tar.gz ./tests/pytest_logs --remove-files || true"
}
archiveArtifacts artifacts: "**.tar.gz", allowEmptyArchive: true
sh 'docker rm -f \$(docker network inspect -f \'{{ range \$key, \$value := .Containers }}{{ printf "%s " \$key}}{{ end }}\' kind) || true'
sh 'docker network rm kind 2>&1 > /dev/null || true'


@@ -25,6 +25,10 @@ pipeline {
name 'MILVUS_SERVER_TYPE'
values 'standalone', 'distributed'
}
axis {
name 'MILVUS_CLIENT'
values 'pymilvus', 'pymilvus-orm'
}
}
agent {
kubernetes {
@@ -40,7 +44,7 @@ pipeline {
SEMVER = "${BRANCH_NAME.contains('/') ? BRANCH_NAME.substring(BRANCH_NAME.lastIndexOf('/') + 1) : BRANCH_NAME}"
IMAGE_REPO = "dockerhub-mirror-sh.zilliz.cc/milvusdb"
DOCKER_BUILDKIT = 1
ARTIFACTS = "${env.WORKSPACE}/artifacts"
ARTIFACTS = "${env.WORKSPACE}/_artifacts"
DOCKER_CREDENTIALS_ID = "f0aacc8e-33f2-458a-ba9e-2c44f431b4d2"
TARGET_REPO = "milvusdb"
}
@@ -55,12 +59,24 @@ pipeline {
clusterEnabled = "true"
}
sh """
MILVUS_CLUSTER_ENABLED=${clusterEnabled} \
./e2e-k8s.sh \
--kind-config "${env.WORKSPACE}/build/config/topology/trustworthy-jwt-ci.yaml" \
--node-image registry.zilliz.com/kindest/node:v1.20.2
"""
if ("${MILVUS_CLIENT}" == "pymilvus") {
sh """
MILVUS_CLUSTER_ENABLED=${clusterEnabled} \
./e2e-k8s.sh \
--kind-config "${env.WORKSPACE}/build/config/topology/trustworthy-jwt-ci.yaml" \
--node-image registry.zilliz.com/kindest/node:v1.20.2
"""
} else if ("${MILVUS_CLIENT}" == "pymilvus-orm") {
sh """
MILVUS_CLUSTER_ENABLED=${clusterEnabled} \
./e2e-k8s.sh \
--kind-config "${env.WORKSPACE}/build/config/topology/trustworthy-jwt-ci.yaml" \
--node-image registry.zilliz.com/kindest/node:v1.20.2 \
--test-extra-arg "--tags L0 L1 L2"
"""
} else {
error "Error: Unsupported Milvus client: ${MILVUS_CLIENT}"
}
}
}
}
@@ -84,6 +100,9 @@ pipeline {
script {
dir("${env.ARTIFACTS}") {
sh "find ./kind -path '*/history/*' -type f | xargs tar -zcvf artifacts-${PROJECT_NAME}-${MILVUS_SERVER_TYPE}-${SEMVER}-${env.BUILD_NUMBER}-e2e-nightly-logs.tar.gz --transform='s:^[^/]*/[^/]*/[^/]*/[^/]*/::g' || true"
if ("${MILVUS_CLIENT}" == "pymilvus-orm") {
sh "tar -zcvf artifacts-${PROJECT_NAME}-${MILVUS_SERVER_TYPE}-${MILVUS_CLIENT}-pytest-logs.tar.gz ./tests/pytest_logs --remove-files || true"
}
archiveArtifacts artifacts: "**.tar.gz", allowEmptyArchive: true
sh 'docker rm -f \$(docker network inspect -f \'{{ range \$key, \$value := .Containers }}{{ printf "%s " \$key}}{{ end }}\' kind) || true'
sh 'docker network rm kind 2>&1 > /dev/null || true'


@@ -1,4 +1,6 @@
SERVICE_IP=127.0.0.1
SERVICE_PORT=19530
MILVUS_SERVICE_IP=127.0.0.1
MILVUS_SERVICE_PORT=19530
MILVUS_PYTEST_WORKSPACE=/milvus/tests/python_test
MILVUS_PYTEST_LOG_PATH=/milvus/_artifacts/tests/pytest_logs
IMAGE_REPO=milvusdb
IMAGE_TAG=20210616-46d5b57
IMAGE_TAG=latest
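Assuming this is the `.env` file read by docker-compose in `tests/docker`, these values are only defaults: variables exported in the shell take precedence over `.env` during compose variable substitution, so a one-off run can target a different Milvus endpoint without editing the file (the address below is illustrative).
```shell
# Override the .env defaults for a single run; the exported values win over .env.
export MILVUS_SERVICE_IP=192.168.1.10
export MILVUS_SERVICE_PORT=19530
docker-compose run --rm pytest /bin/bash -c "pytest --ip ${MILVUS_SERVICE_IP} --port ${MILVUS_SERVICE_PORT}"
```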


@@ -10,11 +10,12 @@ services:
- ${IMAGE_REPO}/pytest:latest
shm_size: 2G
environment:
SERVICE_IP: ${SERVICE_IP}
SERVICE_PORT: ${SERVICE_PORT}
MILVUS_SERVICE_IP: ${MILVUS_SERVICE_IP}
MILVUS_SERVICE_PORT: ${MILVUS_SERVICE_PORT}
CI_LOG_PATH: ${MILVUS_PYTEST_LOG_PATH}
volumes:
- ../../:/milvus:delegated
working_dir: "/milvus"
working_dir: ${MILVUS_PYTEST_WORKSPACE}
networks:
default:


@@ -18,7 +18,7 @@ MILVUS_HELM_RELEASE_NAME="${MILVUS_HELM_RELEASE_NAME:-milvus-testing}"
MILVUS_CLUSTER_ENABLED="${MILVUS_CLUSTER_ENABLED:-false}"
MILVUS_HELM_NAMESPACE="${MILVUS_HELM_NAMESPACE:-default}"
PARALLEL_NUM="${PARALLEL_NUM:-4}"
MILVUS_CLIENT="${MILVUS_CLIENT:-pymilvus}"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
@@ -28,26 +28,26 @@ while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
done
ROOT="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )"
if [[ "${MILVUS_CLUSTER_ENABLED}" == "false" ]]; then
MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME},component=standalone"
else
MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME},component=proxy"
fi
if [[ "${TEST_ENV:-}" =~ ^kind* ]]; then
if [[ "${MILVUS_CLUSTER_ENABLED}" == "false" ]]; then
MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME},component=standalone"
else
MILVUS_LABELS="app.kubernetes.io/instance=${MILVUS_HELM_RELEASE_NAME},component=proxy"
fi
SERVICE_TYPE=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.type}')
if [[ "${SERVICE_TYPE}" == "LoadBalancer" ]]; then
SERVICE_IP=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].status.loadBalancer.ingress[0].ip}')
SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].port}')
MILVUS_SERVICE_IP=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].status.loadBalancer.ingress[0].ip}')
MILVUS_SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].port}')
elif [[ "${SERVICE_TYPE}" == "NodePort" ]]; then
SERVICE_IP=$(kubectl get nodes --namespace "${MILVUS_HELM_NAMESPACE}" -o jsonpath='{.items[0].status.addresses[0].address}')
SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].nodePort}')
MILVUS_SERVICE_IP=$(kubectl get nodes --namespace "${MILVUS_HELM_NAMESPACE}" -o jsonpath='{.items[0].status.addresses[0].address}')
MILVUS_SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].nodePort}')
else
SERVICE_IP="127.0.0.1"
MILVUS_SERVICE_IP="127.0.0.1"
POD_NAME=$(kubectl get pods --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].metadata.name}')
SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].port}')
kubectl --namespace "${MILVUS_HELM_NAMESPACE}" port-forward "${POD_NAME}" "${SERVICE_PORT}" &
MILVUS_SERVICE_PORT=$(kubectl get service --namespace "${MILVUS_HELM_NAMESPACE}" -l "${MILVUS_LABELS}" -o jsonpath='{.items[0].spec.ports[0].port}')
kubectl --namespace "${MILVUS_HELM_NAMESPACE}" port-forward "${POD_NAME}" "${MILVUS_SERVICE_PORT}" &
PORT_FORWARD_PID=$!
trap "kill -TERM ${PORT_FORWARD_PID}" EXIT
fi
@@ -60,13 +60,21 @@ pushd "${ROOT}/tests/docker"
export PYTEST_NETWORK="kind"
fi
export SERVICE_IP="${SERVICE_IP:-127.0.0.1}"
export SERVICE_PORT="${SERVICE_PORT:-19530}"
export MILVUS_SERVICE_IP="${MILVUS_SERVICE_IP:-127.0.0.1}"
export MILVUS_SERVICE_PORT="${MILVUS_SERVICE_PORT:-19530}"
if [[ "${MANUAL:-}" == "true" ]]; then
docker-compose up -d
else
docker-compose run --rm pytest /bin/bash -c "cd tests/python_test/ && python3 -m pip install --no-cache-dir -r requirements.txt && \
pytest -n ${PARALLEL_NUM} --ip ${SERVICE_IP} --port ${SERVICE_PORT} ${@:-}"
if [[ "${MILVUS_CLIENT}" == "pymilvus" ]]; then
export MILVUS_PYTEST_WORKSPACE="/milvus/tests/python_test"
docker-compose run --rm pytest /bin/bash -c "python3 -m pip install --no-cache-dir -r requirements.txt && \
pytest -n ${PARALLEL_NUM} --ip ${MILVUS_SERVICE_IP} --port ${MILVUS_SERVICE_PORT} ${@:-}"
elif [[ "${MILVUS_CLIENT}" == "pymilvus-orm" ]]; then
export MILVUS_PYTEST_WORKSPACE="/milvus/tests20/python_client"
docker-compose run --rm pytest /bin/bash -c "python3 -m pip install --no-cache-dir -r requirements.txt && \
pytest --workers ${PARALLEL_NUM} --host ${MILVUS_SERVICE_IP} --port ${MILVUS_SERVICE_PORT} \
--html=\${CI_LOG_PATH}/report.html --self-contained-html ${@:-}"
fi
fi
popd
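Taken together, the new `MILVUS_CLIENT` switch can also be exercised locally; the sketch below assumes the modified script is `tests/scripts/e2e.sh` (the path is not shown in this diff) and that a Milvus instance is already reachable, with any extra arguments forwarded to pytest via `${@:-}`.
```shell
# Hypothetical local run of the pymilvus-orm suite; --tags L0 L1 matches the
# PR pipeline above and is passed straight through to pytest.
MILVUS_CLIENT=pymilvus-orm \
MILVUS_CLUSTER_ENABLED=false \
PARALLEL_NUM=4 \
./tests/scripts/e2e.sh --tags L0 L1
```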