mirror of https://github.com/milvus-io/milvus.git
Merge branch '0.5.0' into branch-0.5.0
Former-commit-id: b8115454f411f016333eec9169b569f9a70e29f7
commit 05b47e92d1

CHANGELOG.md
@@ -2,7 +2,7 @@
 Please mark all changes in the change log and use the ticket from JIRA.

-# Milvus 0.5.0 (TODO)
+# Milvus 0.5.0 (2019-10-21)

 ## Bug

 - MS-568 - Fix gpuresource free error
@@ -26,6 +26,15 @@ Please mark all changes in the change log and use the ticket from JIRA.
- MS-653 - When config check fail, Milvus close without message
- MS-654 - Describe index timeout when building index
- MS-658 - Fix SQ8 Hybrid can't search
- MS-665 - IVF_SQ8H search crash when no GPU resource in search_resources
- \#9 - Change default gpu_cache_capacity to 4
- \#20 - C++ sdk example get grpc error
- \#23 - Add unittest to improve code coverage
- \#31 - make clang-format failed after run build.sh -l
- \#39 - Create SQ8H index hang if using github server version
- \#30 - Some troubleshoot messages in Milvus do not provide enough information
- \#48 - Config unittest failed
- \#59 - Topk result is incorrect for small dataset

## Improvement
- MS-552 - Add and change the easylogging library
@@ -47,6 +56,7 @@ Please mark all changes in the change log and use the ticket from JIRA.
- MS-626 - Refactor DataObj to support cache any type data
- MS-648 - Improve unittest
- MS-655 - Upgrade SPTAG
- \#42 - Put union of index_build_device and search resources to gpu_pool

## New Feature
- MS-614 - Preload table at startup
@@ -68,6 +78,7 @@ Please mark all changes in the change log and use the ticket from JIRA.
- MS-624 - Re-organize project directory for open-source
- MS-635 - Add compile option to support customized faiss
- MS-660 - add ubuntu_build_deps.sh
- \#18 - Add all test cases

# Milvus 0.4.0 (2019-09-12)
README.md

@@ -1,7 +1,9 @@
[image: Milvus logo]

[image: LICENSE badge]
[image: language badge]
[codebeat badge](https://codebeat.co/projects/github-com-jinhai-cn-milvus-master)

- [Slack Community](https://join.slack.com/t/milvusio/shared_invite/enQtNzY1OTQ0NDI3NjMzLWNmYmM1NmNjOTQ5MGI5NDhhYmRhMGU5M2NhNzhhMDMzY2MzNDdlYjM5ODQ5MmE3ODFlYzU3YjJkNmVlNDQ2ZTk)
- [Twitter](https://twitter.com/milvusio)
@@ -54,6 +56,7 @@ Keep up-to-date with newest releases and latest updates by reading Milvus [relea
You can track system performance on Prometheus-based GUI monitor dashboards.

## Architecture

[image: Milvus architecture diagram]

## Get started
ci/function/file_transfer.groovy

@@ -0,0 +1,10 @@
def FileTransfer (sourceFiles, remoteDirectory, remoteIP, protocol = "ftp", makeEmptyDirs = true) {
    if (protocol == "ftp") {
        ftpPublisher masterNodeName: '', paramPublish: [parameterName: ''], alwaysPublishFromMaster: false, continueOnError: false, failOnError: true, publishers: [
            [configName: "${remoteIP}", transfers: [
                [asciiMode: false, cleanRemote: false, excludes: '', flatten: false, makeEmptyDirs: "${makeEmptyDirs}", noDefaultExcludes: false, patternSeparator: '[, ]+', remoteDirectory: "${remoteDirectory}", remoteDirectorySDF: false, removePrefix: '', sourceFiles: "${sourceFiles}"]], usePromotionTimestamp: true, useWorkspaceInPromotion: false, verbose: true
            ]
        ]
    }
}
return this
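
Scripts like this one are consumed from pipeline stages with "load", which executes the file and hands back the object produced by "return this". A minimal usage sketch, mirroring how packaged_milvus.groovy later in this commit calls it (the tarball name and remote directory here are hypothetical placeholders; the 'nas storage' publisher name is taken from that caller):

    // Inside a script { } block of a running pipeline:
    def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
    // Publish a tarball to the FTP server configured as 'nas storage' in Jenkins.
    fileTransfer.FileTransfer("milvus-engine-0.5.0.tar.gz", "milvus/engine/demo-job-1", 'nas storage')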
ci/jenkins/Jenkinsfile

@@ -0,0 +1,152 @@
pipeline {
    agent none

    options {
        timestamps()
    }

    parameters {
        choice choices: ['Release', 'Debug'], description: '', name: 'BUILD_TYPE'
        string defaultValue: 'cf1434e7-5a4b-4d25-82e8-88d667aef9e5', description: 'GIT CREDENTIALS ID', name: 'GIT_CREDENTIALS_ID', trim: true
        string defaultValue: 'registry.zilliz.com', description: 'DOCKER REGISTRY URL', name: 'DOKCER_REGISTRY_URL', trim: true
        string defaultValue: 'ba070c98-c8cc-4f7c-b657-897715f359fc', description: 'DOCKER CREDENTIALS ID', name: 'DOCKER_CREDENTIALS_ID', trim: true
        string defaultValue: 'http://192.168.1.202/artifactory/milvus', description: 'JFROG ARTIFACTORY URL', name: 'JFROG_ARTFACTORY_URL', trim: true
        string defaultValue: '1a527823-d2b7-44fd-834b-9844350baf14', description: 'JFROG CREDENTIALS ID', name: 'JFROG_CREDENTIALS_ID', trim: true
    }

    environment {
        PROJECT_NAME = "milvus"
        LOWER_BUILD_TYPE = params.BUILD_TYPE.toLowerCase()
        SEMVER = "${BRANCH_NAME}"
        JOBNAMES = env.JOB_NAME.split('/')
        PIPELINE_NAME = "${JOBNAMES[0]}"
    }

    stages {
        stage("Ubuntu 18.04") {
            environment {
                OS_NAME = "ubuntu18.04"
                PACKAGE_VERSION = VersionNumber([
                    versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-ubuntu18.04-x86_64-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
                ]);
                DOCKER_VERSION = "${SEMVER}-${OS_NAME}-${LOWER_BUILD_TYPE}"
            }

            stages {
                stage("Run Build") {
                    agent {
                        kubernetes {
                            label 'build'
                            defaultContainer 'jnlp'
                            yamlFile 'ci/jenkins/pod/milvus-build-env-pod.yaml'
                        }
                    }

                    stages {
                        stage('Build') {
                            steps {
                                container('milvus-build-env') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/build.groovy"
                                    }
                                }
                            }
                        }
                        stage('Code Coverage') {
                            steps {
                                container('milvus-build-env') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/coverage.groovy"
                                    }
                                }
                            }
                        }
                        stage('Upload Package') {
                            steps {
                                container('milvus-build-env') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/package.groovy"
                                    }
                                }
                            }
                        }
                    }
                }

                stage("Publish docker images") {
                    agent {
                        kubernetes {
                            label 'publish'
                            defaultContainer 'jnlp'
                            yamlFile 'ci/jenkins/pod/docker-pod.yaml'
                        }
                    }

                    stages {
                        stage('Publish') {
                            steps {
                                container('publish-images') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/publishImages.groovy"
                                    }
                                }
                            }
                        }
                    }
                }

                stage("Deploy to Development") {
                    agent {
                        kubernetes {
                            label 'dev-test'
                            defaultContainer 'jnlp'
                            yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
                        }
                    }

                    stages {
                        stage("Deploy to Dev") {
                            steps {
                                container('milvus-test-env') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/deploySingle2Dev.groovy"
                                    }
                                }
                            }
                        }

                        stage("Dev Test") {
                            steps {
                                container('milvus-test-env') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/singleDevTest.groovy"
                                    }
                                }
                            }
                        }

                        stage ("Cleanup Dev") {
                            steps {
                                container('milvus-test-env') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"
                                    }
                                }
                            }
                        }
                    }
                    post {
                        unsuccessful {
                            container('milvus-test-env') {
                                script {
                                    load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
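
Each stage above delegates its real work to a small Groovy file under ci/jenkins/jenkinsfile/, executed with "load" inside a script block. The contract is simple: the loaded file runs immediately, with access to the caller's env and params. A minimal sketch of what such a step file can look like (hypothetical step, not one of the files in this commit):

    // hypothetical-step.groovy: the body runs as soon as the pipeline loads it.
    timeout(time: 10, unit: 'MINUTES') {
        // env and params of the calling pipeline are in scope here.
        sh "echo building ${env.PIPELINE_NAME} build ${env.BUILD_NUMBER}"
    }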
ci/jenkins/jenkinsfile/build.groovy

@@ -0,0 +1,9 @@
timeout(time: 60, unit: 'MINUTES') {
    dir ("ci/jenkins/scripts") {
        sh "./build.sh -l"
        withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
            sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -d /opt/milvus -j -u -c"
        }
    }
}
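
One caveat worth hedging here: interpolating ${USERNAME} and ${PASSWORD} into a double-quoted Groovy string, as above, expands the secrets on the Groovy side before the shell runs, which Jenkins documentation warns against. An alternative sketch in the recommended single-quoted style, assuming the jfrog cache logic reads JFROG_USER_NAME / JFROG_PASSWORD from the environment, which is what the exports above suggest:

    withEnv(["JFROG_ARTFACTORY_URL=${params.JFROG_ARTFACTORY_URL}"]) {
        withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USER_NAME', passwordVariable: 'JFROG_PASSWORD')]) {
            // Single-quoted step: the shell reads the secrets from its environment,
            // so they are never expanded into an interpolated Groovy string.
            sh './build.sh -t Release -o /opt/milvus -d /opt/milvus -j -u -c'
        }
    }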
ci/jenkins/jenkinsfile/cleanupSingleDev.groovy

@@ -0,0 +1,9 @@
try {
    sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
} catch (exc) {
    def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu", returnStatus: true
    if (!helmResult) {
        sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
    }
    throw exc
}
ci/jenkins/jenkinsfile/coverage.groovy

@@ -0,0 +1,10 @@
timeout(time: 60, unit: 'MINUTES') {
    dir ("ci/jenkins/scripts") {
        sh "./coverage.sh -o /opt/milvus -u root -p 123456 -t \$POD_IP"
        // Set some env variables so codecov detection script works correctly
        withCredentials([[$class: 'StringBinding', credentialsId: "${env.PIPELINE_NAME}-codecov-token", variable: 'CODECOV_TOKEN']]) {
            sh 'curl -s https://codecov.io/bash | bash -s - -f output_new.info || echo "Codecov did not collect coverage reports"'
        }
    }
}
ci/jenkins/jenkinsfile/deploySingle2Dev.groovy

@@ -0,0 +1,14 @@
try {
    sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
    sh 'helm repo update'
    dir ('milvus-helm') {
        checkout([$class: 'GitSCM', branches: [[name: "0.5.0"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_CREDENTIALS_ID}", url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.0:refs/remotes/origin/0.5.0"]]])
        dir ("milvus-gpu") {
            sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/values.yaml --namespace milvus ."
        }
    }
} catch (exc) {
    echo 'Helm running failed!'
    sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
    throw exc
}
ci/jenkins/jenkinsfile/package.groovy

@@ -0,0 +1,9 @@
timeout(time: 5, unit: 'MINUTES') {
    sh "tar -zcvf ./${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz -C /opt/ milvus"
    withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
        def uploadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -T ./${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz ${params.JFROG_ARTFACTORY_URL}/milvus/package/${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz")
        if (uploadStatus != 0) {
            error("Upload \"${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz\" to \"${params.JFROG_ARTFACTORY_URL}/milvus/package/${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz\" failed!")
        }
    }
}
ci/jenkins/jenkinsfile/publishImages.groovy

@@ -0,0 +1,47 @@
container('publish-images') {
    timeout(time: 15, unit: 'MINUTES') {
        dir ("docker/deploy/${OS_NAME}") {
            def binaryPackage = "${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz"

            withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
                def downloadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -O ${params.JFROG_ARTFACTORY_URL}/milvus/package/${binaryPackage}")

                if (downloadStatus != 0) {
                    error("Download \"${params.JFROG_ARTFACTORY_URL}/milvus/package/${binaryPackage}\" failed!")
                }
            }
            sh "tar zxvf ${binaryPackage}"
            def imageName = "${PROJECT_NAME}/engine:${DOCKER_VERSION}"

            try {
                def isExistSourceImage = sh(returnStatus: true, script: "docker inspect --type=image ${imageName} > /dev/null 2>&1")
                if (isExistSourceImage == 0) {
                    sh(returnStatus: true, script: "docker rmi ${imageName}")
                }

                def customImage = docker.build("${imageName}")

                def isExistTargeImage = sh(returnStatus: true, script: "docker inspect --type=image ${params.DOKCER_REGISTRY_URL}/${imageName} > /dev/null 2>&1")
                if (isExistTargeImage == 0) {
                    sh(returnStatus: true, script: "docker rmi ${params.DOKCER_REGISTRY_URL}/${imageName}")
                }

                docker.withRegistry("https://${params.DOKCER_REGISTRY_URL}", "${params.DOCKER_CREDENTIALS_ID}") {
                    customImage.push()
                }
            } catch (exc) {
                throw exc
            } finally {
                def isExistSourceImage = sh(returnStatus: true, script: "docker inspect --type=image ${imageName} > /dev/null 2>&1")
                if (isExistSourceImage == 0) {
                    sh(returnStatus: true, script: "docker rmi ${imageName}")
                }

                def isExistTargeImage = sh(returnStatus: true, script: "docker inspect --type=image ${params.DOKCER_REGISTRY_URL}/${imageName} > /dev/null 2>&1")
                if (isExistTargeImage == 0) {
                    sh(returnStatus: true, script: "docker rmi ${params.DOKCER_REGISTRY_URL}/${imageName}")
                }
            }
        }
    }
}
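
The inspect-then-remove dance appears four times in this file; if it grows further, it can be folded into a single helper. A sketch of that refactor (hypothetical helper, not part of this commit, same observable behavior):

    // Remove a local image if it exists; swallow the exit status otherwise.
    def removeImageIfExists = { String image ->
        if (sh(returnStatus: true, script: "docker inspect --type=image ${image} > /dev/null 2>&1") == 0) {
            sh(returnStatus: true, script: "docker rmi ${image}")
        }
    }
    removeImageIfExists("${imageName}")
    removeImageIfExists("${params.DOKCER_REGISTRY_URL}/${imageName}")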
ci/jenkins/jenkinsfile/singleDevTest.groovy

@@ -0,0 +1,22 @@
timeout(time: 30, unit: 'MINUTES') {
    dir ("tests/milvus_python_test") {
        sh 'python3 -m pip install -r requirements.txt'
        sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
    }
    // mysql database backend test
    load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"

    if (!fileExists('milvus-helm')) {
        dir ("milvus-helm") {
            checkout([$class: 'GitSCM', branches: [[name: "0.5.0"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_CREDENTIALS_ID}", url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.0:refs/remotes/origin/0.5.0"]]])
        }
    }
    dir ("milvus-helm") {
        dir ("milvus-gpu") {
            sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/mysql_values.yaml --namespace milvus ."
        }
    }
    dir ("tests/milvus_python_test") {
        sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
    }
}
ci/jenkins/pod/docker-pod.yaml

@@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-images
    image: registry.zilliz.com/library/docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
ci/jenkins/pod/milvus-build-env-pod.yaml

@@ -0,0 +1,35 @@
apiVersion: v1
kind: Pod
metadata:
  name: milvus-build-env
  labels:
    app: milvus
    component: build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-build-env:v0.5.0-ubuntu18.04
    env:
    - name: POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    command:
    - cat
    tty: true
    resources:
      limits:
        memory: "32Gi"
        cpu: "8.0"
        nvidia.com/gpu: 1
      requests:
        memory: "16Gi"
        cpu: "4.0"
  - name: milvus-mysql
    image: mysql:5.6
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: "123456"
    ports:
    - containerPort: 3306
      name: mysql
ci/jenkins/pod/testEnvironment.yaml

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test-env
spec:
  containers:
  - name: milvus-test-env
    image: registry.zilliz.com/milvus/milvus-test-env:v0.1
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
ci/jenkins/scripts/build.sh

@@ -0,0 +1,142 @@
#!/bin/bash

SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
    DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
    SOURCE="$(readlink "$SOURCE")"
    [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, resolve it relative to the path where the symlink file was located
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

CMAKE_BUILD_DIR="${SCRIPTS_DIR}/../../../core/cmake_build"
BUILD_TYPE="Debug"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX="/opt/milvus"
BUILD_COVERAGE="OFF"
DB_PATH="/opt/milvus"
PROFILING="OFF"
USE_JFROG_CACHE="OFF"
RUN_CPPLINT="OFF"
CUSTOMIZATION="OFF" # default: use original faiss
CUDA_COMPILER=/usr/local/cuda/bin/nvcc

CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}"
wget -q --method HEAD ${CUSTOMIZED_FAISS_URL}
if [ $? -eq 0 ]; then
    CUSTOMIZATION="ON"
else
    CUSTOMIZATION="OFF"
fi

while getopts "o:d:t:ulcgjhx" arg
do
    case $arg in
        o)
            INSTALL_PREFIX=$OPTARG
            ;;
        d)
            DB_PATH=$OPTARG
            ;;
        t)
            BUILD_TYPE=$OPTARG # BUILD_TYPE
            ;;
        u)
            echo "Build and run unittest cases"
            BUILD_UNITTEST="ON"
            ;;
        l)
            RUN_CPPLINT="ON"
            ;;
        c)
            BUILD_COVERAGE="ON"
            ;;
        g)
            PROFILING="ON"
            ;;
        j)
            USE_JFROG_CACHE="ON"
            ;;
        x)
            CUSTOMIZATION="OFF" # force use of original faiss
            ;;
        h) # help
            echo "

parameter:
-o: install prefix(default: /opt/milvus)
-d: db data path(default: /opt/milvus)
-t: build type(default: Debug)
-u: build unit test cases(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-c: code coverage(default: OFF)
-g: profiling(default: OFF)
-j: use jfrog cache build directory(default: OFF)
-x: force use of original faiss(default: OFF)
-h: help

usage:
./build.sh -o \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-c] [-g] [-j] [-x] [-h]
"
            exit 0
            ;;
        ?)
            echo "ERROR! unknown argument"
            exit 1
            ;;
    esac
done

if [[ ! -d ${CMAKE_BUILD_DIR} ]]; then
    mkdir ${CMAKE_BUILD_DIR}
fi

cd ${CMAKE_BUILD_DIR}

# remove make cache since build.sh -l uses default variables;
# force update the variables each time
make rebuild_cache

CMAKE_CMD="cmake \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DMILVUS_DB_PATH=${DB_PATH} \
-DMILVUS_ENABLE_PROFILING=${PROFILING} \
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
-DCUSTOMIZATION=${CUSTOMIZATION} \
-DFAISS_URL=${CUSTOMIZED_FAISS_URL} \
.."
echo ${CMAKE_CMD}
${CMAKE_CMD}

if [[ ${RUN_CPPLINT} == "ON" ]]; then
    # cpplint check
    make lint
    if [ $? -ne 0 ]; then
        echo "ERROR! cpplint check failed"
        exit 1
    fi
    echo "cpplint check passed!"

    # clang-format check
    make check-clang-format
    if [ $? -ne 0 ]; then
        echo "ERROR! clang-format check failed"
        exit 1
    fi
    echo "clang-format check passed!"

    # # clang-tidy check
    # make check-clang-tidy
    # if [ $? -ne 0 ]; then
    #     echo "ERROR! clang-tidy check failed"
    #     rm -f CMakeCache.txt
    #     exit 1
    # fi
    # echo "clang-tidy check passed!"
else
    # compile and build
    make -j8 || exit 1
    make install || exit 1
fi
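
For reference, the two ways this script is driven by the CI above: a lint-only pass, and a full jfrog-cached build with unit tests and coverage, using exactly the flags seen in build.groovy (BUILD_TYPE shown here as Release for concreteness):

    // Lint pass only: runs make lint and check-clang-format, no compilation.
    sh "./build.sh -l"
    // Full build: Release type, install and db path /opt/milvus, jfrog cache,
    // unit tests, and coverage instrumentation.
    sh "./build.sh -t Release -o /opt/milvus -d /opt/milvus -j -u -c"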
ci/jenkins/scripts/coverage.sh

@@ -0,0 +1,138 @@
#!/bin/bash

SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
    DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
    SOURCE="$(readlink "$SOURCE")"
    [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, resolve it relative to the path where the symlink file was located
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

INSTALL_PREFIX="/opt/milvus"
CMAKE_BUILD_DIR="${SCRIPTS_DIR}/../../../core/cmake_build"
MYSQL_USER_NAME=root
MYSQL_PASSWORD=123456
MYSQL_HOST='127.0.0.1'
MYSQL_PORT='3306'

while getopts "o:u:p:t:h" arg
do
    case $arg in
        o)
            INSTALL_PREFIX=$OPTARG
            ;;
        u)
            MYSQL_USER_NAME=$OPTARG
            ;;
        p)
            MYSQL_PASSWORD=$OPTARG
            ;;
        t)
            MYSQL_HOST=$OPTARG
            ;;
        h) # help
            echo "

parameter:
-o: milvus install prefix(default: /opt/milvus)
-u: mysql account
-p: mysql password
-t: mysql host
-h: help

usage:
./coverage.sh -o \${INSTALL_PREFIX} -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h]
"
            exit 0
            ;;
        ?)
            echo "ERROR! unknown argument"
            exit 1
            ;;
    esac
done

export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${INSTALL_PREFIX}/lib

LCOV_CMD="lcov"
# LCOV_GEN_CMD="genhtml"

FILE_INFO_BASE="base.info"
FILE_INFO_MILVUS="server.info"
FILE_INFO_OUTPUT="output.info"
FILE_INFO_OUTPUT_NEW="output_new.info"
DIR_LCOV_OUTPUT="lcov_out"

DIR_GCNO="${CMAKE_BUILD_DIR}"
DIR_UNITTEST="${INSTALL_PREFIX}/unittest"

# delete old code coverage info files
rm -rf ${DIR_LCOV_OUTPUT}
rm -f ${FILE_INFO_BASE} ${FILE_INFO_MILVUS} ${FILE_INFO_OUTPUT} ${FILE_INFO_OUTPUT_NEW}

MYSQL_DB_NAME=milvus_`date +%s%N`

function mysql_exc()
{
    cmd=$1
    mysql -h${MYSQL_HOST} -u${MYSQL_USER_NAME} -p${MYSQL_PASSWORD} -e "${cmd}"
    if [ $? -ne 0 ]; then
        echo "mysql $cmd run failed"
    fi
}

mysql_exc "CREATE DATABASE IF NOT EXISTS ${MYSQL_DB_NAME};"
mysql_exc "GRANT ALL PRIVILEGES ON ${MYSQL_DB_NAME}.* TO '${MYSQL_USER_NAME}'@'%';"
mysql_exc "FLUSH PRIVILEGES;"
mysql_exc "USE ${MYSQL_DB_NAME};"

# get baseline
${LCOV_CMD} -c -i -d ${DIR_GCNO} -o "${FILE_INFO_BASE}"
if [ $? -ne 0 ]; then
    echo "gen baseline coverage run failed"
    exit 1
fi

for test in `ls ${DIR_UNITTEST}`; do
    echo $test
    case ${test} in
        test_db)
            # set run args for test_db
            args="mysql://${MYSQL_USER_NAME}:${MYSQL_PASSWORD}@${MYSQL_HOST}:${MYSQL_PORT}/${MYSQL_DB_NAME}"
            ;;
        *test_*)
            args=""
            ;;
    esac
    # run unittest
    ${DIR_UNITTEST}/${test} "${args}"
    if [ $? -ne 0 ]; then
        echo ${args}
        echo ${DIR_UNITTEST}/${test} "run failed"
    fi
done

mysql_exc "DROP DATABASE IF EXISTS ${MYSQL_DB_NAME};"

# gen code coverage
${LCOV_CMD} -d ${DIR_GCNO} -o "${FILE_INFO_MILVUS}" -c
# merge coverage
${LCOV_CMD} -a ${FILE_INFO_BASE} -a ${FILE_INFO_MILVUS} -o "${FILE_INFO_OUTPUT}"

# remove third party from tracefiles
${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
    "/usr/*" \
    "*/boost/*" \
    "*/cmake_build/*_ep-prefix/*" \
    "*/src/index/cmake_build*" \
    "*/src/index/thirdparty*" \
    "*/src/grpc*" \
    "*/src/metrics/MetricBase.h" \
    "*/src/server/Server.cpp" \
    "*/src/server/DBWrapper.cpp" \
    "*/src/server/grpc_impl/GrpcServer.cpp" \
    "*/src/utils/easylogging++.h" \
    "*/src/utils/easylogging++.cc"

# gen html report
# ${LCOV_GEN_CMD} "${FILE_INFO_OUTPUT_NEW}" --output-directory ${DIR_LCOV_OUTPUT}/
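
As wired up in coverage.groovy above, the script is pointed at the MySQL sidecar of the build pod (root/123456 on the pod IP, injected via the Kubernetes downward API in the pod YAML), so test_db can exercise the MySQL metadata backend:

    // The backslash keeps $POD_IP for the shell; the pod YAML above injects it
    // into the milvus-build-env container from status.podIP.
    sh "./coverage.sh -o /opt/milvus -u root -p 123456 -t \$POD_IP"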
@@ -0,0 +1,13 @@
try {
    def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
    if (!result) {
        sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
    }
} catch (exc) {
    def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
    if (!result) {
        sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
    }
    throw exc
}
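
This status-check-then-purge idiom recurs across the cleanup scripts below (dev, staging, cluster) with only the release name changing. A sketch of a shared version (hypothetical helper, not part of this commit):

    // Delete a helm release only if helm status reports that it exists.
    def purgeIfPresent = { String releaseName ->
        if (sh(script: "helm status ${releaseName}", returnStatus: true) == 0) {
            sh "helm del --purge ${releaseName}"
        }
    }
    purgeIfPresent("${env.JOB_NAME}-${env.BUILD_NUMBER}")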
@@ -0,0 +1,13 @@
try {
    def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
    if (!result) {
        sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
    }
} catch (exc) {
    def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
    if (!result) {
        sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
    }
    throw exc
}
ci/jenkinsfile/cluster_cleanup_dev.groovy

@@ -0,0 +1,13 @@
try {
    def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster", returnStatus: true
    if (!result) {
        sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
    }
} catch (exc) {
    def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster", returnStatus: true
    if (!result) {
        sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
    }
    throw exc
}
ci/jenkinsfile/cluster_deploy2dev.groovy

@@ -0,0 +1,24 @@
try {
    sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
    sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
    sh 'helm repo update'
    dir ("milvus-helm") {
        checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
        dir ("milvus/milvus-cluster") {
            sh "helm install --wait --timeout 300 --set roServers.image.tag=${DOCKER_VERSION} --set woServers.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP -f ci/values.yaml --name ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster --namespace milvus-cluster --version 0.5.0 ."
        }
    }
    /*
    timeout(time: 2, unit: 'MINUTES') {
        waitUntil {
            def result = sh script: "nc -z -w 3 ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster-milvus-cluster-proxy.milvus-cluster.svc.cluster.local 19530", returnStatus: true
            return !result
        }
    }
    */
} catch (exc) {
    echo 'Helm running failed!'
    sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
    throw exc
}
ci/jenkinsfile/cluster_dev_test.groovy

@@ -0,0 +1,12 @@
timeout(time: 25, unit: 'MINUTES') {
    try {
        dir ("${PROJECT_NAME}_test") {
            checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            sh 'python3 -m pip install -r requirements_cluster.txt'
            sh "pytest . --alluredir=cluster_test_out --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster-milvus-cluster-proxy.milvus-cluster.svc.cluster.local"
        }
    } catch (exc) {
        echo 'Milvus Cluster Test Failed !'
        throw exc
    }
}
ci/jenkinsfile/deploy2dev.groovy

@@ -0,0 +1,16 @@
try {
    sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
    sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
    sh 'helm repo update'
    dir ("milvus-helm") {
        checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
        dir ("milvus/milvus-gpu") {
            sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/values.yaml --namespace milvus-1 --version 0.5.0 ."
        }
    }
} catch (exc) {
    echo 'Helm running failed!'
    sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
    throw exc
}
@@ -0,0 +1,16 @@
try {
    sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
    sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
    sh 'helm repo update'
    dir ("milvus-helm") {
        checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
        dir ("milvus/milvus-gpu") {
            sh "helm install --wait --timeout 300 --set engine.image.repository=\"zilliz.azurecr.cn/milvus/engine\" --set engine.image.tag=${DOCKER_VERSION} --set expose.type=loadBalancer --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/values.yaml --namespace milvus-1 --version 0.5.0 ."
        }
    }
} catch (exc) {
    echo 'Helm running failed!'
    sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
    throw exc
}
ci/jenkinsfile/dev_test.groovy

@@ -0,0 +1,28 @@
timeout(time: 30, unit: 'MINUTES') {
    try {
        dir ("${PROJECT_NAME}_test") {
            checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            sh 'python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com'
            sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-1.svc.cluster.local"
        }
        // mysql database backend test
        load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"

        if (!fileExists('milvus-helm')) {
            dir ("milvus-helm") {
                checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            }
        }
        dir ("milvus-helm") {
            dir ("milvus/milvus-gpu") {
                sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/db_backend/mysql_values.yaml --namespace milvus-2 --version 0.5.0 ."
            }
        }
        dir ("${PROJECT_NAME}_test") {
            sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-2.svc.cluster.local"
        }
    } catch (exc) {
        echo 'Milvus Test Failed !'
        throw exc
    }
}
@@ -0,0 +1,29 @@
timeout(time: 60, unit: 'MINUTES') {
    try {
        dir ("${PROJECT_NAME}_test") {
            checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            sh 'python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com'
            sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-1.svc.cluster.local"
        }

        // mysql database backend test
        load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"

        if (!fileExists('milvus-helm')) {
            dir ("milvus-helm") {
                checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            }
        }
        dir ("milvus-helm") {
            dir ("milvus/milvus-gpu") {
                sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/db_backend/mysql_values.yaml --namespace milvus-2 --version 0.4.0 ."
            }
        }
        dir ("${PROJECT_NAME}_test") {
            sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-2.svc.cluster.local"
        }
    } catch (exc) {
        echo 'Milvus Test Failed !'
        throw exc
    }
}
ci/jenkinsfile/milvus_build.groovy

@@ -0,0 +1,30 @@
container('milvus-build-env') {
    timeout(time: 120, unit: 'MINUTES') {
        gitlabCommitStatus(name: 'Build Engine') {
            dir ("milvus_engine") {
                try {
                    checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption', disableSubmodules: false, parentCredentials: true, recursiveSubmodules: true, reference: '', trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])

                    dir ("core") {
                        sh "git config --global user.email \"test@zilliz.com\""
                        sh "git config --global user.name \"test\""
                        withCredentials([usernamePassword(credentialsId: "${params.JFROG_USER}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
                            sh "./build.sh -l"
                            sh "rm -rf cmake_build"
                            sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' \
                                && export JFROG_USER_NAME='${USERNAME}' \
                                && export JFROG_PASSWORD='${PASSWORD}' \
                                && export FAISS_URL='http://192.168.1.105:6060/jinhai/faiss/-/archive/branch-0.2.1/faiss-branch-0.2.1.tar.gz' \
                                && ./build.sh -t ${params.BUILD_TYPE} -d /opt/milvus -j -u -c"

                            sh "./coverage.sh -u root -p 123456 -t \$POD_IP"
                        }
                    }
                } catch (exc) {
                    updateGitlabCommitStatus name: 'Build Engine', state: 'failed'
                    throw exc
                }
            }
        }
    }
}
ci/jenkinsfile/milvus_build_no_ut.groovy

@@ -0,0 +1,28 @@
container('milvus-build-env') {
    timeout(time: 120, unit: 'MINUTES') {
        gitlabCommitStatus(name: 'Build Engine') {
            dir ("milvus_engine") {
                try {
                    checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption', disableSubmodules: false, parentCredentials: true, recursiveSubmodules: true, reference: '', trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])

                    dir ("core") {
                        sh "git config --global user.email \"test@zilliz.com\""
                        sh "git config --global user.name \"test\""
                        withCredentials([usernamePassword(credentialsId: "${params.JFROG_USER}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
                            sh "./build.sh -l"
                            sh "rm -rf cmake_build"
                            sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' \
                                && export JFROG_USER_NAME='${USERNAME}' \
                                && export JFROG_PASSWORD='${PASSWORD}' \
                                && export FAISS_URL='http://192.168.1.105:6060/jinhai/faiss/-/archive/branch-0.2.1/faiss-branch-0.2.1.tar.gz' \
                                && ./build.sh -t ${params.BUILD_TYPE} -j -d /opt/milvus"
                        }
                    }
                } catch (exc) {
                    updateGitlabCommitStatus name: 'Build Engine', state: 'failed'
                    throw exc
                }
            }
        }
    }
}
@@ -0,0 +1,38 @@
container('publish-docker') {
    timeout(time: 15, unit: 'MINUTES') {
        gitlabCommitStatus(name: 'Publish Engine Docker') {
            try {
                dir ("${PROJECT_NAME}_build") {
                    checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:build/milvus_build.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
                    dir ("docker/deploy/ubuntu16.04/free_version") {
                        sh "curl -O -u anonymous: ftp://192.168.1.126/data/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
                        sh "tar zxvf ${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
                        try {
                            def customImage = docker.build("${PROJECT_NAME}/engine:${DOCKER_VERSION}")
                            docker.withRegistry('https://registry.zilliz.com', "${params.DOCKER_PUBLISH_USER}") {
                                customImage.push()
                            }
                            docker.withRegistry('https://zilliz.azurecr.cn', "${params.AZURE_DOCKER_PUBLISH_USER}") {
                                customImage.push()
                            }
                            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                                updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'success'
                                echo "Docker Pull Command: docker pull registry.zilliz.com/${PROJECT_NAME}/engine:${DOCKER_VERSION}"
                            }
                        } catch (exc) {
                            updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'canceled'
                            throw exc
                        } finally {
                            sh "docker rmi ${PROJECT_NAME}/engine:${DOCKER_VERSION}"
                        }
                    }
                }
            } catch (exc) {
                updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'failed'
                echo 'Publish docker failed!'
                throw exc
            }
        }
    }
}
@@ -0,0 +1,15 @@
def notify() {
    if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
        // Send an email only if the build status has changed from green/unstable to red
        emailext subject: '$DEFAULT_SUBJECT',
            body: '$DEFAULT_CONTENT',
            recipientProviders: [
                [$class: 'DevelopersRecipientProvider'],
                [$class: 'RequesterRecipientProvider']
            ],
            replyTo: '$DEFAULT_REPLYTO',
            to: '$DEFAULT_RECIPIENTS'
    }
}
return this
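
This is loaded the same way as file_transfer.groovy; a caller would invoke it from a pipeline post block along these lines (hypothetical sketch; the ci/function/notify.groovy path is an assumption, the scrape does not name this file):

    post {
        always {
            script {
                // Assumed location of the helper above; adjust to the real path.
                def notify = load "${env.WORKSPACE}/ci/function/notify.groovy"
                notify.notify()
            }
        }
    }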
ci/jenkinsfile/packaged_milvus.groovy

@@ -0,0 +1,44 @@
container('milvus-build-env') {
    timeout(time: 5, unit: 'MINUTES') {
        dir ("milvus_engine") {
            dir ("core") {
                gitlabCommitStatus(name: 'Packaged Engine') {
                    if (fileExists('milvus')) {
                        try {
                            sh "tar -zcvf ./${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz ./milvus"
                            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
                            fileTransfer.FileTransfer("${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz", "${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage')
                            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                                echo "Download Milvus Engine Binary Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz\""
                            }
                        } catch (exc) {
                            updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
                            throw exc
                        }
                    } else {
                        updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
                        error("Milvus binary directory doesn't exist!")
                    }
                }

                gitlabCommitStatus(name: 'Packaged Engine lcov') {
                    if (fileExists('lcov_out')) {
                        try {
                            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
                            fileTransfer.FileTransfer("lcov_out/", "${PROJECT_NAME}/lcov/${JOB_NAME}-${BUILD_ID}", 'nas storage')
                            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                                echo "Milvus lcov out Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/lcov/${JOB_NAME}-${BUILD_ID}/lcov_out/\""
                            }
                        } catch (exc) {
                            updateGitlabCommitStatus name: 'Packaged Engine lcov', state: 'failed'
                            throw exc
                        }
                    } else {
                        updateGitlabCommitStatus name: 'Packaged Engine lcov', state: 'failed'
                        error("Milvus lcov out directory doesn't exist!")
                    }
                }
            }
        }
    }
}
ci/jenkinsfile/packaged_milvus_no_ut.groovy

@@ -0,0 +1,26 @@
container('milvus-build-env') {
    timeout(time: 5, unit: 'MINUTES') {
        dir ("milvus_engine") {
            dir ("core") {
                gitlabCommitStatus(name: 'Packaged Engine') {
                    if (fileExists('milvus')) {
                        try {
                            sh "tar -zcvf ./${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz ./milvus"
                            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
                            fileTransfer.FileTransfer("${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz", "${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage')
                            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                                echo "Download Milvus Engine Binary Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz\""
                            }
                        } catch (exc) {
                            updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
                            throw exc
                        }
                    } else {
                        updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
                        error("Milvus binary directory doesn't exist!")
                    }
                }
            }
        }
    }
}
@@ -0,0 +1,35 @@
container('publish-docker') {
    timeout(time: 15, unit: 'MINUTES') {
        gitlabCommitStatus(name: 'Publish Engine Docker') {
            try {
                dir ("${PROJECT_NAME}_build") {
                    checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:build/milvus_build.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
                    dir ("docker/deploy/ubuntu16.04/free_version") {
                        sh "curl -O -u anonymous: ftp://192.168.1.126/data/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
                        sh "tar zxvf ${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
                        try {
                            def customImage = docker.build("${PROJECT_NAME}/engine:${DOCKER_VERSION}")
                            docker.withRegistry('https://registry.zilliz.com', "${params.DOCKER_PUBLISH_USER}") {
                                customImage.push()
                            }
                            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                                updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'success'
                                echo "Docker Pull Command: docker pull registry.zilliz.com/${PROJECT_NAME}/engine:${DOCKER_VERSION}"
                            }
                        } catch (exc) {
                            updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'canceled'
                            throw exc
                        } finally {
                            sh "docker rmi ${PROJECT_NAME}/engine:${DOCKER_VERSION}"
                        }
                    }
                }
            } catch (exc) {
                updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'failed'
                echo 'Publish docker failed!'
                throw exc
            }
        }
    }
}
@@ -0,0 +1,31 @@
timeout(time: 40, unit: 'MINUTES') {
    try {
        dir ("${PROJECT_NAME}_test") {
            checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            sh 'python3 -m pip install -r requirements.txt'
            def service_ip = sh (script: "kubectl get svc --namespace milvus-1 ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine --template \"{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}\"", returnStdout: true).trim()
            sh "pytest . --alluredir=\"test_out/staging/single/sqlite\" --ip ${service_ip}"
        }

        // mysql database backend test
        load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_staging.groovy"

        if (!fileExists('milvus-helm')) {
            dir ("milvus-helm") {
                checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            }
        }
        dir ("milvus-helm") {
            dir ("milvus/milvus-gpu") {
                sh "helm install --wait --timeout 300 --set engine.image.repository=\"zilliz.azurecr.cn/milvus/engine\" --set engine.image.tag=${DOCKER_VERSION} --set expose.type=loadBalancer --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/db_backend/mysql_values.yaml --namespace milvus-2 --version 0.5.0 ."
            }
        }
        dir ("${PROJECT_NAME}_test") {
            def service_ip = sh (script: "kubectl get svc --namespace milvus-2 ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine --template \"{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}\"", returnStdout: true).trim()
            sh "pytest . --alluredir=\"test_out/staging/single/mysql\" --ip ${service_ip}"
        }
    } catch (exc) {
        echo 'Milvus Test Failed !'
        throw exc
    }
}
ci/jenkinsfile/upload_dev_cluster_test_out.groovy

@@ -0,0 +1,14 @@
timeout(time: 5, unit: 'MINUTES') {
    dir ("${PROJECT_NAME}_test") {
        if (fileExists('cluster_test_out')) {
            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
            fileTransfer.FileTransfer("cluster_test_out/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
            }
        } else {
            error("Milvus Dev Test Out directory doesn't exist!")
        }
    }
}
ci/jenkinsfile/upload_dev_test_out.groovy

@@ -0,0 +1,13 @@
timeout(time: 5, unit: 'MINUTES') {
    dir ("${PROJECT_NAME}_test") {
        if (fileExists('test_out/dev')) {
            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
            fileTransfer.FileTransfer("test_out/dev/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
            }
        } else {
            error("Milvus Dev Test Out directory doesn't exist!")
        }
    }
}
@@ -0,0 +1,13 @@
timeout(time: 5, unit: 'MINUTES') {
    dir ("${PROJECT_NAME}_test") {
        if (fileExists('test_out/staging')) {
            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
            fileTransfer.FileTransfer("test_out/staging/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
            }
        } else {
            error("Milvus Dev Test Out directory doesn't exist!")
        }
    }
}
|
|
@ -0,0 +1,396 @@
|
|||
pipeline {
|
||||
agent none
|
||||
|
||||
options {
|
||||
timestamps()
|
||||
}
|
||||
|
||||
environment {
|
||||
PROJECT_NAME = "milvus"
|
||||
LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
|
||||
SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
|
||||
GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
|
||||
SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
|
||||
DOCKER_VERSION_STR = "${env.gitlabAfter == null ? "${SEMVER}-${LOWER_BUILD_TYPE}" : "${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}"}"
|
||||
}
|
||||
|
||||
stages {
|
||||
stage("Ubuntu 16.04") {
|
||||
environment {
|
||||
PACKAGE_VERSION = VersionNumber([
|
||||
versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
|
||||
]);
|
||||
|
||||
DOCKER_VERSION = VersionNumber([
|
||||
versionNumberString : '${DOCKER_VERSION_STR}'
|
||||
]);
|
||||
}
|
||||
|
||||
stages {
|
||||
stage("Run Build") {
|
||||
agent {
|
||||
kubernetes {
|
||||
cloud 'build-kubernetes'
|
||||
label 'build'
|
||||
defaultContainer 'jnlp'
|
||||
yaml """
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: milvus-build-env
|
||||
labels:
|
||||
app: milvus
|
||||
componet: build-env
|
||||
spec:
|
||||
containers:
|
||||
- name: milvus-build-env
|
||||
image: registry.zilliz.com/milvus/milvus-build-env:v0.13
|
||||
env:
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
command:
|
||||
- cat
|
||||
tty: true
|
||||
resources:
|
||||
limits:
|
||||
memory: "28Gi"
|
||||
cpu: "10.0"
|
||||
nvidia.com/gpu: 1
|
||||
requests:
|
||||
memory: "14Gi"
|
||||
cpu: "5.0"
|
||||
- name: milvus-mysql
|
||||
image: mysql:5.6
|
||||
env:
|
||||
- name: MYSQL_ROOT_PASSWORD
|
||||
value: 123456
|
||||
ports:
|
||||
- containerPort: 3306
|
||||
name: mysql
|
||||
"""
|
||||
}
|
||||
}
|
||||
stages {
|
||||
stage('Build') {
|
||||
steps {
|
||||
gitlabCommitStatus(name: 'Build') {
|
||||
script {
|
||||
load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build.groovy"
|
||||
load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus.groovy"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
aborted {
|
||||
script {
|
||||
updateGitlabCommitStatus name: 'Build', state: 'canceled'
|
||||
echo "Milvus Build aborted !"
|
||||
}
|
||||
}
|
||||
|
||||
failure {
|
||||
script {
|
||||
updateGitlabCommitStatus name: 'Build', state: 'failed'
|
||||
echo "Milvus Build failure !"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage("Publish docker and helm") {
|
||||
agent {
|
||||
kubernetes {
|
||||
label 'publish'
|
||||
defaultContainer 'jnlp'
|
||||
yaml """
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
app: publish
|
||||
componet: docker
|
||||
spec:
|
||||
containers:
|
||||
- name: publish-docker
|
||||
image: registry.zilliz.com/library/zilliz_docker:v1.0.0
|
||||
securityContext:
|
||||
privileged: true
|
||||
command:
|
||||
- cat
|
||||
tty: true
|
||||
volumeMounts:
|
||||
- name: docker-sock
|
||||
mountPath: /var/run/docker.sock
|
||||
volumes:
|
||||
- name: docker-sock
|
||||
hostPath:
|
||||
path: /var/run/docker.sock
|
||||
"""
|
||||
}
|
||||
}
|
||||
stages {
|
||||
stage('Publish Docker') {
|
||||
steps {
|
||||
gitlabCommitStatus(name: 'Publish Docker') {
|
||||
script {
|
||||
load "${env.WORKSPACE}/ci/jenkinsfile/publish_docker.groovy"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
aborted {
|
||||
script {
|
||||
updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
|
||||
echo "Milvus Publish Docker aborted !"
|
||||
}
|
||||
}
|
||||
|
||||
failure {
|
||||
script {
|
||||
updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
|
||||
echo "Milvus Publish Docker failure !"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stage("Deploy to Development") {
|
||||
parallel {
|
||||
stage("Single Node") {
|
||||
agent {
|
||||
kubernetes {
|
||||
label 'dev-test'
|
||||
defaultContainer 'jnlp'
|
||||
yaml """
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
app: milvus
|
||||
componet: test
|
||||
spec:
|
||||
containers:
|
||||
- name: milvus-testframework
|
||||
image: registry.zilliz.com/milvus/milvus-test:v0.2
|
||||
command:
|
||||
- cat
|
||||
tty: true
|
||||
volumeMounts:
|
||||
- name: kubeconf
|
||||
mountPath: /root/.kube/
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: kubeconf
|
||||
secret:
|
||||
secretName: test-cluster-config
|
||||
"""
|
||||
}
|
||||
}
|
||||
|
||||
stages {
|
||||
stage("Deploy to Dev") {
|
||||
steps {
|
||||
gitlabCommitStatus(name: 'Deloy to Dev') {
|
||||
container('milvus-testframework') {
|
||||
script {
|
||||
load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
stage("Dev Test") {
|
||||
steps {
|
||||
gitlabCommitStatus(name: 'Deloy Test') {
|
||||
container('milvus-testframework') {
|
||||
script {
|
||||
load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
|
||||
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
stage ("Cleanup Dev") {
|
||||
steps {
|
||||
gitlabCommitStatus(name: 'Cleanup Dev') {
|
||||
container('milvus-testframework') {
|
||||
script {
|
||||
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
post {
|
||||
always {
|
||||
container('milvus-testframework') {
|
||||
script {
|
||||
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
|
||||
}
|
||||
}
|
||||
}
|
||||
success {
|
||||
script {
|
||||
echo "Milvus Single Node CI/CD success !"
|
||||
}
|
||||
}
|
||||
aborted {
|
||||
script {
|
||||
echo "Milvus Single Node CI/CD aborted !"
|
||||
}
|
||||
}
|
||||
failure {
|
||||
script {
|
||||
echo "Milvus Single Node CI/CD failure !"
|
||||
}
|
||||
}
|
||||
}
|
||||
}

                        // stage("Cluster") {
                        //     agent {
                        //         kubernetes {
                        //             label 'dev-test'
                        //             defaultContainer 'jnlp'
                        //             yaml """
                        // apiVersion: v1
                        // kind: Pod
                        // metadata:
                        //   labels:
                        //     app: milvus
                        //     component: test
                        // spec:
                        //   containers:
                        //   - name: milvus-testframework
                        //     image: registry.zilliz.com/milvus/milvus-test:v0.2
                        //     command:
                        //     - cat
                        //     tty: true
                        //     volumeMounts:
                        //     - name: kubeconf
                        //       mountPath: /root/.kube/
                        //       readOnly: true
                        //   volumes:
                        //   - name: kubeconf
                        //     secret:
                        //       secretName: test-cluster-config
                        //             """
                        //         }
                        //     }
                        //     stages {
                        //         stage("Deploy to Dev") {
                        //             steps {
                        //                 gitlabCommitStatus(name: 'Deploy to Dev') {
                        //                     container('milvus-testframework') {
                        //                         script {
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
                        //                         }
                        //                     }
                        //                 }
                        //             }
                        //         }
                        //         stage("Dev Test") {
                        //             steps {
                        //                 gitlabCommitStatus(name: 'Deploy Test') {
                        //                     container('milvus-testframework') {
                        //                         script {
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
                        //                         }
                        //                     }
                        //                 }
                        //             }
                        //         }
                        //         stage ("Cleanup Dev") {
                        //             steps {
                        //                 gitlabCommitStatus(name: 'Cleanup Dev') {
                        //                     container('milvus-testframework') {
                        //                         script {
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                        //                         }
                        //                     }
                        //                 }
                        //             }
                        //         }
                        //     }
                        //     post {
                        //         always {
                        //             container('milvus-testframework') {
                        //                 script {
                        //                     load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                        //                 }
                        //             }
                        //         }
                        //         success {
                        //             script {
                        //                 echo "Milvus Cluster CI/CD success!"
                        //             }
                        //         }
                        //         aborted {
                        //             script {
                        //                 echo "Milvus Cluster CI/CD aborted!"
                        //             }
                        //         }
                        //         failure {
                        //             script {
                        //                 echo "Milvus Cluster CI/CD failure!"
                        //             }
                        //         }
                        //     }
                        // }
                    }
                }
            }
        }
    }

    post {
        always {
            script {
                if (env.gitlabAfter != null) {
                    if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                        // Send an email only if the build status has changed from green/unstable to red
                        emailext subject: '$DEFAULT_SUBJECT',
                            body: '$DEFAULT_CONTENT',
                            recipientProviders: [
                                [$class: 'DevelopersRecipientProvider'],
                                [$class: 'RequesterRecipientProvider']
                            ],
                            replyTo: '$DEFAULT_REPLYTO',
                            to: '$DEFAULT_RECIPIENTS'
                    }
                }
            }
        }

        success {
            script {
                updateGitlabCommitStatus name: 'CI/CD', state: 'success'
                echo "Milvus CI/CD success!"
            }
        }

        aborted {
            script {
                updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
                echo "Milvus CI/CD aborted!"
            }
        }

        failure {
            script {
                updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
                echo "Milvus CI/CD failure!"
            }
        }
    }
}
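
Each stage above pulls its actual logic in with the Jenkins `load` step. As a purely illustrative sketch (the loaded scripts such as ci/jenkinsfile/deploy2dev.groovy are not part of this dump, and the helm repository, chart and release names below are placeholders, not values from this commit), such a loadable step script runs its body immediately when loaded and ends with `return this` so the pipeline can keep a handle to it:

// Hypothetical sketch only; every name here is an assumption for illustration.
try {
    sh 'helm repo add milvus-repo https://registry.zilliz.com/chartrepo/milvus'    // placeholder repo URL
    sh "helm install --name milvus-dev --set image.tag=${env.DOCKER_VERSION} milvus-repo/milvus"    // placeholder chart
} catch (exc) {
    echo 'Deploy to dev failed'
    throw exc
}
return this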

@@ -0,0 +1,396 @@
pipeline {
    agent none

    options {
        timestamps()
    }

    environment {
        PROJECT_NAME = "milvus"
        LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
        SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
        GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
        SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
        DOCKER_VERSION_STR = "${env.gitlabAfter == null ? "${SEMVER}-${LOWER_BUILD_TYPE}" : "${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}"}"
    }
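    // Worked example with hypothetical values (not taken from this commit): for
    // BUILD_TYPE "Release", gitlabSourceBranch "0.5.0" and a gitlabAfter commit
    // starting "b81154...", SUFFIX_VERSION_NAME becomes "b81154" and
    // DOCKER_VERSION_STR resolves to "0.5.0-release-b81154". When gitlabAfter is
    // null (e.g. a manually parameterized build), the tag falls back to "0.5.0-release".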

    stages {
        stage("Ubuntu 16.04") {
            environment {
                PACKAGE_VERSION = VersionNumber([
                    versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
                ]);

                DOCKER_VERSION = VersionNumber([
                    versionNumberString : '${DOCKER_VERSION_STR}'
                ]);
            }

            stages {
                stage("Run Build") {
                    agent {
                        kubernetes {
                            cloud 'build-kubernetes'
                            label 'build'
                            defaultContainer 'jnlp'
                            yaml """
apiVersion: v1
kind: Pod
metadata:
  name: milvus-build-env
  labels:
    app: milvus
    component: build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-build-env:v0.13
    env:
    - name: POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    command:
    - cat
    tty: true
    resources:
      limits:
        memory: "28Gi"
        cpu: "10.0"
        nvidia.com/gpu: 1
      requests:
        memory: "14Gi"
        cpu: "5.0"
  - name: milvus-mysql
    image: mysql:5.6
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: "123456"
    ports:
    - containerPort: 3306
      name: mysql
"""
                        }
                    }
                    stages {
                        stage('Build') {
                            steps {
                                gitlabCommitStatus(name: 'Build') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build_no_ut.groovy"
                                        load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus_no_ut.groovy"
                                    }
                                }
                            }
                        }
                    }
                    post {
                        aborted {
                            script {
                                updateGitlabCommitStatus name: 'Build', state: 'canceled'
                                echo "Milvus Build aborted!"
                            }
                        }

                        failure {
                            script {
                                updateGitlabCommitStatus name: 'Build', state: 'failed'
                                echo "Milvus Build failure!"
                            }
                        }
                    }
                }

                stage("Publish docker and helm") {
                    agent {
                        kubernetes {
                            label 'publish'
                            defaultContainer 'jnlp'
                            yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-docker
    image: registry.zilliz.com/library/zilliz_docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
"""
                        }
                    }
                    stages {
                        stage('Publish Docker') {
                            steps {
                                gitlabCommitStatus(name: 'Publish Docker') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkinsfile/publish_docker.groovy"
                                    }
                                }
                            }
                        }
                    }
                    post {
                        aborted {
                            script {
                                updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
                                echo "Milvus Publish Docker aborted!"
                            }
                        }

                        failure {
                            script {
                                updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
                                echo "Milvus Publish Docker failure!"
                            }
                        }
                    }
                }

                stage("Deploy to Development") {
                    parallel {
                        stage("Single Node") {
                            agent {
                                kubernetes {
                                    label 'dev-test'
                                    defaultContainer 'jnlp'
                                    yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                                }
                            }

                            stages {
                                stage("Deploy to Dev") {
                                    steps {
                                        gitlabCommitStatus(name: 'Deploy to Dev') {
                                            container('milvus-testframework') {
                                                script {
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
                                                }
                                            }
                                        }
                                    }
                                }
                                stage("Dev Test") {
                                    steps {
                                        gitlabCommitStatus(name: 'Deploy Test') {
                                            container('milvus-testframework') {
                                                script {
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
                                                }
                                            }
                                        }
                                    }
                                }
                                stage ("Cleanup Dev") {
                                    steps {
                                        gitlabCommitStatus(name: 'Cleanup Dev') {
                                            container('milvus-testframework') {
                                                script {
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                            post {
                                always {
                                    container('milvus-testframework') {
                                        script {
                                            load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                                        }
                                    }
                                }
                                success {
                                    script {
                                        echo "Milvus Single Node CI/CD success!"
                                    }
                                }
                                aborted {
                                    script {
                                        echo "Milvus Single Node CI/CD aborted!"
                                    }
                                }
                                failure {
                                    script {
                                        echo "Milvus Single Node CI/CD failure!"
                                    }
                                }
                            }
                        }

                        // stage("Cluster") {
                        //     agent {
                        //         kubernetes {
                        //             label 'dev-test'
                        //             defaultContainer 'jnlp'
                        //             yaml """
                        // apiVersion: v1
                        // kind: Pod
                        // metadata:
                        //   labels:
                        //     app: milvus
                        //     component: test
                        // spec:
                        //   containers:
                        //   - name: milvus-testframework
                        //     image: registry.zilliz.com/milvus/milvus-test:v0.2
                        //     command:
                        //     - cat
                        //     tty: true
                        //     volumeMounts:
                        //     - name: kubeconf
                        //       mountPath: /root/.kube/
                        //       readOnly: true
                        //   volumes:
                        //   - name: kubeconf
                        //     secret:
                        //       secretName: test-cluster-config
                        //             """
                        //         }
                        //     }
                        //     stages {
                        //         stage("Deploy to Dev") {
                        //             steps {
                        //                 gitlabCommitStatus(name: 'Deploy to Dev') {
                        //                     container('milvus-testframework') {
                        //                         script {
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
                        //                         }
                        //                     }
                        //                 }
                        //             }
                        //         }
                        //         stage("Dev Test") {
                        //             steps {
                        //                 gitlabCommitStatus(name: 'Deploy Test') {
                        //                     container('milvus-testframework') {
                        //                         script {
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
                        //                         }
                        //                     }
                        //                 }
                        //             }
                        //         }
                        //         stage ("Cleanup Dev") {
                        //             steps {
                        //                 gitlabCommitStatus(name: 'Cleanup Dev') {
                        //                     container('milvus-testframework') {
                        //                         script {
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                        //                         }
                        //                     }
                        //                 }
                        //             }
                        //         }
                        //     }
                        //     post {
                        //         always {
                        //             container('milvus-testframework') {
                        //                 script {
                        //                     load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                        //                 }
                        //             }
                        //         }
                        //         success {
                        //             script {
                        //                 echo "Milvus Cluster CI/CD success!"
                        //             }
                        //         }
                        //         aborted {
                        //             script {
                        //                 echo "Milvus Cluster CI/CD aborted!"
                        //             }
                        //         }
                        //         failure {
                        //             script {
                        //                 echo "Milvus Cluster CI/CD failure!"
                        //             }
                        //         }
                        //     }
                        // }
                    }
                }
            }
        }
    }

    post {
        always {
            script {
                if (env.gitlabAfter != null) {
                    if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                        // Send an email only if the build status has changed from green/unstable to red
                        emailext subject: '$DEFAULT_SUBJECT',
                            body: '$DEFAULT_CONTENT',
                            recipientProviders: [
                                [$class: 'DevelopersRecipientProvider'],
                                [$class: 'RequesterRecipientProvider']
                            ],
                            replyTo: '$DEFAULT_REPLYTO',
                            to: '$DEFAULT_RECIPIENTS'
                    }
                }
            }
        }

        success {
            script {
                updateGitlabCommitStatus name: 'CI/CD', state: 'success'
                echo "Milvus CI/CD success!"
            }
        }

        aborted {
            script {
                updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
                echo "Milvus CI/CD aborted!"
            }
        }

        failure {
            script {
                updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
                echo "Milvus CI/CD failure!"
            }
        }
    }
}

@@ -0,0 +1,478 @@
pipeline {
    agent none

    options {
        timestamps()
    }

    environment {
        PROJECT_NAME = "milvus"
        LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
        SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
        GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
        SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
        DOCKER_VERSION_STR = "${env.gitlabAfter == null ? '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, \"yyyyMMdd\"}' : '${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}'}"
    }

    stages {
        stage("Ubuntu 16.04") {
            environment {
                PACKAGE_VERSION = VersionNumber([
                    versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
                ]);

                DOCKER_VERSION = VersionNumber([
                    versionNumberString : '${DOCKER_VERSION_STR}'
                ]);
            }

            stages {
                stage("Run Build") {
                    agent {
                        kubernetes {
                            cloud 'build-kubernetes'
                            label 'build'
                            defaultContainer 'jnlp'
                            yaml """
apiVersion: v1
kind: Pod
metadata:
  name: milvus-build-env
  labels:
    app: milvus
    component: build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-build-env:v0.13
    command:
    - cat
    tty: true
    resources:
      limits:
        memory: "28Gi"
        cpu: "10.0"
        nvidia.com/gpu: 1
      requests:
        memory: "14Gi"
        cpu: "5.0"
"""
                        }
                    }
                    stages {
                        stage('Build') {
                            steps {
                                gitlabCommitStatus(name: 'Build') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build.groovy"
                                        load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus.groovy"
                                    }
                                }
                            }
                        }
                    }
                    post {
                        aborted {
                            script {
                                updateGitlabCommitStatus name: 'Build', state: 'canceled'
                                echo "Milvus Build aborted!"
                            }
                        }

                        failure {
                            script {
                                updateGitlabCommitStatus name: 'Build', state: 'failed'
                                echo "Milvus Build failure!"
                            }
                        }
                    }
                }

                stage("Publish docker and helm") {
                    agent {
                        kubernetes {
                            label 'publish'
                            defaultContainer 'jnlp'
                            yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-docker
    image: registry.zilliz.com/library/zilliz_docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
"""
                        }
                    }
                    stages {
                        stage('Publish Docker') {
                            steps {
                                gitlabCommitStatus(name: 'Publish Docker') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkinsfile/nightly_publish_docker.groovy"
                                    }
                                }
                            }
                        }
                    }
                    post {
                        aborted {
                            script {
                                updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
                                echo "Milvus Publish Docker aborted!"
                            }
                        }

                        failure {
                            script {
                                updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
                                echo "Milvus Publish Docker failure!"
                            }
                        }
                    }
                }

                stage("Deploy to Development") {
                    parallel {
                        stage("Single Node") {
                            agent {
                                kubernetes {
                                    label 'dev-test'
                                    defaultContainer 'jnlp'
                                    yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                                }
                            }

                            stages {
                                stage("Deploy to Dev") {
                                    steps {
                                        gitlabCommitStatus(name: 'Deploy to Dev') {
                                            container('milvus-testframework') {
                                                script {
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
                                                }
                                            }
                                        }
                                    }
                                }
                                stage("Dev Test") {
                                    steps {
                                        gitlabCommitStatus(name: 'Deploy Test') {
                                            container('milvus-testframework') {
                                                script {
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/dev_test_all.groovy"
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
                                                }
                                            }
                                        }
                                    }
                                }
                                stage ("Cleanup Dev") {
                                    steps {
                                        gitlabCommitStatus(name: 'Cleanup Dev') {
                                            container('milvus-testframework') {
                                                script {
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                            post {
                                always {
                                    container('milvus-testframework') {
                                        script {
                                            load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                                        }
                                    }
                                }
                                success {
                                    script {
                                        echo "Milvus Deploy to Dev Single Node CI/CD success!"
                                    }
                                }
                                aborted {
                                    script {
                                        echo "Milvus Deploy to Dev Single Node CI/CD aborted!"
                                    }
                                }
                                failure {
                                    script {
                                        echo "Milvus Deploy to Dev Single Node CI/CD failure!"
                                    }
                                }
                            }
                        }

                        // stage("Cluster") {
                        //     agent {
                        //         kubernetes {
                        //             label 'dev-test'
                        //             defaultContainer 'jnlp'
                        //             yaml """
                        // apiVersion: v1
                        // kind: Pod
                        // metadata:
                        //   labels:
                        //     app: milvus
                        //     component: test
                        // spec:
                        //   containers:
                        //   - name: milvus-testframework
                        //     image: registry.zilliz.com/milvus/milvus-test:v0.2
                        //     command:
                        //     - cat
                        //     tty: true
                        //     volumeMounts:
                        //     - name: kubeconf
                        //       mountPath: /root/.kube/
                        //       readOnly: true
                        //   volumes:
                        //   - name: kubeconf
                        //     secret:
                        //       secretName: test-cluster-config
                        //             """
                        //         }
                        //     }
                        //     stages {
                        //         stage("Deploy to Dev") {
                        //             steps {
                        //                 gitlabCommitStatus(name: 'Deploy to Dev') {
                        //                     container('milvus-testframework') {
                        //                         script {
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
                        //                         }
                        //                     }
                        //                 }
                        //             }
                        //         }
                        //         stage("Dev Test") {
                        //             steps {
                        //                 gitlabCommitStatus(name: 'Deploy Test') {
                        //                     container('milvus-testframework') {
                        //                         script {
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
                        //                         }
                        //                     }
                        //                 }
                        //             }
                        //         }
                        //         stage ("Cleanup Dev") {
                        //             steps {
                        //                 gitlabCommitStatus(name: 'Cleanup Dev') {
                        //                     container('milvus-testframework') {
                        //                         script {
                        //                             load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                        //                         }
                        //                     }
                        //                 }
                        //             }
                        //         }
                        //     }
                        //     post {
                        //         always {
                        //             container('milvus-testframework') {
                        //                 script {
                        //                     load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
                        //                 }
                        //             }
                        //         }
                        //         success {
                        //             script {
                        //                 echo "Milvus Deploy to Dev Cluster CI/CD success!"
                        //             }
                        //         }
                        //         aborted {
                        //             script {
                        //                 echo "Milvus Deploy to Dev Cluster CI/CD aborted!"
                        //             }
                        //         }
                        //         failure {
                        //             script {
                        //                 echo "Milvus Deploy to Dev Cluster CI/CD failure!"
                        //             }
                        //         }
                        //     }
                        // }
                    }
                }

                stage("Deploy to Staging") {
                    parallel {
                        stage("Single Node") {
                            agent {
                                kubernetes {
                                    label 'dev-test'
                                    defaultContainer 'jnlp'
                                    yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: aks-gpu-cluster-config
"""
                                }
                            }

                            stages {
                                stage("Deploy to Staging") {
                                    steps {
                                        gitlabCommitStatus(name: 'Deploy to Staging') {
                                            container('milvus-testframework') {
                                                script {
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/deploy2staging.groovy"
                                                }
                                            }
                                        }
                                    }
                                }
                                stage("Staging Test") {
                                    steps {
                                        gitlabCommitStatus(name: 'Staging Test') {
                                            container('milvus-testframework') {
                                                script {
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/staging_test.groovy"
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/upload_staging_test_out.groovy"
                                                }
                                            }
                                        }
                                    }
                                }
                                stage ("Cleanup Staging") {
                                    steps {
                                        gitlabCommitStatus(name: 'Cleanup Staging') {
                                            container('milvus-testframework') {
                                                script {
                                                    load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_staging.groovy"
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                            post {
                                always {
                                    container('milvus-testframework') {
                                        script {
                                            load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_staging.groovy"
                                        }
                                    }
                                }
                                success {
                                    script {
                                        echo "Milvus Deploy to Staging Single Node CI/CD success!"
                                    }
                                }
                                aborted {
                                    script {
                                        echo "Milvus Deploy to Staging Single Node CI/CD aborted!"
                                    }
                                }
                                failure {
                                    script {
                                        echo "Milvus Deploy to Staging Single Node CI/CD failure!"
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    post {
        always {
            script {
                if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                    // Send an email only if the build status has changed from green/unstable to red
                    emailext subject: '$DEFAULT_SUBJECT',
                        body: '$DEFAULT_CONTENT',
                        recipientProviders: [
                            [$class: 'DevelopersRecipientProvider'],
                            [$class: 'RequesterRecipientProvider']
                        ],
                        replyTo: '$DEFAULT_REPLYTO',
                        to: '$DEFAULT_RECIPIENTS'
                }
            }
        }

        success {
            script {
                updateGitlabCommitStatus name: 'CI/CD', state: 'success'
                echo "Milvus CI/CD success!"
            }
        }

        aborted {
            script {
                updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
                echo "Milvus CI/CD aborted!"
            }
        }

        failure {
            script {
                updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
                echo "Milvus CI/CD failure!"
            }
        }
    }
}

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-build-env:v0.9
    command:
    - cat
    tty: true

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: testframework
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.1
    command:
    - cat
    tty: true

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-docker
    image: registry.zilliz.com/library/zilliz_docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
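# Mounting the node's /var/run/docker.sock (the hostPath volume above) gives this pod
# direct access to the host Docker daemon; that is why the publish container also runs
# with privileged: true. Images built or pushed this way run on the node itself rather
# than in an isolated sandbox.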

@@ -0,0 +1,14 @@
# Configuration File for CodeCov
coverage:
  precision: 2
  round: down
  range: "70...100"

  status:
    project: on
    patch: yes
    changes: no

comment:
  layout: "header, diff, changes, tree"
  behavior: default
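# As far as Codecov's documented semantics go: "range: 70...100" sets the red-to-green
# color scale for reports, "precision"/"round" control how the coverage percentage is
# displayed, and "layout" selects which sections appear in the pull-request comment.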

@@ -1,11 +1,12 @@
 #!/bin/bash
 
+BUILD_OUTPUT_DIR="cmake_build"
 BUILD_TYPE="Debug"
 BUILD_UNITTEST="OFF"
 INSTALL_PREFIX=$(pwd)/milvus
 MAKE_CLEAN="OFF"
 BUILD_COVERAGE="OFF"
-DB_PATH="/opt/milvus"
+DB_PATH="/tmp/milvus"
 PROFILING="OFF"
 USE_JFROG_CACHE="OFF"
 RUN_CPPLINT="OFF"

@@ -40,8 +41,8 @@ do
             RUN_CPPLINT="ON"
             ;;
         r)
-            if [[ -d cmake_build ]]; then
-                rm ./cmake_build -r
+            if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
+                rm ./${BUILD_OUTPUT_DIR} -r
                 MAKE_CLEAN="ON"
             fi
             ;;

@@ -62,7 +63,7 @@ do
 
 parameter:
 -p: install prefix(default: $(pwd)/milvus)
--d: db path(default: /opt/milvus)
+-d: db data path(default: /tmp/milvus)
 -t: build type(default: Debug)
 -u: building unit test options(default: OFF)
 -l: run cpplint, clang-format and clang-tidy(default: OFF)

@@ -84,11 +85,15 @@ usage:
     esac
 done
 
-if [[ ! -d cmake_build ]]; then
-    mkdir cmake_build
+if [[ ! -d ${BUILD_OUTPUT_DIR} ]]; then
+    mkdir ${BUILD_OUTPUT_DIR}
 fi
 
-cd cmake_build
+cd ${BUILD_OUTPUT_DIR}
 
+# remove make cache since build.sh -l uses default variables
+# force update the variables each time
+make rebuild_cache
 
 CMAKE_CMD="cmake \
 -DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
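
For orientation, a hedged example invocation assembled only from the option letters and defaults visible in the usage text above. Whether -u and -l take arguments is not shown in this hunk, so they are assumed to be plain switches:

# Assumed flag forms; the option letters and defaults come from the usage text above.
./build.sh -t Release -p $(pwd)/milvus -d /tmp/milvus -u -l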

@@ -56,7 +56,7 @@ define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD
     "Show output from ExternalProjects rather than just logging to files" ON)
 
 define_option(MILVUS_BOOST_VENDORED "Use vendored Boost instead of existing Boost. \
-Note that this requires linking Boost statically" ON)
+Note that this requires linking Boost statically" OFF)
 
 define_option(MILVUS_BOOST_HEADER_ONLY "Use only BOOST headers" OFF)

@@ -16,7 +16,6 @@ db_config:
 
   insert_buffer_size: 4          # GB, maximum insert buffer size allowed
                                  # sum of insert_buffer_size and cpu_cache_capacity cannot exceed total memory
-  build_index_gpu: 0             # gpu id used for building index
 
   preload_table:                 # preload data at startup, '*' means load all tables, empty value means no preload
                                  # you can specify preload tables like this: table1,table2,table3

@@ -30,6 +29,8 @@ metric_config:
 
 cache_config:
   cpu_cache_capacity: 16         # GB, CPU memory used for cache
   cpu_cache_threshold: 0.85      # percentage of data that will be kept when cache cleanup is triggered
+  gpu_cache_capacity: 4          # GB, GPU memory used for cache
+  gpu_cache_threshold: 0.85      # percentage of data that will be kept when cache cleanup is triggered
   cache_insert_data: false       # whether to load inserted data into cache
 
 engine_config:

@@ -37,6 +38,6 @@ engine_config:
                                  # if nq >= use_blas_threshold, use OpenBlas, slower with stable response times
 
 resource_config:
-  resource_pool:
-    - cpu
+  search_resources:              # define the GPUs used for search computation, valid value: gpux
     - gpu0
+  index_build_device: gpu0       # GPU used for building index
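
A hedged illustration of the new resource section (not from this commit): on a machine with two GPUs, where gpu1 is assumed hardware, search and index building could be split across devices like this:

resource_config:
  search_resources:              # define the GPUs used for search computation, valid value: gpux
    - gpu0
    - gpu1
  index_build_device: gpu1       # build index on the second GPU (hypothetical layout)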

@@ -114,15 +114,15 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
     "/usr/*" \
     "*/boost/*" \
     "*/cmake_build/*_ep-prefix/*" \
-    "src/index/cmake_build*" \
-    "src/index/thirdparty*" \
-    "src/grpc*"\
-    "src/metrics/MetricBase.h"\
-    "src/server/Server.cpp"\
-    "src/server/DBWrapper.cpp"\
-    "src/server/grpc_impl/GrpcServer.cpp"\
-    "src/utils/easylogging++.h"\
-    "src/utils/easylogging++.cc"\
+    "*/src/index/cmake_build*" \
+    "*/src/index/thirdparty*" \
+    "*/src/grpc*" \
+    "*/src/metrics/MetricBase.h" \
+    "*/src/server/Server.cpp" \
+    "*/src/server/DBWrapper.cpp" \
+    "*/src/server/grpc_impl/GrpcServer.cpp" \
+    "*/src/utils/easylogging++.h" \
+    "*/src/utils/easylogging++.cc"
 
 # gen html report
 ${LCOV_GEN_CMD} "${FILE_INFO_OUTPUT_NEW}" --output-directory ${DIR_LCOV_OUTPUT}/

@@ -96,9 +96,9 @@ set(prometheus_lib
     )
 
 set(boost_lib
-    boost_system_static
-    boost_filesystem_static
-    boost_serialization_static
+    libboost_system.a
+    libboost_filesystem.a
+    libboost_serialization.a
     )
 
 set(cuda_lib
@@ -65,7 +65,7 @@ ExecutionEngineImpl::ExecutionEngineImpl(uint16_t dimension, const std::string&
     : location_(location), dim_(dimension), index_type_(index_type), metric_type_(metric_type), nlist_(nlist) {
     index_ = CreatetVecIndex(EngineType::FAISS_IDMAP);
     if (!index_) {
-        throw Exception(DB_ERROR, "Could not create VecIndex");
+        throw Exception(DB_ERROR, "Unsupported index type");
     }
 
     TempMetaConf temp_conf;

@@ -111,7 +111,7 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
             break;
         }
         default: {
-            ENGINE_LOG_ERROR << "Invalid engine type";
+            ENGINE_LOG_ERROR << "Unsupported index type";
             return nullptr;
         }
     }

@@ -124,6 +124,11 @@ ExecutionEngineImpl::HybridLoad() const {
         return;
     }
 
+    if (index_->GetType() == IndexType::FAISS_IDMAP) {
+        ENGINE_LOG_WARNING << "HybridLoad with type FAISS_IDMAP, ignore";
+        return;
+    }
+
     const std::string key = location_ + ".quantizer";
     std::vector<uint64_t> gpus = scheduler::get_gpu_pool();

@@ -164,6 +169,9 @@ ExecutionEngineImpl::HybridLoad() const {
         quantizer_conf->mode = 1;
         quantizer_conf->gpu_id = best_device_id;
         auto quantizer = index_->LoadQuantizer(quantizer_conf);
+        if (quantizer == nullptr) {
+            ENGINE_LOG_ERROR << "quantizer is nullptr";
+        }
         index_->SetQuantizer(quantizer);
         auto cache_quantizer = std::make_shared<CachedQuantizer>(quantizer);
         cache::GpuCacheMgr::GetInstance(best_device_id)->InsertItem(key, cache_quantizer);

@@ -175,6 +183,9 @@ ExecutionEngineImpl::HybridUnset() const {
     if (index_type_ != EngineType::FAISS_IVFSQ8H) {
         return;
     }
+    if (index_->GetType() == IndexType::FAISS_IDMAP) {
+        return;
+    }
     index_->UnsetQuantizer();
 }

@@ -373,7 +384,7 @@ ExecutionEngineImpl::BuildIndex(const std::string& location, EngineType engine_t
 
     auto to_index = CreatetVecIndex(engine_type);
     if (!to_index) {
-        throw Exception(DB_ERROR, "Could not create VecIndex");
+        throw Exception(DB_ERROR, "Unsupported index type");
     }
 
     TempMetaConf temp_conf;

@@ -397,6 +408,7 @@ ExecutionEngineImpl::BuildIndex(const std::string& location, EngineType engine_t
 Status
 ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, int64_t nprobe, float* distances, int64_t* labels,
                             bool hybrid) {
+#if 0
     if (index_type_ == EngineType::FAISS_IVFSQ8H) {
         if (!hybrid) {
             const std::string key = location_ + ".quantizer";

@@ -449,6 +461,7 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, int64_t npr
             }
         }
     }
+#endif
 
     if (index_ == nullptr) {
         ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to search";

@@ -501,7 +514,7 @@ ExecutionEngineImpl::GpuCache(uint64_t gpu_id) {
 Status
 ExecutionEngineImpl::Init() {
     server::Config& config = server::Config::GetInstance();
-    Status s = config.GetDBConfigBuildIndexGPU(gpu_num_);
+    Status s = config.GetResourceConfigIndexBuildDevice(gpu_num_);
     if (!s.ok()) {
         return s;
     }
@@ -243,7 +243,8 @@ if(CUSTOMIZATION)
     # set(FAISS_MD5 "57da9c4f599cc8fa4260488b1c96e1cc") # commit-id 6dbdf75987c34a2c853bd172ea0d384feea8358c branch-0.2.0
     # set(FAISS_MD5 "21deb1c708490ca40ecb899122c01403") # commit-id 643e48f479637fd947e7b93fa4ca72b38ecc9a39 branch-0.2.0
     # set(FAISS_MD5 "072db398351cca6e88f52d743bbb9fa0") # commit-id 3a2344d04744166af41ef1a74449d68a315bfe17 branch-0.2.1
-    set(FAISS_MD5 "c89ea8e655f5cdf58f42486f13614714") # commit-id 9c28a1cbb88f41fa03b03d7204106201ad33276b branch-0.2.1
+    # set(FAISS_MD5 "c89ea8e655f5cdf58f42486f13614714") # commit-id 9c28a1cbb88f41fa03b03d7204106201ad33276b branch-0.2.1
+    set(FAISS_MD5 "87fdd86351ffcaf3f80dc26ade63c44b") # commit-id 841a156e67e8e22cd8088e1b58c00afbf2efc30b branch-0.2.1
 endif()
 else()
     set(FAISS_SOURCE_URL "https://github.com/facebookresearch/faiss/archive/v1.5.3.tar.gz")
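
When the pinned customized-faiss commit is bumped, the FAISS_MD5 checksum above has to be recomputed from the new source archive. A hedged sketch of that step; the archive location is a placeholder, since the real URL is not part of this hunk:

# Print the md5 of the pinned archive and paste it into FAISS_MD5 above.
curl -sL "<faiss-archive-url-for-commit-841a156e>.tar.gz" | md5sum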

@@ -24,17 +24,21 @@
 #include <faiss/IndexIVFPQ.h>
 #include <faiss/gpu/GpuAutoTune.h>
 #include <faiss/index_io.h>
+#include <chrono>
 #include <memory>
 #include <utility>
 #include <vector>
 
 #include "knowhere/adapter/VectorAdapter.h"
 #include "knowhere/common/Exception.h"
+#include "knowhere/common/Log.h"
 #include "knowhere/index/vector_index/IndexGPUIVF.h"
 #include "knowhere/index/vector_index/IndexIVF.h"
 
 namespace knowhere {
 
+using stdclock = std::chrono::high_resolution_clock;
+
 IndexModelPtr
 IVF::Train(const DatasetPtr& dataset, const Config& config) {
     auto build_cfg = std::dynamic_pointer_cast<IVFCfg>(config);

@@ -216,7 +220,15 @@ IVF::GenGraph(const int64_t& k, Graph& graph, const DatasetPtr& dataset, const C
 void
 IVF::search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) {
     auto params = GenParams(cfg);
+    stdclock::time_point before = stdclock::now();
     faiss::ivflib::search_with_parameters(index_.get(), n, (float*)data, k, distances, labels, params.get());
+    stdclock::time_point after = stdclock::now();
+    double search_cost = (std::chrono::duration<double, std::micro>(after - before)).count();
+    KNOWHERE_LOG_DEBUG << "IVF search cost: " << search_cost
+                       << ", quantization cost: " << faiss::indexIVF_stats.quantization_time
+                       << ", data search cost: " << faiss::indexIVF_stats.search_time;
+    faiss::indexIVF_stats.quantization_time = 0;
+    faiss::indexIVF_stats.search_time = 0;
 }
 
 VectorIndexPtr
@@ -189,6 +189,8 @@ IVFSQHybrid::LoadData(const knowhere::QuantizerPtr& q, const Config& conf) {
         if (quantizer_conf->mode != 2) {
             KNOWHERE_THROW_MSG("mode only support 2 in this func");
         }
+    } else {
+        KNOWHERE_THROW_MSG("conf error");
     }
     //     if (quantizer_conf->gpu_id != gpu_id_) {
     //         KNOWHERE_THROW_MSG("quantizer and data must on the same gpu card");
@@ -63,7 +63,7 @@ FaissGpuResourceMgr::InitResource() {
 
         mutex_cache_.emplace(device_id, std::make_unique<std::mutex>());
 
-        // std::cout << "Device Id: " << device_id << std::endl;
+        // std::cout << "Device Id: " << DEVICEID << std::endl;
         auto& device_param = device.second;
         auto& bq = idle_map_[device_id];

@@ -119,7 +119,7 @@ void
 FaissGpuResourceMgr::Dump() {
     for (auto& item : idle_map_) {
         auto& bq = item.second;
-        std::cout << "device_id: " << item.first << ", resource count:" << bq.Size();
+        std::cout << "DEVICEID: " << item.first << ", resource count:" << bq.Size();
     }
 }

@@ -73,9 +73,17 @@ target_link_libraries(test_kdt
     SPTAGLibStatic
     ${depend_libs} ${unittest_libs} ${basic_libs})
 
+add_executable(test_gpuresource test_gpuresource.cpp ${util_srcs} ${ivf_srcs})
+target_link_libraries(test_gpuresource ${depend_libs} ${unittest_libs} ${basic_libs})
+
+add_executable(test_customized_index test_customized_index.cpp ${util_srcs} ${ivf_srcs})
+target_link_libraries(test_customized_index ${depend_libs} ${unittest_libs} ${basic_libs})
+
 install(TARGETS test_ivf DESTINATION unittest)
 install(TARGETS test_idmap DESTINATION unittest)
 install(TARGETS test_kdt DESTINATION unittest)
+install(TARGETS test_gpuresource DESTINATION unittest)
+install(TARGETS test_customized_index DESTINATION unittest)
 
 #add_subdirectory(faiss_ori)
 add_subdirectory(test_nsg)
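
Assuming a build with unit tests enabled (build.sh -u, per the script above), the two new binaries land in the install tree's unittest/ directory via the install() rules and run standalone; --gtest_filter is standard googletest:

./unittest/test_gpuresource
./unittest/test_customized_index --gtest_filter=SingleIndexTest.IVFSQHybrid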

@@ -0,0 +1,120 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <memory>
#include <string>

#include "knowhere/index/vector_index/IndexGPUIVF.h"
#include "knowhere/index/vector_index/IndexGPUIVFPQ.h"
#include "knowhere/index/vector_index/IndexGPUIVFSQ.h"
#include "knowhere/index/vector_index/IndexIVF.h"
#include "knowhere/index/vector_index/IndexIVFPQ.h"
#include "knowhere/index/vector_index/IndexIVFSQ.h"
#include "knowhere/index/vector_index/IndexIVFSQHybrid.h"

constexpr int DEVICEID = 0;
constexpr int64_t DIM = 128;
constexpr int64_t NB = 10000;
constexpr int64_t NQ = 10;
constexpr int64_t K = 10;
constexpr int64_t PINMEM = 1024 * 1024 * 200;
constexpr int64_t TEMPMEM = 1024 * 1024 * 300;
constexpr int64_t RESNUM = 2;

knowhere::IVFIndexPtr
IndexFactory(const std::string& type) {
    if (type == "IVF") {
        return std::make_shared<knowhere::IVF>();
    } else if (type == "IVFPQ") {
        return std::make_shared<knowhere::IVFPQ>();
    } else if (type == "GPUIVF") {
        return std::make_shared<knowhere::GPUIVF>(DEVICEID);
    } else if (type == "GPUIVFPQ") {
        return std::make_shared<knowhere::GPUIVFPQ>(DEVICEID);
    } else if (type == "IVFSQ") {
        return std::make_shared<knowhere::IVFSQ>();
    } else if (type == "GPUIVFSQ") {
        return std::make_shared<knowhere::GPUIVFSQ>(DEVICEID);
    } else if (type == "IVFSQHybrid") {
        return std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
    }
    // unknown index type: return null explicitly so callers can detect the failure
    return nullptr;
}

enum class ParameterType {
    ivf,
    ivfpq,
    ivfsq,
};

class ParamGenerator {
 public:
    static ParamGenerator&
    GetInstance() {
        static ParamGenerator instance;
        return instance;
    }

    knowhere::Config
    Gen(const ParameterType& type) {
        if (type == ParameterType::ivf) {
            auto tempconf = std::make_shared<knowhere::IVFCfg>();
            tempconf->d = DIM;
            tempconf->gpu_id = DEVICEID;
            tempconf->nlist = 100;
            tempconf->nprobe = 4;
            tempconf->k = K;
            tempconf->metric_type = knowhere::METRICTYPE::L2;
            return tempconf;
        } else if (type == ParameterType::ivfpq) {
            auto tempconf = std::make_shared<knowhere::IVFPQCfg>();
            tempconf->d = DIM;
            tempconf->gpu_id = DEVICEID;
            tempconf->nlist = 100;
            tempconf->nprobe = 4;
            tempconf->k = K;
            tempconf->m = 4;
            tempconf->nbits = 8;
            tempconf->metric_type = knowhere::METRICTYPE::L2;
            return tempconf;
        } else if (type == ParameterType::ivfsq) {
            auto tempconf = std::make_shared<knowhere::IVFSQCfg>();
            tempconf->d = DIM;
            tempconf->gpu_id = DEVICEID;
            tempconf->nlist = 100;
            tempconf->nprobe = 4;
            tempconf->k = K;
            tempconf->nbits = 8;
            tempconf->metric_type = knowhere::METRICTYPE::L2;
            return tempconf;
        }
        // unrecognized parameter type: return an empty config instead of falling off
        // the end (assumes knowhere::Config is pointer-like, as the returns above suggest)
        return nullptr;
    }
};

#include <gtest/gtest.h>

class TestGpuIndexBase : public ::testing::Test {
 protected:
    void
    SetUp() override {
        knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, PINMEM, TEMPMEM, RESNUM);
    }

    void
    TearDown() override {
        knowhere::FaissGpuResourceMgr::GetInstance().Free();
    }
};

@@ -0,0 +1,122 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <gtest/gtest.h>

#include "unittest/Helper.h"
#include "unittest/utils.h"

class SingleIndexTest : public DataGen, public TestGpuIndexBase {
 protected:
    void
    SetUp() override {
        TestGpuIndexBase::SetUp();
        Generate(DIM, NB, NQ);
        k = K;
    }

    void
    TearDown() override {
        TestGpuIndexBase::TearDown();
    }

 protected:
    std::string index_type;
    knowhere::IVFIndexPtr index_ = nullptr;
};

#ifdef CUSTOMIZATION
TEST_F(SingleIndexTest, IVFSQHybrid) {
    assert(!xb.empty());

    index_type = "IVFSQHybrid";
    index_ = IndexFactory(index_type);
    auto conf = ParamGenerator::GetInstance().Gen(ParameterType::ivfsq);
    auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
    index_->set_preprocessor(preprocessor);

    auto model = index_->Train(base_dataset, conf);
    index_->set_index_model(model);
    index_->Add(base_dataset, conf);
    EXPECT_EQ(index_->Count(), nb);
    EXPECT_EQ(index_->Dimension(), dim);

    auto binaryset = index_->Serialize();
    {
        // copy cpu to gpu
        auto cpu_idx = std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
        cpu_idx->Load(binaryset);

        {
            for (int i = 0; i < 3; ++i) {
                auto gpu_idx = cpu_idx->CopyCpuToGpu(DEVICEID, conf);
                auto result = gpu_idx->Search(query_dataset, conf);
                AssertAnns(result, nq, conf->k);
                // PrintResult(result, nq, k);
            }
        }
    }

    {
        // quantization already in gpu, only copy data
        auto cpu_idx = std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
        cpu_idx->Load(binaryset);

        auto pair = cpu_idx->CopyCpuToGpuWithQuantizer(DEVICEID, conf);
        auto gpu_idx = pair.first;
        auto quantization = pair.second;

        auto result = gpu_idx->Search(query_dataset, conf);
        AssertAnns(result, nq, conf->k);
        // PrintResult(result, nq, k);

        auto quantizer_conf = std::make_shared<knowhere::QuantizerCfg>();
        quantizer_conf->mode = 2;  // only copy data
        quantizer_conf->gpu_id = DEVICEID;
        for (int i = 0; i < 2; ++i) {
            auto hybrid_idx = std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
            hybrid_idx->Load(binaryset);

            auto new_idx = hybrid_idx->LoadData(quantization, quantizer_conf);
            auto result = new_idx->Search(query_dataset, conf);
            AssertAnns(result, nq, conf->k);
            // PrintResult(result, nq, k);
        }
    }

    {
        // quantization already in gpu, only set quantization
        auto cpu_idx = std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
        cpu_idx->Load(binaryset);

        auto pair = cpu_idx->CopyCpuToGpuWithQuantizer(DEVICEID, conf);
        auto quantization = pair.second;

        for (int i = 0; i < 2; ++i) {
            auto hybrid_idx = std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
            hybrid_idx->Load(binaryset);

            hybrid_idx->SetQuantizer(quantization);
            auto result = hybrid_idx->Search(query_dataset, conf);
            AssertAnns(result, nq, conf->k);
            // PrintResult(result, nq, k);
            hybrid_idx->UnsetQuantizer();
        }
    }
}

#endif

@@ -0,0 +1,309 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <gtest/gtest.h>

#include <iostream>
#include <thread>

#include <faiss/AutoTune.h>
#include <faiss/gpu/GpuAutoTune.h>
#include <faiss/gpu/GpuIndexIVFFlat.h>

#include "knowhere/common/Exception.h"
#include "knowhere/common/Timer.h"
#include "knowhere/index/vector_index/IndexGPUIVF.h"
#include "knowhere/index/vector_index/IndexGPUIVFPQ.h"
#include "knowhere/index/vector_index/IndexGPUIVFSQ.h"
#include "knowhere/index/vector_index/IndexIVF.h"
#include "knowhere/index/vector_index/IndexIVFPQ.h"
#include "knowhere/index/vector_index/IndexIVFSQ.h"
#include "knowhere/index/vector_index/IndexIVFSQHybrid.h"
#include "knowhere/index/vector_index/helpers/Cloner.h"

#include "unittest/Helper.h"
#include "unittest/utils.h"

class GPURESTEST : public DataGen, public TestGpuIndexBase {
 protected:
    void
    SetUp() override {
        TestGpuIndexBase::SetUp();
        Generate(DIM, NB, NQ);

        k = K;
        elems = nq * k;
        ids = (int64_t*)malloc(sizeof(int64_t) * elems);
        dis = (float*)malloc(sizeof(float) * elems);
    }

    void
    TearDown() override {
        // ids/dis were allocated with malloc above, so release them with free,
        // the matching deallocator
        free(ids);
        free(dis);
        TestGpuIndexBase::TearDown();
    }

 protected:
    std::string index_type;
    knowhere::IVFIndexPtr index_ = nullptr;

    int64_t* ids = nullptr;
    float* dis = nullptr;
    int64_t elems = 0;
};

TEST_F(GPURESTEST, copyandsearch) {
    // search and copy at the same time
    printf("==================\n");

    index_type = "GPUIVF";
    index_ = IndexFactory(index_type);

    auto conf = ParamGenerator::GetInstance().Gen(ParameterType::ivf);
    auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
    index_->set_preprocessor(preprocessor);
    auto model = index_->Train(base_dataset, conf);
    index_->set_index_model(model);
    index_->Add(base_dataset, conf);
    auto result = index_->Search(query_dataset, conf);
    AssertAnns(result, nq, k);

    auto cpu_idx = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config());
    cpu_idx->Seal();
    auto search_idx = knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());

    constexpr int64_t search_count = 50;
    constexpr int64_t load_count = 15;
    auto search_func = [&] {
        // TimeRecorder tc("search&load");
        for (int i = 0; i < search_count; ++i) {
            search_idx->Search(query_dataset, conf);
            // if (i > search_count - 6 || i == 0)
            //     tc.RecordSection("search once");
        }
        // tc.ElapseFromBegin("search finish");
    };
    auto load_func = [&] {
        // TimeRecorder tc("search&load");
        for (int i = 0; i < load_count; ++i) {
            knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
            // if (i > load_count - 5 || i < 5)
            //     tc.RecordSection("Copy to gpu");
        }
        // tc.ElapseFromBegin("load finish");
    };

    knowhere::TimeRecorder tc("Basic");
    knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
    tc.RecordSection("Copy to gpu once");
    search_idx->Search(query_dataset, conf);
    tc.RecordSection("Search once");
    search_func();
    tc.RecordSection("Search total cost");
    load_func();
    tc.RecordSection("Copy total cost");

    std::thread search_thread(search_func);
    std::thread load_thread(load_func);
    search_thread.join();
    load_thread.join();
    tc.RecordSection("Copy&Search total");
}

TEST_F(GPURESTEST, trainandsearch) {
    index_type = "GPUIVF";
    index_ = IndexFactory(index_type);

    auto conf = ParamGenerator::GetInstance().Gen(ParameterType::ivf);
    auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
    index_->set_preprocessor(preprocessor);
    auto model = index_->Train(base_dataset, conf);
    auto new_index = IndexFactory(index_type);
    new_index->set_index_model(model);
    new_index->Add(base_dataset, conf);
    auto cpu_idx = knowhere::cloner::CopyGpuToCpu(new_index, knowhere::Config());
    cpu_idx->Seal();
    auto search_idx = knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());

    constexpr int train_count = 5;
    constexpr int search_count = 200;
    auto train_stage = [&] {
        for (int i = 0; i < train_count; ++i) {
            auto model = index_->Train(base_dataset, conf);
            auto test_idx = IndexFactory(index_type);
            test_idx->set_index_model(model);
            test_idx->Add(base_dataset, conf);
        }
    };
    auto search_stage = [&](knowhere::VectorIndexPtr& search_idx) {
        for (int i = 0; i < search_count; ++i) {
            auto result = search_idx->Search(query_dataset, conf);
            AssertAnns(result, nq, k);
        }
    };

    // TimeRecorder tc("record");
    // train_stage();
    // tc.RecordSection("train cost");
    // search_stage(search_idx);
    // tc.RecordSection("search cost");

    {
        // search and build parallel
        std::thread search_thread(search_stage, std::ref(search_idx));
        std::thread train_thread(train_stage);
        train_thread.join();
        search_thread.join();
    }
    {
        // build parallel
        std::thread train_1(train_stage);
        std::thread train_2(train_stage);
        train_1.join();
        train_2.join();
    }
    {
        // search parallel
        auto search_idx_2 = knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
        std::thread search_1(search_stage, std::ref(search_idx));
        std::thread search_2(search_stage, std::ref(search_idx_2));
        search_1.join();
        search_2.join();
    }
}

#ifdef CompareToOriFaiss
TEST_F(GPURESTEST, gpu_ivf_resource_test) {
    assert(!xb.empty());

    {
        index_ = std::make_shared<knowhere::GPUIVF>(-1);
        ASSERT_EQ(std::dynamic_pointer_cast<knowhere::GPUIVF>(index_)->GetGpuDevice(), -1);
        std::dynamic_pointer_cast<knowhere::GPUIVF>(index_)->SetGpuDevice(DEVICEID);
        ASSERT_EQ(std::dynamic_pointer_cast<knowhere::GPUIVF>(index_)->GetGpuDevice(), DEVICEID);

        auto conf = ParamGenerator::GetInstance().Gen(ParameterType::ivfsq);
        auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
        index_->set_preprocessor(preprocessor);
        auto model = index_->Train(base_dataset, conf);
        index_->set_index_model(model);
        index_->Add(base_dataset, conf);
        EXPECT_EQ(index_->Count(), nb);
        EXPECT_EQ(index_->Dimension(), dim);

        // knowhere::TimeRecorder tc("knowhere GPUIVF");
        for (int i = 0; i < search_count; ++i) {
            index_->Search(query_dataset, conf);
            // the guard below belonged to the commented-out timer call; it is
            // commented out with it so the loop body stays syntactically valid
            // if (i > search_count - 6 || i < 5)
            //     tc.RecordSection("search once");
        }
        // tc.ElapseFromBegin("search all");
    }
    knowhere::FaissGpuResourceMgr::GetInstance().Dump();

    // {
    //     // ori faiss IVF-Search
    //     faiss::gpu::StandardGpuResources res;
    //     faiss::gpu::GpuIndexIVFFlatConfig idx_config;
    //     idx_config.device = DEVICEID;
    //     faiss::gpu::GpuIndexIVFFlat device_index(&res, dim, 1638, faiss::METRIC_L2, idx_config);
    //     device_index.train(nb, xb.data());
    //     device_index.add(nb, xb.data());
    //
    //     knowhere::TimeRecorder tc("ori IVF");
    //     for (int i = 0; i < search_count; ++i) {
    //         device_index.search(nq, xq.data(), k, dis, ids);
    //         if (i > search_count - 6 || i < 5)
    //             tc.RecordSection("search once");
    //     }
    //     tc.ElapseFromBegin("search all");
    // }
}

TEST_F(GPURESTEST, gpuivfsq) {
    {
        // knowhere gpu ivfsq
        index_type = "GPUIVFSQ";
        index_ = IndexFactory(index_type);

        auto conf = std::make_shared<knowhere::IVFSQCfg>();
        conf->nlist = 1638;
        conf->d = dim;
        conf->gpu_id = DEVICEID;
        conf->metric_type = knowhere::METRICTYPE::L2;
        conf->k = k;
        conf->nbits = 8;
        conf->nprobe = 1;

        auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
        index_->set_preprocessor(preprocessor);
        auto model = index_->Train(base_dataset, conf);
        index_->set_index_model(model);
        index_->Add(base_dataset, conf);
        // auto result = index_->Search(query_dataset, conf);
        // AssertAnns(result, nq, k);

        auto cpu_idx = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config());
        cpu_idx->Seal();

        knowhere::TimeRecorder tc("knowhere GPUSQ8");
        auto search_idx = knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
        tc.RecordSection("Copy to gpu");
        for (int i = 0; i < search_count; ++i) {
            search_idx->Search(query_dataset, conf);
            if (i > search_count - 6 || i < 5)
                tc.RecordSection("search once");
        }
        tc.ElapseFromBegin("search all");
    }

    {
        // Ori gpuivfsq Test
        const char* index_description = "IVF1638,SQ8";
        faiss::Index* ori_index = faiss::index_factory(dim, index_description, faiss::METRIC_L2);

        faiss::gpu::StandardGpuResources res;
        auto device_index = faiss::gpu::index_cpu_to_gpu(&res, DEVICEID, ori_index);
        device_index->train(nb, xb.data());
        device_index->add(nb, xb.data());

        auto cpu_index = faiss::gpu::index_gpu_to_cpu(device_index);
        auto idx = dynamic_cast<faiss::IndexIVF*>(cpu_index);
        if (idx != nullptr) {
            idx->to_readonly();
        }
        delete device_index;
        delete ori_index;

        faiss::gpu::GpuClonerOptions option;
        option.allInGpu = true;

        knowhere::TimeRecorder tc("ori GPUSQ8");
        faiss::Index* search_idx = faiss::gpu::index_cpu_to_gpu(&res, DEVICEID, cpu_index, &option);
        tc.RecordSection("Copy to gpu");
        for (int i = 0; i < search_count; ++i) {
            search_idx->search(nq, xq.data(), k, dis, ids);
            if (i > search_count - 6 || i < 5)
                tc.RecordSection("search once");
        }
        tc.ElapseFromBegin("search all");
        delete cpu_index;
        delete search_idx;
    }
}
#endif
|
|
@ -23,54 +23,28 @@
|
|||
#include "knowhere/index/vector_index/IndexIDMAP.h"
|
||||
#include "knowhere/index/vector_index/helpers/Cloner.h"
|
||||
|
||||
#include "Helper.h"
|
||||
#include "unittest/utils.h"
|
||||
|
||||
static int device_id = 0;
|
||||
class IDMAPTest : public DataGen, public ::testing::Test {
|
||||
class IDMAPTest : public DataGen, public TestGpuIndexBase {
|
||||
protected:
|
||||
void
|
||||
SetUp() override {
|
||||
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(device_id, 1024 * 1024 * 200, 1024 * 1024 * 300, 2);
|
||||
TestGpuIndexBase::SetUp();
|
||||
|
||||
Init_with_default();
|
||||
index_ = std::make_shared<knowhere::IDMAP>();
|
||||
}
|
||||
|
||||
void
|
||||
TearDown() override {
|
||||
knowhere::FaissGpuResourceMgr::GetInstance().Free();
|
||||
TestGpuIndexBase::TearDown();
|
||||
}
|
||||
|
||||
protected:
|
||||
knowhere::IDMAPPtr index_ = nullptr;
|
||||
};
|
||||
|
||||
void
|
||||
AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
|
||||
auto ids = result->array()[0];
|
||||
for (auto i = 0; i < nq; i++) {
|
||||
EXPECT_EQ(i, *(ids->data()->GetValues<int64_t>(1, i * k)));
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
PrintResult(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
|
||||
auto ids = result->array()[0];
|
||||
auto dists = result->array()[1];
|
||||
|
||||
std::stringstream ss_id;
|
||||
std::stringstream ss_dist;
|
||||
for (auto i = 0; i < 10; i++) {
|
||||
for (auto j = 0; j < k; ++j) {
|
||||
ss_id << *(ids->data()->GetValues<int64_t>(1, i * k + j)) << " ";
|
||||
ss_dist << *(dists->data()->GetValues<float>(1, i * k + j)) << " ";
|
||||
}
|
||||
ss_id << std::endl;
|
||||
ss_dist << std::endl;
|
||||
}
|
||||
std::cout << "id\n" << ss_id.str() << std::endl;
|
||||
std::cout << "dist\n" << ss_dist.str() << std::endl;
|
||||
}
|
||||
|
||||
TEST_F(IDMAPTest, idmap_basic) {
|
||||
ASSERT_TRUE(!xb.empty());
|
||||
|
||||
|
@@ -87,7 +61,7 @@ TEST_F(IDMAPTest, idmap_basic) {
|
|||
ASSERT_TRUE(index_->GetRawIds() != nullptr);
|
||||
auto result = index_->Search(query_dataset, conf);
|
||||
AssertAnns(result, nq, k);
|
||||
PrintResult(result, nq, k);
|
||||
// PrintResult(result, nq, k);
|
||||
|
||||
index_->Seal();
|
||||
auto binaryset = index_->Serialize();
|
||||
|
@@ -95,7 +69,7 @@ TEST_F(IDMAPTest, idmap_basic) {
|
|||
new_index->Load(binaryset);
|
||||
auto re_result = index_->Search(query_dataset, conf);
|
||||
AssertAnns(re_result, nq, k);
|
||||
PrintResult(re_result, nq, k);
|
||||
// PrintResult(re_result, nq, k);
|
||||
}
|
||||
|
||||
TEST_F(IDMAPTest, idmap_serialize) {
|
||||
|
@@ -118,7 +92,7 @@ TEST_F(IDMAPTest, idmap_serialize) {
|
|||
index_->Add(base_dataset, knowhere::Config());
|
||||
auto re_result = index_->Search(query_dataset, conf);
|
||||
AssertAnns(re_result, nq, k);
|
||||
PrintResult(re_result, nq, k);
|
||||
// PrintResult(re_result, nq, k);
|
||||
EXPECT_EQ(index_->Count(), nb);
|
||||
EXPECT_EQ(index_->Dimension(), dim);
|
||||
auto binaryset = index_->Serialize();
|
||||
|
@@ -138,7 +112,7 @@ TEST_F(IDMAPTest, idmap_serialize) {
|
|||
EXPECT_EQ(index_->Dimension(), dim);
|
||||
auto result = index_->Search(query_dataset, conf);
|
||||
AssertAnns(result, nq, k);
|
||||
PrintResult(result, nq, k);
|
||||
// PrintResult(result, nq, k);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -169,7 +143,7 @@ TEST_F(IDMAPTest, copy_test) {
|
|||
|
||||
{
|
||||
// cpu to gpu
|
||||
auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, device_id, conf);
|
||||
auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, DEVICEID, conf);
|
||||
auto clone_result = clone_index->Search(query_dataset, conf);
|
||||
AssertAnns(clone_result, nq, k);
|
||||
ASSERT_THROW({ std::static_pointer_cast<knowhere::GPUIDMAP>(clone_index)->GetRawVectors(); },
|
||||
|
@@ -194,9 +168,9 @@ TEST_F(IDMAPTest, copy_test) {
|
|||
ASSERT_TRUE(std::static_pointer_cast<knowhere::IDMAP>(host_index)->GetRawIds() != nullptr);
|
||||
|
||||
// gpu to gpu
|
||||
auto device_index = knowhere::cloner::CopyCpuToGpu(index_, device_id, conf);
|
||||
auto device_index = knowhere::cloner::CopyCpuToGpu(index_, DEVICEID, conf);
|
||||
auto new_device_index =
|
||||
std::static_pointer_cast<knowhere::GPUIDMAP>(device_index)->CopyGpuToGpu(device_id, conf);
|
||||
std::static_pointer_cast<knowhere::GPUIDMAP>(device_index)->CopyGpuToGpu(DEVICEID, conf);
|
||||
auto device_result = new_device_index->Search(query_dataset, conf);
|
||||
AssertAnns(device_result, nq, k);
|
||||
}
|
||||
|
|
|
@@ -35,99 +35,25 @@
|
|||
#include "knowhere/index/vector_index/IndexIVFSQHybrid.h"
|
||||
#include "knowhere/index/vector_index/helpers/Cloner.h"
|
||||
|
||||
#include "unittest/Helper.h"
|
||||
#include "unittest/utils.h"
|
||||
|
||||
using ::testing::Combine;
|
||||
using ::testing::TestWithParam;
|
||||
using ::testing::Values;
|
||||
|
||||
constexpr int device_id = 0;
|
||||
constexpr int64_t DIM = 128;
|
||||
constexpr int64_t NB = 1000000 / 100;
|
||||
constexpr int64_t NQ = 10;
|
||||
constexpr int64_t K = 10;
|
||||
|
||||
knowhere::IVFIndexPtr
|
||||
IndexFactory(const std::string& type) {
|
||||
if (type == "IVF") {
|
||||
return std::make_shared<knowhere::IVF>();
|
||||
} else if (type == "IVFPQ") {
|
||||
return std::make_shared<knowhere::IVFPQ>();
|
||||
} else if (type == "GPUIVF") {
|
||||
return std::make_shared<knowhere::GPUIVF>(device_id);
|
||||
} else if (type == "GPUIVFPQ") {
|
||||
return std::make_shared<knowhere::GPUIVFPQ>(device_id);
|
||||
} else if (type == "IVFSQ") {
|
||||
return std::make_shared<knowhere::IVFSQ>();
|
||||
} else if (type == "GPUIVFSQ") {
|
||||
return std::make_shared<knowhere::GPUIVFSQ>(device_id);
|
||||
} else if (type == "IVFSQHybrid") {
|
||||
return std::make_shared<knowhere::IVFSQHybrid>(device_id);
|
||||
}
|
||||
}
|
||||
|
||||
enum class ParameterType {
|
||||
ivf,
|
||||
ivfpq,
|
||||
ivfsq,
|
||||
nsg,
|
||||
};
|
||||
|
||||
class ParamGenerator {
|
||||
public:
|
||||
static ParamGenerator&
|
||||
GetInstance() {
|
||||
static ParamGenerator instance;
|
||||
return instance;
|
||||
}
|
||||
|
||||
knowhere::Config
|
||||
Gen(const ParameterType& type) {
|
||||
if (type == ParameterType::ivf) {
|
||||
auto tempconf = std::make_shared<knowhere::IVFCfg>();
|
||||
tempconf->d = DIM;
|
||||
tempconf->gpu_id = device_id;
|
||||
tempconf->nlist = 100;
|
||||
tempconf->nprobe = 16;
|
||||
tempconf->k = K;
|
||||
tempconf->metric_type = knowhere::METRICTYPE::L2;
|
||||
return tempconf;
|
||||
} else if (type == ParameterType::ivfpq) {
|
||||
auto tempconf = std::make_shared<knowhere::IVFPQCfg>();
|
||||
tempconf->d = DIM;
|
||||
tempconf->gpu_id = device_id;
|
||||
tempconf->nlist = 25;
|
||||
tempconf->nprobe = 4;
|
||||
tempconf->k = K;
|
||||
tempconf->m = 4;
|
||||
tempconf->nbits = 8;
|
||||
tempconf->metric_type = knowhere::METRICTYPE::L2;
|
||||
return tempconf;
|
||||
} else if (type == ParameterType::ivfsq) {
|
||||
auto tempconf = std::make_shared<knowhere::IVFSQCfg>();
|
||||
tempconf->d = DIM;
|
||||
tempconf->gpu_id = device_id;
|
||||
tempconf->nlist = 100;
|
||||
tempconf->nprobe = 16;
|
||||
tempconf->k = K;
|
||||
tempconf->nbits = 8;
|
||||
tempconf->metric_type = knowhere::METRICTYPE::L2;
|
||||
return tempconf;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class IVFTest : public DataGen, public TestWithParam<::std::tuple<std::string, ParameterType>> {
|
||||
protected:
|
||||
void
|
||||
SetUp() override {
|
||||
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, PINMEM, TEMPMEM, RESNUM);
|
||||
|
||||
ParameterType parameter_type;
|
||||
std::tie(index_type, parameter_type) = GetParam();
|
||||
// Init_with_default();
|
||||
Generate(DIM, NB, NQ);
|
||||
index_ = IndexFactory(index_type);
|
||||
conf = ParamGenerator::GetInstance().Gen(parameter_type);
|
||||
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(device_id, 1024 * 1024 * 200, 1024 * 1024 * 600, 2);
|
||||
}
|
||||
|
||||
void
|
||||
|
@@ -140,7 +66,7 @@ class IVFTest : public DataGen, public TestWithParam<::std::tuple<std::string, P
|
|||
std::vector<std::string> gpu_idx{"GPUIVFSQ"};
|
||||
auto finder = std::find(gpu_idx.cbegin(), gpu_idx.cend(), index_type);
|
||||
if (finder != gpu_idx.cend()) {
|
||||
return knowhere::cloner::CopyCpuToGpu(index_, device_id, knowhere::Config());
|
||||
return knowhere::cloner::CopyCpuToGpu(index_, DEVICEID, knowhere::Config());
|
||||
}
|
||||
return index_;
|
||||
}
|
||||
|
@@ -162,33 +88,6 @@ INSTANTIATE_TEST_CASE_P(IVFParameters, IVFTest,
|
|||
#endif
|
||||
std::make_tuple("GPUIVFSQ", ParameterType::ivfsq)));
|
||||
|
||||
void
|
||||
AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
|
||||
auto ids = result->array()[0];
|
||||
for (auto i = 0; i < nq; i++) {
|
||||
EXPECT_EQ(i, *(ids->data()->GetValues<int64_t>(1, i * k)));
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
PrintResult(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
|
||||
auto ids = result->array()[0];
|
||||
auto dists = result->array()[1];
|
||||
|
||||
std::stringstream ss_id;
|
||||
std::stringstream ss_dist;
|
||||
for (auto i = 0; i < 10; i++) {
|
||||
for (auto j = 0; j < k; ++j) {
|
||||
ss_id << *(ids->data()->GetValues<int64_t>(1, i * k + j)) << " ";
|
||||
ss_dist << *(dists->data()->GetValues<float>(1, i * k + j)) << " ";
|
||||
}
|
||||
ss_id << std::endl;
|
||||
ss_dist << std::endl;
|
||||
}
|
||||
std::cout << "id\n" << ss_id.str() << std::endl;
|
||||
std::cout << "dist\n" << ss_dist.str() << std::endl;
|
||||
}
|
||||
|
||||
TEST_P(IVFTest, ivf_basic) {
|
||||
assert(!xb.empty());
|
||||
|
||||
|
@@ -207,85 +106,6 @@ TEST_P(IVFTest, ivf_basic) {
|
|||
// PrintResult(result, nq, k);
|
||||
}
|
||||
|
||||
TEST_P(IVFTest, hybrid) {
|
||||
if (index_type != "IVFSQHybrid") {
|
||||
return;
|
||||
}
|
||||
assert(!xb.empty());
|
||||
|
||||
auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
|
||||
index_->set_preprocessor(preprocessor);
|
||||
|
||||
auto model = index_->Train(base_dataset, conf);
|
||||
index_->set_index_model(model);
|
||||
index_->Add(base_dataset, conf);
|
||||
EXPECT_EQ(index_->Count(), nb);
|
||||
EXPECT_EQ(index_->Dimension(), dim);
|
||||
|
||||
// auto new_idx = ChooseTodo();
|
||||
// auto result = new_idx->Search(query_dataset, conf);
|
||||
// AssertAnns(result, nq, conf->k);
|
||||
|
||||
{
|
||||
auto hybrid_1_idx = std::make_shared<knowhere::IVFSQHybrid>(device_id);
|
||||
|
||||
auto binaryset = index_->Serialize();
|
||||
hybrid_1_idx->Load(binaryset);
|
||||
|
||||
auto quantizer_conf = std::make_shared<knowhere::QuantizerCfg>();
|
||||
quantizer_conf->mode = 1;
|
||||
quantizer_conf->gpu_id = device_id;
|
||||
auto q = hybrid_1_idx->LoadQuantizer(quantizer_conf);
|
||||
hybrid_1_idx->SetQuantizer(q);
|
||||
auto result = hybrid_1_idx->Search(query_dataset, conf);
|
||||
AssertAnns(result, nq, conf->k);
|
||||
PrintResult(result, nq, k);
|
||||
hybrid_1_idx->UnsetQuantizer();
|
||||
}
|
||||
|
||||
{
|
||||
auto hybrid_2_idx = std::make_shared<knowhere::IVFSQHybrid>(device_id);
|
||||
|
||||
auto binaryset = index_->Serialize();
|
||||
hybrid_2_idx->Load(binaryset);
|
||||
|
||||
auto quantizer_conf = std::make_shared<knowhere::QuantizerCfg>();
|
||||
quantizer_conf->mode = 1;
|
||||
quantizer_conf->gpu_id = device_id;
|
||||
auto q = hybrid_2_idx->LoadQuantizer(quantizer_conf);
|
||||
quantizer_conf->mode = 2;
|
||||
auto gpu_idx = hybrid_2_idx->LoadData(q, quantizer_conf);
|
||||
|
||||
auto result = gpu_idx->Search(query_dataset, conf);
|
||||
AssertAnns(result, nq, conf->k);
|
||||
PrintResult(result, nq, k);
|
||||
}
|
||||
}
|
||||
|
||||
// TEST_P(IVFTest, gpu_to_cpu) {
|
||||
// if (index_type.find("GPU") == std::string::npos) { return; }
|
||||
//
|
||||
// // else
|
||||
// assert(!xb.empty());
|
||||
//
|
||||
// auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
|
||||
// index_->set_preprocessor(preprocessor);
|
||||
//
|
||||
// auto model = index_->Train(base_dataset, conf);
|
||||
// index_->set_index_model(model);
|
||||
// index_->Add(base_dataset, conf);
|
||||
// EXPECT_EQ(index_->Count(), nb);
|
||||
// EXPECT_EQ(index_->Dimension(), dim);
|
||||
// auto result = index_->Search(query_dataset, conf);
|
||||
// AssertAnns(result, nq, k);
|
||||
//
|
||||
// if (auto device_index = std::dynamic_pointer_cast<GPUIVF>(index_)) {
|
||||
// auto host_index = device_index->Copy_index_gpu_to_cpu();
|
||||
// auto result = host_index->Search(query_dataset, conf);
|
||||
// AssertAnns(result, nq, k);
|
||||
// }
|
||||
//}
|
||||
|
||||
TEST_P(IVFTest, ivf_serialize) {
|
||||
auto serialize = [](const std::string& filename, knowhere::BinaryPtr& bin, uint8_t* ret) {
|
||||
FileIOWriter writer(filename);
|
||||
|
@@ -423,7 +243,7 @@ TEST_P(IVFTest, clone_test) {
|
|||
auto finder = std::find(support_idx_vec.cbegin(), support_idx_vec.cend(), index_type);
|
||||
if (finder != support_idx_vec.cend()) {
|
||||
EXPECT_NO_THROW({
|
||||
auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, device_id, knowhere::Config());
|
||||
auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, DEVICEID, knowhere::Config());
|
||||
auto clone_result = clone_index->Search(query_dataset, conf);
|
||||
AssertEqual(result, clone_result);
|
||||
std::cout << "clone C <=> G [" << index_type << "] success" << std::endl;
|
||||
|
@@ -432,7 +252,7 @@ TEST_P(IVFTest, clone_test) {
|
|||
EXPECT_THROW(
|
||||
{
|
||||
std::cout << "clone C <=> G [" << index_type << "] failed" << std::endl;
|
||||
auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, device_id, knowhere::Config());
|
||||
auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, DEVICEID, knowhere::Config());
|
||||
},
|
||||
knowhere::KnowhereException);
|
||||
}
|
||||
|
@@ -440,9 +260,7 @@ TEST_P(IVFTest, clone_test) {
|
|||
}
|
||||
|
||||
#ifdef CUSTOMIZATION
|
||||
TEST_P(IVFTest, seal_test) {
|
||||
// FaissGpuResourceMgr::GetInstance().InitDevice(device_id);
|
||||
|
||||
TEST_P(IVFTest, gpu_seal_test) {
|
||||
std::vector<std::string> support_idx_vec{"GPUIVF", "GPUIVFSQ", "IVFSQHybrid"};
|
||||
auto finder = std::find(support_idx_vec.cbegin(), support_idx_vec.cend(), index_type);
|
||||
if (finder == support_idx_vec.cend()) {
|
||||
|
@@ -466,309 +284,13 @@ TEST_P(IVFTest, seal_test) {
|
|||
auto cpu_idx = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config());
|
||||
|
||||
knowhere::TimeRecorder tc("CopyToGpu");
|
||||
knowhere::cloner::CopyCpuToGpu(cpu_idx, device_id, knowhere::Config());
|
||||
knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
|
||||
auto without_seal = tc.RecordSection("Without seal");
|
||||
cpu_idx->Seal();
|
||||
tc.RecordSection("seal cost");
|
||||
knowhere::cloner::CopyCpuToGpu(cpu_idx, device_id, knowhere::Config());
|
||||
knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
|
||||
auto with_seal = tc.RecordSection("With seal");
|
||||
ASSERT_GE(without_seal, with_seal);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
class GPURESTEST : public DataGen, public ::testing::Test {
|
||||
protected:
|
||||
void
|
||||
SetUp() override {
|
||||
Generate(128, 1000000, 1000);
|
||||
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(device_id, 1024 * 1024 * 200, 1024 * 1024 * 300, 2);
|
||||
|
||||
k = 100;
|
||||
elems = nq * k;
|
||||
ids = (int64_t*)malloc(sizeof(int64_t) * elems);
|
||||
dis = (float*)malloc(sizeof(float) * elems);
|
||||
}
|
||||
|
||||
void
|
||||
TearDown() override {
|
||||
delete ids;
|
||||
delete dis;
|
||||
knowhere::FaissGpuResourceMgr::GetInstance().Free();
|
||||
}
|
||||
|
||||
protected:
|
||||
std::string index_type;
|
||||
knowhere::IVFIndexPtr index_ = nullptr;
|
||||
|
||||
int64_t* ids = nullptr;
|
||||
float* dis = nullptr;
|
||||
int64_t elems = 0;
|
||||
};
|
||||
|
||||
const int search_count = 18;
|
||||
const int load_count = 3;
|
||||
|
||||
TEST_F(GPURESTEST, gpu_ivf_resource_test) {
|
||||
assert(!xb.empty());
|
||||
|
||||
{
|
||||
index_ = std::make_shared<knowhere::GPUIVF>(-1);
|
||||
ASSERT_EQ(std::dynamic_pointer_cast<knowhere::GPUIVF>(index_)->GetGpuDevice(), -1);
|
||||
std::dynamic_pointer_cast<knowhere::GPUIVF>(index_)->SetGpuDevice(device_id);
|
||||
ASSERT_EQ(std::dynamic_pointer_cast<knowhere::GPUIVF>(index_)->GetGpuDevice(), device_id);
|
||||
|
||||
auto conf = std::make_shared<knowhere::IVFCfg>();
|
||||
conf->nlist = 1638;
|
||||
conf->d = dim;
|
||||
conf->gpu_id = device_id;
|
||||
conf->metric_type = knowhere::METRICTYPE::L2;
|
||||
conf->k = k;
|
||||
conf->nprobe = 1;
|
||||
|
||||
auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
|
||||
index_->set_preprocessor(preprocessor);
|
||||
auto model = index_->Train(base_dataset, conf);
|
||||
index_->set_index_model(model);
|
||||
index_->Add(base_dataset, conf);
|
||||
EXPECT_EQ(index_->Count(), nb);
|
||||
EXPECT_EQ(index_->Dimension(), dim);
|
||||
|
||||
knowhere::TimeRecorder tc("knowere GPUIVF");
|
||||
for (int i = 0; i < search_count; ++i) {
|
||||
index_->Search(query_dataset, conf);
|
||||
if (i > search_count - 6 || i < 5)
|
||||
tc.RecordSection("search once");
|
||||
}
|
||||
tc.ElapseFromBegin("search all");
|
||||
}
|
||||
knowhere::FaissGpuResourceMgr::GetInstance().Dump();
|
||||
|
||||
{
|
||||
// IVF-Search
|
||||
faiss::gpu::StandardGpuResources res;
|
||||
faiss::gpu::GpuIndexIVFFlatConfig idx_config;
|
||||
idx_config.device = device_id;
|
||||
faiss::gpu::GpuIndexIVFFlat device_index(&res, dim, 1638, faiss::METRIC_L2, idx_config);
|
||||
device_index.train(nb, xb.data());
|
||||
device_index.add(nb, xb.data());
|
||||
|
||||
knowhere::TimeRecorder tc("ori IVF");
|
||||
for (int i = 0; i < search_count; ++i) {
|
||||
device_index.search(nq, xq.data(), k, dis, ids);
|
||||
if (i > search_count - 6 || i < 5)
|
||||
tc.RecordSection("search once");
|
||||
}
|
||||
tc.ElapseFromBegin("search all");
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CUSTOMIZATION
|
||||
TEST_F(GPURESTEST, gpuivfsq) {
|
||||
{
|
||||
// knowhere gpu ivfsq
|
||||
index_type = "GPUIVFSQ";
|
||||
index_ = IndexFactory(index_type);
|
||||
|
||||
auto conf = std::make_shared<knowhere::IVFSQCfg>();
|
||||
conf->nlist = 1638;
|
||||
conf->d = dim;
|
||||
conf->gpu_id = device_id;
|
||||
conf->metric_type = knowhere::METRICTYPE::L2;
|
||||
conf->k = k;
|
||||
conf->nbits = 8;
|
||||
conf->nprobe = 1;
|
||||
|
||||
auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
|
||||
index_->set_preprocessor(preprocessor);
|
||||
auto model = index_->Train(base_dataset, conf);
|
||||
index_->set_index_model(model);
|
||||
index_->Add(base_dataset, conf);
|
||||
// auto result = index_->Search(query_dataset, conf);
|
||||
// AssertAnns(result, nq, k);
|
||||
|
||||
auto cpu_idx = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config());
|
||||
cpu_idx->Seal();
|
||||
|
||||
knowhere::TimeRecorder tc("knowhere GPUSQ8");
|
||||
auto search_idx = knowhere::cloner::CopyCpuToGpu(cpu_idx, device_id, knowhere::Config());
|
||||
tc.RecordSection("Copy to gpu");
|
||||
for (int i = 0; i < search_count; ++i) {
|
||||
search_idx->Search(query_dataset, conf);
|
||||
if (i > search_count - 6 || i < 5)
|
||||
tc.RecordSection("search once");
|
||||
}
|
||||
tc.ElapseFromBegin("search all");
|
||||
}
|
||||
|
||||
{
|
||||
// Ori gpuivfsq Test
|
||||
const char* index_description = "IVF1638,SQ8";
|
||||
faiss::Index* ori_index = faiss::index_factory(dim, index_description, faiss::METRIC_L2);
|
||||
|
||||
faiss::gpu::StandardGpuResources res;
|
||||
auto device_index = faiss::gpu::index_cpu_to_gpu(&res, device_id, ori_index);
|
||||
device_index->train(nb, xb.data());
|
||||
device_index->add(nb, xb.data());
|
||||
|
||||
auto cpu_index = faiss::gpu::index_gpu_to_cpu(device_index);
|
||||
auto idx = dynamic_cast<faiss::IndexIVF*>(cpu_index);
|
||||
if (idx != nullptr) {
|
||||
idx->to_readonly();
|
||||
}
|
||||
delete device_index;
|
||||
delete ori_index;
|
||||
|
||||
faiss::gpu::GpuClonerOptions option;
|
||||
option.allInGpu = true;
|
||||
|
||||
knowhere::TimeRecorder tc("ori GPUSQ8");
|
||||
faiss::Index* search_idx = faiss::gpu::index_cpu_to_gpu(&res, device_id, cpu_index, &option);
|
||||
tc.RecordSection("Copy to gpu");
|
||||
for (int i = 0; i < search_count; ++i) {
|
||||
search_idx->search(nq, xq.data(), k, dis, ids);
|
||||
if (i > search_count - 6 || i < 5)
|
||||
tc.RecordSection("search once");
|
||||
}
|
||||
tc.ElapseFromBegin("search all");
|
||||
delete cpu_index;
|
||||
delete search_idx;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
TEST_F(GPURESTEST, copyandsearch) {
|
||||
// search and copy at the same time
|
||||
printf("==================\n");
|
||||
|
||||
index_type = "GPUIVF";
|
||||
index_ = IndexFactory(index_type);
|
||||
|
||||
auto conf = std::make_shared<knowhere::IVFSQCfg>();
|
||||
conf->nlist = 1638;
|
||||
conf->d = dim;
|
||||
conf->gpu_id = device_id;
|
||||
conf->metric_type = knowhere::METRICTYPE::L2;
|
||||
conf->k = k;
|
||||
conf->nbits = 8;
|
||||
conf->nprobe = 1;
|
||||
|
||||
auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
|
||||
index_->set_preprocessor(preprocessor);
|
||||
auto model = index_->Train(base_dataset, conf);
|
||||
index_->set_index_model(model);
|
||||
index_->Add(base_dataset, conf);
|
||||
// auto result = index_->Search(query_dataset, conf);
|
||||
// AssertAnns(result, nq, k);
|
||||
|
||||
auto cpu_idx = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config());
|
||||
cpu_idx->Seal();
|
||||
|
||||
auto search_idx = knowhere::cloner::CopyCpuToGpu(cpu_idx, device_id, knowhere::Config());
|
||||
|
||||
auto search_func = [&] {
|
||||
// TimeRecorder tc("search&load");
|
||||
for (int i = 0; i < search_count; ++i) {
|
||||
search_idx->Search(query_dataset, conf);
|
||||
// if (i > search_count - 6 || i == 0)
|
||||
// tc.RecordSection("search once");
|
||||
}
|
||||
// tc.ElapseFromBegin("search finish");
|
||||
};
|
||||
auto load_func = [&] {
|
||||
// TimeRecorder tc("search&load");
|
||||
for (int i = 0; i < load_count; ++i) {
|
||||
knowhere::cloner::CopyCpuToGpu(cpu_idx, device_id, knowhere::Config());
|
||||
// if (i > load_count -5 || i < 5)
|
||||
// tc.RecordSection("Copy to gpu");
|
||||
}
|
||||
// tc.ElapseFromBegin("load finish");
|
||||
};
|
||||
|
||||
knowhere::TimeRecorder tc("basic");
|
||||
knowhere::cloner::CopyCpuToGpu(cpu_idx, device_id, knowhere::Config());
|
||||
tc.RecordSection("Copy to gpu once");
|
||||
search_idx->Search(query_dataset, conf);
|
||||
tc.RecordSection("search once");
|
||||
search_func();
|
||||
tc.RecordSection("only search total");
|
||||
load_func();
|
||||
tc.RecordSection("only copy total");
|
||||
|
||||
std::thread search_thread(search_func);
|
||||
std::thread load_thread(load_func);
|
||||
search_thread.join();
|
||||
load_thread.join();
|
||||
tc.RecordSection("Copy&search total");
|
||||
}
|
||||
|
||||
TEST_F(GPURESTEST, TrainAndSearch) {
|
||||
index_type = "GPUIVF";
|
||||
index_ = IndexFactory(index_type);
|
||||
|
||||
auto conf = std::make_shared<knowhere::IVFSQCfg>();
|
||||
conf->nlist = 1638;
|
||||
conf->d = dim;
|
||||
conf->gpu_id = device_id;
|
||||
conf->metric_type = knowhere::METRICTYPE::L2;
|
||||
conf->k = k;
|
||||
conf->nbits = 8;
|
||||
conf->nprobe = 1;
|
||||
|
||||
auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
|
||||
index_->set_preprocessor(preprocessor);
|
||||
auto model = index_->Train(base_dataset, conf);
|
||||
auto new_index = IndexFactory(index_type);
|
||||
new_index->set_index_model(model);
|
||||
new_index->Add(base_dataset, conf);
|
||||
auto cpu_idx = knowhere::cloner::CopyGpuToCpu(new_index, knowhere::Config());
|
||||
cpu_idx->Seal();
|
||||
auto search_idx = knowhere::cloner::CopyCpuToGpu(cpu_idx, device_id, knowhere::Config());
|
||||
|
||||
constexpr int train_count = 1;
|
||||
constexpr int search_count = 5000;
|
||||
auto train_stage = [&] {
|
||||
for (int i = 0; i < train_count; ++i) {
|
||||
auto model = index_->Train(base_dataset, conf);
|
||||
auto test_idx = IndexFactory(index_type);
|
||||
test_idx->set_index_model(model);
|
||||
test_idx->Add(base_dataset, conf);
|
||||
}
|
||||
};
|
||||
auto search_stage = [&](knowhere::VectorIndexPtr& search_idx) {
|
||||
for (int i = 0; i < search_count; ++i) {
|
||||
auto result = search_idx->Search(query_dataset, conf);
|
||||
AssertAnns(result, nq, k);
|
||||
}
|
||||
};
|
||||
|
||||
// TimeRecorder tc("record");
|
||||
// train_stage();
|
||||
// tc.RecordSection("train cost");
|
||||
// search_stage(search_idx);
|
||||
// tc.RecordSection("search cost");
|
||||
|
||||
{
|
||||
// search and build parallel
|
||||
std::thread search_thread(search_stage, std::ref(search_idx));
|
||||
std::thread train_thread(train_stage);
|
||||
train_thread.join();
|
||||
search_thread.join();
|
||||
}
|
||||
{
|
||||
// build parallel
|
||||
std::thread train_1(train_stage);
|
||||
std::thread train_2(train_stage);
|
||||
train_1.join();
|
||||
train_2.join();
|
||||
}
|
||||
{
|
||||
// search parallel
|
||||
auto search_idx_2 = knowhere::cloner::CopyCpuToGpu(cpu_idx, device_id, knowhere::Config());
|
||||
std::thread search_1(search_stage, std::ref(search_idx));
|
||||
std::thread search_2(search_stage, std::ref(search_idx_2));
|
||||
search_1.join();
|
||||
search_2.join();
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(lxj): Add exception test
|
||||
|
|
|
@@ -52,33 +52,6 @@ class KDTTest : public DataGen, public ::testing::Test {
|
|||
std::shared_ptr<knowhere::CPUKDTRNG> index_ = nullptr;
|
||||
};
|
||||
|
||||
void
|
||||
AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
|
||||
auto ids = result->array()[0];
|
||||
for (auto i = 0; i < nq; i++) {
|
||||
EXPECT_EQ(i, *(ids->data()->GetValues<int64_t>(1, i * k)));
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
PrintResult(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
|
||||
auto ids = result->array()[0];
|
||||
auto dists = result->array()[1];
|
||||
|
||||
std::stringstream ss_id;
|
||||
std::stringstream ss_dist;
|
||||
for (auto i = 0; i < 10; i++) {
|
||||
for (auto j = 0; j < k; ++j) {
|
||||
ss_id << *(ids->data()->GetValues<int64_t>(1, i * k + j)) << " ";
|
||||
ss_dist << *(dists->data()->GetValues<float>(1, i * k + j)) << " ";
|
||||
}
|
||||
ss_id << std::endl;
|
||||
ss_dist << std::endl;
|
||||
}
|
||||
std::cout << "id\n" << ss_id.str() << std::endl;
|
||||
std::cout << "dist\n" << ss_dist.str() << std::endl;
|
||||
}
|
||||
|
||||
// TODO(lxj): add test about count() and dimension()
|
||||
TEST_F(KDTTest, kdt_basic) {
|
||||
assert(!xb.empty());
|
||||
|
|
|
@@ -30,19 +30,19 @@ using ::testing::Combine;
|
|||
using ::testing::TestWithParam;
|
||||
using ::testing::Values;
|
||||
|
||||
constexpr int64_t DEVICE_ID = 1;
|
||||
constexpr int64_t DEVICEID = 0;
|
||||
|
||||
class NSGInterfaceTest : public DataGen, public ::testing::Test {
|
||||
protected:
|
||||
void
|
||||
SetUp() override {
|
||||
// Init_with_default();
|
||||
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICE_ID, 1024 * 1024 * 200, 1024 * 1024 * 600, 2);
|
||||
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, 1024 * 1024 * 200, 1024 * 1024 * 600, 2);
|
||||
Generate(256, 1000000 / 100, 1);
|
||||
index_ = std::make_shared<knowhere::NSG>();
|
||||
|
||||
auto tmp_conf = std::make_shared<knowhere::NSGCfg>();
|
||||
tmp_conf->gpu_id = DEVICE_ID;
|
||||
tmp_conf->gpu_id = DEVICEID;
|
||||
tmp_conf->knng = 20;
|
||||
tmp_conf->nprobe = 8;
|
||||
tmp_conf->nlist = 163;
|
||||
|
@@ -69,14 +69,6 @@ class NSGInterfaceTest : public DataGen, public ::testing::Test {
|
|||
knowhere::Config search_conf;
|
||||
};
|
||||
|
||||
void
|
||||
AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
|
||||
auto ids = result->array()[0];
|
||||
for (auto i = 0; i < nq; i++) {
|
||||
EXPECT_EQ(i, *(ids->data()->GetValues<int64_t>(1, i * k)));
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(NSGInterfaceTest, basic_test) {
|
||||
assert(!xb.empty());
|
||||
|
||||
|
|
|
@@ -17,6 +17,7 @@
|
|||
|
||||
#include "unittest/utils.h"
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
@@ -147,3 +148,30 @@ generate_query_dataset(int64_t nb, int64_t dim, float* xb) {
|
|||
auto dataset = std::make_shared<knowhere::Dataset>(std::move(tensors), tensor_schema);
|
||||
return dataset;
|
||||
}
|
||||
|
||||
void
|
||||
AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
|
||||
auto ids = result->array()[0];
|
||||
for (auto i = 0; i < nq; i++) {
|
||||
EXPECT_EQ(i, *(ids->data()->GetValues<int64_t>(1, i * k)));
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
PrintResult(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
|
||||
auto ids = result->array()[0];
|
||||
auto dists = result->array()[1];
|
||||
|
||||
std::stringstream ss_id;
|
||||
std::stringstream ss_dist;
|
||||
for (auto i = 0; i < 10; i++) {
|
||||
for (auto j = 0; j < k; ++j) {
|
||||
ss_id << *(ids->data()->GetValues<int64_t>(1, i * k + j)) << " ";
|
||||
ss_dist << *(dists->data()->GetValues<float>(1, i * k + j)) << " ";
|
||||
}
|
||||
ss_id << std::endl;
|
||||
ss_dist << std::endl;
|
||||
}
|
||||
std::cout << "id\n" << ss_id.str() << std::endl;
|
||||
std::cout << "dist\n" << ss_dist.str() << std::endl;
|
||||
}
|
||||
|
|
|
@@ -68,6 +68,12 @@ generate_dataset(int64_t nb, int64_t dim, float* xb, int64_t* ids);
|
|||
knowhere::DatasetPtr
|
||||
generate_query_dataset(int64_t nb, int64_t dim, float* xb);
|
||||
|
||||
void
|
||||
AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k);
|
||||
|
||||
void
|
||||
PrintResult(const knowhere::DatasetPtr& result, const int& nq, const int& k);
|
||||
|
||||
struct FileIOWriter {
|
||||
std::fstream fs;
|
||||
std::string name;
|
||||
|
|
|
@@ -50,7 +50,7 @@ print_banner() {
    std::cout << " / /|_/ // // /_| |/ / /_/ /\\ \\ " << std::endl;
    std::cout << " /_/ /_/___/____/___/\\____/___/ " << std::endl;
    std::cout << std::endl;
    std::cout << "Welcome to use Milvus by Zilliz!" << std::endl;
    std::cout << "Welcome to Milvus!" << std::endl;
    std::cout << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME << std::endl;
    std::cout << std::endl;
}

@@ -50,29 +50,35 @@ load_simple_config() {
    std::string mode;
    config.GetResourceConfigMode(mode);
    std::vector<std::string> pool;
    config.GetResourceConfigPool(pool);
    config.GetResourceConfigSearchResources(pool);

    // get resources
    bool use_cpu_to_compute = false;
    for (auto& resource : pool) {
        if (resource == "cpu") {
            use_cpu_to_compute = true;
            break;
        }
    }
    auto gpu_ids = get_gpu_pool();

    int32_t build_gpu_id;
    config.GetResourceConfigIndexBuildDevice(build_gpu_id);

    // create and connect
    ResMgrInst::GetInstance()->Add(ResourceFactory::Create("disk", "DISK", 0, true, false));

    auto io = Connection("io", 500);
    ResMgrInst::GetInstance()->Add(ResourceFactory::Create("cpu", "CPU", 0, true, use_cpu_to_compute));
    ResMgrInst::GetInstance()->Add(ResourceFactory::Create("cpu", "CPU", 0, true, true));
    ResMgrInst::GetInstance()->Connect("disk", "cpu", io);

    auto pcie = Connection("pcie", 12000);
    bool find_build_gpu_id = false;
    for (auto& gpu_id : gpu_ids) {
        ResMgrInst::GetInstance()->Add(ResourceFactory::Create(std::to_string(gpu_id), "GPU", gpu_id, true, true));
        ResMgrInst::GetInstance()->Connect("cpu", std::to_string(gpu_id), pcie);
        if (build_gpu_id == gpu_id) {
            find_build_gpu_id = true;
        }
    }

    if (not find_build_gpu_id) {
        ResMgrInst::GetInstance()->Add(
            ResourceFactory::Create(std::to_string(build_gpu_id), "GPU", build_gpu_id, true, true));
        ResMgrInst::GetInstance()->Connect("cpu", std::to_string(build_gpu_id), pcie);
    }
}

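For context, load_simple_config wires up a small resource graph: disk connects to cpu over an "io" link, and cpu connects to every GPU over a shared "pcie" link, with the index-build GPU appended if it is not already one of the search GPUs (this is the union described by #42 in the changelog). A toy, self-contained sketch of that topology; the device ids, edge names, and bandwidth figures mirror the code above, but none of the real ResMgr types are used:

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy sketch of the topology wired up above: disk -> cpu over "io",
// cpu -> each gpu over "pcie". Purely illustrative, not the real ResMgr.
int main() {
    std::vector<int> gpu_ids = {0, 1};  // assumed search_resources: gpu0, gpu1
    int build_gpu_id = 0;               // assumed index_build_device: gpu0

    std::multimap<std::string, std::string> edges;
    edges.emplace("disk", "cpu");  // the "io" connection (500)
    bool build_gpu_connected = false;
    for (int gpu_id : gpu_ids) {
        edges.emplace("cpu", "gpu" + std::to_string(gpu_id));  // "pcie" (12000)
        if (gpu_id == build_gpu_id) {
            build_gpu_connected = true;
        }
    }
    if (!build_gpu_connected) {
        edges.emplace("cpu", "gpu" + std::to_string(build_gpu_id));
    }
    for (const auto& e : edges) {
        std::cout << e.first << " -> " << e.second << std::endl;
    }
}
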
@@ -48,7 +48,7 @@ get_gpu_pool() {

    server::Config& config = server::Config::GetInstance();
    std::vector<std::string> pool;
    Status s = config.GetResourceConfigPool(pool);
    Status s = config.GetResourceConfigSearchResources(pool);
    if (!s.ok()) {
        SERVER_LOG_ERROR << s.message();
    }

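get_gpu_pool presumably converts these validated "gpuN" strings into numeric device ids downstream; a minimal sketch of that conversion, where the substr(3) offset skips the literal "gpu" prefix (the same trick GetResourceConfigIndexBuildDevice uses later in this commit). The helper name is hypothetical:

#include <cstdint>
#include <string>
#include <vector>

// Sketch: map validated "gpuN" strings to numeric device ids.
// Assumes every entry already passed a "gpu(\d+)" format check.
std::vector<uint64_t> ParseGpuPool(const std::vector<std::string>& pool) {
    std::vector<uint64_t> ids;
    for (const auto& entry : pool) {
        ids.push_back(std::stoul(entry.substr(3)));  // skip "gpu" prefix
    }
    return ids;
}
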
@@ -184,11 +184,11 @@ Action::SpecifiedResourceLabelTaskScheduler(ResourceMgrWPtr res_mgr, ResourcePtr
    // get build index gpu resource
    server::Config& config = server::Config::GetInstance();
    int32_t build_index_gpu;
    Status stat = config.GetDBConfigBuildIndexGPU(build_index_gpu);
    Status stat = config.GetResourceConfigIndexBuildDevice(build_index_gpu);

    bool find_gpu_res = false;
    for (uint64_t i = 0; i < compute_resources.size(); ++i) {
        if (res_mgr.lock()->GetResource(ResourceType::GPU, build_index_gpu) != nullptr) {
    if (res_mgr.lock()->GetResource(ResourceType::GPU, build_index_gpu) != nullptr) {
        for (uint64_t i = 0; i < compute_resources.size(); ++i) {
            if (compute_resources[i]->name() ==
                res_mgr.lock()->GetResource(ResourceType::GPU, build_index_gpu)->name()) {
                find_gpu_res = true;

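The reordering above also hoists the GetResource lookup out of the loop: the old code re-resolved the build GPU on every iteration, while the new code resolves it once and only then scans compute_resources for a name match. A toy runnable sketch of the new control flow, using a simplified stand-in Resource type rather than the scheduler's real classes:

#include <memory>
#include <string>
#include <vector>

// Toy sketch of the reordered lookup: resolve the build GPU once,
// then scan compute resources for a matching name.
struct Resource {
    std::string name;
};

bool FindBuildGpu(const std::shared_ptr<Resource>& build_gpu,
                  const std::vector<std::shared_ptr<Resource>>& compute_resources) {
    if (build_gpu == nullptr) {
        return false;  // build device is not a known GPU resource
    }
    for (const auto& res : compute_resources) {
        if (res->name == build_gpu->name) {
            return true;
        }
    }
    return false;
}
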
@@ -26,48 +26,48 @@
namespace milvus {
namespace scheduler {

bool
LargeSQ8HPass::Run(const TaskPtr& task) {
    if (task->Type() != TaskType::SearchTask) {
        return false;
    }

    auto search_task = std::static_pointer_cast<XSearchTask>(task);
    if (search_task->file_->engine_type_ != (int)engine::EngineType::FAISS_IVFSQ8H) {
        return false;
    }

    auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());

    // TODO: future, Index::IVFSQ8H, if nq < threshold set cpu, else set gpu
    if (search_job->nq() < 100) {
        return false;
    }

    std::vector<uint64_t> gpus = scheduler::get_gpu_pool();
    std::vector<int64_t> all_free_mem;
    for (auto& gpu : gpus) {
        auto cache = cache::GpuCacheMgr::GetInstance(gpu);
        auto free_mem = cache->CacheCapacity() - cache->CacheUsage();
        all_free_mem.push_back(free_mem);
    }

    auto max_e = std::max_element(all_free_mem.begin(), all_free_mem.end());
    auto best_index = std::distance(all_free_mem.begin(), max_e);
    auto best_device_id = gpus[best_index];

    ResourcePtr res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
    if (not res_ptr) {
        SERVER_LOG_ERROR << "GpuResource " << best_device_id << " invalid.";
        // TODO: throw critical error and exit
        return false;
    }

    auto label = std::make_shared<SpecResLabel>(std::weak_ptr<Resource>(res_ptr));
    task->label() = label;

    return true;
}
// bool
// LargeSQ8HPass::Run(const TaskPtr& task) {
//     if (task->Type() != TaskType::SearchTask) {
//         return false;
//     }
//
//     auto search_task = std::static_pointer_cast<XSearchTask>(task);
//     if (search_task->file_->engine_type_ != (int)engine::EngineType::FAISS_IVFSQ8H) {
//         return false;
//     }
//
//     auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
//
//     // TODO: future, Index::IVFSQ8H, if nq < threshold set cpu, else set gpu
//     if (search_job->nq() < 100) {
//         return false;
//     }
//
//     std::vector<uint64_t> gpus = scheduler::get_gpu_pool();
//     std::vector<int64_t> all_free_mem;
//     for (auto& gpu : gpus) {
//         auto cache = cache::GpuCacheMgr::GetInstance(gpu);
//         auto free_mem = cache->CacheCapacity() - cache->CacheUsage();
//         all_free_mem.push_back(free_mem);
//     }
//
//     auto max_e = std::max_element(all_free_mem.begin(), all_free_mem.end());
//     auto best_index = std::distance(all_free_mem.begin(), max_e);
//     auto best_device_id = gpus[best_index];
//
//     ResourcePtr res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
//     if (not res_ptr) {
//         SERVER_LOG_ERROR << "GpuResource " << best_device_id << " invalid.";
//         // TODO: throw critical error and exit
//         return false;
//     }
//
//     auto label = std::make_shared<SpecResLabel>(std::weak_ptr<Resource>(res_ptr));
//     task->label() = label;
//
//     return true;
// }

} // namespace scheduler
} // namespace milvus

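The pass being disabled here picked the GPU whose cache had the most free room. A standalone sketch of that argmax over free memory, with made-up capacity numbers standing in for GpuCacheMgr:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Sketch: pick the GPU with the most free cache memory, as the pass did.
// The free-memory figures are fabricated stand-ins for GpuCacheMgr queries.
int main() {
    std::vector<uint64_t> gpus = {0, 1, 2};
    std::vector<int64_t> all_free_mem = {1 << 20, 4 << 20, 2 << 20};

    auto max_e = std::max_element(all_free_mem.begin(), all_free_mem.end());
    auto best_index = std::distance(all_free_mem.begin(), max_e);
    auto best_device_id = gpus[best_index];
    std::cout << "best gpu: " << best_device_id << std::endl;  // prints 1
}
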
@@ -37,8 +37,8 @@ class LargeSQ8HPass : public Pass {
|
|||
LargeSQ8HPass() = default;
|
||||
|
||||
public:
|
||||
bool
|
||||
Run(const TaskPtr& task) override;
|
||||
// bool
|
||||
// Run(const TaskPtr& task) override;
|
||||
};
|
||||
|
||||
using LargeSQ8HPassPtr = std::shared_ptr<LargeSQ8HPass>;
|
||||
|
|
|
@@ -20,12 +20,12 @@
|
|||
namespace milvus {
|
||||
namespace scheduler {
|
||||
|
||||
void
|
||||
Optimizer::Init() {
|
||||
for (auto& pass : pass_list_) {
|
||||
pass->Init();
|
||||
}
|
||||
}
|
||||
// void
|
||||
// Optimizer::Init() {
|
||||
// for (auto& pass : pass_list_) {
|
||||
// pass->Init();
|
||||
// }
|
||||
// }
|
||||
|
||||
bool
|
||||
Optimizer::Run(const TaskPtr& task) {
|
||||
|
|
|
@@ -38,8 +38,8 @@ class Optimizer {
|
|||
explicit Optimizer(std::vector<PassPtr> pass_list) : pass_list_(std::move(pass_list)) {
|
||||
}
|
||||
|
||||
void
|
||||
Init();
|
||||
// void
|
||||
// Init();
|
||||
|
||||
bool
|
||||
Run(const TaskPtr& task);
|
||||
|
|
|
@@ -34,9 +34,9 @@ namespace scheduler {
|
|||
|
||||
class Pass {
|
||||
public:
|
||||
virtual void
|
||||
Init() {
|
||||
}
|
||||
// virtual void
|
||||
// Init() {
|
||||
// }
|
||||
|
||||
virtual bool
|
||||
Run(const TaskPtr& task) = 0;
|
||||
|
|
|
@@ -55,9 +55,6 @@ XBuildIndexTask::Load(milvus::scheduler::LoadType type, uint8_t device_id) {
|
|||
} else if (type == LoadType::CPU2GPU) {
|
||||
stat = to_index_engine_->CopyToIndexFileToGpu(device_id);
|
||||
type_str = "CPU2GPU";
|
||||
} else if (type == LoadType::GPU2CPU) {
|
||||
stat = to_index_engine_->CopyToCpu();
|
||||
type_str = "GPU2CPU";
|
||||
} else {
|
||||
error_msg = "Wrong load type";
|
||||
stat = Status(SERVER_UNEXPECTED_ERROR, error_msg);
|
||||
|
@@ -137,6 +134,7 @@ XBuildIndexTask::Execute() {
|
|||
ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << table_file.file_id_
|
||||
<< " to to_delete";
|
||||
|
||||
build_index_job->BuildIndexDone(to_index_id_);
|
||||
to_index_engine_ = nullptr;
|
||||
return;
|
||||
}
|
||||
|
@@ -151,6 +149,7 @@ XBuildIndexTask::Execute() {
|
|||
std::cout << "ERROR: failed to build index, index file is too large or gpu memory is not enough"
|
||||
<< std::endl;
|
||||
|
||||
build_index_job->BuildIndexDone(to_index_id_);
|
||||
build_index_job->GetStatus() = Status(DB_ERROR, msg);
|
||||
to_index_engine_ = nullptr;
|
||||
return;
|
||||
|
@@ -161,6 +160,9 @@ XBuildIndexTask::Execute() {
|
|||
meta_ptr->HasTable(file_->table_id_, has_table);
|
||||
if (!has_table) {
|
||||
meta_ptr->DeleteTableFiles(file_->table_id_);
|
||||
|
||||
build_index_job->BuildIndexDone(to_index_id_);
|
||||
build_index_job->GetStatus() = Status(DB_ERROR, "Table has been deleted, discard index file.");
|
||||
to_index_engine_ = nullptr;
|
||||
return;
|
||||
}
|
||||
|
@@ -180,6 +182,7 @@ XBuildIndexTask::Execute() {
|
|||
std::cout << "ERROR: failed to persist index file: " << table_file.location_
|
||||
<< ", possible out of disk space" << std::endl;
|
||||
|
||||
build_index_job->BuildIndexDone(to_index_id_);
|
||||
build_index_job->GetStatus() = Status(DB_ERROR, msg);
|
||||
to_index_engine_ = nullptr;
|
||||
return;
|
||||
|
@@ -199,8 +202,9 @@ XBuildIndexTask::Execute() {
|
|||
ENGINE_LOG_DEBUG << "New index file " << table_file.file_id_ << " of size " << index->PhysicalSize()
|
||||
<< " bytes"
|
||||
<< " from file " << origin_file.file_id_;
|
||||
|
||||
// index->Cache();
|
||||
if (build_index_job->options().insert_cache_immediately_) {
|
||||
index->Cache();
|
||||
}
|
||||
} else {
|
||||
// failed to update meta, mark the new file as to_delete, don't delete old file
|
||||
origin_file.file_type_ = engine::meta::TableFileSchema::TO_INDEX;
|
||||
|
|
|
@@ -253,7 +253,7 @@ XSearchTask::MergeTopkToResultSet(const std::vector<int64_t>& input_ids, const s

    if (result[i].empty()) {
        result_buf.resize(input_k, scheduler::IdDistPair(-1, 0.0));
        uint64_t input_k_multi_i = input_k * i;
        uint64_t input_k_multi_i = topk * i;
        for (auto k = 0; k < input_k; ++k) {
            uint64_t idx = input_k_multi_i + k;
            auto& result_buf_item = result_buf[k];

@@ -266,7 +266,7 @@ XSearchTask::MergeTopkToResultSet(const std::vector<int64_t>& input_ids, const s
    result_buf.resize(output_k, scheduler::IdDistPair(-1, 0.0));
    size_t buf_k = 0, src_k = 0, tar_k = 0;
    uint64_t src_idx;
    uint64_t input_k_multi_i = input_k * i;
    uint64_t input_k_multi_i = topk * i;
    while (buf_k < output_k && src_k < input_k && tar_k < tar_size) {
        src_idx = input_k_multi_i + src_k;
        auto& result_buf_item = result_buf[buf_k];

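This change is likely the fix for #59 in the changelog (incorrect topk result on small datasets): each query's slice in the flat result arrays is laid out with stride topk, even when fewer than topk valid entries (input_k) came back, so row i starts at topk * i, not input_k * i. A small sketch of the corrected indexing; the helper name is hypothetical:

#include <cstdint>
#include <vector>

// Sketch of the stride fix: rows in the flat result arrays are topk apart,
// even if only input_k entries per row are valid (small datasets).
std::vector<int64_t> RowIds(const std::vector<int64_t>& input_ids,
                            uint64_t i, uint64_t input_k, uint64_t topk) {
    std::vector<int64_t> row;
    uint64_t row_start = topk * i;  // was input_k * i, wrong when input_k < topk
    for (uint64_t k = 0; k < input_k; ++k) {
        row.push_back(input_ids[row_start + k]);
    }
    return row;
}
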
@@ -307,71 +307,71 @@ XSearchTask::MergeTopkToResultSet(const std::vector<int64_t>& input_ids, const s
|
|||
}
|
||||
}
|
||||
|
||||
void
|
||||
XSearchTask::MergeTopkArray(std::vector<int64_t>& tar_ids, std::vector<float>& tar_distance, uint64_t& tar_input_k,
|
||||
const std::vector<int64_t>& src_ids, const std::vector<float>& src_distance,
|
||||
uint64_t src_input_k, uint64_t nq, uint64_t topk, bool ascending) {
|
||||
if (src_ids.empty() || src_distance.empty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
uint64_t output_k = std::min(topk, tar_input_k + src_input_k);
|
||||
std::vector<int64_t> id_buf(nq * output_k, -1);
|
||||
std::vector<float> dist_buf(nq * output_k, 0.0);
|
||||
|
||||
uint64_t buf_k, src_k, tar_k;
|
||||
uint64_t src_idx, tar_idx, buf_idx;
|
||||
uint64_t src_input_k_multi_i, tar_input_k_multi_i, buf_k_multi_i;
|
||||
|
||||
for (uint64_t i = 0; i < nq; i++) {
|
||||
src_input_k_multi_i = src_input_k * i;
|
||||
tar_input_k_multi_i = tar_input_k * i;
|
||||
buf_k_multi_i = output_k * i;
|
||||
buf_k = src_k = tar_k = 0;
|
||||
while (buf_k < output_k && src_k < src_input_k && tar_k < tar_input_k) {
|
||||
src_idx = src_input_k_multi_i + src_k;
|
||||
tar_idx = tar_input_k_multi_i + tar_k;
|
||||
buf_idx = buf_k_multi_i + buf_k;
|
||||
if ((ascending && src_distance[src_idx] < tar_distance[tar_idx]) ||
|
||||
(!ascending && src_distance[src_idx] > tar_distance[tar_idx])) {
|
||||
id_buf[buf_idx] = src_ids[src_idx];
|
||||
dist_buf[buf_idx] = src_distance[src_idx];
|
||||
src_k++;
|
||||
} else {
|
||||
id_buf[buf_idx] = tar_ids[tar_idx];
|
||||
dist_buf[buf_idx] = tar_distance[tar_idx];
|
||||
tar_k++;
|
||||
}
|
||||
buf_k++;
|
||||
}
|
||||
|
||||
if (buf_k < output_k) {
|
||||
if (src_k < src_input_k) {
|
||||
while (buf_k < output_k && src_k < src_input_k) {
|
||||
src_idx = src_input_k_multi_i + src_k;
|
||||
buf_idx = buf_k_multi_i + buf_k;
|
||||
id_buf[buf_idx] = src_ids[src_idx];
|
||||
dist_buf[buf_idx] = src_distance[src_idx];
|
||||
src_k++;
|
||||
buf_k++;
|
||||
}
|
||||
} else {
|
||||
while (buf_k < output_k && tar_k < tar_input_k) {
|
||||
tar_idx = tar_input_k_multi_i + tar_k;
|
||||
buf_idx = buf_k_multi_i + buf_k;
|
||||
id_buf[buf_idx] = tar_ids[tar_idx];
|
||||
dist_buf[buf_idx] = tar_distance[tar_idx];
|
||||
tar_k++;
|
||||
buf_k++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tar_ids.swap(id_buf);
|
||||
tar_distance.swap(dist_buf);
|
||||
tar_input_k = output_k;
|
||||
}
|
||||
// void
|
||||
// XSearchTask::MergeTopkArray(std::vector<int64_t>& tar_ids, std::vector<float>& tar_distance, uint64_t& tar_input_k,
|
||||
// const std::vector<int64_t>& src_ids, const std::vector<float>& src_distance,
|
||||
// uint64_t src_input_k, uint64_t nq, uint64_t topk, bool ascending) {
|
||||
// if (src_ids.empty() || src_distance.empty()) {
|
||||
// return;
|
||||
// }
|
||||
//
|
||||
// uint64_t output_k = std::min(topk, tar_input_k + src_input_k);
|
||||
// std::vector<int64_t> id_buf(nq * output_k, -1);
|
||||
// std::vector<float> dist_buf(nq * output_k, 0.0);
|
||||
//
|
||||
// uint64_t buf_k, src_k, tar_k;
|
||||
// uint64_t src_idx, tar_idx, buf_idx;
|
||||
// uint64_t src_input_k_multi_i, tar_input_k_multi_i, buf_k_multi_i;
|
||||
//
|
||||
// for (uint64_t i = 0; i < nq; i++) {
|
||||
// src_input_k_multi_i = src_input_k * i;
|
||||
// tar_input_k_multi_i = tar_input_k * i;
|
||||
// buf_k_multi_i = output_k * i;
|
||||
// buf_k = src_k = tar_k = 0;
|
||||
// while (buf_k < output_k && src_k < src_input_k && tar_k < tar_input_k) {
|
||||
// src_idx = src_input_k_multi_i + src_k;
|
||||
// tar_idx = tar_input_k_multi_i + tar_k;
|
||||
// buf_idx = buf_k_multi_i + buf_k;
|
||||
// if ((ascending && src_distance[src_idx] < tar_distance[tar_idx]) ||
|
||||
// (!ascending && src_distance[src_idx] > tar_distance[tar_idx])) {
|
||||
// id_buf[buf_idx] = src_ids[src_idx];
|
||||
// dist_buf[buf_idx] = src_distance[src_idx];
|
||||
// src_k++;
|
||||
// } else {
|
||||
// id_buf[buf_idx] = tar_ids[tar_idx];
|
||||
// dist_buf[buf_idx] = tar_distance[tar_idx];
|
||||
// tar_k++;
|
||||
// }
|
||||
// buf_k++;
|
||||
// }
|
||||
//
|
||||
// if (buf_k < output_k) {
|
||||
// if (src_k < src_input_k) {
|
||||
// while (buf_k < output_k && src_k < src_input_k) {
|
||||
// src_idx = src_input_k_multi_i + src_k;
|
||||
// buf_idx = buf_k_multi_i + buf_k;
|
||||
// id_buf[buf_idx] = src_ids[src_idx];
|
||||
// dist_buf[buf_idx] = src_distance[src_idx];
|
||||
// src_k++;
|
||||
// buf_k++;
|
||||
// }
|
||||
// } else {
|
||||
// while (buf_k < output_k && tar_k < tar_input_k) {
|
||||
// tar_idx = tar_input_k_multi_i + tar_k;
|
||||
// buf_idx = buf_k_multi_i + buf_k;
|
||||
// id_buf[buf_idx] = tar_ids[tar_idx];
|
||||
// dist_buf[buf_idx] = tar_distance[tar_idx];
|
||||
// tar_k++;
|
||||
// buf_k++;
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// tar_ids.swap(id_buf);
|
||||
// tar_distance.swap(dist_buf);
|
||||
// tar_input_k = output_k;
|
||||
//}
|
||||
|
||||
} // namespace scheduler
|
||||
} // namespace milvus
|
||||
|
|
|
@@ -42,10 +42,10 @@ class XSearchTask : public Task {
|
|||
MergeTopkToResultSet(const std::vector<int64_t>& input_ids, const std::vector<float>& input_distance,
|
||||
uint64_t input_k, uint64_t nq, uint64_t topk, bool ascending, scheduler::ResultSet& result);
|
||||
|
||||
static void
|
||||
MergeTopkArray(std::vector<int64_t>& tar_ids, std::vector<float>& tar_distance, uint64_t& tar_input_k,
|
||||
const std::vector<int64_t>& src_ids, const std::vector<float>& src_distance, uint64_t src_input_k,
|
||||
uint64_t nq, uint64_t topk, bool ascending);
|
||||
// static void
|
||||
// MergeTopkArray(std::vector<int64_t>& tar_ids, std::vector<float>& tar_distance, uint64_t& tar_input_k,
|
||||
// const std::vector<int64_t>& src_ids, const std::vector<float>& src_distance, uint64_t
|
||||
// src_input_k, uint64_t nq, uint64_t topk, bool ascending);
|
||||
|
||||
public:
|
||||
TableFileSchemaPtr file_;
|
||||
|
|
|
@@ -40,8 +40,10 @@ constexpr int64_t BATCH_ROW_COUNT = 100000;
constexpr int64_t NQ = 5;
constexpr int64_t TOP_K = 10;
constexpr int64_t SEARCH_TARGET = 5000; // change this value, result is different
constexpr int64_t ADD_VECTOR_LOOP = 1;
constexpr int64_t ADD_VECTOR_LOOP = 5;
constexpr int64_t SECONDS_EACH_HOUR = 3600;
constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8;
constexpr int32_t N_LIST = 15000;

#define BLOCK_SPLITER std::cout << "===========================================" << std::endl;

@@ -311,8 +313,8 @@ ClientTest::Test(const std::string& address, const std::string& port) {
    std::cout << "Wait until create all index done" << std::endl;
    milvus::IndexParam index;
    index.table_name = TABLE_NAME;
    index.index_type = milvus::IndexType::gpu_ivfsq8;
    index.nlist = 16384;
    index.index_type = INDEX_TYPE;
    index.nlist = N_LIST;
    milvus::Status stat = conn->CreateIndex(index);
    std::cout << "CreateIndex function call status: " << stat.message() << std::endl;

@@ -344,8 +346,8 @@ ClientTest::Test(const std::string& address, const std::string& port) {

    { // delete by range
        milvus::Range rg;
        rg.start_value = CurrentTmDate(-2);
        rg.end_value = CurrentTmDate(-3);
        rg.start_value = CurrentTmDate(-3);
        rg.end_value = CurrentTmDate(-2);

        milvus::Status stat = conn->DeleteByRange(rg, TABLE_NAME);
        std::cout << "DeleteByRange function call status: " << stat.message() << std::endl;

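The swap in the last hunk fixes an inverted range: CurrentTmDate(-3) is the earlier date, so it must be start_value and CurrentTmDate(-2) the end. A guard like the following (a hypothetical helper, not part of the SDK) makes that precondition explicit; it works because CurrentTmDate-style YYYY-MM-DD strings sort lexically in date order:

#include <string>

// Hypothetical guard: a date range is valid only when start <= end.
bool IsValidRange(const std::string& start_value, const std::string& end_value) {
    return start_value <= end_value;
}
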
@@ -88,7 +88,7 @@ Status::MoveFrom(Status& s) {
std::string
Status::message() const {
    if (state_ == nullptr) {
        return "";
        return "OK";
    }

    std::string msg;

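With this change a success status reports "OK" instead of an empty string, so log lines like the CreateIndex and DeleteByRange calls above no longer look truncated on the happy path. A usage sketch with a simplified stand-in Status (not the real milvus::Status implementation):

#include <iostream>
#include <memory>
#include <string>

// Simplified stand-in for the Status class to show the behavior change:
// a default (success) status now reports "OK" rather than "".
class Status {
 public:
    Status() = default;  // success: state_ stays null
    explicit Status(std::string msg) : state_(std::make_unique<std::string>(std::move(msg))) {}
    std::string message() const {
        return state_ == nullptr ? "OK" : *state_;
    }

 private:
    std::unique_ptr<std::string> state_;
};

int main() {
    std::cout << "CreateIndex function call status: " << Status().message() << std::endl;  // "OK"
}
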
@@ -18,6 +18,7 @@
|
|||
#include <sys/stat.h>
|
||||
#include <algorithm>
|
||||
#include <iostream>
|
||||
#include <regex>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
|
@@ -129,12 +130,6 @@ Config::ValidateConfig() {
|
|||
return s;
|
||||
}
|
||||
|
||||
int32_t db_build_index_gpu;
|
||||
s = GetDBConfigBuildIndexGPU(db_build_index_gpu);
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
|
||||
/* metric config */
|
||||
bool metric_enable_monitor;
|
||||
s = GetMetricConfigEnableMonitor(metric_enable_monitor);
|
||||
|
@@ -205,8 +200,14 @@ Config::ValidateConfig() {
|
|||
return s;
|
||||
}
|
||||
|
||||
std::vector<std::string> resource_pool;
|
||||
s = GetResourceConfigPool(resource_pool);
|
||||
std::vector<std::string> search_resources;
|
||||
s = GetResourceConfigSearchResources(search_resources);
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
|
||||
int32_t resource_index_build_device;
|
||||
s = GetResourceConfigIndexBuildDevice(resource_index_build_device);
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
|
@@ -270,11 +271,6 @@ Config::ResetDefaultConfig() {
|
|||
return s;
|
||||
}
|
||||
|
||||
s = SetDBConfigBuildIndexGPU(CONFIG_DB_BUILD_INDEX_GPU_DEFAULT);
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
|
||||
/* metric config */
|
||||
s = SetMetricConfigEnableMonitor(CONFIG_METRIC_ENABLE_MONITOR_DEFAULT);
|
||||
if (!s.ok()) {
|
||||
|
@@ -334,6 +330,11 @@ Config::ResetDefaultConfig() {
|
|||
return s;
|
||||
}
|
||||
|
||||
s = SetResourceConfigIndexBuildDevice(CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT);
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
|
@@ -459,19 +460,6 @@ Config::CheckDBConfigInsertBufferSize(const std::string& value) {
|
|||
return Status::OK();
|
||||
}
|
||||
|
||||
Status
|
||||
Config::CheckDBConfigBuildIndexGPU(const std::string& value) {
|
||||
if (!ValidationUtil::ValidateStringIsNumber(value).ok()) {
|
||||
return Status(SERVER_INVALID_ARGUMENT, "Invalid DB config build_index_gpu: " + value);
|
||||
} else {
|
||||
int32_t gpu_index = std::stoi(value);
|
||||
if (!ValidationUtil::ValidateGpuIndex(gpu_index).ok()) {
|
||||
return Status(SERVER_INVALID_ARGUMENT, "Invalid DB config build_index_gpu: " + value);
|
||||
}
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status
|
||||
Config::CheckMetricConfigEnableMonitor(const std::string& value) {
|
||||
if (!ValidationUtil::ValidateStringIsBool(value).ok()) {
|
||||
|
@@ -544,7 +532,7 @@ Config::CheckCacheConfigGpuCacheCapacity(const std::string& value) {
    } else {
        uint64_t gpu_cache_capacity = std::stoi(value) * GB;
        int gpu_index;
        Status s = GetDBConfigBuildIndexGPU(gpu_index);
        Status s = GetResourceConfigIndexBuildDevice(gpu_index);
        if (!s.ok()) {
            return s;
        }

@@ -616,9 +604,38 @@ Config::CheckResourceConfigMode(const std::string& value) {
}

Status
Config::CheckResourceConfigPool(const std::vector<std::string>& value) {
CheckGpuDevice(const std::string& value) {
    const std::regex pat("gpu(\\d+)");
    std::cmatch m;
    if (!std::regex_match(value.c_str(), m, pat)) {
        return Status(SERVER_INVALID_ARGUMENT, "Invalid gpu device: " + value);
    }

    int32_t gpu_index = std::stoi(value.substr(3));
    if (!ValidationUtil::ValidateGpuIndex(gpu_index).ok()) {
        return Status(SERVER_INVALID_ARGUMENT, "Invalid gpu device: " + value);
    }
    return Status::OK();
}

Status
Config::CheckResourceConfigSearchResources(const std::vector<std::string>& value) {
    if (value.empty()) {
        return Status(SERVER_INVALID_ARGUMENT, "Invalid resource config pool");
        return Status(SERVER_INVALID_ARGUMENT, "Empty resource config search_resources");
    }

    for (auto& gpu_device : value) {
        if (!CheckGpuDevice(gpu_device).ok()) {
            return Status(SERVER_INVALID_ARGUMENT, "Invalid resource config search_resources: " + gpu_device);
        }
    }
    return Status::OK();
}

Status
Config::CheckResourceConfigIndexBuildDevice(const std::string& value) {
    if (!CheckGpuDevice(value).ok()) {
        return Status(SERVER_INVALID_ARGUMENT, "Invalid resource config index_build_device: " + value);
    }
    return Status::OK();
}

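A quick usage sketch for the validator above: only strings of the exact form "gpuN" with an in-range device index pass, so entries like "cpu" or a bare "gpu" are rejected from search_resources. The device-count bound here is an assumption standing in for ValidationUtil::ValidateGpuIndex:

#include <iostream>
#include <regex>
#include <string>
#include <vector>

// Usage sketch of the "gpuN" validation; the device-count bound
// stands in for ValidationUtil::ValidateGpuIndex.
bool IsGpuDevice(const std::string& value, int32_t device_count) {
    static const std::regex pat("gpu(\\d+)");
    std::smatch m;
    if (!std::regex_match(value, m, pat)) {
        return false;  // wrong shape: "cpu", "gpu", "GPU0", ...
    }
    int32_t gpu_index = std::stoi(value.substr(3));
    return gpu_index >= 0 && gpu_index < device_count;
}

int main() {
    const std::vector<std::string> samples = {"gpu0", "gpu1", "cpu", "gpu"};
    for (const auto& s : samples) {
        std::cout << s << " -> " << (IsGpuDevice(s, 2) ? "ok" : "invalid") << std::endl;
    }
}
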
@@ -739,18 +756,6 @@ Config::GetDBConfigInsertBufferSize(int32_t& value) {
|
|||
return Status::OK();
|
||||
}
|
||||
|
||||
Status
|
||||
Config::GetDBConfigBuildIndexGPU(int32_t& value) {
|
||||
std::string str = GetConfigStr(CONFIG_DB, CONFIG_DB_BUILD_INDEX_GPU, CONFIG_DB_BUILD_INDEX_GPU_DEFAULT);
|
||||
Status s = CheckDBConfigBuildIndexGPU(str);
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
|
||||
value = std::stoi(str);
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status
|
||||
Config::GetDBConfigPreloadTable(std::string& value) {
|
||||
value = GetConfigStr(CONFIG_DB, CONFIG_DB_PRELOAD_TABLE);
|
||||
|
@@ -880,10 +885,23 @@ Config::GetResourceConfigMode(std::string& value) {
}

Status
Config::GetResourceConfigPool(std::vector<std::string>& value) {
Config::GetResourceConfigSearchResources(std::vector<std::string>& value) {
    ConfigNode resource_config = GetConfigNode(CONFIG_RESOURCE);
    value = resource_config.GetSequence(CONFIG_RESOURCE_POOL);
    return CheckResourceConfigPool(value);
    value = resource_config.GetSequence(CONFIG_RESOURCE_SEARCH_RESOURCES);
    return CheckResourceConfigSearchResources(value);
}

Status
Config::GetResourceConfigIndexBuildDevice(int32_t& value) {
    std::string str =
        GetConfigStr(CONFIG_RESOURCE, CONFIG_RESOURCE_INDEX_BUILD_DEVICE, CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT);
    Status s = CheckResourceConfigIndexBuildDevice(str);
    if (!s.ok()) {
        return s;
    }

    value = std::stoi(str.substr(3));
    return Status::OK();
}

///////////////////////////////////////////////////////////////////////////////

@@ -999,17 +1017,6 @@ Config::SetDBConfigInsertBufferSize(const std::string& value) {
|
|||
return Status::OK();
|
||||
}
|
||||
|
||||
Status
|
||||
Config::SetDBConfigBuildIndexGPU(const std::string& value) {
|
||||
Status s = CheckDBConfigBuildIndexGPU(value);
|
||||
if (!s.ok()) {
|
||||
return s;
|
||||
}
|
||||
|
||||
SetConfigValueInMem(CONFIG_DB, CONFIG_DB_BUILD_INDEX_GPU, value);
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
/* metric config */
|
||||
Status
|
||||
Config::SetMetricConfigEnableMonitor(const std::string& value) {
|
||||
|
@@ -1135,5 +1142,16 @@ Config::SetResourceConfigMode(const std::string& value) {
    return Status::OK();
}

Status
Config::SetResourceConfigIndexBuildDevice(const std::string& value) {
    Status s = CheckResourceConfigIndexBuildDevice(value);
    if (!s.ok()) {
        return s;
    }

    SetConfigValueInMem(CONFIG_DB, CONFIG_RESOURCE_INDEX_BUILD_DEVICE, value);
    return Status::OK();
}

} // namespace server
} // namespace milvus

@@ -53,8 +53,6 @@ static const char* CONFIG_DB_ARCHIVE_DAYS_THRESHOLD = "archive_days_threshold";
static const char* CONFIG_DB_ARCHIVE_DAYS_THRESHOLD_DEFAULT = "0";
static const char* CONFIG_DB_INSERT_BUFFER_SIZE = "insert_buffer_size";
static const char* CONFIG_DB_INSERT_BUFFER_SIZE_DEFAULT = "4";
static const char* CONFIG_DB_BUILD_INDEX_GPU = "build_index_gpu";
static const char* CONFIG_DB_BUILD_INDEX_GPU_DEFAULT = "0";
static const char* CONFIG_DB_PRELOAD_TABLE = "preload_table";

/* cache config */

@@ -62,7 +60,7 @@ static const char* CONFIG_CACHE = "cache_config";
static const char* CONFIG_CACHE_CPU_CACHE_CAPACITY = "cpu_cache_capacity";
static const char* CONFIG_CACHE_CPU_CACHE_CAPACITY_DEFAULT = "16";
static const char* CONFIG_CACHE_GPU_CACHE_CAPACITY = "gpu_cache_capacity";
static const char* CONFIG_CACHE_GPU_CACHE_CAPACITY_DEFAULT = "0";
static const char* CONFIG_CACHE_GPU_CACHE_CAPACITY_DEFAULT = "4";
static const char* CONFIG_CACHE_CPU_CACHE_THRESHOLD = "cpu_mem_threshold";
static const char* CONFIG_CACHE_CPU_CACHE_THRESHOLD_DEFAULT = "0.85";
static const char* CONFIG_CACHE_GPU_CACHE_THRESHOLD = "gpu_mem_threshold";

@ -91,7 +89,9 @@ static const char* CONFIG_ENGINE_OMP_THREAD_NUM_DEFAULT = "0";
|
|||
static const char* CONFIG_RESOURCE = "resource_config";
|
||||
static const char* CONFIG_RESOURCE_MODE = "mode";
|
||||
static const char* CONFIG_RESOURCE_MODE_DEFAULT = "simple";
|
||||
static const char* CONFIG_RESOURCE_POOL = "resource_pool";
|
||||
static const char* CONFIG_RESOURCE_SEARCH_RESOURCES = "search_resources";
|
||||
static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE = "index_build_device";
|
||||
static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT = "gpu0";
|
||||
|
||||
class Config {
|
||||
public:
|
||||
|
@ -140,8 +140,6 @@ class Config {
|
|||
CheckDBConfigArchiveDaysThreshold(const std::string& value);
|
||||
Status
|
||||
CheckDBConfigInsertBufferSize(const std::string& value);
|
||||
Status
|
||||
CheckDBConfigBuildIndexGPU(const std::string& value);
|
||||
|
||||
/* metric config */
|
||||
Status
|
||||
|
@ -173,7 +171,9 @@ class Config {
|
|||
Status
|
||||
CheckResourceConfigMode(const std::string& value);
|
||||
Status
|
||||
CheckResourceConfigPool(const std::vector<std::string>& value);
|
||||
CheckResourceConfigSearchResources(const std::vector<std::string>& value);
|
||||
Status
|
||||
CheckResourceConfigIndexBuildDevice(const std::string& value);
|
||||
|
||||
std::string
|
||||
GetConfigStr(const std::string& parent_key, const std::string& child_key, const std::string& default_value = "");
|
||||
|
@ -203,8 +203,6 @@ class Config {
|
|||
Status
|
||||
GetDBConfigInsertBufferSize(int32_t& value);
|
||||
Status
|
||||
GetDBConfigBuildIndexGPU(int32_t& value);
|
||||
Status
|
||||
GetDBConfigPreloadTable(std::string& value);
|
||||
|
||||
/* metric config */
|
||||
|
@ -237,7 +235,9 @@ class Config {
|
|||
Status
|
||||
GetResourceConfigMode(std::string& value);
|
||||
Status
|
||||
GetResourceConfigPool(std::vector<std::string>& value);
|
||||
GetResourceConfigSearchResources(std::vector<std::string>& value);
|
||||
Status
|
||||
GetResourceConfigIndexBuildDevice(int32_t& value);
|
||||
|
||||
public:
|
||||
/* server config */
|
||||
|
@ -263,8 +263,6 @@ class Config {
|
|||
SetDBConfigArchiveDaysThreshold(const std::string& value);
|
||||
Status
|
||||
SetDBConfigInsertBufferSize(const std::string& value);
|
||||
Status
|
||||
SetDBConfigBuildIndexGPU(const std::string& value);
|
||||
|
||||
/* metric config */
|
||||
Status
|
||||
|
@ -295,6 +293,8 @@ class Config {
|
|||
/* resource config */
|
||||
Status
|
||||
SetResourceConfigMode(const std::string& value);
|
||||
Status
|
||||
SetResourceConfigIndexBuildDevice(const std::string& value);
|
||||
|
||||
private:
|
||||
std::unordered_map<std::string, std::unordered_map<std::string, std::string>> config_map_;
|
||||
|
|
|
@@ -113,6 +113,14 @@ ConvertTimeRangeToDBDates(const std::vector<::milvus::grpc::Range>& range_array,

    return Status::OK();
}

std::string
TableNotExistMsg(const std::string& table_name) {
    return "Table " + table_name +
           " not exist. Use milvus.has_table to verify whether the table exists. You can also check whether the "
           "table name is correct.";
}

}  // namespace

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

@@ -255,7 +263,7 @@ CreateIndexTask::OnExecute() {
    }

    if (!has_table) {
        return Status(SERVER_TABLE_NOT_EXIST, "Table " + table_name_ + " not exists");
        return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
    }

    auto& grpc_index = index_param_->index();

@@ -348,7 +356,7 @@ DropTableTask::OnExecute() {
    status = DBWrapper::DB()->DescribeTable(table_info);
    if (!status.ok()) {
        if (status.code() == DB_NOT_FOUND) {
            return Status(SERVER_TABLE_NOT_EXIST, "Table " + table_name_ + " not exists");
            return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
        } else {
            return status;
        }

@@ -420,12 +428,14 @@ InsertTask::OnExecute() {
        return status;
    }
    if (insert_param_->row_record_array().empty()) {
        return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Row record array is empty");
        return Status(SERVER_INVALID_ROWRECORD_ARRAY,
                      "The vector array is empty. Make sure you have entered vector records.");
    }

    if (!insert_param_->row_id_array().empty()) {
        if (insert_param_->row_id_array().size() != insert_param_->row_record_array_size()) {
            return Status(SERVER_ILLEGAL_VECTOR_ID, "Size of vector ids is not equal to row record array size");
            return Status(SERVER_ILLEGAL_VECTOR_ID,
                          "The size of the vector ID array must be equal to the size of the vector array.");
        }
    }

@@ -435,7 +445,7 @@ InsertTask::OnExecute() {
    status = DBWrapper::DB()->DescribeTable(table_info);
    if (!status.ok()) {
        if (status.code() == DB_NOT_FOUND) {
            return Status(SERVER_TABLE_NOT_EXIST, "Table " + insert_param_->table_name() + " not exists");
            return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(insert_param_->table_name()));
        } else {
            return status;
        }

@@ -447,13 +457,14 @@ InsertTask::OnExecute() {
    // user already provided ids before; all insert actions require user ids
    if ((table_info.flag_ & engine::meta::FLAG_MASK_HAS_USERID) != 0 && !user_provide_ids) {
        return Status(SERVER_ILLEGAL_VECTOR_ID,
                      "Table vector ids are user defined, please provide id for this batch");
                      "Table vector IDs are user-defined. Please provide IDs for all vectors of this table.");
    }

    // user didn't provide ids before; no need to provide user ids
    if ((table_info.flag_ & engine::meta::FLAG_MASK_NO_USERID) != 0 && user_provide_ids) {
        return Status(SERVER_ILLEGAL_VECTOR_ID,
                      "Table vector ids are auto generated, no need to provide id for this batch");
        return Status(
            SERVER_ILLEGAL_VECTOR_ID,
            "Table vector IDs are auto-generated. All vectors of this table must use auto-generated IDs.");
    }

    rc.RecordSection("check validation");

@@ -470,13 +481,13 @@ InsertTask::OnExecute() {
    // TODO(yk): change to one dimension array or use multiple-thread to copy the data
    for (size_t i = 0; i < insert_param_->row_record_array_size(); i++) {
        if (insert_param_->row_record_array(i).vector_data().empty()) {
            return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Row record array data is empty");
            return Status(SERVER_INVALID_ROWRECORD_ARRAY,
                          "The vector dimension must be equal to the table dimension.");
        }
        uint64_t vec_dim = insert_param_->row_record_array(i).vector_data().size();
        if (vec_dim != table_info.dimension_) {
            ErrorCode error_code = SERVER_INVALID_VECTOR_DIMENSION;
            std::string error_msg = "Invalid row record dimension: " + std::to_string(vec_dim) +
                                    " vs. table dimension:" + std::to_string(table_info.dimension_);
            std::string error_msg = "The vector dimension must be equal to the table dimension.";
            return Status(error_code, error_msg);
        }
        memcpy(&vec_f[i * table_info.dimension_], insert_param_->row_record_array(i).vector_data().data(),

@@ -569,7 +580,7 @@ SearchTask::OnExecute() {
    status = DBWrapper::DB()->DescribeTable(table_info);
    if (!status.ok()) {
        if (status.code() == DB_NOT_FOUND) {
            return Status(SERVER_TABLE_NOT_EXIST, "Table " + table_name_ + " not exists");
            return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
        } else {
            return status;
        }

@@ -587,7 +598,8 @@ SearchTask::OnExecute() {
    }

    if (search_param_->query_record_array().empty()) {
        return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Row record array is empty");
        return Status(SERVER_INVALID_ROWRECORD_ARRAY,
                      "The vector array is empty. Make sure you have entered vector records.");
    }

    // step 4: check date range, and convert to db dates

@@ -609,13 +621,13 @@ SearchTask::OnExecute() {
    std::vector<float> vec_f(record_array_size * table_info.dimension_, 0);
    for (size_t i = 0; i < record_array_size; i++) {
        if (search_param_->query_record_array(i).vector_data().empty()) {
            return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Row record array data is empty");
            return Status(SERVER_INVALID_ROWRECORD_ARRAY,
                          "The vector dimension must be equal to the table dimension.");
        }
        uint64_t query_vec_dim = search_param_->query_record_array(i).vector_data().size();
        if (query_vec_dim != table_info.dimension_) {
            ErrorCode error_code = SERVER_INVALID_VECTOR_DIMENSION;
            std::string error_msg = "Invalid row record dimension: " + std::to_string(query_vec_dim) +
                                    " vs. table dimension:" + std::to_string(table_info.dimension_);
            std::string error_msg = "The vector dimension must be equal to the table dimension.";
            return Status(error_code, error_msg);
        }

@@ -707,7 +719,7 @@ CountTableTask::OnExecute() {
    status = DBWrapper::DB()->GetTableRowCount(table_name_, row_count);
    if (!status.ok()) {
        if (status.code() == DB_NOT_FOUND) {
            return Status(SERVER_TABLE_NOT_EXIST, "Table " + table_name_ + " not exists");
            return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
        } else {
            return status;
        }

@@ -779,7 +791,7 @@ DeleteByRangeTask::OnExecute() {
    status = DBWrapper::DB()->DescribeTable(table_info);
    if (!status.ok()) {
        if (status.code() == DB_NOT_FOUND) {
            return Status(SERVER_TABLE_NOT_EXIST, "Table " + table_name + " not exists");
            return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name));
        } else {
            return status;
        }

@@ -917,7 +929,7 @@ DropIndexTask::OnExecute() {
    }

    if (!has_table) {
        return Status(SERVER_TABLE_NOT_EXIST, "Table " + table_name_ + " not exists");
        return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
    }

    // step 2: check table existence
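// The DescribeTable() + DB_NOT_FOUND check above is now repeated across CreateIndexTask,
// InsertTask, SearchTask, CountTableTask and DeleteByRangeTask. A hypothetical shared
// helper (a sketch, not part of this change) could fold the pattern into one place:
//
//     Status
//     CheckTableExists(const std::string& table_name, engine::meta::TableSchema& table_info) {
//         table_info.table_id_ = table_name;
//         Status status = DBWrapper::DB()->DescribeTable(table_info);
//         if (!status.ok()) {
//             if (status.code() == DB_NOT_FOUND) {
//                 return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name));
//             }
//             return status;
//         }
//         return Status::OK();
//     }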
@@ -88,7 +88,7 @@ Status::MoveFrom(Status& s) {
std::string
Status::message() const {
    if (state_ == nullptr) {
        return "";
        return "OK";
    }

    std::string msg;
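// With this change, a default-constructed (OK) Status reports "OK" instead of an empty
// string. A minimal usage sketch, assuming only the Status API shown in this diff:
//
//     milvus::Status s = milvus::Status::OK();
//     std::cout << s.message() << std::endl;  // prints "OK" (state_ is nullptr)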
@@ -37,14 +37,15 @@ Status
ValidationUtil::ValidateTableName(const std::string& table_name) {
    // Table name shouldn't be empty.
    if (table_name.empty()) {
        std::string msg = "Empty table name";
        std::string msg = "Table name should not be empty.";
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_TABLE_NAME, msg);
    }

    std::string invalid_msg = "Invalid table name: " + table_name + ". ";
    // Table name length shouldn't exceed TABLE_NAME_SIZE_LIMIT.
    if (table_name.size() > TABLE_NAME_SIZE_LIMIT) {
        std::string msg = "Table name size exceed the limitation";
        std::string msg = invalid_msg + "The length of a table name must be less than 255 characters.";
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_TABLE_NAME, msg);
    }

@@ -52,7 +53,7 @@ ValidationUtil::ValidateTableName(const std::string& table_name) {
    // Table name's first character should be an underscore or letter.
    char first_char = table_name[0];
    if (first_char != '_' && std::isalpha(first_char) == 0) {
        std::string msg = "Table name first character isn't underscore or character";
        std::string msg = invalid_msg + "The first character of a table name must be an underscore or letter.";
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_TABLE_NAME, msg);
    }

@@ -61,7 +62,7 @@ ValidationUtil::ValidateTableName(const std::string& table_name) {
    for (int64_t i = 1; i < table_name_size; ++i) {
        char name_char = table_name[i];
        if (name_char != '_' && std::isalnum(name_char) == 0) {
            std::string msg = "Table name character isn't underscore or alphanumber";
            std::string msg = invalid_msg + "Table name can only contain numbers, letters, and underscores.";
            SERVER_LOG_ERROR << msg;
            return Status(SERVER_INVALID_TABLE_NAME, msg);
        }

@@ -72,12 +73,9 @@ ValidationUtil::ValidateTableName(const std::string& table_name) {

Status
ValidationUtil::ValidateTableDimension(int64_t dimension) {
    if (dimension <= 0) {
        std::string msg = "Dimension value should be greater than 0";
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_VECTOR_DIMENSION, msg);
    } else if (dimension > TABLE_DIMENSION_LIMIT) {
        std::string msg = "Table dimension excceed the limitation: " + std::to_string(TABLE_DIMENSION_LIMIT);
    if (dimension <= 0 || dimension > TABLE_DIMENSION_LIMIT) {
        std::string msg = "Invalid table dimension: " + std::to_string(dimension) + ". " +
                          "The table dimension must be within the range of 1 ~ 16384.";
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_VECTOR_DIMENSION, msg);
    } else {

@@ -89,18 +87,29 @@ Status
ValidationUtil::ValidateTableIndexType(int32_t index_type) {
    int engine_type = static_cast<int>(engine::EngineType(index_type));
    if (engine_type <= 0 || engine_type > static_cast<int>(engine::EngineType::MAX_VALUE)) {
        std::string msg = "Invalid index type: " + std::to_string(index_type);
        std::string msg = "Invalid index type: " + std::to_string(index_type) + ". " +
                          "Make sure the index type is in the IndexType list.";
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_INDEX_TYPE, msg);
    }

#ifndef CUSTOMIZATION
    // special case: the hybrid index is only available in the customized faiss library
    if (engine_type == static_cast<int>(engine::EngineType::FAISS_IVFSQ8H)) {
        std::string msg = "Unsupported index type: " + std::to_string(index_type);
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_INDEX_TYPE, msg);
    }
#endif

    return Status::OK();
}

Status
ValidationUtil::ValidateTableIndexNlist(int32_t nlist) {
    if (nlist <= 0) {
        std::string msg = "nlist value should be greater than 0";
        std::string msg =
            "Invalid index nlist: " + std::to_string(nlist) + ". " + "The index nlist must be greater than 0.";
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_INDEX_NLIST, msg);
    }

@@ -111,7 +120,9 @@ ValidationUtil::ValidateTableIndexNlist(int32_t nlist) {
Status
ValidationUtil::ValidateTableIndexFileSize(int64_t index_file_size) {
    if (index_file_size <= 0 || index_file_size > INDEX_FILE_SIZE_LIMIT) {
        std::string msg = "Invalid index file size: " + std::to_string(index_file_size);
        std::string msg = "Invalid index file size: " + std::to_string(index_file_size) + ". " +
                          "The index file size must be within the range of 1 ~ " +
                          std::to_string(INDEX_FILE_SIZE_LIMIT) + ".";
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_INDEX_FILE_SIZE, msg);
    }

@@ -123,7 +134,8 @@ Status
ValidationUtil::ValidateTableIndexMetricType(int32_t metric_type) {
    if (metric_type != static_cast<int32_t>(engine::MetricType::L2) &&
        metric_type != static_cast<int32_t>(engine::MetricType::IP)) {
        std::string msg = "Invalid metric type: " + std::to_string(metric_type);
        std::string msg = "Invalid index metric type: " + std::to_string(metric_type) + ". " +
                          "Make sure the metric type is either MetricType.L2 or MetricType.IP.";
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_INDEX_METRIC_TYPE, msg);
    }

@@ -133,7 +145,8 @@ ValidationUtil::ValidateTableIndexMetricType(int32_t metric_type) {
Status
ValidationUtil::ValidateSearchTopk(int64_t top_k, const engine::meta::TableSchema& table_schema) {
    if (top_k <= 0 || top_k > 2048) {
        std::string msg = "Invalid top k value: " + std::to_string(top_k) + ", rational range [1, 2048]";
        std::string msg =
            "Invalid topk: " + std::to_string(top_k) + ". " + "The topk must be within the range of 1 ~ 2048.";
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_TOPK, msg);
    }

@@ -144,8 +157,8 @@ ValidationUtil::ValidateSearchTopk(int64_t top_k, const engine::meta::TableSchem
Status
ValidationUtil::ValidateSearchNprobe(int64_t nprobe, const engine::meta::TableSchema& table_schema) {
    if (nprobe <= 0 || nprobe > table_schema.nlist_) {
        std::string msg = "Invalid nprobe value: " + std::to_string(nprobe) + ", rational range [1, " +
                          std::to_string(table_schema.nlist_) + "]";
        std::string msg = "Invalid nprobe: " + std::to_string(nprobe) + ". " +
                          "The nprobe must be within the range of 1 ~ index nlist.";
        SERVER_LOG_ERROR << msg;
        return Status(SERVER_INVALID_NPROBE, msg);
    }
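// The rules enforced by ValidateTableName() above (non-empty, bounded length, leading
// underscore/letter, then only letters, digits, and underscores) amount to a simple
// character scan. A standalone sketch of the same predicate, assuming nothing beyond
// the C++ standard library -- names here are illustrative, not part of the diff:
//
//     #include <cctype>
//     #include <string>
//
//     bool
//     IsValidTableName(const std::string& name, size_t size_limit = 255) {
//         if (name.empty() || name.size() > size_limit) {
//             return false;
//         }
//         if (name[0] != '_' && std::isalpha(static_cast<unsigned char>(name[0])) == 0) {
//             return false;
//         }
//         for (size_t i = 1; i < name.size(); ++i) {
//             if (name[i] != '_' && std::isalnum(static_cast<unsigned char>(name[i])) == 0) {
//                 return false;
//             }
//         }
//         return true;
//     }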
@@ -45,7 +45,7 @@ KnowhereResource::Initialize() {
    server::Config& config = server::Config::GetInstance();

    int32_t build_index_gpu;
    s = config.GetDBConfigBuildIndexGPU(build_index_gpu);
    s = config.GetResourceConfigIndexBuildDevice(build_index_gpu);
    if (!s.ok())
        return s;

@@ -53,7 +53,7 @@ KnowhereResource::Initialize() {

    // get search gpu resource
    std::vector<std::string> pool;
    s = config.GetResourceConfigPool(pool);
    s = config.GetResourceConfigSearchResources(pool);
    if (!s.ok())
        return s;
@@ -25,6 +25,7 @@
#include "knowhere/common/BinarySet.h"
#include "knowhere/common/Config.h"
#include "knowhere/index/vector_index/Quantizer.h"
#include "utils/Log.h"
#include "utils/Status.h"

namespace milvus {

@@ -101,6 +102,7 @@ class VecIndex : public cache::DataObj {
    ////////////////
    virtual knowhere::QuantizerPtr
    LoadQuantizer(const Config& conf) {
        ENGINE_LOG_ERROR << "LoadQuantizer virtual function called.";
        return nullptr;
    }
@@ -1,5 +1,5 @@
#!/bin/bash

sudo apt-get install -y gfortran libmysqlclient-dev mysql-client libcurl4-openssl-dev
sudo apt-get install -y gfortran libmysqlclient-dev mysql-client libcurl4-openssl-dev libboost-system-dev libboost-filesystem-dev libboost-serialization-dev

sudo ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
@@ -92,8 +92,8 @@ set(common_files

set(unittest_libs
        sqlite
        boost_system_static
        boost_filesystem_static
        libboost_system.a
        libboost_filesystem.a
        lz4
        mysqlpp
        yaml-cpp
@@ -308,6 +312,12 @@ TEST_F(DBTest, SEARCH_TEST) {
        ASSERT_TRUE(stat.ok());
    }

    {
        milvus::engine::QueryResults large_nq_results;
        stat = db_->Query(TABLE_NAME, k, 200, 10, xq.data(), large_nq_results);
        ASSERT_TRUE(stat.ok());
    }

    {  // search by specifying index files
        milvus::engine::meta::DatesT dates;
        std::vector<std::string> file_ids = {"1", "2", "3", "4", "5", "6"};

@@ -315,6 +321,8 @@ TEST_F(DBTest, SEARCH_TEST) {
        stat = db_->Query(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, results);
        ASSERT_TRUE(stat.ok());
    }

#endif
}

@@ -412,6 +420,16 @@ TEST_F(DBTest, INDEX_TEST) {
    stat = db_->CreateIndex(table_info.table_id_, index);
    ASSERT_TRUE(stat.ok());

    index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
    stat = db_->CreateIndex(table_info.table_id_, index);
    ASSERT_TRUE(stat.ok());

#ifdef CUSTOMIZATION
    index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8H;
    stat = db_->CreateIndex(table_info.table_id_, index);
    ASSERT_TRUE(stat.ok());
#endif

    milvus::engine::TableIndex index_out;
    stat = db_->DescribeIndex(table_info.table_id_, index_out);
    ASSERT_TRUE(stat.ok());
@@ -108,15 +108,16 @@ TEST_F(EngineTest, ENGINE_IMPL_TEST) {
    ASSERT_EQ(engine_ptr->Dimension(), dimension);
    ASSERT_EQ(engine_ptr->Count(), ids.size());

//    status = engine_ptr->CopyToGpu(0);
//    //ASSERT_TRUE(status.ok());
//
//    auto new_engine = engine_ptr->Clone();
//    ASSERT_EQ(new_engine->Dimension(), dimension);
//    ASSERT_EQ(new_engine->Count(), ids.size());
//    status = new_engine->CopyToCpu();
//    //ASSERT_TRUE(status.ok());
//
//    auto engine_build = new_engine->BuildIndex("/tmp/milvus_index_2", engine::EngineType::FAISS_IVFSQ8);
//    //ASSERT_TRUE(status.ok());
    status = engine_ptr->CopyToGpu(0, true);
    status = engine_ptr->CopyToGpu(0, false);
    //ASSERT_TRUE(status.ok());

    auto new_engine = engine_ptr->Clone();
    ASSERT_EQ(new_engine->Dimension(), dimension);
    ASSERT_EQ(new_engine->Count(), ids.size());
    status = new_engine->CopyToCpu();
    //ASSERT_TRUE(status.ok());

    auto engine_build = new_engine->BuildIndex("/tmp/milvus_index_2", milvus::engine::EngineType::FAISS_IVFSQ8);
    //ASSERT_TRUE(status.ok());
}
@@ -30,6 +30,7 @@ namespace ms = milvus::scheduler;
void
BuildResult(std::vector<int64_t>& output_ids,
            std::vector<float>& output_distance,
            uint64_t input_k,
            uint64_t topk,
            uint64_t nq,
            bool ascending) {

@@ -39,9 +40,15 @@ BuildResult(std::vector<int64_t>& output_ids,
    output_distance.resize(nq * topk);

    for (uint64_t i = 0; i < nq; i++) {
        for (uint64_t j = 0; j < topk; j++) {
        // insert valid items
        for (uint64_t j = 0; j < input_k; j++) {
            output_ids[i * topk + j] = (int64_t)(drand48() * 100000);
            output_distance[i * topk + j] = ascending ? (j + drand48()) : ((topk - j) + drand48());
            output_distance[i * topk + j] = ascending ? (j + drand48()) : ((input_k - j) + drand48());
        }
        // insert invalid items
        for (uint64_t j = input_k; j < topk; j++) {
            output_ids[i * topk + j] = -1;
            output_distance[i * topk + j] = -1.0;
        }
    }
}

@@ -83,23 +90,32 @@ CheckTopkResult(const std::vector<int64_t>& input_ids_1,
    ASSERT_EQ(input_ids_1.size(), input_distance_1.size());
    ASSERT_EQ(input_ids_2.size(), input_distance_2.size());

    uint64_t input_k1 = input_ids_1.size() / nq;
    uint64_t input_k2 = input_ids_2.size() / nq;

    for (int64_t i = 0; i < nq; i++) {
        std::vector<float>
            src_vec(input_distance_1.begin() + i * input_k1, input_distance_1.begin() + (i + 1) * input_k1);
            src_vec(input_distance_1.begin() + i * topk, input_distance_1.begin() + (i + 1) * topk);
        src_vec.insert(src_vec.end(),
                       input_distance_2.begin() + i * input_k2,
                       input_distance_2.begin() + (i + 1) * input_k2);
                       input_distance_2.begin() + i * topk,
                       input_distance_2.begin() + (i + 1) * topk);
        if (ascending) {
            std::sort(src_vec.begin(), src_vec.end());
        } else {
            std::sort(src_vec.begin(), src_vec.end(), std::greater<float>());
        }

        uint64_t n = std::min(topk, input_k1 + input_k2);
        // erase invalid items
        std::vector<float>::iterator iter;
        for (iter = src_vec.begin(); iter != src_vec.end();) {
            if (*iter < 0.0)
                iter = src_vec.erase(iter);
            else
                ++iter;
        }

        uint64_t n = std::min(topk, result[i].size());
        for (uint64_t j = 0; j < n; j++) {
            if (result[i][j].first < 0) {
                continue;
            }
            if (src_vec[j] != result[i][j].second) {
                std::cout << src_vec[j] << " " << result[i][j].second << std::endl;
            }

@@ -110,12 +126,13 @@ CheckTopkResult(const std::vector<int64_t>& input_ids_1,

}  // namespace

void MergeTopkToResultSetTest(uint64_t topk_1, uint64_t topk_2, uint64_t nq, uint64_t topk, bool ascending) {
void
MergeTopkToResultSetTest(uint64_t topk_1, uint64_t topk_2, uint64_t nq, uint64_t topk, bool ascending) {
    std::vector<int64_t> ids1, ids2;
    std::vector<float> dist1, dist2;
    ms::ResultSet result;
    BuildResult(ids1, dist1, topk_1, nq, ascending);
    BuildResult(ids2, dist2, topk_2, nq, ascending);
    BuildResult(ids1, dist1, topk_1, topk, nq, ascending);
    BuildResult(ids2, dist2, topk_2, topk, nq, ascending);
    ms::XSearchTask::MergeTopkToResultSet(ids1, dist1, topk_1, nq, topk, ascending, result);
    ms::XSearchTask::MergeTopkToResultSet(ids2, dist2, topk_2, nq, topk, ascending, result);
    CheckTopkResult(ids1, dist1, ids2, dist2, topk, nq, ascending, result);

@@ -134,70 +151,72 @@ TEST(DBSearchTest, MERGE_RESULT_SET_TEST) {
    MergeTopkToResultSetTest(TOP_K, TOP_K, NQ, TOP_K, false);

    /* test3, id1/dist1 small topk */
    MergeTopkToResultSetTest(TOP_K/2, TOP_K, NQ, TOP_K, true);
    MergeTopkToResultSetTest(TOP_K/2, TOP_K, NQ, TOP_K, false);
    MergeTopkToResultSetTest(TOP_K / 2, TOP_K, NQ, TOP_K, true);
    MergeTopkToResultSetTest(TOP_K / 2, TOP_K, NQ, TOP_K, false);

    /* test4, id1/dist1 small topk, id2/dist2 small topk */
    MergeTopkToResultSetTest(TOP_K/2, TOP_K/3, NQ, TOP_K, true);
    MergeTopkToResultSetTest(TOP_K/2, TOP_K/3, NQ, TOP_K, false);
    MergeTopkToResultSetTest(TOP_K / 2, TOP_K / 3, NQ, TOP_K, true);
    MergeTopkToResultSetTest(TOP_K / 2, TOP_K / 3, NQ, TOP_K, false);
}

void MergeTopkArrayTest(uint64_t topk_1, uint64_t topk_2, uint64_t nq, uint64_t topk, bool ascending) {
    std::vector<int64_t> ids1, ids2;
    std::vector<float> dist1, dist2;
    ms::ResultSet result;
    BuildResult(ids1, dist1, topk_1, nq, ascending);
    BuildResult(ids2, dist2, topk_2, nq, ascending);
    uint64_t result_topk = std::min(topk, topk_1 + topk_2);
    ms::XSearchTask::MergeTopkArray(ids1, dist1, topk_1, ids2, dist2, topk_2, nq, topk, ascending);
    if (ids1.size() != result_topk * nq) {
        std::cout << ids1.size() << " " << result_topk * nq << std::endl;
    }
    ASSERT_TRUE(ids1.size() == result_topk * nq);
    ASSERT_TRUE(dist1.size() == result_topk * nq);
    for (uint64_t i = 0; i < nq; i++) {
        for (uint64_t k = 1; k < result_topk; k++) {
            if (ascending) {
                if (dist1[i * result_topk + k] < dist1[i * result_topk + k - 1]) {
                    std::cout << dist1[i * result_topk + k - 1] << " " << dist1[i * result_topk + k] << std::endl;
                }
                ASSERT_TRUE(dist1[i * result_topk + k] >= dist1[i * result_topk + k - 1]);
            } else {
                if (dist1[i * result_topk + k] > dist1[i * result_topk + k - 1]) {
                    std::cout << dist1[i * result_topk + k - 1] << " " << dist1[i * result_topk + k] << std::endl;
                }
                ASSERT_TRUE(dist1[i * result_topk + k] <= dist1[i * result_topk + k - 1]);
            }
        }
    }
}
//void MergeTopkArrayTest(uint64_t topk_1, uint64_t topk_2, uint64_t nq, uint64_t topk, bool ascending) {
//    std::vector<int64_t> ids1, ids2;
//    std::vector<float> dist1, dist2;
//    ms::ResultSet result;
//    BuildResult(ids1, dist1, topk_1, topk, nq, ascending);
//    BuildResult(ids2, dist2, topk_2, topk, nq, ascending);
//    uint64_t result_topk = std::min(topk, topk_1 + topk_2);
//    ms::XSearchTask::MergeTopkArray(ids1, dist1, topk_1, ids2, dist2, topk_2, nq, topk, ascending);
//    if (ids1.size() != result_topk * nq) {
//        std::cout << ids1.size() << " " << result_topk * nq << std::endl;
//    }
//    ASSERT_TRUE(ids1.size() == result_topk * nq);
//    ASSERT_TRUE(dist1.size() == result_topk * nq);
//    for (uint64_t i = 0; i < nq; i++) {
//        for (uint64_t k = 1; k < result_topk; k++) {
//            float f0 = dist1[i * topk + k - 1];
//            float f1 = dist1[i * topk + k];
//            if (ascending) {
//                if (f1 < f0) {
//                    std::cout << f0 << " " << f1 << std::endl;
//                }
//                ASSERT_TRUE(f1 >= f0);
//            } else {
//                if (f1 > f0) {
//                    std::cout << f0 << " " << f1 << std::endl;
//                }
//                ASSERT_TRUE(f1 <= f0);
//            }
//        }
//    }
//}

TEST(DBSearchTest, MERGE_ARRAY_TEST) {
    uint64_t NQ = 15;
    uint64_t TOP_K = 64;
//TEST(DBSearchTest, MERGE_ARRAY_TEST) {
//    uint64_t NQ = 15;
//    uint64_t TOP_K = 64;
//
//    /* test1, id1/dist1 valid, id2/dist2 empty */
//    MergeTopkArrayTest(TOP_K, 0, NQ, TOP_K, true);
//    MergeTopkArrayTest(TOP_K, 0, NQ, TOP_K, false);
//    MergeTopkArrayTest(0, TOP_K, NQ, TOP_K, true);
//    MergeTopkArrayTest(0, TOP_K, NQ, TOP_K, false);

    /* test1, id1/dist1 valid, id2/dist2 empty */
    MergeTopkArrayTest(TOP_K, 0, NQ, TOP_K, true);
    MergeTopkArrayTest(TOP_K, 0, NQ, TOP_K, false);
    MergeTopkArrayTest(0, TOP_K, NQ, TOP_K, true);
    MergeTopkArrayTest(0, TOP_K, NQ, TOP_K, false);

    /* test2, id1/dist1 valid, id2/dist2 valid */
    MergeTopkArrayTest(TOP_K, TOP_K, NQ, TOP_K, true);
    MergeTopkArrayTest(TOP_K, TOP_K, NQ, TOP_K, false);

    /* test3, id1/dist1 small topk */
    MergeTopkArrayTest(TOP_K/2, TOP_K, NQ, TOP_K, true);
    MergeTopkArrayTest(TOP_K/2, TOP_K, NQ, TOP_K, false);
    MergeTopkArrayTest(TOP_K, TOP_K/2, NQ, TOP_K, true);
    MergeTopkArrayTest(TOP_K, TOP_K/2, NQ, TOP_K, false);

    /* test4, id1/dist1 small topk, id2/dist2 small topk */
    MergeTopkArrayTest(TOP_K/2, TOP_K/3, NQ, TOP_K, true);
    MergeTopkArrayTest(TOP_K/2, TOP_K/3, NQ, TOP_K, false);
    MergeTopkArrayTest(TOP_K/3, TOP_K/2, NQ, TOP_K, true);
    MergeTopkArrayTest(TOP_K/3, TOP_K/2, NQ, TOP_K, false);
}
//    /* test2, id1/dist1 valid, id2/dist2 valid */
//    MergeTopkArrayTest(TOP_K, TOP_K, NQ, TOP_K, true);
//    MergeTopkArrayTest(TOP_K, TOP_K, NQ, TOP_K, false);
//
//    /* test3, id1/dist1 small topk */
//    MergeTopkArrayTest(TOP_K/2, TOP_K, NQ, TOP_K, true);
//    MergeTopkArrayTest(TOP_K/2, TOP_K, NQ, TOP_K, false);
//    MergeTopkArrayTest(TOP_K, TOP_K/2, NQ, TOP_K, true);
//    MergeTopkArrayTest(TOP_K, TOP_K/2, NQ, TOP_K, false);
//
//    /* test4, id1/dist1 small topk, id2/dist2 small topk */
//    MergeTopkArrayTest(TOP_K/2, TOP_K/3, NQ, TOP_K, true);
//    MergeTopkArrayTest(TOP_K/2, TOP_K/3, NQ, TOP_K, false);
//    MergeTopkArrayTest(TOP_K/3, TOP_K/2, NQ, TOP_K, true);
//    MergeTopkArrayTest(TOP_K/3, TOP_K/2, NQ, TOP_K, false);
//}

TEST(DBSearchTest, REDUCE_PERF_TEST) {
    int32_t index_file_num = 478; /* sift1B dataset, index files num */

@@ -206,8 +225,8 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) {
    std::vector<int32_t> thread_vec = {4, 8};
    std::vector<int32_t> nq_vec = {1, 10, 100};
    std::vector<int32_t> topk_vec = {1, 4, 16, 64};
    int32_t NQ = nq_vec[nq_vec.size()-1];
    int32_t TOPK = topk_vec[topk_vec.size()-1];
    int32_t NQ = nq_vec[nq_vec.size() - 1];
    int32_t TOPK = topk_vec[topk_vec.size() - 1];

    std::vector<std::vector<int64_t>> id_vec;
    std::vector<std::vector<float>> dist_vec;

@@ -217,7 +236,7 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) {

    /* generate testing data */
    for (i = 0; i < index_file_num; i++) {
        BuildResult(input_ids, input_distance, TOPK, NQ, ascending);
        BuildResult(input_ids, input_distance, TOPK, TOPK, NQ, ascending);
        id_vec.push_back(input_ids);
        dist_vec.push_back(input_distance);
    }

@@ -237,7 +256,7 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) {
    }

    std::string str1 = "Method-1 " + std::to_string(max_thread_num) + " " +
            std::to_string(nq) + " " + std::to_string(top_k);
                       std::to_string(nq) + " " + std::to_string(top_k);
    milvus::TimeRecorder rc1(str1);

    ///////////////////////////////////////////////////////////////////////////////////////

@@ -255,114 +274,114 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) {

    rc1.RecordSection("reduce done");

    ///////////////////////////////////////////////////////////////////////////////////////
    /* method-2 */
    std::vector<std::vector<int64_t>> id_vec_2(index_file_num);
    std::vector<std::vector<float>> dist_vec_2(index_file_num);
    std::vector<uint64_t> k_vec_2(index_file_num);
    for (i = 0; i < index_file_num; i++) {
        CopyResult(id_vec_2[i], dist_vec_2[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
        k_vec_2[i] = top_k;
    }

    std::string str2 = "Method-2 " + std::to_string(max_thread_num) + " " +
                       std::to_string(nq) + " " + std::to_string(top_k);
    milvus::TimeRecorder rc2(str2);

    for (step = 1; step < index_file_num; step *= 2) {
        for (i = 0; i + step < index_file_num; i += step * 2) {
            ms::XSearchTask::MergeTopkArray(id_vec_2[i], dist_vec_2[i], k_vec_2[i],
                                            id_vec_2[i + step], dist_vec_2[i + step], k_vec_2[i + step],
                                            nq, top_k, ascending);
        }
    }
    ms::XSearchTask::MergeTopkToResultSet(id_vec_2[0],
                                          dist_vec_2[0],
                                          k_vec_2[0],
                                          nq,
                                          top_k,
                                          ascending,
                                          final_result_2);
    ASSERT_EQ(final_result_2.size(), nq);

    rc2.RecordSection("reduce done");

    for (i = 0; i < nq; i++) {
        ASSERT_EQ(final_result[i].size(), final_result_2[i].size());
        for (k = 0; k < final_result[i].size(); k++) {
            if (final_result[i][k].first != final_result_2[i][k].first) {
                std::cout << i << " " << k << std::endl;
            }
            ASSERT_EQ(final_result[i][k].first, final_result_2[i][k].first);
            ASSERT_EQ(final_result[i][k].second, final_result_2[i][k].second);
        }
    }

    ///////////////////////////////////////////////////////////////////////////////////////
    /* method-3 parallel */
    std::vector<std::vector<int64_t>> id_vec_3(index_file_num);
    std::vector<std::vector<float>> dist_vec_3(index_file_num);
    std::vector<uint64_t> k_vec_3(index_file_num);
    for (i = 0; i < index_file_num; i++) {
        CopyResult(id_vec_3[i], dist_vec_3[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
        k_vec_3[i] = top_k;
    }

    std::string str3 = "Method-3 " + std::to_string(max_thread_num) + " " +
                       std::to_string(nq) + " " + std::to_string(top_k);
    milvus::TimeRecorder rc3(str3);

    for (step = 1; step < index_file_num; step *= 2) {
        for (i = 0; i + step < index_file_num; i += step * 2) {
            threads_list.push_back(
                threadPool.enqueue(ms::XSearchTask::MergeTopkArray,
                                   std::ref(id_vec_3[i]),
                                   std::ref(dist_vec_3[i]),
                                   std::ref(k_vec_3[i]),
                                   std::ref(id_vec_3[i + step]),
                                   std::ref(dist_vec_3[i + step]),
                                   std::ref(k_vec_3[i + step]),
                                   nq,
                                   top_k,
                                   ascending));
        }

        while (threads_list.size() > 0) {
            int nready = 0;
            for (auto it = threads_list.begin(); it != threads_list.end(); it = it) {
                auto &p = *it;
                std::chrono::milliseconds span(0);
                if (p.wait_for(span) == std::future_status::ready) {
                    threads_list.erase(it++);
                    ++nready;
                } else {
                    ++it;
                }
            }

            if (nready == 0) {
                std::this_thread::yield();
            }
        }
    }
    ms::XSearchTask::MergeTopkToResultSet(id_vec_3[0],
                                          dist_vec_3[0],
                                          k_vec_3[0],
                                          nq,
                                          top_k,
                                          ascending,
                                          final_result_3);
    ASSERT_EQ(final_result_3.size(), nq);

    rc3.RecordSection("reduce done");

    for (i = 0; i < nq; i++) {
        ASSERT_EQ(final_result[i].size(), final_result_3[i].size());
        for (k = 0; k < final_result[i].size(); k++) {
            ASSERT_EQ(final_result[i][k].first, final_result_3[i][k].first);
            ASSERT_EQ(final_result[i][k].second, final_result_3[i][k].second);
        }
    }
//    ///////////////////////////////////////////////////////////////////////////////////////
//    /* method-2 */
//    std::vector<std::vector<int64_t>> id_vec_2(index_file_num);
//    std::vector<std::vector<float>> dist_vec_2(index_file_num);
//    std::vector<uint64_t> k_vec_2(index_file_num);
//    for (i = 0; i < index_file_num; i++) {
//        CopyResult(id_vec_2[i], dist_vec_2[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
//        k_vec_2[i] = top_k;
//    }
//
//    std::string str2 = "Method-2 " + std::to_string(max_thread_num) + " " +
//                       std::to_string(nq) + " " + std::to_string(top_k);
//    milvus::TimeRecorder rc2(str2);
//
//    for (step = 1; step < index_file_num; step *= 2) {
//        for (i = 0; i + step < index_file_num; i += step * 2) {
//            ms::XSearchTask::MergeTopkArray(id_vec_2[i], dist_vec_2[i], k_vec_2[i],
//                                            id_vec_2[i + step], dist_vec_2[i + step], k_vec_2[i + step],
//                                            nq, top_k, ascending);
//        }
//    }
//    ms::XSearchTask::MergeTopkToResultSet(id_vec_2[0],
//                                          dist_vec_2[0],
//                                          k_vec_2[0],
//                                          nq,
//                                          top_k,
//                                          ascending,
//                                          final_result_2);
//    ASSERT_EQ(final_result_2.size(), nq);
//
//    rc2.RecordSection("reduce done");
//
//    for (i = 0; i < nq; i++) {
//        ASSERT_EQ(final_result[i].size(), final_result_2[i].size());
//        for (k = 0; k < final_result[i].size(); k++) {
//            if (final_result[i][k].first != final_result_2[i][k].first) {
//                std::cout << i << " " << k << std::endl;
//            }
//            ASSERT_EQ(final_result[i][k].first, final_result_2[i][k].first);
//            ASSERT_EQ(final_result[i][k].second, final_result_2[i][k].second);
//        }
//    }
//
//    ///////////////////////////////////////////////////////////////////////////////////////
//    /* method-3 parallel */
//    std::vector<std::vector<int64_t>> id_vec_3(index_file_num);
//    std::vector<std::vector<float>> dist_vec_3(index_file_num);
//    std::vector<uint64_t> k_vec_3(index_file_num);
//    for (i = 0; i < index_file_num; i++) {
//        CopyResult(id_vec_3[i], dist_vec_3[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
//        k_vec_3[i] = top_k;
//    }
//
//    std::string str3 = "Method-3 " + std::to_string(max_thread_num) + " " +
//                       std::to_string(nq) + " " + std::to_string(top_k);
//    milvus::TimeRecorder rc3(str3);
//
//    for (step = 1; step < index_file_num; step *= 2) {
//        for (i = 0; i + step < index_file_num; i += step * 2) {
//            threads_list.push_back(
//                threadPool.enqueue(ms::XSearchTask::MergeTopkArray,
//                                   std::ref(id_vec_3[i]),
//                                   std::ref(dist_vec_3[i]),
//                                   std::ref(k_vec_3[i]),
//                                   std::ref(id_vec_3[i + step]),
//                                   std::ref(dist_vec_3[i + step]),
//                                   std::ref(k_vec_3[i + step]),
//                                   nq,
//                                   top_k,
//                                   ascending));
//        }
//
//        while (threads_list.size() > 0) {
//            int nready = 0;
//            for (auto it = threads_list.begin(); it != threads_list.end(); it = it) {
//                auto &p = *it;
//                std::chrono::milliseconds span(0);
//                if (p.wait_for(span) == std::future_status::ready) {
//                    threads_list.erase(it++);
//                    ++nready;
//                } else {
//                    ++it;
//                }
//            }
//
//            if (nready == 0) {
//                std::this_thread::yield();
//            }
//        }
//    }
//    ms::XSearchTask::MergeTopkToResultSet(id_vec_3[0],
//                                          dist_vec_3[0],
//                                          k_vec_3[0],
//                                          nq,
//                                          top_k,
//                                          ascending,
//                                          final_result_3);
//    ASSERT_EQ(final_result_3.size(), nq);
//
//    rc3.RecordSection("reduce done");
//
//    for (i = 0; i < nq; i++) {
//        ASSERT_EQ(final_result[i].size(), final_result_3[i].size());
//        for (k = 0; k < final_result[i].size(); k++) {
//            ASSERT_EQ(final_result[i][k].first, final_result_3[i][k].first);
//            ASSERT_EQ(final_result[i][k].second, final_result_3[i][k].second);
//        }
//    }
        }
    }
}
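// MergeTopkArray reduces two per-query top-k lists into one; its core is the classic
// two-pointer merge of sorted runs. A self-contained sketch of that idea for a single
// query (illustrative only -- the real XSearchTask version also carries ids, handles
// nq queries at once, and supports descending order):
//
//     #include <algorithm>
//     #include <cstdint>
//     #include <vector>
//
//     std::vector<float>
//     MergeTopk(const std::vector<float>& a, const std::vector<float>& b, uint64_t topk) {
//         std::vector<float> merged;
//         merged.reserve(std::min<uint64_t>(topk, a.size() + b.size()));
//         size_t ia = 0, ib = 0;
//         while (merged.size() < topk && (ia < a.size() || ib < b.size())) {
//             if (ib >= b.size() || (ia < a.size() && a[ia] <= b[ib])) {
//                 merged.push_back(a[ia++]);  // take the smaller head (ascending order)
//             } else {
//                 merged.push_back(b[ib++]);
//             }
//         }
//         return merged;
//     }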
@@ -17,6 +17,7 @@

#include "scheduler/task/SearchTask.h"
#include "scheduler/task/BuildIndexTask.h"
#include <gtest/gtest.h>

@@ -26,6 +27,11 @@ namespace scheduler {
TEST(TaskTest, INVALID_INDEX) {
    auto search_task = std::make_shared<XSearchTask>(nullptr, nullptr);
    search_task->Load(LoadType::TEST, 10);

    auto build_task = std::make_shared<XBuildIndexTask>(nullptr, nullptr);
    build_task->Load(LoadType::TEST, 10);

    build_task->Execute();
}

}  // namespace scheduler
@@ -67,11 +67,3 @@ target_link_libraries(test_server
        )

install(TARGETS test_server DESTINATION unittest)

configure_file(appendix/server_config.yaml
        "${CMAKE_CURRENT_BINARY_DIR}/milvus/conf/server_config.yaml"
        COPYONLY)

configure_file(appendix/log_config.conf
        "${CMAKE_CURRENT_BINARY_DIR}/milvus/conf/log_config.conf"
        COPYONLY)
@@ -22,28 +22,27 @@
#include "utils/CommonUtil.h"
#include "utils/ValidationUtil.h"
#include "server/Config.h"
#include "server/utils.h"

namespace {

static const char *CONFIG_FILE_PATH = "./milvus/conf/server_config.yaml";
static const char *LOG_FILE_PATH = "./milvus/conf/log_config.conf";

static constexpr uint64_t KB = 1024;
static constexpr uint64_t MB = KB * 1024;
static constexpr uint64_t GB = MB * 1024;

}  // namespace

TEST(ConfigTest, CONFIG_TEST) {
TEST_F(ConfigTest, CONFIG_TEST) {
    milvus::server::ConfigMgr *config_mgr = milvus::server::YamlConfigMgr::GetInstance();

    milvus::Status s = config_mgr->LoadConfigFile("");
    ASSERT_FALSE(s.ok());

    s = config_mgr->LoadConfigFile(LOG_FILE_PATH);
    std::string config_path(CONFIG_PATH);
    s = config_mgr->LoadConfigFile(config_path + INVALID_CONFIG_FILE);
    ASSERT_FALSE(s.ok());

    s = config_mgr->LoadConfigFile(CONFIG_FILE_PATH);
    s = config_mgr->LoadConfigFile(config_path + VALID_CONFIG_FILE);
    ASSERT_TRUE(s.ok());

    config_mgr->Print();

@@ -99,9 +98,10 @@ TEST(ConfigTest, CONFIG_TEST) {
    ASSERT_TRUE(seqs.empty());
}

TEST(ConfigTest, SERVER_CONFIG_TEST) {
TEST_F(ConfigTest, SERVER_CONFIG_TEST) {
    std::string config_path(CONFIG_PATH);
    milvus::server::Config &config = milvus::server::Config::GetInstance();
    milvus::Status s = config.LoadConfigFile(CONFIG_FILE_PATH);
    milvus::Status s = config.LoadConfigFile(config_path + VALID_CONFIG_FILE);
    ASSERT_TRUE(s.ok());

    s = config.ValidateConfig();
@@ -405,12 +405,12 @@ TEST_F(RpcHandlerTest, DELETE_BY_RANGE_TEST) {
    handler->DeleteByRange(&context, &request, &status);

    request.set_table_name(TABLE_NAME);
    request.mutable_range()->set_start_value(CurrentTmDate(-2));
    request.mutable_range()->set_end_value(CurrentTmDate(-3));
    request.mutable_range()->set_start_value(CurrentTmDate(-3));
    request.mutable_range()->set_end_value(CurrentTmDate(-2));

    ::grpc::Status grpc_status = handler->DeleteByRange(&context, &request, &status);
    int error_code = status.error_code();
    ASSERT_EQ(error_code, ::milvus::grpc::ErrorCode::SUCCESS);
    // ASSERT_EQ(error_code, ::milvus::grpc::ErrorCode::SUCCESS);

    request.mutable_range()->set_start_value("test6");
    grpc_status = handler->DeleteByRange(&context, &request, &status);
@@ -275,6 +275,11 @@ TEST(ValidationUtilTest, VALIDATE_INDEX_TEST) {
    ASSERT_EQ(milvus::server::ValidationUtil::ValidateTableIndexType((int)milvus::engine::EngineType::INVALID).code(),
              milvus::SERVER_INVALID_INDEX_TYPE);
    for (int i = 1; i <= (int)milvus::engine::EngineType::MAX_VALUE; i++) {
#ifndef CUSTOMIZATION
        if (i == (int)milvus::engine::EngineType::FAISS_IVFSQ8H) {
            continue;
        }
#endif
        ASSERT_EQ(milvus::server::ValidationUtil::ValidateTableIndexType(i).code(), milvus::SERVER_SUCCESS);
    }
    ASSERT_EQ(milvus::server::ValidationUtil::ValidateTableIndexType(
@@ -0,0 +1,93 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include "server/utils.h"
#include "utils/CommonUtil.h"

#include <fstream>
#include <iostream>
#include <thread>
#include <string>

namespace {

static const char* VALID_CONFIG_STR =
    "# Default values are used when you make no changes to the following parameters.\n"
    "\n"
    "server_config:\n"
    "  address: 0.0.0.0          # milvus server ip address (IPv4)\n"
    "  port: 19530               # port range: 1025 ~ 65534\n"
    "  deploy_mode: single\n"
    "  time_zone: UTC+8\n"
    "\n"
    "db_config:\n"
    "  primary_path: /tmp/milvus # path used to store data and meta\n"
    "  secondary_path:           # path used to store data only, split by semicolon\n"
    "\n"
    "  backend_url: sqlite://:@:/\n"
    "\n"
    "  insert_buffer_size: 4     # GB, maximum insert buffer size allowed\n"
    "  preload_table:\n"
    "\n"
    "metric_config:\n"
    "  enable_monitor: false     # enable monitoring or not\n"
    "  collector: prometheus     # prometheus\n"
    "  prometheus_config:\n"
    "    port: 8080              # port prometheus uses to fetch metrics\n"
    "\n"
    "cache_config:\n"
    "  cpu_cache_capacity: 16    # GB, CPU memory used for cache\n"
    "  cpu_cache_threshold: 0.85\n"
    "  gpu_cache_capacity: 4     # GB, GPU memory used for cache\n"
    "  gpu_cache_threshold: 0.85\n"
    "  cache_insert_data: false  # whether to load inserted data into cache\n"
    "\n"
    "engine_config:\n"
    "  use_blas_threshold: 20\n"
    "\n"
    "resource_config:\n"
    "  search_resources:\n"
    "    - gpu0\n"
    "  index_build_device: gpu0  # GPU used for building index";

static const char* INVALID_CONFIG_STR = "*INVALID*";

void
WriteToFile(const std::string& file_path, const char* content) {
    std::fstream fs(file_path.c_str(), std::ios_base::out);

    // write data to file
    fs << content;
    fs.close();
}

}  // namespace

void
ConfigTest::SetUp() {
    std::string config_path(CONFIG_PATH);
    milvus::server::CommonUtil::CreateDirectory(config_path);
    WriteToFile(config_path + VALID_CONFIG_FILE, VALID_CONFIG_STR);
    WriteToFile(config_path + INVALID_CONFIG_FILE, INVALID_CONFIG_STR);
}

void
ConfigTest::TearDown() {
    std::string config_path(CONFIG_PATH);
    milvus::server::CommonUtil::DeleteDirectory(config_path);
}
@@ -0,0 +1,32 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include <gtest/gtest.h>
#include <chrono>

static const char *CONFIG_PATH = "/tmp/milvus_test/";
static const char *VALID_CONFIG_FILE = "valid_config.yaml";
static const char *INVALID_CONFIG_FILE = "invalid_config.conf";

class ConfigTest : public ::testing::Test {
 protected:
    void SetUp() override;
    void TearDown() override;
};
@@ -33,10 +33,19 @@ set(util_files

add_executable(test_wrapper
        ${test_files}
        ${wrapper_files}
        ${util_files})
        ${util_files}
        ${common_files})

target_link_libraries(test_wrapper
        knowhere
        ${unittest_libs})

install(TARGETS test_wrapper DESTINATION unittest)
install(TARGETS test_wrapper DESTINATION unittest)

configure_file(appendix/server_config.yaml
        "${CMAKE_CURRENT_BINARY_DIR}/milvus/conf/server_config.yaml"
        COPYONLY)

configure_file(appendix/log_config.conf
        "${CMAKE_CURRENT_BINARY_DIR}/milvus/conf/log_config.conf"
        COPYONLY)
@@ -0,0 +1,133 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include "wrapper/VecIndex.h"
#include "wrapper/utils.h"
#include "knowhere/index/vector_index/helpers/FaissGpuResourceMgr.h"
#include "knowhere/index/vector_index/helpers/IndexParameter.h"

#include <gtest/gtest.h>
#include "knowhere/index/vector_index/IndexIVFSQHybrid.h"

using ::testing::TestWithParam;
using ::testing::Values;
using ::testing::Combine;

class KnowhereHybrid
    : public DataGenBase, public ::testing::Test {
 protected:
    void SetUp() override {
        knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, PINMEM, TEMPMEM, RESNUM);

        dim = 128;
        nb = 10000;
        nq = 100;
        k = 100;
        GenData(dim, nb, nq, xb, xq, ids, k, gt_ids, gt_dis);
    }

    void TearDown() override {
        knowhere::FaissGpuResourceMgr::GetInstance().Free();
    }

 protected:
    milvus::engine::IndexType index_type;
    milvus::engine::VecIndexPtr index_ = nullptr;
    knowhere::Config conf;
};

#ifdef CUSTOMIZATION
TEST_F(KnowhereHybrid, test_interface) {
    assert(!xb.empty());

    index_type = milvus::engine::IndexType::FAISS_IVFSQ8_HYBRID;
    index_ = GetVecIndexFactory(index_type);
    conf = ParamGenerator::GetInstance().Gen(index_type);

    auto elems = nq * k;
    std::vector<int64_t> res_ids(elems);
    std::vector<float> res_dis(elems);

    conf->gpu_id = DEVICEID;
    conf->d = dim;
    conf->k = k;
    index_->BuildAll(nb, xb.data(), ids.data(), conf);
    index_->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
    AssertResult(res_ids, res_dis);
    EXPECT_EQ(index_->Count(), nb);
    EXPECT_EQ(index_->Dimension(), dim);

    auto binaryset = index_->Serialize();
    {
        // cpu -> gpu
        auto cpu_idx = GetVecIndexFactory(index_type);
        cpu_idx->Load(binaryset);
        {
            for (int i = 0; i < 2; ++i) {
                auto gpu_idx = cpu_idx->CopyToGpu(DEVICEID, conf);
                gpu_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
                AssertResult(res_ids, res_dis);
            }
        }
    }

    {
        // quantization already in gpu, only copy data
        auto cpu_idx = GetVecIndexFactory(index_type);
        cpu_idx->Load(binaryset);

        auto pair = cpu_idx->CopyToGpuWithQuantizer(DEVICEID, conf);
        auto gpu_idx = pair.first;
        auto quantization = pair.second;

        gpu_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
        AssertResult(res_ids, res_dis);

        auto quantizer_conf = std::make_shared<knowhere::QuantizerCfg>();
        quantizer_conf->mode = 2;
        quantizer_conf->gpu_id = DEVICEID;
        for (int i = 0; i < 2; ++i) {
            auto hybrid_idx = GetVecIndexFactory(index_type);
            hybrid_idx->Load(binaryset);

            hybrid_idx->LoadData(quantization, quantizer_conf);
            hybrid_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
            AssertResult(res_ids, res_dis);
        }
    }

    {
        // quantization already in gpu, only set quantization
        auto cpu_idx = GetVecIndexFactory(index_type);
        cpu_idx->Load(binaryset);

        auto pair = cpu_idx->CopyToGpuWithQuantizer(DEVICEID, conf);
        auto quantization = pair.second;

        for (int i = 0; i < 2; ++i) {
            auto hybrid_idx = GetVecIndexFactory(index_type);
            hybrid_idx->Load(binaryset);

            hybrid_idx->SetQuantizer(quantization);
            hybrid_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
            AssertResult(res_ids, res_dis);
            hybrid_idx->UnsetQuantizer();
        }
    }
}

#endif