Mirror of https://github.com/milvus-io/milvus.git
Commit 3e51ef2630: update README files
@@ -26,3 +26,6 @@ cmake_build
*.lo
*.tar.gz
*.log
.coverage
*.pyc
cov_html/
@@ -0,0 +1,20 @@
language: cpp
sudo: required
dist: bionic

cache:
  directories:
    - $HOME/.ccache

addons:
  apt:
    update: true

before_install:
  - source ci/travis/before-install.sh

install:
  - source $TRAVIS_BUILD_DIR/ci/travis/install_dependency.sh

script:
  - $TRAVIS_BUILD_DIR/ci/travis/travis_build.sh
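The three Travis phases above simply chain the helper scripts under ci/travis. A rough local equivalent, assuming the repository root stands in for TRAVIS_BUILD_DIR, an Ubuntu host with sudo (required by install_dependency.sh), and that the scripts tolerate running outside Travis, would be:

    export TRAVIS_BUILD_DIR=$(pwd)
    source ci/travis/before-install.sh                        # ccache tuning (no-op off Linux/Travis)
    source $TRAVIS_BUILD_DIR/ci/travis/install_dependency.sh  # apt packages, MKL, Arrow
    $TRAVIS_BUILD_DIR/ci/travis/travis_build.sh               # drives ci/scripts/build.sh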
CHANGELOG.md

@@ -1,6 +1,50 @@
# Changelog

Please mark all change in change log and use the ticket from JIRA.

# Milvus 0.6.0 (TODO)

## Bug
- \#228 - memory usage increased slowly during searching vectors
- \#246 - Exclude src/external folder from code coverage for jenkin ci
- \#248 - Reside src/external in thirdparty
- \#316 - Some files not merged after vectors added
- \#327 - Search does not use GPU when index type is FLAT
- \#340 - Test cases run failed on 0.6.0
- \#353 - Rename config.h.in to version.h.in
- \#374 - sdk_simple return empty result
- \#377 - Create partition success if tag name only contains spaces
- \#397 - sdk_simple return incorrect result
- \#399 - Create partition should be failed if partition tag existed
- \#412 - Message returned is confused when partition created with null partition name
- \#416 - Drop the same partition success repeatally
- \#440 - Query API in customization still uses old version

## Feature
- \#12 - Pure CPU version for Milvus
- \#77 - Support table partition
- \#127 - Support new Index type IVFPQ
- \#226 - Experimental shards middleware for Milvus
- \#227 - Support new index types SPTAG-KDT and SPTAG-BKT
- \#346 - Support build index with multiple gpu

## Improvement
- \#255 - Add ivfsq8 test report detailed version
- \#260 - C++ SDK README
- \#266 - Rpc request source code refactor
- \#275 - Rename C++ SDK IndexType
- \#284 - Change C++ SDK to shared library
- \#306 - Use int64 for all config integer
- \#310 - Add Q&A for 'protocol https not supported or disable in libcurl' issue
- \#314 - add Find FAISS in CMake
- \#322 - Add option to enable / disable prometheus
- \#358 - Add more information in build.sh and install.md
- \#404 - Add virtual method Init() in Pass abstract class
- \#409 - Add a Fallback pass in optimizer
- \#433 - C++ SDK query result is not easy to use
- \#449 - Add ShowPartitions example for C++ SDK

## Task

# Milvus 0.5.3 (2019-11-13)

## Bug
@@ -87,7 +131,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-658 - Fix SQ8 Hybrid can't search
- MS-665 - IVF_SQ8H search crash when no GPU resource in search_resources
- \#9 - Change default gpu_cache_capacity to 4
- \#20 - C++ sdk example get grpc error
- \#23 - Add unittest to improve code coverage
- \#31 - make clang-format failed after run build.sh -l
- \#39 - Create SQ8H index hang if using github server version
@@ -139,7 +183,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-635 - Add compile option to support customized faiss
- MS-660 - add ubuntu_build_deps.sh
- \#18 - Add all test cases

# Milvus 0.4.0 (2019-09-12)

## Bug
|
|||
- MS-82 - Update server startup welcome message
|
||||
- MS-83 - Update vecwise to Milvus
|
||||
- MS-77 - Performance issue of post-search action
|
||||
- MS-22 - Enhancement for MemVector size control
|
||||
- MS-22 - Enhancement for MemVector size control
|
||||
- MS-92 - Unify behavior of debug and release build
|
||||
- MS-98 - Install all unit test to installation directory
|
||||
- MS-115 - Change is_startup of metric_config switch from true to on
|
||||
- MS-122 - Archive criteria config
|
||||
- MS-122 - Archive criteria config
|
||||
- MS-124 - HasTable interface
|
||||
- MS-126 - Add more error code
|
||||
- MS-128 - Change default db path
|
||||
|
|
|
@@ -78,6 +78,4 @@ Below is a list of Milvus contributors. We greatly appreciate your contributions

## License

[Apache License 2.0](LICENSE)
|
|||
## 许可协议
|
||||
|
||||
[Apache 许可协议2.0版](https://github.com/milvus-io/milvus/blob/master/LICENSE)
|
||||
|
||||
|
|
|
@@ -72,4 +72,4 @@ To run the C++ sample code, use the following command:

## License

[Apache License 2.0](LICENSE)
@@ -2,7 +2,7 @@

String cron_timezone = "TZ=Asia/Shanghai"
String cron_string = BRANCH_NAME == "master" ? "H 0 * * * " : ""
cron_string = BRANCH_NAME == "0.5.2" ? "H 1 * * * " : cron_string
cron_string = BRANCH_NAME == "0.6.0" ? "H 1 * * * " : cron_string

pipeline {
agent none
@@ -33,128 +33,267 @@ pipeline {
}

stages {
stage("Ubuntu 18.04") {
stage("Ubuntu 18.04 x86_64") {
environment {
OS_NAME = "ubuntu18.04"
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-ubuntu18.04-x86_64-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
]);
DOCKER_VERSION = "${SEMVER}-${OS_NAME}-${LOWER_BUILD_TYPE}"
CPU_ARCH = "amd64"
}

stages {
stage("Run Build") {
agent {
kubernetes {
label 'build'
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/milvus-build-env-pod.yaml'
}
parallel {
stage ("GPU Version") {
environment {
BINRARY_VERSION = "gpu"
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-gpu-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
]);
DOCKER_VERSION = "${SEMVER}-gpu-${OS_NAME}-${LOWER_BUILD_TYPE}"
}

stages {
stage('Build') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
}
stage("Run Build") {
agent {
kubernetes {
label "${BINRARY_VERSION}-build"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/milvus-gpu-version-build-env-pod.yaml'
}
}
}
stage('Code Coverage') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/coverage.groovy"

stages {
stage('Build') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
}
}
}
}
}
}
stage('Upload Package') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/packaging.groovy"
stage('Code Coverage') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/coverage.groovy"
}
}
}
}
}
}
}
}

stage("Publish docker images") {
agent {
kubernetes {
label 'publish'
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/docker-pod.yaml'
}
}

stages {
stage('Publish') {
steps {
container('publish-images'){
script {
load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
}
}
}
}
}
}

stage("Deploy to Development") {
agent {
kubernetes {
label 'dev-test'
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
}
}

stages {
stage("Deploy to Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
}
}
}
}

stage("Dev Test") {
steps {
container('milvus-test-env') {
script {
boolean isNightlyTest = isTimeTriggeredBuild()
if (isNightlyTest) {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
} else {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
stage('Upload Package') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
}
}
}
}
}
}

stage ("Cleanup Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
stage("Publish docker images") {
agent {
kubernetes {
label "${BINRARY_VERSION}-publish"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/docker-pod.yaml'
}
}

stages {
stage('Publish') {
steps {
container('publish-images'){
script {
load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
}
}
}
}
}
}

stage("Deploy to Development") {
agent {
kubernetes {
label "${BINRARY_VERSION}-dev-test"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
}
}

stages {
stage("Deploy to Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
}
}
}
}

stage("Dev Test") {
steps {
container('milvus-test-env') {
script {
boolean isNightlyTest = isTimeTriggeredBuild()
if (isNightlyTest) {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
} else {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
}
}
}
}
}

stage ("Cleanup Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}
post {
unsuccessful {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}
}
post {
unsuccessful {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}

stage ("CPU Version") {
environment {
BINRARY_VERSION = "cpu"
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-cpu-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
]);
DOCKER_VERSION = "${SEMVER}-cpu-${OS_NAME}-${LOWER_BUILD_TYPE}"
}

stages {
stage("Run Build") {
agent {
kubernetes {
label "${BINRARY_VERSION}-build"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/milvus-cpu-version-build-env-pod.yaml'
}
}

stages {
stage('Build') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
}
}
}
}
stage('Code Coverage') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/coverage.groovy"
}
}
}
}
stage('Upload Package') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
}
}
}
}
}
}

stage("Publish docker images") {
agent {
kubernetes {
label "${BINRARY_VERSION}-publish"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/docker-pod.yaml'
}
}

stages {
stage('Publish') {
steps {
container('publish-images'){
script {
load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
}
}
}
}
}
}

stage("Deploy to Development") {
agent {
kubernetes {
label "${BINRARY_VERSION}-dev-test"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
}
}

stages {
stage("Deploy to Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
}
}
}
}

stage("Dev Test") {
steps {
container('milvus-test-env') {
script {
boolean isNightlyTest = isTimeTriggeredBuild()
if (isNightlyTest) {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
} else {
load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
}
}
}
}
}

stage ("Cleanup Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}
post {
unsuccessful {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
}
}
}
}
}
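For orientation, the VersionNumber templates in the environment blocks above expand to strings like the following (values purely illustrative, assuming SEMVER=0.6.0, LOWER_BUILD_TYPE=release, CPU_ARCH=amd64 and the first build on 2019-11-19):

    PACKAGE_VERSION=0.6.0-gpu-ubuntu18.04-amd64-release-20191119-1
    DOCKER_VERSION=0.6.0-gpu-ubuntu18.04-release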
@@ -0,0 +1,34 @@
apiVersion: v1
kind: Pod
metadata:
  name: milvus-cpu-build-env
  labels:
    app: milvus
    componet: cpu-build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-cpu-build-env:v0.6.0-ubuntu18.04
    env:
    - name: POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    command:
    - cat
    tty: true
    resources:
      limits:
        memory: "32Gi"
        cpu: "8.0"
      requests:
        memory: "16Gi"
        cpu: "4.0"
  - name: milvus-mysql
    image: mysql:5.6
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: 123456
    ports:
    - containerPort: 3306
      name: mysql
@@ -1,14 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
  name: milvus-build-env
  name: milvus-gpu-build-env
  labels:
    app: milvus
    componet: build-env
    componet: gpu-build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-build-env:v0.5.1-ubuntu18.04
    image: registry.zilliz.com/milvus/milvus-gpu-build-env:v0.6.0-ubuntu18.04
    env:
    - name: POD_IP
      valueFrom:
@@ -1,13 +0,0 @@
#!/usr/bin/env bash

set -ex

pip3 install -r requirements.txt
./yaml_processor.py merge -f /opt/milvus/conf/server_config.yaml -m ../yaml/update_server_config.yaml -i && \
rm /opt/milvus/conf/server_config.yaml.bak

if [ -d "/opt/milvus/unittest" ]; then
rm -rf "/opt/milvus/unittest"
fi

tar -zcvf ./${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz -C /opt/ milvus
@@ -1,9 +1,11 @@
timeout(time: 60, unit: 'MINUTES') {
dir ("ci/jenkins/scripts") {
sh "./build.sh -l"
dir ("ci/scripts") {
withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -d /opt/milvus -j -u -c"
if ("${env.BINRARY_VERSION}" == "gpu") {
sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -g -j -u -c"
} else {
sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -l -m -j -u -c"
}
}
}
}
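The flags handed to ci/scripts/build.sh above decode as follows (per the script's help text later in this diff). A hedged sketch of the GPU-side call, with placeholder JFrog values:

    # -t build type    -o install prefix   -l run linters   -g GPU version
    # -m with MKL      -j JFrog cache      -u unit tests    -c coverage
    export JFROG_ARTFACTORY_URL='https://example.jfrog.io/artifactory'  # placeholder
    export JFROG_USER_NAME='user' JFROG_PASSWORD='secret'               # placeholders
    ./build.sh -t Release -o /opt/milvus -l -g -j -u -c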
@@ -1,12 +1,12 @@
try {
def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu", returnStatus: true
def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}", returnStatus: true
if (!helmResult) {
sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}"
}
} catch (exc) {
def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu", returnStatus: true
def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}", returnStatus: true
if (!helmResult) {
sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}"
}
throw exc
}
@@ -1,5 +1,5 @@
timeout(time: 30, unit: 'MINUTES') {
dir ("ci/jenkins/scripts") {
dir ("ci/scripts") {
sh "./coverage.sh -o /opt/milvus -u root -p 123456 -t \$POD_IP"
// Set some env variables so codecov detection script works correctly
withCredentials([[$class: 'StringBinding', credentialsId: "${env.PIPELINE_NAME}-codecov-token", variable: 'CODECOV_TOKEN']]) {
@@ -1,9 +1,9 @@
sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo update'
dir ('milvus-helm') {
checkout([$class: 'GitSCM', branches: [[name: "0.5.3"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.3:refs/remotes/origin/0.5.3"]]])
dir ("milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/sqlite_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
checkout([$class: 'GitSCM', branches: [[name: "0.6.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.6.0:refs/remotes/origin/0.6.0"]]])
dir ("milvus") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION} -f ci/db_backend/sqlite_${env.BINRARY_VERSION}_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
}
}
@@ -0,0 +1,13 @@
timeout(time: 5, unit: 'MINUTES') {
dir ("ci/jenkins/scripts") {
sh "pip3 install -r requirements.txt"
sh "./yaml_processor.py merge -f /opt/milvus/conf/server_config.yaml -m ../yaml/update_server_config.yaml -i && rm /opt/milvus/conf/server_config.yaml.bak"
}
sh "tar -zcvf ./${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz -C /opt/ milvus"
withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
def uploadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -T ./${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz ${params.JFROG_ARTFACTORY_URL}/milvus/package/${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz")
if (uploadStatus != 0) {
error("\" ${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz \" upload to \" ${params.JFROG_ARTFACTORY_URL}/milvus/package/${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz \" failed!")
}
}
}
@@ -1,11 +0,0 @@
timeout(time: 5, unit: 'MINUTES') {
dir ("ci/jenkins/scripts") {
sh "./packaging.sh"
withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
def uploadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -T ./${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz ${params.JFROG_ARTFACTORY_URL}/milvus/package/${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz")
if (uploadStatus != 0) {
error("\" ${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz \" upload to \" ${params.JFROG_ARTFACTORY_URL}/milvus/package/${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz \" failed!")
}
}
}
}
@@ -1,6 +1,6 @@
container('publish-images') {
timeout(time: 15, unit: 'MINUTES') {
dir ("docker/deploy/${OS_NAME}") {
dir ("docker/deploy/${env.BINRARY_VERSION}/${env.OS_NAME}") {
def binaryPackage = "${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz"

withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
@@ -1,22 +1,22 @@
timeout(time: 90, unit: 'MINUTES') {
dir ("tests/milvus_python_test") {
sh 'python3 -m pip install -r requirements.txt'
sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}-engine.milvus.svc.cluster.local"
}
// mysql database backend test
load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"
load "ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"

if (!fileExists('milvus-helm')) {
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "0.5.3"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.3:refs/remotes/origin/0.5.3"]]])
checkout([$class: 'GitSCM', branches: [[name: "0.6.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.6.0:refs/remotes/origin/0.6.0"]]])
}
}
dir ("milvus-helm") {
dir ("milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
dir ("milvus") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION} -f ci/db_backend/mysql_${env.BINRARY_VERSION}_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
}
}
dir ("tests/milvus_python_test") {
sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}-engine.milvus.svc.cluster.local"
}
}
@@ -1,24 +1,23 @@
timeout(time: 60, unit: 'MINUTES') {
dir ("tests/milvus_python_test") {
sh 'python3 -m pip install -r requirements.txt'
sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}-engine.milvus.svc.cluster.local"
}
// mysql database backend test
// load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"

// Remove mysql-version tests: 10-28
// mysql database backend test
// load "ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"

// if (!fileExists('milvus-helm')) {
//     dir ("milvus-helm") {
//         checkout([$class: 'GitSCM', branches: [[name: "0.5.3"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.3:refs/remotes/origin/0.5.3"]]])
//         checkout([$class: 'GitSCM', branches: [[name: "0.6.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.6.0:refs/remotes/origin/0.6.0"]]])
//     }
// }
// dir ("milvus-helm") {
//     dir ("milvus-gpu") {
//         sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
//     dir ("milvus") {
//         sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION} -f ci/db_backend/mysql_${env.BINRARY_VERSION}_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
//     }
// }
// dir ("tests/milvus_python_test") {
//     sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
//     sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}-engine.milvus.svc.cluster.local"
// }
}
@@ -5,9 +5,6 @@ container('milvus-build-env') {
gitlabCommitStatus(name: 'Packaged Engine') {
if (fileExists('milvus')) {
try {
if (fileExists('milvus/unittest')) {
sh "rm -rf ./milvus/unittest"
}
sh "tar -zcvf ./${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz ./milvus"
def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
fileTransfer.FileTransfer("${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz", "${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage')
@@ -1,5 +1,7 @@
#!/bin/bash

set -e

SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
@@ -8,38 +10,41 @@ while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

CMAKE_BUILD_DIR="${SCRIPTS_DIR}/../../../core/cmake_build"
MILVUS_CORE_DIR="${SCRIPTS_DIR}/../../core"
CORE_BUILD_DIR="${MILVUS_CORE_DIR}/cmake_build"
BUILD_TYPE="Debug"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX="/opt/milvus"
FAISS_ROOT=""
CUSTOMIZATION="OFF" # default use origin faiss
BUILD_COVERAGE="OFF"
DB_PATH="/opt/milvus"
PROFILING="OFF"
USE_JFROG_CACHE="OFF"
RUN_CPPLINT="OFF"
CUSTOMIZATION="OFF" # default use ori faiss
GPU_VERSION="OFF"
WITH_MKL="OFF"
CUDA_COMPILER=/usr/local/cuda/bin/nvcc

CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}"
wget -q --method HEAD ${CUSTOMIZED_FAISS_URL}
if [ $? -eq 0 ]; then
CUSTOMIZATION="ON"
else
CUSTOMIZATION="OFF"
fi

while getopts "o:d:t:ulcgjhx" arg
while getopts "o:t:b:f:gxulcjmh" arg
do
case $arg in
o)
INSTALL_PREFIX=$OPTARG
;;
d)
DB_PATH=$OPTARG
;;
t)
BUILD_TYPE=$OPTARG # BUILD_TYPE
;;
b)
CORE_BUILD_DIR=$OPTARG # CORE_BUILD_DIR
;;
f)
FAISS_ROOT=$OPTARG # FAISS ROOT PATH
;;
g)
GPU_VERSION="ON";
;;
x)
CUSTOMIZATION="ON";
;;
u)
echo "Build and run unittest cases" ;
BUILD_UNITTEST="ON";
@@ -50,31 +55,31 @@ do
c)
BUILD_COVERAGE="ON"
;;
g)
PROFILING="ON"
;;
j)
USE_JFROG_CACHE="ON"
;;
x)
CUSTOMIZATION="OFF" # force use ori faiss
m)
WITH_MKL="ON"
;;
h) # help
echo "

parameter:
-o: install prefix(default: /opt/milvus)
-d: db data path(default: /opt/milvus)
-t: build type(default: Debug)
-b: core code build directory
-f: faiss root path
-g: gpu version
-x: milvus customization (default: OFF)
-u: building unit test options(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-c: code coverage(default: OFF)
-g: profiling(default: OFF)
-j: use jfrog cache build directory(default: OFF)
-m: build with MKL(default: OFF)
-h: help

usage:
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-g] [-j] [-h]
./build.sh -o \${INSTALL_PREFIX} -t \${BUILD_TYPE} -b \${CORE_BUILD_DIR} -f \${FAISS_ROOT} [-g] [-x] [-u] [-l] [-c] [-j] [-m] [-h]
"
exit 0
;;
@@ -85,31 +90,30 @@ usage:
esac
done

if [[ ! -d ${CMAKE_BUILD_DIR} ]]; then
mkdir ${CMAKE_BUILD_DIR}
if [[ ! -d ${CORE_BUILD_DIR} ]]; then
mkdir ${CORE_BUILD_DIR}
fi

cd ${CMAKE_BUILD_DIR}

# remove make cache since build.sh -l use default variables
# force update the variables each time
make rebuild_cache
cd ${CORE_BUILD_DIR}

CMAKE_CMD="cmake \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX}
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DMILVUS_DB_PATH=${DB_PATH} \
-DMILVUS_ENABLE_PROFILING=${PROFILING} \
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
-DMILVUS_GPU_VERSION=${GPU_VERSION} \
-DCUSTOMIZATION=${CUSTOMIZATION} \
-DFAISS_URL=${CUSTOMIZED_FAISS_URL} \
.."
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
-DFAISS_ROOT=${FAISS_ROOT} \
-DFAISS_WITH_MKL=${WITH_MKL} \
-DArrow_SOURCE=AUTO \
-DFAISS_SOURCE=AUTO \
${MILVUS_CORE_DIR}"
echo ${CMAKE_CMD}
${CMAKE_CMD}


if [[ ${RUN_CPPLINT} == "ON" ]]; then
# cpplint check
make lint
@@ -135,8 +139,8 @@ if [[ ${RUN_CPPLINT} == "ON" ]]; then
# exit 1
# fi
# echo "clang-tidy check passed!"
else
# compile and build
make -j8 || exit 1
make install || exit 1
fi

# compile and build
make -j8 || exit 1
make install || exit 1
@@ -9,18 +9,22 @@ done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

INSTALL_PREFIX="/opt/milvus"
CMAKE_BUILD_DIR="${SCRIPTS_DIR}/../../../core/cmake_build"
MILVUS_CORE_DIR="${SCRIPTS_DIR}/../../core"
CORE_BUILD_DIR="${MILVUS_CORE_DIR}/cmake_build"
MYSQL_USER_NAME=root
MYSQL_PASSWORD=123456
MYSQL_HOST='127.0.0.1'
MYSQL_PORT='3306'

while getopts "o:u:p:t:h" arg
while getopts "o:b:u:p:t:h" arg
do
case $arg in
o)
INSTALL_PREFIX=$OPTARG
;;
b)
CORE_BUILD_DIR=$OPTARG # CORE_BUILD_DIR
;;
u)
MYSQL_USER_NAME=$OPTARG
;;
@@ -35,13 +39,14 @@ do

parameter:
-o: milvus install prefix(default: /opt/milvus)
-b: core code build directory
-u: mysql account
-p: mysql password
-t: mysql host
-h: help

usage:
./coverage.sh -o \${INSTALL_PREFIX} -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h]
./coverage.sh -o \${INSTALL_PREFIX} -b \${CORE_BUILD_DIR} -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h]
"
exit 0
;;
@@ -63,12 +68,14 @@ FILE_INFO_OUTPUT="output.info"
FILE_INFO_OUTPUT_NEW="output_new.info"
DIR_LCOV_OUTPUT="lcov_out"

DIR_GCNO="${CMAKE_BUILD_DIR}"
DIR_GCNO="${CORE_BUILD_DIR}"
DIR_UNITTEST="${INSTALL_PREFIX}/unittest"

cd ${SCRIPTS_DIR}

# delete old code coverage info files
rm -rf lcov_out
rm -f FILE_INFO_BASE FILE_INFO_MILVUS FILE_INFO_OUTPUT FILE_INFO_OUTPUT_NEW
rm -rf ${DIR_LCOV_OUTPUT}
rm -f ${FILE_INFO_BASE} ${FILE_INFO_MILVUS} ${FILE_INFO_OUTPUT} ${FILE_INFO_OUTPUT_NEW}

MYSQL_DB_NAME=milvus_`date +%s%N`

@@ -109,7 +116,7 @@ for test in `ls ${DIR_UNITTEST}`; do
if [ $? -ne 0 ]; then
echo ${args}
echo ${DIR_UNITTEST}/${test} "run failed"
exit -1
exit 1
fi
done

@@ -132,12 +139,11 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
"*/src/server/Server.cpp" \
"*/src/server/DBWrapper.cpp" \
"*/src/server/grpc_impl/GrpcServer.cpp" \
"*/src/external/easyloggingpp/easylogging++.h" \
"*/src/external/easyloggingpp/easylogging++.cc"
"*/thirdparty/*"

if [ $? -ne 0 ]; then
echo "gen ${FILE_INFO_OUTPUT_NEW} failed"
exit -2
exit 2
fi

# gen html report
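The exclusion step above relies on lcov's remove (-r) mode. Assuming LCOV_CMD is plain lcov, a minimal standalone sketch of the same filtering plus the HTML step hinted at by the "# gen html report" comment is:

    # Drop third-party sources from the captured tracefile, then render HTML
    # into lcov_out/ (file and directory names taken from the script above).
    lcov -r output.info "*/thirdparty/*" -o output_new.info
    genhtml output_new.info --output-directory lcov_out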
@@ -0,0 +1,13 @@
#!/bin/bash

set -ex

if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then
export CCACHE_COMPRESS=1
export CCACHE_COMPRESSLEVEL=5
export CCACHE_COMPILERCHECK=content
export PATH=/usr/lib/ccache/:$PATH
ccache --show-stats
fi

set +ex
@@ -0,0 +1,44 @@
#!/usr/bin/env bash

set -e

wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB

sudo apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB

echo "deb https://apt.repos.intel.com/mkl all main" | \
sudo tee /etc/apt/sources.list.d/intel-mkl.list

sudo wget -O /usr/share/keyrings/apache-arrow-keyring.gpg https://dl.bintray.com/apache/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/apache-arrow-keyring.gpg

sudo tee /etc/apt/sources.list.d/apache-arrow.list <<APT_LINE
deb [arch=amd64 signed-by=/usr/share/keyrings/apache-arrow-keyring.gpg] https://dl.bintray.com/apache/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/ $(lsb_release --codename --short) main
deb-src [signed-by=/usr/share/keyrings/apache-arrow-keyring.gpg] https://dl.bintray.com/apache/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/ $(lsb_release --codename --short) main
APT_LINE

sudo apt-get update -qq

sudo apt-get install -y -q --no-install-recommends \
gfortran \
lsb-core \
libtool \
automake \
ccache \
pkg-config \
libarrow-dev \
libjemalloc-dev \
libboost-serialization-dev \
libboost-filesystem-dev \
libboost-system-dev \
libboost-regex-dev \
intel-mkl-gnu-2019.5-281 \
intel-mkl-core-2019.5-281 \
libmysqlclient-dev \
clang-format-6.0 \
clang-tidy-6.0 \
lcov

sudo ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so \
/usr/lib/x86_64-linux-gnu/libmysqlclient_r.so

export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/opt/intel/compilers_and_libraries_2019.5.281/linux/mkl/lib/intel64
@@ -0,0 +1,24 @@
#!/usr/bin/env bash

set -ex

source $TRAVIS_BUILD_DIR/ci/travis/travis_env_common.sh

only_library_mode=no

while true; do
case "$1" in
--only-library)
only_library_mode=yes
shift ;;
*) break ;;
esac
done

BUILD_COMMON_FLAGS="-t ${MILVUS_BUILD_TYPE} -o ${MILVUS_INSTALL_PREFIX} -b ${MILVUS_BUILD_DIR}"

if [ $only_library_mode == "yes" ]; then
${TRAVIS_BUILD_DIR}/ci/scripts/build.sh ${BUILD_COMMON_FLAGS} -m
else
${TRAVIS_BUILD_DIR}/ci/scripts/build.sh ${BUILD_COMMON_FLAGS} -m -u -c
fi
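With the defaults exported by travis_env_common.sh (the next file), the full-build branch above effectively runs:

    # only_library_mode=no; MILVUS_BUILD_TYPE=Release, MILVUS_INSTALL_PREFIX=/opt/milvus,
    # MILVUS_BUILD_DIR=$TRAVIS_BUILD_DIR/core/cmake_build
    $TRAVIS_BUILD_DIR/ci/scripts/build.sh -t Release -o /opt/milvus \
        -b $TRAVIS_BUILD_DIR/core/cmake_build -m -u -c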
@@ -0,0 +1,10 @@
export MILVUS_CORE_DIR=${TRAVIS_BUILD_DIR}/core
export MILVUS_BUILD_DIR=${TRAVIS_BUILD_DIR}/core/cmake_build
export MILVUS_INSTALL_PREFIX=/opt/milvus
export MILVUS_TRAVIS_COVERAGE=${MILVUS_TRAVIS_COVERAGE:=0}

if [ "${MILVUS_TRAVIS_COVERAGE}" == "1" ]; then
export MILVUS_CPP_COVERAGE_FILE=${TRAVIS_BUILD_DIR}/output_new.info
fi

export MILVUS_BUILD_TYPE=${MILVUS_BUILD_TYPE:=Release}
@@ -1,10 +1,13 @@
milvus/
conf/server_config.yaml
conf/log_config.conf
version.h
src/config.h
src/version.h
lcov_out/
base.info
output.info
output_new.info
server.info
*.pyc
src/grpc/python_gen.h
src/grpc/python/
@@ -18,47 +18,42 @@
#-------------------------------------------------------------------------------

cmake_minimum_required(VERSION 3.14)
cmake_minimum_required(VERSION 3.12)
message(STATUS "Building using CMake version: ${CMAKE_VERSION}")

set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake")

MACRO (GET_CURRENT_TIME CURRENT_TIME)
MACRO(GET_CURRENT_TIME CURRENT_TIME)
execute_process(COMMAND "date" +"%Y-%m-%d %H:%M.%S" OUTPUT_VARIABLE ${CURRENT_TIME})
ENDMACRO (GET_CURRENT_TIME)
ENDMACRO(GET_CURRENT_TIME)

GET_CURRENT_TIME(BUILD_TIME)
string(REGEX REPLACE "\n" "" BUILD_TIME ${BUILD_TIME})
message(STATUS "Build time = ${BUILD_TIME}")

MACRO (GET_GIT_BRANCH_NAME GIT_BRANCH_NAME)
execute_process(COMMAND "git" rev-parse --abbrev-ref HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
if(GIT_BRANCH_NAME STREQUAL "")
execute_process(COMMAND "git" symbolic-ref --short -q HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
endif()
ENDMACRO (GET_GIT_BRANCH_NAME)
MACRO(GET_GIT_BRANCH_NAME GIT_BRANCH_NAME)
execute_process(COMMAND sh "-c" "git log --decorate | head -n 1 | sed 's/.*(\\(.*\\))/\\1/' | sed 's/.* \\(.*\\),.*/\\1/' | sed 's=[a-zA-Z]*\/==g'"
OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
ENDMACRO(GET_GIT_BRANCH_NAME)

GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME)
message(STATUS "GIT_BRANCH_NAME = ${GIT_BRANCH_NAME}")
if(NOT GIT_BRANCH_NAME STREQUAL "")
if (NOT GIT_BRANCH_NAME STREQUAL "")
string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME})
endif()
endif ()

set(MILVUS_VERSION "${GIT_BRANCH_NAME}")
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]" MILVUS_VERSION "${MILVUS_VERSION}")

find_package(ClangTools)
set(BUILD_SUPPORT_DIR "${CMAKE_SOURCE_DIR}/build-support")

if(CMAKE_BUILD_TYPE STREQUAL "Release")
if (CMAKE_BUILD_TYPE STREQUAL "Release")
set(BUILD_TYPE "Release")
else()
else ()
set(BUILD_TYPE "Debug")
endif()
endif ()
message(STATUS "Build type = ${BUILD_TYPE}")

project(milvus VERSION "${MILVUS_VERSION}")
project(milvus_engine LANGUAGES CUDA CXX)
project(milvus_engine LANGUAGES CXX)

unset(CMAKE_EXPORT_COMPILE_COMMANDS CACHE)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
@@ -67,15 +62,15 @@ set(MILVUS_VERSION_MAJOR "${milvus_VERSION_MAJOR}")
set(MILVUS_VERSION_MINOR "${milvus_VERSION_MINOR}")
set(MILVUS_VERSION_PATCH "${milvus_VERSION_PATCH}")

if(MILVUS_VERSION_MAJOR STREQUAL ""
if (MILVUS_VERSION_MAJOR STREQUAL ""
OR MILVUS_VERSION_MINOR STREQUAL ""
OR MILVUS_VERSION_PATCH STREQUAL "")
message(WARNING "Failed to determine Milvus version from git branch name")
set(MILVUS_VERSION "0.5.3")
set(MILVUS_VERSION "0.6.0")
endif()

message(STATUS "Build version = ${MILVUS_VERSION}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/version.h.macro ${CMAKE_CURRENT_SOURCE_DIR}/src/version.h)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/version.h.in ${CMAKE_CURRENT_SOURCE_DIR}/src/version.h @ONLY)

message(STATUS "Milvus version: "
"${MILVUS_VERSION_MAJOR}.${MILVUS_VERSION_MINOR}.${MILVUS_VERSION_PATCH} "
@@ -84,77 +79,111 @@ message(STATUS "Milvus version: "
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED on)

if(CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)")
if (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)")
message(STATUS "Building milvus_engine on x86 architecture")
set(MILVUS_BUILD_ARCH x86_64)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc)")
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc)")
message(STATUS "Building milvus_engine on ppc architecture")
set(MILVUS_BUILD_ARCH ppc64le)
else()
else ()
message(WARNING "Unknown processor type")
message(WARNING "CMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR}")
set(MILVUS_BUILD_ARCH unknown)
endif()

find_package (Python COMPONENTS Interpreter Development)

find_package(CUDA)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda")

if(CMAKE_BUILD_TYPE STREQUAL "Release")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp")
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp")
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O0 -g")
endif()
endif ()

# Ensure that a default make is set
if("${MAKE}" STREQUAL "")
if(NOT MSVC)
if ("${MAKE}" STREQUAL "")
if (NOT MSVC)
find_program(MAKE make)
endif()
endif()
endif ()
endif ()

find_path(MYSQL_INCLUDE_DIR
NAMES "mysql.h"
PATH_SUFFIXES "mysql")
NAMES "mysql.h"
PATH_SUFFIXES "mysql")
if (${MYSQL_INCLUDE_DIR} STREQUAL "MYSQL_INCLUDE_DIR-NOTFOUND")
message(FATAL_ERROR "Could not found MySQL include directory")
else()
else ()
include_directories(${MYSQL_INCLUDE_DIR})
endif()
endif ()

set(MILVUS_SOURCE_DIR ${PROJECT_SOURCE_DIR})
set(MILVUS_BINARY_DIR ${PROJECT_BINARY_DIR})
set(MILVUS_ENGINE_SRC ${PROJECT_SOURCE_DIR}/src)
set(MILVUS_THIRDPARTY_SRC ${PROJECT_SOURCE_DIR}/thirdparty)

include(ExternalProject)
include(DefineOptions)
include(BuildUtils)
include(ThirdPartyPackages)

config_summary()
if (MILVUS_USE_CCACHE)
find_program(CCACHE_FOUND ccache)
if (CCACHE_FOUND)
message(STATUS "Using ccache: ${CCACHE_FOUND}")
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND})
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
# let ccache preserve C++ comments, because some of them may be
# meaningful to the compiler
set(ENV{CCACHE_COMMENTS} "1")
endif (CCACHE_FOUND)
endif ()

set(MILVUS_CPU_VERSION false)
if (MILVUS_GPU_VERSION)
message(STATUS "Building Milvus GPU version")
add_compile_definitions("MILVUS_GPU_VERSION")
enable_language(CUDA)
find_package(CUDA 10 REQUIRED)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda")
else ()
message(STATUS "Building Milvus CPU version")
set(MILVUS_CPU_VERSION true)
add_compile_definitions("MILVUS_CPU_VERSION")
endif ()

if (MILVUS_WITH_PROMETHEUS)
add_compile_definitions("MILVUS_WITH_PROMETHEUS")
endif ()

if (CMAKE_BUILD_TYPE STREQUAL "Release")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp")
if (MILVUS_GPU_VERSION)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3")
endif ()
else ()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp")
if (MILVUS_GPU_VERSION)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O0 -g")
endif ()
endif ()

if (CUSTOMIZATION)
add_definitions(-DCUSTOMIZATION)
endif (CUSTOMIZATION)

config_summary()
add_subdirectory(src)

if (BUILD_UNIT_TEST STREQUAL "ON")
if (BUILD_COVERAGE STREQUAL "ON")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
endif()
endif ()
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/unittest)
endif()
endif ()

add_custom_target(Clean-All COMMAND ${CMAKE_BUILD_TOOL} clean)

if("${MILVUS_DB_PATH}" STREQUAL "")
if ("${MILVUS_DB_PATH}" STREQUAL "")
set(MILVUS_DB_PATH "/tmp/milvus")
endif()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml)
endif ()

if (MILVUS_GPU_VERSION)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_gpu_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml)
else ()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_cpu_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml)
endif ()

configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.conf)

install(DIRECTORY scripts/
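A plain out-of-tree configure that exercises the new options might look like this (a sketch only; values are illustrative and limited to options visible in this diff):

    mkdir -p core/cmake_build && cd core/cmake_build
    cmake -DCMAKE_BUILD_TYPE=Release \
          -DMILVUS_GPU_VERSION=ON \
          -DFAISS_WITH_MKL=OFF \
          -DMILVUS_WITH_PROMETHEUS=ON \
          -DBUILD_UNIT_TEST=OFF \
          ..
    make -j8 && make install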
@@ -169,19 +198,22 @@ install(FILES
DESTINATION
conf)

find_package(Python COMPONENTS Interpreter Development)
find_package(ClangTools)
set(BUILD_SUPPORT_DIR "${CMAKE_SOURCE_DIR}/build-support")

#
# "make lint" target
#
if(NOT MILVUS_VERBOSE_LINT)
set(MILVUS_LINT_QUIET "--quiet")
endif()
if (NOT MILVUS_VERBOSE_LINT)
set(MILVUS_LINT_QUIET "--quiet")
endif ()

if(NOT LINT_EXCLUSIONS_FILE)
# source files matching a glob from a line in this file
# will be excluded from linting (cpplint, clang-tidy, clang-format)
set(LINT_EXCLUSIONS_FILE ${BUILD_SUPPORT_DIR}/lint_exclusions.txt)
endif()
if (NOT LINT_EXCLUSIONS_FILE)
# source files matching a glob from a line in this file
# will be excluded from linting (cpplint, clang-tidy, clang-format)
set(LINT_EXCLUSIONS_FILE ${BUILD_SUPPORT_DIR}/lint_exclusions.txt)
endif ()

find_program(CPPLINT_BIN NAMES cpplint cpplint.py HINTS ${BUILD_SUPPORT_DIR})
message(STATUS "Found cpplint executable at ${CPPLINT_BIN}")
@@ -190,77 +222,76 @@ message(STATUS "Found cpplint executable at ${CPPLINT_BIN}")
# "make lint" targets
#
add_custom_target(lint
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_cpplint.py
--cpplint_binary
${CPPLINT_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}
${MILVUS_LINT_QUIET})
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_cpplint.py
--cpplint_binary
${CPPLINT_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}
${MILVUS_LINT_QUIET})

#
# "make clang-format" and "make check-clang-format" targets
#
if(${CLANG_FORMAT_FOUND})
# runs clang format and updates files in place.
add_custom_target(clang-format
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_format.py
--clang_format_binary
${CLANG_FORMAT_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
--fix
${MILVUS_LINT_QUIET})
if (${CLANG_FORMAT_FOUND})
# runs clang format and updates files in place.
add_custom_target(clang-format
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_format.py
--clang_format_binary
${CLANG_FORMAT_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
--fix
${MILVUS_LINT_QUIET})

# runs clang format and exits with a non-zero exit code if any files need to be reformatted
add_custom_target(check-clang-format
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_format.py
--clang_format_binary
${CLANG_FORMAT_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
${MILVUS_LINT_QUIET})
endif()
# runs clang format and exits with a non-zero exit code if any files need to be reformatted
add_custom_target(check-clang-format
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_format.py
--clang_format_binary
${CLANG_FORMAT_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
${MILVUS_LINT_QUIET})
endif ()

#
# "make clang-tidy" and "make check-clang-tidy" targets
#
if(${CLANG_TIDY_FOUND})
# runs clang-tidy and attempts to fix any warning automatically
add_custom_target(clang-tidy
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_tidy.py
--clang_tidy_binary
${CLANG_TIDY_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--compile_commands
${CMAKE_BINARY_DIR}/compile_commands.json
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
--fix
${MILVUS_LINT_QUIET})

# runs clang-tidy and exits with a non-zero exit code if any errors are found.
add_custom_target(check-clang-tidy
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_tidy.py
--clang_tidy_binary
${CLANG_TIDY_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--compile_commands
${CMAKE_BINARY_DIR}/compile_commands.json
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
${MILVUS_LINT_QUIET})
endif()
if (${CLANG_TIDY_FOUND})
# runs clang-tidy and attempts to fix any warning automatically
add_custom_target(clang-tidy
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_tidy.py
--clang_tidy_binary
${CLANG_TIDY_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--compile_commands
${CMAKE_BINARY_DIR}/compile_commands.json
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
--fix
${MILVUS_LINT_QUIET})

# runs clang-tidy and exits with a non-zero exit code if any errors are found.
add_custom_target(check-clang-tidy
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_tidy.py
--clang_tidy_binary
${CLANG_TIDY_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--compile_commands
${CMAKE_BINARY_DIR}/compile_commands.json
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
${MILVUS_LINT_QUIET})
endif ()

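Once configured, the lint targets defined above run from the build directory; a typical pre-commit pass (sketch) is:

    cd core/cmake_build
    make lint                # cpplint over the source tree
    make check-clang-format  # fails if any file needs reformatting
    make check-clang-tidy    # uses compile_commands.json from this build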
@@ -6,5 +6,5 @@
*easylogging++*
*SqliteMetaImpl.cpp
*src/grpc*
*src/external*
*thirdparty*
*milvus/include*
181
core/build.sh
181
core/build.sh
|
@ -12,124 +12,143 @@ USE_JFROG_CACHE="OFF"
|
|||
RUN_CPPLINT="OFF"
|
||||
CUSTOMIZATION="OFF" # default use ori faiss
|
||||
CUDA_COMPILER=/usr/local/cuda/bin/nvcc
|
||||
GPU_VERSION="OFF" #defaults to CPU version
|
||||
WITH_MKL="OFF"
|
||||
FAISS_ROOT="" #FAISS root path
|
||||
FAISS_SOURCE="BUNDLED"
|
||||
WITH_PROMETHEUS="ON"
|
||||
|
||||
CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}"
|
||||
wget -q --method HEAD ${CUSTOMIZED_FAISS_URL}
|
||||
if [ $? -eq 0 ]; then
|
||||
CUSTOMIZATION="ON"
|
||||
else
|
||||
CUSTOMIZATION="OFF"
|
||||
fi
|
||||
|
||||
while getopts "p:d:t:ulrcgjhx" arg
|
||||
do
|
||||
case $arg in
|
||||
p)
|
||||
INSTALL_PREFIX=$OPTARG
|
||||
;;
|
||||
d)
|
||||
DB_PATH=$OPTARG
|
||||
;;
|
||||
t)
|
||||
BUILD_TYPE=$OPTARG # BUILD_TYPE
|
||||
;;
|
||||
u)
|
||||
echo "Build and run unittest cases" ;
|
||||
BUILD_UNITTEST="ON";
|
||||
;;
|
||||
l)
|
||||
RUN_CPPLINT="ON"
|
||||
;;
|
||||
r)
|
||||
if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
|
||||
rm ./${BUILD_OUTPUT_DIR} -r
|
||||
MAKE_CLEAN="ON"
|
||||
fi
|
||||
;;
|
||||
c)
|
||||
BUILD_COVERAGE="ON"
|
||||
;;
|
||||
g)
|
||||
PROFILING="ON"
|
||||
;;
|
||||
j)
|
||||
USE_JFROG_CACHE="ON"
|
||||
;;
|
||||
x)
|
||||
CUSTOMIZATION="OFF" # force use ori faiss
|
||||
;;
|
||||
h) # help
|
||||
echo "
|
||||
while getopts "p:d:t:f:ulrcgjhxzme" arg; do
|
||||
case $arg in
|
||||
p)
|
||||
INSTALL_PREFIX=$OPTARG
|
||||
;;
|
||||
d)
|
||||
DB_PATH=$OPTARG
|
||||
;;
|
||||
t)
|
||||
BUILD_TYPE=$OPTARG # BUILD_TYPE
|
||||
;;
|
||||
f)
|
||||
FAISS_ROOT=$OPTARG
|
||||
FAISS_SOURCE="AUTO"
|
||||
;;
|
||||
u)
|
||||
echo "Build and run unittest cases"
|
||||
BUILD_UNITTEST="ON"
|
||||
;;
|
||||
l)
|
||||
RUN_CPPLINT="ON"
|
||||
;;
|
||||
r)
|
||||
if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
|
||||
rm ./${BUILD_OUTPUT_DIR} -r
|
||||
MAKE_CLEAN="ON"
|
||||
fi
|
||||
;;
|
||||
c)
|
||||
BUILD_COVERAGE="ON"
|
||||
;;
|
||||
z)
|
||||
PROFILING="ON"
|
||||
;;
|
||||
j)
|
||||
USE_JFROG_CACHE="ON"
|
||||
;;
|
||||
x)
|
||||
CUSTOMIZATION="OFF" # force use ori faiss
|
||||
;;
|
||||
g)
|
||||
GPU_VERSION="ON"
|
||||
;;
|
||||
m)
|
||||
WITH_MKL="ON"
|
||||
;;
|
||||
e)
|
||||
WITH_PROMETHEUS="OFF"
|
||||
;;
|
||||
h) # help
|
||||
echo "
|
||||
|
||||
parameter:
|
||||
-p: install prefix(default: $(pwd)/milvus)
|
||||
-d: db data path(default: /tmp/milvus)
|
||||
-t: build type(default: Debug)
|
||||
-f: FAISS root path(default: empty). The path should be an absolute path
|
||||
containing the pre-installed lib/ and include/ directory of FAISS. If they can't be found,
|
||||
we will build the original FAISS from source instead.
|
||||
-u: building unit test options(default: OFF)
|
||||
-l: run cpplint, clang-format and clang-tidy(default: OFF)
|
||||
-r: remove previous build directory(default: OFF)
|
||||
-c: code coverage(default: OFF)
|
||||
-g: profiling(default: OFF)
|
||||
-z: profiling(default: OFF)
|
||||
-j: use jfrog cache build directory(default: OFF)
|
||||
-g: build GPU version(default: OFF)
|
||||
-m: build with MKL(default: OFF)
|
||||
-e: build without prometheus(default: OFF)
|
||||
-h: help
|
||||
|
||||
usage:
|
||||
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-g] [-j] [-h]
|
||||
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} -f \${FAISS_ROOT} [-u] [-l] [-r] [-c] [-z] [-j] [-g] [-m] [-e] [-h]
|
||||
"
|
||||
exit 0
|
||||
;;
|
||||
?)
|
||||
echo "ERROR! unknown argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
exit 0
|
||||
;;
|
||||
?)
|
||||
echo "ERROR! unknown argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ ! -d ${BUILD_OUTPUT_DIR} ]]; then
|
||||
mkdir ${BUILD_OUTPUT_DIR}
|
||||
mkdir ${BUILD_OUTPUT_DIR}
|
||||
fi
|
||||
|
||||
cd ${BUILD_OUTPUT_DIR}
|
||||
|
||||
# remove make cache since build.sh -l use default variables
|
||||
# force update the variables each time
|
||||
make rebuild_cache > /dev/null 2>&1
|
||||
make rebuild_cache >/dev/null 2>&1
|
||||
|
||||
CMAKE_CMD="cmake \
|
||||
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
|
||||
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
|
||||
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
|
||||
-DFAISS_ROOT=${FAISS_ROOT} \
|
||||
-DFAISS_SOURCE=${FAISS_SOURCE} \
|
||||
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
|
||||
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
|
||||
-DMILVUS_DB_PATH=${DB_PATH} \
|
||||
-DMILVUS_ENABLE_PROFILING=${PROFILING} \
|
||||
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
|
||||
-DCUSTOMIZATION=${CUSTOMIZATION} \
|
||||
-DFAISS_URL=${CUSTOMIZED_FAISS_URL} \
|
||||
-DMILVUS_GPU_VERSION=${GPU_VERSION} \
|
||||
-DFAISS_WITH_MKL=${WITH_MKL} \
|
||||
-DMILVUS_WITH_PROMETHEUS=${WITH_PROMETHEUS} \
|
||||
../"
|
||||
echo ${CMAKE_CMD}
|
||||
${CMAKE_CMD}
|
||||
|
||||
if [[ ${MAKE_CLEAN} == "ON" ]]; then
|
||||
make clean
|
||||
make clean
|
||||
fi
|
||||
|
||||
if [[ ${RUN_CPPLINT} == "ON" ]]; then
|
||||
# cpplint check
|
||||
make lint
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR! cpplint check failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "cpplint check passed!"
|
||||
# cpplint check
|
||||
make lint
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR! cpplint check failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "cpplint check passed!"
|
||||
|
||||
# clang-format check
|
||||
make check-clang-format
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR! clang-format check failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "clang-format check passed!"
|
||||
# clang-format check
|
||||
make check-clang-format
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "ERROR! clang-format check failed"
|
||||
exit 1
|
||||
fi
|
||||
echo "clang-format check passed!"
|
||||
|
||||
# # clang-tidy check
|
||||
# make check-clang-tidy
|
||||
|
@ -140,11 +159,11 @@ if [[ ${RUN_CPPLINT} == "ON" ]]; then
|
|||
# echo "clang-tidy check passed!"
|
||||
else
|
||||
|
||||
# strip binary symbol
|
||||
if [[ ${BUILD_TYPE} != "Debug" ]]; then
|
||||
strip src/milvus_server
|
||||
fi
|
||||
# strip binary symbol
|
||||
if [[ ${BUILD_TYPE} != "Debug" ]]; then
|
||||
strip src/milvus_server
|
||||
fi
|
||||
|
||||
# compile and build
|
||||
make -j 8 install || exit 1
|
||||
fi
|
||||
# compile and build
|
||||
make -j 8 install || exit 1
|
||||
fi
|
||||
|
|
|
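For readers skimming the revised option set, the sketch below strings a few of the documented flags together. It is only an illustration of the help text above; the install prefix and the FAISS path are placeholder values, not paths used by this change.

```shell
# CPU-only Release build into a local ./milvus prefix, wiping any previous build output
./build.sh -t Release -r -p $(pwd)/milvus

# GPU build with MKL and unit tests, pointing -f at a pre-installed FAISS (placeholder path)
./build.sh -t Release -g -m -u -f /opt/faiss
```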
@ -13,16 +13,16 @@ macro(define_option name description default)
|
|||
endmacro()
|
||||
|
||||
function(list_join lst glue out)
|
||||
if("${${lst}}" STREQUAL "")
|
||||
if ("${${lst}}" STREQUAL "")
|
||||
set(${out} "" PARENT_SCOPE)
|
||||
return()
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
list(GET ${lst} 0 joined)
|
||||
list(REMOVE_AT ${lst} 0)
|
||||
foreach(item ${${lst}})
|
||||
foreach (item ${${lst}})
|
||||
set(joined "${joined}${glue}${item}")
|
||||
endforeach()
|
||||
endforeach ()
|
||||
set(${out} ${joined} PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
|
@ -35,22 +35,29 @@ macro(define_option_string name description default)
|
|||
|
||||
set("${name}_OPTION_ENUM" ${ARGN})
|
||||
list_join("${name}_OPTION_ENUM" "|" "${name}_OPTION_ENUM")
|
||||
if(NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
|
||||
if (NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
|
||||
set_property(CACHE ${name} PROPERTY STRINGS ${ARGN})
|
||||
endif()
|
||||
endif ()
|
||||
endmacro()
|
||||
|
||||
#----------------------------------------------------------------------
|
||||
set_option_category("GPU version")
|
||||
|
||||
define_option(MILVUS_GPU_VERSION "Build GPU version" OFF)
|
||||
|
||||
#----------------------------------------------------------------------
|
||||
set_option_category("Thirdparty")
|
||||
|
||||
set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "AUTO")
|
||||
set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "BUNDLED")
|
||||
|
||||
define_option_string(MILVUS_DEPENDENCY_SOURCE
|
||||
"Method to use for acquiring MILVUS's build dependencies"
|
||||
"${MILVUS_DEPENDENCY_SOURCE_DEFAULT}"
|
||||
"AUTO"
|
||||
"BUNDLED"
|
||||
"SYSTEM")
|
||||
"Method to use for acquiring MILVUS's build dependencies"
|
||||
"${MILVUS_DEPENDENCY_SOURCE_DEFAULT}"
|
||||
"AUTO"
|
||||
"BUNDLED"
|
||||
"SYSTEM")
|
||||
|
||||
define_option(MILVUS_USE_CCACHE "Use ccache when compiling (if available)" ON)
|
||||
|
||||
define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD
|
||||
"Show output from ExternalProjects rather than just logging to files" ON)
|
||||
|
@ -70,33 +77,21 @@ define_option(MILVUS_WITH_YAMLCPP "Build with yaml-cpp library" ON)
|
|||
if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
|
||||
define_option(MILVUS_WITH_LIBUNWIND "Build with libunwind" ON)
|
||||
define_option(MILVUS_WITH_GPERFTOOLS "Build with gperftools" ON)
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
define_option(MILVUS_WITH_GRPC "Build with GRPC" ON)
|
||||
|
||||
define_option(MILVUS_WITH_ZLIB "Build with zlib compression" ON)
|
||||
|
||||
#----------------------------------------------------------------------
|
||||
if(MSVC)
|
||||
set_option_category("MSVC")
|
||||
|
||||
define_option(MSVC_LINK_VERBOSE
|
||||
"Pass verbose linking options when linking libraries and executables"
|
||||
OFF)
|
||||
|
||||
define_option(MILVUS_USE_STATIC_CRT "Build MILVUS with statically linked CRT" OFF)
|
||||
endif()
|
||||
|
||||
|
||||
#----------------------------------------------------------------------
|
||||
set_option_category("Test and benchmark")
|
||||
|
||||
unset(MILVUS_BUILD_TESTS CACHE)
|
||||
if (BUILD_UNIT_TEST)
|
||||
define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" ON)
|
||||
else()
|
||||
else ()
|
||||
define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" OFF)
|
||||
endif(BUILD_UNIT_TEST)
|
||||
endif (BUILD_UNIT_TEST)
|
||||
|
||||
#----------------------------------------------------------------------
|
||||
macro(config_summary)
|
||||
|
@ -108,12 +103,12 @@ macro(config_summary)
|
|||
message(STATUS " Generator: ${CMAKE_GENERATOR}")
|
||||
message(STATUS " Build type: ${CMAKE_BUILD_TYPE}")
|
||||
message(STATUS " Source directory: ${CMAKE_CURRENT_SOURCE_DIR}")
|
||||
if(${CMAKE_EXPORT_COMPILE_COMMANDS})
|
||||
if (${CMAKE_EXPORT_COMPILE_COMMANDS})
|
||||
message(
|
||||
STATUS " Compile commands: ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
foreach(category ${MILVUS_OPTION_CATEGORIES})
|
||||
foreach (category ${MILVUS_OPTION_CATEGORIES})
|
||||
|
||||
message(STATUS)
|
||||
message(STATUS "${category} options:")
|
||||
|
@ -121,50 +116,50 @@ macro(config_summary)
|
|||
set(option_names ${MILVUS_${category}_OPTION_NAMES})
|
||||
|
||||
set(max_value_length 0)
|
||||
foreach(name ${option_names})
|
||||
foreach (name ${option_names})
|
||||
string(LENGTH "\"${${name}}\"" value_length)
|
||||
if(${max_value_length} LESS ${value_length})
|
||||
if (${max_value_length} LESS ${value_length})
|
||||
set(max_value_length ${value_length})
|
||||
endif()
|
||||
endforeach()
|
||||
endif ()
|
||||
endforeach ()
|
||||
|
||||
foreach(name ${option_names})
|
||||
if("${${name}_OPTION_TYPE}" STREQUAL "string")
|
||||
foreach (name ${option_names})
|
||||
if ("${${name}_OPTION_TYPE}" STREQUAL "string")
|
||||
set(value "\"${${name}}\"")
|
||||
else()
|
||||
else ()
|
||||
set(value "${${name}}")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
set(default ${${name}_OPTION_DEFAULT})
|
||||
set(description ${${name}_OPTION_DESCRIPTION})
|
||||
string(LENGTH ${description} description_length)
|
||||
if(${description_length} LESS 70)
|
||||
if (${description_length} LESS 70)
|
||||
string(
|
||||
SUBSTRING
|
||||
" "
|
||||
${description_length} -1 description_padding)
|
||||
else()
|
||||
else ()
|
||||
set(description_padding "
|
||||
")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
set(comment "[${name}]")
|
||||
|
||||
if("${value}" STREQUAL "${default}")
|
||||
if ("${value}" STREQUAL "${default}")
|
||||
set(comment "[default] ${comment}")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
if(NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
|
||||
if (NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
|
||||
set(comment "${comment} [${${name}_OPTION_ENUM}]")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
string(
|
||||
SUBSTRING "${value} "
|
||||
0 ${max_value_length} value)
|
||||
|
||||
message(STATUS " ${description} ${description_padding} ${value} ${comment}")
|
||||
endforeach()
|
||||
endforeach ()
|
||||
|
||||
endforeach()
|
||||
endforeach ()
|
||||
|
||||
endmacro()
|
||||
|
|
|
@ -164,8 +164,10 @@ endif ()
|
|||
|
||||
macro(resolve_dependency DEPENDENCY_NAME)
|
||||
if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO")
|
||||
#disable find_package for now
|
||||
build_dependency(${DEPENDENCY_NAME})
|
||||
find_package(${DEPENDENCY_NAME} MODULE)
|
||||
if(NOT ${${DEPENDENCY_NAME}_FOUND})
|
||||
build_dependency(${DEPENDENCY_NAME})
|
||||
endif()
|
||||
elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "BUNDLED")
|
||||
build_dependency(${DEPENDENCY_NAME})
|
||||
elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "SYSTEM")
|
||||
|
|
|
@ -27,10 +27,7 @@ metric_config:
|
|||
port: 8080 # port prometheus uses to fetch metrics, must in range [1025, 65534]
|
||||
|
||||
cache_config:
|
||||
cpu_cache_capacity: 16 # GB, CPU memory used for cache, must be a positive integer
|
||||
cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0]
|
||||
gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer
|
||||
gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0]
|
||||
cpu_cache_capacity: 16 # GB, size of CPU memory used for cache, must be a positive integer
|
||||
cache_insert_data: false # whether to load inserted data into cache, must be a boolean
|
||||
|
||||
engine_config:
|
||||
|
@ -38,8 +35,10 @@ engine_config:
|
|||
# if nq >= use_blas_threshold, use OpenBlas, slower with stable response times
|
||||
gpu_search_threshold: 1000 # threshold beyond which the search computation is executed on GPUs only
|
||||
|
||||
resource_config:
|
||||
search_resources: # define the devices used for search computation, must be in format: cpu or gpux
|
||||
- cpu
|
||||
gpu_resource_config:
|
||||
enable: false # whether to enable GPU resources
|
||||
cache_capacity: 4 # GB, size of GPU memory per card used for cache, must be a positive integer
|
||||
search_resources: # define the GPU devices used for search computation, must be in format gpux
|
||||
- gpu0
|
||||
build_index_resources: # define the GPU devices used for index building, must be in format gpux
|
||||
- gpu0
|
||||
index_build_device: gpu0 # GPU used for building index, must be in format: gpux
|
|
@ -0,0 +1,44 @@
# Default values are used when you make no changes to the following parameters.

server_config:
  address: 0.0.0.0                # milvus server ip address (IPv4)
  port: 19530                     # milvus server port, must in range [1025, 65534]
  deploy_mode: single             # deployment type: single, cluster_readonly, cluster_writable
  time_zone: UTC+8                # time zone, must be in format: UTC+X

db_config:
  primary_path: @MILVUS_DB_PATH@  # path used to store data and meta
  secondary_path:                 # path used to store data only, split by semicolon

  backend_url: sqlite://:@:/      # URI format: dialect://username:password@host:port/database
                                  # Keep 'dialect://:@:/', and replace other texts with real values
                                  # Replace 'dialect' with 'mysql' or 'sqlite'

  insert_buffer_size: 4           # GB, maximum insert buffer size allowed, must be a positive integer
                                  # sum of insert_buffer_size and cpu_cache_capacity cannot exceed total memory

  preload_table:                  # preload data at startup, '*' means load all tables, empty value means no preload
                                  # you can specify preload tables like this: table1,table2,table3

metric_config:
  enable_monitor: false           # enable monitoring or not, must be a boolean
  collector: prometheus           # prometheus
  prometheus_config:
    port: 8080                    # port prometheus uses to fetch metrics, must in range [1025, 65534]

cache_config:
  cpu_cache_capacity: 16          # GB, size of CPU memory used for cache, must be a positive integer
  cache_insert_data: false        # whether to load inserted data into cache, must be a boolean

engine_config:
  use_blas_threshold: 1100        # if nq < use_blas_threshold, use SSE, faster with fluctuated response times
                                  # if nq >= use_blas_threshold, use OpenBlas, slower with stable response times
  gpu_search_threshold: 1000      # threshold beyond which the search computation is executed on GPUs only

gpu_resource_config:
  enable: true                    # whether to enable GPU resources
  cache_capacity: 4               # GB, size of GPU memory per card used for cache, must be a positive integer
  search_resources:               # define the GPU devices used for search computation, must be in format gpux
    - gpu0
  build_index_resources:          # define the GPU devices used for index building, must be in format gpux
    - gpu0
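As a small illustration of the backend_url comment above, the snippet below switches the dialect from the default SQLite URI to MySQL. The host, credentials, and the server_config.yaml file name are assumptions for the example, not values taken from this change.

```shell
# Hypothetical edit: replace the default SQLite URI with a MySQL URI of the form
# dialect://username:password@host:port/database (host and credentials below are placeholders).
sed -i "s#backend_url: sqlite://:@:/#backend_url: mysql://root:123456@127.0.0.1:3306/milvus#" server_config.yaml
```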
@ -122,9 +122,7 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
"*/src/server/Server.cpp" \
"*/src/server/DBWrapper.cpp" \
"*/src/server/grpc_impl/GrpcServer.cpp" \
"*/easylogging++.h" \
"*/easylogging++.cc" \
"*/src/external/*"
"*/thirdparty/*"

if [ $? -ne 0 ]; then
echo "generate ${FILE_INFO_OUTPUT_NEW} failed"
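The hunk above only filters the lcov tracefile. As a hedged sketch of the usual next step, the filtered file can be rendered to an HTML report with genhtml; the cov_html output directory name is an assumption, not taken from this script.

```shell
# Render the filtered tracefile produced above into a browsable HTML report.
genhtml "${FILE_INFO_OUTPUT_NEW}" --output-directory cov_html
```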
@ -0,0 +1,28 @@
## Data Migration

#### 0.3.x
Legacy 0.3.x data cannot be migrated to later versions.

#### 0.4.x
Legacy 0.4.x data can be reused directly by 0.5.x.

Legacy 0.4.x data can be migrated to 0.6.x.

#### 0.5.x
Legacy 0.5.x data can be migrated to 0.6.x.

#### 0.6.x
How to migrate legacy 0.4.x/0.5.x data:

For sqlite meta:
```shell
$ sqlite3 [path_to]/meta.sqlite < sqlite_4_to_6.sql
```

For mysql meta:
```shell
$ mysql -h127.0.0.1 -uroot -p123456 -Dmilvus < mysql_4_to_6.sql
```
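After running either script, a quick sanity check is to confirm that the new columns and version value landed. This is a minimal sketch for the SQLite case, reusing the placeholder path from the commands above and the column names from sqlite_4_to_6.sql.

```shell
# Verify the 0.6.0 schema after migration (SQLite meta).
sqlite3 [path_to]/meta.sqlite "PRAGMA table_info(Tables);" | grep -E "owner_table|partition_tag|version"
sqlite3 [path_to]/meta.sqlite "SELECT DISTINCT version FROM Tables;"
```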
@ -0,0 +1,4 @@
alter table Tables add column owner_table VARCHAR(255) DEFAULT '' NOT NULL;
alter table Tables add column partition_tag VARCHAR(255) DEFAULT '' NOT NULL;
alter table Tables add column version VARCHAR(64) DEFAULT '0.6.0' NOT NULL;
update Tables set version='0.6.0';

@ -0,0 +1,3 @@
alter table Tables drop column owner_table;
alter table Tables drop column partition_tag;
alter table Tables drop column version;

@ -0,0 +1,4 @@
alter table Tables add column 'owner_table' TEXT DEFAULT '' NOT NULL;
alter table Tables add column 'partition_tag' TEXT DEFAULT '' NOT NULL;
alter table Tables add column 'version' TEXT DEFAULT '0.6.0' NOT NULL;
update Tables set version='0.6.0';

@ -0,0 +1,7 @@
CREATE TABLE 'TempTables' ( 'id' INTEGER PRIMARY KEY NOT NULL , 'table_id' TEXT UNIQUE NOT NULL , 'state' INTEGER NOT NULL , 'dimension' INTEGER NOT NULL , 'created_on' INTEGER NOT NULL , 'flag' INTEGER DEFAULT 0 NOT NULL , 'index_file_size' INTEGER NOT NULL , 'engine_type' INTEGER NOT NULL , 'nlist' INTEGER NOT NULL , 'metric_type' INTEGER NOT NULL);

INSERT INTO TempTables SELECT id, table_id, state, dimension, created_on, flag, index_file_size, engine_type, nlist, metric_type FROM Tables;

DROP TABLE Tables;

ALTER TABLE TempTables RENAME TO Tables;

@ -19,13 +19,15 @@
|
|||
|
||||
include_directories(${MILVUS_SOURCE_DIR})
|
||||
include_directories(${MILVUS_ENGINE_SRC})
|
||||
include_directories(${MILVUS_THIRDPARTY_SRC})
|
||||
|
||||
include_directories(${CUDA_TOOLKIT_ROOT_DIR}/include)
|
||||
include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-status)
|
||||
include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-milvus)
|
||||
|
||||
#this statement must put here, since the INDEX_INCLUDE_DIRS is defined in code/CMakeList.txt
|
||||
add_subdirectory(index)
|
||||
if (FAISS_WITH_MKL)
|
||||
add_compile_definitions("WITH_MKL")
|
||||
endif ()
|
||||
|
||||
set(INDEX_INCLUDE_DIRS ${INDEX_INCLUDE_DIRS} PARENT_SCOPE)
|
||||
foreach (dir ${INDEX_INCLUDE_DIRS})
|
||||
|
@ -35,6 +37,7 @@ endforeach ()
|
|||
aux_source_directory(${MILVUS_ENGINE_SRC}/cache cache_files)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/config config_files)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/metrics metrics_files)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/metrics/prometheus metrics_prometheus_files)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/db db_main_files)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/db/engine db_engine_files)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/db/insert db_insert_files)
|
||||
|
@ -64,15 +67,21 @@ set(scheduler_files
|
|||
${scheduler_task_files}
|
||||
)
|
||||
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/external/easyloggingpp external_easyloggingpp_files)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/external/nlohmann external_nlohmann_files)
|
||||
set(external_files
|
||||
${external_easyloggingpp_files}
|
||||
${external_nlohmann_files}
|
||||
aux_source_directory(${MILVUS_THIRDPARTY_SRC}/easyloggingpp thirdparty_easyloggingpp_files)
|
||||
aux_source_directory(${MILVUS_THIRDPARTY_SRC}/nlohmann thirdparty_nlohmann_files)
|
||||
set(thirdparty_files
|
||||
${thirdparty_easyloggingpp_files}
|
||||
${thirdparty_nlohmann_files}
|
||||
)
|
||||
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/server server_files)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/server/grpc_impl grpc_server_files)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/server/grpc_impl/request grpc_request_files)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/server/grpc_impl grpc_impl_files)
|
||||
set(grpc_server_files
|
||||
${grpc_request_files}
|
||||
${grpc_impl_files}
|
||||
)
|
||||
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/utils utils_files)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/wrapper wrapper_files)
|
||||
|
||||
|
@ -84,11 +93,16 @@ set(engine_files
|
|||
${db_insert_files}
|
||||
${db_meta_files}
|
||||
${metrics_files}
|
||||
${external_files}
|
||||
${thirdparty_files}
|
||||
${utils_files}
|
||||
${wrapper_files}
|
||||
)
|
||||
|
||||
if (MILVUS_WITH_PROMETHEUS)
|
||||
set(engine_files ${engine_files}
|
||||
${metrics_prometheus_files})
|
||||
endif ()
|
||||
|
||||
set(client_grpc_lib
|
||||
grpcpp_channelz
|
||||
grpc++
|
||||
|
@ -109,35 +123,50 @@ set(boost_lib
|
|||
libboost_serialization.a
|
||||
)
|
||||
|
||||
set(cuda_lib
|
||||
${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
|
||||
cudart
|
||||
cublas
|
||||
)
|
||||
|
||||
set(third_party_libs
|
||||
sqlite
|
||||
${client_grpc_lib}
|
||||
yaml-cpp
|
||||
${prometheus_lib}
|
||||
${cuda_lib}
|
||||
mysqlpp
|
||||
zlib
|
||||
${boost_lib}
|
||||
)
|
||||
|
||||
if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
|
||||
if (MILVUS_GPU_VERSION)
|
||||
include_directories(${CUDA_INCLUDE_DIRS})
|
||||
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
|
||||
set(cuda_lib
|
||||
${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
|
||||
cudart
|
||||
cublas
|
||||
)
|
||||
set(third_party_libs ${third_party_libs}
|
||||
gperftools
|
||||
libunwind
|
||||
)
|
||||
${cuda_lib}
|
||||
)
|
||||
aux_source_directory(${MILVUS_ENGINE_SRC}/wrapper/gpu wrapper_gpu_files)
|
||||
set(engine_files ${engine_files}
|
||||
${wrapper_gpu_files}
|
||||
)
|
||||
endif ()
|
||||
|
||||
if (MILVUS_ENABLE_PROFILING)
|
||||
set(third_party_libs ${third_party_libs}
|
||||
gperftools
|
||||
libunwind
|
||||
)
|
||||
endif ()
|
||||
|
||||
if (MILVUS_WITH_PROMETHEUS)
|
||||
set(third_party_libs ${third_party_libs}
|
||||
${prometheus_lib}
|
||||
)
|
||||
endif ()
|
||||
|
||||
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
|
||||
set(engine_libs
|
||||
pthread
|
||||
libgomp.a
|
||||
libgfortran.a
|
||||
dl
|
||||
)
|
||||
|
||||
if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
|
||||
|
@ -147,26 +176,33 @@ if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
|
|||
)
|
||||
endif ()
|
||||
|
||||
cuda_add_library(milvus_engine STATIC ${engine_files})
|
||||
add_library(milvus_engine STATIC ${engine_files})
|
||||
target_link_libraries(milvus_engine
|
||||
knowhere
|
||||
${engine_libs}
|
||||
${third_party_libs}
|
||||
${engine_libs}
|
||||
)
|
||||
|
||||
add_library(metrics STATIC ${metrics_files})
|
||||
if (MILVUS_WITH_PROMETHEUS)
|
||||
add_library(metrics STATIC ${metrics_files} ${metrics_prometheus_files})
|
||||
else ()
|
||||
add_library(metrics STATIC ${metrics_files})
|
||||
endif ()
|
||||
|
||||
set(metrics_lib
|
||||
yaml-cpp
|
||||
${prometheus_lib}
|
||||
)
|
||||
|
||||
if (MILVUS_WITH_PROMETHEUS)
|
||||
set(metrics_lib ${metrics_lib}
|
||||
${prometheus_lib}
|
||||
)
|
||||
endif ()
|
||||
|
||||
target_link_libraries(metrics ${metrics_lib})
|
||||
|
||||
set(server_libs
|
||||
milvus_engine
|
||||
pthread
|
||||
dl
|
||||
metrics
|
||||
)
|
||||
|
||||
|
|
|
@ -15,24 +15,18 @@
|
|||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
|
||||
|
||||
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
constexpr double DEFAULT_THRESHHOLD_PERCENT = 0.85;
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
Cache<ItemObj>::Cache(int64_t capacity, uint64_t cache_max_count)
|
||||
: usage_(0),
|
||||
capacity_(capacity),
|
||||
freemem_percent_(DEFAULT_THRESHHOLD_PERCENT),
|
||||
lru_(cache_max_count) {
|
||||
// AGENT_LOG_DEBUG << "Construct Cache with capacity " << std::to_string(mem_capacity)
|
||||
: usage_(0), capacity_(capacity), freemem_percent_(DEFAULT_THRESHHOLD_PERCENT), lru_(cache_max_count) {
|
||||
// AGENT_LOG_DEBUG << "Construct Cache with capacity " << std::to_string(mem_capacity)
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
void
|
||||
Cache<ItemObj>::set_capacity(int64_t capacity) {
|
||||
if (capacity > 0) {
|
||||
|
@ -41,23 +35,23 @@ Cache<ItemObj>::set_capacity(int64_t capacity) {
|
|||
}
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
size_t
|
||||
Cache<ItemObj>::size() const {
|
||||
std::lock_guard<std::mutex> lock(mutex_);
|
||||
return lru_.size();
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
bool
|
||||
Cache<ItemObj>::exists(const std::string &key) {
|
||||
Cache<ItemObj>::exists(const std::string& key) {
|
||||
std::lock_guard<std::mutex> lock(mutex_);
|
||||
return lru_.exists(key);
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
ItemObj
|
||||
Cache<ItemObj>::get(const std::string &key) {
|
||||
Cache<ItemObj>::get(const std::string& key) {
|
||||
std::lock_guard<std::mutex> lock(mutex_);
|
||||
if (!lru_.exists(key)) {
|
||||
return nullptr;
|
||||
|
@ -66,60 +60,59 @@ Cache<ItemObj>::get(const std::string &key) {
|
|||
return lru_.get(key);
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
void
|
||||
Cache<ItemObj>::insert(const std::string &key, const ItemObj &item) {
|
||||
Cache<ItemObj>::insert(const std::string& key, const ItemObj& item) {
|
||||
if (item == nullptr) {
|
||||
return;
|
||||
}
|
||||
|
||||
// if(item->size() > capacity_) {
|
||||
// SERVER_LOG_ERROR << "Item size " << item->size()
|
||||
// << " is too large to insert into cache, capacity " << capacity_;
|
||||
// return;
|
||||
// }
|
||||
// if(item->size() > capacity_) {
|
||||
// SERVER_LOG_ERROR << "Item size " << item->size()
|
||||
// << " is too large to insert into cache, capacity " << capacity_;
|
||||
// return;
|
||||
// }
|
||||
|
||||
//calculate usage
|
||||
// calculate usage
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(mutex_);
|
||||
|
||||
//if key already exist, subtract old item size
|
||||
// if key already exist, subtract old item size
|
||||
if (lru_.exists(key)) {
|
||||
const ItemObj &old_item = lru_.get(key);
|
||||
const ItemObj& old_item = lru_.get(key);
|
||||
usage_ -= old_item->Size();
|
||||
}
|
||||
|
||||
//plus new item size
|
||||
// plus new item size
|
||||
usage_ += item->Size();
|
||||
}
|
||||
|
||||
//if usage exceed capacity, free some items
|
||||
// if usage exceed capacity, free some items
|
||||
if (usage_ > capacity_) {
|
||||
SERVER_LOG_DEBUG << "Current usage " << usage_
|
||||
<< " exceeds cache capacity " << capacity_
|
||||
SERVER_LOG_DEBUG << "Current usage " << usage_ << " exceeds cache capacity " << capacity_
|
||||
<< ", start free memory";
|
||||
free_memory();
|
||||
}
|
||||
|
||||
//insert new item
|
||||
// insert new item
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(mutex_);
|
||||
|
||||
lru_.put(key, item);
|
||||
SERVER_LOG_DEBUG << "Insert " << key << " size:" << item->Size()
|
||||
<< " bytes into cache, usage: " << usage_ << " bytes";
|
||||
SERVER_LOG_DEBUG << "Insert " << key << " size:" << item->Size() << " bytes into cache, usage: " << usage_
|
||||
<< " bytes";
|
||||
}
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
void
|
||||
Cache<ItemObj>::erase(const std::string &key) {
|
||||
Cache<ItemObj>::erase(const std::string& key) {
|
||||
std::lock_guard<std::mutex> lock(mutex_);
|
||||
if (!lru_.exists(key)) {
|
||||
return;
|
||||
}
|
||||
|
||||
const ItemObj &old_item = lru_.get(key);
|
||||
const ItemObj& old_item = lru_.get(key);
|
||||
usage_ -= old_item->Size();
|
||||
|
||||
SERVER_LOG_DEBUG << "Erase " << key << " size: " << old_item->Size();
|
||||
|
@ -127,7 +120,7 @@ Cache<ItemObj>::erase(const std::string &key) {
|
|||
lru_.erase(key);
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
void
|
||||
Cache<ItemObj>::clear() {
|
||||
std::lock_guard<std::mutex> lock(mutex_);
|
||||
|
@ -137,15 +130,16 @@ Cache<ItemObj>::clear() {
|
|||
}
|
||||
|
||||
/* free memory space when CACHE occupation exceed its capacity */
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
void
|
||||
Cache<ItemObj>::free_memory() {
|
||||
if (usage_ <= capacity_) return;
|
||||
if (usage_ <= capacity_)
|
||||
return;
|
||||
|
||||
int64_t threshhold = capacity_ * freemem_percent_;
|
||||
int64_t delta_size = usage_ - threshhold;
|
||||
if (delta_size <= 0) {
|
||||
delta_size = 1;//ensure at least one item erased
|
||||
delta_size = 1; // ensure at least one item erased
|
||||
}
|
||||
|
||||
std::set<std::string> key_array;
|
||||
|
@ -156,8 +150,8 @@ Cache<ItemObj>::free_memory() {
|
|||
|
||||
auto it = lru_.rbegin();
|
||||
while (it != lru_.rend() && released_size < delta_size) {
|
||||
auto &key = it->first;
|
||||
auto &obj_ptr = it->second;
|
||||
auto& key = it->first;
|
||||
auto& obj_ptr = it->second;
|
||||
|
||||
key_array.emplace(key);
|
||||
released_size += obj_ptr->Size();
|
||||
|
@ -167,14 +161,14 @@ Cache<ItemObj>::free_memory() {
|
|||
|
||||
SERVER_LOG_DEBUG << "to be released memory size: " << released_size;
|
||||
|
||||
for (auto &key : key_array) {
|
||||
for (auto& key : key_array) {
|
||||
erase(key);
|
||||
}
|
||||
|
||||
print();
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
void
|
||||
Cache<ItemObj>::print() {
|
||||
size_t cache_count = 0;
|
||||
|
@ -188,7 +182,5 @@ Cache<ItemObj>::print() {
|
|||
SERVER_LOG_DEBUG << "[Cache capacity]: " << capacity_ << " bytes";
|
||||
}
|
||||
|
||||
} // namespace cache
|
||||
} // namespace milvus
|
||||
|
||||
|
||||
} // namespace cache
|
||||
} // namespace milvus
|
||||
|
|
|
@ -15,21 +15,18 @@
|
|||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
|
||||
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
CacheMgr<ItemObj>::CacheMgr() {
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
CacheMgr<ItemObj>::~CacheMgr() {
|
||||
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
uint64_t
|
||||
CacheMgr<ItemObj>::ItemCount() const {
|
||||
if (cache_ == nullptr) {
|
||||
|
@ -37,12 +34,12 @@ CacheMgr<ItemObj>::ItemCount() const {
|
|||
return 0;
|
||||
}
|
||||
|
||||
return (uint64_t) (cache_->size());
|
||||
return (uint64_t)(cache_->size());
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
bool
|
||||
CacheMgr<ItemObj>::ItemExists(const std::string &key) {
|
||||
CacheMgr<ItemObj>::ItemExists(const std::string& key) {
|
||||
if (cache_ == nullptr) {
|
||||
SERVER_LOG_ERROR << "Cache doesn't exist";
|
||||
return false;
|
||||
|
@ -51,9 +48,9 @@ CacheMgr<ItemObj>::ItemExists(const std::string &key) {
|
|||
return cache_->exists(key);
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
ItemObj
|
||||
CacheMgr<ItemObj>::GetItem(const std::string &key) {
|
||||
CacheMgr<ItemObj>::GetItem(const std::string& key) {
|
||||
if (cache_ == nullptr) {
|
||||
SERVER_LOG_ERROR << "Cache doesn't exist";
|
||||
return nullptr;
|
||||
|
@ -62,9 +59,9 @@ CacheMgr<ItemObj>::GetItem(const std::string &key) {
|
|||
return cache_->get(key);
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
void
|
||||
CacheMgr<ItemObj>::InsertItem(const std::string &key, const ItemObj &data) {
|
||||
CacheMgr<ItemObj>::InsertItem(const std::string& key, const ItemObj& data) {
|
||||
if (cache_ == nullptr) {
|
||||
SERVER_LOG_ERROR << "Cache doesn't exist";
|
||||
return;
|
||||
|
@ -74,9 +71,9 @@ CacheMgr<ItemObj>::InsertItem(const std::string &key, const ItemObj &data) {
|
|||
server::Metrics::GetInstance().CacheAccessTotalIncrement();
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
void
|
||||
CacheMgr<ItemObj>::EraseItem(const std::string &key) {
|
||||
CacheMgr<ItemObj>::EraseItem(const std::string& key) {
|
||||
if (cache_ == nullptr) {
|
||||
SERVER_LOG_ERROR << "Cache doesn't exist";
|
||||
return;
|
||||
|
@ -86,7 +83,7 @@ CacheMgr<ItemObj>::EraseItem(const std::string &key) {
|
|||
server::Metrics::GetInstance().CacheAccessTotalIncrement();
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
void
|
||||
CacheMgr<ItemObj>::PrintInfo() {
|
||||
if (cache_ == nullptr) {
|
||||
|
@ -97,7 +94,7 @@ CacheMgr<ItemObj>::PrintInfo() {
|
|||
cache_->print();
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
void
|
||||
CacheMgr<ItemObj>::ClearCache() {
|
||||
if (cache_ == nullptr) {
|
||||
|
@ -108,7 +105,7 @@ CacheMgr<ItemObj>::ClearCache() {
|
|||
cache_->clear();
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
int64_t
|
||||
CacheMgr<ItemObj>::CacheUsage() const {
|
||||
if (cache_ == nullptr) {
|
||||
|
@ -119,7 +116,7 @@ CacheMgr<ItemObj>::CacheUsage() const {
|
|||
return cache_->usage();
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
int64_t
|
||||
CacheMgr<ItemObj>::CacheCapacity() const {
|
||||
if (cache_ == nullptr) {
|
||||
|
@ -130,7 +127,7 @@ CacheMgr<ItemObj>::CacheCapacity() const {
|
|||
return cache_->capacity();
|
||||
}
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
void
|
||||
CacheMgr<ItemObj>::SetCapacity(int64_t capacity) {
|
||||
if (cache_ == nullptr) {
|
||||
|
@ -140,6 +137,5 @@ CacheMgr<ItemObj>::SetCapacity(int64_t capacity) {
|
|||
cache_->set_capacity(capacity);
|
||||
}
|
||||
|
||||
} // namespace cache
|
||||
} // namespace milvus
|
||||
|
||||
} // namespace cache
|
||||
} // namespace milvus
|
||||
|
|
|
@ -37,7 +37,7 @@ GpuCacheMgr::GpuCacheMgr() {
Status s;

int64_t gpu_cache_cap;
s = config.GetCacheConfigGpuCacheCapacity(gpu_cache_cap);
s = config.GetGpuResourceConfigCacheCapacity(gpu_cache_cap);
if (!s.ok()) {
SERVER_LOG_ERROR << s.message();
}

@ -45,7 +45,7 @@ GpuCacheMgr::GpuCacheMgr() {
cache_ = std::make_shared<Cache<DataObjPtr>>(cap, 1UL << 32);

float gpu_mem_threshold;
s = config.GetCacheConfigGpuCacheThreshold(gpu_mem_threshold);
s = config.GetGpuResourceConfigCacheThreshold(gpu_mem_threshold);
if (!s.ok()) {
SERVER_LOG_ERROR << s.message();
}

@ -47,44 +47,68 @@ class DB {
|
|||
|
||||
virtual Status
|
||||
CreateTable(meta::TableSchema& table_schema_) = 0;
|
||||
|
||||
virtual Status
|
||||
DeleteTable(const std::string& table_id, const meta::DatesT& dates) = 0;
|
||||
DropTable(const std::string& table_id, const meta::DatesT& dates) = 0;
|
||||
|
||||
virtual Status
|
||||
DescribeTable(meta::TableSchema& table_schema_) = 0;
|
||||
|
||||
virtual Status
|
||||
HasTable(const std::string& table_id, bool& has_or_not_) = 0;
|
||||
|
||||
virtual Status
|
||||
AllTables(std::vector<meta::TableSchema>& table_schema_array) = 0;
|
||||
|
||||
virtual Status
|
||||
GetTableRowCount(const std::string& table_id, uint64_t& row_count) = 0;
|
||||
|
||||
virtual Status
|
||||
PreloadTable(const std::string& table_id) = 0;
|
||||
|
||||
virtual Status
|
||||
UpdateTableFlag(const std::string& table_id, int64_t flag) = 0;
|
||||
|
||||
virtual Status
|
||||
InsertVectors(const std::string& table_id_, uint64_t n, const float* vectors, IDNumbers& vector_ids_) = 0;
|
||||
CreatePartition(const std::string& table_id, const std::string& partition_name,
|
||||
const std::string& partition_tag) = 0;
|
||||
|
||||
virtual Status
|
||||
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
|
||||
ResultIds& result_ids, ResultDistances& result_distances) = 0;
|
||||
DropPartition(const std::string& partition_name) = 0;
|
||||
|
||||
virtual Status
|
||||
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
|
||||
const meta::DatesT& dates, ResultIds& result_ids, ResultDistances& result_distances) = 0;
|
||||
DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) = 0;
|
||||
|
||||
virtual Status
|
||||
Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
|
||||
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) = 0;
|
||||
|
||||
virtual Status
|
||||
InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors,
|
||||
IDNumbers& vector_ids_) = 0;
|
||||
|
||||
virtual Status
|
||||
Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
|
||||
uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) = 0;
|
||||
|
||||
virtual Status
|
||||
Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
|
||||
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
|
||||
ResultDistances& result_distances) = 0;
|
||||
|
||||
virtual Status
|
||||
QueryByFileID(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
|
||||
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
|
||||
ResultDistances& result_distances) = 0;
|
||||
|
||||
virtual Status
|
||||
Size(uint64_t& result) = 0;
|
||||
|
||||
virtual Status
|
||||
CreateIndex(const std::string& table_id, const TableIndex& index) = 0;
|
||||
|
||||
virtual Status
|
||||
DescribeIndex(const std::string& table_id, TableIndex& index) = 0;
|
||||
|
||||
virtual Status
|
||||
DropIndex(const std::string& table_id) = 0;
|
||||
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include "scheduler/job/DeleteJob.h"
|
||||
#include "scheduler/job/SearchJob.h"
|
||||
#include "utils/Log.h"
|
||||
#include "utils/StringHelpFunctions.h"
|
||||
#include "utils/TimeRecorder.h"
|
||||
|
||||
#include <assert.h>
|
||||
|
@ -38,6 +39,7 @@
|
|||
#include <chrono>
|
||||
#include <cstring>
|
||||
#include <iostream>
|
||||
#include <set>
|
||||
#include <thread>
|
||||
|
||||
namespace milvus {
|
||||
|
@ -49,6 +51,17 @@ constexpr uint64_t METRIC_ACTION_INTERVAL = 1;
|
|||
constexpr uint64_t COMPACT_ACTION_INTERVAL = 1;
|
||||
constexpr uint64_t INDEX_ACTION_INTERVAL = 1;
|
||||
|
||||
static const Status SHUTDOWN_ERROR = Status(DB_ERROR, "Milvus server is shutdown!");
|
||||
|
||||
void
|
||||
TraverseFiles(const meta::DatePartionedTableFilesSchema& date_files, meta::TableFilesSchema& files_array) {
|
||||
for (auto& day_files : date_files) {
|
||||
for (auto& file : day_files.second) {
|
||||
files_array.push_back(file);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
DBImpl::DBImpl(const DBOptions& options)
|
||||
|
@ -113,7 +126,7 @@ DBImpl::DropAll() {
|
|||
Status
|
||||
DBImpl::CreateTable(meta::TableSchema& table_schema) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
meta::TableSchema temp_schema = table_schema;
|
||||
|
@ -122,34 +135,18 @@ DBImpl::CreateTable(meta::TableSchema& table_schema) {
|
|||
}
|
||||
|
||||
Status
|
||||
DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) {
|
||||
DBImpl::DropTable(const std::string& table_id, const meta::DatesT& dates) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
// dates partly delete files of the table but currently we don't support
|
||||
ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id;
|
||||
|
||||
if (dates.empty()) {
|
||||
mem_mgr_->EraseMemVector(table_id); // not allow insert
|
||||
meta_ptr_->DeleteTable(table_id); // soft delete table
|
||||
|
||||
// scheduler will determine when to delete table files
|
||||
auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
|
||||
scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(table_id, meta_ptr_, nres);
|
||||
scheduler::JobMgrInst::GetInstance()->Put(job);
|
||||
job->WaitAndDelete();
|
||||
} else {
|
||||
meta_ptr_->DropPartitionsByDates(table_id, dates);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
return DropTableRecursively(table_id, dates);
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::DescribeTable(meta::TableSchema& table_schema) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
auto stat = meta_ptr_->DescribeTable(table_schema);
|
||||
|
@ -160,7 +157,7 @@ DBImpl::DescribeTable(meta::TableSchema& table_schema) {
|
|||
Status
|
||||
DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
return meta_ptr_->HasTable(table_id, has_or_not);
|
||||
|
@ -169,7 +166,7 @@ DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
|
|||
Status
|
||||
DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
return meta_ptr_->AllTables(table_schema_array);
|
||||
|
@ -178,55 +175,60 @@ DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
|
|||
Status
|
||||
DBImpl::PreloadTable(const std::string& table_id) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
meta::DatePartionedTableFilesSchema files;
|
||||
|
||||
// get all table files from parent table
|
||||
meta::DatesT dates;
|
||||
std::vector<size_t> ids;
|
||||
auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files);
|
||||
meta::TableFilesSchema files_array;
|
||||
auto status = GetFilesToSearch(table_id, ids, dates, files_array);
|
||||
if (!status.ok()) {
|
||||
return status;
|
||||
}
|
||||
|
||||
// get files from partition tables
|
||||
std::vector<meta::TableSchema> partiton_array;
|
||||
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
|
||||
for (auto& schema : partiton_array) {
|
||||
status = GetFilesToSearch(schema.table_id_, ids, dates, files_array);
|
||||
}
|
||||
|
||||
int64_t size = 0;
|
||||
int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheCapacity();
|
||||
int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage();
|
||||
int64_t available_size = cache_total - cache_usage;
|
||||
|
||||
for (auto& day_files : files) {
|
||||
for (auto& file : day_files.second) {
|
||||
ExecutionEnginePtr engine =
|
||||
EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_,
|
||||
(MetricType)file.metric_type_, file.nlist_);
|
||||
if (engine == nullptr) {
|
||||
ENGINE_LOG_ERROR << "Invalid engine type";
|
||||
return Status(DB_ERROR, "Invalid engine type");
|
||||
}
|
||||
for (auto& file : files_array) {
|
||||
ExecutionEnginePtr engine = EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_,
|
||||
(MetricType)file.metric_type_, file.nlist_);
|
||||
if (engine == nullptr) {
|
||||
ENGINE_LOG_ERROR << "Invalid engine type";
|
||||
return Status(DB_ERROR, "Invalid engine type");
|
||||
}
|
||||
|
||||
size += engine->PhysicalSize();
|
||||
if (size > available_size) {
|
||||
return Status(SERVER_CACHE_FULL, "Cache is full");
|
||||
} else {
|
||||
try {
|
||||
// step 1: load index
|
||||
engine->Load(true);
|
||||
} catch (std::exception& ex) {
|
||||
std::string msg = "Pre-load table encounter exception: " + std::string(ex.what());
|
||||
ENGINE_LOG_ERROR << msg;
|
||||
return Status(DB_ERROR, msg);
|
||||
}
|
||||
size += engine->PhysicalSize();
|
||||
if (size > available_size) {
|
||||
return Status(SERVER_CACHE_FULL, "Cache is full");
|
||||
} else {
|
||||
try {
|
||||
// step 1: load index
|
||||
engine->Load(true);
|
||||
} catch (std::exception& ex) {
|
||||
std::string msg = "Pre-load table encounter exception: " + std::string(ex.what());
|
||||
ENGINE_LOG_ERROR << msg;
|
||||
return Status(DB_ERROR, msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
return meta_ptr_->UpdateTableFlag(table_id, flag);
|
||||
|
@ -235,34 +237,105 @@ DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) {
|
|||
Status
|
||||
DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
return meta_ptr_->Count(table_id, row_count);
|
||||
return GetTableRowCountRecursively(table_id, row_count);
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) {
|
||||
// ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
|
||||
DBImpl::CreatePartition(const std::string& table_id, const std::string& partition_name,
|
||||
const std::string& partition_tag) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
return meta_ptr_->CreatePartition(table_id, partition_name, partition_tag);
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::DropPartition(const std::string& partition_name) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
auto status = mem_mgr_->EraseMemVector(partition_name); // not allow insert
|
||||
status = meta_ptr_->DropPartition(partition_name); // soft delete table
|
||||
|
||||
// scheduler will determine when to delete table files
|
||||
auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
|
||||
scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(partition_name, meta_ptr_, nres);
|
||||
scheduler::JobMgrInst::GetInstance()->Put(job);
|
||||
job->WaitAndDelete();
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
std::string partition_name;
|
||||
auto status = meta_ptr_->GetPartitionName(table_id, partition_tag, partition_name);
|
||||
if (!status.ok()) {
|
||||
ENGINE_LOG_ERROR << status.message();
|
||||
return status;
|
||||
}
|
||||
|
||||
return DropPartition(partition_name);
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
return meta_ptr_->ShowPartitions(table_id, partiton_schema_array);
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors,
|
||||
IDNumbers& vector_ids) {
|
||||
// ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
// if partition is specified, use partition as target table
|
||||
Status status;
|
||||
std::string target_table_name = table_id;
|
||||
if (!partition_tag.empty()) {
|
||||
std::string partition_name;
|
||||
status = meta_ptr_->GetPartitionName(table_id, partition_tag, target_table_name);
|
||||
if (!status.ok()) {
|
||||
ENGINE_LOG_ERROR << status.message();
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
// insert vectors into target table
|
||||
milvus::server::CollectInsertMetrics metrics(n, status);
|
||||
status = mem_mgr_->InsertVectors(table_id, n, vectors, vector_ids);
|
||||
status = mem_mgr_->InsertVectors(target_table_name, n, vectors, vector_ids);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
Status status;
|
||||
{
|
||||
std::unique_lock<std::mutex> lock(build_index_mutex_);
|
||||
|
||||
// step 1: check index difference
|
||||
TableIndex old_index;
|
||||
auto status = DescribeIndex(table_id, old_index);
|
||||
status = DescribeIndex(table_id, old_index);
|
||||
if (!status.ok()) {
|
||||
ENGINE_LOG_ERROR << "Failed to get table index info for table: " << table_id;
|
||||
return status;
|
||||
|
@ -272,11 +345,8 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
|
|||
TableIndex new_index = index;
|
||||
new_index.metric_type_ = old_index.metric_type_; // dont change metric type, it was defined by CreateTable
|
||||
if (!utils::IsSameIndex(old_index, new_index)) {
|
||||
DropIndex(table_id);
|
||||
|
||||
status = meta_ptr_->UpdateTableIndex(table_id, new_index);
|
||||
status = UpdateTableIndexRecursively(table_id, new_index);
|
||||
if (!status.ok()) {
|
||||
ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id;
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
@ -287,102 +357,91 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
|
|||
WaitMergeFileFinish();
|
||||
|
||||
// step 4: wait and build index
|
||||
// for IDMAP type, only wait all NEW file converted to RAW file
|
||||
// for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files
|
||||
std::vector<int> file_types;
|
||||
if (index.engine_type_ == static_cast<int32_t>(EngineType::FAISS_IDMAP)) {
|
||||
file_types = {
|
||||
static_cast<int32_t>(meta::TableFileSchema::NEW),
|
||||
static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
|
||||
};
|
||||
} else {
|
||||
file_types = {
|
||||
static_cast<int32_t>(meta::TableFileSchema::RAW),
|
||||
static_cast<int32_t>(meta::TableFileSchema::NEW),
|
||||
static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
|
||||
static_cast<int32_t>(meta::TableFileSchema::NEW_INDEX),
|
||||
static_cast<int32_t>(meta::TableFileSchema::TO_INDEX),
|
||||
};
|
||||
}
|
||||
status = BuildTableIndexRecursively(table_id, index);
|
||||
|
||||
std::vector<std::string> file_ids;
|
||||
auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
|
||||
int times = 1;
|
||||
|
||||
while (!file_ids.empty()) {
|
||||
ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
|
||||
if (index.engine_type_ != (int)EngineType::FAISS_IDMAP) {
|
||||
status = meta_ptr_->UpdateTableFilesToIndex(table_id);
|
||||
}
|
||||
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
|
||||
status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
|
||||
times++;
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
return status;
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::DescribeIndex(const std::string& table_id, TableIndex& index) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
return meta_ptr_->DescribeTableIndex(table_id, index);
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::DropIndex(const std::string& table_id) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
ENGINE_LOG_DEBUG << "Drop index for table: " << table_id;
|
||||
return meta_ptr_->DropTableIndex(table_id);
|
||||
return DropTableIndexRecursively(table_id);
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
|
||||
ResultIds& result_ids, ResultDistances& result_distances) {
|
||||
DBImpl::Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
|
||||
uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
meta::DatesT dates = {utils::GetDate()};
|
||||
Status result = Query(table_id, k, nq, nprobe, vectors, dates, result_ids, result_distances);
|
||||
|
||||
Status result = Query(table_id, partition_tags, k, nq, nprobe, vectors, dates, result_ids, result_distances);
|
||||
return result;
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
|
||||
const meta::DatesT& dates, ResultIds& result_ids, ResultDistances& result_distances) {
|
||||
DBImpl::Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
|
||||
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
|
||||
ResultDistances& result_distances) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
ENGINE_LOG_DEBUG << "Query by dates for table: " << table_id << " date range count: " << dates.size();
|
||||
|
||||
// get all table files from table
|
||||
meta::DatePartionedTableFilesSchema files;
|
||||
Status status;
|
||||
std::vector<size_t> ids;
|
||||
auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files);
|
||||
if (!status.ok()) {
|
||||
return status;
|
||||
}
|
||||
meta::TableFilesSchema files_array;
|
||||
|
||||
meta::TableFilesSchema file_id_array;
|
||||
for (auto& day_files : files) {
|
||||
for (auto& file : day_files.second) {
|
||||
file_id_array.push_back(file);
|
||||
if (partition_tags.empty()) {
|
||||
// no partition tag specified, means search in whole table
|
||||
// get all table files from parent table
|
||||
status = GetFilesToSearch(table_id, ids, dates, files_array);
|
||||
if (!status.ok()) {
|
||||
return status;
|
||||
}
|
||||
|
||||
std::vector<meta::TableSchema> partiton_array;
|
||||
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
|
||||
for (auto& schema : partiton_array) {
|
||||
status = GetFilesToSearch(schema.table_id_, ids, dates, files_array);
|
||||
}
|
||||
} else {
|
||||
// get files from specified partitions
|
||||
std::set<std::string> partition_name_array;
|
||||
GetPartitionsByTags(table_id, partition_tags, partition_name_array);
|
||||
|
||||
for (auto& partition_name : partition_name_array) {
|
||||
status = GetFilesToSearch(partition_name, ids, dates, files_array);
|
||||
}
|
||||
}
|
||||
|
||||
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query
|
||||
status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, result_ids, result_distances);
|
||||
status = QueryAsync(table_id, files_array, k, nq, nprobe, vectors, result_ids, result_distances);
|
||||
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after query
|
||||
return status;
|
||||
}
|
||||
|
||||
Status
|
||||
DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
|
||||
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
|
||||
ResultDistances& result_distances) {
|
||||
DBImpl::QueryByFileID(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
|
||||
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
|
||||
ResultDistances& result_distances) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
ENGINE_LOG_DEBUG << "Query by file ids for table: " << table_id << " date range count: " << dates.size();
|
||||
|
@ -396,25 +455,18 @@ DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_
|
|||
ids.push_back(std::stoul(id, &sz));
|
||||
}
|
||||
|
||||
meta::DatePartionedTableFilesSchema files_array;
|
||||
auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files_array);
|
||||
meta::TableFilesSchema files_array;
|
||||
auto status = GetFilesToSearch(table_id, ids, dates, files_array);
|
||||
if (!status.ok()) {
|
||||
return status;
|
||||
}
|
||||
|
||||
meta::TableFilesSchema file_id_array;
|
||||
for (auto& day_files : files_array) {
|
||||
for (auto& file : day_files.second) {
|
||||
file_id_array.push_back(file);
|
||||
}
|
||||
}
|
||||
|
||||
if (file_id_array.empty()) {
|
||||
if (files_array.empty()) {
|
||||
return Status(DB_ERROR, "Invalid file id");
|
||||
}
|
||||
|
||||
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query
|
||||
status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, result_ids, result_distances);
|
||||
status = QueryAsync(table_id, files_array, k, nq, nprobe, vectors, result_ids, result_distances);
|
||||
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after query
|
||||
return status;
|
||||
}
|
||||
|
@ -422,7 +474,7 @@ DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_
|
|||
Status
|
||||
DBImpl::Size(uint64_t& result) {
|
||||
if (shutting_down_.load(std::memory_order_acquire)) {
|
||||
return Status(DB_ERROR, "Milsvus server is shutdown!");
|
||||
return SHUTDOWN_ERROR;
|
||||
}
|
||||
|
||||
return meta_ptr_->Size(result);
|
||||
|
@@ -577,6 +629,18 @@ DBImpl::StartCompactionTask() {
{
std::lock_guard<std::mutex> lck(compact_result_mutex_);
if (compact_thread_results_.empty()) {
// collect merge files for all tables(if compact_table_ids_ is empty) for two reasons:
// 1. other tables may still has un-merged files
// 2. server may be closed unexpected, these un-merge files need to be merged when server restart
if (compact_table_ids_.empty()) {
std::vector<meta::TableSchema> table_schema_array;
meta_ptr_->AllTables(table_schema_array);
for (auto& schema : table_schema_array) {
compact_table_ids_.insert(schema.table_id_);
}
}

// start merge file thread
compact_thread_results_.push_back(
compact_thread_pool_.enqueue(&DBImpl::BackgroundCompaction, this, compact_table_ids_));
compact_table_ids_.clear();
@@ -675,7 +739,7 @@ DBImpl::BackgroundMergeFiles(const std::string& table_id) {
for (auto& kv : raw_files) {
auto files = kv.second;
if (files.size() < options_.merge_trigger_number_) {
ENGINE_LOG_DEBUG << "Files number not greater equal than merge trigger number, skip merge action";
ENGINE_LOG_TRACE << "Files number not greater equal than merge trigger number, skip merge action";
continue;
}

@@ -774,5 +838,186 @@ DBImpl::BackgroundBuildIndex() {
// ENGINE_LOG_TRACE << "Background build index thread exit";
}

Status
DBImpl::GetFilesToSearch(const std::string& table_id, const std::vector<size_t>& file_ids, const meta::DatesT& dates,
meta::TableFilesSchema& files) {
meta::DatePartionedTableFilesSchema date_files;
auto status = meta_ptr_->FilesToSearch(table_id, file_ids, dates, date_files);
if (!status.ok()) {
return status;
}

TraverseFiles(date_files, files);
return Status::OK();
}

Status
DBImpl::GetPartitionsByTags(const std::string& table_id, const std::vector<std::string>& partition_tags,
std::set<std::string>& partition_name_array) {
std::vector<meta::TableSchema> partiton_array;
auto status = meta_ptr_->ShowPartitions(table_id, partiton_array);

for (auto& tag : partition_tags) {
// trim side-blank of tag, only compare valid characters
// for example: " ab cd " is treated as "ab cd"
std::string valid_tag = tag;
server::StringHelpFunctions::TrimStringBlank(valid_tag);
for (auto& schema : partiton_array) {
if (server::StringHelpFunctions::IsRegexMatch(schema.partition_tag_, valid_tag)) {
partition_name_array.insert(schema.table_id_);
}
}
}

return Status::OK();
}
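GetPartitionsByTags above trims the surrounding blanks from each requested tag and then lets StringHelpFunctions::IsRegexMatch compare it against the stored partition_tag_ of every partition. A minimal standalone sketch of that lookup, assuming the requested tag acts as a std::regex pattern in place of the project's helper (which is not part of this diff); the {internal table id, stored tag} pair layout is illustrative only:

    #include <regex>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for GetPartitionsByTags: collect the internal table ids of
    // partitions whose stored tag matches one of the requested tags.
    std::set<std::string> MatchPartitions(const std::vector<std::pair<std::string, std::string>>& partitions,
                                          const std::vector<std::string>& requested_tags) {
        std::set<std::string> names;
        for (std::string tag : requested_tags) {
            // trim side blanks, e.g. " ab cd " is treated as "ab cd"
            const auto first = tag.find_first_not_of(' ');
            const auto last = tag.find_last_not_of(' ');
            tag = (first == std::string::npos) ? std::string() : tag.substr(first, last - first + 1);
            for (const auto& partition : partitions) {  // partition = {internal table id, stored tag}
                if (std::regex_match(partition.second, std::regex(tag))) {
                    names.insert(partition.first);
                }
            }
        }
        return names;
    }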

Status
DBImpl::DropTableRecursively(const std::string& table_id, const meta::DatesT& dates) {
// dates partly delete files of the table but currently we don't support
ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id;

Status status;
if (dates.empty()) {
status = mem_mgr_->EraseMemVector(table_id); // not allow insert
status = meta_ptr_->DropTable(table_id); // soft delete table

// scheduler will determine when to delete table files
auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(table_id, meta_ptr_, nres);
scheduler::JobMgrInst::GetInstance()->Put(job);
job->WaitAndDelete();
} else {
status = meta_ptr_->DropDataByDate(table_id, dates);
}

std::vector<meta::TableSchema> partiton_array;
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
for (auto& schema : partiton_array) {
status = DropTableRecursively(schema.table_id_, dates);
if (!status.ok()) {
return status;
}
}

return Status::OK();
}

Status
DBImpl::UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index) {
DropIndex(table_id);

auto status = meta_ptr_->UpdateTableIndex(table_id, index);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id;
return status;
}

std::vector<meta::TableSchema> partiton_array;
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
for (auto& schema : partiton_array) {
status = UpdateTableIndexRecursively(schema.table_id_, index);
if (!status.ok()) {
return status;
}
}

return Status::OK();
}

Status
DBImpl::BuildTableIndexRecursively(const std::string& table_id, const TableIndex& index) {
// for IDMAP type, only wait all NEW file converted to RAW file
// for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files
std::vector<int> file_types;
if (index.engine_type_ == static_cast<int32_t>(EngineType::FAISS_IDMAP)) {
file_types = {
static_cast<int32_t>(meta::TableFileSchema::NEW),
static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
};
} else {
file_types = {
static_cast<int32_t>(meta::TableFileSchema::RAW),
static_cast<int32_t>(meta::TableFileSchema::NEW),
static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
static_cast<int32_t>(meta::TableFileSchema::NEW_INDEX),
static_cast<int32_t>(meta::TableFileSchema::TO_INDEX),
};
}

// get files to build index
std::vector<std::string> file_ids;
auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
int times = 1;

while (!file_ids.empty()) {
ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
if (index.engine_type_ != (int)EngineType::FAISS_IDMAP) {
status = meta_ptr_->UpdateTableFilesToIndex(table_id);
}

std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
times++;
}

// build index for partition
std::vector<meta::TableSchema> partiton_array;
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
for (auto& schema : partiton_array) {
status = BuildTableIndexRecursively(schema.table_id_, index);
if (!status.ok()) {
return status;
}
}

return Status::OK();
}

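The while loop above keeps polling FilesByType until no un-indexed files remain, sleeping min(10 s, attempts x 100 ms) between polls. The same capped-backoff pattern in isolation, with a hypothetical pending_count callback standing in for the metadata query:

    #include <algorithm>
    #include <chrono>
    #include <functional>
    #include <thread>

    // Poll until pending_count() reaches zero; the sleep grows by 100 ms per attempt, capped at 10 s.
    void WaitUntilDrained(const std::function<int()>& pending_count) {
        int times = 1;
        while (pending_count() > 0) {
            std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
            ++times;
        }
    }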
Status
DBImpl::DropTableIndexRecursively(const std::string& table_id) {
ENGINE_LOG_DEBUG << "Drop index for table: " << table_id;
auto status = meta_ptr_->DropTableIndex(table_id);
if (!status.ok()) {
return status;
}

// drop partition index
std::vector<meta::TableSchema> partiton_array;
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
for (auto& schema : partiton_array) {
status = DropTableIndexRecursively(schema.table_id_);
if (!status.ok()) {
return status;
}
}

return Status::OK();
}

Status
DBImpl::GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count) {
row_count = 0;
auto status = meta_ptr_->Count(table_id, row_count);
if (!status.ok()) {
return status;
}

// get partition row count
std::vector<meta::TableSchema> partiton_array;
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
for (auto& schema : partiton_array) {
uint64_t partition_row_count = 0;
status = GetTableRowCountRecursively(schema.table_id_, partition_row_count);
if (!status.ok()) {
return status;
}

row_count += partition_row_count;
}

return Status::OK();
}

} // namespace engine
} // namespace milvus
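All of the *Recursively helpers introduced here share one shape: perform the operation on the table itself, then recurse into each partition, which the metadata layer reports as child tables via ShowPartitions. A compact, hedged illustration of that traversal, with a hypothetical list_partitions callback in place of meta_ptr_->ShowPartitions:

    #include <functional>
    #include <string>
    #include <vector>

    // Apply `op` to table_id and, recursively, to every partition table beneath it.
    // Returns false as soon as any call fails, mirroring the early-return Status checks above.
    bool ForTableAndPartitions(const std::string& table_id,
                               const std::function<std::vector<std::string>(const std::string&)>& list_partitions,
                               const std::function<bool(const std::string&)>& op) {
        if (!op(table_id)) {
            return false;
        }
        for (const auto& partition_table : list_partitions(table_id)) {
            if (!ForTableAndPartitions(partition_table, list_partitions, op)) {
                return false;
            }
        }
        return true;
    }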
@@ -57,7 +57,7 @@ class DBImpl : public DB {
CreateTable(meta::TableSchema& table_schema) override;

Status
DeleteTable(const std::string& table_id, const meta::DatesT& dates) override;
DropTable(const std::string& table_id, const meta::DatesT& dates) override;

Status
DescribeTable(meta::TableSchema& table_schema) override;

@@ -78,7 +78,21 @@ class DBImpl : public DB {
GetTableRowCount(const std::string& table_id, uint64_t& row_count) override;

Status
InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) override;
CreatePartition(const std::string& table_id, const std::string& partition_name,
const std::string& partition_tag) override;

Status
DropPartition(const std::string& partition_name) override;

Status
DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) override;

Status
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) override;

Status
InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors,
IDNumbers& vector_ids) override;

Status
CreateIndex(const std::string& table_id, const TableIndex& index) override;

@@ -90,18 +104,19 @@ class DBImpl : public DB {
DropIndex(const std::string& table_id) override;

Status
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
ResultIds& result_ids, ResultDistances& result_distances) override;
Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) override;

Status
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
const meta::DatesT& dates, ResultIds& result_ids, ResultDistances& result_distances) override;

Status
Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
ResultDistances& result_distances) override;

Status
QueryByFileID(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
ResultDistances& result_distances) override;

Status
Size(uint64_t& result) override;

@@ -137,6 +152,29 @@ class DBImpl : public DB {
Status
MemSerialize();

Status
GetFilesToSearch(const std::string& table_id, const std::vector<size_t>& file_ids, const meta::DatesT& dates,
meta::TableFilesSchema& files);

Status
GetPartitionsByTags(const std::string& table_id, const std::vector<std::string>& partition_tags,
std::set<std::string>& partition_name_array);

Status
DropTableRecursively(const std::string& table_id, const meta::DatesT& dates);

Status
UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index);

Status
BuildTableIndexRecursively(const std::string& table_id, const TableIndex& index);

Status
DropTableIndexRecursively(const std::string& table_id);

Status
GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count);

private:
const DBOptions options_;
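The public search entry points now take a list of partition tags rather than raw file ids (the file-id variant moved to QueryByFileID). A hedged usage sketch against the declarations above; obtaining the DB instance and the query vectors is assumed and not shown in this diff, and an empty tag list is presumed to search the table plus all of its partitions, as the branch at the top of this diff suggests:

    // Hedged usage sketch (assumes it lives inside namespace milvus::engine and that
    // `db` and `vectors` come from the caller; neither is part of this diff).
    Status SearchByTag(DB& db, const float* vectors) {
        std::vector<std::string> partition_tags = {"tag_1"};  // leave empty to search the whole table
        ResultIds result_ids;
        ResultDistances result_distances;
        return db.Query("example_table", partition_tags,
                        /*k=*/10, /*nq=*/1, /*nprobe=*/16, vectors,
                        result_ids, result_distances);
    }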
@@ -33,7 +33,7 @@ static const char* ARCHIVE_CONF_DISK = "disk";
static const char* ARCHIVE_CONF_DAYS = "days";

struct ArchiveConf {
using CriteriaT = std::map<std::string, int>;
using CriteriaT = std::map<std::string, int64_t>;

explicit ArchiveConf(const std::string& type, const std::string& criterias = std::string());
@@ -27,8 +27,7 @@
namespace milvus {
namespace engine {

using IDNumber = faiss::Index::idx_t;

typedef int64_t IDNumber;
typedef IDNumber* IDNumberPtr;
typedef std::vector<IDNumber> IDNumbers;
@@ -26,6 +26,7 @@
namespace milvus {
namespace engine {

// TODO(linxj): replace with VecIndex::IndexType
enum class EngineType {
INVALID = 0,
FAISS_IDMAP = 1,

@@ -33,7 +34,10 @@ enum class EngineType {
FAISS_IVFSQ8,
NSG_MIX,
FAISS_IVFSQ8H,
MAX_VALUE = FAISS_IVFSQ8H,
FAISS_PQ,
SPTAG_KDT,
SPTAG_BKT,
MAX_VALUE = SPTAG_BKT,
};

enum class MetricType {
@@ -25,6 +25,7 @@
#include "utils/CommonUtil.h"
#include "utils/Exception.h"
#include "utils/Log.h"

#include "wrapper/ConfAdapter.h"
#include "wrapper/ConfAdapterMgr.h"
#include "wrapper/VecImpl.h"

@@ -92,11 +93,19 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
break;
}
case EngineType::FAISS_IVFFLAT: {
#ifdef MILVUS_CPU_VERSION
index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_CPU);
#else
index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_MIX);
#endif
break;
}
case EngineType::FAISS_IVFSQ8: {
#ifdef MILVUS_CPU_VERSION
index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_CPU);
#else
index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_MIX);
#endif
break;
}
case EngineType::NSG_MIX: {
@@ -107,6 +116,22 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_HYBRID);
break;
}
case EngineType::FAISS_PQ: {
#ifdef MILVUS_CPU_VERSION
index = GetVecIndexFactory(IndexType::FAISS_IVFPQ_CPU);
#else
index = GetVecIndexFactory(IndexType::FAISS_IVFPQ_MIX);
#endif
break;
}
case EngineType::SPTAG_KDT: {
index = GetVecIndexFactory(IndexType::SPTAG_KDT_RNT_CPU);
break;
}
case EngineType::SPTAG_BKT: {
index = GetVecIndexFactory(IndexType::SPTAG_BKT_RNT_CPU);
break;
}
default: {
ENGINE_LOG_ERROR << "Unsupported index type";
return nullptr;
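Each new case above picks a CPU-only index when MILVUS_CPU_VERSION is defined and a mixed CPU/GPU index otherwise; the SPTAG indexes only have CPU variants. A table-driven restatement of that selection, purely as an illustration: the kIndexByEngine map and SelectIndexType helper are hypothetical, while the enum values are the ones used in this diff.

    #include <map>
    #include <utility>

    // Hypothetical lookup: EngineType -> {index for CPU-only builds, index for CPU/GPU builds}.
    static const std::map<EngineType, std::pair<IndexType, IndexType>> kIndexByEngine = {
        {EngineType::FAISS_PQ, {IndexType::FAISS_IVFPQ_CPU, IndexType::FAISS_IVFPQ_MIX}},
        {EngineType::SPTAG_KDT, {IndexType::SPTAG_KDT_RNT_CPU, IndexType::SPTAG_KDT_RNT_CPU}},  // CPU only
        {EngineType::SPTAG_BKT, {IndexType::SPTAG_BKT_RNT_CPU, IndexType::SPTAG_BKT_RNT_CPU}},  // CPU only
    };

    IndexType SelectIndexType(EngineType type) {
        const auto& entry = kIndexByEngine.at(type);
    #ifdef MILVUS_CPU_VERSION
        return entry.first;
    #else
        return entry.second;
    #endif
    }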
@@ -127,7 +152,14 @@ ExecutionEngineImpl::HybridLoad() const {
}

const std::string key = location_ + ".quantizer";
std::vector<uint64_t> gpus = scheduler::get_gpu_pool();

server::Config& config = server::Config::GetInstance();
std::vector<int64_t> gpus;
Status s = config.GetGpuResourceConfigSearchResources(gpus);
if (!s.ok()) {
ENGINE_LOG_ERROR << s.message();
return;
}

// cache hit
{
@@ -309,18 +341,36 @@ ExecutionEngineImpl::CopyToGpu(uint64_t device_id, bool hybrid) {
return Status::OK();
}
#endif
try {
index_ = index_->CopyToGpu(device_id);
ENGINE_LOG_DEBUG << "CPU to GPU" << device_id;
} catch (std::exception& e) {
ENGINE_LOG_ERROR << e.what();
return Status(DB_ERROR, e.what());

auto index = std::static_pointer_cast<VecIndex>(cache::GpuCacheMgr::GetInstance(device_id)->GetIndex(location_));
bool already_in_cache = (index != nullptr);
if (already_in_cache) {
index_ = index;
} else {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to copy to gpu";
return Status(DB_ERROR, "index is null");
}

try {
index_ = index_->CopyToGpu(device_id);
ENGINE_LOG_DEBUG << "CPU to GPU" << device_id;
} catch (std::exception& e) {
ENGINE_LOG_ERROR << e.what();
return Status(DB_ERROR, e.what());
}
}

if (!already_in_cache) {
GpuCache(device_id);
}

return Status::OK();
}

Status
ExecutionEngineImpl::CopyToIndexFileToGpu(uint64_t device_id) {
gpu_num_ = device_id;
auto to_index_data = std::make_shared<ToIndexData>(PhysicalSize());
cache::DataObjPtr obj = std::static_pointer_cast<cache::DataObj>(to_index_data);
milvus::cache::GpuCacheMgr::GetInstance(device_id)->InsertItem(location_, obj);
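CopyToGpu now consults the GPU cache first: if an index for this location is already resident on the target device it is reused, otherwise the in-memory index is copied over and inserted into the cache for later searches. The same get-or-copy pattern in a generic, hedged form (the callbacks are placeholders, not the project's cache API):

    #include <functional>
    #include <memory>
    #include <string>

    template <typename T>
    std::shared_ptr<T> GetOrCopyToDevice(const std::string& key,
                                         const std::function<std::shared_ptr<T>(const std::string&)>& cache_get,
                                         const std::function<std::shared_ptr<T>()>& copy_to_device,
                                         const std::function<void(const std::string&, const std::shared_ptr<T>&)>& cache_put) {
        if (auto cached = cache_get(key)) {
            return cached;               // already resident on the device, reuse it
        }
        auto copied = copy_to_device();  // expensive host-to-device transfer
        cache_put(key, copied);          // keep it warm for the next query
        return copied;
    }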
@@ -544,12 +594,16 @@ ExecutionEngineImpl::GpuCache(uint64_t gpu_id) {
Status
ExecutionEngineImpl::Init() {
server::Config& config = server::Config::GetInstance();
Status s = config.GetResourceConfigIndexBuildDevice(gpu_num_);
if (!s.ok()) {
return s;
std::vector<int64_t> gpu_ids;
Status s = config.GetGpuResourceConfigBuildIndexResources(gpu_ids);
for (auto id : gpu_ids) {
if (gpu_num_ == id) {
return Status::OK();
}
}

return Status::OK();
std::string msg = "Invalid gpu_num";
return Status(SERVER_INVALID_ARGUMENT, msg);
}

} // namespace engine
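Init() now checks gpu_num_ against the whole list returned by GetGpuResourceConfigBuildIndexResources instead of reading a single build device. Reduced to its essence, the check is a membership test; a standalone sketch with a plain vector standing in for the Config call:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // True when device_id is one of the GPUs configured for index building.
    bool IsBuildDeviceConfigured(int64_t device_id, const std::vector<int64_t>& build_gpu_ids) {
        return std::find(build_gpu_ids.begin(), build_gpu_ids.end(), device_id) != build_gpu_ids.end();
    }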
@@ -122,8 +122,8 @@ class ExecutionEngineImpl : public ExecutionEngine {
int64_t dim_;
std::string location_;

int32_t nlist_ = 0;
int32_t gpu_num_ = 0;
int64_t nlist_ = 0;
int64_t gpu_num_ = 0;
};

} // namespace engine
@@ -50,14 +50,11 @@ class Meta {
virtual Status
AllTables(std::vector<TableSchema>& table_schema_array) = 0;

virtual Status
UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0;

virtual Status
UpdateTableFlag(const std::string& table_id, int64_t flag) = 0;

virtual Status
DeleteTable(const std::string& table_id) = 0;
DropTable(const std::string& table_id) = 0;

virtual Status
DeleteTableFiles(const std::string& table_id) = 0;

@@ -66,20 +63,41 @@ class Meta {
CreateTableFile(TableFileSchema& file_schema) = 0;

virtual Status
DropPartitionsByDates(const std::string& table_id, const DatesT& dates) = 0;
DropDataByDate(const std::string& table_id, const DatesT& dates) = 0;

virtual Status
GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) = 0;

virtual Status
UpdateTableFilesToIndex(const std::string& table_id) = 0;

virtual Status
UpdateTableFile(TableFileSchema& file_schema) = 0;

virtual Status
UpdateTableFiles(TableFilesSchema& files) = 0;

virtual Status
UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0;

virtual Status
UpdateTableFilesToIndex(const std::string& table_id) = 0;

virtual Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0;

virtual Status
DropTableIndex(const std::string& table_id) = 0;

virtual Status
CreatePartition(const std::string& table_name, const std::string& partition_name, const std::string& tag) = 0;

virtual Status
DropPartition(const std::string& partition_name) = 0;

virtual Status
ShowPartitions(const std::string& table_name, std::vector<meta::TableSchema>& partiton_schema_array) = 0;

virtual Status
GetPartitionName(const std::string& table_name, const std::string& tag, std::string& partition_name) = 0;

virtual Status
FilesToSearch(const std::string& table_id, const std::vector<size_t>& ids, const DatesT& dates,
DatePartionedTableFilesSchema& files) = 0;

@@ -87,12 +105,6 @@ class Meta {
virtual Status
FilesToMerge(const std::string& table_id, DatePartionedTableFilesSchema& files) = 0;

virtual Status
Size(uint64_t& result) = 0;

virtual Status
Archive() = 0;

virtual Status
FilesToIndex(TableFilesSchema&) = 0;

@@ -101,10 +113,10 @@ class Meta {
std::vector<std::string>& file_ids) = 0;

virtual Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0;
Size(uint64_t& result) = 0;

virtual Status
DropTableIndex(const std::string& table_id) = 0;
Archive() = 0;

virtual Status
CleanUp() = 0;
@@ -19,6 +19,7 @@

#include "db/Constants.h"
#include "db/engine/ExecutionEngine.h"
#include "src/version.h"

#include <map>
#include <memory>

@@ -33,6 +34,7 @@ constexpr int32_t DEFAULT_ENGINE_TYPE = (int)EngineType::FAISS_IDMAP;
constexpr int32_t DEFAULT_NLIST = 16384;
constexpr int32_t DEFAULT_METRIC_TYPE = (int)MetricType::L2;
constexpr int32_t DEFAULT_INDEX_FILE_SIZE = ONE_GB;
constexpr char CURRENT_VERSION[] = MILVUS_VERSION;

constexpr int64_t FLAG_MASK_NO_USERID = 0x1;
constexpr int64_t FLAG_MASK_HAS_USERID = 0x1 << 1;

@@ -57,6 +59,9 @@ struct TableSchema {
int32_t engine_type_ = DEFAULT_ENGINE_TYPE;
int32_t nlist_ = DEFAULT_NLIST;
int32_t metric_type_ = DEFAULT_METRIC_TYPE;
std::string owner_table_;
std::string partition_tag_;
std::string version_ = CURRENT_VERSION;
}; // TableSchema

struct TableFileSchema {
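With owner_table_ and partition_tag_ added, a partition is just a TableSchema record whose owner points back at the parent table and whose tag holds the user-visible name. A hedged illustration of how such a record could look; the field names are from the struct above, while the concrete values are made up:

    meta::TableSchema partition;
    partition.table_id_ = "example_table_partition_0";  // internal table that backs the partition
    partition.owner_table_ = "example_table";           // parent table
    partition.partition_tag_ = "tag_1";                 // tag supplied by the user at CreatePartition time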
File diff suppressed because it is too large
@@ -49,7 +49,7 @@ class MySQLMetaImpl : public Meta {
AllTables(std::vector<TableSchema>& table_schema_array) override;

Status
DeleteTable(const std::string& table_id) override;
DropTable(const std::string& table_id) override;

Status
DeleteTableFiles(const std::string& table_id) override;

@@ -58,27 +58,17 @@ class MySQLMetaImpl : public Meta {
CreateTableFile(TableFileSchema& file_schema) override;

Status
DropPartitionsByDates(const std::string& table_id, const DatesT& dates) override;
DropDataByDate(const std::string& table_id, const DatesT& dates) override;

Status
GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) override;

Status
FilesByType(const std::string& table_id, const std::vector<int>& file_types,
std::vector<std::string>& file_ids) override;

Status
UpdateTableIndex(const std::string& table_id, const TableIndex& index) override;

Status
UpdateTableFlag(const std::string& table_id, int64_t flag) override;

Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) override;

Status
DropTableIndex(const std::string& table_id) override;

Status
UpdateTableFile(TableFileSchema& file_schema) override;

@@ -88,6 +78,24 @@ class MySQLMetaImpl : public Meta {
Status
UpdateTableFiles(TableFilesSchema& files) override;

Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) override;

Status
DropTableIndex(const std::string& table_id) override;

Status
CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) override;

Status
DropPartition(const std::string& partition_name) override;

Status
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) override;

Status
GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override;

Status
FilesToSearch(const std::string& table_id, const std::vector<size_t>& ids, const DatesT& dates,
DatePartionedTableFilesSchema& files) override;

@@ -98,6 +106,10 @@ class MySQLMetaImpl : public Meta {
Status
FilesToIndex(TableFilesSchema&) override;

Status
FilesByType(const std::string& table_id, const std::vector<int>& file_types,
std::vector<std::string>& file_ids) override;

Status
Archive() override;
File diff suppressed because it is too large
@@ -49,7 +49,7 @@ class SqliteMetaImpl : public Meta {
AllTables(std::vector<TableSchema>& table_schema_array) override;

Status
DeleteTable(const std::string& table_id) override;
DropTable(const std::string& table_id) override;

Status
DeleteTableFiles(const std::string& table_id) override;

@@ -58,21 +58,26 @@ class SqliteMetaImpl : public Meta {
CreateTableFile(TableFileSchema& file_schema) override;

Status
DropPartitionsByDates(const std::string& table_id, const DatesT& dates) override;
DropDataByDate(const std::string& table_id, const DatesT& dates) override;

Status
GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) override;

Status
FilesByType(const std::string& table_id, const std::vector<int>& file_types,
std::vector<std::string>& file_ids) override;

Status
UpdateTableIndex(const std::string& table_id, const TableIndex& index) override;

Status
UpdateTableFlag(const std::string& table_id, int64_t flag) override;

Status
UpdateTableFile(TableFileSchema& file_schema) override;

Status
UpdateTableFilesToIndex(const std::string& table_id) override;

Status
UpdateTableFiles(TableFilesSchema& files) override;

Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) override;

@@ -80,13 +85,16 @@ class SqliteMetaImpl : public Meta {
DropTableIndex(const std::string& table_id) override;

Status
UpdateTableFilesToIndex(const std::string& table_id) override;
CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) override;

Status
UpdateTableFile(TableFileSchema& file_schema) override;
DropPartition(const std::string& partition_name) override;

Status
UpdateTableFiles(TableFilesSchema& files) override;
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) override;

Status
GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override;

Status
FilesToSearch(const std::string& table_id, const std::vector<size_t>& ids, const DatesT& dates,

@@ -99,11 +107,15 @@ class SqliteMetaImpl : public Meta {
FilesToIndex(TableFilesSchema&) override;

Status
Archive() override;
FilesByType(const std::string& table_id, const std::vector<int>& file_types,
std::vector<std::string>& file_ids) override;

Status
Size(uint64_t& result) override;

Status
Archive() override;

Status
CleanUp() override;
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -22,19 +22,22 @@ namespace grpc {
|
|||
static const char* MilvusService_method_names[] = {
|
||||
"/milvus.grpc.MilvusService/CreateTable",
|
||||
"/milvus.grpc.MilvusService/HasTable",
|
||||
"/milvus.grpc.MilvusService/DropTable",
|
||||
"/milvus.grpc.MilvusService/CreateIndex",
|
||||
"/milvus.grpc.MilvusService/Insert",
|
||||
"/milvus.grpc.MilvusService/Search",
|
||||
"/milvus.grpc.MilvusService/SearchInFiles",
|
||||
"/milvus.grpc.MilvusService/DescribeTable",
|
||||
"/milvus.grpc.MilvusService/CountTable",
|
||||
"/milvus.grpc.MilvusService/ShowTables",
|
||||
"/milvus.grpc.MilvusService/Cmd",
|
||||
"/milvus.grpc.MilvusService/DeleteByRange",
|
||||
"/milvus.grpc.MilvusService/PreloadTable",
|
||||
"/milvus.grpc.MilvusService/DropTable",
|
||||
"/milvus.grpc.MilvusService/CreateIndex",
|
||||
"/milvus.grpc.MilvusService/DescribeIndex",
|
||||
"/milvus.grpc.MilvusService/DropIndex",
|
||||
"/milvus.grpc.MilvusService/CreatePartition",
|
||||
"/milvus.grpc.MilvusService/ShowPartitions",
|
||||
"/milvus.grpc.MilvusService/DropPartition",
|
||||
"/milvus.grpc.MilvusService/Insert",
|
||||
"/milvus.grpc.MilvusService/Search",
|
||||
"/milvus.grpc.MilvusService/SearchInFiles",
|
||||
"/milvus.grpc.MilvusService/Cmd",
|
||||
"/milvus.grpc.MilvusService/DeleteByDate",
|
||||
"/milvus.grpc.MilvusService/PreloadTable",
|
||||
};
|
||||
|
||||
std::unique_ptr< MilvusService::Stub> MilvusService::NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) {
|
||||
|
@ -46,19 +49,22 @@ std::unique_ptr< MilvusService::Stub> MilvusService::NewStub(const std::shared_p
|
|||
MilvusService::Stub::Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel)
|
||||
: channel_(channel), rpcmethod_CreateTable_(MilvusService_method_names[0], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_HasTable_(MilvusService_method_names[1], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_DropTable_(MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_CreateIndex_(MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_Insert_(MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_Search_(MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_SearchInFiles_(MilvusService_method_names[6], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_DescribeTable_(MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_CountTable_(MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_ShowTables_(MilvusService_method_names[9], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_Cmd_(MilvusService_method_names[10], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_DeleteByRange_(MilvusService_method_names[11], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_PreloadTable_(MilvusService_method_names[12], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_DescribeIndex_(MilvusService_method_names[13], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_DropIndex_(MilvusService_method_names[14], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_DescribeTable_(MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_CountTable_(MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_ShowTables_(MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_DropTable_(MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_CreateIndex_(MilvusService_method_names[6], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_DescribeIndex_(MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_DropIndex_(MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_CreatePartition_(MilvusService_method_names[9], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_ShowPartitions_(MilvusService_method_names[10], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_DropPartition_(MilvusService_method_names[11], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_Insert_(MilvusService_method_names[12], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_Search_(MilvusService_method_names[13], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_SearchInFiles_(MilvusService_method_names[14], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_Cmd_(MilvusService_method_names[15], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_DeleteByDate_(MilvusService_method_names[16], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_PreloadTable_(MilvusService_method_names[17], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
{}
|
||||
|
||||
::grpc::Status MilvusService::Stub::CreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::milvus::grpc::Status* response) {
|
||||
|
@ -117,146 +123,6 @@ void MilvusService::Stub::experimental_async::HasTable(::grpc::ClientContext* co
|
|||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::BoolReply>::Create(channel_.get(), cq, rpcmethod_HasTable_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropTable_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreateIndex_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Insert_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResult* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Search_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResult* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_SearchInFiles_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::TableSchema* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DescribeTable_, context, request, response);
|
||||
}
|
||||
|
@ -341,88 +207,60 @@ void MilvusService::Stub::experimental_async::ShowTables(::grpc::ClientContext*
|
|||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TableNameList>::Create(channel_.get(), cq, rpcmethod_ShowTables_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Cmd_, context, request, response);
|
||||
::grpc::Status MilvusService::Stub::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropTable_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
|
||||
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
|
||||
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
|
||||
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
|
||||
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, true);
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, false);
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::milvus::grpc::Status* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DeleteByRange_, context, request, response);
|
||||
::grpc::Status MilvusService::Stub::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreateIndex_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, std::move(f));
|
||||
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, std::move(f));
|
||||
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, reactor);
|
||||
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, reactor);
|
||||
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByRange_, context, request, true);
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByRange_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_PreloadTable_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, false);
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::IndexParam* response) {
|
||||
|
@ -481,6 +319,258 @@ void MilvusService::Stub::experimental_async::DropIndex(::grpc::ClientContext* c
|
|||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropIndex_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreatePartition_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreatePartition_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreatePartition_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::PartitionList* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_ShowPartitions_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* MilvusService::Stub::AsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::PartitionList>::Create(channel_.get(), cq, rpcmethod_ShowPartitions_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* MilvusService::Stub::PrepareAsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::PartitionList>::Create(channel_.get(), cq, rpcmethod_ShowPartitions_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropPartition_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropPartition_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropPartition_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Insert_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResult* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Search_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResult* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_SearchInFiles_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Cmd_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::milvus::grpc::Status* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DeleteByDate_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByDate_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByDate_, context, request, false);
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Stub::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_PreloadTable_, context, request, response);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
|
||||
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, false);
|
||||
}
|
||||
|
||||
MilvusService::Service::Service() {
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[0],
|
||||
|
@ -495,68 +585,83 @@ MilvusService::Service::Service() {
|
|||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[2],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
|
||||
std::mem_fn(&MilvusService::Service::DropTable), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[3],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::IndexParam, ::milvus::grpc::Status>(
|
||||
std::mem_fn(&MilvusService::Service::CreateIndex), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[4],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>(
|
||||
std::mem_fn(&MilvusService::Service::Insert), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[5],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResult>(
|
||||
std::mem_fn(&MilvusService::Service::Search), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[6],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResult>(
|
||||
std::mem_fn(&MilvusService::Service::SearchInFiles), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[7],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>(
|
||||
std::mem_fn(&MilvusService::Service::DescribeTable), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[8],
|
||||
MilvusService_method_names[3],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>(
|
||||
std::mem_fn(&MilvusService::Service::CountTable), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[9],
|
||||
MilvusService_method_names[4],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::TableNameList>(
|
||||
std::mem_fn(&MilvusService::Service::ShowTables), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[10],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::StringReply>(
|
||||
std::mem_fn(&MilvusService::Service::Cmd), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[11],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>(
|
||||
std::mem_fn(&MilvusService::Service::DeleteByRange), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[12],
|
||||
MilvusService_method_names[5],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
|
||||
std::mem_fn(&MilvusService::Service::PreloadTable), this)));
|
||||
std::mem_fn(&MilvusService::Service::DropTable), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[13],
|
||||
MilvusService_method_names[6],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::IndexParam, ::milvus::grpc::Status>(
|
||||
std::mem_fn(&MilvusService::Service::CreateIndex), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[7],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>(
|
||||
std::mem_fn(&MilvusService::Service::DescribeIndex), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[14],
|
||||
MilvusService_method_names[8],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
|
||||
std::mem_fn(&MilvusService::Service::DropIndex), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[9],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>(
|
||||
std::mem_fn(&MilvusService::Service::CreatePartition), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[10],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>(
|
||||
std::mem_fn(&MilvusService::Service::ShowPartitions), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[11],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>(
|
||||
std::mem_fn(&MilvusService::Service::DropPartition), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[12],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>(
|
||||
std::mem_fn(&MilvusService::Service::Insert), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[13],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResult>(
|
||||
std::mem_fn(&MilvusService::Service::Search), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[14],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResult>(
|
||||
std::mem_fn(&MilvusService::Service::SearchInFiles), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[15],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::StringReply>(
|
||||
std::mem_fn(&MilvusService::Service::Cmd), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[16],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>(
|
||||
std::mem_fn(&MilvusService::Service::DeleteByDate), this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
MilvusService_method_names[17],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
|
||||
std::mem_fn(&MilvusService::Service::PreloadTable), this)));
|
||||
}
|
||||
|
||||
MilvusService::Service::~Service() {
|
||||
|
@ -576,41 +681,6 @@ MilvusService::Service::~Service() {
|
|||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
|
@ -632,21 +702,14 @@ MilvusService::Service::~Service() {
|
|||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) {
|
||||
::grpc::Status MilvusService::Service::DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
|
||||
::grpc::Status MilvusService::Service::CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
|
@ -667,6 +730,69 @@ MilvusService::Service::~Service() {
|
|||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status MilvusService::Service::PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
|
||||
} // namespace grpc
|
||||
} // namespace milvus
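The callback-style stubs generated above (the experimental_async methods that take a std::function<void(::grpc::Status)>) can be exercised as in the following minimal sketch. This is an illustrative assumption, not repository code: it presumes the generated header is named milvus.grpc.pb.h, that the stub exposes the usual generated experimental_async() accessor, and that a Milvus server is listening on localhost:19530.

#include <future>
#include <iostream>
#include <memory>
#include <grpcpp/grpcpp.h>
#include "milvus.grpc.pb.h"   // assumed header name for the generated service

int main() {
    auto channel = grpc::CreateChannel("localhost:19530",   // assumed server address
                                       grpc::InsecureChannelCredentials());
    auto stub = milvus::grpc::MilvusService::NewStub(channel);

    milvus::grpc::TableName request;
    request.set_table_name("example_table");                 // hypothetical table name
    milvus::grpc::Status response;                           // application-level status message

    // The completion callback runs on a gRPC worker thread, so block on a promise
    // until it fires before letting main() return.
    std::promise<grpc::Status> done;
    grpc::ClientContext context;
    stub->experimental_async()->PreloadTable(                // accessor name is an assumption
        &context, &request, &response,
        [&done](grpc::Status s) { done.set_value(std::move(s)); });

    grpc::Status rpc_status = done.get_future().get();
    std::cout << "PreloadTable RPC ok: " << rpc_status.ok() << std::endl;
    return 0;
}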
|
||||
|
|
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -5,14 +5,21 @@ import "status.proto";
|
|||
package milvus.grpc;
|
||||
|
||||
/**
|
||||
* @brief Table Name
|
||||
* @brief Table name
|
||||
*/
|
||||
message TableName {
|
||||
string table_name = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Table Name List
|
||||
* @brief Partition name
|
||||
*/
|
||||
message PartitionName {
|
||||
string partition_name = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Table name list
|
||||
*/
|
||||
message TableNameList {
|
||||
Status status = 1;
|
||||
|
@ -20,7 +27,7 @@ message TableNameList {
|
|||
}
|
||||
|
||||
/**
|
||||
* @brief Table Schema
|
||||
* @brief Table schema
|
||||
*/
|
||||
message TableSchema {
|
||||
Status status = 1;
|
||||
|
@ -31,7 +38,24 @@ message TableSchema {
|
|||
}
|
||||
|
||||
/**
|
||||
* @brief Range Schema
|
||||
* @brief Params of partition
|
||||
*/
|
||||
message PartitionParam {
|
||||
string table_name = 1;
|
||||
string partition_name = 2;
|
||||
string tag = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Partition list
|
||||
*/
|
||||
message PartitionList {
|
||||
Status status = 1;
|
||||
repeated PartitionParam partition_array = 2;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Range schema
|
||||
*/
|
||||
message Range {
|
||||
string start_value = 1;
|
||||
|
@ -46,12 +70,13 @@ message RowRecord {
|
|||
}
|
||||
|
||||
/**
|
||||
* @brief params to be inserted
|
||||
* @brief Params to be inserted
|
||||
*/
|
||||
message InsertParam {
|
||||
string table_name = 1;
|
||||
repeated RowRecord row_record_array = 2;
|
||||
repeated int64 row_id_array = 3; //optional
|
||||
string partition_tag = 4;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -63,7 +88,7 @@ message VectorIds {
|
|||
}
|
||||
|
||||
/**
|
||||
* @brief params for searching vector
|
||||
* @brief Params for searching vector
|
||||
*/
|
||||
message SearchParam {
|
||||
string table_name = 1;
|
||||
|
@ -71,10 +96,11 @@ message SearchParam {
|
|||
repeated Range query_range_array = 3;
|
||||
int64 topk = 4;
|
||||
int64 nprobe = 5;
|
||||
repeated string partition_tag_array = 6;
|
||||
}
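The partition support added here touches two of the messages above: InsertParam gains a partition_tag field and SearchParam gains a repeated partition_tag_array. The sketch below shows how a client might populate them, assuming the usual protoc-generated C++ accessors (set_*/add_*) and a generated message header named milvus.pb.h; both names are assumptions, not taken from this diff.

#include "milvus.pb.h"   // assumed name of the protoc-generated message header

int main() {
    // Route newly inserted vectors to a partition identified by its tag.
    milvus::grpc::InsertParam insert_param;
    insert_param.set_table_name("example_table");    // hypothetical table name
    insert_param.set_partition_tag("2019-11");       // hypothetical partition tag
    insert_param.add_row_id_array(1);                // optional caller-supplied vector id

    // Restrict a search to one or more partitions by tag.
    milvus::grpc::SearchParam search_param;
    search_param.set_table_name("example_table");
    search_param.set_topk(10);                       // number of most similar vectors to return
    search_param.set_nprobe(16);                     // number of index cells to probe
    search_param.add_partition_tag_array("2019-11");
    return 0;
}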
|
||||
|
||||
/**
|
||||
* @brief params for searching vector in files
|
||||
* @brief Params for searching vector in files
|
||||
*/
|
||||
message SearchInFilesParam {
|
||||
repeated string file_id_array = 1;
|
||||
|
@ -92,7 +118,7 @@ message TopKQueryResult {
|
|||
}
|
||||
|
||||
/**
|
||||
* @brief Server String Reply
|
||||
* @brief Server string reply
|
||||
*/
|
||||
message StringReply {
|
||||
Status status = 1;
|
||||
|
@ -116,7 +142,7 @@ message TableRowCount {
|
|||
}
|
||||
|
||||
/**
|
||||
* @brief Give Server Command
|
||||
* @brief Give server command
|
||||
*/
|
||||
message Command {
|
||||
string cmd = 1;
|
||||
|
@ -142,169 +168,173 @@ message IndexParam {
|
|||
}
|
||||
|
||||
/**
|
||||
* @brief table name and range for DeleteByRange
|
||||
* @brief table name and range for DeleteByDate
|
||||
*/
|
||||
message DeleteByRangeParam {
|
||||
message DeleteByDateParam {
|
||||
Range range = 1;
|
||||
string table_name = 2;
|
||||
}
|
||||
|
||||
service MilvusService {
|
||||
/**
|
||||
* @brief Create table method
|
||||
* @brief This method is used to create table
|
||||
*
|
||||
* This method is used to create table
|
||||
*
|
||||
* @param param, use to provide table information to be created.
|
||||
* @param TableSchema, used to provide table information to be created.
|
||||
*
|
||||
* @return Status
|
||||
*/
|
||||
rpc CreateTable(TableSchema) returns (Status){}
|
||||
|
||||
/**
|
||||
* @brief Test table existence method
|
||||
* @brief This method is used to test table existence.
|
||||
*
|
||||
* This method is used to test table existence.
|
||||
*
|
||||
* @param table_name, table name is going to be tested.
|
||||
* @param TableName, table name is going to be tested.
|
||||
*
|
||||
* @return BoolReply
|
||||
*/
|
||||
rpc HasTable(TableName) returns (BoolReply) {}
|
||||
|
||||
/**
|
||||
* @brief Delete table method
|
||||
* @brief This method is used to get table schema.
|
||||
*
|
||||
* This method is used to delete table.
|
||||
* @param TableName, target table name.
|
||||
*
|
||||
* @param table_name, table name is going to be deleted.
|
||||
*
|
||||
*/
|
||||
rpc DropTable(TableName) returns (Status) {}
|
||||
|
||||
/**
|
||||
* @brief Build index by table method
|
||||
*
|
||||
* This method is used to build index by table in sync mode.
|
||||
*
|
||||
* @param table_name, table is going to be built index.
|
||||
*
|
||||
*/
|
||||
rpc CreateIndex(IndexParam) returns (Status) {}
|
||||
|
||||
/**
|
||||
* @brief Add vector array to table
|
||||
*
|
||||
* This method is used to add vector array to table.
|
||||
*
|
||||
* @param table_name, table_name is inserted.
|
||||
* @param record_array, vector array is inserted.
|
||||
*
|
||||
* @return vector id array
|
||||
*/
|
||||
rpc Insert(InsertParam) returns (VectorIds) {}
|
||||
|
||||
/**
|
||||
* @brief Query vector
|
||||
*
|
||||
* This method is used to query vector in table.
|
||||
*
|
||||
* @param table_name, table_name is queried.
|
||||
* @param query_record_array, all vector are going to be queried.
|
||||
* @param query_range_array, optional ranges for conditional search. If not specified, search whole table
|
||||
* @param topk, how many similarity vectors will be searched.
|
||||
*
|
||||
* @return query result array.
|
||||
*/
|
||||
rpc Search(SearchParam) returns (TopKQueryResult) {}
|
||||
|
||||
/**
|
||||
* @brief Internal use query interface
|
||||
*
|
||||
* This method is used to query vector in specified files.
|
||||
*
|
||||
* @param file_id_array, specified files id array, queried.
|
||||
* @param query_record_array, all vector are going to be queried.
|
||||
* @param query_range_array, optional ranges for conditional search. If not specified, search whole table
|
||||
* @param topk, how many similarity vectors will be searched.
|
||||
*
|
||||
* @return query result array.
|
||||
*/
|
||||
rpc SearchInFiles(SearchInFilesParam) returns (TopKQueryResult) {}
|
||||
|
||||
/**
|
||||
* @brief Get table schema
|
||||
*
|
||||
* This method is used to get table schema.
|
||||
*
|
||||
* @param table_name, target table name.
|
||||
*
|
||||
* @return table schema
|
||||
* @return TableSchema
|
||||
*/
|
||||
rpc DescribeTable(TableName) returns (TableSchema) {}
|
||||
|
||||
/**
|
||||
* @brief Get table schema
|
||||
* @brief This method is used to get table row count.
|
||||
*
|
||||
* This method is used to get table schema.
|
||||
* @param TableName, target table name.
|
||||
*
|
||||
* @param table_name, target table name.
|
||||
*
|
||||
* @return table schema
|
||||
* @return TableRowCount
|
||||
*/
|
||||
rpc CountTable(TableName) returns (TableRowCount) {}
|
||||
|
||||
/**
|
||||
* @brief List all tables in database
|
||||
* @brief This method is used to list all tables.
|
||||
*
|
||||
* This method is used to list all tables.
|
||||
* @param Command, dummy parameter.
|
||||
*
|
||||
*
|
||||
* @return table names.
|
||||
* @return TableNameList
|
||||
*/
|
||||
rpc ShowTables(Command) returns (TableNameList) {}
|
||||
|
||||
/**
|
||||
* @brief Give the server status
|
||||
* @brief This method is used to delete table.
|
||||
*
|
||||
* This method is used to give the server status.
|
||||
* @param TableName, table name is going to be deleted.
|
||||
*
|
||||
* @return Server status.
|
||||
* @return Status
|
||||
*/
|
||||
rpc DropTable(TableName) returns (Status) {}
|
||||
|
||||
/**
|
||||
* @brief This method is used to build index by table in sync mode.
|
||||
*
|
||||
* @param IndexParam, index parameters.
|
||||
*
|
||||
* @return Status
|
||||
*/
|
||||
rpc CreateIndex(IndexParam) returns (Status) {}
|
||||
|
||||
/**
|
||||
* @brief This method is used to describe index
|
||||
*
|
||||
* @param TableName, target table name.
|
||||
*
|
||||
* @return IndexParam
|
||||
*/
|
||||
rpc DescribeIndex(TableName) returns (IndexParam) {}
|
||||
|
||||
/**
|
||||
* @brief This method is used to drop index
|
||||
*
|
||||
* @param TableName, target table name.
|
||||
*
|
||||
* @return Status
|
||||
*/
|
||||
rpc DropIndex(TableName) returns (Status) {}
|
||||
|
||||
/**
|
||||
* @brief This method is used to create partition
|
||||
*
|
||||
* @param PartitionParam, partition parameters.
|
||||
*
|
||||
* @return Status
|
||||
*/
|
||||
rpc CreatePartition(PartitionParam) returns (Status) {}
|
||||
|
||||
/**
|
||||
* @brief This method is used to show partition information
|
||||
*
|
||||
* @param TableName, target table name.
|
||||
*
|
||||
* @return PartitionList
|
||||
*/
|
||||
rpc ShowPartitions(TableName) returns (PartitionList) {}
|
||||
|
||||
/**
|
||||
* @brief This method is used to drop partition
|
||||
*
|
||||
* @param PartitionParam, target partition.
|
||||
*
|
||||
* @return Status
|
||||
*/
|
||||
rpc DropPartition(PartitionParam) returns (Status) {}
|
||||
|
||||
/**
|
||||
* @brief This method is used to add vector array to table.
|
||||
*
|
||||
* @param InsertParam, insert parameters.
|
||||
*
|
||||
* @return VectorIds
|
||||
*/
|
||||
rpc Insert(InsertParam) returns (VectorIds) {}
|
||||
|
||||
/**
|
||||
* @brief This method is used to query vector in table.
|
||||
*
|
||||
* @param SearchParam, search parameters.
|
||||
*
|
||||
* @return TopKQueryResult
|
||||
*/
|
||||
rpc Search(SearchParam) returns (TopKQueryResult) {}
|
||||
|
||||
/**
|
||||
* @brief This method is used to query vector in specified files.
|
||||
*
|
||||
* @param SearchInFilesParam, search in files parameters.
|
||||
*
|
||||
* @return TopKQueryResult
|
||||
*/
|
||||
rpc SearchInFiles(SearchInFilesParam) returns (TopKQueryResult) {}
|
||||
|
||||
/**
|
||||
* @brief This method is used to give the server status.
|
||||
*
|
||||
* @param Command, command string
|
||||
*
|
||||
* @return StringReply
|
||||
*/
|
||||
rpc Cmd(Command) returns (StringReply) {}
|
||||
|
||||
/**
|
||||
* @brief delete table by range
|
||||
* @brief This method is used to delete vectors by date range
|
||||
*
|
||||
* This method is used to delete vector by range
|
||||
* @param DeleteByDateParam, delete parameters.
|
||||
*
|
||||
* @return rpc status.
|
||||
* @return Status
|
||||
*/
|
||||
rpc DeleteByRange(DeleteByRangeParam) returns (Status) {}
|
||||
rpc DeleteByDate(DeleteByDateParam) returns (Status) {}
|
||||
|
||||
/**
|
||||
* @brief preload table
|
||||
* @brief This method is used to preload table
|
||||
*
|
||||
* This method is used to preload table
|
||||
* @param TableName, target table name.
|
||||
*
|
||||
* @return Status.
|
||||
* @return Status
|
||||
*/
|
||||
rpc PreloadTable(TableName) returns (Status) {}
|
||||
|
||||
/**
|
||||
* @brief describe index
|
||||
*
|
||||
* This method is used to describe index
|
||||
*
|
||||
* @return Status.
|
||||
*/
|
||||
rpc DescribeIndex(TableName) returns (IndexParam) {}
|
||||
|
||||
/**
|
||||
* @brief drop index
|
||||
*
|
||||
* This method is used to drop index
|
||||
*
|
||||
* @return Status.
|
||||
*/
|
||||
rpc DropIndex(TableName) returns (Status) {}
|
||||
|
||||
}
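The partition RPCs declared above (CreatePartition, ShowPartitions, DropPartition) follow the same blocking unary pattern as the rest of the service. The sketch below illustrates a possible client-side call sequence; it assumes the generated header milvus.grpc.pb.h and a server address of localhost:19530, neither of which is stated in this diff.

#include <iostream>
#include <memory>
#include <grpcpp/grpcpp.h>
#include "milvus.grpc.pb.h"   // assumed header name for the generated service

int main() {
    auto channel = grpc::CreateChannel("localhost:19530",   // assumed server address
                                       grpc::InsecureChannelCredentials());
    auto stub = milvus::grpc::MilvusService::NewStub(channel);

    // CreatePartition(PartitionParam) -> Status
    milvus::grpc::PartitionParam partition;
    partition.set_table_name("example_table");      // hypothetical table name
    partition.set_partition_name("example_part");   // hypothetical partition name
    partition.set_tag("2019-11");                   // hypothetical partition tag

    milvus::grpc::Status create_reply;              // application-level status message
    grpc::ClientContext create_ctx;
    grpc::Status rpc_status = stub->CreatePartition(&create_ctx, partition, &create_reply);
    if (!rpc_status.ok()) {
        std::cerr << "CreatePartition RPC failed: " << rpc_status.error_message() << std::endl;
        return 1;
    }

    // ShowPartitions(TableName) -> PartitionList
    milvus::grpc::TableName table;
    table.set_table_name("example_table");
    milvus::grpc::PartitionList partitions;
    grpc::ClientContext show_ctx;
    stub->ShowPartitions(&show_ctx, table, &partitions);
    std::cout << "partition count: " << partitions.partition_array_size() << std::endl;
    return 0;
}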
|
||||
|
|
|
@ -18,84 +18,97 @@
|
|||
#-------------------------------------------------------------------------------
|
||||
|
||||
|
||||
cmake_minimum_required(VERSION 3.14)
|
||||
message(STATUS "---------------core--------------")
|
||||
cmake_minimum_required(VERSION 3.12)
|
||||
message(STATUS "------------------------------KNOWHERE-----------------------------------")
|
||||
message(STATUS "Building using CMake version: ${CMAKE_VERSION}")
|
||||
|
||||
set(KNOWHERE_VERSION "0.1.0")
|
||||
set(KNOWHERE_VERSION "0.6.0")
|
||||
string(REGEX MATCH "^[0-9]+\\.[0-9]+\\.[0-9]+" KNOWHERE_BASE_VERSION "${KNOWHERE_VERSION}")
|
||||
project(knowhere VERSION "${KNOWHERE_BASE_VERSION}" LANGUAGES CUDA C CXX)
|
||||
project(knowhere VERSION "${KNOWHERE_BASE_VERSION}" LANGUAGES C CXX)
|
||||
set(CMAKE_CXX_STANDARD 14)
|
||||
|
||||
set(KNOWHERE_VERSION_MAJOR "${knowhere_VERSION_MAJOR}")
|
||||
set(KNOWHERE_VERSION_MINOR "${knowhere_VERSION_MINOR}")
|
||||
set(KNOWHERE_VERSION_PATCH "${knowhere_VERSION_PATCH}")
|
||||
if(KNOWHERE_VERSION_MAJOR STREQUAL ""
|
||||
if (KNOWHERE_VERSION_MAJOR STREQUAL ""
|
||||
OR KNOWHERE_VERSION_MINOR STREQUAL ""
|
||||
OR KNOWHERE_VERSION_PATCH STREQUAL "")
|
||||
message(FATAL_ERROR "Failed to determine Knowhere version from '${KNOWHERE_VERSION}'")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
message(STATUS "Knowhere version: "
|
||||
"${KNOWHERE_VERSION_MAJOR}.${KNOWHERE_VERSION_MINOR}.${KNOWHERE_VERSION_PATCH} "
|
||||
"(full: '${KNOWHERE_VERSION}')")
|
||||
|
||||
# if no build type is specified, default to release builds
|
||||
if(NOT CMAKE_BUILD_TYPE)
|
||||
if (NOT CMAKE_BUILD_TYPE)
|
||||
set(CMAKE_BUILD_TYPE Release)
|
||||
endif(NOT CMAKE_BUILD_TYPE)
|
||||
endif (NOT CMAKE_BUILD_TYPE)
|
||||
|
||||
if(CMAKE_BUILD_TYPE STREQUAL "Release")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp")
|
||||
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3")
|
||||
else()
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp")
|
||||
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O0 -g")
|
||||
endif()
|
||||
MESSAGE(STATUS "CMAKE_CXX_FLAGS" ${CMAKE_CXX_FLAGS})
|
||||
|
||||
find_package(CUDA)
|
||||
|
||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)")
|
||||
if (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)")
|
||||
message(STATUS "building milvus_engine on x86 architecture")
|
||||
set(KNOWHERE_BUILD_ARCH x86_64)
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc)")
|
||||
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc)")
|
||||
message(STATUS "building milvus_engine on ppc architecture")
|
||||
set(KNOWHERE_BUILD_ARCH ppc64le)
|
||||
else()
|
||||
else ()
|
||||
message(WARNING "unknown processor type")
|
||||
message(WARNING "CMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR}")
|
||||
set(KNOWHERE_BUILD_ARCH unknown)
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
if(CMAKE_BUILD_TYPE STREQUAL "Release")
|
||||
if (CMAKE_BUILD_TYPE STREQUAL "Release")
|
||||
set(BUILD_TYPE "release")
|
||||
else()
|
||||
else ()
|
||||
set(BUILD_TYPE "debug")
|
||||
endif()
|
||||
endif ()
|
||||
message(STATUS "Build type = ${BUILD_TYPE}")
|
||||
|
||||
set(INDEX_SOURCE_DIR ${PROJECT_SOURCE_DIR})
|
||||
set(INDEX_BINARY_DIR ${PROJECT_BINARY_DIR})
|
||||
message(STATUS "Core source dir: ${PROJECT_SOURCE_DIR}")
|
||||
message(STATUS "Core binary dir: ${PROJECT_BINARY_DIR}")
|
||||
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${INDEX_SOURCE_DIR}/cmake")
|
||||
|
||||
include(ExternalProject)
|
||||
include(DefineOptionsCore)
|
||||
include(BuildUtilsCore)
|
||||
|
||||
set(KNOWHERE_CPU_VERSION false)
|
||||
if (MILVUS_GPU_VERSION OR KNOWHERE_GPU_VERSION)
|
||||
message(STATUS "Building Knowhere GPU version")
|
||||
add_compile_definitions("MILVUS_GPU_VERSION")
|
||||
enable_language(CUDA)
|
||||
find_package(CUDA 10 REQUIRED)
|
||||
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda")
|
||||
else ()
|
||||
message(STATUS "Building Knowhere CPU version")
|
||||
set(KNOWHERE_CPU_VERSION true)
|
||||
add_compile_definitions("MILVUS_CPU_VERSION")
|
||||
endif ()
|
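#
# Illustrative sketch (assumption, not taken from this file): how a sub-target
# could branch on the GPU/CPU switch chosen above. "knowhere_demo" and
# demo.cpp are hypothetical placeholders.
#   add_library(knowhere_demo demo.cpp)
#   if (KNOWHERE_GPU_VERSION)
#       target_link_libraries(knowhere_demo PRIVATE ${CUDA_LIBRARIES})
#   endif ()
#   # The MILVUS_GPU_VERSION / MILVUS_CPU_VERSION compile definitions added
#   # above are visible to every target created after this point.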
||||
|
||||
include(ThirdPartyPackagesCore)
|
||||
|
||||
if (CMAKE_BUILD_TYPE STREQUAL "Release")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp -mavx -mf16c -msse4 -mpopcnt")
|
||||
if (KNOWHERE_GPU_VERSION)
|
||||
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3")
|
||||
endif ()
|
||||
else ()
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -g -fPIC -DELPP_THREAD_SAFE -fopenmp -mavx -mf16c -msse4 -mpopcnt")
|
||||
if (KNOWHERE_GPU_VERSION)
|
||||
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3 -g")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
add_subdirectory(knowhere)
|
||||
|
||||
if (BUILD_COVERAGE STREQUAL "ON")
|
||||
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
set(INDEX_INCLUDE_DIRS ${INDEX_INCLUDE_DIRS} PARENT_SCOPE)
|
||||
|
||||
if(BUILD_UNIT_TEST STREQUAL "ON")
|
||||
if (KNOWHERE_BUILD_TESTS)
|
||||
add_subdirectory(unittest)
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
config_summary()
|
||||
|
|
|
@ -5,7 +5,7 @@ BUILD_UNITTEST="OFF"
|
|||
INSTALL_PREFIX=$(pwd)/cmake_build
|
||||
MAKE_CLEAN="OFF"
|
||||
PROFILING="OFF"
|
||||
BUILD_FAISS_WITH_MKL="OFF"
|
||||
FAISS_WITH_MKL="OFF"
|
||||
USE_JFROG_CACHE="OFF"
|
||||
|
||||
while getopts "p:d:t:uhrcgmj" arg
|
||||
|
@ -31,7 +31,7 @@ do
|
|||
PROFILING="ON"
|
||||
;;
|
||||
m)
|
||||
BUILD_FAISS_WITH_MKL="ON"
|
||||
FAISS_WITH_MKL="ON"
|
||||
;;
|
||||
j)
|
||||
USE_JFROG_CACHE="ON"
|
||||
|
@ -74,7 +74,7 @@ if [[ ${MAKE_CLEAN} == "ON" ]]; then
|
|||
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
|
||||
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
|
||||
-DMILVUS_ENABLE_PROFILING=${PROFILING} \
|
||||
-DBUILD_FAISS_WITH_MKL=${BUILD_FAISS_WITH_MKL} \
|
||||
-DFAISS_WITH_MKL=${FAISS_WITH_MKL} \
|
||||
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
|
||||
../"
|
||||
echo ${CMAKE_CMD}
|
||||
|
|
|
@ -1,50 +1,50 @@
|
|||
# Define a function that checks the last file modification
|
||||
function(Check_Last_Modify cache_check_lists_file_path working_dir last_modified_commit_id)
|
||||
if(EXISTS "${working_dir}")
|
||||
if(EXISTS "${cache_check_lists_file_path}")
|
||||
if (EXISTS "${working_dir}")
|
||||
if (EXISTS "${cache_check_lists_file_path}")
|
||||
set(GIT_LOG_SKIP_NUM 0)
|
||||
set(_MATCH_ALL ON CACHE BOOL "Match all")
|
||||
set(_LOOP_STATUS ON CACHE BOOL "Whether out of loop")
|
||||
file(STRINGS ${cache_check_lists_file_path} CACHE_IGNORE_TXT)
|
||||
while(_LOOP_STATUS)
|
||||
foreach(_IGNORE_ENTRY ${CACHE_IGNORE_TXT})
|
||||
if(NOT _IGNORE_ENTRY MATCHES "^[^#]+")
|
||||
while (_LOOP_STATUS)
|
||||
foreach (_IGNORE_ENTRY ${CACHE_IGNORE_TXT})
|
||||
if (NOT _IGNORE_ENTRY MATCHES "^[^#]+")
|
||||
continue()
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
set(_MATCH_ALL OFF)
|
||||
execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --name-status --pretty= WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE CHANGE_FILES)
|
||||
if(NOT CHANGE_FILES STREQUAL "")
|
||||
if (NOT CHANGE_FILES STREQUAL "")
|
||||
string(REPLACE "\n" ";" _CHANGE_FILES ${CHANGE_FILES})
|
||||
foreach(_FILE_ENTRY ${_CHANGE_FILES})
|
||||
foreach (_FILE_ENTRY ${_CHANGE_FILES})
|
||||
string(REGEX MATCH "[^ \t]+$" _FILE_NAME ${_FILE_ENTRY})
|
||||
execute_process(COMMAND sh -c "echo ${_FILE_NAME} | grep ${_IGNORE_ENTRY}" RESULT_VARIABLE return_code)
|
||||
if (return_code EQUAL 0)
|
||||
execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --pretty=%H WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE LAST_MODIFIED_COMMIT_ID)
|
||||
set (${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
|
||||
set(${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
|
||||
set(_LOOP_STATUS OFF)
|
||||
endif()
|
||||
endforeach()
|
||||
else()
|
||||
endif ()
|
||||
endforeach ()
|
||||
else ()
|
||||
set(_LOOP_STATUS OFF)
|
||||
endif()
|
||||
endforeach()
|
||||
endif ()
|
||||
endforeach ()
|
||||
|
||||
if(_MATCH_ALL)
|
||||
if (_MATCH_ALL)
|
||||
execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --pretty=%H WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE LAST_MODIFIED_COMMIT_ID)
|
||||
set (${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
|
||||
set(${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
|
||||
set(_LOOP_STATUS OFF)
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
math(EXPR GIT_LOG_SKIP_NUM "${GIT_LOG_SKIP_NUM} + 1")
|
||||
endwhile(_LOOP_STATUS)
|
||||
else()
|
||||
endwhile (_LOOP_STATUS)
|
||||
else ()
|
||||
execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --pretty=%H WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE LAST_MODIFIED_COMMIT_ID)
|
||||
set (${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
|
||||
endif()
|
||||
else()
|
||||
set(${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
|
||||
endif ()
|
||||
else ()
|
||||
message(FATAL_ERROR "The directory ${working_dir} does not exist")
|
||||
endif()
|
||||
endif ()
|
||||
endfunction()
|
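#
# Illustrative usage sketch (assumption, not part of this file): derive a cache
# key from the last commit that touched files matched by a cache-ignore list.
# The file name and the output variable are hypothetical placeholders.
#   Check_Last_Modify("${INDEX_SOURCE_DIR}/cache_ignore.txt"
#                     "${INDEX_SOURCE_DIR}"
#                     LAST_MODIFIED_COMMIT_ID)
#   string(MD5 CACHE_KEY "${LAST_MODIFIED_COMMIT_ID}")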
||||
|
||||
# Define a function that extracts a cached package
|
||||
|
@ -83,15 +83,15 @@ endfunction()
|
|||
|
||||
# Define a function to create a new cached package
|
||||
function(ExternalProject_Create_Cache project_name package_file install_path cache_username cache_password cache_path)
|
||||
if(EXISTS ${package_file})
|
||||
if (EXISTS ${package_file})
|
||||
message(STATUS "Removing existing package file: ${package_file}")
|
||||
file(REMOVE ${package_file})
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
string(REGEX REPLACE "(.+)/.+$" "\\1" package_dir ${package_file})
|
||||
if(NOT EXISTS ${package_dir})
|
||||
if (NOT EXISTS ${package_dir})
|
||||
file(MAKE_DIRECTORY ${package_dir})
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
message(STATUS "Will create cached package file: ${package_file}")
|
||||
|
||||
|
@ -116,89 +116,89 @@ function(ADD_THIRDPARTY_LIB LIB_NAME)
|
|||
"${one_value_args}"
|
||||
"${multi_value_args}"
|
||||
${ARGN})
|
||||
if(ARG_UNPARSED_ARGUMENTS)
|
||||
if (ARG_UNPARSED_ARGUMENTS)
|
||||
message(SEND_ERROR "Error: unrecognized arguments: ${ARG_UNPARSED_ARGUMENTS}")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
if(ARG_STATIC_LIB AND ARG_SHARED_LIB)
|
||||
if(NOT ARG_STATIC_LIB)
|
||||
if (ARG_STATIC_LIB AND ARG_SHARED_LIB)
|
||||
if (NOT ARG_STATIC_LIB)
|
||||
message(FATAL_ERROR "No static or shared library provided for ${LIB_NAME}")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
set(AUG_LIB_NAME "${LIB_NAME}_static")
|
||||
add_library(${AUG_LIB_NAME} STATIC IMPORTED)
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES IMPORTED_LOCATION "${ARG_STATIC_LIB}")
|
||||
if(ARG_DEPS)
|
||||
if (ARG_DEPS)
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
|
||||
endif()
|
||||
endif ()
|
||||
message(STATUS "Added static library dependency ${AUG_LIB_NAME}: ${ARG_STATIC_LIB}")
|
||||
if(ARG_INCLUDE_DIRECTORIES)
|
||||
if (ARG_INCLUDE_DIRECTORIES)
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${ARG_INCLUDE_DIRECTORIES}")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
set(AUG_LIB_NAME "${LIB_NAME}_shared")
|
||||
add_library(${AUG_LIB_NAME} SHARED IMPORTED)
|
||||
|
||||
if(WIN32)
|
||||
if (WIN32)
|
||||
# Mark the ".lib" location as part of a Windows DLL
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES IMPORTED_IMPLIB "${ARG_SHARED_LIB}")
|
||||
else()
|
||||
else ()
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES IMPORTED_LOCATION "${ARG_SHARED_LIB}")
|
||||
endif()
|
||||
if(ARG_DEPS)
|
||||
endif ()
|
||||
if (ARG_DEPS)
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
|
||||
endif()
|
||||
endif ()
|
||||
message(STATUS "Added shared library dependency ${AUG_LIB_NAME}: ${ARG_SHARED_LIB}")
|
||||
if(ARG_INCLUDE_DIRECTORIES)
|
||||
if (ARG_INCLUDE_DIRECTORIES)
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${ARG_INCLUDE_DIRECTORIES}")
|
||||
endif()
|
||||
elseif(ARG_STATIC_LIB)
|
||||
endif ()
|
||||
elseif (ARG_STATIC_LIB)
|
||||
set(AUG_LIB_NAME "${LIB_NAME}_static")
|
||||
add_library(${AUG_LIB_NAME} STATIC IMPORTED)
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES IMPORTED_LOCATION "${ARG_STATIC_LIB}")
|
||||
if(ARG_DEPS)
|
||||
if (ARG_DEPS)
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
|
||||
endif()
|
||||
endif ()
|
||||
message(STATUS "Added static library dependency ${AUG_LIB_NAME}: ${ARG_STATIC_LIB}")
|
||||
if(ARG_INCLUDE_DIRECTORIES)
|
||||
if (ARG_INCLUDE_DIRECTORIES)
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${ARG_INCLUDE_DIRECTORIES}")
|
||||
endif()
|
||||
elseif(ARG_SHARED_LIB)
|
||||
endif ()
|
||||
elseif (ARG_SHARED_LIB)
|
||||
set(AUG_LIB_NAME "${LIB_NAME}_shared")
|
||||
add_library(${AUG_LIB_NAME} SHARED IMPORTED)
|
||||
|
||||
if(WIN32)
|
||||
if (WIN32)
|
||||
# Mark the ".lib" location as part of a Windows DLL
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES IMPORTED_IMPLIB "${ARG_SHARED_LIB}")
|
||||
else()
|
||||
else ()
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES IMPORTED_LOCATION "${ARG_SHARED_LIB}")
|
||||
endif()
|
||||
endif ()
|
||||
message(STATUS "Added shared library dependency ${AUG_LIB_NAME}: ${ARG_SHARED_LIB}")
|
||||
if(ARG_DEPS)
|
||||
if (ARG_DEPS)
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
|
||||
endif()
|
||||
if(ARG_INCLUDE_DIRECTORIES)
|
||||
endif ()
|
||||
if (ARG_INCLUDE_DIRECTORIES)
|
||||
set_target_properties(${AUG_LIB_NAME}
|
||||
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${ARG_INCLUDE_DIRECTORIES}")
|
||||
endif()
|
||||
else()
|
||||
endif ()
|
||||
else ()
|
||||
message(FATAL_ERROR "No static or shared library provided for ${LIB_NAME}")
|
||||
endif()
|
||||
endif ()
|
||||
endfunction()
|
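#
# Illustrative usage sketch (assumption, not part of this file): registering a
# prebuilt third-party library through the helper above, which creates the
# imported targets foo_static / foo_shared. "foo" and the paths are
# hypothetical, and the keyword names assume the usual
# STATIC_LIB / SHARED_LIB / DEPS / INCLUDE_DIRECTORIES arguments.
#   ADD_THIRDPARTY_LIB(foo
#       STATIC_LIB "/opt/foo/lib/libfoo.a"
#       SHARED_LIB "/opt/foo/lib/libfoo.so"
#       INCLUDE_DIRECTORIES "/opt/foo/include")
#   target_link_libraries(my_target PRIVATE foo_static)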
||||
|
|
|
@ -13,16 +13,16 @@ macro(define_option name description default)
|
|||
endmacro()
|
||||
|
||||
function(list_join lst glue out)
|
||||
if("${${lst}}" STREQUAL "")
|
||||
if ("${${lst}}" STREQUAL "")
|
||||
set(${out} "" PARENT_SCOPE)
|
||||
return()
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
list(GET ${lst} 0 joined)
|
||||
list(REMOVE_AT ${lst} 0)
|
||||
foreach(item ${${lst}})
|
||||
foreach (item ${${lst}})
|
||||
set(joined "${joined}${glue}${item}")
|
||||
endforeach()
|
||||
endforeach ()
|
||||
set(${out} ${joined} PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
|
@ -35,22 +35,31 @@ macro(define_option_string name description default)
|
|||
|
||||
set("${name}_OPTION_ENUM" ${ARGN})
|
||||
list_join("${name}_OPTION_ENUM" "|" "${name}_OPTION_ENUM")
|
||||
if(NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
|
||||
if (NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
|
||||
set_property(CACHE ${name} PROPERTY STRINGS ${ARGN})
|
||||
endif()
|
||||
endif ()
|
||||
endmacro()
|
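#
# Illustrative sketch (assumption, not part of this file): defining a boolean
# and an enumerated string option with the helpers above; both can then be
# overridden on the command line, e.g. -DEXAMPLE_BLAS_VENDOR=MKL.
# "Example", EXAMPLE_WITH_FOO and EXAMPLE_BLAS_VENDOR are hypothetical names.
#   set_option_category("Example")
#   define_option(EXAMPLE_WITH_FOO "Build with the FOO library" ON)
#   define_option_string(EXAMPLE_BLAS_VENDOR
#       "Which BLAS implementation to link against"
#       "OpenBLAS"
#       "OpenBLAS"
#       "MKL")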
||||
|
||||
#----------------------------------------------------------------------
|
||||
set_option_category("GPU version")
|
||||
|
||||
if (MILVUS_GPU_VERSION)
|
||||
define_option(KNOWHERE_GPU_VERSION "Build GPU version" ON)
|
||||
else ()
|
||||
define_option(KNOWHERE_GPU_VERSION "Build GPU version" OFF)
|
||||
endif ()
|
||||
|
||||
#----------------------------------------------------------------------
|
||||
set_option_category("Thirdparty")
|
||||
|
||||
set(KNOWHERE_DEPENDENCY_SOURCE_DEFAULT "AUTO")
|
||||
set(KNOWHERE_DEPENDENCY_SOURCE_DEFAULT "BUNDLED")
|
||||
|
||||
define_option_string(KNOWHERE_DEPENDENCY_SOURCE
|
||||
"Method to use for acquiring KNOWHERE's build dependencies"
|
||||
"${KNOWHERE_DEPENDENCY_SOURCE_DEFAULT}"
|
||||
"AUTO"
|
||||
"BUNDLED"
|
||||
"SYSTEM")
|
||||
"Method to use for acquiring KNOWHERE's build dependencies"
|
||||
"${KNOWHERE_DEPENDENCY_SOURCE_DEFAULT}"
|
||||
"AUTO"
|
||||
"BUNDLED"
|
||||
"SYSTEM")
|
||||
|
||||
define_option(KNOWHERE_VERBOSE_THIRDPARTY_BUILD
|
||||
"Show output from ExternalProjects rather than just logging to files" ON)
|
||||
|
@ -70,27 +79,16 @@ define_option(KNOWHERE_WITH_FAISS "Build with FAISS library" ON)
|
|||
|
||||
define_option(KNOWHERE_WITH_FAISS_GPU_VERSION "Build with FAISS GPU version" ON)
|
||||
|
||||
define_option(KNOWHERE_WITH_OPENBLAS "Build with OpenBLAS library" ON)
|
||||
|
||||
#----------------------------------------------------------------------
|
||||
if(MSVC)
|
||||
set_option_category("MSVC")
|
||||
|
||||
define_option(MSVC_LINK_VERBOSE
|
||||
"Pass verbose linking options when linking libraries and executables"
|
||||
OFF)
|
||||
|
||||
define_option(KNOWHERE_USE_STATIC_CRT "Build KNOWHERE with statically linked CRT" OFF)
|
||||
endif()
|
||||
define_option(FAISS_WITH_MKL "Build FAISS with MKL" OFF)
|
||||
|
||||
#----------------------------------------------------------------------
|
||||
set_option_category("Test and benchmark")
|
||||
|
||||
if (BUILD_UNIT_TEST)
|
||||
define_option(KNOWHERE_BUILD_TESTS "Build the KNOWHERE googletest unit tests" ON)
|
||||
else()
|
||||
else ()
|
||||
define_option(KNOWHERE_BUILD_TESTS "Build the KNOWHERE googletest unit tests" OFF)
|
||||
endif(BUILD_UNIT_TEST)
|
||||
endif (BUILD_UNIT_TEST)
|
||||
|
||||
#----------------------------------------------------------------------
|
||||
macro(config_summary)
|
||||
|
@ -102,12 +100,12 @@ macro(config_summary)
|
|||
message(STATUS " Generator: ${CMAKE_GENERATOR}")
|
||||
message(STATUS " Build type: ${CMAKE_BUILD_TYPE}")
|
||||
message(STATUS " Source directory: ${CMAKE_CURRENT_SOURCE_DIR}")
|
||||
if(${CMAKE_EXPORT_COMPILE_COMMANDS})
|
||||
if (${CMAKE_EXPORT_COMPILE_COMMANDS})
|
||||
message(
|
||||
STATUS " Compile commands: ${INDEX_BINARY_DIR}/compile_commands.json")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
foreach(category ${KNOWHERE_OPTION_CATEGORIES})
|
||||
foreach (category ${KNOWHERE_OPTION_CATEGORIES})
|
||||
|
||||
message(STATUS)
|
||||
message(STATUS "${category} options:")
|
||||
|
@ -115,50 +113,50 @@ macro(config_summary)
|
|||
set(option_names ${KNOWHERE_${category}_OPTION_NAMES})
|
||||
|
||||
set(max_value_length 0)
|
||||
foreach(name ${option_names})
|
||||
foreach (name ${option_names})
|
||||
string(LENGTH "\"${${name}}\"" value_length)
|
||||
if(${max_value_length} LESS ${value_length})
|
||||
if (${max_value_length} LESS ${value_length})
|
||||
set(max_value_length ${value_length})
|
||||
endif()
|
||||
endforeach()
|
||||
endif ()
|
||||
endforeach ()
|
||||
|
||||
foreach(name ${option_names})
|
||||
if("${${name}_OPTION_TYPE}" STREQUAL "string")
|
||||
foreach (name ${option_names})
|
||||
if ("${${name}_OPTION_TYPE}" STREQUAL "string")
|
||||
set(value "\"${${name}}\"")
|
||||
else()
|
||||
else ()
|
||||
set(value "${${name}}")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
set(default ${${name}_OPTION_DEFAULT})
|
||||
set(description ${${name}_OPTION_DESCRIPTION})
|
||||
string(LENGTH ${description} description_length)
|
||||
if(${description_length} LESS 70)
|
||||
if (${description_length} LESS 70)
|
||||
string(
|
||||
SUBSTRING
|
||||
" "
|
||||
${description_length} -1 description_padding)
|
||||
else()
|
||||
else ()
|
||||
set(description_padding "
|
||||
")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
set(comment "[${name}]")
|
||||
|
||||
if("${value}" STREQUAL "${default}")
|
||||
if ("${value}" STREQUAL "${default}")
|
||||
set(comment "[default] ${comment}")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
if(NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
|
||||
if (NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
|
||||
set(comment "${comment} [${${name}_OPTION_ENUM}]")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
string(
|
||||
SUBSTRING "${value} "
|
||||
0 ${max_value_length} value)
|
||||
|
||||
message(STATUS " ${description} ${description_padding} ${value} ${comment}")
|
||||
endforeach()
|
||||
endforeach ()
|
||||
|
||||
endforeach()
|
||||
endforeach ()
|
||||
|
||||
endmacro()
|
||||
|
|
|
@ -0,0 +1,431 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# - Find Arrow (arrow/api.h, libarrow.a, libarrow.so)
|
||||
# This module defines
|
||||
# ARROW_FOUND, whether Arrow has been found
|
||||
# ARROW_FULL_SO_VERSION, full shared object version of found Arrow "100.0.0"
|
||||
# ARROW_IMPORT_LIB, path to libarrow's import library (Windows only)
|
||||
# ARROW_INCLUDE_DIR, directory containing headers
|
||||
# ARROW_LIBS, deprecated. Use ARROW_LIB_DIR instead
|
||||
# ARROW_LIB_DIR, directory containing Arrow libraries
|
||||
# ARROW_SHARED_IMP_LIB, deprecated. Use ARROW_IMPORT_LIB instead
|
||||
# ARROW_SHARED_LIB, path to libarrow's shared library
|
||||
# ARROW_SO_VERSION, shared object version of found Arrow such as "100"
|
||||
# ARROW_STATIC_LIB, path to libarrow.a
|
||||
# ARROW_VERSION, version of found Arrow
|
||||
# ARROW_VERSION_MAJOR, major version of found Arrow
|
||||
# ARROW_VERSION_MINOR, minor version of found Arrow
|
||||
# ARROW_VERSION_PATCH, patch version of found Arrow
|
||||
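#
# Minimal usage sketch (illustrative, not part of this module): a consumer
# CMakeLists.txt can resolve Arrow through this module and use the variables
# it exports. "my_index" is a hypothetical target name.
#   list(APPEND CMAKE_MODULE_PATH "${INDEX_SOURCE_DIR}/cmake")
#   find_package(Arrow REQUIRED)
#   include_directories(SYSTEM ${ARROW_INCLUDE_DIR})
#   link_directories(SYSTEM ${ARROW_LIB_DIR})
#   target_link_libraries(my_index PRIVATE ${ARROW_STATIC_LIB})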
|
||||
include(FindPkgConfig)
|
||||
include(FindPackageHandleStandardArgs)
|
||||
|
||||
set(ARROW_SEARCH_LIB_PATH_SUFFIXES)
|
||||
if(CMAKE_LIBRARY_ARCHITECTURE)
|
||||
list(APPEND ARROW_SEARCH_LIB_PATH_SUFFIXES "lib/${CMAKE_LIBRARY_ARCHITECTURE}")
|
||||
endif()
|
||||
list(APPEND ARROW_SEARCH_LIB_PATH_SUFFIXES
|
||||
"lib64"
|
||||
"lib32"
|
||||
"lib"
|
||||
"bin")
|
||||
set(ARROW_CONFIG_SUFFIXES
|
||||
"_RELEASE"
|
||||
"_RELWITHDEBINFO"
|
||||
"_MINSIZEREL"
|
||||
"_DEBUG"
|
||||
"")
|
||||
if(CMAKE_BUILD_TYPE)
|
||||
string(TOUPPER ${CMAKE_BUILD_TYPE} ARROW_CONFIG_SUFFIX_PREFERRED)
|
||||
set(ARROW_CONFIG_SUFFIX_PREFERRED "_${ARROW_CONFIG_SUFFIX_PREFERRED}")
|
||||
list(INSERT ARROW_CONFIG_SUFFIXES 0 "${ARROW_CONFIG_SUFFIX_PREFERRED}")
|
||||
endif()
|
||||
|
||||
if(NOT DEFINED ARROW_MSVC_STATIC_LIB_SUFFIX)
|
||||
if(MSVC)
|
||||
set(ARROW_MSVC_STATIC_LIB_SUFFIX "_static")
|
||||
else()
|
||||
set(ARROW_MSVC_STATIC_LIB_SUFFIX "")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# Internal function.
|
||||
#
|
||||
# Set shared library name for ${base_name} to ${output_variable}.
|
||||
#
|
||||
# Example:
|
||||
# arrow_build_shared_library_name(ARROW_SHARED_LIBRARY_NAME arrow)
|
||||
# # -> ARROW_SHARED_LIBRARY_NAME=libarrow.so on Linux
|
||||
# # -> ARROW_SHARED_LIBRARY_NAME=libarrow.dylib on macOS
|
||||
# # -> ARROW_SHARED_LIBRARY_NAME=arrow.dll with MSVC on Windows
|
||||
# # -> ARROW_SHARED_LIBRARY_NAME=libarrow.dll with MinGW on Windows
|
||||
function(arrow_build_shared_library_name output_variable base_name)
|
||||
set(${output_variable}
|
||||
"${CMAKE_SHARED_LIBRARY_PREFIX}${base_name}${CMAKE_SHARED_LIBRARY_SUFFIX}"
|
||||
PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
# Internal function.
|
||||
#
|
||||
# Set import library name for ${base_name} to ${output_variable}.
|
||||
# This is useful only for MSVC builds. An import library is used only
|
||||
# with MSVC builds.
|
||||
#
|
||||
# Example:
|
||||
# arrow_build_import_library_name(ARROW_IMPORT_LIBRARY_NAME arrow)
|
||||
# # -> ARROW_IMPORT_LIBRARY_NAME=arrow on Linux (meaningless)
|
||||
# # -> ARROW_IMPORT_LIBRARY_NAME=arrow on macOS (meaningless)
|
||||
# # -> ARROW_IMPORT_LIBRARY_NAME=arrow.lib with MSVC on Windows
|
||||
# # -> ARROW_IMPORT_LIBRARY_NAME=libarrow.dll.a with MinGW on Windows
|
||||
function(arrow_build_import_library_name output_variable base_name)
|
||||
set(${output_variable}
|
||||
"${CMAKE_IMPORT_LIBRARY_PREFIX}${base_name}${CMAKE_IMPORT_LIBRARY_SUFFIX}"
|
||||
PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
# Internal function.
|
||||
#
|
||||
# Set static library name for ${base_name} to ${output_variable}.
|
||||
#
|
||||
# Example:
|
||||
# arrow_build_static_library_name(ARROW_STATIC_LIBRARY_NAME arrow)
|
||||
# # -> ARROW_STATIC_LIBRARY_NAME=libarrow.a on Linux
|
||||
# # -> ARROW_STATIC_LIBRARY_NAME=libarrow.a on macOS
|
||||
# # -> ARROW_STATIC_LIBRARY_NAME=arrow.lib with MSVC on Windows
|
||||
# # -> ARROW_STATIC_LIBRARY_NAME=libarrow.dll.a with MinGW on Windows
|
||||
function(arrow_build_static_library_name output_variable base_name)
|
||||
set(
|
||||
${output_variable}
|
||||
"${CMAKE_STATIC_LIBRARY_PREFIX}${base_name}${ARROW_MSVC_STATIC_LIB_SUFFIX}${CMAKE_STATIC_LIBRARY_SUFFIX}"
|
||||
PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
# Internal function.
|
||||
#
|
||||
# Set macro value for ${macro_name} in ${header_content} to ${output_variable}.
|
||||
#
|
||||
# Example:
|
||||
# arrow_extract_macro_value(version_major
|
||||
# "ARROW_VERSION_MAJOR"
|
||||
# "#define ARROW_VERSION_MAJOR 1.0.0")
|
||||
# # -> version_major=1.0.0
|
||||
function(arrow_extract_macro_value output_variable macro_name header_content)
|
||||
string(REGEX MATCH "#define +${macro_name} +[^\r\n]+" macro_definition
|
||||
"${header_content}")
|
||||
string(REGEX
|
||||
REPLACE "^#define +${macro_name} +(.+)$" "\\1" macro_value "${macro_definition}")
|
||||
set(${output_variable} "${macro_value}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
# Internal macro only for arrow_find_package.
|
||||
#
|
||||
# Find package in HOME.
|
||||
macro(arrow_find_package_home)
|
||||
find_path(${prefix}_include_dir "${header_path}"
|
||||
PATHS "${home}"
|
||||
PATH_SUFFIXES "include"
|
||||
NO_DEFAULT_PATH)
|
||||
set(include_dir "${${prefix}_include_dir}")
|
||||
set(${prefix}_INCLUDE_DIR "${include_dir}" PARENT_SCOPE)
|
||||
|
||||
if(MSVC)
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES_ORIGINAL ${CMAKE_FIND_LIBRARY_SUFFIXES})
|
||||
# .dll isn't found by find_library with MSVC because .dll isn't included in
|
||||
# CMAKE_FIND_LIBRARY_SUFFIXES.
|
||||
list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES "${CMAKE_SHARED_LIBRARY_SUFFIX}")
|
||||
endif()
|
||||
find_library(${prefix}_shared_lib
|
||||
NAMES "${shared_lib_name}"
|
||||
PATHS "${home}"
|
||||
PATH_SUFFIXES ${ARROW_SEARCH_LIB_PATH_SUFFIXES}
|
||||
NO_DEFAULT_PATH)
|
||||
if(MSVC)
|
||||
set(CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES_ORIGINAL})
|
||||
endif()
|
||||
set(shared_lib "${${prefix}_shared_lib}")
|
||||
set(${prefix}_SHARED_LIB "${shared_lib}" PARENT_SCOPE)
|
||||
if(shared_lib)
|
||||
add_library(${target_shared} SHARED IMPORTED)
|
||||
set_target_properties(${target_shared} PROPERTIES IMPORTED_LOCATION "${shared_lib}")
|
||||
if(include_dir)
|
||||
set_target_properties(${target_shared}
|
||||
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${include_dir}")
|
||||
endif()
|
||||
find_library(${prefix}_import_lib
|
||||
NAMES "${import_lib_name}"
|
||||
PATHS "${home}"
|
||||
PATH_SUFFIXES ${ARROW_SEARCH_LIB_PATH_SUFFIXES}
|
||||
NO_DEFAULT_PATH)
|
||||
set(import_lib "${${prefix}_import_lib}")
|
||||
set(${prefix}_IMPORT_LIB "${import_lib}" PARENT_SCOPE)
|
||||
if(import_lib)
|
||||
set_target_properties(${target_shared} PROPERTIES IMPORTED_IMPLIB "${import_lib}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
find_library(${prefix}_static_lib
|
||||
NAMES "${static_lib_name}"
|
||||
PATHS "${home}"
|
||||
PATH_SUFFIXES ${ARROW_SEARCH_LIB_PATH_SUFFIXES}
|
||||
NO_DEFAULT_PATH)
|
||||
set(static_lib "${${prefix}_static_lib}")
|
||||
set(${prefix}_STATIC_LIB "${static_lib}" PARENT_SCOPE)
|
||||
if(static_lib)
|
||||
add_library(${target_static} STATIC IMPORTED)
|
||||
set_target_properties(${target_static} PROPERTIES IMPORTED_LOCATION "${static_lib}")
|
||||
if(include_dir)
|
||||
set_target_properties(${target_static}
|
||||
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${include_dir}")
|
||||
endif()
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
# Internal macro only for arrow_find_package.
|
||||
#
|
||||
# Find package by CMake package configuration.
|
||||
macro(arrow_find_package_cmake_package_configuration)
|
||||
# ARROW-5575: We need to split target files for each component
|
||||
if(TARGET ${target_shared} OR TARGET ${target_static})
|
||||
set(${cmake_package_name}_FOUND TRUE)
|
||||
else()
|
||||
find_package(${cmake_package_name} CONFIG)
|
||||
endif()
|
||||
if(${cmake_package_name}_FOUND)
|
||||
set(${prefix}_USE_CMAKE_PACKAGE_CONFIG TRUE PARENT_SCOPE)
|
||||
if(TARGET ${target_shared})
|
||||
foreach(suffix ${ARROW_CONFIG_SUFFIXES})
|
||||
get_target_property(shared_lib ${target_shared} IMPORTED_LOCATION${suffix})
|
||||
if(shared_lib)
|
||||
# Remove shared library version:
|
||||
# libarrow.so.100.0.0 -> libarrow.so
|
||||
# Because ARROW_HOME and pkg-config approaches don't add
|
||||
# shared library version.
|
||||
string(REGEX
|
||||
REPLACE "(${CMAKE_SHARED_LIBRARY_SUFFIX})[.0-9]+$" "\\1" shared_lib
|
||||
"${shared_lib}")
|
||||
set(${prefix}_SHARED_LIB "${shared_lib}" PARENT_SCOPE)
|
||||
break()
|
||||
endif()
|
||||
endforeach()
|
||||
endif()
|
||||
if(TARGET ${target_static})
|
||||
foreach(suffix ${ARROW_CONFIG_SUFFIXES})
|
||||
get_target_property(static_lib ${target_static} IMPORTED_LOCATION${suffix})
|
||||
if(static_lib)
|
||||
set(${prefix}_STATIC_LIB "${static_lib}" PARENT_SCOPE)
|
||||
break()
|
||||
endif()
|
||||
endforeach()
|
||||
endif()
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
# Internal macro only for arrow_find_package.
|
||||
#
|
||||
# Find package by pkg-config.
|
||||
macro(arrow_find_package_pkg_config)
|
||||
pkg_check_modules(${prefix}_PC ${pkg_config_name})
|
||||
if(${prefix}_PC_FOUND)
|
||||
set(${prefix}_USE_PKG_CONFIG TRUE PARENT_SCOPE)
|
||||
|
||||
set(include_dir "${${prefix}_PC_INCLUDEDIR}")
|
||||
set(lib_dir "${${prefix}_PC_LIBDIR}")
|
||||
set(shared_lib_paths "${${prefix}_PC_LINK_LIBRARIES}")
|
||||
# Use the first shared library path as the IMPORTED_LOCATION
|
||||
# for ${target_shared}. This assumes that the first shared library
|
||||
# path is the shared library path for this module.
|
||||
list(GET shared_lib_paths 0 first_shared_lib_path)
|
||||
# Use the remaining shared library paths as the INTERFACE_LINK_LIBRARIES
|
||||
# for ${target_shared}. This assumes that the remaining shared library
|
||||
# paths are dependency library paths for this module.
|
||||
list(LENGTH shared_lib_paths n_shared_lib_paths)
|
||||
if(n_shared_lib_paths LESS_EQUAL 1)
|
||||
set(rest_shared_lib_paths)
|
||||
else()
|
||||
list(SUBLIST
|
||||
shared_lib_paths
|
||||
1
|
||||
-1
|
||||
rest_shared_lib_paths)
|
||||
endif()
|
||||
|
||||
set(${prefix}_VERSION "${${prefix}_PC_VERSION}" PARENT_SCOPE)
|
||||
set(${prefix}_INCLUDE_DIR "${include_dir}" PARENT_SCOPE)
|
||||
set(${prefix}_SHARED_LIB "${first_shared_lib_path}" PARENT_SCOPE)
|
||||
|
||||
add_library(${target_shared} SHARED IMPORTED)
|
||||
set_target_properties(${target_shared}
|
||||
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
|
||||
"${include_dir}"
|
||||
INTERFACE_LINK_LIBRARIES
|
||||
"${rest_shared_lib_paths}"
|
||||
IMPORTED_LOCATION
|
||||
"${first_shared_lib_path}")
|
||||
|
||||
find_library(${prefix}_static_lib
|
||||
NAMES "${static_lib_name}"
|
||||
PATHS "${lib_dir}"
|
||||
NO_DEFAULT_PATH)
|
||||
set(static_lib "${${prefix}_static_lib}")
|
||||
set(${prefix}_STATIC_LIB "${static_lib}" PARENT_SCOPE)
|
||||
if(static_lib)
|
||||
add_library(${target_static} STATIC IMPORTED)
|
||||
set_target_properties(${target_static}
|
||||
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${include_dir}"
|
||||
IMPORTED_LOCATION "${static_lib}")
|
||||
endif()
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
function(arrow_find_package
|
||||
prefix
|
||||
home
|
||||
base_name
|
||||
header_path
|
||||
cmake_package_name
|
||||
pkg_config_name)
|
||||
arrow_build_shared_library_name(shared_lib_name ${base_name})
|
||||
arrow_build_import_library_name(import_lib_name ${base_name})
|
||||
arrow_build_static_library_name(static_lib_name ${base_name})
|
||||
|
||||
set(target_shared ${base_name}_shared)
|
||||
set(target_static ${base_name}_static)
|
||||
|
||||
if(home)
|
||||
arrow_find_package_home()
|
||||
set(${prefix}_FIND_APPROACH "HOME: ${home}" PARENT_SCOPE)
|
||||
else()
|
||||
arrow_find_package_cmake_package_configuration()
|
||||
if(${cmake_package_name}_FOUND)
|
||||
set(${prefix}_FIND_APPROACH
|
||||
"CMake package configuration: ${cmake_package_name}"
|
||||
PARENT_SCOPE)
|
||||
else()
|
||||
arrow_find_package_pkg_config()
|
||||
set(${prefix}_FIND_APPROACH "pkg-config: ${pkg_config_name}" PARENT_SCOPE)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT include_dir)
|
||||
if(TARGET ${target_shared})
|
||||
get_target_property(include_dir ${target_shared} INTERFACE_INCLUDE_DIRECTORIES)
|
||||
elseif(TARGET ${target_static})
|
||||
get_target_property(include_dir ${target_static} INTERFACE_INCLUDE_DIRECTORIES)
|
||||
endif()
|
||||
endif()
|
||||
if(include_dir)
|
||||
set(${prefix}_INCLUDE_DIR "${include_dir}" PARENT_SCOPE)
|
||||
endif()
|
||||
|
||||
if(shared_lib)
|
||||
get_filename_component(lib_dir "${shared_lib}" DIRECTORY)
|
||||
elseif(static_lib)
|
||||
get_filename_component(lib_dir "${static_lib}" DIRECTORY)
|
||||
else()
|
||||
set(lib_dir NOTFOUND)
|
||||
endif()
|
||||
set(${prefix}_LIB_DIR "${lib_dir}" PARENT_SCOPE)
|
||||
# For backward compatibility
|
||||
set(${prefix}_LIBS "${lib_dir}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
if(NOT "$ENV{ARROW_HOME}" STREQUAL "")
|
||||
file(TO_CMAKE_PATH "$ENV{ARROW_HOME}" ARROW_HOME)
|
||||
endif()
|
||||
arrow_find_package(ARROW
|
||||
"${ARROW_HOME}"
|
||||
arrow
|
||||
arrow/api.h
|
||||
Arrow
|
||||
arrow)
|
||||
|
||||
if(ARROW_HOME)
|
||||
if(ARROW_INCLUDE_DIR)
|
||||
file(READ "${ARROW_INCLUDE_DIR}/arrow/util/config.h" ARROW_CONFIG_H_CONTENT)
|
||||
arrow_extract_macro_value(ARROW_VERSION_MAJOR "ARROW_VERSION_MAJOR"
|
||||
"${ARROW_CONFIG_H_CONTENT}")
|
||||
arrow_extract_macro_value(ARROW_VERSION_MINOR "ARROW_VERSION_MINOR"
|
||||
"${ARROW_CONFIG_H_CONTENT}")
|
||||
arrow_extract_macro_value(ARROW_VERSION_PATCH "ARROW_VERSION_PATCH"
|
||||
"${ARROW_CONFIG_H_CONTENT}")
|
||||
if("${ARROW_VERSION_MAJOR}" STREQUAL ""
|
||||
OR "${ARROW_VERSION_MINOR}" STREQUAL ""
|
||||
OR "${ARROW_VERSION_PATCH}" STREQUAL "")
|
||||
set(ARROW_VERSION "0.0.0")
|
||||
else()
|
||||
set(ARROW_VERSION
|
||||
"${ARROW_VERSION_MAJOR}.${ARROW_VERSION_MINOR}.${ARROW_VERSION_PATCH}")
|
||||
endif()
|
||||
|
||||
arrow_extract_macro_value(ARROW_SO_VERSION_QUOTED "ARROW_SO_VERSION"
|
||||
"${ARROW_CONFIG_H_CONTENT}")
|
||||
string(REGEX REPLACE "^\"(.+)\"$" "\\1" ARROW_SO_VERSION "${ARROW_SO_VERSION_QUOTED}")
|
||||
arrow_extract_macro_value(ARROW_FULL_SO_VERSION_QUOTED "ARROW_FULL_SO_VERSION"
|
||||
"${ARROW_CONFIG_H_CONTENT}")
|
||||
string(REGEX
|
||||
REPLACE "^\"(.+)\"$" "\\1" ARROW_FULL_SO_VERSION
|
||||
"${ARROW_FULL_SO_VERSION_QUOTED}")
|
||||
endif()
|
||||
else()
|
||||
if(ARROW_USE_CMAKE_PACKAGE_CONFIG)
|
||||
find_package(Arrow CONFIG)
|
||||
elseif(ARROW_USE_PKG_CONFIG)
|
||||
pkg_get_variable(ARROW_SO_VERSION arrow so_version)
|
||||
pkg_get_variable(ARROW_FULL_SO_VERSION arrow full_so_version)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
set(ARROW_ABI_VERSION ${ARROW_SO_VERSION})
|
||||
|
||||
mark_as_advanced(ARROW_ABI_VERSION
|
||||
ARROW_CONFIG_SUFFIXES
|
||||
ARROW_FULL_SO_VERSION
|
||||
ARROW_IMPORT_LIB
|
||||
ARROW_INCLUDE_DIR
|
||||
ARROW_LIBS
|
||||
ARROW_LIB_DIR
|
||||
ARROW_SEARCH_LIB_PATH_SUFFIXES
|
||||
ARROW_SHARED_IMP_LIB
|
||||
ARROW_SHARED_LIB
|
||||
ARROW_SO_VERSION
|
||||
ARROW_STATIC_LIB
|
||||
ARROW_VERSION
|
||||
ARROW_VERSION_MAJOR
|
||||
ARROW_VERSION_MINOR
|
||||
ARROW_VERSION_PATCH)
|
||||
|
||||
find_package_handle_standard_args(Arrow REQUIRED_VARS
|
||||
# The first required variable is shown
|
||||
# in the found message. So this list is
|
||||
# not sorted alphabetically.
|
||||
ARROW_INCLUDE_DIR
|
||||
ARROW_LIB_DIR
|
||||
ARROW_FULL_SO_VERSION
|
||||
ARROW_SO_VERSION
|
||||
VERSION_VAR
|
||||
ARROW_VERSION)
|
||||
set(ARROW_FOUND ${Arrow_FOUND})
|
||||
|
||||
if(Arrow_FOUND AND NOT Arrow_FIND_QUIETLY)
|
||||
message(STATUS "Arrow version: ${ARROW_VERSION} (${ARROW_FIND_APPROACH})")
|
||||
message(STATUS "Arrow SO and ABI version: ${ARROW_SO_VERSION}")
|
||||
message(STATUS "Arrow full SO version: ${ARROW_FULL_SO_VERSION}")
|
||||
message(STATUS "Found the Arrow core shared library: ${ARROW_SHARED_LIB}")
|
||||
message(STATUS "Found the Arrow core import library: ${ARROW_IMPORT_LIB}")
|
||||
message(STATUS "Found the Arrow core static library: ${ARROW_STATIC_LIB}")
|
||||
endif()
|
|
@ -0,0 +1,44 @@
|
|||
set(FAISS_STATIC_LIB_NAME ${CMAKE_STATIC_LIBRARY_PREFIX}faiss${CMAKE_STATIC_LIBRARY_SUFFIX})
|
||||
|
||||
# First, try to find FAISS under FAISS_ROOT if it is specified
|
||||
if (FAISS_ROOT)
|
||||
find_library(FAISS_STATIC_LIB
|
||||
NAMES ${FAISS_STATIC_LIB_NAME}
|
||||
PATHS ${FAISS_ROOT}
|
||||
PATH_SUFFIXES "lib"
|
||||
NO_DEFAULT_PATH
|
||||
)
|
||||
find_path(FAISS_INCLUDE_DIR
|
||||
NAMES "faiss/Index.h"
|
||||
PATHS ${FAISS_ROOT}
|
||||
NO_DEFAULT_PATH
|
||||
PATH_SUFFIXES "include"
|
||||
)
|
||||
endif ()
|
||||
|
||||
find_package_handle_standard_args(FAISS REQUIRED_VARS FAISS_STATIC_LIB FAISS_INCLUDE_DIR)
|
||||
|
||||
if (FAISS_FOUND)
|
||||
if (NOT TARGET faiss)
|
||||
add_library(faiss STATIC IMPORTED)
|
||||
|
||||
set_target_properties(
|
||||
faiss
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION "${FAISS_STATIC_LIB}"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}"
|
||||
)
|
||||
|
||||
if (FAISS_WITH_MKL)
|
||||
set_target_properties(
|
||||
faiss
|
||||
PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES "${MKL_LIBS}")
|
||||
else ()
|
||||
set_target_properties(
|
||||
faiss
|
||||
PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES "openblas;lapack")
|
||||
endif ()
|
||||
endif ()
|
||||
endif ()
|
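#
# Illustrative usage sketch (assumption, not part of this module): point
# FAISS_ROOT at a prebuilt FAISS tree and link the imported "faiss" target.
# The path and the "my_ivf_index" target are hypothetical placeholders.
#   set(FAISS_ROOT "/opt/faiss")
#   find_package(FAISS MODULE REQUIRED)
#   target_link_libraries(my_ivf_index PRIVATE faiss)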
|
@ -16,43 +16,47 @@
|
|||
|
||||
set(KNOWHERE_THIRDPARTY_DEPENDENCIES
|
||||
|
||||
ARROW
|
||||
Arrow
|
||||
FAISS
|
||||
GTest
|
||||
LAPACK
|
||||
OpenBLAS
|
||||
MKL
|
||||
)
|
||||
|
||||
message(STATUS "Using ${KNOWHERE_DEPENDENCY_SOURCE} approach to find dependencies")
|
||||
|
||||
# For each dependency, set dependency source to global default, if unset
|
||||
foreach(DEPENDENCY ${KNOWHERE_THIRDPARTY_DEPENDENCIES})
|
||||
if("${${DEPENDENCY}_SOURCE}" STREQUAL "")
|
||||
foreach (DEPENDENCY ${KNOWHERE_THIRDPARTY_DEPENDENCIES})
|
||||
if ("${${DEPENDENCY}_SOURCE}" STREQUAL "")
|
||||
set(${DEPENDENCY}_SOURCE ${KNOWHERE_DEPENDENCY_SOURCE})
|
||||
endif()
|
||||
endforeach()
|
||||
endif ()
|
||||
endforeach ()
|
||||
|
||||
macro(build_dependency DEPENDENCY_NAME)
|
||||
if("${DEPENDENCY_NAME}" STREQUAL "ARROW")
|
||||
if ("${DEPENDENCY_NAME}" STREQUAL "Arrow")
|
||||
build_arrow()
|
||||
elseif("${DEPENDENCY_NAME}" STREQUAL "LAPACK")
|
||||
elseif ("${DEPENDENCY_NAME}" STREQUAL "LAPACK")
|
||||
build_lapack()
|
||||
elseif ("${DEPENDENCY_NAME}" STREQUAL "GTest")
|
||||
build_gtest()
|
||||
elseif ("${DEPENDENCY_NAME}" STREQUAL "OpenBLAS")
|
||||
build_openblas()
|
||||
elseif("${DEPENDENCY_NAME}" STREQUAL "FAISS")
|
||||
elseif ("${DEPENDENCY_NAME}" STREQUAL "FAISS")
|
||||
build_faiss()
|
||||
else()
|
||||
elseif ("${DEPENDENCY_NAME}" STREQUAL "MKL")
|
||||
build_mkl()
|
||||
else ()
|
||||
message(FATAL_ERROR "Unknown thirdparty dependency to build: ${DEPENDENCY_NAME}")
|
||||
endif ()
|
||||
endmacro()
|
||||
|
||||
macro(resolve_dependency DEPENDENCY_NAME)
|
||||
if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO")
|
||||
#message(STATUS "Finding ${DEPENDENCY_NAME} package")
|
||||
#message(STATUS "${DEPENDENCY_NAME} package not found")
|
||||
build_dependency(${DEPENDENCY_NAME})
|
||||
find_package(${DEPENDENCY_NAME} MODULE)
|
||||
if (NOT ${${DEPENDENCY_NAME}_FOUND})
|
||||
build_dependency(${DEPENDENCY_NAME})
|
||||
endif ()
|
||||
elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "BUNDLED")
|
||||
build_dependency(${DEPENDENCY_NAME})
|
||||
elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "SYSTEM")
|
||||
|
@ -64,28 +68,28 @@ endmacro()
|
|||
# Identify OS
|
||||
if (UNIX)
|
||||
if (APPLE)
|
||||
set (CMAKE_OS_NAME "osx" CACHE STRING "Operating system name" FORCE)
|
||||
set(CMAKE_OS_NAME "osx" CACHE STRING "Operating system name" FORCE)
|
||||
else (APPLE)
|
||||
## Check for Debian GNU/Linux ________________
|
||||
find_file (DEBIAN_FOUND debian_version debconf.conf
|
||||
find_file(DEBIAN_FOUND debian_version debconf.conf
|
||||
PATHS /etc
|
||||
)
|
||||
if (DEBIAN_FOUND)
|
||||
set (CMAKE_OS_NAME "debian" CACHE STRING "Operating system name" FORCE)
|
||||
set(CMAKE_OS_NAME "debian" CACHE STRING "Operating system name" FORCE)
|
||||
endif (DEBIAN_FOUND)
|
||||
## Check for Fedora _________________________
|
||||
find_file (FEDORA_FOUND fedora-release
|
||||
find_file(FEDORA_FOUND fedora-release
|
||||
PATHS /etc
|
||||
)
|
||||
if (FEDORA_FOUND)
|
||||
set (CMAKE_OS_NAME "fedora" CACHE STRING "Operating system name" FORCE)
|
||||
set(CMAKE_OS_NAME "fedora" CACHE STRING "Operating system name" FORCE)
|
||||
endif (FEDORA_FOUND)
|
||||
## Check for RedHat _________________________
|
||||
find_file (REDHAT_FOUND redhat-release inittab.RH
|
||||
find_file(REDHAT_FOUND redhat-release inittab.RH
|
||||
PATHS /etc
|
||||
)
|
||||
if (REDHAT_FOUND)
|
||||
set (CMAKE_OS_NAME "redhat" CACHE STRING "Operating system name" FORCE)
|
||||
set(CMAKE_OS_NAME "redhat" CACHE STRING "Operating system name" FORCE)
|
||||
endif (REDHAT_FOUND)
|
||||
## Extra check for Ubuntu ____________________
|
||||
if (DEBIAN_FOUND)
|
||||
|
@ -94,18 +98,18 @@ if (UNIX)
|
|||
## a first superficial inspection a system will
|
||||
## be considered as Debian, which signifies an
|
||||
## extra check is required.
|
||||
find_file (UBUNTU_EXTRA legal issue
|
||||
find_file(UBUNTU_EXTRA legal issue
|
||||
PATHS /etc
|
||||
)
|
||||
if (UBUNTU_EXTRA)
|
||||
## Scan contents of file
|
||||
file (STRINGS ${UBUNTU_EXTRA} UBUNTU_FOUND
|
||||
file(STRINGS ${UBUNTU_EXTRA} UBUNTU_FOUND
|
||||
REGEX Ubuntu
|
||||
)
|
||||
## Check result of string search
|
||||
if (UBUNTU_FOUND)
|
||||
set (CMAKE_OS_NAME "ubuntu" CACHE STRING "Operating system name" FORCE)
|
||||
set (DEBIAN_FOUND FALSE)
|
||||
set(CMAKE_OS_NAME "ubuntu" CACHE STRING "Operating system name" FORCE)
|
||||
set(DEBIAN_FOUND FALSE)
|
||||
endif (UBUNTU_FOUND)
|
||||
endif (UBUNTU_EXTRA)
|
||||
endif (DEBIAN_FOUND)
|
||||
|
@ -119,28 +123,17 @@ set(THIRDPARTY_DIR "${INDEX_SOURCE_DIR}/thirdparty")
|
|||
|
||||
# ----------------------------------------------------------------------
|
||||
# JFrog
|
||||
if(NOT DEFINED USE_JFROG_CACHE)
|
||||
if (NOT DEFINED USE_JFROG_CACHE)
|
||||
set(USE_JFROG_CACHE "OFF")
|
||||
endif()
|
||||
if(USE_JFROG_CACHE STREQUAL "ON")
|
||||
endif ()
|
||||
if (USE_JFROG_CACHE STREQUAL "ON")
|
||||
set(JFROG_ARTFACTORY_CACHE_URL "${JFROG_ARTFACTORY_URL}/milvus/thirdparty/cache/${CMAKE_OS_NAME}/${KNOWHERE_BUILD_ARCH}/${BUILD_TYPE}")
|
||||
set(THIRDPARTY_PACKAGE_CACHE "${THIRDPARTY_DIR}/cache")
|
||||
if(NOT EXISTS ${THIRDPARTY_PACKAGE_CACHE})
|
||||
if (NOT EXISTS ${THIRDPARTY_PACKAGE_CACHE})
|
||||
message(STATUS "Will create cached directory: ${THIRDPARTY_PACKAGE_CACHE}")
|
||||
file(MAKE_DIRECTORY ${THIRDPARTY_PACKAGE_CACHE})
|
||||
endif()
|
||||
endif()
|
||||
|
||||
macro(resolve_dependency DEPENDENCY_NAME)
|
||||
if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO")
|
||||
#disable find_package for now
|
||||
build_dependency(${DEPENDENCY_NAME})
|
||||
elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "BUNDLED")
|
||||
build_dependency(${DEPENDENCY_NAME})
|
||||
elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "SYSTEM")
|
||||
find_package(${DEPENDENCY_NAME} REQUIRED)
|
||||
endif ()
|
||||
endmacro()
|
||||
endif ()
|
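#
# Illustrative sketch (assumption, not shown in this hunk): each third-party
# package is resolved once via the macro above; depending on
# ${DEPENDENCY}_SOURCE it is either found on the system or built through the
# matching build_*() macro, e.g.:
#   resolve_dependency(OpenBLAS)
#   resolve_dependency(LAPACK)
#   resolve_dependency(FAISS)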
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# ExternalProject options
|
||||
|
@ -150,11 +143,11 @@ string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_BUILD_TYPE)
|
|||
set(EP_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${UPPERCASE_BUILD_TYPE}}")
|
||||
set(EP_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${UPPERCASE_BUILD_TYPE}}")
|
||||
|
||||
if(NOT MSVC)
|
||||
if (NOT MSVC)
|
||||
# Set -fPIC on all external projects
|
||||
set(EP_CXX_FLAGS "${EP_CXX_FLAGS} -fPIC")
|
||||
set(EP_C_FLAGS "${EP_C_FLAGS} -fPIC")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
# CC/CXX environment variables are captured on the first invocation of the
|
||||
# builder (e.g. make or ninja) instead of when CMake is invoked to build
|
||||
|
@ -164,13 +157,13 @@ endif()
|
|||
set(EP_COMMON_TOOLCHAIN -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER})
|
||||
|
||||
if(CMAKE_AR)
|
||||
if (CMAKE_AR)
|
||||
set(EP_COMMON_TOOLCHAIN ${EP_COMMON_TOOLCHAIN} -DCMAKE_AR=${CMAKE_AR})
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
if(CMAKE_RANLIB)
|
||||
if (CMAKE_RANLIB)
|
||||
set(EP_COMMON_TOOLCHAIN ${EP_COMMON_TOOLCHAIN} -DCMAKE_RANLIB=${CMAKE_RANLIB})
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
# External projects are still able to override the following declarations.
|
||||
# cmake command line will favor the last defined variable when a duplicate is
|
||||
|
@ -184,18 +177,18 @@ set(EP_COMMON_CMAKE_ARGS
|
|||
-DCMAKE_CXX_FLAGS=${EP_CXX_FLAGS}
|
||||
-DCMAKE_CXX_FLAGS_${UPPERCASE_BUILD_TYPE}=${EP_CXX_FLAGS})
|
||||
|
||||
if(NOT KNOWHERE_VERBOSE_THIRDPARTY_BUILD)
|
||||
if (NOT KNOWHERE_VERBOSE_THIRDPARTY_BUILD)
|
||||
set(EP_LOG_OPTIONS LOG_CONFIGURE 1 LOG_BUILD 1 LOG_INSTALL 1 LOG_DOWNLOAD 1)
|
||||
else()
|
||||
else ()
|
||||
set(EP_LOG_OPTIONS)
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
# Ensure that a default make is set
|
||||
if("${MAKE}" STREQUAL "")
|
||||
if(NOT MSVC)
|
||||
if ("${MAKE}" STREQUAL "")
|
||||
if (NOT MSVC)
|
||||
find_program(MAKE make)
|
||||
endif()
|
||||
endif()
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
set(MAKE_BUILD_ARGS "-j8")
|
||||
|
||||
|
@ -212,71 +205,54 @@ find_package(Threads REQUIRED)
|
|||
|
||||
# Read toolchain versions from thirdparty/versions.txt
|
||||
file(STRINGS "${THIRDPARTY_DIR}/versions.txt" TOOLCHAIN_VERSIONS_TXT)
|
||||
foreach(_VERSION_ENTRY ${TOOLCHAIN_VERSIONS_TXT})
|
||||
foreach (_VERSION_ENTRY ${TOOLCHAIN_VERSIONS_TXT})
|
||||
# Exclude comments
|
||||
if(NOT _VERSION_ENTRY MATCHES "^[^#][A-Za-z0-9-_]+_VERSION=")
|
||||
if (NOT _VERSION_ENTRY MATCHES "^[^#][A-Za-z0-9-_]+_VERSION=")
|
||||
continue()
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
string(REGEX MATCH "^[^=]*" _LIB_NAME ${_VERSION_ENTRY})
|
||||
string(REPLACE "${_LIB_NAME}=" "" _LIB_VERSION ${_VERSION_ENTRY})
|
||||
|
||||
# Skip blank or malformed lines
|
||||
if(${_LIB_VERSION} STREQUAL "")
|
||||
if (${_LIB_VERSION} STREQUAL "")
|
||||
continue()
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
# For debugging
|
||||
#message(STATUS "${_LIB_NAME}: ${_LIB_VERSION}")
|
||||
|
||||
set(${_LIB_NAME} "${_LIB_VERSION}")
|
||||
endforeach()
|
||||
endforeach ()
|
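#
# Illustrative shape of thirdparty/versions.txt that the loop above parses
# (the concrete numbers are placeholders, not the real pinned versions):
#   # comment lines and malformed lines are skipped
#   FAISS_VERSION=x.y.z
#   GTEST_VERSION=x.y.z
#   LAPACK_VERSION=vX.Y.Z
#   OPENBLAS_VERSION=vX.Y.Z
# Each entry becomes a CMake variable of the same name, e.g. ${FAISS_VERSION}.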
||||
|
||||
if(CUSTOMIZATION)
|
||||
execute_process(COMMAND wget -q --method HEAD ${FAISS_URL} RESULT_VARIABLE return_code)
|
||||
message(STATUS "Check the remote cache file ${FAISS_URL}. return code = ${return_code}")
|
||||
if (NOT return_code EQUAL 0)
|
||||
MESSAGE(FATAL_ERROR "Can't access to ${FAISS_URL}")
|
||||
else()
|
||||
set(FAISS_SOURCE_URL ${FAISS_URL})
|
||||
# set(FAISS_MD5 "a589663865a8558205533c8ac414278c")
|
||||
# set(FAISS_MD5 "57da9c4f599cc8fa4260488b1c96e1cc") # commit-id 6dbdf75987c34a2c853bd172ea0d384feea8358c branch-0.2.0
|
||||
# set(FAISS_MD5 "21deb1c708490ca40ecb899122c01403") # commit-id 643e48f479637fd947e7b93fa4ca72b38ecc9a39 branch-0.2.0
|
||||
# set(FAISS_MD5 "072db398351cca6e88f52d743bbb9fa0") # commit-id 3a2344d04744166af41ef1a74449d68a315bfe17 branch-0.2.1
|
||||
# set(FAISS_MD5 "c89ea8e655f5cdf58f42486f13614714") # commit-id 9c28a1cbb88f41fa03b03d7204106201ad33276b branch-0.2.1
|
||||
# set(FAISS_MD5 "87fdd86351ffcaf3f80dc26ade63c44b") # commit-id 841a156e67e8e22cd8088e1b58c00afbf2efc30b branch-0.2.1
|
||||
# set(FAISS_MD5 "f3b2ce3364c3fa7febd3aa7fdd0fe380") # commit-id 694e03458e6b69ce8a62502f71f69a614af5af8f branch-0.3.0
|
||||
# set(FAISS_MD5 "bb30722c22390ce5f6759ccb216c1b2a") # commit-id d324db297475286afe107847c7fb7a0f9dc7e90e branch-0.3.0
|
||||
# set(FAISS_MD5 "2293cdb209c3718e3b19f3edae8b32b3") # commit-id a13c1205dc52977a9ad3b33a14efa958604a8bff branch-0.3.0
|
||||
set(FAISS_MD5 "3632192f06b70e5f3fac8e2f08ced503") # commit-id fbd5b7566466c5665bb646f2b2f911c8544090f5 branch-0.3.0
|
||||
endif()
|
||||
else()
|
||||
set(FAISS_SOURCE_URL "https://github.com/JinHai-CN/faiss/archive/1.6.0.tar.gz")
|
||||
if (DEFINED ENV{FAISS_SOURCE_URL})
|
||||
set(FAISS_SOURCE_URL "$ENV{FAISS_SOURCE_URL}")
|
||||
else ()
|
||||
set(FAISS_SOURCE_URL "https://github.com/JinHai-CN/faiss/archive/${FAISS_VERSION}.tar.gz")
|
||||
set(FAISS_MD5 "b02c1a53234f5acc9bea1b0c55524f50")
|
||||
endif()
|
||||
message(STATUS "FAISS URL = ${FAISS_SOURCE_URL}")
|
||||
endif ()
|
||||
|
||||
if(DEFINED ENV{KNOWHERE_ARROW_URL})
|
||||
if (DEFINED ENV{KNOWHERE_ARROW_URL})
|
||||
set(ARROW_SOURCE_URL "$ENV{KNOWHERE_ARROW_URL}")
|
||||
else()
|
||||
else ()
|
||||
set(ARROW_SOURCE_URL
|
||||
"https://github.com/apache/arrow.git"
|
||||
)
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
if (DEFINED ENV{KNOWHERE_GTEST_URL})
|
||||
set(GTEST_SOURCE_URL "$ENV{KNOWHERE_GTEST_URL}")
|
||||
else ()
|
||||
set(GTEST_SOURCE_URL
|
||||
"https://github.com/google/googletest/archive/release-${GTEST_VERSION}.tar.gz")
|
||||
endif()
|
||||
endif ()
|
||||
set(GTEST_MD5 "2e6fbeb6a91310a16efe181886c59596")
|
||||
|
||||
if(DEFINED ENV{KNOWHERE_LAPACK_URL})
|
||||
if (DEFINED ENV{KNOWHERE_LAPACK_URL})
|
||||
set(LAPACK_SOURCE_URL "$ENV{KNOWHERE_LAPACK_URL}")
|
||||
else()
|
||||
else ()
|
||||
set(LAPACK_SOURCE_URL "https://github.com/Reference-LAPACK/lapack/archive/${LAPACK_VERSION}.tar.gz")
|
||||
endif()
|
||||
endif ()
|
||||
set(LAPACK_MD5 "96591affdbf58c450d45c1daa540dbd2")
|
||||
|
||||
if (DEFINED ENV{KNOWHERE_OPENBLAS_URL})
|
||||
|
@ -284,7 +260,7 @@ if (DEFINED ENV{KNOWHERE_OPENBLAS_URL})
|
|||
else ()
|
||||
set(OPENBLAS_SOURCE_URL
|
||||
"https://github.com/xianyi/OpenBLAS/archive/${OPENBLAS_VERSION}.tar.gz")
|
||||
endif()
|
||||
endif ()
|
||||
set(OPENBLAS_MD5 "8a110a25b819a4b94e8a9580702b6495")
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
|
@ -294,10 +270,11 @@ set(ARROW_PREFIX "${INDEX_BINARY_DIR}/arrow_ep-prefix/src/arrow_ep/cpp")
|
|||
macro(build_arrow)
|
||||
message(STATUS "Building Apache ARROW-${ARROW_VERSION} from source")
|
||||
set(ARROW_STATIC_LIB_NAME arrow)
|
||||
set(ARROW_STATIC_LIB
|
||||
set(ARROW_STATIC_LIB
|
||||
"${ARROW_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${ARROW_STATIC_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}"
|
||||
)
|
||||
set(ARROW_INCLUDE_DIR "${ARROW_PREFIX}/include")
|
||||
set(ARROW_LIB_DIR "${ARROW_PREFIX}/lib")
|
||||
set(ARROW_INCLUDE_DIR "${ARROW_PREFIX}/include")
|
||||
|
||||
set(ARROW_CMAKE_ARGS
|
||||
${EP_COMMON_CMAKE_ARGS}
|
||||
|
@ -327,10 +304,10 @@ macro(build_arrow)
|
|||
-DBOOST_SOURCE=AUTO #try to find BOOST in the system default locations and build from source if not found
|
||||
)
|
||||
|
||||
|
||||
if(USE_JFROG_CACHE STREQUAL "ON")
|
||||
|
||||
if (USE_JFROG_CACHE STREQUAL "ON")
|
||||
execute_process(COMMAND sh -c "git ls-remote --heads --tags ${ARROW_SOURCE_URL} ${ARROW_VERSION} | cut -f 1" OUTPUT_VARIABLE ARROW_LAST_COMMIT_ID)
|
||||
if(${ARROW_LAST_COMMIT_ID} MATCHES "^[^#][a-z0-9]+")
|
||||
if (${ARROW_LAST_COMMIT_ID} MATCHES "^[^#][a-z0-9]+")
|
||||
string(MD5 ARROW_COMBINE_MD5 "${ARROW_LAST_COMMIT_ID}")
|
||||
set(ARROW_CACHE_PACKAGE_NAME "arrow_${ARROW_COMBINE_MD5}.tar.gz")
|
||||
set(ARROW_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${ARROW_CACHE_PACKAGE_NAME}")
|
||||
|
@ -360,18 +337,18 @@ macro(build_arrow)
|
|||
)
|
||||
|
||||
ExternalProject_Create_Cache(arrow_ep ${ARROW_CACHE_PACKAGE_PATH} "${INDEX_BINARY_DIR}/arrow_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${ARROW_CACHE_URL})
|
||||
else()
|
||||
else ()
|
||||
file(DOWNLOAD ${ARROW_CACHE_URL} ${ARROW_CACHE_PACKAGE_PATH} STATUS status)
|
||||
list(GET status 0 status_code)
|
||||
message(STATUS "DOWNLOADING FROM ${ARROW_CACHE_URL} TO ${ARROW_CACHE_PACKAGE_PATH}. STATUS = ${status_code}")
|
||||
if (status_code EQUAL 0)
|
||||
ExternalProject_Use_Cache(arrow_ep ${ARROW_CACHE_PACKAGE_PATH} ${INDEX_BINARY_DIR})
|
||||
endif()
|
||||
endif()
|
||||
else()
|
||||
endif ()
|
||||
endif ()
|
||||
else ()
|
||||
message(FATAL_ERROR "The last commit ID of \"${ARROW_SOURCE_URL}\" repository don't match!")
|
||||
endif()
|
||||
else()
|
||||
endif ()
|
||||
else ()
|
||||
externalproject_add(arrow_ep
|
||||
GIT_REPOSITORY
|
||||
${ARROW_SOURCE_URL}
|
||||
|
@ -391,31 +368,31 @@ macro(build_arrow)
|
|||
BUILD_BYPRODUCTS
|
||||
"${ARROW_STATIC_LIB}"
|
||||
)
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
file(MAKE_DIRECTORY "${ARROW_PREFIX}/include")
|
||||
file(MAKE_DIRECTORY "${ARROW_INCLUDE_DIR}")
|
||||
add_library(arrow STATIC IMPORTED)
|
||||
set_target_properties(arrow
|
||||
PROPERTIES IMPORTED_LOCATION "${ARROW_STATIC_LIB}"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${ARROW_INCLUDE_DIR}")
|
||||
add_dependencies(arrow arrow_ep)
|
||||
add_dependencies(arrow arrow_ep)
|
||||
|
||||
set(JEMALLOC_PREFIX "${INDEX_BINARY_DIR}/arrow_ep-prefix/src/arrow_ep-build/jemalloc_ep-prefix/src/jemalloc_ep")
|
||||
|
||||
add_custom_command(TARGET arrow_ep POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${ARROW_PREFIX}/lib/
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${JEMALLOC_PREFIX}/lib/libjemalloc_pic.a ${ARROW_PREFIX}/lib/
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${ARROW_LIB_DIR}
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${JEMALLOC_PREFIX}/lib/libjemalloc_pic.a ${ARROW_LIB_DIR}
|
||||
DEPENDS ${JEMALLOC_PREFIX}/lib/libjemalloc_pic.a)
|
||||
|
||||
endmacro()
|
||||
|
||||
if(KNOWHERE_WITH_ARROW AND NOT TARGET arrow_ep)
|
||||
if (KNOWHERE_WITH_ARROW AND NOT TARGET arrow_ep)
|
||||
|
||||
resolve_dependency(ARROW)
|
||||
resolve_dependency(Arrow)
|
||||
|
||||
link_directories(SYSTEM ${ARROW_PREFIX}/lib/)
|
||||
link_directories(SYSTEM ${ARROW_LIB_DIR})
|
||||
include_directories(SYSTEM ${ARROW_INCLUDE_DIR})
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# OpenBLAS
|
||||
|
@ -429,7 +406,7 @@ macro(build_openblas)
|
|||
set(OPENBLAS_REAL_STATIC_LIB
|
||||
"${OPENBLAS_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}openblas_haswellp-r0.3.6${CMAKE_STATIC_LIBRARY_SUFFIX}")
|
||||
|
||||
if(USE_JFROG_CACHE STREQUAL "ON")
|
||||
if (USE_JFROG_CACHE STREQUAL "ON")
|
||||
set(OPENBLAS_CACHE_PACKAGE_NAME "openblas_${OPENBLAS_MD5}.tar.gz")
|
||||
set(OPENBLAS_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${OPENBLAS_CACHE_PACKAGE_NAME}")
|
||||
set(OPENBLAS_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${OPENBLAS_CACHE_PACKAGE_NAME}")
|
||||
|
@ -456,15 +433,15 @@ macro(build_openblas)
|
|||
${OPENBLAS_STATIC_LIB})
|
||||
|
||||
ExternalProject_Create_Cache(openblas_ep ${OPENBLAS_CACHE_PACKAGE_PATH} "${INDEX_BINARY_DIR}/openblas_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${OPENBLAS_CACHE_URL})
|
||||
else()
|
||||
else ()
|
||||
file(DOWNLOAD ${OPENBLAS_CACHE_URL} ${OPENBLAS_CACHE_PACKAGE_PATH} STATUS status)
|
||||
list(GET status 0 status_code)
|
||||
message(STATUS "DOWNLOADING FROM ${OPENBLAS_CACHE_URL} TO ${OPENBLAS_CACHE_PACKAGE_PATH}. STATUS = ${status_code}")
|
||||
if (status_code EQUAL 0)
|
||||
ExternalProject_Use_Cache(openblas_ep ${OPENBLAS_CACHE_PACKAGE_PATH} ${INDEX_BINARY_DIR})
|
||||
endif()
|
||||
endif()
|
||||
else()
|
||||
endif ()
|
||||
endif ()
|
||||
else ()
|
||||
externalproject_add(openblas_ep
|
||||
URL
|
||||
${OPENBLAS_SOURCE_URL}
|
||||
|
@ -482,7 +459,7 @@ macro(build_openblas)
|
|||
install
|
||||
BUILD_BYPRODUCTS
|
||||
${OPENBLAS_STATIC_LIB})
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
file(MAKE_DIRECTORY "${OPENBLAS_INCLUDE_DIR}")
|
||||
add_library(openblas STATIC IMPORTED)
|
||||
|
@ -511,7 +488,7 @@ macro(build_lapack)
|
|||
"-DCMAKE_INSTALL_PREFIX=${LAPACK_PREFIX}"
|
||||
-DCMAKE_INSTALL_LIBDIR=lib)
|
||||
|
||||
if(USE_JFROG_CACHE STREQUAL "ON")
|
||||
if (USE_JFROG_CACHE STREQUAL "ON")
|
||||
set(LAPACK_CACHE_PACKAGE_NAME "lapack_${LAPACK_MD5}.tar.gz")
|
||||
set(LAPACK_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${LAPACK_CACHE_PACKAGE_NAME}")
|
||||
set(LAPACK_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${LAPACK_CACHE_PACKAGE_NAME}")
|
||||
|
@ -532,15 +509,15 @@ macro(build_lapack)
|
|||
${LAPACK_STATIC_LIB})
|
||||
|
||||
ExternalProject_Create_Cache(lapack_ep ${LAPACK_CACHE_PACKAGE_PATH} "${INDEX_BINARY_DIR}/lapack_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${LAPACK_CACHE_URL})
|
||||
else()
|
||||
else ()
|
||||
file(DOWNLOAD ${LAPACK_CACHE_URL} ${LAPACK_CACHE_PACKAGE_PATH} STATUS status)
|
||||
list(GET status 0 status_code)
|
||||
message(STATUS "DOWNLOADING FROM ${LAPACK_CACHE_URL} TO ${LAPACK_CACHE_PACKAGE_PATH}. STATUS = ${status_code}")
|
||||
if (status_code EQUAL 0)
|
||||
ExternalProject_Use_Cache(lapack_ep ${LAPACK_CACHE_PACKAGE_PATH} ${INDEX_BINARY_DIR})
|
||||
endif()
|
||||
endif()
|
||||
else()
|
||||
endif ()
|
||||
endif ()
|
||||
else ()
|
||||
externalproject_add(lapack_ep
|
||||
URL
|
||||
${LAPACK_SOURCE_URL}
|
||||
|
@ -552,7 +529,7 @@ macro(build_lapack)
|
|||
${MAKE_BUILD_ARGS}
|
||||
BUILD_BYPRODUCTS
|
||||
${LAPACK_STATIC_LIB})
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
file(MAKE_DIRECTORY "${LAPACK_INCLUDE_DIR}")
|
||||
add_library(lapack STATIC IMPORTED)
|
||||
|
@ -572,13 +549,13 @@ macro(build_gtest)
|
|||
set(GTEST_VENDORED TRUE)
|
||||
set(GTEST_CMAKE_CXX_FLAGS "${EP_CXX_FLAGS}")
|
||||
|
||||
if(APPLE)
|
||||
if (APPLE)
|
||||
set(GTEST_CMAKE_CXX_FLAGS
|
||||
${GTEST_CMAKE_CXX_FLAGS}
|
||||
-DGTEST_USE_OWN_TR1_TUPLE=1
|
||||
-Wno-unused-value
|
||||
-Wno-ignored-attributes)
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
set(GTEST_PREFIX "${INDEX_BINARY_DIR}/googletest_ep-prefix/src/googletest_ep")
|
||||
set(GTEST_INCLUDE_DIR "${GTEST_PREFIX}/include")
|
||||
|
@ -597,10 +574,10 @@ macro(build_gtest)
|
|||
set(GMOCK_INCLUDE_DIR "${GTEST_PREFIX}/include")
|
||||
set(GMOCK_STATIC_LIB
|
||||
"${GTEST_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}gmock${CMAKE_STATIC_LIBRARY_SUFFIX}"
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if(USE_JFROG_CACHE STREQUAL "ON")
|
||||
if (USE_JFROG_CACHE STREQUAL "ON")
|
||||
set(GTEST_CACHE_PACKAGE_NAME "googletest_${GTEST_MD5}.tar.gz")
|
||||
set(GTEST_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${GTEST_CACHE_PACKAGE_NAME}")
|
||||
set(GTEST_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${GTEST_CACHE_PACKAGE_NAME}")
|
||||
|
@ -623,15 +600,15 @@ macro(build_gtest)
|
|||
${EP_LOG_OPTIONS})
|
||||
|
||||
ExternalProject_Create_Cache(googletest_ep ${GTEST_CACHE_PACKAGE_PATH} "${INDEX_BINARY_DIR}/googletest_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${GTEST_CACHE_URL})
|
||||
else()
|
||||
else ()
|
||||
file(DOWNLOAD ${GTEST_CACHE_URL} ${GTEST_CACHE_PACKAGE_PATH} STATUS status)
|
||||
list(GET status 0 status_code)
|
||||
message(STATUS "DOWNLOADING FROM ${GTEST_CACHE_URL} TO ${GTEST_CACHE_PACKAGE_PATH}. STATUS = ${status_code}")
|
||||
if (status_code EQUAL 0)
|
||||
ExternalProject_Use_Cache(googletest_ep ${GTEST_CACHE_PACKAGE_PATH} ${INDEX_BINARY_DIR})
|
||||
endif()
|
||||
endif()
|
||||
else()
|
||||
endif ()
|
||||
endif ()
|
||||
else ()
|
||||
ExternalProject_Add(googletest_ep
|
||||
URL
|
||||
${GTEST_SOURCE_URL}
|
||||
|
@ -645,20 +622,20 @@ macro(build_gtest)
|
|||
CMAKE_ARGS
|
||||
${GTEST_CMAKE_ARGS}
|
||||
${EP_LOG_OPTIONS})
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
# The include directory must exist before it is referenced by a target.
|
||||
file(MAKE_DIRECTORY "${GTEST_INCLUDE_DIR}")
|
||||
|
||||
add_library(gtest STATIC IMPORTED)
|
||||
set_target_properties(gtest
|
||||
PROPERTIES IMPORTED_LOCATION "${GTEST_STATIC_LIB}"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIR}")
|
||||
PROPERTIES IMPORTED_LOCATION "${GTEST_STATIC_LIB}"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIR}")
|
||||
|
||||
add_library(gtest_main STATIC IMPORTED)
|
||||
set_target_properties(gtest_main
|
||||
PROPERTIES IMPORTED_LOCATION "${GTEST_MAIN_STATIC_LIB}"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIR}")
|
||||
PROPERTIES IMPORTED_LOCATION "${GTEST_MAIN_STATIC_LIB}"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${GTEST_INCLUDE_DIR}")
|
||||
|
||||
add_library(gmock STATIC IMPORTED)
|
||||
set_target_properties(gmock
|
||||
|
@ -674,46 +651,98 @@ endmacro()
|
|||
if (KNOWHERE_BUILD_TESTS AND NOT TARGET googletest_ep)
|
||||
resolve_dependency(GTest)
|
||||
|
||||
if(NOT GTEST_VENDORED)
|
||||
endif()
|
||||
if (NOT GTEST_VENDORED)
|
||||
endif ()
|
||||
|
||||
# TODO: Don't use global includes but rather target_include_directories
|
||||
get_target_property(GTEST_INCLUDE_DIR gtest INTERFACE_INCLUDE_DIRECTORIES)
|
||||
link_directories(SYSTEM "${GTEST_PREFIX}/lib")
|
||||
include_directories(SYSTEM ${GTEST_INCLUDE_DIR})
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# MKL
|
||||
|
||||
macro(build_mkl)
|
||||
|
||||
if (FAISS_WITH_MKL)
|
||||
if (EXISTS "/proc/cpuinfo")
|
||||
FILE(READ /proc/cpuinfo PROC_CPUINFO)
|
||||
|
||||
SET(VENDOR_ID_RX "vendor_id[ \t]*:[ \t]*([a-zA-Z]+)\n")
|
||||
STRING(REGEX MATCH "${VENDOR_ID_RX}" VENDOR_ID "${PROC_CPUINFO}")
|
||||
STRING(REGEX REPLACE "${VENDOR_ID_RX}" "\\1" VENDOR_ID "${VENDOR_ID}")
|
||||
|
||||
if (NOT ${VENDOR_ID} STREQUAL "GenuineIntel")
|
||||
set(FAISS_WITH_MKL OFF)
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
find_path(MKL_LIB_PATH
|
||||
NAMES "libmkl_intel_ilp64.a" "libmkl_gnu_thread.a" "libmkl_core.a"
|
||||
PATH_SUFFIXES "intel/compilers_and_libraries_${MKL_VERSION}/linux/mkl/lib/intel64/")
|
||||
if (${MKL_LIB_PATH} STREQUAL "MKL_LIB_PATH-NOTFOUND")
|
||||
message(FATAL_ERROR "Could not find MKL libraries")
|
||||
endif ()
|
||||
message(STATUS "MKL lib path = ${MKL_LIB_PATH}")
|
||||
|
||||
set(MKL_LIBS
|
||||
${MKL_LIB_PATH}/libmkl_intel_ilp64.a
|
||||
${MKL_LIB_PATH}/libmkl_gnu_thread.a
|
||||
${MKL_LIB_PATH}/libmkl_core.a
|
||||
)
|
||||
endif ()
|
||||
endmacro()
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# FAISS
|
||||
|
||||
macro(build_faiss)
|
||||
message(STATUS "Building FAISS-${FAISS_VERSION} from source")
|
||||
|
||||
set(FAISS_PREFIX "${INDEX_BINARY_DIR}/faiss_ep-prefix/src/faiss_ep")
|
||||
set(FAISS_INCLUDE_DIR "${FAISS_PREFIX}/include")
|
||||
set(FAISS_STATIC_LIB
|
||||
"${FAISS_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}faiss${CMAKE_STATIC_LIBRARY_SUFFIX}")
|
||||
|
||||
|
||||
set(FAISS_CONFIGURE_ARGS
|
||||
"--prefix=${FAISS_PREFIX}"
|
||||
"CFLAGS=${EP_C_FLAGS}"
|
||||
"CXXFLAGS=${EP_CXX_FLAGS}"
|
||||
"LDFLAGS=-L${OPENBLAS_PREFIX}/lib -L${LAPACK_PREFIX}/lib -lopenblas -llapack"
|
||||
--without-python)
|
||||
|
||||
|
||||
if(${KNOWHERE_WITH_FAISS_GPU_VERSION} STREQUAL "ON")
|
||||
if (FAISS_WITH_MKL)
|
||||
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS}
|
||||
"CPPFLAGS=-DFINTEGER=long -DMKL_ILP64 -m64 -I${MKL_LIB_PATH}/../../include"
|
||||
"LDFLAGS=-L${MKL_LIB_PATH}"
|
||||
)
|
||||
else ()
|
||||
message(STATUS "Build Faiss with OpenBlas/LAPACK")
|
||||
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS}
|
||||
"LDFLAGS=-L${OPENBLAS_PREFIX}/lib -L${LAPACK_PREFIX}/lib")
|
||||
endif ()
|
||||
|
||||
if (KNOWHERE_GPU_VERSION)
|
||||
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS}
|
||||
"--with-cuda=${CUDA_TOOLKIT_ROOT_DIR}"
|
||||
"--with-cuda-arch=-gencode=arch=compute_60,code=sm_60 -gencode=arch=compute_61,code=sm_61 -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_75,code=sm_75"
|
||||
)
|
||||
else()
|
||||
else ()
|
||||
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS} --without-cuda)
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
if(USE_JFROG_CACHE STREQUAL "ON")
|
||||
if (USE_JFROG_CACHE STREQUAL "ON")
|
||||
string(MD5 FAISS_COMBINE_MD5 "${FAISS_MD5}${LAPACK_MD5}${OPENBLAS_MD5}")
|
||||
set(FAISS_CACHE_PACKAGE_NAME "faiss_${FAISS_COMBINE_MD5}.tar.gz")
|
||||
if (KNOWHERE_GPU_VERSION)
|
||||
set(FAISS_COMPUTE_TYPE "gpu")
|
||||
else ()
|
||||
set(FAISS_COMPUTE_TYPE "cpu")
|
||||
endif()
|
||||
if (FAISS_WITH_MKL)
|
||||
set(FAISS_CACHE_PACKAGE_NAME "faiss_${FAISS_COMPUTE_TYPE}_mkl_${FAISS_COMBINE_MD5}.tar.gz")
|
||||
else ()
|
||||
set(FAISS_CACHE_PACKAGE_NAME "faiss_${FAISS_COMPUTE_TYPE}_openblas_${FAISS_COMBINE_MD5}.tar.gz")
|
||||
endif()
|
||||
set(FAISS_CACHE_URL "${JFROG_ARTFACTORY_CACHE_URL}/${FAISS_CACHE_PACKAGE_NAME}")
|
||||
set(FAISS_CACHE_PACKAGE_PATH "${THIRDPARTY_PACKAGE_CACHE}/${FAISS_CACHE_PACKAGE_NAME}")
|
||||
|
||||
|
@ -736,18 +765,20 @@ macro(build_faiss)
|
|||
BUILD_BYPRODUCTS
|
||||
${FAISS_STATIC_LIB})
|
||||
|
||||
ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep)
|
||||
if (NOT FAISS_WITH_MKL)
|
||||
ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep)
|
||||
endif ()
|
||||
|
||||
ExternalProject_Create_Cache(faiss_ep ${FAISS_CACHE_PACKAGE_PATH} "${INDEX_BINARY_DIR}/faiss_ep-prefix" ${JFROG_USER_NAME} ${JFROG_PASSWORD} ${FAISS_CACHE_URL})
|
||||
else()
|
||||
else ()
|
||||
file(DOWNLOAD ${FAISS_CACHE_URL} ${FAISS_CACHE_PACKAGE_PATH} STATUS status)
|
||||
list(GET status 0 status_code)
|
||||
message(STATUS "DOWNLOADING FROM ${FAISS_CACHE_URL} TO ${FAISS_CACHE_PACKAGE_PATH}. STATUS = ${status_code}")
|
||||
if (status_code EQUAL 0)
|
||||
ExternalProject_Use_Cache(faiss_ep ${FAISS_CACHE_PACKAGE_PATH} ${INDEX_BINARY_DIR})
|
||||
endif()
|
||||
endif()
|
||||
else()
|
||||
endif ()
|
||||
endif ()
|
||||
else ()
|
||||
externalproject_add(faiss_ep
|
||||
URL
|
||||
${FAISS_SOURCE_URL}
|
||||
|
@ -764,35 +795,56 @@ macro(build_faiss)
|
|||
BUILD_BYPRODUCTS
|
||||
${FAISS_STATIC_LIB})
|
||||
|
||||
ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep)
|
||||
endif()
|
||||
if (NOT FAISS_WITH_MKL)
|
||||
ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep)
|
||||
endif ()
|
||||
|
||||
endif ()
|
||||
|
||||
file(MAKE_DIRECTORY "${FAISS_INCLUDE_DIR}")
|
||||
add_library(faiss STATIC IMPORTED)
|
||||
|
||||
set_target_properties(
|
||||
faiss
|
||||
PROPERTIES IMPORTED_LOCATION "${FAISS_STATIC_LIB}"
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION "${FAISS_STATIC_LIB}"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}"
|
||||
INTERFACE_LINK_LIBRARIES "openblas;lapack" )
|
||||
)
|
||||
if (FAISS_WITH_MKL)
|
||||
set_target_properties(
|
||||
faiss
|
||||
PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES "${MKL_LIBS}")
|
||||
else ()
|
||||
set_target_properties(
|
||||
faiss
|
||||
PROPERTIES
|
||||
INTERFACE_LINK_LIBRARIES "openblas;lapack")
|
||||
endif ()
|
||||
|
||||
|
||||
add_dependencies(faiss faiss_ep)
|
||||
|
||||
endmacro()
|
||||
|
||||
if(KNOWHERE_WITH_FAISS AND NOT TARGET faiss_ep)
|
||||
if (KNOWHERE_WITH_FAISS AND NOT TARGET faiss_ep)
|
||||
|
||||
resolve_dependency(OpenBLAS)
|
||||
get_target_property(OPENBLAS_INCLUDE_DIR openblas INTERFACE_INCLUDE_DIRECTORIES)
|
||||
include_directories(SYSTEM "${OPENBLAS_INCLUDE_DIR}")
|
||||
link_directories(SYSTEM ${OPENBLAS_PREFIX}/lib)
|
||||
if (FAISS_WITH_MKL)
|
||||
resolve_dependency(MKL)
|
||||
else ()
|
||||
resolve_dependency(OpenBLAS)
|
||||
get_target_property(OPENBLAS_INCLUDE_DIR openblas INTERFACE_INCLUDE_DIRECTORIES)
|
||||
include_directories(SYSTEM "${OPENBLAS_INCLUDE_DIR}")
|
||||
link_directories(SYSTEM ${OPENBLAS_PREFIX}/lib)
|
||||
|
||||
resolve_dependency(LAPACK)
|
||||
get_target_property(LAPACK_INCLUDE_DIR lapack INTERFACE_INCLUDE_DIRECTORIES)
|
||||
include_directories(SYSTEM "${LAPACK_INCLUDE_DIR}")
|
||||
link_directories(SYSTEM "${LAPACK_PREFIX}/lib")
|
||||
resolve_dependency(LAPACK)
|
||||
get_target_property(LAPACK_INCLUDE_DIR lapack INTERFACE_INCLUDE_DIRECTORIES)
|
||||
include_directories(SYSTEM "${LAPACK_INCLUDE_DIR}")
|
||||
link_directories(SYSTEM "${LAPACK_PREFIX}/lib")
|
||||
endif ()
|
||||
|
||||
resolve_dependency(FAISS)
|
||||
get_target_property(FAISS_INCLUDE_DIR faiss INTERFACE_INCLUDE_DIRECTORIES)
|
||||
include_directories(SYSTEM "${FAISS_INCLUDE_DIR}")
|
||||
link_directories(SYSTEM ${FAISS_PREFIX}/lib/)
|
||||
endif()
|
||||
endif ()
|
||||
|
|
|
@ -1,6 +1,3 @@
|
|||
include_directories(${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
|
||||
link_directories(${CUDA_TOOLKIT_ROOT_DIR}/lib64)
|
||||
|
||||
include_directories(${INDEX_SOURCE_DIR}/knowhere)
|
||||
include_directories(${INDEX_SOURCE_DIR}/thirdparty)
|
||||
include_directories(${INDEX_SOURCE_DIR}/thirdparty/SPTAG/AnnService)
|
||||
|
@ -19,9 +16,9 @@ file(GLOB SRC_FILES
|
|||
${SPTAG_SOURCE_DIR}/AnnService/src/Core/KDT/*.cpp
|
||||
${SPTAG_SOURCE_DIR}/AnnService/src/Helper/*.cpp)
|
||||
|
||||
if(NOT TARGET SPTAGLibStatic)
|
||||
if (NOT TARGET SPTAGLibStatic)
|
||||
add_library(SPTAGLibStatic STATIC ${SRC_FILES} ${HDR_FILES})
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
set(external_srcs
|
||||
knowhere/adapter/SptagAdapter.cpp
|
||||
|
@ -33,22 +30,16 @@ set(external_srcs
|
|||
|
||||
set(index_srcs
|
||||
knowhere/index/preprocessor/Normalize.cpp
|
||||
knowhere/index/vector_index/IndexKDT.cpp
|
||||
knowhere/index/vector_index/IndexSPTAG.cpp
|
||||
knowhere/index/vector_index/IndexIDMAP.cpp
|
||||
knowhere/index/vector_index/IndexIVF.cpp
|
||||
knowhere/index/vector_index/IndexGPUIVF.cpp
|
||||
knowhere/index/vector_index/helpers/KDTParameterMgr.cpp
|
||||
knowhere/index/vector_index/helpers/SPTAGParameterMgr.cpp
|
||||
knowhere/index/vector_index/IndexNSG.cpp
|
||||
knowhere/index/vector_index/nsg/NSG.cpp
|
||||
knowhere/index/vector_index/nsg/NSGIO.cpp
|
||||
knowhere/index/vector_index/nsg/NSGHelper.cpp
|
||||
knowhere/index/vector_index/helpers/Cloner.cpp
|
||||
knowhere/index/vector_index/helpers/FaissGpuResourceMgr.cpp
|
||||
knowhere/index/vector_index/IndexIVFSQ.cpp
|
||||
knowhere/index/vector_index/IndexGPUIVFSQ.cpp
|
||||
knowhere/index/vector_index/IndexIVFSQHybrid.cpp
|
||||
knowhere/index/vector_index/IndexIVFPQ.cpp
|
||||
knowhere/index/vector_index/IndexGPUIVFPQ.cpp
|
||||
knowhere/index/vector_index/FaissBaseIndex.cpp
|
||||
knowhere/index/vector_index/helpers/FaissIO.cpp
|
||||
knowhere/index/vector_index/helpers/IndexParameter.cpp
|
||||
|
@ -57,24 +48,56 @@ set(index_srcs
|
|||
set(depend_libs
|
||||
SPTAGLibStatic
|
||||
faiss
|
||||
openblas
|
||||
lapack
|
||||
arrow
|
||||
${ARROW_PREFIX}/lib/libjemalloc_pic.a
|
||||
cudart
|
||||
cublas
|
||||
${ARROW_LIB_DIR}/libjemalloc_pic.a
|
||||
gomp
|
||||
gfortran
|
||||
pthread
|
||||
)
|
||||
if (FAISS_WITH_MKL)
|
||||
set(depend_libs ${depend_libs}
|
||||
"-Wl,--start-group \
|
||||
${MKL_LIB_PATH}/libmkl_intel_ilp64.a \
|
||||
${MKL_LIB_PATH}/libmkl_gnu_thread.a \
|
||||
${MKL_LIB_PATH}/libmkl_core.a \
|
||||
-Wl,--end-group -lgomp -lpthread -lm -ldl"
|
||||
)
|
||||
else ()
|
||||
set(depend_libs ${depend_libs}
|
||||
lapack
|
||||
openblas)
|
||||
endif ()
|
||||
|
||||
if(NOT TARGET knowhere)
|
||||
if (KNOWHERE_GPU_VERSION)
|
||||
include_directories(${CUDA_INCLUDE_DIRS})
|
||||
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
|
||||
set(cuda_lib
|
||||
cudart
|
||||
cublas
|
||||
)
|
||||
set(depend_libs ${depend_libs}
|
||||
${cuda_lib}
|
||||
)
|
||||
|
||||
set(index_srcs ${index_srcs}
|
||||
knowhere/index/vector_index/IndexGPUIVF.cpp
|
||||
knowhere/index/vector_index/helpers/Cloner.cpp
|
||||
knowhere/index/vector_index/helpers/FaissGpuResourceMgr.cpp
|
||||
knowhere/index/vector_index/IndexGPUIVFSQ.cpp
|
||||
knowhere/index/vector_index/IndexIVFSQHybrid.cpp
|
||||
knowhere/index/vector_index/IndexGPUIVFPQ.cpp
|
||||
knowhere/index/vector_index/IndexGPUIDMAP.cpp
|
||||
)
|
||||
|
||||
endif ()
|
||||
|
||||
if (NOT TARGET knowhere)
|
||||
add_library(
|
||||
knowhere STATIC
|
||||
${external_srcs}
|
||||
${index_srcs}
|
||||
)
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
target_link_libraries(
|
||||
knowhere
|
||||
|
|
|
@ -89,34 +89,35 @@ ConvertToDataset(std::vector<SPTAG::QueryResult> query_results) {
|
|||
}
|
||||
}
|
||||
|
||||
auto id_buf = MakeMutableBufferSmart((uint8_t*)p_id, sizeof(int64_t) * elems);
|
||||
auto dist_buf = MakeMutableBufferSmart((uint8_t*)p_dist, sizeof(float) * elems);
|
||||
|
||||
// TODO: magic
|
||||
std::vector<BufferPtr> id_bufs{nullptr, id_buf};
|
||||
std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
|
||||
|
||||
auto int64_type = std::make_shared<arrow::Int64Type>();
|
||||
auto float_type = std::make_shared<arrow::FloatType>();
|
||||
|
||||
auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
|
||||
auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
|
||||
// auto id_array_data = std::make_shared<ArrayData>(int64_type, sizeof(int64_t) * elems, id_bufs);
|
||||
// auto dist_array_data = std::make_shared<ArrayData>(float_type, sizeof(float) * elems, dist_bufs);
|
||||
|
||||
// auto ids = ConstructInt64Array((uint8_t*)p_id, sizeof(int64_t) * elems);
|
||||
// auto dists = ConstructFloatArray((uint8_t*)p_dist, sizeof(float) * elems);
|
||||
|
||||
auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
|
||||
auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
|
||||
std::vector<ArrayPtr> array{ids, dists};
|
||||
|
||||
auto field_id = std::make_shared<Field>("id", std::make_shared<arrow::Int64Type>());
|
||||
auto field_dist = std::make_shared<Field>("dist", std::make_shared<arrow::FloatType>());
|
||||
std::vector<FieldPtr> fields{field_id, field_dist};
|
||||
auto schema = std::make_shared<Schema>(fields);
|
||||
|
||||
return std::make_shared<Dataset>(array, schema);
|
||||
// auto id_buf = MakeMutableBufferSmart((uint8_t*)p_id, sizeof(int64_t) * elems);
|
||||
// auto dist_buf = MakeMutableBufferSmart((uint8_t*)p_dist, sizeof(float) * elems);
|
||||
//
|
||||
// // TODO: magic
|
||||
// std::vector<BufferPtr> id_bufs{nullptr, id_buf};
|
||||
// std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
|
||||
//
|
||||
// auto int64_type = std::make_shared<arrow::Int64Type>();
|
||||
// auto float_type = std::make_shared<arrow::FloatType>();
|
||||
//
|
||||
// auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
|
||||
// auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
|
||||
// // auto id_array_data = std::make_shared<ArrayData>(int64_type, sizeof(int64_t) * elems, id_bufs);
|
||||
// // auto dist_array_data = std::make_shared<ArrayData>(float_type, sizeof(float) * elems, dist_bufs);
|
||||
//
|
||||
// // auto ids = ConstructInt64Array((uint8_t*)p_id, sizeof(int64_t) * elems);
|
||||
// // auto dists = ConstructFloatArray((uint8_t*)p_dist, sizeof(float) * elems);
|
||||
//
|
||||
// auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
|
||||
// auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
|
||||
// std::vector<ArrayPtr> array{ids, dists};
|
||||
//
|
||||
// auto field_id = std::make_shared<Field>("id", std::make_shared<arrow::Int64Type>());
|
||||
// auto field_dist = std::make_shared<Field>("dist", std::make_shared<arrow::FloatType>());
|
||||
// std::vector<FieldPtr> fields{field_id, field_dist};
|
||||
// auto schema = std::make_shared<Schema>(fields);
|
||||
//
|
||||
// return std::make_shared<Dataset>(array, schema);
|
||||
return std::make_shared<Dataset>((void*)p_id, (void*)p_dist);
|
||||
}
|
||||
|
||||
} // namespace knowhere
|
||||
|
|
|
@ -18,6 +18,8 @@
#pragma once

#include <memory>
#include <sstream>
#include "Log.h"

namespace knowhere {

@ -50,6 +52,18 @@ struct Cfg {
    CheckValid() {
        return true;
    }

    void
    Dump() {
        KNOWHERE_LOG_DEBUG << DumpImpl().str();
    }

    virtual std::stringstream
    DumpImpl() {
        std::stringstream ss;
        ss << "dim: " << d << ", metric: " << int(metric_type) << ", gpuid: " << gpu_id << ", k: " << k;
        return ss;
    }
};
using Config = std::shared_ptr<Cfg>;
|
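The Dump()/DumpImpl() pair added above is the extension point for derived index configs. A minimal sketch of how a subclass is expected to hook into it (the struct name and the extra nlist field are hypothetical; only the base-class members and the virtual shown in the hunk above are taken from the diff):

#include <cstdint>
#include <sstream>

// Assumes the knowhere::Cfg definition from the hunk above is visible.
struct MyIVFCfg : public knowhere::Cfg {
    int64_t nlist = 16384;  // hypothetical extra parameter

    // Extend the dump rather than overriding Dump() itself, so the base
    // fields (dim, metric, gpuid, k) are still logged.
    std::stringstream
    DumpImpl() override {
        auto ss = Cfg::DumpImpl();
        ss << ", nlist: " << nlist;
        return ss;
    }
};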
||||
|
||||
|
|
|
@ -54,6 +54,9 @@ class Dataset {
        : tensor_(std::move(tensor)), tensor_schema_(std::move(tensor_schema)) {
    }

    Dataset(void* ids, void* dists) : ids_(ids), dists_(dists) {
    }

    Dataset(const Dataset&) = delete;
    Dataset&
    operator=(const Dataset&) = delete;

@ -128,6 +131,16 @@ class Dataset {
        tensor_schema_ = std::move(tensor_schema);
    }

    void*
    ids() {
        return ids_;
    }

    void*
    dist() {
        return dists_;
    }

    // const Config &
    // meta() const { return meta_; }

@ -141,6 +154,9 @@ class Dataset {
    SchemaPtr array_schema_;
    std::vector<TensorPtr> tensor_;
    SchemaPtr tensor_schema_;
    // TODO(yukun): using smart pointer
    void* ids_;
    void* dists_;
    // Config meta_;
};
|
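With the Arrow arrays commented out elsewhere in this change, Search() now hands results back through this raw-pointer Dataset. A minimal consumer-side sketch (the function name, the header path, and the assumption that the buffers hold nq * k entries are illustrative, not taken from the diff; per the TODO above, ownership of the raw buffers is still left to the caller):

#include <cstdint>
#include <iostream>

#include "knowhere/common/Dataset.h"  // assumed location of the Dataset header

// Reads back the ids()/dist() buffers filled by IDMAP/IVF/NSG::Search.
void
PrintTopK(const knowhere::DatasetPtr& result, int64_t nq, int64_t k) {
    auto ids = static_cast<int64_t*>(result->ids());
    auto dists = static_cast<float*>(result->dist());
    for (int64_t i = 0; i < nq; ++i) {
        for (int64_t j = 0; j < k; ++j) {
            std::cout << "query " << i << ": id=" << ids[i * k + j]
                      << " dist=" << dists[i * k + j] << std::endl;
        }
    }
}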
||||
|
||||
|
|
|
@ -17,7 +17,7 @@

#pragma once

#include "external/easyloggingpp/easylogging++.h"
#include "easyloggingpp/easylogging++.h"

namespace knowhere {
|
||||
|
||||
|
|
|
@ -0,0 +1,129 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#include "knowhere/index/vector_index/IndexGPUIDMAP.h"
|
||||
|
||||
#include <faiss/AutoTune.h>
|
||||
#include <faiss/IndexFlat.h>
|
||||
#include <faiss/MetaIndexes.h>
|
||||
#include <faiss/index_io.h>
|
||||
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
|
||||
#include <faiss/gpu/GpuCloner.h>
|
||||
|
||||
#endif
|
||||
|
||||
#include "knowhere/adapter/VectorAdapter.h"
|
||||
#include "knowhere/common/Exception.h"
|
||||
#include "knowhere/index/vector_index/IndexIDMAP.h"
|
||||
#include "knowhere/index/vector_index/helpers/FaissIO.h"
|
||||
|
||||
namespace knowhere {
|
||||
|
||||
VectorIndexPtr
|
||||
GPUIDMAP::CopyGpuToCpu(const Config& config) {
|
||||
std::lock_guard<std::mutex> lk(mutex_);
|
||||
|
||||
faiss::Index* device_index = index_.get();
|
||||
faiss::Index* host_index = faiss::gpu::index_gpu_to_cpu(device_index);
|
||||
|
||||
std::shared_ptr<faiss::Index> new_index;
|
||||
new_index.reset(host_index);
|
||||
return std::make_shared<IDMAP>(new_index);
|
||||
}
|
||||
|
||||
VectorIndexPtr
|
||||
GPUIDMAP::Clone() {
|
||||
auto cpu_idx = CopyGpuToCpu(Config());
|
||||
|
||||
if (auto idmap = std::dynamic_pointer_cast<IDMAP>(cpu_idx)) {
|
||||
return idmap->CopyCpuToGpu(gpu_id_, Config());
|
||||
} else {
|
||||
KNOWHERE_THROW_MSG("IndexType not Support GpuClone");
|
||||
}
|
||||
}
|
||||
|
||||
BinarySet
|
||||
GPUIDMAP::SerializeImpl() {
|
||||
try {
|
||||
MemoryIOWriter writer;
|
||||
{
|
||||
faiss::Index* index = index_.get();
|
||||
faiss::Index* host_index = faiss::gpu::index_gpu_to_cpu(index);
|
||||
|
||||
faiss::write_index(host_index, &writer);
|
||||
delete host_index;
|
||||
}
|
||||
auto data = std::make_shared<uint8_t>();
|
||||
data.reset(writer.data_);
|
||||
|
||||
BinarySet res_set;
|
||||
res_set.Append("IVF", data, writer.rp);
|
||||
|
||||
return res_set;
|
||||
} catch (std::exception& e) {
|
||||
KNOWHERE_THROW_MSG(e.what());
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
GPUIDMAP::LoadImpl(const BinarySet& index_binary) {
|
||||
auto binary = index_binary.GetByName("IVF");
|
||||
MemoryIOReader reader;
|
||||
{
|
||||
reader.total = binary->size;
|
||||
reader.data_ = binary->data.get();
|
||||
|
||||
faiss::Index* index = faiss::read_index(&reader);
|
||||
|
||||
if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(gpu_id_)) {
|
||||
ResScope rs(res, gpu_id_, false);
|
||||
auto device_index = faiss::gpu::index_cpu_to_gpu(res->faiss_res.get(), gpu_id_, index);
|
||||
index_.reset(device_index);
|
||||
res_ = res;
|
||||
} else {
|
||||
KNOWHERE_THROW_MSG("Load error, can't get gpu resource");
|
||||
}
|
||||
|
||||
delete index;
|
||||
}
|
||||
}
|
||||
|
||||
VectorIndexPtr
|
||||
GPUIDMAP::CopyGpuToGpu(const int64_t& device_id, const Config& config) {
|
||||
auto cpu_index = CopyGpuToCpu(config);
|
||||
return std::static_pointer_cast<IDMAP>(cpu_index)->CopyCpuToGpu(device_id, config);
|
||||
}
|
||||
|
||||
float*
|
||||
GPUIDMAP::GetRawVectors() {
|
||||
KNOWHERE_THROW_MSG("Not support");
|
||||
}
|
||||
|
||||
int64_t*
|
||||
GPUIDMAP::GetRawIds() {
|
||||
KNOWHERE_THROW_MSG("Not support");
|
||||
}
|
||||
|
||||
void
|
||||
GPUIDMAP::search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) {
|
||||
ResScope rs(res_, gpu_id_);
|
||||
index_->search(n, (float*)data, k, distances, labels);
|
||||
}
|
||||
|
||||
} // namespace knowhere
|
|
@ -0,0 +1,63 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "IndexGPUIVF.h"
|
||||
#include "IndexIDMAP.h"
|
||||
#include "IndexIVF.h"
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
namespace knowhere {
|
||||
|
||||
class GPUIDMAP : public IDMAP, public GPUIndex {
|
||||
public:
|
||||
explicit GPUIDMAP(std::shared_ptr<faiss::Index> index, const int64_t& device_id, ResPtr& res)
|
||||
: IDMAP(std::move(index)), GPUIndex(device_id, res) {
|
||||
}
|
||||
|
||||
VectorIndexPtr
|
||||
CopyGpuToCpu(const Config& config) override;
|
||||
|
||||
float*
|
||||
GetRawVectors() override;
|
||||
|
||||
int64_t*
|
||||
GetRawIds() override;
|
||||
|
||||
VectorIndexPtr
|
||||
Clone() override;
|
||||
|
||||
VectorIndexPtr
|
||||
CopyGpuToGpu(const int64_t& device_id, const Config& config) override;
|
||||
|
||||
protected:
|
||||
void
|
||||
search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) override;
|
||||
|
||||
BinarySet
|
||||
SerializeImpl() override;
|
||||
|
||||
void
|
||||
LoadImpl(const BinarySet& index_binary) override;
|
||||
};
|
||||
|
||||
using GPUIDMAPPtr = std::shared_ptr<GPUIDMAP>;
|
||||
|
||||
} // namespace knowhere
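A caller-side sketch of how the new GPUIDMAP header is reached in practice (hedged: a GPU build is assumed, the GPU resource is assumed to have already been registered with FaissGpuResourceMgr for gpu_id, and the cast relies on IDMAP::CopyCpuToGpu returning a GPUIDMAP, which is what IndexIDMAP.cpp does in this change):

#include <cstdint>
#include <memory>

#include "knowhere/index/vector_index/IndexGPUIDMAP.h"
#include "knowhere/index/vector_index/IndexIDMAP.h"

// Moves a flat (IDMAP) index onto a GPU, searches there, then brings it back.
knowhere::VectorIndexPtr
UseOnGpu(const knowhere::IDMAPPtr& cpu_index, int64_t gpu_id) {
    // Throws if FaissGpuResourceMgr has no resource registered for gpu_id;
    // the cast yields nullptr if the returned index is not a GPUIDMAP.
    auto gpu_index = std::dynamic_pointer_cast<knowhere::GPUIDMAP>(
        cpu_index->CopyCpuToGpu(gpu_id, knowhere::Config()));

    // ... run gpu_index->Search(...) here ...

    // GetRawVectors()/GetRawIds() throw on the GPU copy, so convert back
    // before anything that needs host-side access or serialization.
    return gpu_index->CopyGpuToCpu(knowhere::Config());
}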
|
|
@ -39,17 +39,19 @@ GPUIVFPQ::Train(const DatasetPtr& dataset, const Config& config) {
|
|||
|
||||
GETTENSOR(dataset)
|
||||
|
||||
// TODO(linxj): set device here.
|
||||
// TODO(linxj): set gpu resource here.
|
||||
faiss::gpu::StandardGpuResources res;
|
||||
faiss::gpu::GpuIndexIVFPQ device_index(&res, dim, build_cfg->nlist, build_cfg->m, build_cfg->nbits,
|
||||
GetMetricType(build_cfg->metric_type)); // IP not support
|
||||
device_index.train(rows, (float*)p_data);
|
||||
|
||||
std::shared_ptr<faiss::Index> host_index = nullptr;
|
||||
host_index.reset(faiss::gpu::index_gpu_to_cpu(&device_index));
|
||||
|
||||
return std::make_shared<IVFIndexModel>(host_index);
|
||||
auto temp_resource = FaissGpuResourceMgr::GetInstance().GetRes(gpu_id_);
|
||||
if (temp_resource != nullptr) {
|
||||
ResScope rs(temp_resource, gpu_id_, true);
|
||||
auto device_index = new faiss::gpu::GpuIndexIVFPQ(temp_resource->faiss_res.get(), dim, build_cfg->nlist,
|
||||
build_cfg->m, build_cfg->nbits,
|
||||
GetMetricType(build_cfg->metric_type)); // IP not support
|
||||
device_index->train(rows, (float*)p_data);
|
||||
std::shared_ptr<faiss::Index> host_index = nullptr;
|
||||
host_index.reset(faiss::gpu::index_gpu_to_cpu(device_index));
|
||||
return std::make_shared<IVFIndexModel>(host_index);
|
||||
} else {
|
||||
KNOWHERE_THROW_MSG("Build IVFSQ can't get gpu resource");
|
||||
}
|
||||
}
|
||||
|
||||
std::shared_ptr<faiss::IVFSearchParameters>
|
||||
|
@ -66,7 +68,14 @@ GPUIVFPQ::GenParams(const Config& config) {
|
|||
|
||||
VectorIndexPtr
|
||||
GPUIVFPQ::CopyGpuToCpu(const Config& config) {
|
||||
KNOWHERE_THROW_MSG("not support yet");
|
||||
std::lock_guard<std::mutex> lk(mutex_);
|
||||
|
||||
faiss::Index* device_index = index_.get();
|
||||
faiss::Index* host_index = faiss::gpu::index_gpu_to_cpu(device_index);
|
||||
|
||||
std::shared_ptr<faiss::Index> new_index;
|
||||
new_index.reset(host_index);
|
||||
return std::make_shared<IVFPQ>(new_index);
|
||||
}
|
||||
|
||||
} // namespace knowhere
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
#include "IndexGPUIVF.h"
|
||||
|
||||
|
@ -28,6 +29,10 @@ class GPUIVFPQ : public GPUIVF {
|
|||
explicit GPUIVFPQ(const int& device_id) : GPUIVF(device_id) {
|
||||
}
|
||||
|
||||
GPUIVFPQ(std::shared_ptr<faiss::Index> index, const int64_t& device_id, ResPtr& resource)
|
||||
: GPUIVF(std::move(index), device_id, resource) {
|
||||
}
|
||||
|
||||
IndexModelPtr
|
||||
Train(const DatasetPtr& dataset, const Config& config) override;
|
||||
|
||||
|
|
|
@ -17,10 +17,18 @@
|
|||
|
||||
#include <faiss/IndexFlat.h>
|
||||
#include <faiss/MetaIndexes.h>
|
||||
#include <faiss/gpu/GpuCloner.h>
|
||||
|
||||
#include <faiss/AutoTune.h>
|
||||
#include <faiss/clone_index.h>
|
||||
#include <faiss/index_factory.h>
|
||||
#include <faiss/index_io.h>
|
||||
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
|
||||
#include <faiss/gpu/GpuCloner.h>
|
||||
|
||||
#endif
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "knowhere/adapter/VectorAdapter.h"
|
||||
|
@ -28,6 +36,13 @@
|
|||
#include "knowhere/index/vector_index/IndexIDMAP.h"
|
||||
#include "knowhere/index/vector_index/helpers/FaissIO.h"
|
||||
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
|
||||
#include "knowhere/index/vector_index/IndexGPUIDMAP.h"
|
||||
#include "knowhere/index/vector_index/helpers/FaissGpuResourceMgr.h"
|
||||
|
||||
#endif
|
||||
|
||||
namespace knowhere {
|
||||
|
||||
BinarySet
|
||||
|
@ -65,23 +80,24 @@ IDMAP::Search(const DatasetPtr& dataset, const Config& config) {
|
|||
|
||||
search_impl(rows, (float*)p_data, config->k, res_dis, res_ids, Config());
|
||||
|
||||
auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
|
||||
auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
|
||||
|
||||
std::vector<BufferPtr> id_bufs{nullptr, id_buf};
|
||||
std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
|
||||
|
||||
auto int64_type = std::make_shared<arrow::Int64Type>();
|
||||
auto float_type = std::make_shared<arrow::FloatType>();
|
||||
|
||||
auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
|
||||
auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
|
||||
|
||||
auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
|
||||
auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
|
||||
std::vector<ArrayPtr> array{ids, dists};
|
||||
|
||||
return std::make_shared<Dataset>(array, nullptr);
|
||||
// auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
|
||||
// auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
|
||||
//
|
||||
// std::vector<BufferPtr> id_bufs{nullptr, id_buf};
|
||||
// std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
|
||||
//
|
||||
// auto int64_type = std::make_shared<arrow::Int64Type>();
|
||||
// auto float_type = std::make_shared<arrow::FloatType>();
|
||||
//
|
||||
// auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
|
||||
// auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
|
||||
//
|
||||
// auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
|
||||
// auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
|
||||
// std::vector<ArrayPtr> array{ids, dists};
|
||||
//
|
||||
// return std::make_shared<Dataset>(array, nullptr);
|
||||
return std::make_shared<Dataset>((void*)res_ids, (void*)res_dis);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -160,6 +176,8 @@ IDMAP::Clone() {
|
|||
|
||||
VectorIndexPtr
|
||||
IDMAP::CopyCpuToGpu(const int64_t& device_id, const Config& config) {
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
|
||||
if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(device_id)) {
|
||||
ResScope rs(res, device_id, false);
|
||||
auto gpu_index = faiss::gpu::index_cpu_to_gpu(res->faiss_res.get(), device_id, index_.get());
|
||||
|
@ -170,6 +188,9 @@ IDMAP::CopyCpuToGpu(const int64_t& device_id, const Config& config) {
|
|||
} else {
|
||||
KNOWHERE_THROW_MSG("CopyCpuToGpu Error, can't get gpu_resource");
|
||||
}
|
||||
#else
|
||||
KNOWHERE_THROW_MSG("Calling IDMAP::CopyCpuToGpu when we are using CPU version");
|
||||
#endif
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -177,95 +198,4 @@ IDMAP::Seal() {
|
|||
// do nothing
|
||||
}
|
||||
|
||||
VectorIndexPtr
|
||||
GPUIDMAP::CopyGpuToCpu(const Config& config) {
|
||||
std::lock_guard<std::mutex> lk(mutex_);
|
||||
|
||||
faiss::Index* device_index = index_.get();
|
||||
faiss::Index* host_index = faiss::gpu::index_gpu_to_cpu(device_index);
|
||||
|
||||
std::shared_ptr<faiss::Index> new_index;
|
||||
new_index.reset(host_index);
|
||||
return std::make_shared<IDMAP>(new_index);
|
||||
}
|
||||
|
||||
VectorIndexPtr
|
||||
GPUIDMAP::Clone() {
|
||||
auto cpu_idx = CopyGpuToCpu(Config());
|
||||
|
||||
if (auto idmap = std::dynamic_pointer_cast<IDMAP>(cpu_idx)) {
|
||||
return idmap->CopyCpuToGpu(gpu_id_, Config());
|
||||
} else {
|
||||
KNOWHERE_THROW_MSG("IndexType not Support GpuClone");
|
||||
}
|
||||
}
|
||||
|
||||
BinarySet
|
||||
GPUIDMAP::SerializeImpl() {
|
||||
try {
|
||||
MemoryIOWriter writer;
|
||||
{
|
||||
faiss::Index* index = index_.get();
|
||||
faiss::Index* host_index = faiss::gpu::index_gpu_to_cpu(index);
|
||||
|
||||
faiss::write_index(host_index, &writer);
|
||||
delete host_index;
|
||||
}
|
||||
auto data = std::make_shared<uint8_t>();
|
||||
data.reset(writer.data_);
|
||||
|
||||
BinarySet res_set;
|
||||
res_set.Append("IVF", data, writer.rp);
|
||||
|
||||
return res_set;
|
||||
} catch (std::exception& e) {
|
||||
KNOWHERE_THROW_MSG(e.what());
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
GPUIDMAP::LoadImpl(const BinarySet& index_binary) {
|
||||
auto binary = index_binary.GetByName("IVF");
|
||||
MemoryIOReader reader;
|
||||
{
|
||||
reader.total = binary->size;
|
||||
reader.data_ = binary->data.get();
|
||||
|
||||
faiss::Index* index = faiss::read_index(&reader);
|
||||
|
||||
if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(gpu_id_)) {
|
||||
ResScope rs(res, gpu_id_, false);
|
||||
auto device_index = faiss::gpu::index_cpu_to_gpu(res->faiss_res.get(), gpu_id_, index);
|
||||
index_.reset(device_index);
|
||||
res_ = res;
|
||||
} else {
|
||||
KNOWHERE_THROW_MSG("Load error, can't get gpu resource");
|
||||
}
|
||||
|
||||
delete index;
|
||||
}
|
||||
}
|
||||
|
||||
VectorIndexPtr
|
||||
GPUIDMAP::CopyGpuToGpu(const int64_t& device_id, const Config& config) {
|
||||
auto cpu_index = CopyGpuToCpu(config);
|
||||
return std::static_pointer_cast<IDMAP>(cpu_index)->CopyCpuToGpu(device_id, config);
|
||||
}
|
||||
|
||||
float*
|
||||
GPUIDMAP::GetRawVectors() {
|
||||
KNOWHERE_THROW_MSG("Not support");
|
||||
}
|
||||
|
||||
int64_t*
|
||||
GPUIDMAP::GetRawIds() {
|
||||
KNOWHERE_THROW_MSG("Not support");
|
||||
}
|
||||
|
||||
void
|
||||
GPUIDMAP::search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) {
|
||||
ResScope rs(res_, gpu_id_);
|
||||
index_->search(n, (float*)data, k, distances, labels);
|
||||
}
|
||||
|
||||
} // namespace knowhere
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include "IndexGPUIVF.h"
|
||||
#include "IndexIVF.h"
|
||||
|
||||
#include <memory>
|
||||
|
@ -67,32 +66,4 @@ class IDMAP : public VectorIndex, public FaissBaseIndex {
|
|||
|
||||
using IDMAPPtr = std::shared_ptr<IDMAP>;
|
||||
|
||||
class GPUIDMAP : public IDMAP, public GPUIndex {
|
||||
public:
|
||||
explicit GPUIDMAP(std::shared_ptr<faiss::Index> index, const int64_t& device_id, ResPtr& res)
|
||||
: IDMAP(std::move(index)), GPUIndex(device_id, res) {
|
||||
}
|
||||
|
||||
VectorIndexPtr
|
||||
CopyGpuToCpu(const Config& config) override;
|
||||
float*
|
||||
GetRawVectors() override;
|
||||
int64_t*
|
||||
GetRawIds() override;
|
||||
VectorIndexPtr
|
||||
Clone() override;
|
||||
VectorIndexPtr
|
||||
CopyGpuToGpu(const int64_t& device_id, const Config& config) override;
|
||||
|
||||
protected:
|
||||
void
|
||||
search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) override;
|
||||
BinarySet
|
||||
SerializeImpl() override;
|
||||
void
|
||||
LoadImpl(const BinarySet& index_binary) override;
|
||||
};
|
||||
|
||||
using GPUIDMAPPtr = std::shared_ptr<GPUIDMAP>;
|
||||
|
||||
} // namespace knowhere
|
||||
|
|
|
@ -15,11 +15,19 @@
|
|||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#include <faiss/AutoTune.h>
|
||||
#include <faiss/IVFlib.h>
|
||||
#include <faiss/IndexFlat.h>
|
||||
#include <faiss/IndexIVF.h>
|
||||
#include <faiss/IndexIVFFlat.h>
|
||||
#include <faiss/IndexIVFPQ.h>
|
||||
#include <faiss/clone_index.h>
|
||||
#include <faiss/index_factory.h>
|
||||
#include <faiss/index_io.h>
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
#include <faiss/gpu/GpuAutoTune.h>
|
||||
#include <faiss/gpu/GpuCloner.h>
|
||||
#endif
|
||||
|
||||
#include <chrono>
|
||||
#include <memory>
|
||||
|
@ -29,7 +37,9 @@
|
|||
#include "knowhere/adapter/VectorAdapter.h"
|
||||
#include "knowhere/common/Exception.h"
|
||||
#include "knowhere/common/Log.h"
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
#include "knowhere/index/vector_index/IndexGPUIVF.h"
|
||||
#endif
|
||||
#include "knowhere/index/vector_index/IndexIVF.h"
|
||||
|
||||
namespace knowhere {
|
||||
|
@ -129,23 +139,23 @@ IVF::Search(const DatasetPtr& dataset, const Config& config) {
|
|||
// std::cout << ss_res_id.str() << std::endl;
|
||||
// std::cout << ss_res_dist.str() << std::endl << std::endl;
|
||||
|
||||
auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
|
||||
auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
|
||||
// auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
|
||||
// auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
|
||||
//
|
||||
// std::vector<BufferPtr> id_bufs{nullptr, id_buf};
|
||||
// std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
|
||||
//
|
||||
// auto int64_type = std::make_shared<arrow::Int64Type>();
|
||||
// auto float_type = std::make_shared<arrow::FloatType>();
|
||||
//
|
||||
// auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
|
||||
// auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
|
||||
//
|
||||
// auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
|
||||
// auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
|
||||
// std::vector<ArrayPtr> array{ids, dists};
|
||||
|
||||
std::vector<BufferPtr> id_bufs{nullptr, id_buf};
|
||||
std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
|
||||
|
||||
auto int64_type = std::make_shared<arrow::Int64Type>();
|
||||
auto float_type = std::make_shared<arrow::FloatType>();
|
||||
|
||||
auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
|
||||
auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
|
||||
|
||||
auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
|
||||
auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
|
||||
std::vector<ArrayPtr> array{ids, dists};
|
||||
|
||||
return std::make_shared<Dataset>(array, nullptr);
|
||||
return std::make_shared<Dataset>((void*)res_ids, (void*)res_dis);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -221,16 +231,17 @@ IVF::search_impl(int64_t n, const float* data, int64_t k, float* distances, int6
|
|||
faiss::ivflib::search_with_parameters(index_.get(), n, (float*)data, k, distances, labels, params.get());
|
||||
stdclock::time_point after = stdclock::now();
|
||||
double search_cost = (std::chrono::duration<double, std::micro>(after - before)).count();
|
||||
KNOWHERE_LOG_DEBUG << "K=" << k << " NQ=" << n << " NL=" << faiss::indexIVF_stats.nlist
|
||||
<< " ND=" << faiss::indexIVF_stats.ndis << " NH=" << faiss::indexIVF_stats.nheap_updates
|
||||
<< " Q=" << faiss::indexIVF_stats.quantization_time
|
||||
<< " S=" << faiss::indexIVF_stats.search_time;
|
||||
KNOWHERE_LOG_DEBUG << "IVF search cost: " << search_cost
|
||||
<< ", quantization cost: " << faiss::indexIVF_stats.quantization_time
|
||||
<< ", data search cost: " << faiss::indexIVF_stats.search_time;
|
||||
faiss::indexIVF_stats.quantization_time = 0;
|
||||
faiss::indexIVF_stats.search_time = 0;
|
||||
}
|
||||
|
||||
VectorIndexPtr
|
||||
IVF::CopyCpuToGpu(const int64_t& device_id, const Config& config) {
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
|
||||
if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(device_id)) {
|
||||
ResScope rs(res, device_id, false);
|
||||
auto gpu_index = faiss::gpu::index_cpu_to_gpu(res->faiss_res.get(), device_id, index_.get());
|
||||
|
@ -241,6 +252,10 @@ IVF::CopyCpuToGpu(const int64_t& device_id, const Config& config) {
|
|||
} else {
|
||||
KNOWHERE_THROW_MSG("CopyCpuToGpu Error, can't get gpu_resource");
|
||||
}
|
||||
|
||||
#else
|
||||
KNOWHERE_THROW_MSG("Calling IVF::CopyCpuToGpu when we are using CPU version");
|
||||
#endif
|
||||
}
|
||||
|
||||
VectorIndexPtr
|
||||
|
|
|
@ -17,11 +17,19 @@
|
|||
|
||||
#include <faiss/IndexFlat.h>
|
||||
#include <faiss/IndexIVFPQ.h>
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
#include <faiss/gpu/GpuCloner.h>
|
||||
#endif
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
#include "knowhere/adapter/VectorAdapter.h"
|
||||
#include "knowhere/common/Exception.h"
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
#include "knowhere/index/vector_index/IndexGPUIVF.h"
|
||||
#include "knowhere/index/vector_index/IndexGPUIVFPQ.h"
|
||||
#endif
|
||||
#include "knowhere/index/vector_index/IndexIVFPQ.h"
|
||||
|
||||
namespace knowhere {
|
||||
|
@ -60,4 +68,22 @@ IVFPQ::Clone_impl(const std::shared_ptr<faiss::Index>& index) {
|
|||
return std::make_shared<IVFPQ>(index);
|
||||
}
|
||||
|
||||
VectorIndexPtr
|
||||
IVFPQ::CopyCpuToGpu(const int64_t& device_id, const Config& config) {
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(device_id)) {
|
||||
ResScope rs(res, device_id, false);
|
||||
auto gpu_index = faiss::gpu::index_cpu_to_gpu(res->faiss_res.get(), device_id, index_.get());
|
||||
|
||||
std::shared_ptr<faiss::Index> device_index;
|
||||
device_index.reset(gpu_index);
|
||||
return std::make_shared<GPUIVFPQ>(device_index, device_id, res);
|
||||
} else {
|
||||
KNOWHERE_THROW_MSG("CopyCpuToGpu Error, can't get gpu_resource");
|
||||
}
|
||||
#else
|
||||
KNOWHERE_THROW_MSG("Calling IVFPQ::CopyCpuToGpu when we are using CPU version");
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace knowhere
|
||||
|
|
|
@ -34,6 +34,9 @@ class IVFPQ : public IVF {
|
|||
IndexModelPtr
|
||||
Train(const DatasetPtr& dataset, const Config& config) override;
|
||||
|
||||
VectorIndexPtr
|
||||
CopyCpuToGpu(const int64_t& device_id, const Config& config) override;
|
||||
|
||||
protected:
|
||||
std::shared_ptr<faiss::IVFSearchParameters>
|
||||
GenParams(const Config& config) override;
|
||||
|
|
|
@ -15,15 +15,22 @@
|
|||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
#include <faiss/gpu/GpuAutoTune.h>
|
||||
#include <faiss/gpu/GpuCloner.h>
|
||||
#endif
|
||||
#include <faiss/index_factory.h>
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "knowhere/adapter/VectorAdapter.h"
|
||||
#include "knowhere/common/Exception.h"
|
||||
#include "knowhere/index/vector_index/IndexGPUIVFSQ.h"
|
||||
#include "knowhere/index/vector_index/IndexIVFSQ.h"
|
||||
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
#include "knowhere/index/vector_index/IndexGPUIVFSQ.h"
|
||||
#include "knowhere/index/vector_index/helpers/FaissGpuResourceMgr.h"
|
||||
#endif
|
||||
|
||||
namespace knowhere {
|
||||
|
||||
|
@ -54,6 +61,8 @@ IVFSQ::Clone_impl(const std::shared_ptr<faiss::Index>& index) {
|
|||
|
||||
VectorIndexPtr
|
||||
IVFSQ::CopyCpuToGpu(const int64_t& device_id, const Config& config) {
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
|
||||
if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(device_id)) {
|
||||
ResScope rs(res, device_id, false);
|
||||
|
||||
|
@ -65,6 +74,10 @@ IVFSQ::CopyCpuToGpu(const int64_t& device_id, const Config& config) {
|
|||
} else {
|
||||
KNOWHERE_THROW_MSG("CopyCpuToGpu Error, can't get gpu_resource");
|
||||
}
|
||||
|
||||
#else
|
||||
KNOWHERE_THROW_MSG("Calling IVFSQ::CopyCpuToGpu when we are using CPU version");
|
||||
#endif
|
||||
}
|
||||
|
||||
} // namespace knowhere
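The new #else branches above make CopyCpuToGpu fail loudly on CPU-only builds instead of silently mis-linking. A small caller-side sketch of the intended guard pattern (the function name is illustrative; IVFSQ and its CopyCpuToGpu signature come from the hunk above):

#include <cstdint>
#include <memory>

#include "knowhere/index/vector_index/IndexIVFSQ.h"

// Returns a device copy on GPU builds and falls back to the CPU index
// otherwise, so callers never hit the KNOWHERE_THROW_MSG in the #else branch.
knowhere::VectorIndexPtr
MaybeCopyToGpu(const std::shared_ptr<knowhere::IVFSQ>& cpu_index, int64_t device_id) {
#ifdef MILVUS_GPU_VERSION
    return cpu_index->CopyCpuToGpu(device_id, knowhere::Config());
#else
    (void)device_id;
    return cpu_index;
#endif
}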
|
||||
|
|
|
@ -1,180 +0,0 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
#include <SPTAG/AnnService/inc/Core/Common.h>
|
||||
#include <SPTAG/AnnService/inc/Core/VectorSet.h>
|
||||
#include <SPTAG/AnnService/inc/Server/QueryParser.h>
|
||||
#include <sstream>
|
||||
#include <vector>
|
||||
|
||||
#undef mkdir
|
||||
|
||||
#include "knowhere/index/vector_index/IndexKDT.h"
|
||||
#include "knowhere/index/vector_index/helpers/Definitions.h"
|
||||
//#include "knowhere/index/preprocessor/normalize.h"
|
||||
#include "knowhere/adapter/SptagAdapter.h"
|
||||
#include "knowhere/common/Exception.h"
|
||||
#include "knowhere/index/vector_index/helpers/KDTParameterMgr.h"
|
||||
|
||||
namespace knowhere {
|
||||
|
||||
BinarySet
|
||||
CPUKDTRNG::Serialize() {
|
||||
std::vector<void*> index_blobs;
|
||||
std::vector<int64_t> index_len;
|
||||
|
||||
// TODO(zirui): dev
|
||||
// index_ptr_->SaveIndexToMemory(index_blobs, index_len);
|
||||
BinarySet binary_set;
|
||||
|
||||
//
|
||||
// auto sample = std::make_shared<uint8_t>();
|
||||
// sample.reset(static_cast<uint8_t*>(index_blobs[0]));
|
||||
// auto tree = std::make_shared<uint8_t>();
|
||||
// tree.reset(static_cast<uint8_t*>(index_blobs[1]));
|
||||
// auto graph = std::make_shared<uint8_t>();
|
||||
// graph.reset(static_cast<uint8_t*>(index_blobs[2]));
|
||||
// auto metadata = std::make_shared<uint8_t>();
|
||||
// metadata.reset(static_cast<uint8_t*>(index_blobs[3]));
|
||||
//
|
||||
// binary_set.Append("samples", sample, index_len[0]);
|
||||
// binary_set.Append("tree", tree, index_len[1]);
|
||||
// binary_set.Append("graph", graph, index_len[2]);
|
||||
// binary_set.Append("metadata", metadata, index_len[3]);
|
||||
return binary_set;
|
||||
}
|
||||
|
||||
void
|
||||
CPUKDTRNG::Load(const BinarySet& binary_set) {
|
||||
// TODO(zirui): dev
|
||||
|
||||
// std::vector<void*> index_blobs;
|
||||
//
|
||||
// auto samples = binary_set.GetByName("samples");
|
||||
// index_blobs.push_back(samples->data.get());
|
||||
//
|
||||
// auto tree = binary_set.GetByName("tree");
|
||||
// index_blobs.push_back(tree->data.get());
|
||||
//
|
||||
// auto graph = binary_set.GetByName("graph");
|
||||
// index_blobs.push_back(graph->data.get());
|
||||
//
|
||||
// auto metadata = binary_set.GetByName("metadata");
|
||||
// index_blobs.push_back(metadata->data.get());
|
||||
//
|
||||
// index_ptr_->LoadIndexFromMemory(index_blobs);
|
||||
}
|
||||
|
||||
// PreprocessorPtr
|
||||
// CPUKDTRNG::BuildPreprocessor(const DatasetPtr &dataset, const Config &config) {
|
||||
// return std::make_shared<NormalizePreprocessor>();
|
||||
//}
|
||||
|
||||
IndexModelPtr
|
||||
CPUKDTRNG::Train(const DatasetPtr& origin, const Config& train_config) {
|
||||
SetParameters(train_config);
|
||||
DatasetPtr dataset = origin->Clone();
|
||||
|
||||
// if (index_ptr_->GetDistCalcMethod() == SPTAG::DistCalcMethod::Cosine
|
||||
// && preprocessor_) {
|
||||
// preprocessor_->Preprocess(dataset);
|
||||
//}
|
||||
|
||||
auto vectorset = ConvertToVectorSet(dataset);
|
||||
auto metaset = ConvertToMetadataSet(dataset);
|
||||
index_ptr_->BuildIndex(vectorset, metaset);
|
||||
|
||||
// TODO: return IndexModelPtr
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
void
|
||||
CPUKDTRNG::Add(const DatasetPtr& origin, const Config& add_config) {
|
||||
SetParameters(add_config);
|
||||
DatasetPtr dataset = origin->Clone();
|
||||
|
||||
// if (index_ptr_->GetDistCalcMethod() == SPTAG::DistCalcMethod::Cosine
|
||||
// && preprocessor_) {
|
||||
// preprocessor_->Preprocess(dataset);
|
||||
//}
|
||||
|
||||
auto vectorset = ConvertToVectorSet(dataset);
|
||||
auto metaset = ConvertToMetadataSet(dataset);
|
||||
index_ptr_->AddIndex(vectorset, metaset);
|
||||
}
|
||||
|
||||
void
|
||||
CPUKDTRNG::SetParameters(const Config& config) {
|
||||
for (auto& para : KDTParameterMgr::GetInstance().GetKDTParameters()) {
|
||||
// auto value = config.get_with_default(para.first, para.second);
|
||||
index_ptr_->SetParameter(para.first, para.second);
|
||||
}
|
||||
}
|
||||
|
||||
DatasetPtr
|
||||
CPUKDTRNG::Search(const DatasetPtr& dataset, const Config& config) {
|
||||
SetParameters(config);
|
||||
auto tensor = dataset->tensor()[0];
|
||||
auto p = (float*)tensor->raw_mutable_data();
|
||||
for (auto i = 0; i < 10; ++i) {
|
||||
for (auto j = 0; j < 10; ++j) {
|
||||
std::cout << p[i * 10 + j] << " ";
|
||||
}
|
||||
std::cout << std::endl;
|
||||
}
|
||||
std::vector<SPTAG::QueryResult> query_results = ConvertToQueryResult(dataset, config);
|
||||
|
||||
#pragma omp parallel for
|
||||
for (auto i = 0; i < query_results.size(); ++i) {
|
||||
auto target = (float*)query_results[i].GetTarget();
|
||||
std::cout << target[0] << ", " << target[1] << ", " << target[2] << std::endl;
|
||||
index_ptr_->SearchIndex(query_results[i]);
|
||||
}
|
||||
|
||||
return ConvertToDataset(query_results);
|
||||
}
|
||||
|
||||
int64_t
|
||||
CPUKDTRNG::Count() {
|
||||
index_ptr_->GetNumSamples();
|
||||
}
|
||||
|
||||
int64_t
|
||||
CPUKDTRNG::Dimension() {
|
||||
index_ptr_->GetFeatureDim();
|
||||
}
|
||||
|
||||
VectorIndexPtr
|
||||
CPUKDTRNG::Clone() {
|
||||
KNOWHERE_THROW_MSG("not support");
|
||||
}
|
||||
|
||||
void
|
||||
CPUKDTRNG::Seal() {
|
||||
// do nothing
|
||||
}
|
||||
|
||||
// TODO(linxj):
|
||||
BinarySet
|
||||
CPUKDTRNGIndexModel::Serialize() {
|
||||
}
|
||||
|
||||
void
|
||||
CPUKDTRNGIndexModel::Load(const BinarySet& binary) {
|
||||
}
|
||||
|
||||
} // namespace knowhere
|
|
@ -19,8 +19,10 @@
|
|||
#include "knowhere/adapter/VectorAdapter.h"
|
||||
#include "knowhere/common/Exception.h"
|
||||
#include "knowhere/common/Timer.h"
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
#include "knowhere/index/vector_index/IndexGPUIVF.h"
|
||||
#include "knowhere/index/vector_index/IndexIDMAP.h"
|
||||
#endif
|
||||
|
||||
#include "knowhere/index/vector_index/IndexIVF.h"
|
||||
#include "knowhere/index/vector_index/nsg/NSG.h"
|
||||
#include "knowhere/index/vector_index/nsg/NSGIO.h"
|
||||
|
@ -86,23 +88,24 @@ NSG::Search(const DatasetPtr& dataset, const Config& config) {
|
|||
s_params.search_length = build_cfg->search_length;
|
||||
index_->Search((float*)p_data, rows, dim, build_cfg->k, res_dis, res_ids, s_params);
|
||||
|
||||
auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
|
||||
auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
|
||||
// auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
|
||||
// auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
|
||||
|
||||
std::vector<BufferPtr> id_bufs{nullptr, id_buf};
|
||||
std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
|
||||
|
||||
auto int64_type = std::make_shared<arrow::Int64Type>();
|
||||
auto float_type = std::make_shared<arrow::FloatType>();
|
||||
|
||||
auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
|
||||
auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
|
||||
|
||||
auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
|
||||
auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
|
||||
std::vector<ArrayPtr> array{ids, dists};
|
||||
|
||||
return std::make_shared<Dataset>(array, nullptr);
|
||||
// std::vector<BufferPtr> id_bufs{nullptr, id_buf};
|
||||
// std::vector<BufferPtr> dist_bufs{nullptr, dist_buf};
|
||||
//
|
||||
// auto int64_type = std::make_shared<arrow::Int64Type>();
|
||||
// auto float_type = std::make_shared<arrow::FloatType>();
|
||||
//
|
||||
// auto id_array_data = arrow::ArrayData::Make(int64_type, elems, id_bufs);
|
||||
// auto dist_array_data = arrow::ArrayData::Make(float_type, elems, dist_bufs);
|
||||
//
|
||||
// auto ids = std::make_shared<NumericArray<arrow::Int64Type>>(id_array_data);
|
||||
// auto dists = std::make_shared<NumericArray<arrow::FloatType>>(dist_array_data);
|
||||
// std::vector<ArrayPtr> array{ids, dists};
|
||||
//
|
||||
// return std::make_shared<Dataset>(array, nullptr);
|
||||
return std::make_shared<Dataset>((void*)res_ids, (void*)res_dis);
|
||||
}
|
||||
|
||||
IndexModelPtr
|
||||
|
@ -117,7 +120,11 @@ NSG::Train(const DatasetPtr& dataset, const Config& config) {
|
|||
}
|
||||
|
||||
// TODO(linxj): dev IndexFactory, support more IndexType
|
||||
#ifdef MILVUS_GPU_VERSION
|
||||
auto preprocess_index = std::make_shared<GPUIVF>(build_cfg->gpu_id);
|
||||
#else
|
||||
auto preprocess_index = std::make_shared<IVF>();
|
||||
#endif
|
||||
auto model = preprocess_index->Train(dataset, config);
|
||||
preprocess_index->set_index_model(model);
|
||||
preprocess_index->AddWithoutIds(dataset, config);
|
||||
|
|
|
@ -0,0 +1,348 @@
|
|||
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <SPTAG/AnnService/inc/Core/Common.h>
#include <SPTAG/AnnService/inc/Core/VectorSet.h>
#include <SPTAG/AnnService/inc/Server/QueryParser.h>

#include <array>
#include <sstream>
#include <vector>

#undef mkdir

#include "knowhere/adapter/SptagAdapter.h"
#include "knowhere/common/Exception.h"
#include "knowhere/index/vector_index/IndexSPTAG.h"
#include "knowhere/index/vector_index/helpers/Definitions.h"
#include "knowhere/index/vector_index/helpers/SPTAGParameterMgr.h"

namespace knowhere {

CPUSPTAGRNG::CPUSPTAGRNG(const std::string& IndexType) {
    if (IndexType == "KDT") {
        index_ptr_ = SPTAG::VectorIndex::CreateInstance(SPTAG::IndexAlgoType::KDT, SPTAG::VectorValueType::Float);
        index_ptr_->SetParameter("DistCalcMethod", "L2");
        index_type_ = SPTAG::IndexAlgoType::KDT;
    } else {
        index_ptr_ = SPTAG::VectorIndex::CreateInstance(SPTAG::IndexAlgoType::BKT, SPTAG::VectorValueType::Float);
        index_ptr_->SetParameter("DistCalcMethod", "L2");
        index_type_ = SPTAG::IndexAlgoType::BKT;
    }
}

BinarySet
CPUSPTAGRNG::Serialize() {
    std::string index_config;
    std::vector<SPTAG::ByteArray> index_blobs;

    std::shared_ptr<std::vector<std::uint64_t>> buffersize = index_ptr_->CalculateBufferSize();
    std::vector<char*> res(buffersize->size() + 1);
    for (uint64_t i = 1; i < res.size(); i++) {
        res[i] = new char[buffersize->at(i - 1)];
        auto ptr = &res[i][0];
        index_blobs.emplace_back(SPTAG::ByteArray((std::uint8_t*)ptr, buffersize->at(i - 1), false));
    }

    index_ptr_->SaveIndex(index_config, index_blobs);

    size_t length = index_config.length();
    char* cstr = new char[length];
    snprintf(cstr, length, "%s", index_config.c_str());

    BinarySet binary_set;
    auto sample = std::make_shared<uint8_t>();
    sample.reset(static_cast<uint8_t*>(index_blobs[0].Data()));
    auto tree = std::make_shared<uint8_t>();
    tree.reset(static_cast<uint8_t*>(index_blobs[1].Data()));
    auto graph = std::make_shared<uint8_t>();
    graph.reset(static_cast<uint8_t*>(index_blobs[2].Data()));
    auto deleteid = std::make_shared<uint8_t>();
    deleteid.reset(static_cast<uint8_t*>(index_blobs[3].Data()));
    auto metadata1 = std::make_shared<uint8_t>();
    metadata1.reset(static_cast<uint8_t*>(index_blobs[4].Data()));
    auto metadata2 = std::make_shared<uint8_t>();
    metadata2.reset(static_cast<uint8_t*>(index_blobs[5].Data()));
    auto config = std::make_shared<uint8_t>();
    config.reset(static_cast<uint8_t*>((void*)cstr));

    binary_set.Append("samples", sample, index_blobs[0].Length());
    binary_set.Append("tree", tree, index_blobs[1].Length());
    binary_set.Append("deleteid", deleteid, index_blobs[3].Length());
    binary_set.Append("metadata1", metadata1, index_blobs[4].Length());
    binary_set.Append("metadata2", metadata2, index_blobs[5].Length());
    binary_set.Append("config", config, length);
    binary_set.Append("graph", graph, index_blobs[2].Length());

    // MemoryIOWriter writer;
    // size_t len = 0;
    // for (int i = 0; i < 6; ++i) {
    //     len = index_blobs[i].Length();
    //     assert(len != 0);
    //     writer(&len, sizeof(size_t), 1);
    //     writer(index_blobs[i].Data(), len, 1);
    //     len = 0;
    // }
    // writer(&length, sizeof(size_t), 1);
    // writer(cstr, length, 1);
    // auto data = std::make_shared<uint8_t>();
    // data.reset(writer.data_);
    // BinarySet binary_set;
    // binary_set.Append("sptag", data, writer.total);

    // MemoryIOWriter writer;
    // size_t len = 0;
    // for (int i = 0; i < 6; ++i) {
    //     if (i == 2) continue;
    //     len = index_blobs[i].Length();
    //     assert(len != 0);
    //     writer(&len, sizeof(size_t), 1);
    //     writer(index_blobs[i].Data(), len, 1);
    //     len = 0;
    // }
    // writer(&length, sizeof(size_t), 1);
    // writer(cstr, length, 1);
    // auto data = std::make_shared<uint8_t>();
    // data.reset(writer.data_);
    // BinarySet binary_set;
    // binary_set.Append("sptag", data, writer.total);
    // auto graph = std::make_shared<uint8_t>();
    // graph.reset(static_cast<uint8_t*>(index_blobs[2].Data()));
    // binary_set.Append("graph", graph, index_blobs[2].Length());

    return binary_set;
}

void
CPUSPTAGRNG::Load(const BinarySet& binary_set) {
    std::string index_config;
    std::vector<SPTAG::ByteArray> index_blobs;

    auto samples = binary_set.GetByName("samples");
    index_blobs.push_back(SPTAG::ByteArray(samples->data.get(), samples->size, false));

    auto tree = binary_set.GetByName("tree");
    index_blobs.push_back(SPTAG::ByteArray(tree->data.get(), tree->size, false));

    auto graph = binary_set.GetByName("graph");
    index_blobs.push_back(SPTAG::ByteArray(graph->data.get(), graph->size, false));

    auto deleteid = binary_set.GetByName("deleteid");
    index_blobs.push_back(SPTAG::ByteArray(deleteid->data.get(), deleteid->size, false));

    auto metadata1 = binary_set.GetByName("metadata1");
    index_blobs.push_back(SPTAG::ByteArray(metadata1->data.get(), metadata1->size, false));

    auto metadata2 = binary_set.GetByName("metadata2");
    index_blobs.push_back(SPTAG::ByteArray(metadata2->data.get(), metadata2->size, false));

    auto config = binary_set.GetByName("config");
    index_config = reinterpret_cast<char*>(config->data.get());

    // std::vector<SPTAG::ByteArray> index_blobs;
    // auto data = binary_set.GetByName("sptag");
    // MemoryIOReader reader;
    // reader.total = data->size;
    // reader.data_ = data->data.get();
    // size_t len = 0;
    // for (int i = 0; i < 6; ++i) {
    //     reader(&len, sizeof(size_t), 1);
    //     assert(len != 0);
    //     auto binary = new uint8_t[len];
    //     reader(binary, len, 1);
    //     index_blobs.emplace_back(SPTAG::ByteArray(binary, len, true));
    //     len = 0;
    // }
    // reader(&len, sizeof(size_t), 1);
    // assert(len != 0);
    // auto config = new char[len];
    // reader(config, len, 1);
    // std::string index_config = config;
    // delete[] config;

    // std::vector<SPTAG::ByteArray> index_blobs;
    // auto data = binary_set.GetByName("sptag");
    // MemoryIOReader reader;
    // reader.total = data->size;
    // reader.data_ = data->data.get();
    // size_t len = 0;
    // for (int i = 0; i < 6; ++i) {
    //     if (i == 2) {
    //         auto graph = binary_set.GetByName("graph");
    //         index_blobs.emplace_back(SPTAG::ByteArray(graph->data.get(), graph->size, false));
    //         continue;
    //     }
    //     reader(&len, sizeof(size_t), 1);
    //     assert(len != 0);
    //     auto binary = new uint8_t[len];
    //     reader(binary, len, 1);
    //     index_blobs.emplace_back(SPTAG::ByteArray(binary, len, true));
    //     len = 0;
    // }
    // reader(&len, sizeof(size_t), 1);
    // assert(len != 0);
    // auto config = new char[len];
    // reader(config, len, 1);
    // std::string index_config = config;
    // delete[] config;
    index_ptr_->LoadIndex(index_config, index_blobs);
}
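// Usage sketch for the pair above (assumes an index that has already been built via Train()):
//
//     knowhere::CPUSPTAGRNG index("KDT");
//     // ... build the index ...
//     knowhere::BinarySet blobs = index.Serialize();
//     knowhere::CPUSPTAGRNG restored("KDT");
//     restored.Load(blobs);
//
// Serialize() and Load() must agree on the seven blob names:
// "samples", "tree", "graph", "deleteid", "metadata1", "metadata2", "config".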

// PreprocessorPtr
// CPUKDTRNG::BuildPreprocessor(const DatasetPtr &dataset, const Config &config) {
//     return std::make_shared<NormalizePreprocessor>();
// }

IndexModelPtr
CPUSPTAGRNG::Train(const DatasetPtr& origin, const Config& train_config) {
    SetParameters(train_config);
    DatasetPtr dataset = origin->Clone();

    // if (index_ptr_->GetDistCalcMethod() == SPTAG::DistCalcMethod::Cosine
    //     && preprocessor_) {
    //     preprocessor_->Preprocess(dataset);
    // }

    auto vectorset = ConvertToVectorSet(dataset);
    auto metaset = ConvertToMetadataSet(dataset);
    index_ptr_->BuildIndex(vectorset, metaset);

    // TODO: return IndexModelPtr
    return nullptr;
}

void
CPUSPTAGRNG::Add(const DatasetPtr& origin, const Config& add_config) {
    //    SetParameters(add_config);
    //    DatasetPtr dataset = origin->Clone();
    //
    //    // if (index_ptr_->GetDistCalcMethod() == SPTAG::DistCalcMethod::Cosine
    //    //     && preprocessor_) {
    //    //     preprocessor_->Preprocess(dataset);
    //    // }
    //
    //    auto vectorset = ConvertToVectorSet(dataset);
    //    auto metaset = ConvertToMetadataSet(dataset);
    //    index_ptr_->AddIndex(vectorset, metaset);
}

void
CPUSPTAGRNG::SetParameters(const Config& config) {
#define Assign(param_name, str_name) \
    conf->param_name == INVALID_VALUE ? index_ptr_->SetParameter(str_name, std::to_string(build_cfg->param_name)) \
                                      : index_ptr_->SetParameter(str_name, std::to_string(conf->param_name))
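    // For example, Assign(kdtnumber, "KDTNumber") expands to:
    //     conf->kdtnumber == INVALID_VALUE
    //         ? index_ptr_->SetParameter("KDTNumber", std::to_string(build_cfg->kdtnumber))
    //         : index_ptr_->SetParameter("KDTNumber", std::to_string(conf->kdtnumber));
    // so any field the caller leaves at INVALID_VALUE falls back to the defaults
    // held by SPTAGParameterMgr.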

    if (index_type_ == SPTAG::IndexAlgoType::KDT) {
        auto conf = std::dynamic_pointer_cast<KDTCfg>(config);
        auto build_cfg = SPTAGParameterMgr::GetInstance().GetKDTParameters();

        Assign(kdtnumber, "KDTNumber");
        Assign(numtopdimensionkdtsplit, "NumTopDimensionKDTSplit");
        Assign(samples, "Samples");
        Assign(tptnumber, "TPTNumber");
        Assign(tptleafsize, "TPTLeafSize");
        Assign(numtopdimensiontptsplit, "NumTopDimensionTPTSplit");
        Assign(neighborhoodsize, "NeighborhoodSize");
        Assign(graphneighborhoodscale, "GraphNeighborhoodScale");
        Assign(graphcefscale, "GraphCEFScale");
        Assign(refineiterations, "RefineIterations");
        Assign(cef, "CEF");
        Assign(maxcheckforrefinegraph, "MaxCheckForRefineGraph");
        Assign(numofthreads, "NumberOfThreads");
        Assign(maxcheck, "MaxCheck");
        Assign(thresholdofnumberofcontinuousnobetterpropagation, "ThresholdOfNumberOfContinuousNoBetterPropagation");
        Assign(numberofinitialdynamicpivots, "NumberOfInitialDynamicPivots");
        Assign(numberofotherdynamicpivots, "NumberOfOtherDynamicPivots");
    } else {
        auto conf = std::dynamic_pointer_cast<BKTCfg>(config);
        auto build_cfg = SPTAGParameterMgr::GetInstance().GetBKTParameters();

        Assign(bktnumber, "BKTNumber");
        Assign(bktkmeansk, "BKTKMeansK");
        Assign(bktleafsize, "BKTLeafSize");
        Assign(samples, "Samples");
        Assign(tptnumber, "TPTNumber");
        Assign(tptleafsize, "TPTLeafSize");
        Assign(numtopdimensiontptsplit, "NumTopDimensionTPTSplit");
        Assign(neighborhoodsize, "NeighborhoodSize");
        Assign(graphneighborhoodscale, "GraphNeighborhoodScale");
        Assign(graphcefscale, "GraphCEFScale");
        Assign(refineiterations, "RefineIterations");
        Assign(cef, "CEF");
        Assign(maxcheckforrefinegraph, "MaxCheckForRefineGraph");
        Assign(numofthreads, "NumberOfThreads");
        Assign(maxcheck, "MaxCheck");
        Assign(thresholdofnumberofcontinuousnobetterpropagation, "ThresholdOfNumberOfContinuousNoBetterPropagation");
        Assign(numberofinitialdynamicpivots, "NumberOfInitialDynamicPivots");
        Assign(numberofotherdynamicpivots, "NumberOfOtherDynamicPivots");
    }
}

DatasetPtr
CPUSPTAGRNG::Search(const DatasetPtr& dataset, const Config& config) {
    SetParameters(config);
    auto tensor = dataset->tensor()[0];
    auto p = (float*)tensor->raw_mutable_data();
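    // Debug printout: dumps the first 10 x 10 block of the query tensor to stdout.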
    for (auto i = 0; i < 10; ++i) {
        for (auto j = 0; j < 10; ++j) {
            std::cout << p[i * 10 + j] << " ";
        }
        std::cout << std::endl;
    }
    std::vector<SPTAG::QueryResult> query_results = ConvertToQueryResult(dataset, config);

#pragma omp parallel for
    for (auto i = 0; i < query_results.size(); ++i) {
        auto target = (float*)query_results[i].GetTarget();
        std::cout << target[0] << ", " << target[1] << ", " << target[2] << std::endl;
        index_ptr_->SearchIndex(query_results[i]);
    }

    return ConvertToDataset(query_results);
}

int64_t
CPUSPTAGRNG::Count() {
    return index_ptr_->GetNumSamples();
}

int64_t
CPUSPTAGRNG::Dimension() {
    return index_ptr_->GetFeatureDim();
}

VectorIndexPtr
CPUSPTAGRNG::Clone() {
    KNOWHERE_THROW_MSG("not support");
}

void
CPUSPTAGRNG::Seal() {
    return;  // do nothing
}

BinarySet
CPUSPTAGRNGIndexModel::Serialize() {
    // KNOWHERE_THROW_MSG("not support"); // not support
}

void
CPUSPTAGRNGIndexModel::Load(const BinarySet& binary) {
    // KNOWHERE_THROW_MSG("not support"); // not support
}

} // namespace knowhere
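A minimal sketch of driving the class above end to end. MakeDataset() is a hypothetical helper standing in for however the caller builds a knowhere::DatasetPtr from raw float vectors (that part of the API is not shown in this diff); the KDTCfg type comes from IndexParameter.h further down.

    // Sketch only: MakeDataset() is hypothetical, everything else appears in this diff.
    auto conf = std::make_shared<knowhere::KDTCfg>();  // unset fields fall back to SPTAGParameterMgr defaults
    knowhere::CPUSPTAGRNG index("KDT");                // "KDT" or "BKT"
    index.Train(MakeDataset(base_vectors), conf);      // builds the SPTAG index; currently returns nullptr
    auto result = index.Search(MakeDataset(query_vectors), conf);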

@ -18,33 +18,37 @@
#pragma once

#include <SPTAG/AnnService/inc/Core/VectorIndex.h>

#include <cstdint>
#include <memory>
#include <string>

#include "VectorIndex.h"
#include "knowhere/index/IndexModel.h"

namespace knowhere {

class CPUKDTRNG : public VectorIndex {
class CPUSPTAGRNG : public VectorIndex {
 public:
    CPUKDTRNG() {
        index_ptr_ = SPTAG::VectorIndex::CreateInstance(SPTAG::IndexAlgoType::KDT, SPTAG::VectorValueType::Float);
        index_ptr_->SetParameter("DistCalcMethod", "L2");
    }
    explicit CPUSPTAGRNG(const std::string& IndexType);

 public:
    BinarySet
    Serialize() override;

    VectorIndexPtr
    Clone() override;

    void
    Load(const BinarySet& index_array) override;

 public:
    // PreprocessorPtr
    // BuildPreprocessor(const DatasetPtr &dataset, const Config &config) override;

    int64_t
    Count() override;

    int64_t
    Dimension() override;

@ -56,6 +60,7 @@ class CPUKDTRNG : public VectorIndex {

    DatasetPtr
    Search(const DatasetPtr& dataset, const Config& config) override;

    void
    Seal() override;

@ -66,11 +71,12 @@ class CPUKDTRNG : public VectorIndex {
 private:
    PreprocessorPtr preprocessor_;
    std::shared_ptr<SPTAG::VectorIndex> index_ptr_;
    SPTAG::IndexAlgoType index_type_;
};

using CPUKDTRNGPtr = std::shared_ptr<CPUKDTRNG>;
using CPUSPTAGRNGPtr = std::shared_ptr<CPUSPTAGRNG>;

class CPUKDTRNGIndexModel : public IndexModel {
class CPUSPTAGRNGIndexModel : public IndexModel {
 public:
    BinarySet
    Serialize() override;

@ -82,6 +88,6 @@ class CPUKDTRNGIndexModel : public IndexModel {
    std::shared_ptr<SPTAG::VectorIndex> index_;
};

using CPUKDTRNGIndexModelPtr = std::shared_ptr<CPUKDTRNGIndexModel>;
using CPUSPTAGRNGIndexModelPtr = std::shared_ptr<CPUSPTAGRNGIndexModel>;

} // namespace knowhere

@ -49,7 +49,7 @@ CopyCpuToGpu(const VectorIndexPtr& index, const int64_t& device_id, const Config
    if (auto cpu_index = std::dynamic_pointer_cast<IVFSQ>(index)) {
        return cpu_index->CopyCpuToGpu(device_id, config);
    } else if (auto cpu_index = std::dynamic_pointer_cast<IVFPQ>(index)) {
        KNOWHERE_THROW_MSG("IVFPQ not support transfer to gpu");
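        // KNOWHERE_THROW_MSG throws, so for IVFPQ the CopyCpuToGpu() call on the next line is never reached.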
        return cpu_index->CopyCpuToGpu(device_id, config);
    } else if (auto cpu_index = std::dynamic_pointer_cast<IVF>(index)) {
        return cpu_index->CopyCpuToGpu(device_id, config);
    } else if (auto cpu_index = std::dynamic_pointer_cast<IDMAP>(index)) {

@ -34,4 +34,26 @@ GetMetricType(METRICTYPE& type) {
    KNOWHERE_THROW_MSG("Metric type is invalid");
}

std::stringstream
IVFCfg::DumpImpl() {
    auto ss = Cfg::DumpImpl();
    ss << ", nlist: " << nlist << ", nprobe: " << nprobe;
    return ss;
}

std::stringstream
IVFSQCfg::DumpImpl() {
    auto ss = IVFCfg::DumpImpl();
    ss << ", nbits: " << nbits;
    return ss;
}

std::stringstream
NSGCfg::DumpImpl() {
    auto ss = IVFCfg::DumpImpl();
    ss << ", knng: " << knng << ", search_length: " << search_length << ", out_degree: " << out_degree
       << ", candidate: " << candidate_pool_size;
    return ss;
}

} // namespace knowhere
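Each DumpImpl() above appends its own fields to the stream returned by its parent class, so an NSGCfg dump contains the base Cfg fields, then the IVFCfg fields, then the NSG-specific ones. Roughly, with an illustrative prefix and values (Cfg::DumpImpl() itself is not part of this hunk):

    // <base Cfg fields>, nlist: 100, nprobe: 16, knng: 50, search_length: 60, out_degree: 50, candidate: 300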

@ -42,6 +42,32 @@ constexpr int64_t DEFAULT_OUT_DEGREE = INVALID_VALUE;
constexpr int64_t DEFAULT_CANDIDATE_SISE = INVALID_VALUE;
constexpr int64_t DEFAULT_NNG_K = INVALID_VALUE;

// SPTAG Config
constexpr int64_t DEFAULT_SAMPLES = INVALID_VALUE;
constexpr int64_t DEFAULT_TPTNUMBER = INVALID_VALUE;
constexpr int64_t DEFAULT_TPTLEAFSIZE = INVALID_VALUE;
constexpr int64_t DEFAULT_NUMTOPDIMENSIONTPTSPLIT = INVALID_VALUE;
constexpr int64_t DEFAULT_NEIGHBORHOODSIZE = INVALID_VALUE;
constexpr int64_t DEFAULT_GRAPHNEIGHBORHOODSCALE = INVALID_VALUE;
constexpr int64_t DEFAULT_GRAPHCEFSCALE = INVALID_VALUE;
constexpr int64_t DEFAULT_REFINEITERATIONS = INVALID_VALUE;
constexpr int64_t DEFAULT_CEF = INVALID_VALUE;
constexpr int64_t DEFAULT_MAXCHECKFORREFINEGRAPH = INVALID_VALUE;
constexpr int64_t DEFAULT_NUMOFTHREADS = INVALID_VALUE;
constexpr int64_t DEFAULT_MAXCHECK = INVALID_VALUE;
constexpr int64_t DEFAULT_THRESHOLDOFNUMBEROFCONTINUOUSNOBETTERPROPAGATION = INVALID_VALUE;
constexpr int64_t DEFAULT_NUMBEROFINITIALDYNAMICPIVOTS = INVALID_VALUE;
constexpr int64_t DEFAULT_NUMBEROFOTHERDYNAMICPIVOTS = INVALID_VALUE;

// KDT Config
constexpr int64_t DEFAULT_KDTNUMBER = INVALID_VALUE;
constexpr int64_t DEFAULT_NUMTOPDIMENSIONKDTSPLIT = INVALID_VALUE;

// BKT Config
constexpr int64_t DEFAULT_BKTNUMBER = INVALID_VALUE;
constexpr int64_t DEFAULT_BKTKMEANSK = INVALID_VALUE;
constexpr int64_t DEFAULT_BKTLEAFSIZE = INVALID_VALUE;

struct IVFCfg : public Cfg {
    int64_t nlist = DEFAULT_NLIST;
    int64_t nprobe = DEFAULT_NPROBE;

@ -53,6 +79,9 @@ struct IVFCfg : public Cfg {

    IVFCfg() = default;

    std::stringstream
    DumpImpl() override;

    bool
    CheckValid() override {
        return true;

@ -69,6 +98,9 @@ struct IVFSQCfg : public IVFCfg {
        : IVFCfg(dim, k, gpu_id, nlist, nprobe, type), nbits(nbits) {
    }

    std::stringstream
    DumpImpl() override;

    IVFSQCfg() = default;

    bool

@ -119,6 +151,9 @@ struct NSGCfg : public IVFCfg {

    NSGCfg() = default;

    std::stringstream
    DumpImpl() override;

    bool
    CheckValid() override {
        return true;

@ -126,8 +161,57 @@ struct NSGCfg : public IVFCfg {
};
using NSGConfig = std::shared_ptr<NSGCfg>;

struct KDTCfg : public Cfg {
    int64_t tptnubmber = -1;
struct SPTAGCfg : public Cfg {
    int64_t samples = DEFAULT_SAMPLES;
    int64_t tptnumber = DEFAULT_TPTNUMBER;
    int64_t tptleafsize = DEFAULT_TPTLEAFSIZE;
    int64_t numtopdimensiontptsplit = DEFAULT_NUMTOPDIMENSIONTPTSPLIT;
    int64_t neighborhoodsize = DEFAULT_NEIGHBORHOODSIZE;
    int64_t graphneighborhoodscale = DEFAULT_GRAPHNEIGHBORHOODSCALE;
    int64_t graphcefscale = DEFAULT_GRAPHCEFSCALE;
    int64_t refineiterations = DEFAULT_REFINEITERATIONS;
    int64_t cef = DEFAULT_CEF;
    int64_t maxcheckforrefinegraph = DEFAULT_MAXCHECKFORREFINEGRAPH;
    int64_t numofthreads = DEFAULT_NUMOFTHREADS;
    int64_t maxcheck = DEFAULT_MAXCHECK;
    int64_t thresholdofnumberofcontinuousnobetterpropagation = DEFAULT_THRESHOLDOFNUMBEROFCONTINUOUSNOBETTERPROPAGATION;
    int64_t numberofinitialdynamicpivots = DEFAULT_NUMBEROFINITIALDYNAMICPIVOTS;
    int64_t numberofotherdynamicpivots = DEFAULT_NUMBEROFOTHERDYNAMICPIVOTS;

    SPTAGCfg() = default;

    bool
    CheckValid() override {
        return true;
    };
};
using SPTAGConfig = std::shared_ptr<SPTAGCfg>;

struct KDTCfg : public SPTAGCfg {
    int64_t kdtnumber = DEFAULT_KDTNUMBER;
    int64_t numtopdimensionkdtsplit = DEFAULT_NUMTOPDIMENSIONKDTSPLIT;

    KDTCfg() = default;

    bool
    CheckValid() override {
        return true;
    };
};
using KDTConfig = std::shared_ptr<KDTCfg>;

struct BKTCfg : public SPTAGCfg {
    int64_t bktnumber = DEFAULT_BKTNUMBER;
    int64_t bktkmeansk = DEFAULT_BKTKMEANSK;
    int64_t bktleafsize = DEFAULT_BKTLEAFSIZE;

    BKTCfg() = default;

    bool
    CheckValid() override {
        return true;
    };
};
using BKTConfig = std::shared_ptr<BKTCfg>;

} // namespace knowhere
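A short sketch of how a caller is expected to fill in one of these configs; the field names come from the structs above, and anything left at INVALID_VALUE is later replaced by the SPTAGParameterMgr defaults inside CPUSPTAGRNG::SetParameters():

    auto kdt_conf = std::make_shared<knowhere::KDTCfg>();
    kdt_conf->kdtnumber = 2;    // build two KD-trees
    kdt_conf->maxcheck = 4096;  // per-query search budget
    // samples, tptnumber, ... stay at INVALID_VALUE and use the managed defaults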

@ -1,55 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <mutex>

#include "knowhere/index/vector_index/helpers/KDTParameterMgr.h"

namespace knowhere {

const std::vector<KDTParameter>&
KDTParameterMgr::GetKDTParameters() {
    return kdt_parameters_;
}

KDTParameterMgr::KDTParameterMgr() {
    kdt_parameters_ = std::vector<KDTParameter>{
        {"KDTNumber", "1"},
        {"NumTopDimensionKDTSplit", "5"},
        {"NumSamplesKDTSplitConsideration", "100"},

        {"TPTNumber", "1"},
        {"TPTLeafSize", "2000"},
        {"NumTopDimensionTPTSplit", "5"},

        {"NeighborhoodSize", "32"},
        {"GraphNeighborhoodScale", "2"},
        {"GraphCEFScale", "2"},
        {"RefineIterations", "0"},
        {"CEF", "1000"},
        {"MaxCheckForRefineGraph", "10000"},

        {"NumberOfThreads", "1"},

        {"MaxCheck", "8192"},
        {"ThresholdOfNumberOfContinuousNoBetterPropagation", "3"},
        {"NumberOfInitialDynamicPivots", "50"},
        {"NumberOfOtherDynamicPivots", "4"},
    };
}

} // namespace knowhere

@ -0,0 +1,75 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <mutex>

#include "knowhere/index/vector_index/helpers/SPTAGParameterMgr.h"

namespace knowhere {

const KDTConfig&
SPTAGParameterMgr::GetKDTParameters() {
    return kdt_config_;
}

const BKTConfig&
SPTAGParameterMgr::GetBKTParameters() {
    return bkt_config_;
}

SPTAGParameterMgr::SPTAGParameterMgr() {
    kdt_config_ = std::make_shared<KDTCfg>();
    kdt_config_->kdtnumber = 1;
    kdt_config_->numtopdimensionkdtsplit = 5;
    kdt_config_->samples = 100;
    kdt_config_->tptnumber = 1;
    kdt_config_->tptleafsize = 2000;
    kdt_config_->numtopdimensiontptsplit = 5;
    kdt_config_->neighborhoodsize = 32;
    kdt_config_->graphneighborhoodscale = 2;
    kdt_config_->graphcefscale = 2;
    kdt_config_->refineiterations = 0;
    kdt_config_->cef = 1000;
    kdt_config_->maxcheckforrefinegraph = 10000;
    kdt_config_->numofthreads = 1;
    kdt_config_->maxcheck = 8192;
    kdt_config_->thresholdofnumberofcontinuousnobetterpropagation = 3;
    kdt_config_->numberofinitialdynamicpivots = 50;
    kdt_config_->numberofotherdynamicpivots = 4;

    bkt_config_ = std::make_shared<BKTCfg>();
    bkt_config_->bktnumber = 1;
    bkt_config_->bktkmeansk = 32;
    bkt_config_->bktleafsize = 8;
    bkt_config_->samples = 100;
    bkt_config_->tptnumber = 1;
    bkt_config_->tptleafsize = 2000;
    bkt_config_->numtopdimensiontptsplit = 5;
    bkt_config_->neighborhoodsize = 32;
    bkt_config_->graphneighborhoodscale = 2;
    bkt_config_->graphcefscale = 2;
    bkt_config_->refineiterations = 0;
    bkt_config_->cef = 1000;
    bkt_config_->maxcheckforrefinegraph = 10000;
    bkt_config_->numofthreads = 1;
    bkt_config_->maxcheck = 8192;
    bkt_config_->thresholdofnumberofcontinuousnobetterpropagation = 3;
    bkt_config_->numberofinitialdynamicpivots = 50;
    bkt_config_->numberofotherdynamicpivots = 4;
}

} // namespace knowhere
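The manager is a Meyers singleton (a function-local static in GetInstance()); this is how the defaults above are read back, mirroring the calls made in CPUSPTAGRNG::SetParameters():

    const knowhere::KDTConfig& kdt_defaults = knowhere::SPTAGParameterMgr::GetInstance().GetKDTParameters();
    const knowhere::BKTConfig& bkt_defaults = knowhere::SPTAGParameterMgr::GetInstance().GetBKTParameters();
    // e.g. kdt_defaults->maxcheck == 8192, kdt_defaults->cef == 1000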

@ -22,31 +22,40 @@
#include <utility>
#include <vector>

#include <SPTAG/AnnService/inc/Core/Common.h>
#include "IndexParameter.h"

namespace knowhere {

using KDTParameter = std::pair<std::string, std::string>;
using KDTConfig = std::shared_ptr<KDTCfg>;
using BKTConfig = std::shared_ptr<BKTCfg>;

class KDTParameterMgr {
class SPTAGParameterMgr {
 public:
    const std::vector<KDTParameter>&
    const KDTConfig&
    GetKDTParameters();

    const BKTConfig&
    GetBKTParameters();

 public:
    static KDTParameterMgr&
    static SPTAGParameterMgr&
    GetInstance() {
        static KDTParameterMgr instance;
        static SPTAGParameterMgr instance;
        return instance;
    }

    KDTParameterMgr(const KDTParameterMgr&) = delete;
    KDTParameterMgr&
    operator=(const KDTParameterMgr&) = delete;
    SPTAGParameterMgr(const SPTAGParameterMgr&) = delete;

    SPTAGParameterMgr&
    operator=(const SPTAGParameterMgr&) = delete;

 private:
    KDTParameterMgr();
    SPTAGParameterMgr();

 private:
    std::vector<KDTParameter> kdt_parameters_;
    KDTConfig kdt_config_;
    BKTConfig bkt_config_;
};

} // namespace knowhere

@ -195,7 +195,7 @@ namespace SPTAG
    C = *((DimensionType*)pDataPointsMemFile);
    pDataPointsMemFile += sizeof(DimensionType);

    Initialize(R, C, (T*)pDataPointsMemFile);
    Initialize(R, C, (T*)pDataPointsMemFile, false);
    std::cout << "Load " << name << " (" << R << ", " << C << ") Finish!" << std::endl;
    return true;
}

@ -1,6 +1,7 @@
ARROW_VERSION=apache-arrow-0.14.0
ARROW_VERSION=apache-arrow-0.15.1
BOOST_VERSION=1.70.0
GTEST_VERSION=1.8.1
LAPACK_VERSION=v3.8.0
OPENBLAS_VERSION=v0.3.6
FAISS_VERSION=branch-0.3.0
FAISS_VERSION=1.6.0
MKL_VERSION=2019.5.281