mirror of https://github.com/milvus-io/milvus.git
commit 54208931d9

@@ -26,3 +26,9 @@ cmake_build
*.lo
*.tar.gz
*.log
.coverage
*.pyc
cov_html/

# temp
shards/all_in_one_with_mysql/metadata/

@@ -0,0 +1,20 @@
language: cpp
sudo: required
dist: bionic

cache:
  directories:
    - $HOME/.ccache

addons:
  apt:
    update: true

before_install:
  - source ci/travis/before-install.sh

install:
  - source $TRAVIS_BUILD_DIR/ci/travis/install_dependency.sh

script:
  - $TRAVIS_BUILD_DIR/ci/travis/travis_build.sh
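
# Note (assumption, not part of the original file): the $HOME/.ccache cache
# above only pays off if compilation is routed through ccache, e.g. by
# prepending /usr/lib/ccache to PATH, which the ci/travis scripts sourced
# above are assumed to set up.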

CHANGELOG.md

@@ -1,6 +1,94 @@
# Changelog

Please mark all changes in the change log and use the ticket from JIRA.

# Milvus 0.6.0 (TODO)

## Bug
- \#228 - Memory usage increased slowly while searching vectors
- \#246 - Exclude src/external folder from code coverage for Jenkins CI
- \#248 - Move src/external into thirdparty
- \#316 - Some files not merged after vectors added
- \#327 - Search does not use GPU when index type is FLAT
- \#331 - Add exception handling when search fails
- \#340 - Test cases failed on 0.6.0
- \#353 - Rename config.h.in to version.h.in
- \#374 - sdk_simple returns empty result
- \#377 - Creating a partition succeeds if the tag name contains only spaces
- \#397 - sdk_simple returns incorrect result
- \#399 - Creating a partition should fail if the partition tag already exists
- \#412 - Returned message is confusing when a partition is created with a null partition name
- \#416 - Dropping the same partition repeatedly succeeds
- \#440 - Query API in customization still uses old version
- \#440 - Server cannot start up with gpu_resource_config.enable=false in GPU version
- \#458 - Index data is not compatible between 0.5 and 0.6
- \#465 - Server hangs when searching with NSG index
- \#485 - Increase code coverage rate
- \#486 - GPU not used during index building
- \#497 - CPU-version search performance decreased
- \#504 - The code coverage rate of core/src/scheduler/optimizer is too low
- \#509 - IVF_PQ index build trapped in a dead loop by invalid params
- \#513 - Unittest DELETE_BY_RANGE sometimes fails
- \#523 - Erase file data from cache once the file is marked as deleted
- \#527 - FAISS benchmark not compatible with FAISS 1.6.0
- \#530 - BuildIndex stops when building index and searching simultaneously
- \#532 - Assign value to `table_name` from conftest shell
- \#533 - NSG build fails with MetricType Inner Product
- \#543 - Client raises exception in shards when search result is empty
- \#545 - Avoid dead loop of build-index thread when an error occurs
- \#547 - NSG build fails in GPU edition if gpu_enable is set to false
- \#548 - NSG search accuracy is too low
- \#552 - Server goes down while building index_type IVF_PQ in GPU edition
- \#561 - Milvus server should report an exception/error message or terminate on MySQL metadata backend error
- \#579 - Build index hangs in GPU version when gpu_resources are disabled
- \#596 - Frequent insert operations cost too much disk space
- \#599 - Build index log is incorrect
- \#602 - Optimizer specifies wrong gpu_id
- \#606 - No log generated while building index with CPU
- \#616 - IP search metric_type is not supported by IVF_PQ index
- \#631 - FAISS isn't compiled with O3 option
- \#636 - [CPU] Creating a PQ index should fail if the table metric type is Inner Product
- \#649 - Typo "partiton" should be "partition"
- \#654 - Random crash when frequently inserting vectors one by one
- \#658 - Milvus errors out when building SQ8H index without GPU resources

## Feature
- \#12 - Pure CPU version for Milvus
- \#77 - Support table partition
- \#127 - Support new index type IVFPQ
- \#226 - Experimental shards middleware for Milvus
- \#227 - Support new index types SPTAG-KDT and SPTAG-BKT
- \#346 - Support building index with multiple GPUs
- \#420 - Update shards merge part to match v0.5.3
- \#488 - Add log in scheduler/optimizer
- \#502 - C++ SDK supports IVFPQ and SPTAG
- \#560 - Add version in server config file
- \#605 - Print more messages when the server starts
- \#644 - Add a new RPC command to get the Milvus build version (CPU or GPU)

## Improvement
- \#255 - Add detailed version of ivfsq8 test report
- \#260 - C++ SDK README
- \#266 - RPC request source code refactor
- \#274 - Log the time cost of preloading data
- \#275 - Rename C++ SDK IndexType
- \#284 - Change C++ SDK to shared library
- \#306 - Use int64 for all config integers
- \#310 - Add Q&A for 'protocol https not supported or disable in libcurl' issue
- \#314 - Add Find FAISS in CMake
- \#322 - Add option to enable/disable Prometheus
- \#354 - Build migration scripts into Milvus Docker image
- \#358 - Add more information in build.sh and install.md
- \#404 - Add virtual method Init() in Pass abstract class
- \#409 - Add a Fallback pass in optimizer
- \#433 - C++ SDK query result is not easy to use
- \#449 - Add ShowPartitions example for C++ SDK
- \#470 - Small raw files should not be indexed
- \#584 - Integrate internal FAISS
- \#611 - Remove MILVUS_CPU_VERSION
- \#634 - FAISS GPU version is compiled with O0

## Task

# Milvus 0.5.3 (2019-11-13)

## Bug
@@ -87,7 +175,7 @@ Please mark all changes in the change log and use the ticket from JIRA.
- MS-658 - Fix SQ8 Hybrid can't search
- MS-665 - IVF_SQ8H search crash when no GPU resource in search_resources
- \#9 - Change default gpu_cache_capacity to 4
- \#20 - C++ SDK example gets gRPC error
- \#23 - Add unittest to improve code coverage
- \#31 - make clang-format fails after running build.sh -l
- \#39 - Creating SQ8H index hangs if using GitHub server version

@@ -139,7 +227,7 @@ Please mark all changes in the change log and use the ticket from JIRA.
- MS-635 - Add compile option to support customized faiss
- MS-660 - Add ubuntu_build_deps.sh
- \#18 - Add all test cases

# Milvus 0.4.0 (2019-09-12)

## Bug

@@ -348,11 +436,11 @@ Please mark all changes in the change log and use the ticket from JIRA.
- MS-82 - Update server startup welcome message
- MS-83 - Update vecwise to Milvus
- MS-77 - Performance issue of post-search action
- MS-22 - Enhancement for MemVector size control
- MS-92 - Unify behavior of debug and release build
- MS-98 - Install all unit tests to installation directory
- MS-115 - Change is_startup of metric_config switch from true to on
- MS-122 - Archive criteria config
- MS-124 - HasTable interface
- MS-126 - Add more error codes
- MS-128 - Change default db path

README.md
@@ -5,8 +5,9 @@
Badges: [codebeat](https://codebeat.co/projects/github-com-jinhai-cn-milvus-master), [codecov](https://codecov.io/gh/milvus-io/milvus)

[中文版](README_CN.md) | [日本語版](README_JP.md)

@@ -18,7 +19,7 @@ For more detailed introduction of Milvus and its architecture, see [Milvus overv

Milvus provides stable [Python](https://github.com/milvus-io/pymilvus), [Java](https://github.com/milvus-io/milvus-sdk-java) and [C++](https://github.com/milvus-io/milvus/tree/master/core/src/sdk) APIs.

Keep up-to-date with newest releases and latest updates by reading Milvus [release notes](https://www.milvus.io/docs/en/release/v0.5.2/).
Keep up-to-date with newest releases and latest updates by reading Milvus [release notes](https://www.milvus.io/docs/en/release/v0.5.3/).

## Get started

@@ -52,12 +53,13 @@ We use [GitHub issues](https://github.com/milvus-io/milvus/issues) to track issu

To connect with other users and contributors, welcome to join our [Slack channel](https://join.slack.com/t/milvusio/shared_invite/enQtNzY1OTQ0NDI3NjMzLWNmYmM1NmNjOTQ5MGI5NDhhYmRhMGU5M2NhNzhhMDMzY2MzNDdlYjM5ODQ5MmE3ODFlYzU3YjJkNmVlNDQ2ZTk).

## Thanks
## Contributors

We greatly appreciate the help of the following people.
Below is a list of Milvus contributors. We greatly appreciate your contributions!

- [akihoni](https://github.com/akihoni) provided the CN version of README, and found a broken link in the doc.
- [goodhamgupta](https://github.com/goodhamgupta) fixed a filename typo in the bootcamp doc.
- [erdustiggen](https://github.com/erdustiggen) changed from std::cout to LOG for error messages, and fixed a clang format issue as well as some grammatical errors.

## Resources

@@ -65,6 +67,10 @@ We greatly appreciate the help of the following people.

- [Milvus bootcamp](https://github.com/milvus-io/bootcamp)

- [Milvus test reports](https://github.com/milvus-io/milvus/tree/master/docs)

- [Milvus FAQ](https://www.milvus.io/docs/en/faq/operational_faq/)

- [Milvus Medium](https://medium.com/@milvusio)

- [Milvus CSDN](https://zilliz.blog.csdn.net/)

@@ -76,5 +82,3 @@ We greatly appreciate the help of the following people.
## License

[Apache License 2.0](LICENSE)

README_CN.md
@@ -4,8 +4,10 @@
Badges: [codebeat](https://codebeat.co/projects/github-com-jinhai-cn-milvus-master), [codecov](https://codecov.io/gh/milvus-io/milvus)

# Welcome to Milvus

@@ -15,9 +17,9 @@ Milvus is an open-source similarity search engine for massive-scale feature vectors.

For a detailed introduction to Milvus and its overall architecture, see [Milvus overview](https://www.milvus.io/docs/zh-CN/aboutmilvus/overview/).

Milvus provides stable [Python](https://github.com/milvus-io/pymilvus), [Java](https://github.com/milvus-io/milvus-sdk-java), and C++ APIs.
Milvus provides stable [Python](https://github.com/milvus-io/pymilvus), [Java](https://github.com/milvus-io/milvus-sdk-java), and [C++](https://github.com/milvus-io/milvus/tree/master/core/src/sdk) APIs.

See the [release notes](https://milvus.io/docs/zh-CN/release/v0.5.2/) for the features and updates of the latest version.
See the [release notes](https://milvus.io/docs/zh-CN/release/v0.5.3/) for the features and updates of the latest version.

## Get started with Milvus

@@ -57,6 +59,7 @@ Milvus provides stable [Python](https://github.com/milvus-io/pymilvus), [Java](

- [akihoni](https://github.com/akihoni) provided the Chinese version of the README and found an invalid link in it.
- [goodhamgupta](https://github.com/goodhamgupta) found and fixed a filename typo in the bootcamp documentation.
- [erdustiggen](https://github.com/erdustiggen) changed error messages from std::cout to LOG, and fixed a Clang format issue and some grammatical errors.

## Related links

@@ -64,6 +67,10 @@ Milvus provides stable [Python](https://github.com/milvus-io/pymilvus), [Java](

- [Milvus bootcamp](https://github.com/milvus-io/bootcamp)

- [Milvus test reports](https://github.com/milvus-io/milvus/tree/master/docs)

- [Milvus FAQ](https://www.milvus.io/docs/zh-CN/faq/operational_faq/)

- [Milvus Medium](https://medium.com/@milvusio)

- [Milvus CSDN](https://zilliz.blog.csdn.net/)

@@ -75,4 +82,3 @@ Milvus provides stable [Python](https://github.com/milvus-io/pymilvus), [Java](
## License

[Apache License 2.0](https://github.com/milvus-io/milvus/blob/master/LICENSE)

README_JP.md
@@ -5,7 +5,7 @@
Badges: [codebeat](https://codebeat.co/projects/github-com-jinhai-cn-milvus-master)

@@ -15,9 +15,9 @@

Milvus is the world's fastest similarity search engine for feature vectors. Built on heterogeneous computing architectures for maximum efficiency, it can search targets among billions of vectors in milliseconds with minimal computing resources.

Milvus provides stable Python, Java, and C++ APIs.
Milvus provides stable [Python](https://github.com/milvus-io/pymilvus), [Java](https://github.com/milvus-io/milvus-sdk-java), and [C++](https://github.com/milvus-io/milvus/tree/master/core/src/sdk) APIs.

Read the Milvus [release notes](https://milvus.io/docs/en/release/v0.5.2/) to get the latest version and updates.
Read the Milvus [release notes](https://milvus.io/docs/en/release/v0.5.3/) to get the latest version and updates.

## Getting started

@@ -46,7 +46,7 @@ To run the C++ sample code, use the following command

Contributions to this project are sincerely appreciated. To contribute to Milvus, please read the [contribution guidelines](CONTRIBUTING.md). This project follows the Milvus [code of conduct](CODE_OF_CONDUCT.md); by participating, you are expected to uphold it.

Use [GitHub issues](https://github.com/milvus-io/milvus/issues/new/choose) to report problems and bugs. For general questions, join the Milvus community.
Use [GitHub issues](https://github.com/milvus-io/milvus/issues) to report problems and bugs. For general questions, join the Milvus community.

## Join the Milvus community

@@ -59,6 +59,10 @@ To run the C++ sample code, use the following command

- [Milvus](https://github.com/milvus-io/bootcamp)

- [Milvus test reports](https://github.com/milvus-io/milvus/tree/master/docs)

- [Milvus FAQ](https://www.milvus.io/docs/en/faq/operational_faq/)

- [Milvus Medium](https://medium.com/@milvusio)

- [Milvus CSDN](https://zilliz.blog.csdn.net/)

@@ -2,7 +2,7 @@

String cron_timezone = "TZ=Asia/Shanghai"
String cron_string = BRANCH_NAME == "master" ? "H 0 * * * " : ""
cron_string = BRANCH_NAME == "0.5.2" ? "H 1 * * * " : cron_string
cron_string = BRANCH_NAME == "0.6.0" ? "H 1 * * * " : cron_string
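// For reference: with the timezone set above, branch "0.6.0" evaluates the
// ternary chain to "H 1 * * * " (a nightly timer around 01:00), "master"
// keeps "H 0 * * * ", and any other branch ends up with "" so no timer
// trigger is registered.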

pipeline {
    agent none

@@ -17,7 +17,7 @@ pipeline {
    }

    parameters{
        choice choices: ['Release', 'Debug'], description: '', name: 'BUILD_TYPE'
        choice choices: ['Release', 'Debug'], description: 'Build Type', name: 'BUILD_TYPE'
        string defaultValue: 'registry.zilliz.com', description: 'DOCKER REGISTRY URL', name: 'DOKCER_REGISTRY_URL', trim: true
        string defaultValue: 'ba070c98-c8cc-4f7c-b657-897715f359fc', description: 'DOCKER CREDENTIALS ID', name: 'DOCKER_CREDENTIALS_ID', trim: true
        string defaultValue: 'http://192.168.1.202/artifactory/milvus', description: 'JFROG ARTFACTORY URL', name: 'JFROG_ARTFACTORY_URL', trim: true

@@ -26,135 +26,285 @@ pipeline {

    environment {
        PROJECT_NAME = "milvus"
        MILVUS_ROOT_PATH="/var/lib"
        MILVUS_INSTALL_PREFIX="${env.MILVUS_ROOT_PATH}/${env.PROJECT_NAME}"
        LOWER_BUILD_TYPE = params.BUILD_TYPE.toLowerCase()
        SEMVER = "${BRANCH_NAME}"
        JOBNAMES = env.JOB_NAME.split('/')
        PIPELINE_NAME = "${JOBNAMES[0]}"
        SEMVER = "${BRANCH_NAME.contains('/') ? BRANCH_NAME.substring(BRANCH_NAME.lastIndexOf('/') + 1) : BRANCH_NAME}"
        PIPELINE_NAME = "${env.JOB_NAME.contains('/') ? env.JOB_NAME.getAt(0..(env.JOB_NAME.indexOf('/') - 1)) : env.JOB_NAME}"
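        // For reference (hypothetical inputs): a BRANCH_NAME of "origin/0.6.0"
        // makes the rewritten SEMVER "0.6.0", and a multibranch JOB_NAME such
        // as "milvus-ci/0.6.0" makes PIPELINE_NAME "milvus-ci", i.e. the
        // segment before the first '/'.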
    }

    stages {
        stage("Ubuntu 18.04") {
        stage("Ubuntu 18.04 x86_64") {
            environment {
                OS_NAME = "ubuntu18.04"
                PACKAGE_VERSION = VersionNumber([
                    versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-ubuntu18.04-x86_64-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
                ]);
                DOCKER_VERSION = "${SEMVER}-${OS_NAME}-${LOWER_BUILD_TYPE}"
                CPU_ARCH = "amd64"
            }

            stages {
                stage("Run Build") {
                    agent {
                        kubernetes {
                            label 'build'
                            defaultContainer 'jnlp'
                            yamlFile 'ci/jenkins/pod/milvus-build-env-pod.yaml'
                        }
            parallel {
                stage ("GPU Version") {
                    environment {
                        BINRARY_VERSION = "gpu"
                        PACKAGE_VERSION = VersionNumber([
                            versionNumberString : '${SEMVER}-gpu-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
                        ]);
                        DOCKER_VERSION = "${SEMVER}-gpu-${OS_NAME}-${LOWER_BUILD_TYPE}"
                    }

                    stages {
                        stage('Build') {
                            steps {
                                container('milvus-build-env') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
                                    }
                        stage("Run Build") {
                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-build"
                                    defaultContainer 'jnlp'
                                    yamlFile 'ci/jenkins/pod/milvus-gpu-version-build-env-pod.yaml'
                                }
                            }
                        }
                        stage('Code Coverage') {
                            steps {
                                container('milvus-build-env') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/step/coverage.groovy"

                            stages {
                                stage('Build') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                        }
                        stage('Upload Package') {
                            steps {
                                container('milvus-build-env') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/step/packaging.groovy"
                                stage('Code Coverage') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/coverage.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }

                stage("Publish docker images") {
                    agent {
                        kubernetes {
                            label 'publish'
                            defaultContainer 'jnlp'
                            yamlFile 'ci/jenkins/pod/docker-pod.yaml'
                        }
                    }

                    stages {
                        stage('Publish') {
                            steps {
                                container('publish-images'){
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
                                    }
                                }
                            }
                        }
                    }
                }

                stage("Deploy to Development") {
                    agent {
                        kubernetes {
                            label 'dev-test'
                            defaultContainer 'jnlp'
                            yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
                        }
                    }

                    stages {
                        stage("Deploy to Dev") {
                            steps {
                                container('milvus-test-env') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
                                    }
                                }
                            }
                        }

                        stage("Dev Test") {
                            steps {
                                container('milvus-test-env') {
                                    script {
                                        boolean isNightlyTest = isTimeTriggeredBuild()
                                        if (isNightlyTest) {
                                            load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
                                        } else {
                                            load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
                                stage('Upload Package') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                        }

                        stage ("Cleanup Dev") {
                            steps {
                                container('milvus-test-env') {
                                    script {
                                        load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
                        stage("Publish docker images") {
                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-publish"
                                    defaultContainer 'jnlp'
                                    yamlFile 'ci/jenkins/pod/docker-pod.yaml'
                                }
                            }

                            stages {
                                stage('Publish') {
                                    steps {
                                        container('publish-images') {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                        }

                        stage("Deploy to Development") {
                            environment {
                                FROMAT_SEMVER = "${env.SEMVER}".replaceAll("\\.", "-")
                                HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FROMAT_SEMVER}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}".toLowerCase()
                            }

                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-dev-test"
                                    defaultContainer 'jnlp'
                                    yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
                                }
                            }

                            stages {
                                stage("Deploy to Dev") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
                                            }
                                        }
                                    }
                                }

                                stage("Dev Test") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                boolean isNightlyTest = isTimeTriggeredBuild()
                                                if (isNightlyTest) {
                                                    load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
                                                } else {
                                                    load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
                                                }
                                            }
                                        }
                                    }
                                }

                                stage ("Cleanup Dev") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                            post {
                                unsuccessful {
                                    container('milvus-test-env') {
                                        script {
                                            load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
                post {
                    unsuccessful {
                        container('milvus-test-env') {
                            script {
                                load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
                            }

                stage ("CPU Version") {
                    environment {
                        BINRARY_VERSION = "cpu"
                        PACKAGE_VERSION = VersionNumber([
                            versionNumberString : '${SEMVER}-cpu-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
                        ]);
                        DOCKER_VERSION = "${SEMVER}-cpu-${OS_NAME}-${LOWER_BUILD_TYPE}"
                    }

                    stages {
                        stage("Run Build") {
                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-build"
                                    defaultContainer 'jnlp'
                                    yamlFile 'ci/jenkins/pod/milvus-cpu-version-build-env-pod.yaml'
                                }
                            }

                            stages {
                                stage('Build') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
                                            }
                                        }
                                    }
                                }
                                stage('Code Coverage') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/coverage.groovy"
                                            }
                                        }
                                    }
                                }
                                stage('Upload Package') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                        }

                        stage("Publish docker images") {
                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-publish"
                                    defaultContainer 'jnlp'
                                    yamlFile 'ci/jenkins/pod/docker-pod.yaml'
                                }
                            }

                            stages {
                                stage('Publish') {
                                    steps {
                                        container('publish-images'){
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                        }

                        stage("Deploy to Development") {
                            environment {
                                FROMAT_SEMVER = "${env.SEMVER}".replaceAll("\\.", "-")
                                HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FROMAT_SEMVER}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}".toLowerCase()
                            }

                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-dev-test"
                                    defaultContainer 'jnlp'
                                    yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
                                }
                            }

                            stages {
                                stage("Deploy to Dev") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
                                            }
                                        }
                                    }
                                }

                                stage("Dev Test") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                boolean isNightlyTest = isTimeTriggeredBuild()
                                                if (isNightlyTest) {
                                                    load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
                                                } else {
                                                    load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
                                                }
                                            }
                                        }
                                    }
                                }

                                stage ("Cleanup Dev") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                            post {
                                unsuccessful {
                                    container('milvus-test-env') {
                                        script {
                                            load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}

@@ -0,0 +1,479 @@
#!/usr/bin/env groovy

pipeline {
    agent none

    options {
        timestamps()
    }

    parameters{
        choice choices: ['Release', 'Debug'], description: 'Build Type', name: 'BUILD_TYPE'
        string defaultValue: 'registry.zilliz.com', description: 'DOCKER REGISTRY URL', name: 'DOKCER_REGISTRY_URL', trim: true
        string defaultValue: 'a54e38ef-c424-4ea9-9224-b25fc20e3924', description: 'DOCKER CREDENTIALS ID', name: 'DOCKER_CREDENTIALS_ID', trim: true
        string defaultValue: 'http://192.168.1.201/artifactory/milvus', description: 'JFROG ARTFACTORY URL', name: 'JFROG_ARTFACTORY_URL', trim: true
        string defaultValue: '76fd48ab-2b8e-4eed-834d-2eefd23bb3a6', description: 'JFROG CREDENTIALS ID', name: 'JFROG_CREDENTIALS_ID', trim: true
    }

    environment {
        PROJECT_NAME = "milvus"
        MILVUS_ROOT_PATH="/var/lib"
        MILVUS_INSTALL_PREFIX="${env.MILVUS_ROOT_PATH}/${env.PROJECT_NAME}"
        LOWER_BUILD_TYPE = params.BUILD_TYPE.toLowerCase()
        SEMVER = "${BRANCH_NAME.contains('/') ? BRANCH_NAME.substring(BRANCH_NAME.lastIndexOf('/') + 1) : BRANCH_NAME}"
        PIPELINE_NAME = "${env.JOB_NAME.contains('/') ? env.JOB_NAME.getAt(0..(env.JOB_NAME.indexOf('/') - 1)) : env.JOB_NAME}"
    }

    stages {
        stage("Ubuntu 18.04 x86_64") {
            environment {
                OS_NAME = "ubuntu18.04"
                CPU_ARCH = "amd64"
            }

            parallel {
                stage ("GPU Version") {
                    environment {
                        BINRARY_VERSION = "gpu"
                        PACKAGE_VERSION = VersionNumber([
                            versionNumberString : '${SEMVER}-gpu-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
                        ]);
                        DOCKER_VERSION = "${SEMVER}-gpu-${OS_NAME}-${LOWER_BUILD_TYPE}"
                    }
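                    // Worked example (hypothetical values): with SEMVER "0.6.0",
                    // a Release build on 2019-11-22 and the third build of that
                    // day, the VersionNumber plugin expands PACKAGE_VERSION to
                    // "0.6.0-gpu-ubuntu18.04-amd64-release-20191122-3" and
                    // DOCKER_VERSION to "0.6.0-gpu-ubuntu18.04-release".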

                    stages {
                        stage("Run Build") {
                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-build"
                                    defaultContainer 'jnlp'
                                    yaml """
apiVersion: v1
kind: Pod
metadata:
  name: milvus-gpu-build-env
  labels:
    app: milvus
    componet: gpu-build-env
spec:
  containers:
  - name: milvus-gpu-build-env
    image: registry.zilliz.com/milvus/milvus-gpu-build-env:v0.6.0-ubuntu18.04
    env:
    - name: POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    - name: BUILD_ENV_IMAGE_ID
      value: "da9023b0f858f072672f86483a869aa87e90a5140864f89e5a012ec766d96dea"
    command:
    - cat
    tty: true
    resources:
      limits:
        memory: "24Gi"
        cpu: "8.0"
        nvidia.com/gpu: 1
      requests:
        memory: "16Gi"
        cpu: "4.0"
  - name: milvus-mysql
    image: mysql:5.6
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: 123456
    ports:
    - containerPort: 3306
      name: mysql
"""
                                }
                            }

                            stages {
                                stage('Build') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
                                            }
                                        }
                                    }
                                }
                                stage('Code Coverage') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/internalCoverage.groovy"
                                            }
                                        }
                                    }
                                }
                                stage('Upload Package') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                        }

                        stage("Publish docker images") {
                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-publish"
                                    defaultContainer 'jnlp'
                                    yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    componet: docker
spec:
  containers:
  - name: publish-images
    image: registry.zilliz.com/library/docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
"""
                                }
                            }

                            stages {
                                stage('Publish') {
                                    steps {
                                        container('publish-images') {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                        }

                        stage("Deploy to Development") {
                            environment {
                                FROMAT_SEMVER = "${env.SEMVER}".replaceAll("\\.", "-")
                                HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FROMAT_SEMVER}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}".toLowerCase()
                            }

                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-dev-test"
                                    defaultContainer 'jnlp'
                                    yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    componet: test-env
spec:
  containers:
  - name: milvus-test-env
    image: registry.zilliz.com/milvus/milvus-test-env:v0.1
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                                }
                            }

                            stages {
                                stage("Deploy to Dev") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
                                            }
                                        }
                                    }
                                }

                                stage("Dev Test") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                boolean isNightlyTest = isTimeTriggeredBuild()
                                                if (isNightlyTest) {
                                                    load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
                                                } else {
                                                    load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
                                                }
                                            }
                                        }
                                    }
                                }

                                stage ("Cleanup Dev") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                            post {
                                unsuccessful {
                                    container('milvus-test-env') {
                                        script {
                                            load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
                                        }
                                    }
                                }
                            }
                        }
                    }
                }

                stage ("CPU Version") {
                    environment {
                        BINRARY_VERSION = "cpu"
                        PACKAGE_VERSION = VersionNumber([
                            versionNumberString : '${SEMVER}-cpu-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
                        ]);
                        DOCKER_VERSION = "${SEMVER}-cpu-${OS_NAME}-${LOWER_BUILD_TYPE}"
                    }

                    stages {
                        stage("Run Build") {
                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-build"
                                    defaultContainer 'jnlp'
                                    yaml """
apiVersion: v1
kind: Pod
metadata:
  name: milvus-cpu-build-env
  labels:
    app: milvus
    componet: cpu-build-env
spec:
  containers:
  - name: milvus-cpu-build-env
    image: registry.zilliz.com/milvus/milvus-cpu-build-env:v0.6.0-ubuntu18.04
    env:
    - name: POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    - name: BUILD_ENV_IMAGE_ID
      value: "23476391bec80c64f10d44a6370c73c71f011a6b95114b10ff82a60e771e11c7"
    command:
    - cat
    tty: true
    resources:
      limits:
        memory: "24Gi"
        cpu: "8.0"
      requests:
        memory: "16Gi"
        cpu: "4.0"
  - name: milvus-mysql
    image: mysql:5.6
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: 123456
    ports:
    - containerPort: 3306
      name: mysql
"""
                                }
                            }

                            stages {
                                stage('Build') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/build.groovy"
                                            }
                                        }
                                    }
                                }
                                stage('Code Coverage') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/internalCoverage.groovy"
                                            }
                                        }
                                    }
                                }
                                stage('Upload Package') {
                                    steps {
                                        container("milvus-${env.BINRARY_VERSION}-build-env") {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/package.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                        }

                        stage("Publish docker images") {
                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-publish"
                                    defaultContainer 'jnlp'
                                    yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    componet: docker
spec:
  containers:
  - name: publish-images
    image: registry.zilliz.com/library/docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
"""
                                }
                            }

                            stages {
                                stage('Publish') {
                                    steps {
                                        container('publish-images'){
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/publishImages.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                        }

                        stage("Deploy to Development") {
                            environment {
                                FROMAT_SEMVER = "${env.SEMVER}".replaceAll("\\.", "-")
                                HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FROMAT_SEMVER}-${env.BUILD_NUMBER}-single-${env.BINRARY_VERSION}".toLowerCase()
                            }

                            agent {
                                kubernetes {
                                    label "${env.BINRARY_VERSION}-dev-test"
                                    defaultContainer 'jnlp'
                                    yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    componet: test-env
spec:
  containers:
  - name: milvus-test-env
    image: registry.zilliz.com/milvus/milvus-test-env:v0.1
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                                }
                            }

                            stages {
                                stage("Deploy to Dev") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/deploySingle2Dev.groovy"
                                            }
                                        }
                                    }
                                }

                                stage("Dev Test") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                boolean isNightlyTest = isTimeTriggeredBuild()
                                                if (isNightlyTest) {
                                                    load "${env.WORKSPACE}/ci/jenkins/step/singleDevNightlyTest.groovy"
                                                } else {
                                                    load "${env.WORKSPACE}/ci/jenkins/step/singleDevTest.groovy"
                                                }
                                            }
                                        }
                                    }
                                }

                                stage ("Cleanup Dev") {
                                    steps {
                                        container('milvus-test-env') {
                                            script {
                                                load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
                                            }
                                        }
                                    }
                                }
                            }
                            post {
                                unsuccessful {
                                    container('milvus-test-env') {
                                        script {
                                            load "${env.WORKSPACE}/ci/jenkins/step/cleanupSingleDev.groovy"
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}

boolean isTimeTriggeredBuild() {
    if (currentBuild.getBuildCauses('hudson.triggers.TimerTrigger$TimerTriggerCause').size() != 0) {
        return true
    }
    return false
}
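
The helper above distinguishes nightly cron runs from push- or manually-triggered ones: getBuildCauses(className) returns only the causes of the current build that match the given class, so a non-empty list means the cron timer fired. A minimal usage sketch (the echo messages are illustrative only):

```groovy
// Branch on the trigger type, as the "Dev Test" stages above do.
if (isTimeTriggeredBuild()) {
    echo "Timer-triggered (nightly) run: full test suite"
} else {
    echo "Push or manual run: level-1 tests only"
}
```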

@@ -0,0 +1,36 @@
apiVersion: v1
kind: Pod
metadata:
  name: milvus-cpu-build-env
  labels:
    app: milvus
    componet: cpu-build-env
spec:
  containers:
  - name: milvus-cpu-build-env
    image: registry.zilliz.com/milvus/milvus-cpu-build-env:v0.6.0-ubuntu18.04
    env:
    - name: POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    - name: BUILD_ENV_IMAGE_ID
      value: "23476391bec80c64f10d44a6370c73c71f011a6b95114b10ff82a60e771e11c7"
    command:
    - cat
    tty: true
    resources:
      limits:
        memory: "12Gi"
        cpu: "8.0"
      requests:
        memory: "8Gi"
        cpu: "4.0"
  - name: milvus-mysql
    image: mysql:5.6
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: 123456
    ports:
    - containerPort: 3306
      name: mysql

@@ -1,29 +1,31 @@
apiVersion: v1
kind: Pod
metadata:
  name: milvus-build-env
  name: milvus-gpu-build-env
  labels:
    app: milvus
    componet: build-env
    componet: gpu-build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-build-env:v0.5.1-ubuntu18.04
  - name: milvus-gpu-build-env
    image: registry.zilliz.com/milvus/milvus-gpu-build-env:v0.6.0-ubuntu18.04
    env:
    - name: POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    - name: BUILD_ENV_IMAGE_ID
      value: "da9023b0f858f072672f86483a869aa87e90a5140864f89e5a012ec766d96dea"
    command:
    - cat
    tty: true
    resources:
      limits:
        memory: "32Gi"
        memory: "12Gi"
        cpu: "8.0"
        nvidia.com/gpu: 1
      requests:
        memory: "16Gi"
        memory: "8Gi"
        cpu: "4.0"
  - name: milvus-mysql
    image: mysql:5.6

@@ -1,13 +0,0 @@
#!/usr/bin/env bash

set -ex

pip3 install -r requirements.txt
./yaml_processor.py merge -f /opt/milvus/conf/server_config.yaml -m ../yaml/update_server_config.yaml -i && \
rm /opt/milvus/conf/server_config.yaml.bak

if [ -d "/opt/milvus/unittest" ]; then
    rm -rf "/opt/milvus/unittest"
fi

tar -zcvf ./${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz -C /opt/ milvus

@@ -1,9 +1,13 @@
timeout(time: 60, unit: 'MINUTES') {
    dir ("ci/jenkins/scripts") {
        sh "./build.sh -l"
    dir ("ci/scripts") {
        withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
        sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -d /opt/milvus -j -u -c"
            def checkResult = sh(script: "./check_ccache.sh -l ${params.JFROG_ARTFACTORY_URL}/ccache", returnStatus: true)
            if ("${env.BINRARY_VERSION}" == "gpu") {
                sh ". ./before-install.sh && ./build.sh -t ${params.BUILD_TYPE} -o ${env.MILVUS_INSTALL_PREFIX} -l -g -x -u -c"
            } else {
                sh ". ./before-install.sh && ./build.sh -t ${params.BUILD_TYPE} -o ${env.MILVUS_INSTALL_PREFIX} -l -u -c"
            }
            sh "./update_ccache.sh -l ${params.JFROG_ARTFACTORY_URL}/ccache -u ${USERNAME} -p ${PASSWORD}"
        }
    }
}

@@ -1,12 +1,12 @@
try {
    def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu", returnStatus: true
    def helmResult = sh script: "helm status ${env.HELM_RELEASE_NAME}", returnStatus: true
    if (!helmResult) {
        sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
        sh "helm del --purge ${env.HELM_RELEASE_NAME}"
    }
} catch (exc) {
    def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu", returnStatus: true
    def helmResult = sh script: "helm status ${env.HELM_RELEASE_NAME}", returnStatus: true
    if (!helmResult) {
        sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
        sh "helm del --purge ${env.HELM_RELEASE_NAME}"
    }
    throw exc
}
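
Both branches guard the delete the same way: with returnStatus: true, sh returns the command's exit code instead of failing the build, so a zero exit code (falsy after !) means the Helm release exists and can be purged. A minimal sketch of the pattern, with my-release as a placeholder name:

```groovy
// Delete a Helm release only if it is actually installed (Helm 2 syntax, as above).
def rc = sh(script: "helm status my-release", returnStatus: true)
if (rc == 0) {
    sh "helm del --purge my-release"
}
```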

@@ -1,6 +1,6 @@
timeout(time: 30, unit: 'MINUTES') {
    dir ("ci/jenkins/scripts") {
        sh "./coverage.sh -o /opt/milvus -u root -p 123456 -t \$POD_IP"
    dir ("ci/scripts") {
        sh "./coverage.sh -o ${env.MILVUS_INSTALL_PREFIX} -u root -p 123456 -t \$POD_IP"
        // Set some env variables so codecov detection script works correctly
        withCredentials([[$class: 'StringBinding', credentialsId: "${env.PIPELINE_NAME}-codecov-token", variable: 'CODECOV_TOKEN']]) {
            sh 'curl -s https://codecov.io/bash | bash -s - -f output_new.info || echo "Codecov did not collect coverage reports"'

@@ -1,9 +1,9 @@
sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo update'
dir ('milvus-helm') {
    checkout([$class: 'GitSCM', branches: [[name: "0.5.3"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.3:refs/remotes/origin/0.5.3"]]])
    dir ("milvus-gpu") {
        sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/sqlite_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
    checkout([$class: 'GitSCM', branches: [[name: "0.6.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.6.0:refs/remotes/origin/0.6.0"]]])
    dir ("milvus") {
        sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.HELM_RELEASE_NAME} -f ci/db_backend/sqlite_${env.BINRARY_VERSION}_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
    }
}

@@ -0,0 +1,6 @@
timeout(time: 30, unit: 'MINUTES') {
    dir ("ci/scripts") {
        sh "./coverage.sh -o ${env.MILVUS_INSTALL_PREFIX} -u root -p 123456 -t \$POD_IP"
    }
}

@@ -0,0 +1,15 @@
timeout(time: 5, unit: 'MINUTES') {
    // dir ("ci/jenkins/scripts") {
    //     sh "pip3 install -r requirements.txt"
    //     sh "./yaml_processor.py merge -f ${env.MILVUS_INSTALL_PREFIX}/conf/server_config.yaml -m ../yaml/update_server_config.yaml -i && rm ${env.MILVUS_INSTALL_PREFIX}/conf/server_config.yaml.bak"
    // }

    sh "rm -rf ${env.MILVUS_INSTALL_PREFIX}/unittest"
    sh "tar -zcvf ./${env.PROJECT_NAME}-${env.PACKAGE_VERSION}.tar.gz -C ${env.MILVUS_ROOT_PATH}/ ${env.PROJECT_NAME}"
    withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
        def uploadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -T ./${env.PROJECT_NAME}-${env.PACKAGE_VERSION}.tar.gz ${params.JFROG_ARTFACTORY_URL}/milvus/package/${env.PROJECT_NAME}-${env.PACKAGE_VERSION}.tar.gz")
        if (uploadStatus != 0) {
            error("\" ${env.PROJECT_NAME}-${env.PACKAGE_VERSION}.tar.gz \" upload to \" ${params.JFROG_ARTFACTORY_URL}/milvus/package/${env.PROJECT_NAME}-${env.PACKAGE_VERSION}.tar.gz \" failed!")
        }
    }
}

@@ -1,11 +0,0 @@
timeout(time: 5, unit: 'MINUTES') {
    dir ("ci/jenkins/scripts") {
        sh "./packaging.sh"
        withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
            def uploadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -T ./${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz ${params.JFROG_ARTFACTORY_URL}/milvus/package/${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz")
            if (uploadStatus != 0) {
                error("\" ${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz \" upload to \" ${params.JFROG_ARTFACTORY_URL}/milvus/package/${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz \" failed!")
            }
        }
    }
}

@@ -1,47 +1,45 @@
container('publish-images') {
    timeout(time: 15, unit: 'MINUTES') {
        dir ("docker/deploy/${OS_NAME}") {
            def binaryPackage = "${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz"
timeout(time: 15, unit: 'MINUTES') {
    dir ("docker/deploy/${env.BINRARY_VERSION}/${env.OS_NAME}") {
        def binaryPackage = "${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz"

            withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
                def downloadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -O ${params.JFROG_ARTFACTORY_URL}/milvus/package/${binaryPackage}")
        withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
            def downloadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -O ${params.JFROG_ARTFACTORY_URL}/milvus/package/${binaryPackage}")

                if (downloadStatus != 0) {
                    error("\" Download \" ${params.JFROG_ARTFACTORY_URL}/milvus/package/${binaryPackage} \" failed!")
                }
            if (downloadStatus != 0) {
                error("\" Download \" ${params.JFROG_ARTFACTORY_URL}/milvus/package/${binaryPackage} \" failed!")
            }
                sh "tar zxvf ${binaryPackage}"
                def imageName = "${PROJECT_NAME}/engine:${DOCKER_VERSION}"
        }
        sh "tar zxvf ${binaryPackage}"
        def imageName = "${PROJECT_NAME}/engine:${DOCKER_VERSION}"

                try {
                    def isExistSourceImage = sh(returnStatus: true, script: "docker inspect --type=image ${imageName} 2>&1 > /dev/null")
                    if (isExistSourceImage == 0) {
                        def removeSourceImageStatus = sh(returnStatus: true, script: "docker rmi ${imageName}")
                    }

                    def customImage = docker.build("${imageName}")

                    def isExistTargeImage = sh(returnStatus: true, script: "docker inspect --type=image ${params.DOKCER_REGISTRY_URL}/${imageName} 2>&1 > /dev/null")
                    if (isExistTargeImage == 0) {
                        def removeTargeImageStatus = sh(returnStatus: true, script: "docker rmi ${params.DOKCER_REGISTRY_URL}/${imageName}")
                    }

                    docker.withRegistry("https://${params.DOKCER_REGISTRY_URL}", "${params.DOCKER_CREDENTIALS_ID}") {
                        customImage.push()
                    }
                } catch (exc) {
                    throw exc
                } finally {
                    def isExistSourceImage = sh(returnStatus: true, script: "docker inspect --type=image ${imageName} 2>&1 > /dev/null")
                    if (isExistSourceImage == 0) {
                        def removeSourceImageStatus = sh(returnStatus: true, script: "docker rmi ${imageName}")
                    }

                    def isExistTargeImage = sh(returnStatus: true, script: "docker inspect --type=image ${params.DOKCER_REGISTRY_URL}/${imageName} 2>&1 > /dev/null")
                    if (isExistTargeImage == 0) {
                        def removeTargeImageStatus = sh(returnStatus: true, script: "docker rmi ${params.DOKCER_REGISTRY_URL}/${imageName}")
                    }
        try {
            def isExistSourceImage = sh(returnStatus: true, script: "docker inspect --type=image ${imageName} 2>&1 > /dev/null")
            if (isExistSourceImage == 0) {
                def removeSourceImageStatus = sh(returnStatus: true, script: "docker rmi ${imageName}")
            }
        }

            def customImage = docker.build("${imageName}")

            def isExistTargeImage = sh(returnStatus: true, script: "docker inspect --type=image ${params.DOKCER_REGISTRY_URL}/${imageName} 2>&1 > /dev/null")
            if (isExistTargeImage == 0) {
                def removeTargeImageStatus = sh(returnStatus: true, script: "docker rmi ${params.DOKCER_REGISTRY_URL}/${imageName}")
            }

            docker.withRegistry("https://${params.DOKCER_REGISTRY_URL}", "${params.DOCKER_CREDENTIALS_ID}") {
                customImage.push()
            }
        } catch (exc) {
            throw exc
        } finally {
            def isExistSourceImage = sh(returnStatus: true, script: "docker inspect --type=image ${imageName} 2>&1 > /dev/null")
            if (isExistSourceImage == 0) {
                def removeSourceImageStatus = sh(returnStatus: true, script: "docker rmi ${imageName}")
            }

            def isExistTargeImage = sh(returnStatus: true, script: "docker inspect --type=image ${params.DOKCER_REGISTRY_URL}/${imageName} 2>&1 > /dev/null")
            if (isExistTargeImage == 0) {
                def removeTargeImageStatus = sh(returnStatus: true, script: "docker rmi ${params.DOKCER_REGISTRY_URL}/${imageName}")
            }
        }
    }
}
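
One subtlety in the docker inspect probes above: the redirection 2>&1 > /dev/null points stderr at the original stdout before stdout is silenced, so error text still reaches the build log; only the exit code matters to returnStatus. A sketch of the conventional ordering that silences both streams (imageName as defined above):

```groovy
// Quiet existence check for an image: both stdout and stderr are discarded.
def exists = sh(returnStatus: true, script: "docker inspect --type=image ${imageName} > /dev/null 2>&1") == 0
```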

@@ -1,22 +1,22 @@
timeout(time: 90, unit: 'MINUTES') {
    dir ("tests/milvus_python_test") {
        sh 'python3 -m pip install -r requirements.txt'
        sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
        sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --ip ${env.HELM_RELEASE_NAME}-engine.milvus.svc.cluster.local"
    }
    // mysql database backend test
    load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"
    load "ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"

    if (!fileExists('milvus-helm')) {
        dir ("milvus-helm") {
            checkout([$class: 'GitSCM', branches: [[name: "0.5.3"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.3:refs/remotes/origin/0.5.3"]]])
            checkout([$class: 'GitSCM', branches: [[name: "0.6.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.6.0:refs/remotes/origin/0.6.0"]]])
        }
    }
    dir ("milvus-helm") {
        dir ("milvus-gpu") {
            sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
        dir ("milvus") {
            sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.HELM_RELEASE_NAME} -f ci/db_backend/mysql_${env.BINRARY_VERSION}_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
        }
    }
    dir ("tests/milvus_python_test") {
        sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
        sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --ip ${env.HELM_RELEASE_NAME}-engine.milvus.svc.cluster.local"
    }
}

@@ -1,24 +1,23 @@
timeout(time: 60, unit: 'MINUTES') {
    dir ("tests/milvus_python_test") {
        sh 'python3 -m pip install -r requirements.txt'
        sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
        sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.HELM_RELEASE_NAME}-engine.milvus.svc.cluster.local"
    }
    // mysql database backend test
    // load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"

    // Remove mysql-version tests: 10-28
    // mysql database backend test
    // load "ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"

    // if (!fileExists('milvus-helm')) {
    //     dir ("milvus-helm") {
    //         checkout([$class: 'GitSCM', branches: [[name: "0.5.3"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.3:refs/remotes/origin/0.5.3"]]])
    //         checkout([$class: 'GitSCM', branches: [[name: "0.6.0"]], userRemoteConfigs: [[url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.6.0:refs/remotes/origin/0.6.0"]]])
    //     }
    // }
    // dir ("milvus-helm") {
    //     dir ("milvus-gpu") {
    //         sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/mysql_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
    //     dir ("milvus") {
    //         sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.HELM_RELEASE_NAME} -f ci/db_backend/mysql_${env.BINRARY_VERSION}_values.yaml -f ci/filebeat/values.yaml --namespace milvus ."
    //     }
    // }
    // dir ("tests/milvus_python_test") {
    //     sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
    //     sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.HELM_RELEASE_NAME}-engine.milvus.svc.cluster.local"
    // }
}

@@ -1,2 +1,2 @@
db_config:
  primary_path: /opt/milvus
  primary_path: /var/lib/milvus

@@ -1,13 +0,0 @@
try {
    def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
    if (!result) {
        sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
    }
} catch (exc) {
    def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
    if (!result) {
        sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
    }
    throw exc
}

@@ -1,13 +0,0 @@
try {
    def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster", returnStatus: true
    if (!result) {
        sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
    }
} catch (exc) {
    def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster", returnStatus: true
    if (!result) {
        sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
    }
    throw exc
}
|

@@ -1,24 +0,0 @@
try {
    sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
    sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
    sh 'helm repo update'
    dir ("milvus-helm") {
        checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
        dir ("milvus/milvus-cluster") {
            sh "helm install --wait --timeout 300 --set roServers.image.tag=${DOCKER_VERSION} --set woServers.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP -f ci/values.yaml --name ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster --namespace milvus-cluster --version 0.5.0 . "
        }
    }
    /*
    timeout(time: 2, unit: 'MINUTES') {
        waitUntil {
            def result = sh script: "nc -z -w 3 ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster-milvus-cluster-proxy.milvus-cluster.svc.cluster.local 19530", returnStatus: true
            return !result
        }
    }
    */
} catch (exc) {
    echo 'Helm running failed!'
    sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
    throw exc
}

@@ -1,12 +0,0 @@
timeout(time: 25, unit: 'MINUTES') {
    try {
        dir ("${PROJECT_NAME}_test") {
            checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            sh 'python3 -m pip install -r requirements_cluster.txt'
            sh "pytest . --alluredir=cluster_test_out --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster-milvus-cluster-proxy.milvus-cluster.svc.cluster.local"
        }
    } catch (exc) {
        echo 'Milvus Cluster Test Failed !'
        throw exc
    }
}
@ -1,16 +0,0 @@
try {
    sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
    sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
    sh 'helm repo update'
    dir ("milvus-helm") {
        checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
        dir ("milvus/milvus-gpu") {
            sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/values.yaml --namespace milvus-1 --version 0.5.0 ."
        }
    }
} catch (exc) {
    echo 'Helm running failed!'
    sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
    throw exc
}

@ -1,16 +0,0 @@
try {
    sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
    sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
    sh 'helm repo update'
    dir ("milvus-helm") {
        checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
        dir ("milvus/milvus-gpu") {
            sh "helm install --wait --timeout 300 --set engine.image.repository=\"zilliz.azurecr.cn/milvus/engine\" --set engine.image.tag=${DOCKER_VERSION} --set expose.type=loadBalancer --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/values.yaml --namespace milvus-1 --version 0.5.0 ."
        }
    }
} catch (exc) {
    echo 'Helm running failed!'
    sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
    throw exc
}

@ -1,28 +0,0 @@
timeout(time: 30, unit: 'MINUTES') {
    try {
        dir ("${PROJECT_NAME}_test") {
            checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            sh 'python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com'
            sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-1.svc.cluster.local --internal=true"
        }
        // mysql database backend test
        load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"

        if (!fileExists('milvus-helm')) {
            dir ("milvus-helm") {
                checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            }
        }
        dir ("milvus-helm") {
            dir ("milvus/milvus-gpu") {
                sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/db_backend/mysql_values.yaml --namespace milvus-2 --version 0.5.0 ."
            }
        }
        dir ("${PROJECT_NAME}_test") {
            sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-2.svc.cluster.local --internal=true"
        }
    } catch (exc) {
        echo 'Milvus Test Failed !'
        throw exc
    }
}

@ -1,29 +0,0 @@
timeout(time: 60, unit: 'MINUTES') {
    try {
        dir ("${PROJECT_NAME}_test") {
            checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            sh 'python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com'
            sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-1.svc.cluster.local --internal=true"
        }

        // mysql database backend test
        load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"

        if (!fileExists('milvus-helm')) {
            dir ("milvus-helm") {
                checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            }
        }
        dir ("milvus-helm") {
            dir ("milvus/milvus-gpu") {
                sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/db_backend/mysql_values.yaml --namespace milvus-2 --version 0.4.0 ."
            }
        }
        dir ("${PROJECT_NAME}_test") {
            sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-2.svc.cluster.local --internal=true"
        }
    } catch (exc) {
        echo 'Milvus Test Failed !'
        throw exc
    }
}

@ -1,30 +0,0 @@
container('milvus-build-env') {
    timeout(time: 120, unit: 'MINUTES') {
        gitlabCommitStatus(name: 'Build Engine') {
            dir ("milvus_engine") {
                try {
                    checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption',disableSubmodules: false,parentCredentials: true,recursiveSubmodules: true,reference: '',trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])

                    dir ("core") {
                        sh "git config --global user.email \"test@zilliz.com\""
                        sh "git config --global user.name \"test\""
                        withCredentials([usernamePassword(credentialsId: "${params.JFROG_USER}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
                            sh "./build.sh -l"
                            sh "rm -rf cmake_build"
                            sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' \
                                && export JFROG_USER_NAME='${USERNAME}' \
                                && export JFROG_PASSWORD='${PASSWORD}' \
                                && export FAISS_URL='http://192.168.1.105:6060/jinhai/faiss/-/archive/branch-0.3.0/faiss-branch-0.3.0.tar.gz' \
                                && ./build.sh -t ${params.BUILD_TYPE} -d /opt/milvus -j -u -c"

                            sh "./coverage.sh -u root -p 123456 -t \$POD_IP"
                        }
                    }
                } catch (exc) {
                    updateGitlabCommitStatus name: 'Build Engine', state: 'failed'
                    throw exc
                }
            }
        }
    }
}

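coverage.sh talks to the MySQL sidecar at $POD_IP right after the build; if the sidecar is slow to start, the first connection can fail. A hedged readiness-wait sketch, where the credentials mirror the pod spec used by this job and the presence of mysqladmin in the build image is an assumption:

#!/bin/bash
# Hypothetical wait-for-MySQL step before running coverage.sh.
for i in $(seq 1 30); do
    if mysqladmin ping -h "${POD_IP}" -u root -p123456 --silent; then
        break                          # sidecar is accepting connections
    fi
    sleep 2
done
./coverage.sh -u root -p 123456 -t "${POD_IP}"
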
@ -1,28 +0,0 @@
container('milvus-build-env') {
    timeout(time: 120, unit: 'MINUTES') {
        gitlabCommitStatus(name: 'Build Engine') {
            dir ("milvus_engine") {
                try {
                    checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption',disableSubmodules: false,parentCredentials: true,recursiveSubmodules: true,reference: '',trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])

                    dir ("core") {
                        sh "git config --global user.email \"test@zilliz.com\""
                        sh "git config --global user.name \"test\""
                        withCredentials([usernamePassword(credentialsId: "${params.JFROG_USER}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
                            sh "./build.sh -l"
                            sh "rm -rf cmake_build"
                            sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' \
                                && export JFROG_USER_NAME='${USERNAME}' \
                                && export JFROG_PASSWORD='${PASSWORD}' \
                                && export FAISS_URL='http://192.168.1.105:6060/jinhai/faiss/-/archive/branch-0.3.0/faiss-branch-0.3.0.tar.gz' \
                                && ./build.sh -t ${params.BUILD_TYPE} -j -d /opt/milvus"
                        }
                    }
                } catch (exc) {
                    updateGitlabCommitStatus name: 'Build Engine', state: 'failed'
                    throw exc
                }
            }
        }
    }
}

@ -1,38 +0,0 @@
container('publish-docker') {
    timeout(time: 15, unit: 'MINUTES') {
        gitlabCommitStatus(name: 'Publish Engine Docker') {
            try {
                dir ("${PROJECT_NAME}_build") {
                    checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:build/milvus_build.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
                    dir ("docker/deploy/ubuntu16.04/free_version") {
                        sh "curl -O -u anonymous: ftp://192.168.1.126/data/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
                        sh "tar zxvf ${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
                        try {
                            def customImage = docker.build("${PROJECT_NAME}/engine:${DOCKER_VERSION}")
                            docker.withRegistry('https://registry.zilliz.com', "${params.DOCKER_PUBLISH_USER}") {
                                customImage.push()
                            }
                            docker.withRegistry('https://zilliz.azurecr.cn', "${params.AZURE_DOCKER_PUBLISH_USER}") {
                                customImage.push()
                            }
                            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                                updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'success'
                                echo "Docker Pull Command: docker pull registry.zilliz.com/${PROJECT_NAME}/engine:${DOCKER_VERSION}"
                            }
                        } catch (exc) {
                            updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'canceled'
                            throw exc
                        } finally {
                            sh "docker rmi ${PROJECT_NAME}/engine:${DOCKER_VERSION}"
                        }
                    }
                }
            } catch (exc) {
                updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'failed'
                echo 'Publish docker failed!'
                throw exc
            }
        }
    }
}

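The Groovy above builds one image and pushes it to two registries before removing the local tag to reclaim disk. An equivalent plain-docker sketch, with the registry hosts taken from the snippet and login handling deliberately elided:

#!/bin/bash
# Hypothetical shell restatement of the build-push-cleanup flow above.
set -e
IMAGE="${PROJECT_NAME}/engine:${DOCKER_VERSION}"
docker build -t "${IMAGE}" .
for REGISTRY in registry.zilliz.com zilliz.azurecr.cn; do
    docker tag "${IMAGE}" "${REGISTRY}/${IMAGE}"
    docker push "${REGISTRY}/${IMAGE}"   # assumes docker login already done
done
docker rmi "${IMAGE}"                    # free local disk space afterwards
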
@ -1,47 +0,0 @@
container('milvus-build-env') {
    timeout(time: 5, unit: 'MINUTES') {
        dir ("milvus_engine") {
            dir ("core") {
                gitlabCommitStatus(name: 'Packaged Engine') {
                    if (fileExists('milvus')) {
                        try {
                            if (fileExists('milvus/unittest')) {
                                sh "rm -rf ./milvus/unittest"
                            }
                            sh "tar -zcvf ./${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz ./milvus"
                            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
                            fileTransfer.FileTransfer("${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz", "${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage')
                            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                                echo "Download Milvus Engine Binary Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz\""
                            }
                        } catch (exc) {
                            updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
                            throw exc
                        }
                    } else {
                        updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
                        error("Milvus binary directory doesn't exist!")
                    }
                }

                gitlabCommitStatus(name: 'Packaged Engine lcov') {
                    if (fileExists('lcov_out')) {
                        try {
                            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
                            fileTransfer.FileTransfer("lcov_out/", "${PROJECT_NAME}/lcov/${JOB_NAME}-${BUILD_ID}", 'nas storage')
                            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                                echo "Milvus lcov out Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/lcov/${JOB_NAME}-${BUILD_ID}/lcov_out/\""
                            }
                        } catch (exc) {
                            updateGitlabCommitStatus name: 'Packaged Engine lcov', state: 'failed'
                            throw exc
                        }
                    } else {
                        updateGitlabCommitStatus name: 'Packaged Engine lcov', state: 'failed'
                        error("Milvus lcov out directory doesn't exist!")
                    }
                }
            }
        }
    }
}

@ -1,26 +0,0 @@
container('milvus-build-env') {
    timeout(time: 5, unit: 'MINUTES') {
        dir ("milvus_engine") {
            dir ("core") {
                gitlabCommitStatus(name: 'Packaged Engine') {
                    if (fileExists('milvus')) {
                        try {
                            sh "tar -zcvf ./${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz ./milvus"
                            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
                            fileTransfer.FileTransfer("${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz", "${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage')
                            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                                echo "Download Milvus Engine Binary Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz\""
                            }
                        } catch (exc) {
                            updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
                            throw exc
                        }
                    } else {
                        updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
                        error("Milvus binary directory doesn't exist!")
                    }
                }
            }
        }
    }
}

@ -1,35 +0,0 @@
container('publish-docker') {
    timeout(time: 15, unit: 'MINUTES') {
        gitlabCommitStatus(name: 'Publish Engine Docker') {
            try {
                dir ("${PROJECT_NAME}_build") {
                    checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:build/milvus_build.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
                    dir ("docker/deploy/ubuntu16.04/free_version") {
                        sh "curl -O -u anonymous: ftp://192.168.1.126/data/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
                        sh "tar zxvf ${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
                        try {
                            def customImage = docker.build("${PROJECT_NAME}/engine:${DOCKER_VERSION}")
                            docker.withRegistry('https://registry.zilliz.com', "${params.DOCKER_PUBLISH_USER}") {
                                customImage.push()
                            }
                            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                                updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'success'
                                echo "Docker Pull Command: docker pull registry.zilliz.com/${PROJECT_NAME}/engine:${DOCKER_VERSION}"
                            }
                        } catch (exc) {
                            updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'canceled'
                            throw exc
                        } finally {
                            sh "docker rmi ${PROJECT_NAME}/engine:${DOCKER_VERSION}"
                        }
                    }
                }
            } catch (exc) {
                updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'failed'
                echo 'Publish docker failed!'
                throw exc
            }
        }
    }
}

@ -1,31 +0,0 @@
timeout(time: 40, unit: 'MINUTES') {
    try {
        dir ("${PROJECT_NAME}_test") {
            checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            sh 'python3 -m pip install -r requirements.txt'
            def service_ip = sh (script: "kubectl get svc --namespace milvus-1 ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine --template \"{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}\"",returnStdout: true).trim()
            sh "pytest . --alluredir=\"test_out/staging/single/sqlite\" --ip ${service_ip}"
        }

        // mysql database backend test
        load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_staging.groovy"

        if (!fileExists('milvus-helm')) {
            dir ("milvus-helm") {
                checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
            }
        }
        dir ("milvus-helm") {
            dir ("milvus/milvus-gpu") {
                sh "helm install --wait --timeout 300 --set engine.image.repository=\"zilliz.azurecr.cn/milvus/engine\" --set engine.image.tag=${DOCKER_VERSION} --set expose.type=loadBalancer --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/db_backend/mysql_values.yaml --namespace milvus-2 --version 0.5.0 ."
            }
        }
        dir ("${PROJECT_NAME}_test") {
            def service_ip = sh (script: "kubectl get svc --namespace milvus-2 ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine --template \"{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}\"",returnStdout: true).trim()
            sh "pytest . --alluredir=\"test_out/staging/single/mysql\" --ip ${service_ip}"
        }
    } catch (exc) {
        echo 'Milvus Test Failed !'
        throw exc
    }
}

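The staging test resolves the LoadBalancer ingress IP with a go-template, but that IP can be empty until the cloud provider finishes provisioning, so a retry loop is safer than a single query. A sketch, with the namespace and service name copied from the snippet above:

#!/bin/bash
# Hypothetical retry loop around the kubectl template used above.
SVC="${JOB_NAME}-${BUILD_NUMBER}-milvus-gpu-engine"
for i in $(seq 1 60); do
    SERVICE_IP=$(kubectl get svc --namespace milvus-1 "${SVC}" \
        --template "{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}")
    [ -n "${SERVICE_IP}" ] && break    # IP assigned, stop polling
    sleep 5
done
echo "service ip: ${SERVICE_IP:-not assigned}"
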
@ -1,14 +0,0 @@
timeout(time: 5, unit: 'MINUTES') {
    dir ("${PROJECT_NAME}_test") {
        if (fileExists('cluster_test_out')) {
            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
            fileTransfer.FileTransfer("cluster_test_out/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
            }
        } else {
            error("Milvus Dev Test Out directory doesn't exist!")
        }
    }
}

@ -1,13 +0,0 @@
timeout(time: 5, unit: 'MINUTES') {
    dir ("${PROJECT_NAME}_test") {
        if (fileExists('test_out/dev')) {
            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
            fileTransfer.FileTransfer("test_out/dev/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
            }
        } else {
            error("Milvus Dev Test Out directory doesn't exist!")
        }
    }
}

@ -1,13 +0,0 @@
timeout(time: 5, unit: 'MINUTES') {
    dir ("${PROJECT_NAME}_test") {
        if (fileExists('test_out/staging')) {
            def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
            fileTransfer.FileTransfer("test_out/staging/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
            if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
                echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
            }
        } else {
            error("Milvus Dev Test Out directory doesn't exist!")
        }
    }
}

@ -1,396 +0,0 @@
pipeline {
  agent none

  options {
    timestamps()
  }

  environment {
    PROJECT_NAME = "milvus"
    LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
    SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
    GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
    SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
    DOCKER_VERSION_STR = "${env.gitlabAfter == null ? "${SEMVER}-${LOWER_BUILD_TYPE}" : "${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}"}"
  }

  stages {
    stage("Ubuntu 16.04") {
      environment {
        PACKAGE_VERSION = VersionNumber([
          versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
        ]);

        DOCKER_VERSION = VersionNumber([
          versionNumberString : '${DOCKER_VERSION_STR}'
        ]);
      }

      stages {
        stage("Run Build") {
          agent {
            kubernetes {
              cloud 'build-kubernetes'
              label 'build'
              defaultContainer 'jnlp'
              yaml """
apiVersion: v1
kind: Pod
metadata:
  name: milvus-build-env
  labels:
    app: milvus
    component: build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-build-env:v0.13
    env:
    - name: POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    command:
    - cat
    tty: true
    resources:
      limits:
        memory: "28Gi"
        cpu: "10.0"
        nvidia.com/gpu: 1
      requests:
        memory: "14Gi"
        cpu: "5.0"
  - name: milvus-mysql
    image: mysql:5.6
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: 123456
    ports:
    - containerPort: 3306
      name: mysql
"""
            }
          }
          stages {
            stage('Build') {
              steps {
                gitlabCommitStatus(name: 'Build') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build.groovy"
                    load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'canceled'
                echo "Milvus Build aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'failed'
                echo "Milvus Build failure !"
              }
            }
          }
        }

        stage("Publish docker and helm") {
          agent {
            kubernetes {
              label 'publish'
              defaultContainer 'jnlp'
              yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-docker
    image: registry.zilliz.com/library/zilliz_docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
"""
            }
          }
          stages {
            stage('Publish Docker') {
              steps {
                gitlabCommitStatus(name: 'Publish Docker') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/publish_docker.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
                echo "Milvus Publish Docker aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
                echo "Milvus Publish Docker failure !"
              }
            }
          }
        }

        stage("Deploy to Development") {
          parallel {
            stage("Single Node") {
              agent {
                kubernetes {
                  label 'dev-test'
                  defaultContainer 'jnlp'
                  yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                }
              }

              stages {
                stage("Deploy to Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy to Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
                        }
                      }
                    }
                  }
                }
                stage("Dev Test") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy Test') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
                          load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
                        }
                      }
                    }
                  }
                }
                stage ("Cleanup Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Cleanup Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                        }
                      }
                    }
                  }
                }
              }
              post {
                always {
                  container('milvus-testframework') {
                    script {
                      load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                    }
                  }
                }
                success {
                  script {
                    echo "Milvus Single Node CI/CD success !"
                  }
                }
                aborted {
                  script {
                    echo "Milvus Single Node CI/CD aborted !"
                  }
                }
                failure {
                  script {
                    echo "Milvus Single Node CI/CD failure !"
                  }
                }
              }
            }

            // stage("Cluster") {
            //   agent {
            //     kubernetes {
            //       label 'dev-test'
            //       defaultContainer 'jnlp'
            //       yaml """
            //       apiVersion: v1
            //       kind: Pod
            //       metadata:
            //         labels:
            //           app: milvus
            //           component: test
            //       spec:
            //         containers:
            //         - name: milvus-testframework
            //           image: registry.zilliz.com/milvus/milvus-test:v0.2
            //           command:
            //           - cat
            //           tty: true
            //           volumeMounts:
            //           - name: kubeconf
            //             mountPath: /root/.kube/
            //             readOnly: true
            //         volumes:
            //         - name: kubeconf
            //           secret:
            //             secretName: test-cluster-config
            //       """
            //     }
            //   }
            //   stages {
            //     stage("Deploy to Dev") {
            //       steps {
            //         gitlabCommitStatus(name: 'Deploy to Dev') {
            //           container('milvus-testframework') {
            //             script {
            //               load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
            //             }
            //           }
            //         }
            //       }
            //     }
            //     stage("Dev Test") {
            //       steps {
            //         gitlabCommitStatus(name: 'Deploy Test') {
            //           container('milvus-testframework') {
            //             script {
            //               load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
            //               load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
            //             }
            //           }
            //         }
            //       }
            //     }
            //     stage ("Cleanup Dev") {
            //       steps {
            //         gitlabCommitStatus(name: 'Cleanup Dev') {
            //           container('milvus-testframework') {
            //             script {
            //               load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
            //             }
            //           }
            //         }
            //       }
            //     }
            //   }
            //   post {
            //     always {
            //       container('milvus-testframework') {
            //         script {
            //           load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
            //         }
            //       }
            //     }
            //     success {
            //       script {
            //         echo "Milvus Cluster CI/CD success !"
            //       }
            //     }
            //     aborted {
            //       script {
            //         echo "Milvus Cluster CI/CD aborted !"
            //       }
            //     }
            //     failure {
            //       script {
            //         echo "Milvus Cluster CI/CD failure !"
            //       }
            //     }
            //   }
            // }
          }
        }
      }
    }
  }

  post {
    always {
      script {
        if (env.gitlabAfter != null) {
          if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
            // Send an email only if the build status has changed from green/unstable to red
            emailext subject: '$DEFAULT_SUBJECT',
              body: '$DEFAULT_CONTENT',
              recipientProviders: [
                [$class: 'DevelopersRecipientProvider'],
                [$class: 'RequesterRecipientProvider']
              ],
              replyTo: '$DEFAULT_REPLYTO',
              to: '$DEFAULT_RECIPIENTS'
          }
        }
      }
    }

    success {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'success'
        echo "Milvus CI/CD success !"
      }
    }

    aborted {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
        echo "Milvus CI/CD aborted !"
      }
    }

    failure {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
        echo "Milvus CI/CD failure !"
      }
    }
  }
}

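The SEMVER and DOCKER_VERSION_STR ternaries in the pipeline's environment block above are easy to misread; restated in shell for reference (variable names mirror the pipeline, and the fallback order shown is the assumption being illustrated):

#!/bin/bash
# Hypothetical shell restatement of the SEMVER/DOCKER_VERSION_STR logic above.
SEMVER="${gitlabSourceBranch:-${ENGINE_BRANCH##*/}}"   # strip everything up to the last '/'
LOWER_BUILD_TYPE=$(echo "${BUILD_TYPE}" | tr '[:upper:]' '[:lower:]')
if [ -z "${gitlabAfter}" ]; then
    DOCKER_VERSION_STR="${SEMVER}-${LOWER_BUILD_TYPE}"
else
    # suffix with the first 6 characters of the triggering commit hash
    DOCKER_VERSION_STR="${SEMVER}-${LOWER_BUILD_TYPE}-${gitlabAfter:0:6}"
fi
echo "${DOCKER_VERSION_STR}"
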
@ -1,396 +0,0 @@
pipeline {
  agent none

  options {
    timestamps()
  }

  environment {
    PROJECT_NAME = "milvus"
    LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
    SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
    GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
    SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
    DOCKER_VERSION_STR = "${env.gitlabAfter == null ? "${SEMVER}-${LOWER_BUILD_TYPE}" : "${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}"}"
  }

  stages {
    stage("Ubuntu 16.04") {
      environment {
        PACKAGE_VERSION = VersionNumber([
          versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
        ]);

        DOCKER_VERSION = VersionNumber([
          versionNumberString : '${DOCKER_VERSION_STR}'
        ]);
      }

      stages {
        stage("Run Build") {
          agent {
            kubernetes {
              cloud 'build-kubernetes'
              label 'build'
              defaultContainer 'jnlp'
              yaml """
apiVersion: v1
kind: Pod
metadata:
  name: milvus-build-env
  labels:
    app: milvus
    component: build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-build-env:v0.13
    env:
    - name: POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    command:
    - cat
    tty: true
    resources:
      limits:
        memory: "28Gi"
        cpu: "10.0"
        nvidia.com/gpu: 1
      requests:
        memory: "14Gi"
        cpu: "5.0"
  - name: milvus-mysql
    image: mysql:5.6
    env:
    - name: MYSQL_ROOT_PASSWORD
      value: 123456
    ports:
    - containerPort: 3306
      name: mysql
"""
            }
          }
          stages {
            stage('Build') {
              steps {
                gitlabCommitStatus(name: 'Build') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build_no_ut.groovy"
                    load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus_no_ut.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'canceled'
                echo "Milvus Build aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'failed'
                echo "Milvus Build failure !"
              }
            }
          }
        }

        stage("Publish docker and helm") {
          agent {
            kubernetes {
              label 'publish'
              defaultContainer 'jnlp'
              yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-docker
    image: registry.zilliz.com/library/zilliz_docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
"""
            }
          }
          stages {
            stage('Publish Docker') {
              steps {
                gitlabCommitStatus(name: 'Publish Docker') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/publish_docker.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
                echo "Milvus Publish Docker aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
                echo "Milvus Publish Docker failure !"
              }
            }
          }
        }

        stage("Deploy to Development") {
          parallel {
            stage("Single Node") {
              agent {
                kubernetes {
                  label 'dev-test'
                  defaultContainer 'jnlp'
                  yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                }
              }

              stages {
                stage("Deploy to Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy to Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
                        }
                      }
                    }
                  }
                }
                stage("Dev Test") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy Test') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
                          load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
                        }
                      }
                    }
                  }
                }
                stage ("Cleanup Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Cleanup Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                        }
                      }
                    }
                  }
                }
              }
              post {
                always {
                  container('milvus-testframework') {
                    script {
                      load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                    }
                  }
                }
                success {
                  script {
                    echo "Milvus Single Node CI/CD success !"
                  }
                }
                aborted {
                  script {
                    echo "Milvus Single Node CI/CD aborted !"
                  }
                }
                failure {
                  script {
                    echo "Milvus Single Node CI/CD failure !"
                  }
                }
              }
            }

            // stage("Cluster") {
            //   agent {
            //     kubernetes {
            //       label 'dev-test'
            //       defaultContainer 'jnlp'
            //       yaml """
            //       apiVersion: v1
            //       kind: Pod
            //       metadata:
            //         labels:
            //           app: milvus
            //           component: test
            //       spec:
            //         containers:
            //         - name: milvus-testframework
            //           image: registry.zilliz.com/milvus/milvus-test:v0.2
            //           command:
            //           - cat
            //           tty: true
            //           volumeMounts:
            //           - name: kubeconf
            //             mountPath: /root/.kube/
            //             readOnly: true
            //         volumes:
            //         - name: kubeconf
            //           secret:
            //             secretName: test-cluster-config
            //       """
            //     }
            //   }
            //   stages {
            //     stage("Deploy to Dev") {
            //       steps {
            //         gitlabCommitStatus(name: 'Deploy to Dev') {
            //           container('milvus-testframework') {
            //             script {
            //               load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
            //             }
            //           }
            //         }
            //       }
            //     }
            //     stage("Dev Test") {
            //       steps {
            //         gitlabCommitStatus(name: 'Deploy Test') {
            //           container('milvus-testframework') {
            //             script {
            //               load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
            //               load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
            //             }
            //           }
            //         }
            //       }
            //     }
            //     stage ("Cleanup Dev") {
            //       steps {
            //         gitlabCommitStatus(name: 'Cleanup Dev') {
            //           container('milvus-testframework') {
            //             script {
            //               load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
            //             }
            //           }
            //         }
            //       }
            //     }
            //   }
            //   post {
            //     always {
            //       container('milvus-testframework') {
            //         script {
            //           load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
            //         }
            //       }
            //     }
            //     success {
            //       script {
            //         echo "Milvus Cluster CI/CD success !"
            //       }
            //     }
            //     aborted {
            //       script {
            //         echo "Milvus Cluster CI/CD aborted !"
            //       }
            //     }
            //     failure {
            //       script {
            //         echo "Milvus Cluster CI/CD failure !"
            //       }
            //     }
            //   }
            // }
          }
        }
      }
    }
  }

  post {
    always {
      script {
        if (env.gitlabAfter != null) {
          if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
            // Send an email only if the build status has changed from green/unstable to red
            emailext subject: '$DEFAULT_SUBJECT',
              body: '$DEFAULT_CONTENT',
              recipientProviders: [
                [$class: 'DevelopersRecipientProvider'],
                [$class: 'RequesterRecipientProvider']
              ],
              replyTo: '$DEFAULT_REPLYTO',
              to: '$DEFAULT_RECIPIENTS'
          }
        }
      }
    }

    success {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'success'
        echo "Milvus CI/CD success !"
      }
    }

    aborted {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
        echo "Milvus CI/CD aborted !"
      }
    }

    failure {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
        echo "Milvus CI/CD failure !"
      }
    }
  }
}

@ -1,478 +0,0 @@
pipeline {
  agent none

  options {
    timestamps()
  }

  environment {
    PROJECT_NAME = "milvus"
    LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
    SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
    GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
    SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
    DOCKER_VERSION_STR = "${env.gitlabAfter == null ? '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, \"yyyyMMdd\"}' : '${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}'}"
  }

  stages {
    stage("Ubuntu 16.04") {
      environment {
        PACKAGE_VERSION = VersionNumber([
          versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
        ]);

        DOCKER_VERSION = VersionNumber([
          versionNumberString : '${DOCKER_VERSION_STR}'
        ]);
      }

      stages {
        stage("Run Build") {
          agent {
            kubernetes {
              cloud 'build-kubernetes'
              label 'build'
              defaultContainer 'jnlp'
              yaml """
apiVersion: v1
kind: Pod
metadata:
  name: milvus-build-env
  labels:
    app: milvus
    component: build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-build-env:v0.13
    command:
    - cat
    tty: true
    resources:
      limits:
        memory: "28Gi"
        cpu: "10.0"
        nvidia.com/gpu: 1
      requests:
        memory: "14Gi"
        cpu: "5.0"
"""
            }
          }
          stages {
            stage('Build') {
              steps {
                gitlabCommitStatus(name: 'Build') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build.groovy"
                    load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'canceled'
                echo "Milvus Build aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Build', state: 'failed'
                echo "Milvus Build failure !"
              }
            }
          }
        }

        stage("Publish docker and helm") {
          agent {
            kubernetes {
              label 'publish'
              defaultContainer 'jnlp'
              yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-docker
    image: registry.zilliz.com/library/zilliz_docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
"""
            }
          }
          stages {
            stage('Publish Docker') {
              steps {
                gitlabCommitStatus(name: 'Publish Docker') {
                  script {
                    load "${env.WORKSPACE}/ci/jenkinsfile/nightly_publish_docker.groovy"
                  }
                }
              }
            }
          }
          post {
            aborted {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
                echo "Milvus Publish Docker aborted !"
              }
            }

            failure {
              script {
                updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
                echo "Milvus Publish Docker failure !"
              }
            }
          }
        }

        stage("Deploy to Development") {
          parallel {
            stage("Single Node") {
              agent {
                kubernetes {
                  label 'dev-test'
                  defaultContainer 'jnlp'
                  yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: test-cluster-config
"""
                }
              }

              stages {
                stage("Deploy to Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy to Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
                        }
                      }
                    }
                  }
                }
                stage("Dev Test") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy Test') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/dev_test_all.groovy"
                          load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
                        }
                      }
                    }
                  }
                }
                stage ("Cleanup Dev") {
                  steps {
                    gitlabCommitStatus(name: 'Cleanup Dev') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                        }
                      }
                    }
                  }
                }
              }
              post {
                always {
                  container('milvus-testframework') {
                    script {
                      load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
                    }
                  }
                }
                success {
                  script {
                    echo "Milvus Deploy to Dev Single Node CI/CD success !"
                  }
                }
                aborted {
                  script {
                    echo "Milvus Deploy to Dev Single Node CI/CD aborted !"
                  }
                }
                failure {
                  script {
                    echo "Milvus Deploy to Dev Single Node CI/CD failure !"
                  }
                }
              }
            }

            // stage("Cluster") {
            //   agent {
            //     kubernetes {
            //       label 'dev-test'
            //       defaultContainer 'jnlp'
            //       yaml """
            //       apiVersion: v1
            //       kind: Pod
            //       metadata:
            //         labels:
            //           app: milvus
            //           component: test
            //       spec:
            //         containers:
            //         - name: milvus-testframework
            //           image: registry.zilliz.com/milvus/milvus-test:v0.2
            //           command:
            //           - cat
            //           tty: true
            //           volumeMounts:
            //           - name: kubeconf
            //             mountPath: /root/.kube/
            //             readOnly: true
            //         volumes:
            //         - name: kubeconf
            //           secret:
            //             secretName: test-cluster-config
            //       """
            //     }
            //   }
            //   stages {
            //     stage("Deploy to Dev") {
            //       steps {
            //         gitlabCommitStatus(name: 'Deploy to Dev') {
            //           container('milvus-testframework') {
            //             script {
            //               load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
            //             }
            //           }
            //         }
            //       }
            //     }
            //     stage("Dev Test") {
            //       steps {
            //         gitlabCommitStatus(name: 'Deploy Test') {
            //           container('milvus-testframework') {
            //             script {
            //               load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
            //               load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
            //             }
            //           }
            //         }
            //       }
            //     }
            //     stage ("Cleanup Dev") {
            //       steps {
            //         gitlabCommitStatus(name: 'Cleanup Dev') {
            //           container('milvus-testframework') {
            //             script {
            //               load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
            //             }
            //           }
            //         }
            //       }
            //     }
            //   }
            //   post {
            //     always {
            //       container('milvus-testframework') {
            //         script {
            //           load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
            //         }
            //       }
            //     }
            //     success {
            //       script {
            //         echo "Milvus Deploy to Dev Cluster CI/CD success !"
            //       }
            //     }
            //     aborted {
            //       script {
            //         echo "Milvus Deploy to Dev Cluster CI/CD aborted !"
            //       }
            //     }
            //     failure {
            //       script {
            //         echo "Milvus Deploy to Dev Cluster CI/CD failure !"
            //       }
            //     }
            //   }
            // }
          }
        }

        stage("Deploy to Staging") {
          parallel {
            stage("Single Node") {
              agent {
                kubernetes {
                  label 'dev-test'
                  defaultContainer 'jnlp'
                  yaml """
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: test
spec:
  containers:
  - name: milvus-testframework
    image: registry.zilliz.com/milvus/milvus-test:v0.2
    command:
    - cat
    tty: true
    volumeMounts:
    - name: kubeconf
      mountPath: /root/.kube/
      readOnly: true
  volumes:
  - name: kubeconf
    secret:
      secretName: aks-gpu-cluster-config
"""
                }
              }

              stages {
                stage("Deploy to Staging") {
                  steps {
                    gitlabCommitStatus(name: 'Deploy to Staging') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/deploy2staging.groovy"
                        }
                      }
                    }
                  }
                }
                stage("Staging Test") {
                  steps {
                    gitlabCommitStatus(name: 'Staging Test') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/staging_test.groovy"
                          load "${env.WORKSPACE}/ci/jenkinsfile/upload_staging_test_out.groovy"
                        }
                      }
                    }
                  }
                }
                stage ("Cleanup Staging") {
                  steps {
                    gitlabCommitStatus(name: 'Cleanup Staging') {
                      container('milvus-testframework') {
                        script {
                          load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_staging.groovy"
                        }
                      }
                    }
                  }
                }
              }
              post {
                always {
                  container('milvus-testframework') {
                    script {
                      load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_staging.groovy"
                    }
                  }
                }
                success {
                  script {
                    echo "Milvus Deploy to Staging Single Node CI/CD success !"
                  }
                }
                aborted {
                  script {
                    echo "Milvus Deploy to Staging Single Node CI/CD aborted !"
                  }
                }
                failure {
                  script {
                    echo "Milvus Deploy to Staging Single Node CI/CD failure !"
                  }
                }
              }
            }
          }
        }
      }
    }
  }

  post {
    always {
      script {
        if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
          // Send an email only if the build status has changed from green/unstable to red
          emailext subject: '$DEFAULT_SUBJECT',
            body: '$DEFAULT_CONTENT',
            recipientProviders: [
              [$class: 'DevelopersRecipientProvider'],
              [$class: 'RequesterRecipientProvider']
            ],
            replyTo: '$DEFAULT_REPLYTO',
            to: '$DEFAULT_RECIPIENTS'
        }
      }
    }

    success {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'success'
        echo "Milvus CI/CD success !"
      }
    }

    aborted {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
        echo "Milvus CI/CD aborted !"
      }
    }

    failure {
      script {
        updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
        echo "Milvus CI/CD failure !"
      }
    }
  }
}

@ -1,13 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: milvus
    component: build-env
spec:
  containers:
  - name: milvus-build-env
    image: registry.zilliz.com/milvus/milvus-build-env:v0.9
    command:
    - cat
    tty: true

@ -1,22 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: publish
    component: docker
spec:
  containers:
  - name: publish-docker
    image: registry.zilliz.com/library/zilliz_docker:v1.0.0
    securityContext:
      privileged: true
    command:
    - cat
    tty: true
    volumeMounts:
    - name: docker-sock
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock

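The publish pod above runs privileged and mounts the host's Docker socket, so docker commands inside the container drive the node's daemon rather than a nested one. A quick sanity check of that wiring from inside such a pod might look like this (purely illustrative):

#!/bin/bash
# Hypothetical check that the mounted socket reaches the host daemon.
docker -H unix:///var/run/docker.sock info --format '{{.ServerVersion}}'
docker ps    # lists the host's containers, confirming the socket mount works
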
@ -0,0 +1,11 @@
#!/bin/bash

set -ex

export CCACHE_COMPRESS=1
export CCACHE_COMPRESSLEVEL=5
export CCACHE_COMPILERCHECK=content
export PATH=/usr/lib/ccache/:$PATH
ccache --show-stats

set +ex

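Prepending /usr/lib/ccache/ to PATH makes the ccache compiler shims shadow the real gcc/g++. One way to verify the shims are actually picked up before a build (paths assume the Debian/Ubuntu ccache layout used by this CI image):

#!/bin/bash
# Hypothetical sanity check that compilers resolve to the ccache shims.
export PATH=/usr/lib/ccache/:$PATH
command -v gcc    # expected: /usr/lib/ccache/gcc
command -v g++    # expected: /usr/lib/ccache/g++
ccache --zero-stats
# ... run a build here, then inspect hit/miss counters:
ccache --show-stats
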
@ -1,5 +1,7 @@
#!/bin/bash

set -e

SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
  DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

@ -8,38 +10,45 @@ while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

CMAKE_BUILD_DIR="${SCRIPTS_DIR}/../../../core/cmake_build"
MILVUS_CORE_DIR="${SCRIPTS_DIR}/../../core"
CORE_BUILD_DIR="${MILVUS_CORE_DIR}/cmake_build"
BUILD_TYPE="Debug"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX="/opt/milvus"
INSTALL_PREFIX="/var/lib/milvus"
FAISS_ROOT=""
PRIVILEGES="OFF"
CUSTOMIZATION="OFF" # default use origin faiss
BUILD_COVERAGE="OFF"
DB_PATH="/opt/milvus"
PROFILING="OFF"
USE_JFROG_CACHE="OFF"
RUN_CPPLINT="OFF"
CUSTOMIZATION="OFF" # default use ori faiss
GPU_VERSION="OFF"
WITH_MKL="OFF"
CUDA_COMPILER=/usr/local/cuda/bin/nvcc

CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}"
wget -q --method HEAD ${CUSTOMIZED_FAISS_URL}
if [ $? -eq 0 ]; then
    CUSTOMIZATION="ON"
else
    CUSTOMIZATION="OFF"
fi

while getopts "o:d:t:ulcgjhx" arg
while getopts "o:t:b:f:pgxulcjmh" arg
do
    case $arg in
        o)
            INSTALL_PREFIX=$OPTARG
            ;;
        d)
            DB_PATH=$OPTARG
            ;;
        t)
            BUILD_TYPE=$OPTARG # BUILD_TYPE
            ;;
        b)
            CORE_BUILD_DIR=$OPTARG # CORE_BUILD_DIR
            ;;
        f)
            FAISS_ROOT=$OPTARG # FAISS ROOT PATH
            ;;
        p)
            PRIVILEGES="ON" # ELEVATED PRIVILEGES
            ;;
        g)
            GPU_VERSION="ON";
            ;;
        x)
            CUSTOMIZATION="ON";
            ;;
        u)
            echo "Build and run unittest cases" ;
            BUILD_UNITTEST="ON";

@ -50,31 +59,32 @@ do
        c)
            BUILD_COVERAGE="ON"
            ;;
        g)
            PROFILING="ON"
            ;;
        j)
            USE_JFROG_CACHE="ON"
            ;;
        x)
            CUSTOMIZATION="OFF" # force use ori faiss
        m)
            WITH_MKL="ON"
            ;;
        h) # help
            echo "

parameter:
-o: install prefix(default: /opt/milvus)
-d: db data path(default: /opt/milvus)
-o: install prefix(default: /var/lib/milvus)
-t: build type(default: Debug)
-b: core code build directory
-f: faiss root path
-p: install command with elevated privileges
-g: gpu version
-x: milvus customization (default: OFF)
-u: building unit test options(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-c: code coverage(default: OFF)
-g: profiling(default: OFF)
-j: use jfrog cache build directory(default: OFF)
-m: build with MKL(default: OFF)
-h: help

usage:
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-g] [-j] [-h]
./build.sh -o \${INSTALL_PREFIX} -t \${BUILD_TYPE} -b \${CORE_BUILD_DIR} -f \${FAISS_ROOT} [-p] [-g] [-x] [-u] [-l] [-c] [-j] [-m] [-h]
"
            exit 0
            ;;

@ -85,31 +95,30 @@ usage:
|
|||
esac
|
||||
done
|
||||
|
||||
if [[ ! -d ${CMAKE_BUILD_DIR} ]]; then
|
||||
mkdir ${CMAKE_BUILD_DIR}
|
||||
if [[ ! -d ${CORE_BUILD_DIR} ]]; then
|
||||
mkdir ${CORE_BUILD_DIR}
|
||||
fi
|
||||
|
||||
cd ${CMAKE_BUILD_DIR}
|
||||
|
||||
# remove make cache since build.sh -l use default variables
|
||||
# force update the variables each time
|
||||
make rebuild_cache
|
||||
cd ${CORE_BUILD_DIR}
|
||||
|
||||
CMAKE_CMD="cmake \
|
||||
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
|
||||
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX}
|
||||
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
|
||||
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
|
||||
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
|
||||
-DMILVUS_DB_PATH=${DB_PATH} \
|
||||
-DMILVUS_ENABLE_PROFILING=${PROFILING} \
|
||||
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
|
||||
-DMILVUS_GPU_VERSION=${GPU_VERSION} \
|
||||
-DCUSTOMIZATION=${CUSTOMIZATION} \
|
||||
-DFAISS_URL=${CUSTOMIZED_FAISS_URL} \
|
||||
.."
|
||||
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
|
||||
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
|
||||
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
|
||||
-DFAISS_ROOT=${FAISS_ROOT} \
|
||||
-DFAISS_WITH_MKL=${WITH_MKL} \
|
||||
-DArrow_SOURCE=AUTO \
|
||||
-DFAISS_SOURCE=AUTO \
|
||||
${MILVUS_CORE_DIR}"
|
||||
echo ${CMAKE_CMD}
|
||||
${CMAKE_CMD}
|
||||
|
||||
|
||||
if [[ ${RUN_CPPLINT} == "ON" ]]; then
|
||||
# cpplint check
|
||||
make lint
|
||||
|
@ -135,8 +144,13 @@ if [[ ${RUN_CPPLINT} == "ON" ]]; then
|
|||
# exit 1
|
||||
# fi
|
||||
# echo "clang-tidy check passed!"
|
||||
fi
|
||||
|
||||
# compile and build
|
||||
make -j8 || exit 1
|
||||
|
||||
if [[ ${PRIVILEGES} == "ON" ]];then
|
||||
sudo make install || exit 1
|
||||
else
|
||||
# compile and build
|
||||
make -j8 || exit 1
|
||||
make install || exit 1
|
||||
fi
|
|
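For reference, a typical invocation of this CI build script, following the usage text above. This is an illustrative sketch, not a command taken from the commit; the install prefix and build directory are example values.

```shell
# Illustrative only: Release build installed with elevated privileges,
# GPU version with MKL, unit tests and code coverage enabled.
./build.sh -o /var/lib/milvus -t Release -b ./core/cmake_build -p -g -m -u -c
```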
@ -0,0 +1,73 @@
#!/bin/bash

OS_NAME="linux"
CODE_NAME=$(lsb_release -sc)
BUILD_ENV_DOCKER_IMAGE_ID="${BUILD_ENV_IMAGE_ID}"
BRANCH_NAMES=$(git log --decorate | head -n 1 | sed 's/.*(\(.*\))/\1/' | sed 's=[a-zA-Z]*\/==g' | awk -F", " '{$1=""; print $0}')
ARTIFACTORY_URL=""
CCACHE_DIRECTORY="${HOME}/.ccache"

while getopts "l:d:h" arg
do
    case $arg in
        l)
            ARTIFACTORY_URL=$OPTARG
            ;;
        d)
            CCACHE_DIRECTORY=$OPTARG
            ;;
        h) # help
            echo "

parameter:
-l: artifactory url
-d: ccache directory
-h: help

usage:
./build.sh -l \${ARTIFACTORY_URL} -d \${CCACHE_DIRECTORY} [-h]
"
            exit 0
            ;;
        ?)
            echo "ERROR! unknown argument"
            exit 1
            ;;
    esac
done

if [[ -z "${ARTIFACTORY_URL}" || "${ARTIFACTORY_URL}" == "" ]];then
    echo "you have not input ARTIFACTORY_URL !"
    exit 1
fi

check_ccache() {
    BRANCH=$1
    echo "fetching ${BRANCH}/ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz"
    wget -q --method HEAD "${ARTIFACTORY_URL}/${BRANCH}/ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz"
    if [[ $? == 0 ]];then
        wget -q "${ARTIFACTORY_URL}/${BRANCH}/ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz" && \
        mkdir -p ${CCACHE_DIRECTORY} && \
        tar zxf ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz -C ${CCACHE_DIRECTORY} && \
        rm ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz
        if [[ $? == 0 ]];then
            echo "found cache"
            exit 0
        fi
    fi
}

if [[ -n "${CHANGE_BRANCH}" && "${BRANCH_NAME}" =~ "PR-" ]];then
    check_ccache ${CHANGE_BRANCH}
    check_ccache ${BRANCH_NAME}
fi

for CURRENT_BRANCH in ${BRANCH_NAMES}
do
    if [[ "${CURRENT_BRANCH}" != "HEAD" ]];then
        check_ccache ${CURRENT_BRANCH}
    fi
done

echo "could not download cache" && exit 1
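The script above probes the artifactory for a branch-specific ccache archive and unpacks the first one it finds into the ccache directory. A possible invocation follows; the script path, URL, and image id are illustrative assumptions (the embedded help text itself still says `./build.sh`), not values from this commit.

```shell
# Illustrative values only; BUILD_ENV_IMAGE_ID is expected in the environment.
export BUILD_ENV_IMAGE_ID=example-image-id
./ci/scripts/check_ccache.sh -l http://artifactory.example.com/ccache -d ${HOME}/.ccache
```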
@ -8,19 +8,23 @@ while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symli
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

INSTALL_PREFIX="/opt/milvus"
CMAKE_BUILD_DIR="${SCRIPTS_DIR}/../../../core/cmake_build"
INSTALL_PREFIX="/var/lib/milvus"
MILVUS_CORE_DIR="${SCRIPTS_DIR}/../../core"
CORE_BUILD_DIR="${MILVUS_CORE_DIR}/cmake_build"
MYSQL_USER_NAME=root
MYSQL_PASSWORD=123456
MYSQL_HOST='127.0.0.1'
MYSQL_PORT='3306'

while getopts "o:u:p:t:h" arg
while getopts "o:b:u:p:t:h" arg
do
    case $arg in
        o)
            INSTALL_PREFIX=$OPTARG
            ;;
        b)
            CORE_BUILD_DIR=$OPTARG # CORE_BUILD_DIR
            ;;
        u)
            MYSQL_USER_NAME=$OPTARG
            ;;

@ -34,14 +38,15 @@ do
            echo "

parameter:
-o: milvus install prefix(default: /opt/milvus)
-o: milvus install prefix(default: /var/lib/milvus)
-b: core code build directory
-u: mysql account
-p: mysql password
-t: mysql host
-h: help

usage:
./coverage.sh -o \${INSTALL_PREFIX} -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h]
./coverage.sh -o \${INSTALL_PREFIX} -b \${CORE_BUILD_DIR} -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h]
"
            exit 0
            ;;

@ -63,12 +68,14 @@ FILE_INFO_OUTPUT="output.info"
FILE_INFO_OUTPUT_NEW="output_new.info"
DIR_LCOV_OUTPUT="lcov_out"

DIR_GCNO="${CMAKE_BUILD_DIR}"
DIR_GCNO="${CORE_BUILD_DIR}"
DIR_UNITTEST="${INSTALL_PREFIX}/unittest"

cd ${SCRIPTS_DIR}

# delete old code coverage info files
rm -rf lcov_out
rm -f FILE_INFO_BASE FILE_INFO_MILVUS FILE_INFO_OUTPUT FILE_INFO_OUTPUT_NEW
rm -rf ${DIR_LCOV_OUTPUT}
rm -f ${FILE_INFO_BASE} ${FILE_INFO_MILVUS} ${FILE_INFO_OUTPUT} ${FILE_INFO_OUTPUT_NEW}

MYSQL_DB_NAME=milvus_`date +%s%N`


@ -109,7 +116,7 @@ for test in `ls ${DIR_UNITTEST}`; do
    if [ $? -ne 0 ]; then
        echo ${args}
        echo ${DIR_UNITTEST}/${test} "run failed"
        exit -1
        exit 1
    fi
done


@ -132,12 +139,11 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
    "*/src/server/Server.cpp" \
    "*/src/server/DBWrapper.cpp" \
    "*/src/server/grpc_impl/GrpcServer.cpp" \
    "*/src/external/easyloggingpp/easylogging++.h" \
    "*/src/external/easyloggingpp/easylogging++.cc"
    "*/thirdparty/*"

if [ $? -ne 0 ]; then
    echo "gen ${FILE_INFO_OUTPUT_NEW} failed"
    exit -2
    exit 2
fi

# gen html report
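A representative run of this coverage script, following its usage text above. Illustrative only; the mysql credentials shown are simply the script's own defaults.

```shell
# Illustrative invocation: collect lcov coverage after the unit tests,
# pointing at the install prefix and core build directory used above.
./coverage.sh -o /var/lib/milvus -b ./core/cmake_build -u root -p 123456 -t 127.0.0.1
```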
@ -0,0 +1,71 @@
#!/bin/bash

OS_NAME="linux"
CODE_NAME=$(lsb_release -sc)
BUILD_ENV_DOCKER_IMAGE_ID="${BUILD_ENV_IMAGE_ID}"
BRANCH_NAME=$(git log --decorate | head -n 1 | sed 's/.*(\(.*\))/\1/' | sed 's/.*, //' | sed 's=[a-zA-Z]*\/==g')
ARTIFACTORY_URL=""
ARTIFACTORY_USER=""
ARTIFACTORY_PASSWORD=""
CCACHE_DIRECTORY="${HOME}/.ccache"

while getopts "l:u:p:d:h" arg
do
    case $arg in
        l)
            ARTIFACTORY_URL=$OPTARG
            ;;
        u)
            ARTIFACTORY_USER=$OPTARG
            ;;
        p)
            ARTIFACTORY_PASSWORD=$OPTARG
            ;;
        d)
            CCACHE_DIRECTORY=$OPTARG
            ;;
        h) # help
            echo "

parameter:
-l: artifactory url
-u: artifactory user
-p: artifactory password
-d: ccache directory
-h: help

usage:
./build.sh -l \${ARTIFACTORY_URL} -u \${ARTIFACTORY_USER} -p \${ARTIFACTORY_PASSWORD} -d \${CCACHE_DIRECTORY} [-h]
"
            exit 0
            ;;
        ?)
            echo "ERROR! unknown argument"
            exit 1
            ;;
    esac
done

if [[ -z "${ARTIFACTORY_URL}" || "${ARTIFACTORY_URL}" == "" ]];then
    echo "you have not input ARTIFACTORY_URL !"
    exit 1
fi

PACKAGE_FILE="ccache-${OS_NAME}-${CODE_NAME}-${BUILD_ENV_DOCKER_IMAGE_ID}.tar.gz"
REMOTE_PACKAGE_PATH="${ARTIFACTORY_URL}/${BRANCH_NAME}"

ccache --show-stats

if [[ "${BRANCH_NAME}" != "HEAD" ]];then
    echo "Updating ccache package file: ${PACKAGE_FILE}"
    tar zcf ./${PACKAGE_FILE} -C ${HOME}/.ccache .
    echo "Uploading ccache package file ${PACKAGE_FILE} to ${REMOTE_PACKAGE_PATH}"
    curl -u${ARTIFACTORY_USER}:${ARTIFACTORY_PASSWORD} -T ${PACKAGE_FILE} ${REMOTE_PACKAGE_PATH}/${PACKAGE_FILE}
    if [[ $? == 0 ]];then
        echo "Uploading ccache package file success !"
        exit 0
    else
        echo "Uploading ccache package file fault !"
        exit 1
    fi
fi
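This is the upload counterpart of the earlier fetch script: it packs `~/.ccache` and pushes it to `<artifactory url>/<branch>/`. A possible invocation, with the script path and all credential values as illustrative assumptions only:

```shell
# Illustrative values only; packs and uploads the local ccache archive.
./ci/scripts/update_ccache.sh -l http://artifactory.example.com/ccache -u ci_user -p ci_password -d ${HOME}/.ccache
```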
@ -0,0 +1,13 @@
#!/bin/bash

set -ex

if [[ "${TRAVIS_OS_NAME}" == "linux" ]]; then
    export CCACHE_COMPRESS=1
    export CCACHE_COMPRESSLEVEL=5
    export CCACHE_COMPILERCHECK=content
    export PATH=/usr/lib/ccache/:$PATH
    ccache --show-stats
fi

set +ex
@ -0,0 +1,44 @@
#!/usr/bin/env bash

set -e

wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB

sudo apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB

echo "deb https://apt.repos.intel.com/mkl all main" | \
    sudo tee /etc/apt/sources.list.d/intel-mkl.list

sudo wget -O /usr/share/keyrings/apache-arrow-keyring.gpg https://dl.bintray.com/apache/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/apache-arrow-keyring.gpg

sudo tee /etc/apt/sources.list.d/apache-arrow.list <<APT_LINE
deb [arch=amd64 signed-by=/usr/share/keyrings/apache-arrow-keyring.gpg] https://dl.bintray.com/apache/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/ $(lsb_release --codename --short) main
deb-src [signed-by=/usr/share/keyrings/apache-arrow-keyring.gpg] https://dl.bintray.com/apache/arrow/$(lsb_release --id --short | tr 'A-Z' 'a-z')/ $(lsb_release --codename --short) main
APT_LINE

sudo apt-get update -qq

sudo apt-get install -y -q --no-install-recommends \
    gfortran \
    lsb-core \
    libtool \
    automake \
    ccache \
    pkg-config \
    libarrow-dev \
    libjemalloc-dev \
    libboost-serialization-dev \
    libboost-filesystem-dev \
    libboost-system-dev \
    libboost-regex-dev \
    intel-mkl-gnu-2019.5-281 \
    intel-mkl-core-2019.5-281 \
    libmysqlclient-dev \
    clang-format-6.0 \
    clang-tidy-6.0 \
    lcov

sudo ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so \
    /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so

export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/opt/intel/compilers_and_libraries_2019.5.281/linux/mkl/lib/intel64
@ -0,0 +1,24 @@
#!/usr/bin/env bash

set -ex

source $TRAVIS_BUILD_DIR/ci/travis/travis_env_common.sh

only_library_mode=no

while true; do
    case "$1" in
        --only-library)
            only_library_mode=yes
            shift ;;
        *) break ;;
    esac
done

BUILD_COMMON_FLAGS="-t ${MILVUS_BUILD_TYPE} -o ${MILVUS_INSTALL_PREFIX} -b ${MILVUS_BUILD_DIR}"

if [ $only_library_mode == "yes" ]; then
    ${TRAVIS_BUILD_DIR}/ci/scripts/build.sh ${BUILD_COMMON_FLAGS} -m -p
else
    ${TRAVIS_BUILD_DIR}/ci/scripts/build.sh ${BUILD_COMMON_FLAGS} -m -p -u -c
fi
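The two modes of this Travis wrapper map onto the CI build script flags shown earlier. A minimal sketch, assuming `TRAVIS_BUILD_DIR` points at the repository checkout:

```shell
# Full build: delegates to ci/scripts/build.sh with -m -p -u -c (MKL, privileged install, unit tests, coverage).
${TRAVIS_BUILD_DIR}/ci/travis/travis_build.sh
# Library-only build: delegates with -m -p, skipping unit tests and coverage.
${TRAVIS_BUILD_DIR}/ci/travis/travis_build.sh --only-library
```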
@ -0,0 +1,10 @@
export MILVUS_CORE_DIR=${TRAVIS_BUILD_DIR}/core
export MILVUS_BUILD_DIR=${TRAVIS_BUILD_DIR}/core/cmake_build
export MILVUS_INSTALL_PREFIX=/var/lib/milvus
export MILVUS_TRAVIS_COVERAGE=${MILVUS_TRAVIS_COVERAGE:=0}

if [ "${MILVUS_TRAVIS_COVERAGE}" == "1" ]; then
    export MILVUS_CPP_COVERAGE_FILE=${TRAVIS_BUILD_DIR}/output_new.info
fi

export MILVUS_BUILD_TYPE=${MILVUS_BUILD_TYPE:=Release}
@ -1,10 +1,13 @@
milvus/
conf/server_config.yaml
conf/log_config.conf
version.h
src/config.h
src/version.h
lcov_out/
base.info
output.info
output_new.info
server.info
*.pyc
src/grpc/python_gen.h
src/grpc/python/
@ -18,47 +18,54 @@
#-------------------------------------------------------------------------------


cmake_minimum_required(VERSION 3.14)
cmake_minimum_required(VERSION 3.12)
message(STATUS "Building using CMake version: ${CMAKE_VERSION}")

set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake")

MACRO (GET_CURRENT_TIME CURRENT_TIME)
MACRO(GET_CURRENT_TIME CURRENT_TIME)
    execute_process(COMMAND "date" +"%Y-%m-%d %H:%M.%S" OUTPUT_VARIABLE ${CURRENT_TIME})
ENDMACRO (GET_CURRENT_TIME)
ENDMACRO(GET_CURRENT_TIME)

GET_CURRENT_TIME(BUILD_TIME)
string(REGEX REPLACE "\n" "" BUILD_TIME ${BUILD_TIME})
message(STATUS "Build time = ${BUILD_TIME}")

MACRO (GET_GIT_BRANCH_NAME GIT_BRANCH_NAME)
    execute_process(COMMAND "git" rev-parse --abbrev-ref HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
    if(GIT_BRANCH_NAME STREQUAL "")
        execute_process(COMMAND "git" symbolic-ref --short -q HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
    endif()
ENDMACRO (GET_GIT_BRANCH_NAME)
if (NOT DEFINED CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build.")
endif ()

set(GIT_BRANCH_NAME_REGEX "[0-9]+\\.[0-9]+\\.[0-9]")

MACRO(GET_GIT_BRANCH_NAME GIT_BRANCH_NAME)
    execute_process(COMMAND sh "-c" "git log --decorate | head -n 1 | sed 's/.*(\\(.*\\))/\\1/' | sed 's/.*, //' | sed 's=[a-zA-Z]*\/==g'"
        OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
    if (NOT GIT_BRANCH_NAME MATCHES "${GIT_BRANCH_NAME_REGEX}")
        execute_process(COMMAND "git" rev-parse --abbrev-ref HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
    endif ()
    if (NOT GIT_BRANCH_NAME MATCHES "${GIT_BRANCH_NAME_REGEX}")
        execute_process(COMMAND "git" symbolic-ref --short -q HEAD HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
    endif ()
ENDMACRO(GET_GIT_BRANCH_NAME)

GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME)
message(STATUS "GIT_BRANCH_NAME = ${GIT_BRANCH_NAME}")
if(NOT GIT_BRANCH_NAME STREQUAL "")
if (NOT GIT_BRANCH_NAME STREQUAL "")
    string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME})
endif()
endif ()

set(MILVUS_VERSION "${GIT_BRANCH_NAME}")
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]" MILVUS_VERSION "${MILVUS_VERSION}")
string(REGEX MATCH "${GIT_BRANCH_NAME_REGEX}" MILVUS_VERSION "${MILVUS_VERSION}")

find_package(ClangTools)
set(BUILD_SUPPORT_DIR "${CMAKE_SOURCE_DIR}/build-support")

if(CMAKE_BUILD_TYPE STREQUAL "Release")
if (CMAKE_BUILD_TYPE STREQUAL "Release")
    set(BUILD_TYPE "Release")
else()
else ()
    set(BUILD_TYPE "Debug")
endif()
endif ()
message(STATUS "Build type = ${BUILD_TYPE}")

project(milvus VERSION "${MILVUS_VERSION}")
project(milvus_engine LANGUAGES CUDA CXX)
project(milvus_engine LANGUAGES CXX)

unset(CMAKE_EXPORT_COMPILE_COMMANDS CACHE)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

@ -67,15 +74,15 @@ set(MILVUS_VERSION_MAJOR "${milvus_VERSION_MAJOR}")
set(MILVUS_VERSION_MINOR "${milvus_VERSION_MINOR}")
set(MILVUS_VERSION_PATCH "${milvus_VERSION_PATCH}")

if(MILVUS_VERSION_MAJOR STREQUAL ""
if (MILVUS_VERSION_MAJOR STREQUAL ""
    OR MILVUS_VERSION_MINOR STREQUAL ""
    OR MILVUS_VERSION_PATCH STREQUAL "")
    message(WARNING "Failed to determine Milvus version from git branch name")
    set(MILVUS_VERSION "0.5.3")
endif()
    set(MILVUS_VERSION "0.6.0")
endif ()

message(STATUS "Build version = ${MILVUS_VERSION}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/version.h.macro ${CMAKE_CURRENT_SOURCE_DIR}/src/version.h)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/version.h.in ${CMAKE_CURRENT_SOURCE_DIR}/src/version.h @ONLY)

message(STATUS "Milvus version: "
    "${MILVUS_VERSION_MAJOR}.${MILVUS_VERSION_MINOR}.${MILVUS_VERSION_PATCH} "
@ -84,77 +91,109 @@ message(STATUS "Milvus version: "
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED on)

if(CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)")
if (CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)")
    message(STATUS "Building milvus_engine on x86 architecture")
    set(MILVUS_BUILD_ARCH x86_64)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc)")
elseif (CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc)")
    message(STATUS "Building milvus_engine on ppc architecture")
    set(MILVUS_BUILD_ARCH ppc64le)
else()
else ()
    message(WARNING "Unknown processor type")
    message(WARNING "CMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR}")
    set(MILVUS_BUILD_ARCH unknown)
endif()

find_package (Python COMPONENTS Interpreter Development)

find_package(CUDA)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda")

if(CMAKE_BUILD_TYPE STREQUAL "Release")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp")
    set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3")
else()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp")
    set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O0 -g")
endif()
endif ()

# Ensure that a default make is set
if("${MAKE}" STREQUAL "")
    if(NOT MSVC)
if ("${MAKE}" STREQUAL "")
    if (NOT MSVC)
        find_program(MAKE make)
    endif()
endif()
    endif ()
endif ()

find_path(MYSQL_INCLUDE_DIR
    NAMES "mysql.h"
    PATH_SUFFIXES "mysql")
        NAMES "mysql.h"
        PATH_SUFFIXES "mysql")
if (${MYSQL_INCLUDE_DIR} STREQUAL "MYSQL_INCLUDE_DIR-NOTFOUND")
    message(FATAL_ERROR "Could not found MySQL include directory")
else()
else ()
    include_directories(${MYSQL_INCLUDE_DIR})
endif()
endif ()

set(MILVUS_SOURCE_DIR ${PROJECT_SOURCE_DIR})
set(MILVUS_BINARY_DIR ${PROJECT_BINARY_DIR})
set(MILVUS_ENGINE_SRC ${PROJECT_SOURCE_DIR}/src)
set(MILVUS_THIRDPARTY_SRC ${PROJECT_SOURCE_DIR}/thirdparty)

include(ExternalProject)
include(DefineOptions)
include(BuildUtils)
include(ThirdPartyPackages)

config_summary()
if (MILVUS_USE_CCACHE)
    find_program(CCACHE_FOUND ccache)
    if (CCACHE_FOUND)
        message(STATUS "Using ccache: ${CCACHE_FOUND}")
        set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ${CCACHE_FOUND})
        set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE_FOUND})
        # let ccache preserve C++ comments, because some of them may be
        # meaningful to the compiler
        set(ENV{CCACHE_COMMENTS} "1")
    endif (CCACHE_FOUND)
endif ()

if (CUSTOMIZATION)
    add_definitions(-DCUSTOMIZATION)
endif (CUSTOMIZATION)
    set(MILVUS_GPU_VERSION ON)
    add_compile_definitions(CUSTOMIZATION)
endif ()

if (MILVUS_GPU_VERSION)
    message(STATUS "Building Milvus GPU version")
    add_compile_definitions("MILVUS_GPU_VERSION")
    enable_language(CUDA)
    find_package(CUDA 10 REQUIRED)
    set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda")
else ()
    message(STATUS "Building Milvus CPU version")
endif ()

if (MILVUS_WITH_PROMETHEUS)
    add_compile_definitions("MILVUS_WITH_PROMETHEUS")
endif ()

if (CMAKE_BUILD_TYPE STREQUAL "Release")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp")
    if (MILVUS_GPU_VERSION)
        set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3")
    endif ()
else ()
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp")
    if (MILVUS_GPU_VERSION)
        set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O0 -g")
    endif ()
endif ()

config_summary()
add_subdirectory(src)

if (BUILD_UNIT_TEST STREQUAL "ON")
    if (BUILD_COVERAGE STREQUAL "ON")
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
    endif()
    endif ()
    add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/unittest)
endif()
endif ()

add_custom_target(Clean-All COMMAND ${CMAKE_BUILD_TOOL} clean)

if("${MILVUS_DB_PATH}" STREQUAL "")
    set(MILVUS_DB_PATH "/tmp/milvus")
endif()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml)
if ("${MILVUS_DB_PATH}" STREQUAL "")
    set(MILVUS_DB_PATH "${CMAKE_INSTALL_PREFIX}")
endif ()

if (MILVUS_GPU_VERSION)
    configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_gpu_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml)
else ()
    configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_cpu_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml)
endif ()

configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.conf)

install(DIRECTORY scripts/

@ -163,25 +202,33 @@ install(DIRECTORY scripts/
    GROUP_EXECUTE GROUP_READ
    WORLD_EXECUTE WORLD_READ
    FILES_MATCHING PATTERN "*.sh")
install(DIRECTORY scripts/migration
    DESTINATION scripts
    FILE_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
    GROUP_EXECUTE GROUP_READ
    WORLD_EXECUTE WORLD_READ)
install(FILES
    conf/server_config.yaml
    conf/log_config.conf
    DESTINATION
    conf)

find_package(Python COMPONENTS Interpreter Development)
find_package(ClangTools)
set(BUILD_SUPPORT_DIR "${CMAKE_SOURCE_DIR}/build-support")

#
# "make lint" target
#
if(NOT MILVUS_VERBOSE_LINT)
    set(MILVUS_LINT_QUIET "--quiet")
endif()
if (NOT MILVUS_VERBOSE_LINT)
    set(MILVUS_LINT_QUIET "--quiet")
endif ()

if(NOT LINT_EXCLUSIONS_FILE)
    # source files matching a glob from a line in this file
    # will be excluded from linting (cpplint, clang-tidy, clang-format)
    set(LINT_EXCLUSIONS_FILE ${BUILD_SUPPORT_DIR}/lint_exclusions.txt)
endif()
if (NOT LINT_EXCLUSIONS_FILE)
    # source files matching a glob from a line in this file
    # will be excluded from linting (cpplint, clang-tidy, clang-format)
    set(LINT_EXCLUSIONS_FILE ${BUILD_SUPPORT_DIR}/lint_exclusions.txt)
endif ()

find_program(CPPLINT_BIN NAMES cpplint cpplint.py HINTS ${BUILD_SUPPORT_DIR})
message(STATUS "Found cpplint executable at ${CPPLINT_BIN}")
@ -190,77 +237,76 @@ message(STATUS "Found cpplint executable at ${CPPLINT_BIN}")
#
# "make lint" targets
#
add_custom_target(lint
    ${PYTHON_EXECUTABLE}
    ${BUILD_SUPPORT_DIR}/run_cpplint.py
    --cpplint_binary
    ${CPPLINT_BIN}
    --exclude_globs
    ${LINT_EXCLUSIONS_FILE}
    --source_dir
    ${CMAKE_CURRENT_SOURCE_DIR}
    ${MILVUS_LINT_QUIET})
    ${PYTHON_EXECUTABLE}
    ${BUILD_SUPPORT_DIR}/run_cpplint.py
    --cpplint_binary
    ${CPPLINT_BIN}
    --exclude_globs
    ${LINT_EXCLUSIONS_FILE}
    --source_dir
    ${CMAKE_CURRENT_SOURCE_DIR}
    ${MILVUS_LINT_QUIET})

#
# "make clang-format" and "make check-clang-format" targets
#
if(${CLANG_FORMAT_FOUND})
    # runs clang format and updates files in place.
    add_custom_target(clang-format
        ${PYTHON_EXECUTABLE}
        ${BUILD_SUPPORT_DIR}/run_clang_format.py
        --clang_format_binary
        ${CLANG_FORMAT_BIN}
        --exclude_globs
        ${LINT_EXCLUSIONS_FILE}
        --source_dir
        ${CMAKE_CURRENT_SOURCE_DIR}/src
        --fix
        ${MILVUS_LINT_QUIET})
if (${CLANG_FORMAT_FOUND})
    # runs clang format and updates files in place.
    add_custom_target(clang-format
        ${PYTHON_EXECUTABLE}
        ${BUILD_SUPPORT_DIR}/run_clang_format.py
        --clang_format_binary
        ${CLANG_FORMAT_BIN}
        --exclude_globs
        ${LINT_EXCLUSIONS_FILE}
        --source_dir
        ${CMAKE_CURRENT_SOURCE_DIR}/src
        --fix
        ${MILVUS_LINT_QUIET})

    # runs clang format and exits with a non-zero exit code if any files need to be reformatted
    add_custom_target(check-clang-format
        ${PYTHON_EXECUTABLE}
        ${BUILD_SUPPORT_DIR}/run_clang_format.py
        --clang_format_binary
        ${CLANG_FORMAT_BIN}
        --exclude_globs
        ${LINT_EXCLUSIONS_FILE}
        --source_dir
        ${CMAKE_CURRENT_SOURCE_DIR}/src
        ${MILVUS_LINT_QUIET})
endif()
    # runs clang format and exits with a non-zero exit code if any files need to be reformatted
    add_custom_target(check-clang-format
        ${PYTHON_EXECUTABLE}
        ${BUILD_SUPPORT_DIR}/run_clang_format.py
        --clang_format_binary
        ${CLANG_FORMAT_BIN}
        --exclude_globs
        ${LINT_EXCLUSIONS_FILE}
        --source_dir
        ${CMAKE_CURRENT_SOURCE_DIR}/src
        ${MILVUS_LINT_QUIET})
endif ()

#
# "make clang-tidy" and "make check-clang-tidy" targets
#
if(${CLANG_TIDY_FOUND})
    # runs clang-tidy and attempts to fix any warning automatically
    add_custom_target(clang-tidy
        ${PYTHON_EXECUTABLE}
        ${BUILD_SUPPORT_DIR}/run_clang_tidy.py
        --clang_tidy_binary
        ${CLANG_TIDY_BIN}
        --exclude_globs
        ${LINT_EXCLUSIONS_FILE}
        --compile_commands
        ${CMAKE_BINARY_DIR}/compile_commands.json
        --source_dir
        ${CMAKE_CURRENT_SOURCE_DIR}/src
        --fix
        ${MILVUS_LINT_QUIET})

    # runs clang-tidy and exits with a non-zero exit code if any errors are found.
    add_custom_target(check-clang-tidy
        ${PYTHON_EXECUTABLE}
        ${BUILD_SUPPORT_DIR}/run_clang_tidy.py
        --clang_tidy_binary
        ${CLANG_TIDY_BIN}
        --exclude_globs
        ${LINT_EXCLUSIONS_FILE}
        --compile_commands
        ${CMAKE_BINARY_DIR}/compile_commands.json
        --source_dir
        ${CMAKE_CURRENT_SOURCE_DIR}/src
        ${MILVUS_LINT_QUIET})
endif()
if (${CLANG_TIDY_FOUND})
    # runs clang-tidy and attempts to fix any warning automatically
    add_custom_target(clang-tidy
        ${PYTHON_EXECUTABLE}
        ${BUILD_SUPPORT_DIR}/run_clang_tidy.py
        --clang_tidy_binary
        ${CLANG_TIDY_BIN}
        --exclude_globs
        ${LINT_EXCLUSIONS_FILE}
        --compile_commands
        ${CMAKE_BINARY_DIR}/compile_commands.json
        --source_dir
        ${CMAKE_CURRENT_SOURCE_DIR}/src
        --fix
        ${MILVUS_LINT_QUIET})

    # runs clang-tidy and exits with a non-zero exit code if any errors are found.
    add_custom_target(check-clang-tidy
        ${PYTHON_EXECUTABLE}
        ${BUILD_SUPPORT_DIR}/run_clang_tidy.py
        --clang_tidy_binary
        ${CLANG_TIDY_BIN}
        --exclude_globs
        ${LINT_EXCLUSIONS_FILE}
        --compile_commands
        ${CMAKE_BINARY_DIR}/compile_commands.json
        --source_dir
        ${CMAKE_CURRENT_SOURCE_DIR}/src
        ${MILVUS_LINT_QUIET})
endif ()
@ -6,5 +6,5 @@
*easylogging++*
*SqliteMetaImpl.cpp
*src/grpc*
*src/external*
*thirdparty*
*milvus/include*
core/build.sh

@ -12,124 +12,143 @@ USE_JFROG_CACHE="OFF"
RUN_CPPLINT="OFF"
CUSTOMIZATION="OFF" # default use ori faiss
CUDA_COMPILER=/usr/local/cuda/bin/nvcc
GPU_VERSION="OFF" #defaults to CPU version
WITH_MKL="OFF"
FAISS_ROOT="" #FAISS root path
FAISS_SOURCE="BUNDLED"
WITH_PROMETHEUS="ON"

CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}"
wget -q --method HEAD ${CUSTOMIZED_FAISS_URL}
if [ $? -eq 0 ]; then
    CUSTOMIZATION="ON"
else
    CUSTOMIZATION="OFF"
fi

while getopts "p:d:t:ulrcgjhx" arg
do
    case $arg in
        p)
            INSTALL_PREFIX=$OPTARG
            ;;
        d)
            DB_PATH=$OPTARG
            ;;
        t)
            BUILD_TYPE=$OPTARG # BUILD_TYPE
            ;;
        u)
            echo "Build and run unittest cases" ;
            BUILD_UNITTEST="ON";
            ;;
        l)
            RUN_CPPLINT="ON"
            ;;
        r)
            if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
                rm ./${BUILD_OUTPUT_DIR} -r
                MAKE_CLEAN="ON"
            fi
            ;;
        c)
            BUILD_COVERAGE="ON"
            ;;
        g)
            PROFILING="ON"
            ;;
        j)
            USE_JFROG_CACHE="ON"
            ;;
        x)
            CUSTOMIZATION="OFF" # force use ori faiss
            ;;
        h) # help
            echo "
while getopts "p:d:t:f:ulrcgjhxzme" arg; do
    case $arg in
        p)
            INSTALL_PREFIX=$OPTARG
            ;;
        d)
            DB_PATH=$OPTARG
            ;;
        t)
            BUILD_TYPE=$OPTARG # BUILD_TYPE
            ;;
        f)
            FAISS_ROOT=$OPTARG
            FAISS_SOURCE="AUTO"
            ;;
        u)
            echo "Build and run unittest cases"
            BUILD_UNITTEST="ON"
            ;;
        l)
            RUN_CPPLINT="ON"
            ;;
        r)
            if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
                rm ./${BUILD_OUTPUT_DIR} -r
                MAKE_CLEAN="ON"
            fi
            ;;
        c)
            BUILD_COVERAGE="ON"
            ;;
        z)
            PROFILING="ON"
            ;;
        j)
            USE_JFROG_CACHE="ON"
            ;;
        x)
            CUSTOMIZATION="ON"
            ;;
        g)
            GPU_VERSION="ON"
            ;;
        m)
            WITH_MKL="ON"
            ;;
        e)
            WITH_PROMETHEUS="OFF"
            ;;
        h) # help
            echo "

parameter:
-p: install prefix(default: $(pwd)/milvus)
-d: db data path(default: /tmp/milvus)
-t: build type(default: Debug)
-f: FAISS root path(default: empty). The path should be an absolute path
    containing the pre-installed lib/ and include/ directory of FAISS. If they can't be found,
    we will build the original FAISS from source instead.
-u: building unit test options(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-r: remove previous build directory(default: OFF)
-c: code coverage(default: OFF)
-g: profiling(default: OFF)
-z: profiling(default: OFF)
-j: use jfrog cache build directory(default: OFF)
-g: build GPU version(default: OFF)
-m: build with MKL(default: OFF)
-e: build without prometheus(default: OFF)
-h: help

usage:
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-g] [-j] [-h]
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} -f \${FAISS_ROOT} [-u] [-l] [-r] [-c] [-z] [-j] [-g] [-m] [-e] [-h]
"
            exit 0
            ;;
        ?)
            echo "ERROR! unknown argument"
            exit 1
            ;;
    esac
            exit 0
            ;;
        ?)
            echo "ERROR! unknown argument"
            exit 1
            ;;
    esac
done

if [[ ! -d ${BUILD_OUTPUT_DIR} ]]; then
    mkdir ${BUILD_OUTPUT_DIR}
    mkdir ${BUILD_OUTPUT_DIR}
fi

cd ${BUILD_OUTPUT_DIR}

# remove make cache since build.sh -l use default variables
# force update the variables each time
make rebuild_cache > /dev/null 2>&1
make rebuild_cache >/dev/null 2>&1

CMAKE_CMD="cmake \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX}
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DFAISS_ROOT=${FAISS_ROOT} \
-DFAISS_SOURCE=${FAISS_SOURCE} \
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DMILVUS_DB_PATH=${DB_PATH} \
-DMILVUS_ENABLE_PROFILING=${PROFILING} \
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
-DCUSTOMIZATION=${CUSTOMIZATION} \
-DFAISS_URL=${CUSTOMIZED_FAISS_URL} \
-DMILVUS_GPU_VERSION=${GPU_VERSION} \
-DFAISS_WITH_MKL=${WITH_MKL} \
-DMILVUS_WITH_PROMETHEUS=${WITH_PROMETHEUS} \
../"
echo ${CMAKE_CMD}
${CMAKE_CMD}

if [[ ${MAKE_CLEAN} == "ON" ]]; then
    make clean
    make clean
fi

if [[ ${RUN_CPPLINT} == "ON" ]]; then
    # cpplint check
    make lint
    if [ $? -ne 0 ]; then
        echo "ERROR! cpplint check failed"
        exit 1
    fi
    echo "cpplint check passed!"
    # cpplint check
    make lint
    if [ $? -ne 0 ]; then
        echo "ERROR! cpplint check failed"
        exit 1
    fi
    echo "cpplint check passed!"

    # clang-format check
    make check-clang-format
    if [ $? -ne 0 ]; then
        echo "ERROR! clang-format check failed"
        exit 1
    fi
    echo "clang-format check passed!"
    # clang-format check
    make check-clang-format
    if [ $? -ne 0 ]; then
        echo "ERROR! clang-format check failed"
        exit 1
    fi
    echo "clang-format check passed!"

    # # clang-tidy check
    # make check-clang-tidy

@ -140,11 +159,11 @@ if [[ ${RUN_CPPLINT} == "ON" ]]; then
    # echo "clang-tidy check passed!"
else

    # strip binary symbol
    if [[ ${BUILD_TYPE} != "Debug" ]]; then
        strip src/milvus_server
    fi
    # strip binary symbol
    if [[ ${BUILD_TYPE} != "Debug" ]]; then
        strip src/milvus_server
    fi

    # compile and build
    make -j 8 install || exit 1
fi
    # compile and build
    make -j 8 install || exit 1
fi
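Illustrative invocations of the reworked core/build.sh, following its usage text above (the FAISS path is an example value, not from this commit):

```shell
# GPU version with MKL, prometheus disabled, Release build:
./build.sh -t Release -g -m -e
# CPU build reusing a pre-installed FAISS instead of the bundled one:
./build.sh -t Release -f /opt/faiss
```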
@ -74,7 +74,7 @@ function(ExternalProject_Use_Cache project_name package_file install_path)
    ${CMAKE_COMMAND} -E echo
    "Extracting ${package_file} to ${install_path}"
    COMMAND
    ${CMAKE_COMMAND} -E tar xzvf ${package_file} ${install_path}
    ${CMAKE_COMMAND} -E tar xzf ${package_file} ${install_path}
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    )

@ -13,16 +13,16 @@ macro(define_option name description default)
endmacro()

function(list_join lst glue out)
    if("${${lst}}" STREQUAL "")
    if ("${${lst}}" STREQUAL "")
        set(${out} "" PARENT_SCOPE)
        return()
    endif()
    endif ()

    list(GET ${lst} 0 joined)
    list(REMOVE_AT ${lst} 0)
    foreach(item ${${lst}})
    foreach (item ${${lst}})
        set(joined "${joined}${glue}${item}")
    endforeach()
    endforeach ()
    set(${out} ${joined} PARENT_SCOPE)
endfunction()

@ -35,22 +35,31 @@ macro(define_option_string name description default)

    set("${name}_OPTION_ENUM" ${ARGN})
    list_join("${name}_OPTION_ENUM" "|" "${name}_OPTION_ENUM")
    if(NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
    if (NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
        set_property(CACHE ${name} PROPERTY STRINGS ${ARGN})
    endif()
    endif ()
endmacro()

#----------------------------------------------------------------------
set_option_category("Milvus Build Option")

define_option(MILVUS_GPU_VERSION "Build GPU version" OFF)

define_option(CUSTOMIZATION "Build with customized FAISS library" OFF)

#----------------------------------------------------------------------
set_option_category("Thirdparty")

set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "AUTO")
set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "BUNDLED")

define_option_string(MILVUS_DEPENDENCY_SOURCE
    "Method to use for acquiring MILVUS's build dependencies"
    "${MILVUS_DEPENDENCY_SOURCE_DEFAULT}"
    "AUTO"
    "BUNDLED"
    "SYSTEM")
    "Method to use for acquiring MILVUS's build dependencies"
    "${MILVUS_DEPENDENCY_SOURCE_DEFAULT}"
    "AUTO"
    "BUNDLED"
    "SYSTEM")

define_option(MILVUS_USE_CCACHE "Use ccache when compiling (if available)" ON)

define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD
    "Show output from ExternalProjects rather than just logging to files" ON)

@ -70,33 +79,21 @@ define_option(MILVUS_WITH_YAMLCPP "Build with yaml-cpp library" ON)
if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
    define_option(MILVUS_WITH_LIBUNWIND "Build with libunwind" ON)
    define_option(MILVUS_WITH_GPERFTOOLS "Build with gperftools" ON)
endif()
endif ()

define_option(MILVUS_WITH_GRPC "Build with GRPC" ON)

define_option(MILVUS_WITH_ZLIB "Build with zlib compression" ON)

#----------------------------------------------------------------------
if(MSVC)
    set_option_category("MSVC")

    define_option(MSVC_LINK_VERBOSE
        "Pass verbose linking options when linking libraries and executables"
        OFF)

    define_option(MILVUS_USE_STATIC_CRT "Build MILVUS with statically linked CRT" OFF)
endif()


#----------------------------------------------------------------------
set_option_category("Test and benchmark")

unset(MILVUS_BUILD_TESTS CACHE)
if (BUILD_UNIT_TEST)
    define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" ON)
else()
else ()
    define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" OFF)
endif(BUILD_UNIT_TEST)
endif (BUILD_UNIT_TEST)

#----------------------------------------------------------------------
macro(config_summary)

@ -108,12 +105,12 @@ macro(config_summary)
    message(STATUS "  Generator: ${CMAKE_GENERATOR}")
    message(STATUS "  Build type: ${CMAKE_BUILD_TYPE}")
    message(STATUS "  Source directory: ${CMAKE_CURRENT_SOURCE_DIR}")
    if(${CMAKE_EXPORT_COMPILE_COMMANDS})
    if (${CMAKE_EXPORT_COMPILE_COMMANDS})
        message(
            STATUS "  Compile commands: ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json")
    endif()
    endif ()

    foreach(category ${MILVUS_OPTION_CATEGORIES})
    foreach (category ${MILVUS_OPTION_CATEGORIES})

        message(STATUS)
        message(STATUS "${category} options:")

@ -121,50 +118,50 @@ macro(config_summary)
        set(option_names ${MILVUS_${category}_OPTION_NAMES})

        set(max_value_length 0)
        foreach(name ${option_names})
        foreach (name ${option_names})
            string(LENGTH "\"${${name}}\"" value_length)
            if(${max_value_length} LESS ${value_length})
            if (${max_value_length} LESS ${value_length})
                set(max_value_length ${value_length})
            endif()
        endforeach()
            endif ()
        endforeach ()

        foreach(name ${option_names})
            if("${${name}_OPTION_TYPE}" STREQUAL "string")
        foreach (name ${option_names})
            if ("${${name}_OPTION_TYPE}" STREQUAL "string")
                set(value "\"${${name}}\"")
            else()
            else ()
                set(value "${${name}}")
            endif()
            endif ()

            set(default ${${name}_OPTION_DEFAULT})
            set(description ${${name}_OPTION_DESCRIPTION})
            string(LENGTH ${description} description_length)
            if(${description_length} LESS 70)
            if (${description_length} LESS 70)
                string(
                    SUBSTRING
                    "                                                                      "
                    ${description_length} -1 description_padding)
            else()
            else ()
                set(description_padding "
                ")
            endif()
            endif ()

            set(comment "[${name}]")

            if("${value}" STREQUAL "${default}")
            if ("${value}" STREQUAL "${default}")
                set(comment "[default] ${comment}")
            endif()
            endif ()

            if(NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
            if (NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
                set(comment "${comment} [${${name}_OPTION_ENUM}]")
            endif()
            endif ()

            string(
                SUBSTRING "${value}                                             "
                0 ${max_value_length} value)

            message(STATUS "  ${description} ${description_padding} ${value} ${comment}")
        endforeach()
        endforeach ()

    endforeach()
    endforeach ()

endmacro()
@ -164,8 +164,10 @@ endif ()

macro(resolve_dependency DEPENDENCY_NAME)
    if (${DEPENDENCY_NAME}_SOURCE STREQUAL "AUTO")
        #disable find_package for now
        build_dependency(${DEPENDENCY_NAME})
        find_package(${DEPENDENCY_NAME} MODULE)
        if(NOT ${${DEPENDENCY_NAME}_FOUND})
            build_dependency(${DEPENDENCY_NAME})
        endif()
    elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "BUNDLED")
        build_dependency(${DEPENDENCY_NAME})
    elseif (${DEPENDENCY_NAME}_SOURCE STREQUAL "SYSTEM")
@ -1,5 +1,7 @@
# Default values are used when you make no changes to the following parameters.

version: 0.1 # config version

server_config:
  address: 0.0.0.0 # milvus server ip address (IPv4)
  port: 19530 # milvus server port, must in range [1025, 65534]

@ -27,10 +29,7 @@ metric_config:
    port: 8080 # port prometheus uses to fetch metrics, must in range [1025, 65534]

cache_config:
  cpu_cache_capacity: 16 # GB, CPU memory used for cache, must be a positive integer
  cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0]
  gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer
  gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0]
  cpu_cache_capacity: 16 # GB, size of CPU memory used for cache, must be a positive integer
  cache_insert_data: false # whether to load inserted data into cache, must be a boolean

engine_config:

@ -38,8 +37,10 @@ engine_config:
  # if nq >= use_blas_threshold, use OpenBlas, slower with stable response times
  gpu_search_threshold: 1000 # threshold beyond which the search computation is executed on GPUs only

resource_config:
  search_resources: # define the devices used for search computation, must be in format: cpu or gpux
    - cpu
gpu_resource_config:
  enable: false # whether to enable GPU resources
  cache_capacity: 4 # GB, size of GPU memory per card used for cache, must be a positive integer
  search_resources: # define the GPU devices used for search computation, must be in format gpux
    - gpu0
  build_index_resources: # define the GPU devices used for index building, must be in format gpux
    - gpu0
  index_build_device: gpu0 # GPU used for building index, must be in format: gpux
@ -0,0 +1,46 @@
# Default values are used when you make no changes to the following parameters.

version: 0.1 # config version

server_config:
  address: 0.0.0.0 # milvus server ip address (IPv4)
  port: 19530 # milvus server port, must in range [1025, 65534]
  deploy_mode: single # deployment type: single, cluster_readonly, cluster_writable
  time_zone: UTC+8 # time zone, must be in format: UTC+X

db_config:
  primary_path: @MILVUS_DB_PATH@ # path used to store data and meta
  secondary_path: # path used to store data only, split by semicolon

  backend_url: sqlite://:@:/ # URI format: dialect://username:password@host:port/database
                             # Keep 'dialect://:@:/', and replace other texts with real values
                             # Replace 'dialect' with 'mysql' or 'sqlite'

  insert_buffer_size: 4 # GB, maximum insert buffer size allowed, must be a positive integer
                        # sum of insert_buffer_size and cpu_cache_capacity cannot exceed total memory

  preload_table: # preload data at startup, '*' means load all tables, empty value means no preload
                 # you can specify preload tables like this: table1,table2,table3

metric_config:
  enable_monitor: false # enable monitoring or not, must be a boolean
  collector: prometheus # prometheus
  prometheus_config:
    port: 8080 # port prometheus uses to fetch metrics, must in range [1025, 65534]

cache_config:
  cpu_cache_capacity: 16 # GB, size of CPU memory used for cache, must be a positive integer
  cache_insert_data: false # whether to load inserted data into cache, must be a boolean

engine_config:
  use_blas_threshold: 1100 # if nq < use_blas_threshold, use SSE, faster with fluctuated response times
                           # if nq >= use_blas_threshold, use OpenBlas, slower with stable response times
  gpu_search_threshold: 1000 # threshold beyond which the search computation is executed on GPUs only

gpu_resource_config:
  enable: true # whether to enable GPU resources
  cache_capacity: 4 # GB, size of GPU memory per card used for cache, must be a positive integer
  search_resources: # define the GPU devices used for search computation, must be in format gpux
    - gpu0
  build_index_resources: # define the GPU devices used for index building, must be in format gpux
    - gpu0
@ -122,9 +122,7 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
    "*/src/server/Server.cpp" \
    "*/src/server/DBWrapper.cpp" \
    "*/src/server/grpc_impl/GrpcServer.cpp" \
    "*/easylogging++.h" \
    "*/easylogging++.cc" \
    "*/src/external/*"
    "*/thirdparty/*"

if [ $? -ne 0 ]; then
    echo "generate ${FILE_INFO_OUTPUT_NEW} failed"
@ -0,0 +1,28 @@
## Data Migration

#### 0.3.x
Legacy data cannot be migrated to later versions.

#### 0.4.x
Legacy data can be reused directly by 0.5.x.

Legacy data can be migrated to 0.6.x.

#### 0.5.x
Legacy data can be migrated to 0.6.x.

#### 0.6.x
How to migrate legacy 0.4.x/0.5.x data:

For sqlite meta:
```shell
$ sqlite3 [path_to]/meta.sqlite < sqlite_4_to_6.sql
```

For mysql meta:
```shell
$ mysql -h127.0.0.1 -uroot -p123456 -Dmilvus < mysql_4_to_6.sql
```
@ -0,0 +1,4 @@
alter table Tables add column owner_table VARCHAR(255) DEFAULT '' NOT NULL;
alter table Tables add column partition_tag VARCHAR(255) DEFAULT '' NOT NULL;
alter table Tables add column version VARCHAR(64) DEFAULT '0.6.0' NOT NULL;
update Tables set version='0.6.0';

@ -0,0 +1,3 @@
alter table Tables drop column owner_table;
alter table Tables drop column partition_tag;
alter table Tables drop column version;

@ -0,0 +1,4 @@
alter table Tables add column 'owner_table' TEXT DEFAULT '' NOT NULL;
alter table Tables add column 'partition_tag' TEXT DEFAULT '' NOT NULL;
alter table Tables add column 'version' TEXT DEFAULT '0.6.0' NOT NULL;
update Tables set version='0.6.0';

@ -0,0 +1,7 @@
CREATE TABLE 'TempTables' ( 'id' INTEGER PRIMARY KEY NOT NULL , 'table_id' TEXT UNIQUE NOT NULL , 'state' INTEGER NOT NULL , 'dimension' INTEGER NOT NULL , 'created_on' INTEGER NOT NULL , 'flag' INTEGER DEFAULT 0 NOT NULL , 'index_file_size' INTEGER NOT NULL , 'engine_type' INTEGER NOT NULL , 'nlist' INTEGER NOT NULL , 'metric_type' INTEGER NOT NULL);

INSERT INTO TempTables SELECT id, table_id, state, dimension, created_on, flag, index_file_size, engine_type, nlist, metric_type FROM Tables;

DROP TABLE Tables;

ALTER TABLE TempTables RENAME TO Tables;
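One way to sanity-check the 4-to-6 migration after running the scripts above. This is an illustrative query only; it assumes nothing beyond the three columns the migration adds to `Tables`:

```shell
# Illustrative check: the new columns should exist and version should read 0.6.0.
$ sqlite3 [path_to]/meta.sqlite "SELECT owner_table, partition_tag, version FROM Tables LIMIT 5;"
```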
@ -19,13 +19,15 @@

include_directories(${MILVUS_SOURCE_DIR})
include_directories(${MILVUS_ENGINE_SRC})
include_directories(${MILVUS_THIRDPARTY_SRC})

include_directories(${CUDA_TOOLKIT_ROOT_DIR}/include)
include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-status)
include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-milvus)

#this statement must put here, since the INDEX_INCLUDE_DIRS is defined in code/CMakeList.txt
add_subdirectory(index)
if (FAISS_WITH_MKL)
    add_compile_definitions("WITH_MKL")
endif ()

set(INDEX_INCLUDE_DIRS ${INDEX_INCLUDE_DIRS} PARENT_SCOPE)
foreach (dir ${INDEX_INCLUDE_DIRS})

@ -35,6 +37,7 @@ endforeach ()
aux_source_directory(${MILVUS_ENGINE_SRC}/cache cache_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/config config_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/metrics metrics_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/metrics/prometheus metrics_prometheus_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db db_main_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/engine db_engine_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/insert db_insert_files)

@ -64,15 +67,21 @@ set(scheduler_files
    ${scheduler_task_files}
    )

aux_source_directory(${MILVUS_ENGINE_SRC}/external/easyloggingpp external_easyloggingpp_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/external/nlohmann external_nlohmann_files)
set(external_files
    ${external_easyloggingpp_files}
    ${external_nlohmann_files}
aux_source_directory(${MILVUS_THIRDPARTY_SRC}/easyloggingpp thirdparty_easyloggingpp_files)
aux_source_directory(${MILVUS_THIRDPARTY_SRC}/nlohmann thirdparty_nlohmann_files)
set(thirdparty_files
    ${thirdparty_easyloggingpp_files}
    ${thirdparty_nlohmann_files}
    )

aux_source_directory(${MILVUS_ENGINE_SRC}/server server_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/grpc_impl grpc_server_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/grpc_impl/request grpc_request_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/grpc_impl grpc_impl_files)
set(grpc_server_files
    ${grpc_request_files}
    ${grpc_impl_files}
    )

aux_source_directory(${MILVUS_ENGINE_SRC}/utils utils_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/wrapper wrapper_files)

@ -84,11 +93,16 @@ set(engine_files
    ${db_insert_files}
    ${db_meta_files}
    ${metrics_files}
    ${external_files}
    ${thirdparty_files}
    ${utils_files}
    ${wrapper_files}
    )

if (MILVUS_WITH_PROMETHEUS)
    set(engine_files ${engine_files}
        ${metrics_prometheus_files})
endif ()

set(client_grpc_lib
    grpcpp_channelz
    grpc++

@ -109,35 +123,50 @@ set(boost_lib
    libboost_serialization.a
    )

set(cuda_lib
    ${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
    cudart
    cublas
    )

set(third_party_libs
    sqlite
    ${client_grpc_lib}
    yaml-cpp
    ${prometheus_lib}
    ${cuda_lib}
    mysqlpp
    zlib
    ${boost_lib}
    )

if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
if (MILVUS_GPU_VERSION)
    include_directories(${CUDA_INCLUDE_DIRS})
    link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
    set(cuda_lib
        ${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
        cudart
        cublas
        )
    set(third_party_libs ${third_party_libs}
        gperftools
        libunwind
        )
        ${cuda_lib}
        )
    aux_source_directory(${MILVUS_ENGINE_SRC}/wrapper/gpu wrapper_gpu_files)
    set(engine_files ${engine_files}
        ${wrapper_gpu_files}
        )
endif ()

if (MILVUS_ENABLE_PROFILING)
    set(third_party_libs ${third_party_libs}
        gperftools
        libunwind
        )
endif ()

if (MILVUS_WITH_PROMETHEUS)
    set(third_party_libs ${third_party_libs}
        ${prometheus_lib}
        )
endif ()

link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
set(engine_libs
    pthread
    libgomp.a
    libgfortran.a
    dl
    )

if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")

@ -147,26 +176,33 @@ if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
    )
endif ()

cuda_add_library(milvus_engine STATIC ${engine_files})
add_library(milvus_engine STATIC ${engine_files})
target_link_libraries(milvus_engine
    knowhere
    ${engine_libs}
    ${third_party_libs}
    ${engine_libs}
    )

add_library(metrics STATIC ${metrics_files})
if (MILVUS_WITH_PROMETHEUS)
    add_library(metrics STATIC ${metrics_files} ${metrics_prometheus_files})
else ()
    add_library(metrics STATIC ${metrics_files})
endif ()

set(metrics_lib
    yaml-cpp
    ${prometheus_lib}
    )

if (MILVUS_WITH_PROMETHEUS)
    set(metrics_lib ${metrics_lib}
        ${prometheus_lib}
        )
endif ()

target_link_libraries(metrics ${metrics_lib})

set(server_libs
    milvus_engine
    pthread
    dl
    metrics
    )
@ -15,24 +15,18 @@
|
|||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
|
||||
|
||||
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
constexpr double DEFAULT_THRESHHOLD_PERCENT = 0.85;
|
||||
|
||||
template<typename ItemObj>
|
||||
template <typename ItemObj>
|
||||
Cache<ItemObj>::Cache(int64_t capacity, uint64_t cache_max_count)
|
||||
: usage_(0),
|
||||
capacity_(capacity),
|
||||
freemem_percent_(DEFAULT_THRESHHOLD_PERCENT),
|
||||
lru_(cache_max_count) {
|
||||
// AGENT_LOG_DEBUG << "Construct Cache with capacity " << std::to_string(mem_capacity)
|
||||
: usage_(0), capacity_(capacity), freemem_percent_(DEFAULT_THRESHHOLD_PERCENT), lru_(cache_max_count) {
|
||||
    // AGENT_LOG_DEBUG << "Construct Cache with capacity " << std::to_string(mem_capacity)
}

-template<typename ItemObj>
+template <typename ItemObj>
void
Cache<ItemObj>::set_capacity(int64_t capacity) {
    if (capacity > 0) {

@@ -41,23 +35,23 @@ Cache<ItemObj>::set_capacity(int64_t capacity) {
    }
}

-template<typename ItemObj>
+template <typename ItemObj>
size_t
Cache<ItemObj>::size() const {
    std::lock_guard<std::mutex> lock(mutex_);
    return lru_.size();
}

-template<typename ItemObj>
+template <typename ItemObj>
bool
-Cache<ItemObj>::exists(const std::string &key) {
+Cache<ItemObj>::exists(const std::string& key) {
    std::lock_guard<std::mutex> lock(mutex_);
    return lru_.exists(key);
}

-template<typename ItemObj>
+template <typename ItemObj>
ItemObj
-Cache<ItemObj>::get(const std::string &key) {
+Cache<ItemObj>::get(const std::string& key) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (!lru_.exists(key)) {
        return nullptr;

@@ -66,68 +60,68 @@ Cache<ItemObj>::get(const std::string &key) {
    return lru_.get(key);
}

-template<typename ItemObj>
+template <typename ItemObj>
void
-Cache<ItemObj>::insert(const std::string &key, const ItemObj &item) {
+Cache<ItemObj>::insert(const std::string& key, const ItemObj& item) {
    if (item == nullptr) {
        return;
    }

-    // if(item->size() > capacity_) {
-    //     SERVER_LOG_ERROR << "Item size " << item->size()
-    //         << " is too large to insert into cache, capacity " << capacity_;
-    //     return;
-    // }
+    // if(item->size() > capacity_) {
+    //     SERVER_LOG_ERROR << "Item size " << item->size()
+    //         << " is too large to insert into cache, capacity " << capacity_;
+    //     return;
+    // }

-    //calculate usage
+    // calculate usage
    {
        std::lock_guard<std::mutex> lock(mutex_);

-        //if key already exist, subtract old item size
+        // if key already exist, subtract old item size
        if (lru_.exists(key)) {
-            const ItemObj &old_item = lru_.get(key);
+            const ItemObj& old_item = lru_.get(key);
            usage_ -= old_item->Size();
        }

-        //plus new item size
+        // plus new item size
        usage_ += item->Size();
    }

-    //if usage exceed capacity, free some items
+    // if usage exceed capacity, free some items
    if (usage_ > capacity_) {
-        SERVER_LOG_DEBUG << "Current usage " << usage_
-                         << " exceeds cache capacity " << capacity_
+        SERVER_LOG_DEBUG << "Current usage " << usage_ << " exceeds cache capacity " << capacity_
                         << ", start free memory";
        free_memory();
    }

-    //insert new item
+    // insert new item
    {
        std::lock_guard<std::mutex> lock(mutex_);

        lru_.put(key, item);
-        SERVER_LOG_DEBUG << "Insert " << key << " size:" << item->Size()
-                         << " bytes into cache, usage: " << usage_ << " bytes";
+        SERVER_LOG_DEBUG << "Insert " << key << " size: " << item->Size() << " bytes into cache, usage: " << usage_
+                         << " bytes," << " capacity: " << capacity_ << " bytes";
    }
}

-template<typename ItemObj>
+template <typename ItemObj>
void
-Cache<ItemObj>::erase(const std::string &key) {
+Cache<ItemObj>::erase(const std::string& key) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (!lru_.exists(key)) {
        return;
    }

-    const ItemObj &old_item = lru_.get(key);
+    const ItemObj& old_item = lru_.get(key);
    usage_ -= old_item->Size();

-    SERVER_LOG_DEBUG << "Erase " << key << " size: " << old_item->Size();
+    SERVER_LOG_DEBUG << "Erase " << key << " size: " << old_item->Size() << " bytes from cache, usage: " << usage_
+                     << " bytes," << " capacity: " << capacity_ << " bytes";

    lru_.erase(key);
}

-template<typename ItemObj>
+template <typename ItemObj>
void
Cache<ItemObj>::clear() {
    std::lock_guard<std::mutex> lock(mutex_);

@@ -137,15 +131,16 @@ Cache<ItemObj>::clear() {
}

/* free memory space when CACHE occupation exceed its capacity */
-template<typename ItemObj>
+template <typename ItemObj>
void
Cache<ItemObj>::free_memory() {
-    if (usage_ <= capacity_) return;
+    if (usage_ <= capacity_)
+        return;

    int64_t threshhold = capacity_ * freemem_percent_;
    int64_t delta_size = usage_ - threshhold;
    if (delta_size <= 0) {
-        delta_size = 1;//ensure at least one item erased
+        delta_size = 1;  // ensure at least one item erased
    }

    std::set<std::string> key_array;

@@ -156,8 +151,8 @@ Cache<ItemObj>::free_memory() {

    auto it = lru_.rbegin();
    while (it != lru_.rend() && released_size < delta_size) {
-        auto &key = it->first;
-        auto &obj_ptr = it->second;
+        auto& key = it->first;
+        auto& obj_ptr = it->second;

        key_array.emplace(key);
        released_size += obj_ptr->Size();

@@ -167,20 +162,25 @@ Cache<ItemObj>::free_memory() {

    SERVER_LOG_DEBUG << "to be released memory size: " << released_size;

-    for (auto &key : key_array) {
+    for (auto& key : key_array) {
        erase(key);
    }

    print();
}

-template<typename ItemObj>
+template <typename ItemObj>
void
Cache<ItemObj>::print() {
    size_t cache_count = 0;
    {
        std::lock_guard<std::mutex> lock(mutex_);
        cache_count = lru_.size();
+#if 0
+        for (auto it = lru_.begin(); it != lru_.end(); ++it) {
+            SERVER_LOG_DEBUG << it->first;
+        }
+#endif
    }

    SERVER_LOG_DEBUG << "[Cache item count]: " << cache_count;

@@ -188,7 +188,5 @@ Cache<ItemObj>::print() {
    SERVER_LOG_DEBUG << "[Cache capacity]: " << capacity_ << " bytes";
}

-} // namespace cache
-} // namespace milvus
-
-
+}  // namespace cache
+}  // namespace milvus
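The hunks above are largely clang-format churn, but the eviction policy in free_memory() is worth spelling out: when usage exceeds capacity, the cache frees from the least-recently-used end until usage drops below capacity_ * freemem_percent_, always erasing at least one item. Below is a minimal standalone sketch of that policy; MiniLruCache and its fixed byte sizes are illustrative stand-ins, not the real Cache<ItemObj>, and locking is omitted.

#include <cstdint>
#include <iostream>
#include <list>
#include <string>
#include <unordered_map>

// Hypothetical stand-in for the real cache: items carry a byte size,
// and eviction walks the least-recently-used end of the list.
struct Item { std::string key; int64_t size; };

class MiniLruCache {
 public:
    MiniLruCache(int64_t capacity, double freemem_percent)
        : capacity_(capacity), freemem_percent_(freemem_percent) {}

    void insert(const std::string& key, int64_t size) {
        erase(key);                      // subtract any old entry first
        items_.push_front({key, size});  // front = most recently used
        index_[key] = items_.begin();
        usage_ += size;
        if (usage_ > capacity_) free_memory();
    }

    void erase(const std::string& key) {
        auto it = index_.find(key);
        if (it == index_.end()) return;
        usage_ -= it->second->size;
        items_.erase(it->second);
        index_.erase(it);
    }

    int64_t usage() const { return usage_; }

 private:
    // Mirror of the diff's policy: release at least (usage - capacity * percent)
    // bytes, and always at least one item, starting from the LRU tail.
    void free_memory() {
        int64_t threshold = static_cast<int64_t>(capacity_ * freemem_percent_);
        int64_t delta = usage_ - threshold;
        if (delta <= 0) delta = 1;  // ensure at least one item is erased
        int64_t released = 0;
        while (!items_.empty() && released < delta) {
            released += items_.back().size;
            std::string victim = items_.back().key;  // copy before the node dies
            erase(victim);
        }
    }

    int64_t capacity_;
    double freemem_percent_;
    int64_t usage_ = 0;
    std::list<Item> items_;
    std::unordered_map<std::string, std::list<Item>::iterator> index_;
};

int main() {
    MiniLruCache cache(100, 0.85);
    cache.insert("a", 40);
    cache.insert("b", 40);
    cache.insert("c", 40);  // usage 120 > 100 -> evict "a" to get under 85
    std::cout << "usage after eviction: " << cache.usage() << "\n";  // 80
}

Evicting down to a fraction of capacity rather than to capacity itself buys headroom, so the very next insert does not immediately trigger another eviction pass.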
@@ -15,21 +15,18 @@
// specific language governing permissions and limitations
// under the License.

-
-
namespace milvus {
namespace cache {

-template<typename ItemObj>
+template <typename ItemObj>
CacheMgr<ItemObj>::CacheMgr() {
}

-template<typename ItemObj>
+template <typename ItemObj>
CacheMgr<ItemObj>::~CacheMgr() {
-
}

-template<typename ItemObj>
+template <typename ItemObj>
uint64_t
CacheMgr<ItemObj>::ItemCount() const {
    if (cache_ == nullptr) {

@@ -37,12 +34,12 @@ CacheMgr<ItemObj>::ItemCount() const {
        return 0;
    }

-    return (uint64_t) (cache_->size());
+    return (uint64_t)(cache_->size());
}

-template<typename ItemObj>
+template <typename ItemObj>
bool
-CacheMgr<ItemObj>::ItemExists(const std::string &key) {
+CacheMgr<ItemObj>::ItemExists(const std::string& key) {
    if (cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return false;

@@ -51,9 +48,9 @@ CacheMgr<ItemObj>::ItemExists(const std::string &key) {
    return cache_->exists(key);
}

-template<typename ItemObj>
+template <typename ItemObj>
ItemObj
-CacheMgr<ItemObj>::GetItem(const std::string &key) {
+CacheMgr<ItemObj>::GetItem(const std::string& key) {
    if (cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return nullptr;

@@ -62,9 +59,9 @@ CacheMgr<ItemObj>::GetItem(const std::string &key) {
    return cache_->get(key);
}

-template<typename ItemObj>
+template <typename ItemObj>
void
-CacheMgr<ItemObj>::InsertItem(const std::string &key, const ItemObj &data) {
+CacheMgr<ItemObj>::InsertItem(const std::string& key, const ItemObj& data) {
    if (cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return;

@@ -74,9 +71,9 @@ CacheMgr<ItemObj>::InsertItem(const std::string &key, const ItemObj &data) {
    server::Metrics::GetInstance().CacheAccessTotalIncrement();
}

-template<typename ItemObj>
+template <typename ItemObj>
void
-CacheMgr<ItemObj>::EraseItem(const std::string &key) {
+CacheMgr<ItemObj>::EraseItem(const std::string& key) {
    if (cache_ == nullptr) {
        SERVER_LOG_ERROR << "Cache doesn't exist";
        return;

@@ -86,7 +83,7 @@ CacheMgr<ItemObj>::EraseItem(const std::string &key) {
    server::Metrics::GetInstance().CacheAccessTotalIncrement();
}

-template<typename ItemObj>
+template <typename ItemObj>
void
CacheMgr<ItemObj>::PrintInfo() {
    if (cache_ == nullptr) {

@@ -97,7 +94,7 @@ CacheMgr<ItemObj>::PrintInfo() {
    cache_->print();
}

-template<typename ItemObj>
+template <typename ItemObj>
void
CacheMgr<ItemObj>::ClearCache() {
    if (cache_ == nullptr) {

@@ -108,7 +105,7 @@ CacheMgr<ItemObj>::ClearCache() {
    cache_->clear();
}

-template<typename ItemObj>
+template <typename ItemObj>
int64_t
CacheMgr<ItemObj>::CacheUsage() const {
    if (cache_ == nullptr) {

@@ -119,7 +116,7 @@ CacheMgr<ItemObj>::CacheUsage() const {
    return cache_->usage();
}

-template<typename ItemObj>
+template <typename ItemObj>
int64_t
CacheMgr<ItemObj>::CacheCapacity() const {
    if (cache_ == nullptr) {

@@ -130,7 +127,7 @@ CacheMgr<ItemObj>::CacheCapacity() const {
    return cache_->capacity();
}

-template<typename ItemObj>
+template <typename ItemObj>
void
CacheMgr<ItemObj>::SetCapacity(int64_t capacity) {
    if (cache_ == nullptr) {

@@ -140,6 +137,5 @@ CacheMgr<ItemObj>::SetCapacity(int64_t capacity) {
    cache_->set_capacity(capacity);
}

-} // namespace cache
-} // namespace milvus
-
+}  // namespace cache
+}  // namespace milvus
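CacheMgr itself is a thin delegating wrapper: every public method null-checks cache_ (which a derived manager is expected to create) before forwarding. A toy sketch of that guard pattern, with a hypothetical SimpleCacheMgr/SimpleCache pair standing in for the real template:

#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Hypothetical simplified cache, standing in for Cache<ItemObj>.
class SimpleCache {
 public:
    bool exists(const std::string& key) const { return data_.count(key) != 0; }
    void insert(const std::string& key, int64_t v) { data_[key] = v; }
    size_t size() const { return data_.size(); }
 private:
    std::unordered_map<std::string, int64_t> data_;
};

// Mirror of the CacheMgr pattern above: every public call guards against a
// missing cache instance instead of dereferencing blindly.
class SimpleCacheMgr {
 public:
    uint64_t ItemCount() const {
        if (cache_ == nullptr) {
            std::cerr << "Cache doesn't exist\n";  // SERVER_LOG_ERROR in the original
            return 0;
        }
        return static_cast<uint64_t>(cache_->size());
    }

    bool ItemExists(const std::string& key) const {
        if (cache_ == nullptr) {
            std::cerr << "Cache doesn't exist\n";
            return false;
        }
        return cache_->exists(key);
    }

 protected:
    std::shared_ptr<SimpleCache> cache_;  // a derived manager would create this
};

int main() {
    SimpleCacheMgr mgr;                    // cache_ never initialized
    std::cout << mgr.ItemCount() << "\n";  // prints 0 after the guard fires
}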
@@ -25,6 +25,7 @@
namespace milvus {
namespace cache {

+#ifdef MILVUS_GPU_VERSION
std::mutex GpuCacheMgr::mutex_;
std::unordered_map<uint64_t, GpuCacheMgrPtr> GpuCacheMgr::instance_;

@@ -37,7 +38,7 @@ GpuCacheMgr::GpuCacheMgr() {
    Status s;

    int64_t gpu_cache_cap;
-    s = config.GetCacheConfigGpuCacheCapacity(gpu_cache_cap);
+    s = config.GetGpuResourceConfigCacheCapacity(gpu_cache_cap);
    if (!s.ok()) {
        SERVER_LOG_ERROR << s.message();
    }

@@ -45,7 +46,7 @@ GpuCacheMgr::GpuCacheMgr() {
    cache_ = std::make_shared<Cache<DataObjPtr>>(cap, 1UL << 32);

    float gpu_mem_threshold;
-    s = config.GetCacheConfigGpuCacheThreshold(gpu_mem_threshold);
+    s = config.GetGpuResourceConfigCacheThreshold(gpu_mem_threshold);
    if (!s.ok()) {
        SERVER_LOG_ERROR << s.message();
    }

@@ -76,6 +77,7 @@ GpuCacheMgr::GetIndex(const std::string& key) {
    DataObjPtr obj = GetItem(key);
    return obj;
}
+#endif

}  // namespace cache
}  // namespace milvus
@@ -25,6 +25,7 @@
namespace milvus {
namespace cache {

+#ifdef MILVUS_GPU_VERSION
class GpuCacheMgr;
using GpuCacheMgrPtr = std::shared_ptr<GpuCacheMgr>;

@@ -42,6 +43,7 @@ class GpuCacheMgr : public CacheMgr<DataObjPtr> {
    static std::mutex mutex_;
    static std::unordered_map<uint64_t, GpuCacheMgrPtr> instance_;
};
+#endif

}  // namespace cache
}  // namespace milvus
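The header keeps the whole GPU cache behind MILVUS_GPU_VERSION and holds one manager per device in a static instance_ map guarded by a static mutex. The sketch below shows the per-device singleton idea in isolation; PerDeviceMgr and its GetInstance(gpu_id) accessor are hypothetical (the accessor itself is not visible in these hunks), and the real class additionally wires each instance to a device-sized cache.

#include <cstdint>
#include <iostream>
#include <memory>
#include <mutex>
#include <unordered_map>

class PerDeviceMgr {
 public:
    // One instance per device id, created lazily under the static mutex.
    static std::shared_ptr<PerDeviceMgr> GetInstance(uint64_t gpu_id) {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = instance_.find(gpu_id);
        if (it == instance_.end()) {
            it = instance_.emplace(gpu_id, std::shared_ptr<PerDeviceMgr>(new PerDeviceMgr(gpu_id))).first;
        }
        return it->second;
    }

    uint64_t gpu_id() const { return gpu_id_; }

 private:
    explicit PerDeviceMgr(uint64_t gpu_id) : gpu_id_(gpu_id) {}

    uint64_t gpu_id_;
    static std::mutex mutex_;
    static std::unordered_map<uint64_t, std::shared_ptr<PerDeviceMgr>> instance_;
};

std::mutex PerDeviceMgr::mutex_;
std::unordered_map<uint64_t, std::shared_ptr<PerDeviceMgr>> PerDeviceMgr::instance_;

int main() {
    auto a = PerDeviceMgr::GetInstance(0);
    auto b = PerDeviceMgr::GetInstance(0);
    std::cout << (a == b) << "\n";  // 1: same instance for the same device
}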
@@ -47,44 +47,68 @@ class DB {
    virtual Status
    CreateTable(meta::TableSchema& table_schema_) = 0;

    virtual Status
-    DeleteTable(const std::string& table_id, const meta::DatesT& dates) = 0;
+    DropTable(const std::string& table_id, const meta::DatesT& dates) = 0;

    virtual Status
    DescribeTable(meta::TableSchema& table_schema_) = 0;

    virtual Status
    HasTable(const std::string& table_id, bool& has_or_not_) = 0;

    virtual Status
    AllTables(std::vector<meta::TableSchema>& table_schema_array) = 0;

    virtual Status
    GetTableRowCount(const std::string& table_id, uint64_t& row_count) = 0;

    virtual Status
    PreloadTable(const std::string& table_id) = 0;

    virtual Status
    UpdateTableFlag(const std::string& table_id, int64_t flag) = 0;

    virtual Status
-    InsertVectors(const std::string& table_id_, uint64_t n, const float* vectors, IDNumbers& vector_ids_) = 0;
+    CreatePartition(const std::string& table_id, const std::string& partition_name,
+                    const std::string& partition_tag) = 0;

    virtual Status
-    Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
-          ResultIds& result_ids, ResultDistances& result_distances) = 0;
+    DropPartition(const std::string& partition_name) = 0;

    virtual Status
-    Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
-          const meta::DatesT& dates, ResultIds& result_ids, ResultDistances& result_distances) = 0;
+    DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) = 0;

    virtual Status
-    Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
+    ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) = 0;
+
+    virtual Status
+    InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors,
+                  IDNumbers& vector_ids_) = 0;
+
+    virtual Status
+    Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
+          uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) = 0;
+
+    virtual Status
+    Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
+          uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
+          ResultDistances& result_distances) = 0;
+
+    virtual Status
+    QueryByFileID(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
          uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
          ResultDistances& result_distances) = 0;

    virtual Status
    Size(uint64_t& result) = 0;

    virtual Status
    CreateIndex(const std::string& table_id, const TableIndex& index) = 0;

    virtual Status
    DescribeIndex(const std::string& table_id, TableIndex& index) = 0;

    virtual Status
    DropIndex(const std::string& table_id) = 0;
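The interface change threads partition tags through the whole API: inserts name a target partition by tag, queries take a list of tags, and the old file-id query moves to an explicit QueryByFileID. The following is an illustrative model, not the real meta layer, of the tag-to-internal-table resolution these methods depend on: an empty tag falls through to the parent table, and an unknown tag is an error.

#include <iostream>
#include <map>
#include <string>
#include <utility>

// Illustrative model of how a partition tag resolves to the internal table
// that actually receives the vectors; names here are hypothetical.
struct PartitionMeta {
    // key: (parent table, tag), value: internal partition table name
    std::map<std::pair<std::string, std::string>, std::string> partitions;

    bool GetPartitionName(const std::string& table_id, const std::string& tag,
                          std::string& target) const {
        if (tag.empty()) {  // no tag: the insert goes to the parent table itself
            target = table_id;
            return true;
        }
        auto it = partitions.find({table_id, tag});
        if (it == partitions.end()) {
            return false;  // unknown tag is an error, as in DBImpl::InsertVectors
        }
        target = it->second;
        return true;
    }
};

int main() {
    PartitionMeta meta;
    meta.partitions[{"tbl", "2019-11"}] = "tbl__part_0";

    std::string target;
    if (meta.GetPartitionName("tbl", "2019-11", target))
        std::cout << "insert into " << target << "\n";  // tbl__part_0
    if (meta.GetPartitionName("tbl", "", target))
        std::cout << "insert into " << target << "\n";  // tbl
}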
@@ -30,6 +30,7 @@
#include "scheduler/job/DeleteJob.h"
#include "scheduler/job/SearchJob.h"
#include "utils/Log.h"
+#include "utils/StringHelpFunctions.h"
#include "utils/TimeRecorder.h"

#include <assert.h>

@@ -38,7 +39,9 @@
#include <chrono>
#include <cstring>
#include <iostream>
+#include <set>
#include <thread>
+#include <utility>

namespace milvus {
namespace engine {

@@ -49,6 +52,17 @@ constexpr uint64_t METRIC_ACTION_INTERVAL = 1;
constexpr uint64_t COMPACT_ACTION_INTERVAL = 1;
constexpr uint64_t INDEX_ACTION_INTERVAL = 1;

+static const Status SHUTDOWN_ERROR = Status(DB_ERROR, "Milvus server is shutdown!");
+
+void
+TraverseFiles(const meta::DatePartionedTableFilesSchema& date_files, meta::TableFilesSchema& files_array) {
+    for (auto& day_files : date_files) {
+        for (auto& file : day_files.second) {
+            files_array.push_back(file);
+        }
+    }
+}
+
}  // namespace

DBImpl::DBImpl(const DBOptions& options)

@@ -92,13 +106,14 @@ DBImpl::Stop() {
    shutting_down_.store(true, std::memory_order_release);

    // make sure all memory data is serialized
-    MemSerialize();
+    std::set<std::string> sync_table_ids;
+    SyncMemData(sync_table_ids);

    // wait compaction/buildindex finish
    bg_timer_thread_.join();

    if (options_.mode_ != DBOptions::MODE::CLUSTER_READONLY) {
-        meta_ptr_->CleanUp();
+        meta_ptr_->CleanUpShadowFiles();
    }

    // ENGINE_LOG_TRACE << "DB service stop";

@@ -113,7 +128,7 @@ DBImpl::DropAll() {
Status
DBImpl::CreateTable(meta::TableSchema& table_schema) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

    meta::TableSchema temp_schema = table_schema;

@@ -122,34 +137,18 @@ DBImpl::CreateTable(meta::TableSchema& table_schema) {
}

Status
-DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) {
+DBImpl::DropTable(const std::string& table_id, const meta::DatesT& dates) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

-    // dates partly delete files of the table but currently we don't support
-    ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id;
-
-    if (dates.empty()) {
-        mem_mgr_->EraseMemVector(table_id);  // not allow insert
-        meta_ptr_->DeleteTable(table_id);    // soft delete table
-
-        // scheduler will determine when to delete table files
-        auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
-        scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(table_id, meta_ptr_, nres);
-        scheduler::JobMgrInst::GetInstance()->Put(job);
-        job->WaitAndDelete();
-    } else {
-        meta_ptr_->DropPartitionsByDates(table_id, dates);
-    }
-
-    return Status::OK();
+    return DropTableRecursively(table_id, dates);
}

Status
DBImpl::DescribeTable(meta::TableSchema& table_schema) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

    auto stat = meta_ptr_->DescribeTable(table_schema);

@@ -160,7 +159,7 @@ DBImpl::DescribeTable(meta::TableSchema& table_schema) {
Status
DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

    return meta_ptr_->HasTable(table_id, has_or_not);

@@ -169,7 +168,7 @@ DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
Status
DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

    return meta_ptr_->AllTables(table_schema_array);

@@ -178,55 +177,66 @@ DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
Status
DBImpl::PreloadTable(const std::string& table_id) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

-    meta::DatePartionedTableFilesSchema files;
-
+    // step 1: get all table files from parent table
    meta::DatesT dates;
    std::vector<size_t> ids;
-    auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files);
+    meta::TableFilesSchema files_array;
+    auto status = GetFilesToSearch(table_id, ids, dates, files_array);
    if (!status.ok()) {
        return status;
    }

+    // step 2: get files from partition tables
+    std::vector<meta::TableSchema> partition_array;
+    status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    for (auto& schema : partition_array) {
+        status = GetFilesToSearch(schema.table_id_, ids, dates, files_array);
+    }
+
    int64_t size = 0;
    int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheCapacity();
    int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage();
    int64_t available_size = cache_total - cache_usage;

-    for (auto& day_files : files) {
-        for (auto& file : day_files.second) {
-            ExecutionEnginePtr engine =
-                EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_,
-                                     (MetricType)file.metric_type_, file.nlist_);
-            if (engine == nullptr) {
-                ENGINE_LOG_ERROR << "Invalid engine type";
-                return Status(DB_ERROR, "Invalid engine type");
-            }
+    // step 3: load file one by one
+    ENGINE_LOG_DEBUG << "Begin pre-load table:" + table_id + ", totally " << files_array.size()
+                     << " files need to be pre-loaded";
+    TimeRecorderAuto rc("Pre-load table:" + table_id);
+    for (auto& file : files_array) {
+        ExecutionEnginePtr engine = EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_,
+                                                         (MetricType)file.metric_type_, file.nlist_);
+        if (engine == nullptr) {
+            ENGINE_LOG_ERROR << "Invalid engine type";
+            return Status(DB_ERROR, "Invalid engine type");
+        }

-            size += engine->PhysicalSize();
-            if (size > available_size) {
-                return Status(SERVER_CACHE_FULL, "Cache is full");
-            } else {
-                try {
-                    // step 1: load index
-                    engine->Load(true);
-                } catch (std::exception& ex) {
-                    std::string msg = "Pre-load table encounter exception: " + std::string(ex.what());
-                    ENGINE_LOG_ERROR << msg;
-                    return Status(DB_ERROR, msg);
-                }
-            }
-        }
-    }
+        size += engine->PhysicalSize();
+        if (size > available_size) {
+            ENGINE_LOG_DEBUG << "Pre-load canceled since cache almost full";
+            return Status(SERVER_CACHE_FULL, "Cache is full");
+        } else {
+            try {
+                std::string msg = "Pre-loaded file: " + file.file_id_ + " size: " + std::to_string(file.file_size_);
+                TimeRecorderAuto rc_1(msg);
+                engine->Load(true);
+            } catch (std::exception& ex) {
+                std::string msg = "Pre-load table encounter exception: " + std::string(ex.what());
+                ENGINE_LOG_ERROR << msg;
+                return Status(DB_ERROR, msg);
+            }
+        }
+    }
+
    return Status::OK();
}

Status
DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

    return meta_ptr_->UpdateTableFlag(table_id, flag);
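The reworked PreloadTable flattens the date-partitioned file list, pulls in partition files, and loads files one by one while checking the remaining CPU cache headroom. A toy model of just that budget check, with hypothetical FileInfo/PreloadFiles names:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct FileInfo { std::string id; int64_t bytes; };

// Stop loading as soon as the accumulated file size would overflow the
// headroom left in the cache (capacity minus current usage).
bool PreloadFiles(const std::vector<FileInfo>& files, int64_t cache_capacity, int64_t cache_usage) {
    int64_t available = cache_capacity - cache_usage;
    int64_t size = 0;
    for (const auto& f : files) {
        size += f.bytes;
        if (size > available) {
            std::cout << "pre-load canceled, cache almost full\n";
            return false;  // Status(SERVER_CACHE_FULL, ...) in the original
        }
        std::cout << "pre-loaded " << f.id << " (" << f.bytes << " bytes)\n";
    }
    return true;
}

int main() {
    std::vector<FileInfo> files = {{"f1", 400}, {"f2", 400}, {"f3", 400}};
    PreloadFiles(files, /*capacity=*/1000, /*usage=*/100);  // stops before f3
}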
@@ -235,34 +245,108 @@ DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) {
Status
DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

-    return meta_ptr_->Count(table_id, row_count);
+    return GetTableRowCountRecursively(table_id, row_count);
}

Status
-DBImpl::InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) {
-    // ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
+DBImpl::CreatePartition(const std::string& table_id, const std::string& partition_name,
+                        const std::string& partition_tag) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

+    return meta_ptr_->CreatePartition(table_id, partition_name, partition_tag);
+}
+
+Status
+DBImpl::DropPartition(const std::string& partition_name) {
+    if (shutting_down_.load(std::memory_order_acquire)) {
+        return SHUTDOWN_ERROR;
+    }
+
+    auto status = mem_mgr_->EraseMemVector(partition_name);  // not allow insert
+    status = meta_ptr_->DropPartition(partition_name);       // soft delete table
+
+    // scheduler will determine when to delete table files
+    auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
+    scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(partition_name, meta_ptr_, nres);
+    scheduler::JobMgrInst::GetInstance()->Put(job);
+    job->WaitAndDelete();
+
+    return Status::OK();
+}
+
+Status
+DBImpl::DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) {
+    if (shutting_down_.load(std::memory_order_acquire)) {
+        return SHUTDOWN_ERROR;
+    }
+
+    std::string partition_name;
+    auto status = meta_ptr_->GetPartitionName(table_id, partition_tag, partition_name);
+    if (!status.ok()) {
+        ENGINE_LOG_ERROR << status.message();
+        return status;
+    }
+
+    return DropPartition(partition_name);
+}
+
+Status
+DBImpl::ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) {
+    if (shutting_down_.load(std::memory_order_acquire)) {
+        return SHUTDOWN_ERROR;
+    }
+
+    return meta_ptr_->ShowPartitions(table_id, partition_schema_array);
+}
+
+Status
+DBImpl::InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors,
+                      IDNumbers& vector_ids) {
+    // ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
+    if (shutting_down_.load(std::memory_order_acquire)) {
+        return SHUTDOWN_ERROR;
+    }
+
+    // if partition is specified, use partition as target table
+    Status status;
+    std::string target_table_name = table_id;
+    if (!partition_tag.empty()) {
+        std::string partition_name;
+        status = meta_ptr_->GetPartitionName(table_id, partition_tag, target_table_name);
+        if (!status.ok()) {
+            ENGINE_LOG_ERROR << status.message();
+            return status;
+        }
+    }
+
+    // insert vectors into target table
    milvus::server::CollectInsertMetrics metrics(n, status);
-    status = mem_mgr_->InsertVectors(table_id, n, vectors, vector_ids);
+    status = mem_mgr_->InsertVectors(target_table_name, n, vectors, vector_ids);

    return status;
}

Status
DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
+    if (shutting_down_.load(std::memory_order_acquire)) {
+        return SHUTDOWN_ERROR;
+    }
+
+    // serialize memory data
+    std::set<std::string> sync_table_ids;
+    auto status = SyncMemData(sync_table_ids);
+
    {
        std::unique_lock<std::mutex> lock(build_index_mutex_);

        // step 1: check index difference
        TableIndex old_index;
-        auto status = DescribeIndex(table_id, old_index);
+        status = DescribeIndex(table_id, old_index);
        if (!status.ok()) {
            ENGINE_LOG_ERROR << "Failed to get table index info for table: " << table_id;
            return status;

@@ -272,11 +356,8 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
        TableIndex new_index = index;
        new_index.metric_type_ = old_index.metric_type_;  // dont change metric type, it was defined by CreateTable
        if (!utils::IsSameIndex(old_index, new_index)) {
-            DropIndex(table_id);
-
-            status = meta_ptr_->UpdateTableIndex(table_id, new_index);
+            status = UpdateTableIndexRecursively(table_id, new_index);
            if (!status.ok()) {
-                ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id;
                return status;
            }
        }

@@ -287,102 +368,92 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
    WaitMergeFileFinish();

    // step 4: wait and build index
-    // for IDMAP type, only wait all NEW file converted to RAW file
-    // for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files
-    std::vector<int> file_types;
-    if (index.engine_type_ == static_cast<int32_t>(EngineType::FAISS_IDMAP)) {
-        file_types = {
-            static_cast<int32_t>(meta::TableFileSchema::NEW),
-            static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
-        };
-    } else {
-        file_types = {
-            static_cast<int32_t>(meta::TableFileSchema::RAW),
-            static_cast<int32_t>(meta::TableFileSchema::NEW),
-            static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
-            static_cast<int32_t>(meta::TableFileSchema::NEW_INDEX),
-            static_cast<int32_t>(meta::TableFileSchema::TO_INDEX),
-        };
-    }
+    status = index_failed_checker_.CleanFailedIndexFileOfTable(table_id);
+    status = BuildTableIndexRecursively(table_id, index);

-    std::vector<std::string> file_ids;
-    auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
-    int times = 1;
-
-    while (!file_ids.empty()) {
-        ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
-        if (index.engine_type_ != (int)EngineType::FAISS_IDMAP) {
-            status = meta_ptr_->UpdateTableFilesToIndex(table_id);
-        }
-
-        std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
-        status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
-        times++;
-    }
-
-    return Status::OK();
+    return status;
}

Status
DBImpl::DescribeIndex(const std::string& table_id, TableIndex& index) {
+    if (shutting_down_.load(std::memory_order_acquire)) {
+        return SHUTDOWN_ERROR;
+    }
+
    return meta_ptr_->DescribeTableIndex(table_id, index);
}

Status
DBImpl::DropIndex(const std::string& table_id) {
+    if (shutting_down_.load(std::memory_order_acquire)) {
+        return SHUTDOWN_ERROR;
+    }
+
-    ENGINE_LOG_DEBUG << "Drop index for table: " << table_id;
-    return meta_ptr_->DropTableIndex(table_id);
+    return DropTableIndexRecursively(table_id);
}

Status
-DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
-              ResultIds& result_ids, ResultDistances& result_distances) {
+DBImpl::Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
+              uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

    meta::DatesT dates = {utils::GetDate()};
-    Status result = Query(table_id, k, nq, nprobe, vectors, dates, result_ids, result_distances);
-
+    Status result = Query(table_id, partition_tags, k, nq, nprobe, vectors, dates, result_ids, result_distances);
    return result;
}

Status
-DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
-              const meta::DatesT& dates, ResultIds& result_ids, ResultDistances& result_distances) {
+DBImpl::Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
+              uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
+              ResultDistances& result_distances) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

    ENGINE_LOG_DEBUG << "Query by dates for table: " << table_id << " date range count: " << dates.size();

-    // get all table files from table
-    meta::DatePartionedTableFilesSchema files;
+    Status status;
    std::vector<size_t> ids;
-    auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files);
-    if (!status.ok()) {
-        return status;
-    }
+    meta::TableFilesSchema files_array;

-    meta::TableFilesSchema file_id_array;
-    for (auto& day_files : files) {
-        for (auto& file : day_files.second) {
-            file_id_array.push_back(file);
+    if (partition_tags.empty()) {
+        // no partition tag specified, means search in whole table
+        // get all table files from parent table
+        status = GetFilesToSearch(table_id, ids, dates, files_array);
+        if (!status.ok()) {
+            return status;
+        }
+
+        std::vector<meta::TableSchema> partition_array;
+        status = meta_ptr_->ShowPartitions(table_id, partition_array);
+        for (auto& schema : partition_array) {
+            status = GetFilesToSearch(schema.table_id_, ids, dates, files_array);
+        }
+    } else {
+        // get files from specified partitions
+        std::set<std::string> partition_name_array;
+        GetPartitionsByTags(table_id, partition_tags, partition_name_array);
+
+        for (auto& partition_name : partition_name_array) {
+            status = GetFilesToSearch(partition_name, ids, dates, files_array);
        }
    }

    cache::CpuCacheMgr::GetInstance()->PrintInfo();  // print cache info before query
-    status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, result_ids, result_distances);
+    status = QueryAsync(table_id, files_array, k, nq, nprobe, vectors, result_ids, result_distances);
    cache::CpuCacheMgr::GetInstance()->PrintInfo();  // print cache info after query
    return status;
}

Status
-DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
-              uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
-              ResultDistances& result_distances) {
+DBImpl::QueryByFileID(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
+                      uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
+                      ResultDistances& result_distances) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

    ENGINE_LOG_DEBUG << "Query by file ids for table: " << table_id << " date range count: " << dates.size();
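The rewritten Query dispatches on the tag list: no tags means the parent table plus every partition is searched, otherwise only the partitions whose tags matched. A standalone sketch of that dispatch, using plain strings for table names (the real code collects TableFilesSchema entries per target instead):

#include <iostream>
#include <set>
#include <string>
#include <vector>

// Hypothetical helper mirroring the branch in DBImpl::Query above.
std::vector<std::string>
CollectSearchTargets(const std::string& table_id,
                     const std::vector<std::string>& all_partitions,
                     const std::set<std::string>& matched_partitions,
                     bool tags_given) {
    std::vector<std::string> targets;
    if (!tags_given) {
        targets.push_back(table_id);  // whole parent table...
        targets.insert(targets.end(), all_partitions.begin(), all_partitions.end());  // ...plus partitions
    } else {
        targets.assign(matched_partitions.begin(), matched_partitions.end());
    }
    return targets;
}

int main() {
    std::vector<std::string> parts = {"tbl__p0", "tbl__p1"};
    for (const auto& t : CollectSearchTargets("tbl", parts, {}, false))
        std::cout << t << "\n";  // tbl, tbl__p0, tbl__p1
    for (const auto& t : CollectSearchTargets("tbl", parts, {"tbl__p1"}, true))
        std::cout << t << "\n";  // tbl__p1
}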
@@ -396,25 +467,18 @@ DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_
        ids.push_back(std::stoul(id, &sz));
    }

-    meta::DatePartionedTableFilesSchema files_array;
-    auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files_array);
+    meta::TableFilesSchema files_array;
+    auto status = GetFilesToSearch(table_id, ids, dates, files_array);
    if (!status.ok()) {
        return status;
    }

-    meta::TableFilesSchema file_id_array;
-    for (auto& day_files : files_array) {
-        for (auto& file : day_files.second) {
-            file_id_array.push_back(file);
-        }
-    }
-
-    if (file_id_array.empty()) {
+    if (files_array.empty()) {
        return Status(DB_ERROR, "Invalid file id");
    }

    cache::CpuCacheMgr::GetInstance()->PrintInfo();  // print cache info before query
-    status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, result_ids, result_distances);
+    status = QueryAsync(table_id, files_array, k, nq, nprobe, vectors, result_ids, result_distances);
    cache::CpuCacheMgr::GetInstance()->PrintInfo();  // print cache info after query
    return status;
}

@@ -422,7 +486,7 @@ DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_
Status
DBImpl::Size(uint64_t& result) {
    if (shutting_down_.load(std::memory_order_acquire)) {
-        return Status(DB_ERROR, "Milsvus server is shutdown!");
+        return SHUTDOWN_ERROR;
    }

    return meta_ptr_->Size(result);

@@ -438,7 +502,9 @@ DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& fi
    TimeRecorder rc("");

-    // step 1: get files to search
+    // step 1: construct search job
+    auto status = ongoing_files_checker_.MarkOngoingFiles(files);
+
    ENGINE_LOG_DEBUG << "Engine query begin, index file count: " << files.size();
    scheduler::SearchJobPtr job = std::make_shared<scheduler::SearchJob>(k, nq, nprobe, vectors);
    for (auto& file : files) {

@@ -446,9 +512,11 @@ DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& fi
        job->AddIndexFile(file_ptr);
    }

-    // step 2: put search task to scheduler
+    // step 2: put search job to scheduler and wait result
    scheduler::JobMgrInst::GetInstance()->Put(job);
    job->WaitResult();
+
+    status = ongoing_files_checker_.UnmarkOngoingFiles(files);
    if (!job->GetStatus().ok()) {
        return job->GetStatus();
    }

@@ -536,12 +604,12 @@ DBImpl::StartMetricTask() {
}

Status
-DBImpl::MemSerialize() {
+DBImpl::SyncMemData(std::set<std::string>& sync_table_ids) {
    std::lock_guard<std::mutex> lck(mem_serialize_mutex_);
    std::set<std::string> temp_table_ids;
    mem_mgr_->Serialize(temp_table_ids);
    for (auto& id : temp_table_ids) {
-        compact_table_ids_.insert(id);
+        sync_table_ids.insert(id);
    }

    if (!temp_table_ids.empty()) {

@@ -560,7 +628,7 @@ DBImpl::StartCompactionTask() {
    }

    // serialize memory data
-    MemSerialize();
+    SyncMemData(compact_table_ids_);

    // compaction has been finished?
    {

@@ -577,6 +645,18 @@ DBImpl::StartCompactionTask() {
    {
        std::lock_guard<std::mutex> lck(compact_result_mutex_);
        if (compact_thread_results_.empty()) {
+            // collect merge files for all tables (if compact_table_ids_ is empty) for two reasons:
+            // 1. other tables may still have un-merged files
+            // 2. the server may be closed unexpectedly; these un-merged files need to be merged when it restarts
+            if (compact_table_ids_.empty()) {
+                std::vector<meta::TableSchema> table_schema_array;
+                meta_ptr_->AllTables(table_schema_array);
+                for (auto& schema : table_schema_array) {
+                    compact_table_ids_.insert(schema.table_id_);
+                }
+            }
+
            // start merge file thread
            compact_thread_results_.push_back(
                compact_thread_pool_.enqueue(&DBImpl::BackgroundCompaction, this, compact_table_ids_));
            compact_table_ids_.clear();

@@ -615,7 +695,6 @@ DBImpl::MergeFiles(const std::string& table_id, const meta::DateT& date, const m
        auto file_schema = file;
        file_schema.file_type_ = meta::TableFileSchema::TO_DELETE;
        updated.push_back(file_schema);
-        ENGINE_LOG_DEBUG << "Merging file " << file_schema.file_id_;
        index_size = index->Size();

        if (index_size >= file_schema.index_file_size_) {

@@ -625,20 +704,27 @@ DBImpl::MergeFiles(const std::string& table_id, const meta::DateT& date, const m
    // step 3: serialize to disk
    try {
-        index->Serialize();
+        status = index->Serialize();
+        if (!status.ok()) {
+            ENGINE_LOG_ERROR << status.message();
+        }
    } catch (std::exception& ex) {
        // typical error: out of disk space or permission denied
        std::string msg = "Serialize merged index encounter exception: " + std::string(ex.what());
        ENGINE_LOG_ERROR << msg;
+        status = Status(DB_ERROR, msg);
+    }

-        table_file.file_type_ = meta::TableFileSchema::TO_DELETE;
-        status = meta_ptr_->UpdateTableFile(table_file);
-        ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << table_file.file_id_ << " to to_delete";
+    if (!status.ok()) {
+        // if failed to serialize merge file to disk
+        // typical error: out of disk space, out of memory or permission denied
+        table_file.file_type_ = meta::TableFileSchema::TO_DELETE;
+        status = meta_ptr_->UpdateTableFile(table_file);
+        ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << table_file.file_id_ << " to to_delete";

-        std::cout << "ERROR: failed to persist merged index file: " << table_file.location_
-                  << ", possible out of disk space" << std::endl;
+        ENGINE_LOG_ERROR << "Failed to persist merged file: " << table_file.location_
+                         << ", possible out of disk space or memory";

-        return Status(DB_ERROR, msg);
+        return status;
    }

    // step 4: update table files state

@@ -673,13 +759,15 @@ DBImpl::BackgroundMergeFiles(const std::string& table_id) {
    }

    for (auto& kv : raw_files) {
-        auto files = kv.second;
+        meta::TableFilesSchema& files = kv.second;
        if (files.size() < options_.merge_trigger_number_) {
-            ENGINE_LOG_DEBUG << "Files number not greater equal than merge trigger number, skip merge action";
+            ENGINE_LOG_TRACE << "Files number not greater equal than merge trigger number, skip merge action";
            continue;
        }

+        status = ongoing_files_checker_.MarkOngoingFiles(files);
        MergeFiles(table_id, kv.first, kv.second);
+        status = ongoing_files_checker_.UnmarkOngoingFiles(files);

        if (shutting_down_.load(std::memory_order_acquire)) {
            ENGINE_LOG_DEBUG << "Server will shutdown, skip merge action for table: " << table_id;

@@ -709,11 +797,14 @@ DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
    meta_ptr_->Archive();

-    int ttl = 5 * meta::M_SEC;  // default: file will be deleted after 5 minutes
-    if (options_.mode_ == DBOptions::MODE::CLUSTER_WRITABLE) {
-        ttl = meta::D_SEC;
-    }
-    meta_ptr_->CleanUpFilesWithTTL(ttl);
+    {
+        uint64_t ttl = 10 * meta::SECOND;  // default: file will be hard-deleted a few seconds after soft-deleted
+        if (options_.mode_ == DBOptions::MODE::CLUSTER_WRITABLE) {
+            ttl = meta::HOUR;
+        }
+
+        meta_ptr_->CleanUpFilesWithTTL(ttl, &ongoing_files_checker_);
+    }

    // ENGINE_LOG_TRACE << " Background compaction thread exit";
}
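The compaction path above switches to a two-phase delete: files are soft-deleted first (marked TO_DELETE), then hard-deleted only after a TTL has passed and no search still holds them, which is what the new ongoing_files_checker_ argument is for. A toy model of that cleanup rule, with hypothetical FileRecord bookkeeping:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct FileRecord {
    std::string id;
    uint64_t deleted_at;  // seconds; 0 = not soft-deleted
    bool in_use;          // would come from OngoingFileChecker in the original
};

// Remove only files whose soft-delete TTL has elapsed and that no ongoing
// search is still reading.
void CleanUpFilesWithTTL(std::vector<FileRecord>& files, uint64_t now, uint64_t ttl) {
    for (auto it = files.begin(); it != files.end();) {
        bool expired = it->deleted_at != 0 && now - it->deleted_at >= ttl;
        if (expired && !it->in_use) {
            std::cout << "hard-delete " << it->id << "\n";
            it = files.erase(it);
        } else {
            ++it;
        }
    }
}

int main() {
    std::vector<FileRecord> files = {
        {"f1", 100, false},  // old enough, not in use -> removed
        {"f2", 100, true},   // old enough but still being searched -> kept
        {"f3", 195, false},  // soft-deleted too recently -> kept
    };
    CleanUpFilesWithTTL(files, /*now=*/200, /*ttl=*/10);
    std::cout << files.size() << " files remain\n";  // 2
}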
@@ -748,30 +839,261 @@ DBImpl::StartBuildIndexTask(bool force) {
void
DBImpl::BackgroundBuildIndex() {
-    // ENGINE_LOG_TRACE << "Background build index thread start";
-
    std::unique_lock<std::mutex> lock(build_index_mutex_);
    meta::TableFilesSchema to_index_files;
    meta_ptr_->FilesToIndex(to_index_files);
-    Status status;
+    Status status = index_failed_checker_.IgnoreFailedIndexFiles(to_index_files);

    if (!to_index_files.empty()) {
-        scheduler::BuildIndexJobPtr job = std::make_shared<scheduler::BuildIndexJob>(meta_ptr_, options_);
+        ENGINE_LOG_DEBUG << "Background build index thread begin";
+        status = ongoing_files_checker_.MarkOngoingFiles(to_index_files);

        // step 2: put build index task to scheduler
+        std::vector<std::pair<scheduler::BuildIndexJobPtr, scheduler::TableFileSchemaPtr>> job2file_map;
        for (auto& file : to_index_files) {
+            scheduler::BuildIndexJobPtr job = std::make_shared<scheduler::BuildIndexJob>(meta_ptr_, options_);
            scheduler::TableFileSchemaPtr file_ptr = std::make_shared<meta::TableFileSchema>(file);
            job->AddToIndexFiles(file_ptr);
+            scheduler::JobMgrInst::GetInstance()->Put(job);
+            job2file_map.push_back(std::make_pair(job, file_ptr));
        }
-        scheduler::JobMgrInst::GetInstance()->Put(job);
-        job->WaitBuildIndexFinish();
-        if (!job->GetStatus().ok()) {
-            Status status = job->GetStatus();
-            ENGINE_LOG_ERROR << "Building index failed: " << status.ToString();
-        }
-
-    // ENGINE_LOG_TRACE << "Background build index thread exit";

+        // step 3: wait build index finished and mark failed files
+        for (auto iter = job2file_map.begin(); iter != job2file_map.end(); ++iter) {
+            scheduler::BuildIndexJobPtr job = iter->first;
+            meta::TableFileSchema& file_schema = *(iter->second.get());
+            job->WaitBuildIndexFinish();
+            if (!job->GetStatus().ok()) {
+                Status status = job->GetStatus();
+                ENGINE_LOG_ERROR << "Building index job " << job->id() << " failed: " << status.ToString();
+
+                index_failed_checker_.MarkFailedIndexFile(file_schema);
+            } else {
+                ENGINE_LOG_DEBUG << "Building index job " << job->id() << " succeed.";
+
+                index_failed_checker_.MarkSucceedIndexFile(file_schema);
+            }
+            status = ongoing_files_checker_.UnmarkOngoingFile(file_schema);
+        }
+
+        ENGINE_LOG_DEBUG << "Background build index thread finished";
    }
}

+Status
+DBImpl::GetFilesToBuildIndex(const std::string& table_id, const std::vector<int>& file_types,
+                             meta::TableFilesSchema& files) {
+    files.clear();
+    auto status = meta_ptr_->FilesByType(table_id, file_types, files);
+
+    // only build index for files whose row count is greater than a certain threshold
+    for (auto it = files.begin(); it != files.end();) {
+        if ((*it).file_type_ == static_cast<int>(meta::TableFileSchema::RAW) &&
+            (*it).row_count_ < meta::BUILD_INDEX_THRESHOLD) {
+            it = files.erase(it);
+        } else {
+            it++;
+        }
+    }
+
+    return Status::OK();
+}
+
+Status
+DBImpl::GetFilesToSearch(const std::string& table_id, const std::vector<size_t>& file_ids, const meta::DatesT& dates,
+                         meta::TableFilesSchema& files) {
+    ENGINE_LOG_DEBUG << "Collect files from table: " << table_id;
+
+    meta::DatePartionedTableFilesSchema date_files;
+    auto status = meta_ptr_->FilesToSearch(table_id, file_ids, dates, date_files);
+    if (!status.ok()) {
+        return status;
+    }
+
+    TraverseFiles(date_files, files);
+    return Status::OK();
+}
+
+Status
+DBImpl::GetPartitionsByTags(const std::string& table_id, const std::vector<std::string>& partition_tags,
+                            std::set<std::string>& partition_name_array) {
+    std::vector<meta::TableSchema> partition_array;
+    auto status = meta_ptr_->ShowPartitions(table_id, partition_array);
+
+    for (auto& tag : partition_tags) {
+        // trim the surrounding blanks of the tag, only compare valid characters
+        // for example: "  ab cd  " is treated as "ab cd"
+        std::string valid_tag = tag;
+        server::StringHelpFunctions::TrimStringBlank(valid_tag);
+        for (auto& schema : partition_array) {
+            if (server::StringHelpFunctions::IsRegexMatch(schema.partition_tag_, valid_tag)) {
+                partition_name_array.insert(schema.table_id_);
+            }
+        }
+    }
+
+    return Status::OK();
+}
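GetPartitionsByTags trims surrounding blanks from each incoming tag and then matches it against the stored partition tags via IsRegexMatch. The sketch below reproduces that flow with std::regex; note it assumes the stored tag is the pattern, which may be the reverse of the real IsRegexMatch argument order.

#include <iostream>
#include <regex>
#include <string>
#include <utility>
#include <vector>

// Trim leading/trailing blanks, mirroring TrimStringBlank.
std::string TrimBlank(const std::string& s) {
    auto b = s.find_first_not_of(" \t");
    if (b == std::string::npos) return "";
    auto e = s.find_last_not_of(" \t");
    return s.substr(b, e - b + 1);
}

int main() {
    // (stored tag pattern, internal partition table) pairs, illustrative only
    std::vector<std::pair<std::string, std::string>> partitions = {
        {"2019-11", "tbl__p0"}, {"2019-.*", "tbl__p1"},
    };

    std::string tag = "  2019-11  ";  // "  ab cd  " is treated as "ab cd"
    std::string valid_tag = TrimBlank(tag);

    for (const auto& p : partitions) {
        if (std::regex_match(valid_tag, std::regex(p.first))) {
            std::cout << "matched partition " << p.second << "\n";  // p0 and p1
        }
    }
}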
+Status
+DBImpl::DropTableRecursively(const std::string& table_id, const meta::DatesT& dates) {
+    // "dates" would drop only part of the table's files, but that is not supported yet
+    ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id;
+
+    Status status;
+    if (dates.empty()) {
+        status = mem_mgr_->EraseMemVector(table_id);  // not allow insert
+        status = meta_ptr_->DropTable(table_id);      // soft delete table
+        index_failed_checker_.CleanFailedIndexFileOfTable(table_id);
+
+        // scheduler will determine when to delete table files
+        auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
+        scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(table_id, meta_ptr_, nres);
+        scheduler::JobMgrInst::GetInstance()->Put(job);
+        job->WaitAndDelete();
+    } else {
+        status = meta_ptr_->DropDataByDate(table_id, dates);
+    }
+
+    std::vector<meta::TableSchema> partition_array;
+    status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    for (auto& schema : partition_array) {
+        status = DropTableRecursively(schema.table_id_, dates);
+        if (!status.ok()) {
+            return status;
+        }
+    }
+
+    return Status::OK();
+}
+
+Status
+DBImpl::UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index) {
+    DropIndex(table_id);
+
+    auto status = meta_ptr_->UpdateTableIndex(table_id, index);
+    if (!status.ok()) {
+        ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id;
+        return status;
+    }
+
+    std::vector<meta::TableSchema> partition_array;
+    status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    for (auto& schema : partition_array) {
+        status = UpdateTableIndexRecursively(schema.table_id_, index);
+        if (!status.ok()) {
+            return status;
+        }
+    }
+
+    return Status::OK();
+}
+
+Status
+DBImpl::BuildTableIndexRecursively(const std::string& table_id, const TableIndex& index) {
+    // for IDMAP type, only wait all NEW files converted to RAW files
+    // for other types, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files
+    std::vector<int> file_types;
+    if (index.engine_type_ == static_cast<int32_t>(EngineType::FAISS_IDMAP)) {
+        file_types = {
+            static_cast<int32_t>(meta::TableFileSchema::NEW),
+            static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
+        };
+    } else {
+        file_types = {
+            static_cast<int32_t>(meta::TableFileSchema::RAW),
+            static_cast<int32_t>(meta::TableFileSchema::NEW),
+            static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
+            static_cast<int32_t>(meta::TableFileSchema::NEW_INDEX),
+            static_cast<int32_t>(meta::TableFileSchema::TO_INDEX),
+        };
+    }
+
+    // get files to build index
+    meta::TableFilesSchema table_files;
+    auto status = GetFilesToBuildIndex(table_id, file_types, table_files);
+    int times = 1;
+
+    while (!table_files.empty()) {
+        ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
+        if (index.engine_type_ != (int)EngineType::FAISS_IDMAP) {
+            status = meta_ptr_->UpdateTableFilesToIndex(table_id);
+        }
+
+        std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
+        GetFilesToBuildIndex(table_id, file_types, table_files);
+        times++;
+
+        index_failed_checker_.IgnoreFailedIndexFiles(table_files);
+    }
+
+    // build index for partition
+    std::vector<meta::TableSchema> partition_array;
+    status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    for (auto& schema : partition_array) {
+        status = BuildTableIndexRecursively(schema.table_id_, index);
+        if (!status.ok()) {
+            return status;
+        }
+    }
+
+    // failed to build index for some files, return error
+    std::vector<std::string> failed_files;
+    index_failed_checker_.GetFailedIndexFileOfTable(table_id, failed_files);
+    if (!failed_files.empty()) {
+        std::string msg = "Failed to build index for " + std::to_string(failed_files.size()) +
+                          ((failed_files.size() == 1) ? " file" : " files");
+        msg += ", please double check index parameters.";
+        return Status(DB_ERROR, msg);
+    }
+
+    return Status::OK();
+}
+
+Status
+DBImpl::DropTableIndexRecursively(const std::string& table_id) {
+    ENGINE_LOG_DEBUG << "Drop index for table: " << table_id;
+    index_failed_checker_.CleanFailedIndexFileOfTable(table_id);
+    auto status = meta_ptr_->DropTableIndex(table_id);
+    if (!status.ok()) {
+        return status;
+    }
+
+    // drop partition index
+    std::vector<meta::TableSchema> partition_array;
+    status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    for (auto& schema : partition_array) {
+        status = DropTableIndexRecursively(schema.table_id_);
+        if (!status.ok()) {
+            return status;
+        }
+    }
+
+    return Status::OK();
+}
+
+Status
+DBImpl::GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count) {
+    row_count = 0;
+    auto status = meta_ptr_->Count(table_id, row_count);
+    if (!status.ok()) {
+        return status;
+    }
+
+    // get partition row count
+    std::vector<meta::TableSchema> partition_array;
+    status = meta_ptr_->ShowPartitions(table_id, partition_array);
+    for (auto& schema : partition_array) {
+        uint64_t partition_row_count = 0;
+        status = GetTableRowCountRecursively(schema.table_id_, partition_row_count);
+        if (!status.ok()) {
+            return status;
+        }
+
+        row_count += partition_row_count;
+    }
+
+    return Status::OK();
+}

}  // namespace engine
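All of the *Recursively helpers above share one shape: apply the operation to the parent table, then recurse into every schema returned by ShowPartitions, bailing out on the first failure. A self-contained model of that traversal for the row-count case (MiniMeta is illustrative; the real partitions come from the metadata store):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy meta store modeling the "parent table + partition tables" layout:
// partitions are ordinary tables listed under their parent, so aggregation
// is a simple recursion.
struct MiniMeta {
    std::map<std::string, uint64_t> row_counts;
    std::map<std::string, std::vector<std::string>> partitions;

    uint64_t CountRecursively(const std::string& table_id) const {
        uint64_t total = 0;
        auto rc = row_counts.find(table_id);
        if (rc != row_counts.end()) total += rc->second;

        auto ps = partitions.find(table_id);
        if (ps != partitions.end()) {
            for (const auto& child : ps->second) {
                total += CountRecursively(child);  // partitions may nest
            }
        }
        return total;
    }
};

int main() {
    MiniMeta meta;
    meta.row_counts = {{"tbl", 10}, {"tbl__p0", 25}, {"tbl__p1", 5}};
    meta.partitions["tbl"] = {"tbl__p0", "tbl__p1"};
    std::cout << meta.CountRecursively("tbl") << "\n";  // 40
}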
@@ -18,13 +18,16 @@
#pragma once

#include "DB.h"
-#include "Types.h"
-#include "src/db/insert/MemManager.h"
+#include "db/IndexFailedChecker.h"
+#include "db/OngoingFileChecker.h"
+#include "db/Types.h"
+#include "db/insert/MemManager.h"
#include "utils/ThreadPool.h"

#include <atomic>
#include <condition_variable>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>

@@ -35,8 +38,6 @@
namespace milvus {
namespace engine {

-class Env;
-
namespace meta {
class Meta;
}

@@ -57,7 +58,7 @@ class DBImpl : public DB {
    CreateTable(meta::TableSchema& table_schema) override;

    Status
-    DeleteTable(const std::string& table_id, const meta::DatesT& dates) override;
+    DropTable(const std::string& table_id, const meta::DatesT& dates) override;

    Status
    DescribeTable(meta::TableSchema& table_schema) override;

@@ -78,7 +79,21 @@ class DBImpl : public DB {
    GetTableRowCount(const std::string& table_id, uint64_t& row_count) override;

    Status
-    InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) override;
+    CreatePartition(const std::string& table_id, const std::string& partition_name,
+                    const std::string& partition_tag) override;
+
+    Status
+    DropPartition(const std::string& partition_name) override;
+
+    Status
+    DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) override;
+
+    Status
+    ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) override;
+
+    Status
+    InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors,
+                  IDNumbers& vector_ids) override;

    Status
    CreateIndex(const std::string& table_id, const TableIndex& index) override;

@@ -90,18 +105,19 @@ class DBImpl : public DB {
    DropIndex(const std::string& table_id) override;

    Status
-    Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
-          ResultIds& result_ids, ResultDistances& result_distances) override;
+    Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
+          uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) override;

    Status
-    Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
-          const meta::DatesT& dates, ResultIds& result_ids, ResultDistances& result_distances) override;
-
-    Status
-    Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
+    Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
          uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
          ResultDistances& result_distances) override;

+    Status
+    QueryByFileID(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
+                  uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
+                  ResultDistances& result_distances) override;
+
    Status
    Size(uint64_t& result) override;

@@ -135,7 +151,34 @@ class DBImpl : public DB {
    BackgroundBuildIndex();

    Status
-    MemSerialize();
+    SyncMemData(std::set<std::string>& sync_table_ids);
+
+    Status
+    GetFilesToBuildIndex(const std::string& table_id, const std::vector<int>& file_types,
+                         meta::TableFilesSchema& files);
+
+    Status
+    GetFilesToSearch(const std::string& table_id, const std::vector<size_t>& file_ids, const meta::DatesT& dates,
+                     meta::TableFilesSchema& files);
+
+    Status
+    GetPartitionsByTags(const std::string& table_id, const std::vector<std::string>& partition_tags,
+                        std::set<std::string>& partition_name_array);
+
+    Status
+    DropTableRecursively(const std::string& table_id, const meta::DatesT& dates);
+
+    Status
+    UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index);
+
+    Status
+    BuildTableIndexRecursively(const std::string& table_id, const TableIndex& index);
+
+    Status
+    DropTableIndexRecursively(const std::string& table_id);
+
+    Status
+    GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count);

 private:
    const DBOptions options_;

@@ -158,6 +201,9 @@ class DBImpl : public DB {
    std::list<std::future<void>> index_thread_results_;

    std::mutex build_index_mutex_;

+    IndexFailedChecker index_failed_checker_;
+    OngoingFileChecker ongoing_files_checker_;
};  // DBImpl

}  // namespace engine
@ -0,0 +1,112 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#include "db/IndexFailedChecker.h"

#include <utility>

namespace milvus {
namespace engine {

constexpr uint64_t INDEX_FAILED_RETRY_TIME = 1;

Status
IndexFailedChecker::CleanFailedIndexFileOfTable(const std::string& table_id) {
    std::lock_guard<std::mutex> lck(mutex_);
    index_failed_files_.erase(table_id);  // forget failed files so indexing can be retried for this table

    return Status::OK();
}

Status
IndexFailedChecker::GetFailedIndexFileOfTable(const std::string& table_id, std::vector<std::string>& failed_files) {
    failed_files.clear();
    std::lock_guard<std::mutex> lck(mutex_);
    auto iter = index_failed_files_.find(table_id);
    if (iter != index_failed_files_.end()) {
        File2RefCount& failed_map = iter->second;
        for (auto it_file = failed_map.begin(); it_file != failed_map.end(); ++it_file) {
            failed_files.push_back(it_file->first);
        }
    }

    return Status::OK();
}

Status
IndexFailedChecker::MarkFailedIndexFile(const meta::TableFileSchema& file) {
    std::lock_guard<std::mutex> lck(mutex_);

    auto iter = index_failed_files_.find(file.table_id_);
    if (iter == index_failed_files_.end()) {
        File2RefCount failed_files;
        failed_files.insert(std::make_pair(file.file_id_, 1));
        index_failed_files_.insert(std::make_pair(file.table_id_, failed_files));
    } else {
        auto it_failed_files = iter->second.find(file.file_id_);
        if (it_failed_files != iter->second.end()) {
            it_failed_files->second++;
        } else {
            iter->second.insert(std::make_pair(file.file_id_, 1));
        }
    }

    return Status::OK();
}

Status
IndexFailedChecker::MarkSucceedIndexFile(const meta::TableFileSchema& file) {
    std::lock_guard<std::mutex> lck(mutex_);

    auto iter = index_failed_files_.find(file.table_id_);
    if (iter != index_failed_files_.end()) {
        iter->second.erase(file.file_id_);
        if (iter->second.empty()) {
            index_failed_files_.erase(file.table_id_);
        }
    }

    return Status::OK();
}

Status
IndexFailedChecker::IgnoreFailedIndexFiles(meta::TableFilesSchema& table_files) {
    std::lock_guard<std::mutex> lck(mutex_);

    // The files could belong to different tables. A file that has already failed
    // INDEX_FAILED_RETRY_TIME times is removed from the candidate list so its index
    // is not built again; this avoids an endless build-index loop.
    for (auto it_file = table_files.begin(); it_file != table_files.end();) {
        auto it_failed_files = index_failed_files_.find((*it_file).table_id_);
        if (it_failed_files != index_failed_files_.end()) {
            auto it_failed_file = it_failed_files->second.find((*it_file).file_id_);
            if (it_failed_file != it_failed_files->second.end()) {
                if (it_failed_file->second >= INDEX_FAILED_RETRY_TIME) {
                    it_file = table_files.erase(it_file);
                    continue;
                }
            }
        }

        ++it_file;
    }

    return Status::OK();
}

}  // namespace engine
}  // namespace milvus
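The checker above is pure bookkeeping; a standalone model of the retry-cap technique it implements, with simplified stand-in types (the real class takes meta::TableFileSchema and returns Status):

// Minimal sketch of the failure-count cap used by IgnoreFailedIndexFiles.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

using File2RefCount = std::map<std::string, int64_t>;
using Table2Files = std::map<std::string, File2RefCount>;

constexpr int64_t kRetryCap = 1;  // mirrors INDEX_FAILED_RETRY_TIME

int main() {
    Table2Files failed;           // table id -> (file id -> failure count)
    failed["tbl"]["f1"] = 1;      // f1 already failed once

    std::vector<std::pair<std::string, std::string>> candidates = {
        {"tbl", "f1"}, {"tbl", "f2"}};

    // Drop candidates whose failure count reached the cap, so they are
    // not rebuilt forever while other files make progress.
    for (auto it = candidates.begin(); it != candidates.end();) {
        auto t = failed.find(it->first);
        if (t != failed.end()) {
            auto f = t->second.find(it->second);
            if (f != t->second.end() && f->second >= kRetryCap) {
                it = candidates.erase(it);
                continue;
            }
        }
        ++it;
    }

    for (auto& c : candidates) std::cout << c.second << '\n';  // prints: f2
}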
@ -0,0 +1,55 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include "db/Types.h"
#include "meta/Meta.h"
#include "utils/Status.h"

#include <map>
#include <mutex>
#include <string>
#include <vector>

namespace milvus {
namespace engine {

class IndexFailedChecker {
 public:
    Status
    CleanFailedIndexFileOfTable(const std::string& table_id);

    Status
    GetFailedIndexFileOfTable(const std::string& table_id, std::vector<std::string>& failed_files);

    Status
    MarkFailedIndexFile(const meta::TableFileSchema& file);

    Status
    MarkSucceedIndexFile(const meta::TableFileSchema& file);

    Status
    IgnoreFailedIndexFiles(meta::TableFilesSchema& table_files);

 private:
    std::mutex mutex_;
    Table2Files index_failed_files_;  // table id -> (file id -> failure count)
};

}  // namespace engine
}  // namespace milvus
@ -0,0 +1,130 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#include "db/OngoingFileChecker.h"
#include "utils/Log.h"

#include <utility>

namespace milvus {
namespace engine {

Status
OngoingFileChecker::MarkOngoingFile(const meta::TableFileSchema& table_file) {
    std::lock_guard<std::mutex> lck(mutex_);
    return MarkOngoingFileNoLock(table_file);
}

Status
OngoingFileChecker::MarkOngoingFiles(const meta::TableFilesSchema& table_files) {
    std::lock_guard<std::mutex> lck(mutex_);

    for (auto& table_file : table_files) {
        MarkOngoingFileNoLock(table_file);
    }

    return Status::OK();
}

Status
OngoingFileChecker::UnmarkOngoingFile(const meta::TableFileSchema& table_file) {
    std::lock_guard<std::mutex> lck(mutex_);
    return UnmarkOngoingFileNoLock(table_file);
}

Status
OngoingFileChecker::UnmarkOngoingFiles(const meta::TableFilesSchema& table_files) {
    std::lock_guard<std::mutex> lck(mutex_);

    for (auto& table_file : table_files) {
        UnmarkOngoingFileNoLock(table_file);
    }

    return Status::OK();
}

bool
OngoingFileChecker::IsIgnored(const meta::TableFileSchema& schema) {
    std::lock_guard<std::mutex> lck(mutex_);

    auto iter = ongoing_files_.find(schema.table_id_);
    if (iter == ongoing_files_.end()) {
        return false;
    } else {
        auto it_file = iter->second.find(schema.file_id_);
        if (it_file == iter->second.end()) {
            return false;
        } else {
            return (it_file->second > 0);
        }
    }
}

Status
OngoingFileChecker::MarkOngoingFileNoLock(const meta::TableFileSchema& table_file) {
    if (table_file.table_id_.empty() || table_file.file_id_.empty()) {
        return Status(DB_ERROR, "Invalid table files");
    }

    auto iter = ongoing_files_.find(table_file.table_id_);
    if (iter == ongoing_files_.end()) {
        File2RefCount files_refcount;
        files_refcount.insert(std::make_pair(table_file.file_id_, 1));
        ongoing_files_.insert(std::make_pair(table_file.table_id_, files_refcount));
    } else {
        auto it_file = iter->second.find(table_file.file_id_);
        if (it_file == iter->second.end()) {
            iter->second[table_file.file_id_] = 1;
        } else {
            it_file->second++;
        }
    }

    ENGINE_LOG_DEBUG << "Mark ongoing file:" << table_file.file_id_
                     << " refcount:" << ongoing_files_[table_file.table_id_][table_file.file_id_];

    return Status::OK();
}

Status
OngoingFileChecker::UnmarkOngoingFileNoLock(const meta::TableFileSchema& table_file) {
    if (table_file.table_id_.empty() || table_file.file_id_.empty()) {
        return Status(DB_ERROR, "Invalid table files");
    }

    auto iter = ongoing_files_.find(table_file.table_id_);
    if (iter != ongoing_files_.end()) {
        auto it_file = iter->second.find(table_file.file_id_);
        if (it_file != iter->second.end()) {
            it_file->second--;

            ENGINE_LOG_DEBUG << "Unmark ongoing file:" << table_file.file_id_ << " refcount:" << it_file->second;

            if (it_file->second <= 0) {
                iter->second.erase(table_file.file_id_);
                if (iter->second.empty()) {
                    ongoing_files_.erase(table_file.table_id_);
                }
            }
        }
    }

    return Status::OK();
}

}  // namespace engine
}  // namespace milvus
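The mark/unmark pairs above must stay balanced on every exit path or a file is pinned forever. A minimal RAII sketch of that discipline, with simplified stand-in types (ScopedOngoingFile is hypothetical; the real checker takes meta::TableFileSchema):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>

// Simplified stand-in for OngoingFileChecker: file id -> ref-count.
class Checker {
 public:
    void Mark(const std::string& file) { ++refs_[file]; }
    void Unmark(const std::string& file) {
        if (--refs_[file] <= 0) refs_.erase(file);
    }
    bool IsIgnored(const std::string& file) const { return refs_.count(file) > 0; }

 private:
    std::map<std::string, int64_t> refs_;
};

// Guard that unmarks on every exit path, including exceptions.
class ScopedOngoingFile {
 public:
    ScopedOngoingFile(Checker& c, std::string file) : c_(c), file_(std::move(file)) { c_.Mark(file_); }
    ~ScopedOngoingFile() { c_.Unmark(file_); }

 private:
    Checker& c_;
    std::string file_;
};

int main() {
    Checker checker;
    {
        ScopedOngoingFile guard(checker, "f1");        // search in progress
        std::cout << checker.IsIgnored("f1") << '\n';  // 1: cleanup must skip f1
    }
    std::cout << checker.IsIgnored("f1") << '\n';      // 0: safe to delete
}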
@ -0,0 +1,62 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#pragma once

#include "db/Types.h"
#include "meta/Meta.h"
#include "utils/Status.h"

#include <map>
#include <mutex>
#include <set>
#include <string>

namespace milvus {
namespace engine {

class OngoingFileChecker : public meta::Meta::CleanUpFilter {
 public:
    Status
    MarkOngoingFile(const meta::TableFileSchema& table_file);

    Status
    MarkOngoingFiles(const meta::TableFilesSchema& table_files);

    Status
    UnmarkOngoingFile(const meta::TableFileSchema& table_file);

    Status
    UnmarkOngoingFiles(const meta::TableFilesSchema& table_files);

    bool
    IsIgnored(const meta::TableFileSchema& schema) override;

 private:
    Status
    MarkOngoingFileNoLock(const meta::TableFileSchema& table_file);

    Status
    UnmarkOngoingFileNoLock(const meta::TableFileSchema& table_file);

 private:
    std::mutex mutex_;
    Table2Files ongoing_files_;  // table id -> (file id -> ongoing ref-count)
};

}  // namespace engine
}  // namespace milvus
@ -33,7 +33,7 @@ static const char* ARCHIVE_CONF_DISK = "disk";
static const char* ARCHIVE_CONF_DAYS = "days";

struct ArchiveConf {
    using CriteriaT = std::map<std::string, int>;
    using CriteriaT = std::map<std::string, int64_t>;

    explicit ArchiveConf(const std::string& type, const std::string& criterias = std::string());

@ -21,14 +21,16 @@

#include <faiss/Index.h>
#include <stdint.h>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

namespace milvus {
namespace engine {

using IDNumber = faiss::Index::idx_t;

typedef int64_t IDNumber;
typedef IDNumber* IDNumberPtr;
typedef std::vector<IDNumber> IDNumbers;

@ -41,5 +43,8 @@ struct TableIndex {
    int32_t metric_type_ = (int)MetricType::L2;
};

using File2RefCount = std::map<std::string, int64_t>;
using Table2Files = std::map<std::string, File2RefCount>;

}  // namespace engine
}  // namespace milvus

@ -154,7 +154,9 @@ GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file
    }

    std::string msg = "Table file doesn't exist: " + file_path;
    ENGINE_LOG_ERROR << msg << " in path: " << options.path_ << " for table: " << table_file.table_id_;
    if (table_file.file_size_ > 0) {  // no need to pop error for empty file
        ENGINE_LOG_ERROR << msg << " in path: " << options.path_ << " for table: " << table_file.table_id_;
    }

    return Status(DB_ERROR, msg);
}

@ -26,6 +26,7 @@
namespace milvus {
namespace engine {

// TODO(linxj): replace with VecIndex::IndexType
enum class EngineType {
    INVALID = 0,
    FAISS_IDMAP = 1,

@ -33,7 +34,10 @@ enum class EngineType {
    FAISS_IVFSQ8,
    NSG_MIX,
    FAISS_IVFSQ8H,
    MAX_VALUE = FAISS_IVFSQ8H,
    FAISS_PQ,
    SPTAG_KDT,
    SPTAG_BKT,
    MAX_VALUE = SPTAG_BKT,
};

enum class MetricType {

@ -73,8 +77,8 @@ class ExecutionEngine {
    virtual Status
    CopyToCpu() = 0;

    virtual std::shared_ptr<ExecutionEngine>
    Clone() = 0;
    // virtual std::shared_ptr<ExecutionEngine>
    // Clone() = 0;

    virtual Status
    Merge(const std::string& location) = 0;

@ -25,6 +25,7 @@
#include "utils/CommonUtil.h"
#include "utils/Exception.h"
#include "utils/Log.h"

#include "wrapper/ConfAdapter.h"
#include "wrapper/ConfAdapterMgr.h"
#include "wrapper/VecImpl.h"

@ -85,6 +86,11 @@ ExecutionEngineImpl::ExecutionEngineImpl(VecIndexPtr index, const std::string& l

VecIndexPtr
ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
#ifdef MILVUS_GPU_VERSION
    server::Config& config = server::Config::GetInstance();
    bool gpu_resource_enable = true;
    config.GetGpuResourceConfigEnable(gpu_resource_enable);
#endif
    std::shared_ptr<VecIndex> index;
    switch (type) {
        case EngineType::FAISS_IDMAP: {

@ -92,19 +98,52 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
            break;
        }
        case EngineType::FAISS_IVFFLAT: {
            index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_MIX);
#ifdef MILVUS_GPU_VERSION
            if (gpu_resource_enable)
                index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_MIX);
            else
#endif
                index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_CPU);
            break;
        }
        case EngineType::FAISS_IVFSQ8: {
            index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_MIX);
#ifdef MILVUS_GPU_VERSION
            if (gpu_resource_enable)
                index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_MIX);
            else
#endif
                index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_CPU);
            break;
        }
        case EngineType::NSG_MIX: {
            index = GetVecIndexFactory(IndexType::NSG_MIX);
            break;
        }
#ifdef CUSTOMIZATION
        case EngineType::FAISS_IVFSQ8H: {
            index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_HYBRID);
            if (gpu_resource_enable) {
                index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_HYBRID);
            } else {
                throw Exception(DB_ERROR, "No GPU resources for IVFSQ8H");
            }
            break;
        }
#endif
        case EngineType::FAISS_PQ: {
#ifdef MILVUS_GPU_VERSION
            if (gpu_resource_enable)
                index = GetVecIndexFactory(IndexType::FAISS_IVFPQ_MIX);
            else
#endif
                index = GetVecIndexFactory(IndexType::FAISS_IVFPQ_CPU);
            break;
        }
        case EngineType::SPTAG_KDT: {
            index = GetVecIndexFactory(IndexType::SPTAG_KDT_RNT_CPU);
            break;
        }
        case EngineType::SPTAG_BKT: {
            index = GetVecIndexFactory(IndexType::SPTAG_BKT_RNT_CPU);
            break;
        }
        default: {
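The hunk above repeats one pattern: prefer the mixed GPU/CPU index only when the binary was compiled for GPU and the GPU resource is enabled at runtime, otherwise fall back to the CPU index. A standalone sketch of that compile-time-plus-runtime fallback (factory names here are illustrative, not the real wrapper API):

#include <iostream>
#include <string>

std::string GetVecIndexFactory(const std::string& kind) { return kind; }

std::string CreateIvfFlat(bool gpu_resource_enable) {
#ifdef MILVUS_GPU_VERSION
    if (gpu_resource_enable)
        return GetVecIndexFactory("FAISS_IVFFLAT_MIX");  // GPU/CPU mixed index
#endif
    (void)gpu_resource_enable;                           // unused in CPU-only builds
    return GetVecIndexFactory("FAISS_IVFFLAT_CPU");      // pure CPU index
}

int main() {
    // Prints FAISS_IVFFLAT_MIX only when built with -DMILVUS_GPU_VERSION.
    std::cout << CreateIvfFlat(true) << '\n';
}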
@ -126,8 +165,16 @@ ExecutionEngineImpl::HybridLoad() const {
        return;
    }

#ifdef MILVUS_GPU_VERSION
    const std::string key = location_ + ".quantizer";
    std::vector<uint64_t> gpus = scheduler::get_gpu_pool();

    server::Config& config = server::Config::GetInstance();
    std::vector<int64_t> gpus;
    Status s = config.GetGpuResourceConfigSearchResources(gpus);
    if (!s.ok()) {
        ENGINE_LOG_ERROR << s.message();
        return;
    }

    // cache hit
    {

@ -173,6 +220,7 @@ ExecutionEngineImpl::HybridLoad() const {
    auto cache_quantizer = std::make_shared<CachedQuantizer>(quantizer);
    cache::GpuCacheMgr::GetInstance(best_device_id)->InsertItem(key, cache_quantizer);
}
#endif
}

void

@ -223,6 +271,17 @@ ExecutionEngineImpl::PhysicalSize() const {
Status
ExecutionEngineImpl::Serialize() {
    auto status = write_index(index_, location_);

    // Reset the index size to the file size on disk, since some index types
    // (such as SQ8) become smaller after serialization.
    index_->set_size(PhysicalSize());
    ENGINE_LOG_DEBUG << "Finish serialize index file: " << location_ << " size: " << index_->Size();

    if (index_->Size() == 0) {
        std::string msg = "Failed to serialize file: " + location_ + " reason: out of disk space or memory";
        status = Status(DB_ERROR, msg);
    }

    return status;
}

@ -309,21 +368,43 @@ ExecutionEngineImpl::CopyToGpu(uint64_t device_id, bool hybrid) {
        return Status::OK();
    }
#endif
    try {
        index_ = index_->CopyToGpu(device_id);
        ENGINE_LOG_DEBUG << "CPU to GPU" << device_id;
    } catch (std::exception& e) {
        ENGINE_LOG_ERROR << e.what();
        return Status(DB_ERROR, e.what());

#ifdef MILVUS_GPU_VERSION
    auto index = std::static_pointer_cast<VecIndex>(cache::GpuCacheMgr::GetInstance(device_id)->GetIndex(location_));
    bool already_in_cache = (index != nullptr);
    if (already_in_cache) {
        index_ = index;
    } else {
        if (index_ == nullptr) {
            ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to copy to gpu";
            return Status(DB_ERROR, "index is null");
        }

        try {
            index_ = index_->CopyToGpu(device_id);
            ENGINE_LOG_DEBUG << "CPU to GPU" << device_id;
        } catch (std::exception& e) {
            ENGINE_LOG_ERROR << e.what();
            return Status(DB_ERROR, e.what());
        }
    }

    if (!already_in_cache) {
        GpuCache(device_id);
    }
#endif

    return Status::OK();
}

Status
ExecutionEngineImpl::CopyToIndexFileToGpu(uint64_t device_id) {
#ifdef MILVUS_GPU_VERSION
    gpu_num_ = device_id;
    auto to_index_data = std::make_shared<ToIndexData>(PhysicalSize());
    cache::DataObjPtr obj = std::static_pointer_cast<cache::DataObj>(to_index_data);
    milvus::cache::GpuCacheMgr::GetInstance(device_id)->InsertItem(location_, obj);
#endif
    return Status::OK();
}

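The rewritten CopyToGpu consults the GPU cache before paying for a host-to-device transfer, and publishes the result on a miss. A standalone sketch of that cache-first pattern (illustrative types only, not the real GpuCacheMgr API):

#include <iostream>
#include <map>
#include <memory>
#include <string>

using Index = std::string;  // stand-in for VecIndex

std::map<std::string, std::shared_ptr<Index>> gpu_cache;  // location -> cached index

std::shared_ptr<Index> CopyToGpu(const std::string& location, std::shared_ptr<Index> cpu_index) {
    auto it = gpu_cache.find(location);
    if (it != gpu_cache.end()) {
        return it->second;  // cache hit: reuse without transferring
    }
    // Cache miss: do the (expensive) transfer, then publish for later callers.
    auto gpu_index = std::make_shared<Index>(*cpu_index + "@gpu");
    gpu_cache[location] = gpu_index;
    return gpu_index;
}

int main() {
    auto cpu = std::make_shared<Index>("ivf_sq8");
    std::cout << *CopyToGpu("/tables/t1/f1", cpu) << '\n';  // copies to "GPU"
    std::cout << *CopyToGpu("/tables/t1/f1", cpu) << '\n';  // served from cache
}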
@ -354,18 +435,18 @@ ExecutionEngineImpl::CopyToCpu() {
    return Status::OK();
}

ExecutionEnginePtr
ExecutionEngineImpl::Clone() {
    if (index_ == nullptr) {
        ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to clone";
        return nullptr;
    }

    auto ret = std::make_shared<ExecutionEngineImpl>(dim_, location_, index_type_, metric_type_, nlist_);
    ret->Init();
    ret->index_ = index_->Clone();
    return ret;
}
// ExecutionEnginePtr
// ExecutionEngineImpl::Clone() {
//     if (index_ == nullptr) {
//         ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to clone";
//         return nullptr;
//     }
//
//     auto ret = std::make_shared<ExecutionEngineImpl>(dim_, location_, index_type_, metric_type_, nlist_);
//     ret->Init();
//     ret->index_ = index_->Clone();
//     return ret;
// }

Status
ExecutionEngineImpl::Merge(const std::string& location) {

@ -394,7 +475,9 @@ ExecutionEngineImpl::Merge(const std::string& location) {
    if (auto file_index = std::dynamic_pointer_cast<BFIndex>(to_merge)) {
        auto status = index_->Add(file_index->Count(), file_index->GetRawVectors(), file_index->GetRawIds());
        if (!status.ok()) {
            ENGINE_LOG_ERROR << "Merge: Add Error";
            ENGINE_LOG_ERROR << "Failed to merge: " << location << " to: " << location_;
        } else {
            ENGINE_LOG_DEBUG << "Finish merge index file: " << location;
        }
        return status;
    } else {

@ -432,6 +515,7 @@ ExecutionEngineImpl::BuildIndex(const std::string& location, EngineType engine_t
        throw Exception(DB_ERROR, status.message());
    }

    ENGINE_LOG_DEBUG << "Finish build index file: " << location << " size: " << to_index->Size();
    return std::make_shared<ExecutionEngineImpl>(to_index, location, engine_type, metric_type_, nlist_);
}

@ -534,22 +618,34 @@ ExecutionEngineImpl::Cache() {

Status
ExecutionEngineImpl::GpuCache(uint64_t gpu_id) {
#ifdef MILVUS_GPU_VERSION
    cache::DataObjPtr obj = std::static_pointer_cast<cache::DataObj>(index_);
    milvus::cache::GpuCacheMgr::GetInstance(gpu_id)->InsertItem(location_, obj);
#endif
    return Status::OK();
}

// TODO(linxj): remove.
Status
ExecutionEngineImpl::Init() {
#ifdef MILVUS_GPU_VERSION
    server::Config& config = server::Config::GetInstance();
    Status s = config.GetResourceConfigIndexBuildDevice(gpu_num_);
    std::vector<int64_t> gpu_ids;
    Status s = config.GetGpuResourceConfigBuildIndexResources(gpu_ids);
    if (!s.ok()) {
        return s;
        gpu_num_ = knowhere::INVALID_VALUE;
    }
    for (auto id : gpu_ids) {
        if (gpu_num_ == id) {
            return Status::OK();
        }
    }

    std::string msg = "Invalid gpu_num";
    return Status(SERVER_INVALID_ARGUMENT, msg);
#else
    return Status::OK();
#endif
}

}  // namespace engine

@ -64,8 +64,8 @@ class ExecutionEngineImpl : public ExecutionEngine {
    Status
    CopyToCpu() override;

    ExecutionEnginePtr
    Clone() override;
    // ExecutionEnginePtr
    // Clone() override;

    Status
    Merge(const std::string& location) override;

@ -122,8 +122,8 @@ class ExecutionEngineImpl : public ExecutionEngine {
    int64_t dim_;
    std::string location_;

    int32_t nlist_ = 0;
    int32_t gpu_num_ = 0;
    int64_t nlist_ = 0;
    int64_t gpu_num_ = 0;
};

}  // namespace engine

@ -116,6 +116,7 @@ MemManagerImpl::EraseMemVector(const std::string& table_id) {
size_t
MemManagerImpl::GetCurrentMutableMem() {
    size_t total_mem = 0;
    std::unique_lock<std::mutex> lock(mutex_);
    for (auto& kv : mem_id_map_) {
        auto memTable = kv.second;
        total_mem += memTable->GetCurrentMem();

@ -126,6 +127,7 @@ MemManagerImpl::GetCurrentMutableMem() {
size_t
MemManagerImpl::GetCurrentImmutableMem() {
    size_t total_mem = 0;
    std::unique_lock<std::mutex> lock(serialization_mtx_);
    for (auto& mem_table : immu_mem_list_) {
        total_mem += mem_table->GetCurrentMem();
    }

@ -35,6 +35,13 @@ static const char* META_TABLES = "Tables";
static const char* META_TABLEFILES = "TableFiles";

class Meta {
 public:
    class CleanUpFilter {
     public:
        virtual bool
        IsIgnored(const TableFileSchema& schema) = 0;
    };

 public:
    virtual ~Meta() = default;

@ -50,14 +57,11 @@ class Meta {
    virtual Status
    AllTables(std::vector<TableSchema>& table_schema_array) = 0;

    virtual Status
    UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0;

    virtual Status
    UpdateTableFlag(const std::string& table_id, int64_t flag) = 0;

    virtual Status
    DeleteTable(const std::string& table_id) = 0;
    DropTable(const std::string& table_id) = 0;

    virtual Status
    DeleteTableFiles(const std::string& table_id) = 0;

@ -66,20 +70,41 @@ class Meta {
    CreateTableFile(TableFileSchema& file_schema) = 0;

    virtual Status
    DropPartitionsByDates(const std::string& table_id, const DatesT& dates) = 0;
    DropDataByDate(const std::string& table_id, const DatesT& dates) = 0;

    virtual Status
    GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) = 0;

    virtual Status
    UpdateTableFilesToIndex(const std::string& table_id) = 0;

    virtual Status
    UpdateTableFile(TableFileSchema& file_schema) = 0;

    virtual Status
    UpdateTableFiles(TableFilesSchema& files) = 0;

    virtual Status
    UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0;

    virtual Status
    UpdateTableFilesToIndex(const std::string& table_id) = 0;

    virtual Status
    DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0;

    virtual Status
    DropTableIndex(const std::string& table_id) = 0;

    virtual Status
    CreatePartition(const std::string& table_name, const std::string& partition_name, const std::string& tag) = 0;

    virtual Status
    DropPartition(const std::string& partition_name) = 0;

    virtual Status
    ShowPartitions(const std::string& table_name, std::vector<meta::TableSchema>& partition_schema_array) = 0;

    virtual Status
    GetPartitionName(const std::string& table_name, const std::string& tag, std::string& partition_name) = 0;

    virtual Status
    FilesToSearch(const std::string& table_id, const std::vector<size_t>& ids, const DatesT& dates,
                  DatePartionedTableFilesSchema& files) = 0;

@ -87,6 +112,12 @@ class Meta {
    virtual Status
    FilesToMerge(const std::string& table_id, DatePartionedTableFilesSchema& files) = 0;

    virtual Status
    FilesToIndex(TableFilesSchema&) = 0;

    virtual Status
    FilesByType(const std::string& table_id, const std::vector<int>& file_types, TableFilesSchema& table_files) = 0;

    virtual Status
    Size(uint64_t& result) = 0;

@ -94,22 +125,10 @@ class Meta {
    Archive() = 0;

    virtual Status
    FilesToIndex(TableFilesSchema&) = 0;
    CleanUpShadowFiles() = 0;

    virtual Status
    FilesByType(const std::string& table_id, const std::vector<int>& file_types,
                std::vector<std::string>& file_ids) = 0;

    virtual Status
    DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0;

    virtual Status
    DropTableIndex(const std::string& table_id) = 0;

    virtual Status
    CleanUp() = 0;

    virtual Status CleanUpFilesWithTTL(uint16_t) = 0;
    CleanUpFilesWithTTL(uint64_t seconds, CleanUpFilter* filter = nullptr) = 0;

    virtual Status
    DropAll() = 0;

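The new CleanUpFilter hook lets OngoingFileChecker veto deletion of files that a search is still reading. A standalone sketch of how a cleanup pass might consult the filter (simplified types; the real logic lives in the MySQL/SQLite meta implementations):

#include <iostream>
#include <string>
#include <vector>

struct TableFileSchema { std::string table_id_, file_id_; };

class CleanUpFilter {
 public:
    virtual ~CleanUpFilter() = default;
    virtual bool IsIgnored(const TableFileSchema& schema) = 0;
};

// Stand-in for OngoingFileChecker: pretend one file is still referenced.
class FakeOngoingChecker : public CleanUpFilter {
 public:
    bool IsIgnored(const TableFileSchema& schema) override {
        return schema.file_id_ == "busy";
    }
};

void CleanUpFilesWithTTL(std::vector<TableFileSchema>& files, CleanUpFilter* filter) {
    for (auto it = files.begin(); it != files.end();) {
        if (filter != nullptr && filter->IsIgnored(*it)) {
            ++it;                  // still referenced: keep it for now
        } else {
            it = files.erase(it);  // safe to delete
        }
    }
}

int main() {
    std::vector<TableFileSchema> files = {{"tbl", "busy"}, {"tbl", "idle"}};
    FakeOngoingChecker checker;
    CleanUpFilesWithTTL(files, &checker);
    std::cout << files.size() << '\n';  // 1: only the busy file survives
}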
|
@ -27,10 +27,17 @@ const size_t US_PS = 1000 * MS_PS;
|
|||
const size_t NS_PS = 1000 * US_PS;
|
||||
|
||||
const size_t SECOND = 1UL;
|
||||
const size_t M_SEC = 60 * SECOND;
|
||||
const size_t H_SEC = 60 * M_SEC;
|
||||
const size_t D_SEC = 24 * H_SEC;
|
||||
const size_t W_SEC = 7 * D_SEC;
|
||||
const size_t MINUTE = 60 * SECOND;
|
||||
const size_t HOUR = 60 * MINUTE;
|
||||
const size_t DAY = 24 * HOUR;
|
||||
const size_t WEEK = 7 * DAY;
|
||||
|
||||
// This value is to ignore small raw files when building index.
|
||||
// The reason is:
|
||||
// 1. The performance of brute-search for small raw files could be better than small index file.
|
||||
// 2. And small raw files can be merged to larger files, thus reduce fragmented files count.
|
||||
// We decide the value based on a testing for small size raw/index files.
|
||||
const size_t BUILD_INDEX_THRESHOLD = 5000;
|
||||
|
||||
} // namespace meta
|
||||
} // namespace engine
|
||||
|
|
|
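A quick check of the renamed duration constants (values are unchanged from the old M_SEC/H_SEC/D_SEC/W_SEC; only the names differ):

#include <cstddef>
#include <iostream>

const size_t SECOND = 1UL;
const size_t MINUTE = 60 * SECOND;
const size_t HOUR = 60 * MINUTE;
const size_t DAY = 24 * HOUR;
const size_t WEEK = 7 * DAY;

int main() {
    // 86400 604800 -- e.g. a caller might pass DAY as the TTL in seconds.
    std::cout << DAY << ' ' << WEEK << '\n';
}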
@ -19,6 +19,7 @@

#include "db/Constants.h"
#include "db/engine/ExecutionEngine.h"
#include "src/version.h"

#include <map>
#include <memory>

@ -33,6 +34,7 @@ constexpr int32_t DEFAULT_ENGINE_TYPE = (int)EngineType::FAISS_IDMAP;
constexpr int32_t DEFAULT_NLIST = 16384;
constexpr int32_t DEFAULT_METRIC_TYPE = (int)MetricType::L2;
constexpr int32_t DEFAULT_INDEX_FILE_SIZE = ONE_GB;
constexpr char CURRENT_VERSION[] = MILVUS_VERSION;

constexpr int64_t FLAG_MASK_NO_USERID = 0x1;
constexpr int64_t FLAG_MASK_HAS_USERID = 0x1 << 1;

@ -57,6 +59,9 @@ struct TableSchema {
    int32_t engine_type_ = DEFAULT_ENGINE_TYPE;
    int32_t nlist_ = DEFAULT_NLIST;
    int32_t metric_type_ = DEFAULT_METRIC_TYPE;
    std::string owner_table_;
    std::string partition_tag_;
    std::string version_ = CURRENT_VERSION;
};  // TableSchema

struct TableFileSchema {

File diff suppressed because it is too large
@ -49,7 +49,7 @@ class MySQLMetaImpl : public Meta {
    AllTables(std::vector<TableSchema>& table_schema_array) override;

    Status
    DeleteTable(const std::string& table_id) override;
    DropTable(const std::string& table_id) override;

    Status
    DeleteTableFiles(const std::string& table_id) override;

@ -58,27 +58,17 @@ class MySQLMetaImpl : public Meta {
    CreateTableFile(TableFileSchema& file_schema) override;

    Status
    DropPartitionsByDates(const std::string& table_id, const DatesT& dates) override;
    DropDataByDate(const std::string& table_id, const DatesT& dates) override;

    Status
    GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) override;

    Status
    FilesByType(const std::string& table_id, const std::vector<int>& file_types,
                std::vector<std::string>& file_ids) override;

    Status
    UpdateTableIndex(const std::string& table_id, const TableIndex& index) override;

    Status
    UpdateTableFlag(const std::string& table_id, int64_t flag) override;

    Status
    DescribeTableIndex(const std::string& table_id, TableIndex& index) override;

    Status
    DropTableIndex(const std::string& table_id) override;

    Status
    UpdateTableFile(TableFileSchema& file_schema) override;

@ -88,6 +78,24 @@ class MySQLMetaImpl : public Meta {
    Status
    UpdateTableFiles(TableFilesSchema& files) override;

    Status
    DescribeTableIndex(const std::string& table_id, TableIndex& index) override;

    Status
    DropTableIndex(const std::string& table_id) override;

    Status
    CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) override;

    Status
    DropPartition(const std::string& partition_name) override;

    Status
    ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) override;

    Status
    GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override;

    Status
    FilesToSearch(const std::string& table_id, const std::vector<size_t>& ids, const DatesT& dates,
                  DatePartionedTableFilesSchema& files) override;

@ -98,6 +106,10 @@ class MySQLMetaImpl : public Meta {
    Status
    FilesToIndex(TableFilesSchema&) override;

    Status
    FilesByType(const std::string& table_id, const std::vector<int>& file_types,
                TableFilesSchema& table_files) override;

    Status
    Archive() override;

@ -105,10 +117,10 @@ class MySQLMetaImpl : public Meta {
    Size(uint64_t& result) override;

    Status
    CleanUp() override;
    CleanUpShadowFiles() override;

    Status
    CleanUpFilesWithTTL(uint16_t seconds) override;
    CleanUpFilesWithTTL(uint64_t seconds, CleanUpFilter* filter = nullptr) override;

    Status
    DropAll() override;

File diff suppressed because it is too large
@ -49,7 +49,7 @@ class SqliteMetaImpl : public Meta {
    AllTables(std::vector<TableSchema>& table_schema_array) override;

    Status
    DeleteTable(const std::string& table_id) override;
    DropTable(const std::string& table_id) override;

    Status
    DeleteTableFiles(const std::string& table_id) override;

@ -58,21 +58,26 @@ class SqliteMetaImpl : public Meta {
    CreateTableFile(TableFileSchema& file_schema) override;

    Status
    DropPartitionsByDates(const std::string& table_id, const DatesT& dates) override;
    DropDataByDate(const std::string& table_id, const DatesT& dates) override;

    Status
    GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) override;

    Status
    FilesByType(const std::string& table_id, const std::vector<int>& file_types,
                std::vector<std::string>& file_ids) override;

    Status
    UpdateTableIndex(const std::string& table_id, const TableIndex& index) override;

    Status
    UpdateTableFlag(const std::string& table_id, int64_t flag) override;

    Status
    UpdateTableFile(TableFileSchema& file_schema) override;

    Status
    UpdateTableFilesToIndex(const std::string& table_id) override;

    Status
    UpdateTableFiles(TableFilesSchema& files) override;

    Status
    DescribeTableIndex(const std::string& table_id, TableIndex& index) override;

@ -80,13 +85,16 @@ class SqliteMetaImpl : public Meta {
    DropTableIndex(const std::string& table_id) override;

    Status
    UpdateTableFilesToIndex(const std::string& table_id) override;
    CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) override;

    Status
    UpdateTableFile(TableFileSchema& file_schema) override;
    DropPartition(const std::string& partition_name) override;

    Status
    UpdateTableFiles(TableFilesSchema& files) override;
    ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) override;

    Status
    GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override;

    Status
    FilesToSearch(const std::string& table_id, const std::vector<size_t>& ids, const DatesT& dates,

@ -99,16 +107,20 @@ class SqliteMetaImpl : public Meta {
    FilesToIndex(TableFilesSchema&) override;

    Status
    Archive() override;
    FilesByType(const std::string& table_id, const std::vector<int>& file_types,
                TableFilesSchema& table_files) override;

    Status
    Size(uint64_t& result) override;

    Status
    CleanUp() override;
    Archive() override;

    Status
    CleanUpFilesWithTTL(uint16_t seconds) override;
    CleanUpShadowFiles() override;

    Status
    CleanUpFilesWithTTL(uint64_t seconds, CleanUpFilter* filter = nullptr) override;

    Status
    DropAll() override;

File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,6 +0,0 @@
We manually changed the following APIs in "milvus.pb.h":
    add_vector_data()
    add_row_id_array()
    add_ids()
    add_distances()
If the proto files are generated again, remember to re-apply these changes.

@ -22,19 +22,22 @@ namespace grpc {

static const char* MilvusService_method_names[] = {
  "/milvus.grpc.MilvusService/CreateTable",
  "/milvus.grpc.MilvusService/HasTable",
  "/milvus.grpc.MilvusService/DropTable",
  "/milvus.grpc.MilvusService/CreateIndex",
  "/milvus.grpc.MilvusService/Insert",
  "/milvus.grpc.MilvusService/Search",
  "/milvus.grpc.MilvusService/SearchInFiles",
  "/milvus.grpc.MilvusService/DescribeTable",
  "/milvus.grpc.MilvusService/CountTable",
  "/milvus.grpc.MilvusService/ShowTables",
  "/milvus.grpc.MilvusService/Cmd",
  "/milvus.grpc.MilvusService/DeleteByRange",
  "/milvus.grpc.MilvusService/PreloadTable",
  "/milvus.grpc.MilvusService/DropTable",
  "/milvus.grpc.MilvusService/CreateIndex",
  "/milvus.grpc.MilvusService/DescribeIndex",
  "/milvus.grpc.MilvusService/DropIndex",
  "/milvus.grpc.MilvusService/CreatePartition",
  "/milvus.grpc.MilvusService/ShowPartitions",
  "/milvus.grpc.MilvusService/DropPartition",
  "/milvus.grpc.MilvusService/Insert",
  "/milvus.grpc.MilvusService/Search",
  "/milvus.grpc.MilvusService/SearchInFiles",
  "/milvus.grpc.MilvusService/Cmd",
  "/milvus.grpc.MilvusService/DeleteByDate",
  "/milvus.grpc.MilvusService/PreloadTable",
};

std::unique_ptr< MilvusService::Stub> MilvusService::NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) {

@ -46,19 +49,22 @@ std::unique_ptr< MilvusService::Stub> MilvusService::NewStub(const std::shared_p
MilvusService::Stub::Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel)
  : channel_(channel), rpcmethod_CreateTable_(MilvusService_method_names[0], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_HasTable_(MilvusService_method_names[1], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_DropTable_(MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_CreateIndex_(MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_Insert_(MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_Search_(MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_SearchInFiles_(MilvusService_method_names[6], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_DescribeTable_(MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_CountTable_(MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_ShowTables_(MilvusService_method_names[9], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_Cmd_(MilvusService_method_names[10], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_DeleteByRange_(MilvusService_method_names[11], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_PreloadTable_(MilvusService_method_names[12], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_DescribeIndex_(MilvusService_method_names[13], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_DropIndex_(MilvusService_method_names[14], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_DescribeTable_(MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_CountTable_(MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_ShowTables_(MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_DropTable_(MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_CreateIndex_(MilvusService_method_names[6], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_DescribeIndex_(MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_DropIndex_(MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_CreatePartition_(MilvusService_method_names[9], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_ShowPartitions_(MilvusService_method_names[10], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_DropPartition_(MilvusService_method_names[11], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_Insert_(MilvusService_method_names[12], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_Search_(MilvusService_method_names[13], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_SearchInFiles_(MilvusService_method_names[14], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_Cmd_(MilvusService_method_names[15], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_DeleteByDate_(MilvusService_method_names[16], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  , rpcmethod_PreloadTable_(MilvusService_method_names[17], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
  {}

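The generated stub binds each RpcMethod to a position in MilvusService_method_names, so reordering the array only works because every index in the constructor was updated in lockstep (e.g. DescribeTable moved from slot 7 to slot 2). A minimal model of that invariant (illustrative names, not the generated API):

#include <cassert>
#include <cstring>

static const char* method_names[] = {
    "/svc/CreateTable",   // 0
    "/svc/HasTable",      // 1
    "/svc/DescribeTable"  // 2 -- was index 7 before the reorder
};

int main() {
    const int kDescribeTableIdx = 2;  // must match the array above
    assert(std::strcmp(method_names[kDescribeTableIdx], "/svc/DescribeTable") == 0);
}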
::grpc::Status MilvusService::Stub::CreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::milvus::grpc::Status* response) {

@ -117,146 +123,6 @@ void MilvusService::Stub::experimental_async::HasTable(::grpc::ClientContext* co
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::BoolReply>::Create(channel_.get(), cq, rpcmethod_HasTable_, context, request, false);
}

::grpc::Status MilvusService::Stub::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropTable_, context, request, response);
}

void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, false);
}

::grpc::Status MilvusService::Stub::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreateIndex_, context, request, response);
}

void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, false);
}

::grpc::Status MilvusService::Stub::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Insert_, context, request, response);
}

void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, false);
}

::grpc::Status MilvusService::Stub::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResult* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Search_, context, request, response);
}

void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, false);
}

::grpc::Status MilvusService::Stub::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResult* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_SearchInFiles_, context, request, response);
}

void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, false);
}

::grpc::Status MilvusService::Stub::DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::TableSchema* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DescribeTable_, context, request, response);
}

@ -341,88 +207,60 @@ void MilvusService::Stub::experimental_async::ShowTables(::grpc::ClientContext*
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TableNameList>::Create(channel_.get(), cq, rpcmethod_ShowTables_, context, request, false);
}

::grpc::Status MilvusService::Stub::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Cmd_, context, request, response);
::grpc::Status MilvusService::Stub::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropTable_, context, request, response);
}

void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, true);
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, true);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, false);
|
||||
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, false);
|
||||
}

::grpc::Status MilvusService::Stub::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DeleteByRange_, context, request, response);
::grpc::Status MilvusService::Stub::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreateIndex_, context, request, response);
}

void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, std::move(f));
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, std::move(f));
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, reactor);
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, reactor);
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByRange_, context, request, true);
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByRange_, context, request, false);
}

::grpc::Status MilvusService::Stub::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_PreloadTable_, context, request, response);
}

void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
}
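
// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the generated file): every RPC
// also gets the experimental callback overloads defined above, reachable
// through stub->experimental_async(). The lambda runs on a gRPC-owned thread
// once the call finishes; context, request, and response must outlive it.
//
//   ::grpc::ClientContext ctx;
//   ::milvus::grpc::TableName table;             // table to load into cache
//   ::milvus::grpc::Status rsp;
//   stub->experimental_async()->PreloadTable(&ctx, &table, &rsp,
//       [](::grpc::Status s) {
//         // inspect s.ok() / s.error_message() here
//       });
// ---------------------------------------------------------------------------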

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, false);
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, false);
}
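
// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the generated file): the
// Async*Raw/PrepareAsync*Raw factories above differ only in their final
// argument -- true starts the call immediately, false defers it until
// StartCall(). The public Async*/PrepareAsync* wrappers drive a
// CompletionQueue like this:
//
//   ::grpc::CompletionQueue cq;
//   ::grpc::ClientContext ctx;
//   ::milvus::grpc::IndexParam param;            // index type, table name, ...
//   ::milvus::grpc::Status rsp;
//   ::grpc::Status status;
//   auto rpc = stub->PrepareAsyncCreateIndex(&ctx, param, &cq);
//   rpc->StartCall();                            // needed for the Prepare variant
//   rpc->Finish(&rsp, &status, (void*)1);        // tag identifies this operation
//   void* tag; bool ok;
//   cq.Next(&tag, &ok);                          // blocks until the RPC completes
// ---------------------------------------------------------------------------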

::grpc::Status MilvusService::Stub::DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::IndexParam* response) {
@@ -481,6 +319,258 @@ void MilvusService::Stub::experimental_async::DropIndex(::grpc::ClientContext* c
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropIndex_, context, request, false);
}

::grpc::Status MilvusService::Stub::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreatePartition_, context, request, response);
}

void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreatePartition_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreatePartition_, context, request, false);
}

::grpc::Status MilvusService::Stub::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::PartitionList* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_ShowPartitions_, context, request, response);
}

void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* MilvusService::Stub::AsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::PartitionList>::Create(channel_.get(), cq, rpcmethod_ShowPartitions_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* MilvusService::Stub::PrepareAsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::PartitionList>::Create(channel_.get(), cq, rpcmethod_ShowPartitions_, context, request, false);
}

::grpc::Status MilvusService::Stub::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropPartition_, context, request, response);
}

void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropPartition_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropPartition_, context, request, false);
}

::grpc::Status MilvusService::Stub::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Insert_, context, request, response);
}

void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, false);
}

::grpc::Status MilvusService::Stub::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResult* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Search_, context, request, response);
}

void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, false);
}
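
// Editor's note (not part of the generated file): the ::grpc::ByteBuffer
// overloads above take an already-serialized request, so a caller holding
// wire-format bytes can skip a protobuf serialization step; the response
// remains strongly typed.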

::grpc::Status MilvusService::Stub::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResult* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_SearchInFiles_, context, request, response);
}

void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResult* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResult>* MilvusService::Stub::PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResult>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, false);
}

::grpc::Status MilvusService::Stub::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Cmd_, context, request, response);
}

void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, false);
}

::grpc::Status MilvusService::Stub::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DeleteByDate_, context, request, response);
}

void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByDate_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByDate_, context, request, false);
}

::grpc::Status MilvusService::Stub::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_PreloadTable_, context, request, response);
}

void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
}

void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
}

void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, true);
}

::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, false);
}
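
// Editor's note (not part of the generated file): the block above repeats one
// fixed pattern per RPC -- a blocking wrapper, four experimental_async
// overloads (typed/ByteBuffer request, callback/reactor completion), and an
// Async*Raw/PrepareAsync*Raw pair for the completion-queue API -- so every
// MilvusService method exposes the same seven client entry points.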

MilvusService::Service::Service() {
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[0],
@@ -495,68 +585,83 @@ MilvusService::Service::Service() {
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[2],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
          std::mem_fn(&MilvusService::Service::DropTable), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[3],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::IndexParam, ::milvus::grpc::Status>(
          std::mem_fn(&MilvusService::Service::CreateIndex), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[4],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>(
          std::mem_fn(&MilvusService::Service::Insert), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[5],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResult>(
          std::mem_fn(&MilvusService::Service::Search), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[6],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResult>(
          std::mem_fn(&MilvusService::Service::SearchInFiles), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[7],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>(
          std::mem_fn(&MilvusService::Service::DescribeTable), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[8],
      MilvusService_method_names[3],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>(
          std::mem_fn(&MilvusService::Service::CountTable), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[9],
      MilvusService_method_names[4],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::TableNameList>(
          std::mem_fn(&MilvusService::Service::ShowTables), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[10],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::StringReply>(
          std::mem_fn(&MilvusService::Service::Cmd), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[11],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>(
          std::mem_fn(&MilvusService::Service::DeleteByRange), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[12],
      MilvusService_method_names[5],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
          std::mem_fn(&MilvusService::Service::PreloadTable), this)));
          std::mem_fn(&MilvusService::Service::DropTable), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[13],
      MilvusService_method_names[6],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::IndexParam, ::milvus::grpc::Status>(
          std::mem_fn(&MilvusService::Service::CreateIndex), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[7],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>(
          std::mem_fn(&MilvusService::Service::DescribeIndex), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[14],
      MilvusService_method_names[8],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
          std::mem_fn(&MilvusService::Service::DropIndex), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[9],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>(
          std::mem_fn(&MilvusService::Service::CreatePartition), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[10],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>(
          std::mem_fn(&MilvusService::Service::ShowPartitions), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[11],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>(
          std::mem_fn(&MilvusService::Service::DropPartition), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[12],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>(
          std::mem_fn(&MilvusService::Service::Insert), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[13],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResult>(
          std::mem_fn(&MilvusService::Service::Search), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[14],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResult>(
          std::mem_fn(&MilvusService::Service::SearchInFiles), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[15],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::StringReply>(
          std::mem_fn(&MilvusService::Service::Cmd), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[16],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>(
          std::mem_fn(&MilvusService::Service::DeleteByDate), this)));
  AddMethod(new ::grpc::internal::RpcServiceMethod(
      MilvusService_method_names[17],
      ::grpc::internal::RpcMethod::NORMAL_RPC,
      new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
          std::mem_fn(&MilvusService::Service::PreloadTable), this)));
}
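
// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the generated file): the
// constructor above registers every RPC as a NORMAL_RPC handler keyed by its
// index in MilvusService_method_names, and the base implementations below all
// answer UNIMPLEMENTED. A real server subclasses the Service and overrides
// only the methods it serves; the class name here is hypothetical.
//
//   class MilvusServiceImpl final : public ::milvus::grpc::MilvusService::Service {
//     ::grpc::Status Cmd(::grpc::ServerContext* context,
//                        const ::milvus::grpc::Command* request,
//                        ::milvus::grpc::StringReply* response) override {
//       // populate *response here, then report success
//       return ::grpc::Status::OK;
//     }
//   };
// ---------------------------------------------------------------------------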

MilvusService::Service::~Service() {
@@ -576,41 +681,6 @@ MilvusService::Service::~Service() {
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) {
  (void) context;
  (void) request;
@@ -632,21 +702,14 @@ MilvusService::Service::~Service() {
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) {
::grpc::Status MilvusService::Service::DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
::grpc::Status MilvusService::Service::CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) {
  (void) context;
  (void) request;
  (void) response;
@@ -667,6 +730,69 @@ MilvusService::Service::~Service() {
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResult* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResult* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}

::grpc::Status MilvusService::Service::PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
  (void) context;
  (void) request;
  (void) response;
  return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
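
// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the generated file): wiring an
// override subclass (e.g. the hypothetical MilvusServiceImpl above) into a
// listening server uses the standard grpcpp builder; 19530 is Milvus's
// default port.
//
//   ::grpc::ServerBuilder builder;
//   builder.AddListeningPort("0.0.0.0:19530",
//                            ::grpc::InsecureServerCredentials());
//   MilvusServiceImpl service;
//   builder.RegisterService(&service);
//   std::unique_ptr<::grpc::Server> server = builder.BuildAndStart();
//   server->Wait();                              // serve until shutdown
// ---------------------------------------------------------------------------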

} // namespace milvus
} // namespace grpc