Merge pull request #1 from jielinxu/branch-0.5.0

Merge master

Former-commit-id: f9ac00741ce999b220c9f7e2cc9e21af072596a7
pull/191/head
jielinxu 2019-10-25 16:10:35 +08:00 committed by GitHub
commit 7a30504f30
697 changed files with 136783 additions and 32 deletions

.clang-format Normal file
@@ -0,0 +1,27 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
---
BasedOnStyle: Google
DerivePointerAlignment: false
ColumnLimit: 120
IndentWidth: 4
AccessModifierOffset: -3
AlwaysBreakAfterReturnType: All
AllowShortBlocksOnASingleLine: false
AllowShortFunctionsOnASingleLine: false
AllowShortIfStatementsOnASingleLine: false
AlignTrailingComments: true
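As a quick, hedged illustration (the class and member names below are invented for this example), code formatted under these options looks roughly like the following: the return type breaks onto its own line (`AlwaysBreakAfterReturnType: All`), members indent by four spaces (`IndentWidth: 4`), and access specifiers sit one column in (`IndentWidth: 4` plus `AccessModifierOffset: -3`):
```cpp
#include <cstdint>
#include <vector>

class VectorIndex {
 public:
    // The return type is placed on its own line for every function.
    bool
    BuildIndex(const std::vector<float>& vectors);

 private:
    int64_t dimension_ = 0;  // trailing comments are aligned
};
```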

.clang-tidy Normal file
@@ -0,0 +1,33 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
---
Checks: 'clang-diagnostic-*,clang-analyzer-*,-clang-analyzer-alpha*,google-*,modernize-*,readability-*'
# produce HeaderFilterRegex from cpp/build-support/lint_exclusions.txt with:
# echo -n '^('; sed -e 's/*/\.*/g' cpp/build-support/lint_exclusions.txt | tr '\n' '|'; echo ')$'
HeaderFilterRegex: '^(.*cmake-build-debug.*|.*cmake-build-release.*|.*cmake_build.*|.*src/core/thirdparty.*|.*thirdparty.*|.*easylogging++.*|.*SqliteMetaImpl.cpp|.*src/grpc.*|.*src/core.*|.*src/wrapper.*)$'
AnalyzeTemporaryDtors: true
ChainedConditionalReturn: 1
ChainedConditionalAssignment: 1
CheckOptions:
- key: google-readability-braces-around-statements.ShortStatementLines
value: '1'
- key: google-readability-function-size.StatementThreshold
value: '800'
- key: google-readability-namespace-comments.ShortNamespaceLines
value: '10'
- key: google-readability-namespace-comments.SpacesBeforeComments
value: '2'
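For illustration, here is a hedged sketch (the function names are invented) of what the `google-readability-braces-around-statements` option above implies: with `ShortStatementLines` set to `'1'`, a body that fits on a single line may omit braces, while a longer body must be braced.
```cpp
void DoWork();   // forward declarations, for illustration only
void Cleanup();

void
Check(bool ready) {
    if (!ready) return;  // one-line body: braces may be omitted

    if (ready) {  // body spans multiple lines: braces are required
        DoWork();
        Cleanup();
    }
}
```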

.clang-tidy-ignore Normal file
@@ -0,0 +1,17 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

@@ -1,5 +1,5 @@
---
name: Bug report
name: "\U0001F41B Bug report"
about: Create a bug report to help us improve Milvus
title: "[BUG]"
labels: ''

@@ -1,5 +1,5 @@
---
name: Documentation request
name: "\U0001F4DD Documentation request"
about: Report incorrect or needed documentation
title: "[DOC]"
labels: ''

@@ -1,5 +1,5 @@
---
name: Feature request
name: "\U0001F680 Feature request"
about: Suggest an idea for Milvus
title: "[FEATURE]"
labels: ''

@@ -1,5 +1,5 @@
---
name: General question
name: "\U0001F914 General question"
about: Ask a general question about Milvus
title: "[QUESTION]"
labels: ''

.gitignore vendored
@@ -1,32 +1,28 @@
# Prerequisites
*.d
# CLion generated files
core/cmake-build-debug/
core/cmake-build-release/
core/cmake_build
core/.idea/
core/thirdparty/knowhere_build
# Compiled Object files
*.slo
*.lo
*.o
*.obj
.idea/
.ycm_extra_conf.py
__pycache__
# Precompiled Headers
*.gch
*.pch
# vscode generated files
.vscode
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
.env
build
cmake-build-debug
cmake-build-release
cmake_build
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
# Compiled source
*.a
*.lib
# Executables
*.exe
*.out
*.app
*.so
*.so.*
*.o
*.lo
*.tar.gz
*.log

CHANGELOG.md Normal file
@@ -0,0 +1,375 @@
# Changelog
Please mark all changes in the change log and use the ticket number from JIRA.
# Milvus 0.5.1 (TODO)
## Bug
## Improvement
- \#64 - Improve dump function in scheduler
## Feature
## Task
# Milvus 0.5.0 (2019-10-21)
## Bug
- MS-568 - Fix gpuresource free error
- MS-572 - Milvus crash when get SIGINT
- MS-577 - Unittest Query randomly hung
- MS-587 - Count get wrong result after adding vectors and index built immediately
- MS-599 - Search wrong result when table created with metric_type: IP
- MS-601 - Docker logs error caused by get CPUTemperature error
- MS-605 - Server going down during searching vectors
- MS-620 - Get table row counts display wrong error code
- MS-622 - Delete vectors should fail if date range is invalid
- MS-624 - Search vectors failed if time ranges are long enough
- MS-637 - Out of memory when load too many tasks
- MS-639 - SQ8H index created failed and server hang
- MS-640 - Cache object size calculate incorrect
- MS-641 - Segment fault(signal 11) in PickToLoad
- MS-644 - Search crashed with index-type: flat
- MS-647 - grafana display average cpu-temp
- MS-652 - IVFSQH quantization double free
- MS-650 - SQ8H index create issue
- MS-653 - When config check fail, Milvus close without message
- MS-654 - Describe index timeout when building index
- MS-658 - Fix SQ8 Hybrid can't search
- MS-665 - IVF_SQ8H search crash when no GPU resource in search_resources
- \#9 - Change default gpu_cache_capacity to 4
- \#20 - C++ sdk example get grpc error
- \#23 - Add unittest to improve code coverage
- \#31 - make clang-format failed after run build.sh -l
- \#39 - Create SQ8H index hang if using github server version
- \#30 - Some troubleshoot messages in Milvus do not provide enough information
- \#48 - Config unittest failed
- \#59 - Topk result is incorrect for small dataset
## Improvement
- MS-552 - Add and change the easylogging library
- MS-553 - Refine cache code
- MS-555 - Remove old scheduler
- MS-556 - Add Job Definition in Scheduler
- MS-557 - Merge Log.h
- MS-558 - Refine status code
- MS-562 - Add JobMgr and TaskCreator in Scheduler
- MS-566 - Refactor cmake
- MS-574 - Milvus configuration refactor
- MS-578 - Make sure Milvus 0.5.0 does not break 0.3.1 data
- MS-585 - Update namespace in scheduler
- MS-606 - Speed up result reduce
- MS-608 - Update TODO names
- MS-609 - Update task construct function
- MS-611 - Add resources validity check in ResourceMgr
- MS-619 - Add optimizer class in scheduler
- MS-626 - Refactor DataObj to support cache any type data
- MS-648 - Improve unittest
- MS-655 - Upgrade SPTAG
- \#42 - Put union of index_build_device and search resources to gpu_pool
- \#67 - Avoid linking targets multiple times in cmake
## Feature
- MS-614 - Preload table at startup
- MS-627 - Integrate new index: IVFSQHybrid
- MS-631 - IVFSQ8H Index support
- MS-636 - Add optimizer in scheduler for FAISS_IVFSQ8H
## Task
- MS-554 - Change license to Apache 2.0
- MS-561 - Add contributing guidelines, code of conduct and README docs
- MS-567 - Add NOTICE.md
- MS-569 - Complete the NOTICE.md
- MS-575 - Add Clang-format & Clang-tidy & Cpplint
- MS-586 - Remove BUILD_FAISS_WITH_MKL option
- MS-590 - Refine cmake code to support cpplint
- MS-600 - Reconstruct unittest code
- MS-602 - Remove zilliz namespace
- MS-610 - Change error code base value from hex to decimal
- MS-624 - Re-organize project directory for open-source
- MS-635 - Add compile option to support customized faiss
- MS-660 - add ubuntu_build_deps.sh
- \#18 - Add all test cases
# Milvus 0.4.0 (2019-09-12)
## Bug
- MS-119 - The problem of combining the log files
- MS-121 - The problem that user can't change the time zone
- MS-411 - Fix metric unittest linking error
- MS-412 - Fix gpu cache logical error
- MS-416 - ExecutionEngineImpl::GpuCache has not return value cause crash
- MS-417 - YAML sequence load disable cause scheduler startup failed
- MS-413 - Create index failed and server exited
- MS-427 - Describe index error after drop index
- MS-432 - Search vectors params nprobe need to check max number
- MS-431 - Search vectors params nprobe: 0/-1, expected result: raise exception
- MS-331 - Create Table: when table exists, error code is META_FAILED(code=15) rather than ILLEGAL TABLE NAME(code=9)
- MS-430 - Search no result if index created with FLAT
- MS-443 - Create index hang again
- MS-436 - Delete vectors failed if index created with index_type: IVF_FLAT/IVF_SQ8
- MS-449 - Add vectors twice success, once with ids, the other no ids
- MS-450 - server hang after run stop_server.sh
- MS-458 - Keep building index for one file when no gpu resource
- MS-461 - Mysql meta unittest failed
- MS-462 - Run milvus server twice, should display error
- MS-463 - Search timeout
- MS-467 - mysql db test failed
- MS-470 - Drop index success, which table not created
- MS-471 - code coverage run failed
- MS-492 - Drop index failed if index have been created with index_type: FLAT
- MS-493 - Knowhere unittest crash
- MS-453 - GPU search error when nprobe set more than 1024
- MS-474 - Create index hang if use branch-0.3.1 server config
- MS-510 - unittest out of memory and crashed
- MS-507 - Dataset 10m-512, index type SQ8: performance abnormal when CPU_CACHE is set to 16 or 64
- MS-543 - SearchTask fail without exception
- MS-582 - grafana displays changes frequently
## Improvement
- MS-327 - Clean code for milvus
- MS-336 - Scheduler interface
- MS-344 - Add TaskTable Test
- MS-345 - Add Node Test
- MS-346 - Add some implementation of scheduler to solve compile error
- MS-348 - Add ResourceFactory Test
- MS-350 - Remove knowhere submodule
- MS-354 - Add task class and interface in scheduler
- MS-355 - Add copy interface in ExecutionEngine
- MS-357 - Add minimum schedule function
- MS-359 - Add cost test in new scheduler
- MS-361 - Add event in resource
- MS-364 - Modify tasktableitem in tasktable
- MS-365 - Use tasktableitemptr instead in event
- MS-366 - Implement TaskTable
- MS-368 - Implement cost.cpp
- MS-371 - Add TaskTableUpdatedEvent
- MS-373 - Add resource test
- MS-374 - Add action definition
- MS-375 - Add Dump implementation for Event
- MS-376 - Add loader and executor enable flag in Resource avoid diskresource execute task
- MS-377 - Improve process thread trigger in ResourceMgr, Scheduler and TaskTable
- MS-378 - Debug and Update normal_test in scheduler unittest
- MS-379 - Add Dump implementation in Resource
- MS-380 - Update resource loader and executor, work until all finished
- MS-383 - Modify condition variable usage in scheduler
- MS-384 - Add global instance of ResourceMgr and Scheduler
- MS-389 - Add clone interface in Task
- MS-390 - Update resource construct function
- MS-391 - Add PushTaskToNeighbourHasExecutor action
- MS-394 - Update scheduler unittest
- MS-400 - Add timestamp record in task state change function
- MS-402 - Add dump implementation for TaskTableItem
- MS-406 - Add table flag for meta
- MS-403 - Add GpuCacheMgr
- MS-404 - Release index after search task done avoid memory increment continues
- MS-405 - Add delete task support
- MS-407 - Reconstruct MetricsCollector
- MS-408 - Add device_id in resource construct function
- MS-409 - Using new scheduler
- MS-413 - Remove thrift dependency
- MS-410 - Add resource config comment
- MS-414 - Add TaskType in Scheduler::Task
- MS-415 - Add command tasktable to dump all tasktables
- MS-418 - Update server_config.template file, set CPU compute only default
- MS-419 - Move index_file_size from IndexParam to TableSchema
- MS-421 - Add TaskLabel in scheduler
- MS-422 - Support DeleteTask in Multi-GpuResource case
- MS-428 - Add PushTaskByDataLocality in scheduler
- MS-440 - Add DumpTaskTables in sdk
- MS-442 - Merge Knowhere
- MS-445 - Rename CopyCompleted to LoadCompleted
- MS-451 - Update server_config.template file, set GPU compute default
- MS-455 - Distribute tasks by minimal cost in scheduler
- MS-460 - Put transport speed as weight when choosing neighbour to execute task
- MS-459 - Add cache for pick function in tasktable
- MS-476 - Improve search performance
- MS-482 - Change search stream transport to unary in grpc
- MS-487 - Define metric type in CreateTable
- MS-488 - Improve code format in scheduler
- MS-495 - cmake: integrated knowhere
- MS-496 - Change the top_k limitation from 1024 to 2048
- MS-502 - Update tasktable_test in scheduler
- MS-504 - Update node_test in scheduler
- MS-505 - Install core unit test and add to coverage
- MS-508 - Update normal_test in scheduler
- MS-532 - Add grpc server unittest
- MS-511 - Update resource_test in scheduler
- MS-517 - Update resource_mgr_test in scheduler
- MS-518 - Add schedinst_test in scheduler
- MS-519 - Add event_test in scheduler
- MS-520 - Update resource_test in scheduler
- MS-524 - Add some unittest in event_test and resource_test
- MS-525 - Disable parallel reduce in SearchTask
- MS-527 - Update scheduler_test and enable it
- MS-528 - Hide some config used future
- MS-530 - Add unittest for SearchTask->Load
- MS-531 - Disable next version code
- MS-533 - Update resource_test to cover dump function
- MS-523 - Config file validation
- MS-539 - Remove old task code
- MS-546 - Add simple mode resource_config
- MS-570 - Add prometheus docker-compose file
- MS-576 - Scheduler refactor
- MS-592 - Change showtables stream transport to unary
## Feature
- MS-343 - Implement ResourceMgr
- MS-338 - NewAPI: refine code to support CreateIndex
- MS-339 - NewAPI: refine code to support DropIndex
- MS-340 - NewAPI: implement DescribeIndex
## Task
- MS-297 - disable mysql unit test
# Milvus 0.3.1 (2019-07-10)
## Bug
- MS-148 - Disable cleanup if mode is read only
- MS-149 - Fixed searching only one index file issue in distributed mode
- MS-153 - Fix c_str error when connecting to MySQL
- MS-157 - Fix changelog
- MS-190 - Use env variable to switch mem manager and fix cmake
- MS-217 - Fix SQ8 row count bug
- MS-224 - Return AlreadyExist status in MySQLMetaImpl::CreateTable if table already exists
- MS-232 - Add MySQLMetaImpl::UpdateTableFilesToIndex and set maximum_memory to default if config value = 0
- MS-233 - Remove mem manager log
- MS-230 - Change parameter name: Maximum_memory to insert_buffer_size
- MS-234 - Some case cause background merge thread stop
- MS-235 - Some test cases random fail
- MS-236 - Add MySQLMetaImpl::HasNonIndexFiles
- MS-257 - Update bzip2 download url
- MS-288 - Update compile scripts
- MS-330 - Stability test failed caused by server core dumped
- MS-347 - Build index hangs again
- MS-382 - fix MySQLMetaImpl::CleanUpFilesWithTTL unknown column bug
## Improvement
- MS-156 - Add unittest for merge result functions
- MS-152 - Delete assert in MySQLMetaImpl and change MySQLConnectionPool impl
- MS-204 - Support multi db_path
- MS-206 - Support SQ8 index type
- MS-208 - Add build index interface for C++ SDK
- MS-212 - Support Inner product metric type
- MS-241 - Build Faiss with MKL if using Intel CPU; else build with OpenBlas
- MS-242 - Clean up cmake and change MAKE_BUILD_ARGS to be user defined variable
- MS-245 - Improve search result transfer performance
- MS-248 - Support AddVector/SearchVector profiling
- MS-256 - Add more cache config
- MS-260 - Refine log
- MS-249 - Check machine hardware during initialize
- MS-261 - Update faiss version to 1.5.3 and add BUILD_FAISS_WITH_MKL as an option
- MS-266 - Improve topk reduce time by using multi-threads
- MS-275 - Avoid sqlite logic error exception
- MS-278 - add IndexStatsHelper
- MS-313 - add GRPC
- MS-325 - add grpc status return for C++ sdk and modify some format
- MS-278 - Add IndexStatsHelper
- MS-312 - Set openmp thread number by config
- MS-305 - Add CPU core percent metric
- MS-310 - Add milvus CPU utilization ratio and CPU/GPU temperature metrics
- MS-324 - Show error when there is not enough gpu memory to build index
- MS-328 - Check metric type on server start
- MS-332 - Set grpc and thrift server run concurrently
- MS-352 - Add hybrid index
## Feature
- MS-180 - Add new mem manager
- MS-195 - Add nlist and use_blas_threshold conf
- MS-137 - Integrate knowhere
## Task
- MS-125 - Create 0.3.1 release branch
- MS-306 - Optimize build efficiency
# Milvus 0.3.0 (2019-06-30)
## Bug
- MS-104 - Fix unittest lcov execution error
- MS-102 - Fix build script file condition error
- MS-80 - Fix server hang issue
- MS-89 - Fix compile failed, libgpufaiss.a link missing
- MS-90 - Fix arch match incorrect on ARM
- MS-99 - Fix compilation bug
- MS-110 - Avoid huge file size
## Improvement
- MS-82 - Update server startup welcome message
- MS-83 - Update vecwise to Milvus
- MS-77 - Performance issue of post-search action
- MS-22 - Enhancement for MemVector size control
- MS-92 - Unify behavior of debug and release build
- MS-98 - Install all unit test to installation directory
- MS-115 - Change is_startup of metric_config switch from true to on
- MS-122 - Archive criteria config
- MS-124 - HasTable interface
- MS-126 - Add more error code
- MS-128 - Change default db path
## Feature
- MS-57 - Implement index load/search pipeline
- MS-56 - Add version information when server is started
- MS-64 - Different table can have different index type
- MS-52 - Return search score
- MS-66 - Support time range query
- MS-68 - Remove rocksdb from third-party
- MS-70 - cmake: remove redundant libs in src
- MS-71 - cmake: fix faiss dependency
- MS-72 - cmake: change prometheus source to git
- MS-73 - cmake: delete civetweb
- MS-65 - Implement GetTableRowCount interface
- MS-45 - Implement DeleteTable interface
- MS-75 - cmake: change faiss version to 1.5.2; add CUDA gencode
- MS-81 - fix faiss ptx issue; change cuda gencode
- MS-84 - cmake: add arrow, jemalloc and jsoncons third party; default build option OFF
- MS-85 - add NetIO metric
- MS-96 - add new query interface for specified files
- MS-97 - Add S3 SDK for MinIO Storage
- MS-105 - Add MySQL
- MS-130 - Add prometheus_test
- MS-144 - Add nprobe config
- MS-147 - Enable IVF
- MS-130 - Add prometheus_test
## Task
- MS-74 - Change README.md in cpp
- MS-88 - Add support for arm architecture
# Milvus 0.2.0 (2019-05-31)
## Bug
- MS-32 - Fix thrift error
- MS-34 - Fix prometheus-cpp thirdparty
- MS-67 - Fix license check bug
- MS-76 - Fix pipeline crash bug
- MS-100 - cmake: fix AWS build issue
- MS-101 - change AWS build type to Release
## Improvement
- MS-20 - Clean Code Part 1
## Feature
- MS-5 - Implement Auto Archive Feature
- MS-6 - Implement SDK interface part 1
- MS-16 - Implement metrics without prometheus
- MS-21 - Implement SDK interface part 2
- MS-26 - cmake. Add thirdparty packages
- MS-31 - cmake: add prometheus
- MS-33 - cmake: add -j4 to make third party packages build faster
- MS-27 - support gpu config and disable license build config in cmake
- MS-47 - Add query vps metrics
- MS-37 - Add query, cache usage, disk write speed and file data size metrics
- MS-30 - Use faiss v1.5.2
- MS-54 - cmake: Change Thrift third party URL to github.com
- MS-69 - prometheus: add all proposed metrics
## Task
- MS-1 - Add CHANGELOG.md
- MS-4 - Refactor the vecwise_engine code structure
- MS-62 - Search range to all if no date specified

CODE_OF_CONDUCT.md Normal file
@@ -0,0 +1,48 @@
# Milvus Code of Conduct
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
- Using welcoming and inclusive language.
- Being respectful of differing viewpoints and experiences.
- Gracefully accepting constructive criticism.
- Focusing on what is best for the community.
- Showing empathy towards other community members.
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual attention or advances.
- Trolling, insulting/derogatory comments, and personal or political attacks.
- Public or private harassment.
- Publishing others' private information, such as a physical or electronic address, without explicit permission.
- Conduct which could reasonably be considered inappropriate for the forum in which it occurs.
All Milvus forums and spaces are meant for professional interactions, and any behavior which could reasonably be considered inappropriate in a professional setting is unacceptable.
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies to all content on milvus.io, Milvus's GitHub organization, or any other official Milvus web presence allowing for community interactions, as well as at all official Milvus events, whether offline or online.
The Code of Conduct also applies within all project spaces and in public spaces whenever an individual is representing Milvus or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed or de facto representative at an online or offline event. Representation of Milvus may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at support@zilliz.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/), version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq

CONTRIBUTING.md Normal file
@@ -0,0 +1,118 @@
# Contributing to Milvus
First of all, thanks for taking the time to contribute to Milvus! It's people like you that help Milvus come to fruition. :tada:
The following is a set of guidelines for contributing to Milvus. Following these guidelines makes contributing to this project easy and transparent. These are mostly guidelines, not rules. Use your best judgment, and feel free to propose changes to this document in a pull request.
As with everything else in the project, contributions to Milvus are governed by our [Code of Conduct](CODE_OF_CONDUCT.md).
## Contribution Checklist
Before you make any contributions, make sure you follow this list.
- Read [Contributing to Milvus](CONTRIBUTING.md).
- Check if the changes are consistent with the [coding style](CONTRIBUTING.md#coding-style), and format your code accordingly.
- Run [unit tests](CONTRIBUTING.md#run-unit-test-with-code-coverage) and check your code coverage rate.
## What contributions can I make?
Contributions to Milvus fall into the following categories.
1. To report a bug or a problem with documentation, please file an [issue](https://github.com/milvus-io/milvus/issues/new/choose) providing the details of the problem. If you believe the issue needs priority attention, please comment on the issue to notify the team.
2. To propose a new feature, please file a new feature request [issue](https://github.com/milvus-io/milvus/issues/new/choose). Describe the intended feature and discuss the design and implementation with the team and community. Once the team agrees that the plan looks good, go ahead and implement it, following the [Contributing code](CONTRIBUTING.md#contributing-code) instructions.
3. To implement a feature or a bug fix for an existing outstanding issue, follow the [Contributing code](CONTRIBUTING.md#contributing-code) instructions. If you need more context on a particular issue, comment on the issue to let people know.
## How can I contribute?
### Contributing code
If you have improvements to Milvus, send us your pull requests! For those just getting started, see [GitHub workflow](#github-workflow).
The Milvus team members will review your pull request, and once it is accepted, it will be given a `ready to merge` label. This means we are working on submitting your pull request to the internal repository. After the change has been submitted internally, your pull request will be merged automatically on GitHub.
### GitHub workflow
Please create a new branch from an up-to-date master on your fork.
1. Fork the repository on GitHub.
2. Clone your fork to your local machine with `git clone git@github.com:<yourname>/milvus.git`.
3. Create a branch with `git checkout -b my-topic-branch`.
4. Make your changes, commit, then push to GitHub with `git push --set-upstream origin my-topic-branch`.
5. Visit GitHub and make your pull request.
If you have an existing local repository, please update it before you start, to minimize the chance of merge conflicts.
```shell
git remote add upstream git@github.com:milvus-io/milvus.git
git checkout master
git pull upstream master
git checkout -b my-topic-branch
```
### General guidelines
Before sending your pull requests for review, make sure your changes are consistent with the guidelines and follow the Milvus coding style.
- Include unit tests when you contribute new features, as they help to prove that your code works correctly and guard against future breaking changes that would raise the maintenance cost (see the sketch after this list).
- Bug fixes also require unit tests, because the presence of bugs usually indicates insufficient test coverage.
- Keep API compatibility in mind when you change code in Milvus. Reviewers of your pull request will comment on any API compatibility issues.
- When you contribute a new feature to Milvus, the maintenance burden is (by default) transferred to the Milvus team. This means that the benefit of the contribution must be compared against the cost of maintaining the feature.
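As a hedged sketch of the kind of unit test meant here (the helper and test names are invented for this example), a minimal googletest case might look like this:
```cpp
#include <gtest/gtest.h>

#include <cstdint>
#include <vector>

// Hypothetical helper under test; in a real contribution this would be the
// public interface of your new feature or bug fix.
static int64_t
CountVectors(const std::vector<float>& data, int64_t dimension) {
    return dimension == 0 ? 0 : static_cast<int64_t>(data.size()) / dimension;
}

TEST(CountVectorsTest, HandlesEmptyInput) {
    EXPECT_EQ(0, CountVectors({}, 128));
}

TEST(CountVectorsTest, ComputesRowCount) {
    std::vector<float> data(256, 0.0f);
    EXPECT_EQ(2, CountVectors(data, 128));
}
```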
## Coding Style
The coding style used in Milvus generally follows the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
We made the following changes based on the guide (illustrated in the sketch after this list):
- 4 spaces for indentation
- Adopt .cpp file extension instead of .cc extension
- 120-character line length
- Camel-Cased file names
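A small, hypothetical sketch of these conventions taken together (a Camel-Cased `.cpp` file, 4-space indentation, lines up to 120 characters):
```cpp
// VectorUtils.cpp: a Camel-Cased file name with the .cpp extension
#include <vector>

float
SumElements(const std::vector<float>& values) {
    float total = 0.0f;  // 4-space indentation, per the style changes above
    for (const float value : values) {
        total += value;
    }
    return total;
}
```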
### Format code
Install clang-format
```shell
$ sudo apt-get install clang-format
$ rm cmake_build/CMakeCache.txt  # clear the CMake cache so the next configure re-detects clang-format
```
Check code style
```shell
$ ./build.sh -l
```
To format the code
```shell
$ cd cmake_build
$ make clang-format
```
## Run unit test with code coverage
Before submitting your PR, make sure you have run the unit tests and that your code coverage rate is >= 90%.
Install lcov
```shell
$ sudo apt-get install lcov
```
Run the unit tests and generate code coverage data
```shell
$ ./build.sh -u -c
```
Run MySQL in Docker
```shell
$ docker pull mysql:latest
$ docker run -p 3306:3306 -e MYSQL_ROOT_PASSWORD=123456 -d mysql:latest
```
Run code coverage
```shell
$ ./coverage.sh -u root -p 123456 -t 127.0.0.1
```
Or start your own MySQL server, and then run code coverage
```shell
$ ./coverage.sh -u ${MYSQL_USERNAME} -p ${MYSQL_PASSWORD} -t ${MYSQL_SERVER_IP}
```

NOTICE.md Normal file
@@ -0,0 +1,30 @@
| Name | License |
| ------------- | ------------------------------------------------------------ |
| Apache Arrow | [Apache License 2.0](https://github.com/apache/arrow/blob/master/LICENSE.txt) |
| Boost | [Boost Software License](https://github.com/boostorg/boost/blob/master/LICENSE_1_0.txt) |
| BZip2 | [BZip2](http://www.bzip.org/) |
| FAISS | [MIT](https://github.com/facebookresearch/faiss/blob/master/LICENSE) |
| Gtest | [BSD 3-Clause](https://github.com/google/googletest/blob/master/LICENSE) |
| LAPACK | [LAPACK](https://github.com/Reference-LAPACK/lapack/blob/master/LICENSE) |
| LZ4 | [BSD 2-Clause](https://github.com/Blosc/c-blosc/blob/master/LICENSES/LZ4.txt) |
| MySQLPP | [LGPL 2.1](https://tangentsoft.com/mysqlpp/artifact/b128a66dab867923) |
| OpenBLAS | [BSD 3-Clause](https://github.com/xianyi/OpenBLAS/blob/develop/LICENSE) |
| Prometheus | [Apache License 2.0](https://github.com/prometheus/prometheus/blob/master/LICENSE) |
| Snappy | [BSD](https://github.com/google/snappy/blob/master/COPYING) |
| SQLite | [Public Domain](https://www.sqlite.org/copyright.html) |
| SQLite-ORM | [BSD 3-Clause](https://github.com/fnc12/sqlite_orm/blob/master/LICENSE) |
| yaml-cpp | [MIT](https://github.com/jbeder/yaml-cpp/blob/master/LICENSE) |
| ZLIB | [ZLIB](http://zlib.net/zlib_license.html) |
| ZSTD | [BSD](https://github.com/facebook/zstd/blob/dev/LICENSE) |
| libunwind | [MIT](https://github.com/libunwind/libunwind/blob/master/LICENSE) |
| gperftools | [BSD 3-Clause](https://github.com/gperftools/gperftools/blob/master/COPYING) |
| grpc | [Apache 2.0](https://github.com/grpc/grpc/blob/master/LICENSE) |
| EASYLOGGINGPP | [MIT](https://github.com/zuhd-org/easyloggingpp/blob/master/LICENSE) |
| Json | [MIT](https://github.com/nlohmann/json/blob/develop/LICENSE.MIT) |

README.md
@@ -1,6 +1,207 @@
![Milvuslogo](https://github.com/milvus-io/docs/blob/master/assets/milvus_logo.png)
![LICENSE](https://img.shields.io/badge/license-Apache--2.0-brightgreen)
![Language](https://img.shields.io/badge/language-C%2B%2B-blue)
[![codebeat badge](https://codebeat.co/badges/e030a4f6-b126-4475-a938-4723d54ec3a7?style=plastic)](https://codebeat.co/projects/github-com-jinhai-cn-milvus-master)
- [Slack Community](https://join.slack.com/t/milvusio/shared_invite/enQtNzY1OTQ0NDI3NjMzLWNmYmM1NmNjOTQ5MGI5NDhhYmRhMGU5M2NhNzhhMDMzY2MzNDdlYjM5ODQ5MmE3ODFlYzU3YjJkNmVlNDQ2ZTk)
- [Twitter](https://twitter.com/milvusio)
- [Facebook](https://www.facebook.com/io.milvus.5)
- [Blog](https://www.milvus.io/blog/)
- [CSDN](https://zilliz.blog.csdn.net/)
- [中文官网](https://www.milvus.io/zh-CN/)
# Welcome to Milvus
## What is Milvus
Milvus is an open source similarity search engine for massive-scale feature vectors. It is designed with a heterogeneous computing architecture for the best cost efficiency: searches over billion-scale vectors take only milliseconds with minimal computing resources.
Milvus provides stable Python, Java and C++ APIs.
Keep up to date with the newest releases and latest updates by reading the Milvus [release notes](https://milvus.io/docs/en/releases/v0.5.0/).
- Heterogeneous computing
Milvus is designed with a heterogeneous computing architecture for the best performance and cost efficiency.
- Multiple indexes
Milvus supports a variety of index types that employ quantization-based, tree-based, and graph-based indexing techniques.
- Intelligent resource management
Milvus automatically adapts search computation and index building processes based on your datasets and available resources.
- Horizontal scalability
Milvus supports online/offline expansion to scale both storage and computation resources with simple commands.
- High availability
Milvus is integrated with the Kubernetes framework so that single points of failure can be avoided.
- High compatibility
Milvus is compatible with almost all deep learning models and major programming languages such as Python, Java, and C++.
- Ease of use
Milvus can be installed in a few simple steps and lets you focus exclusively on feature vectors.
- Visualized monitoring
You can track system performance on Prometheus-based GUI monitoring dashboards.
## Architecture
![Milvus_arch](https://github.com/milvus-io/docs/blob/master/assets/milvus_arch.png)
## Get started
### Hardware Requirements
| Component | Recommended configuration |
| --------- | ----------------------------------- |
| CPU | Intel CPU Haswell or higher |
| GPU | NVIDIA Pascal series or higher |
| Memory | 8 GB or more (depends on data size) |
| Storage | SATA 3.0 SSD or higher |
### Install using Docker
Installing Milvus with Docker is a breeze. See the [Milvus install guide](https://milvus.io/docs/en/userguide/install_milvus/) for details.
### Build from source
#### Software requirements
- Ubuntu 18.04 or higher
- CMake 3.14 or higher
- CUDA 10.0 or higher
- NVIDIA driver 418 or higher
#### Compilation
##### Step 1 Install dependencies
```shell
$ cd [Milvus sourcecode path]/core
$ ./ubuntu_build_deps.sh
```
##### Step 2 Build
```shell
$ cd [Milvus sourcecode path]/core
$ ./build.sh -t Debug
# or
$ ./build.sh -t Release
```
When the build is complete, everything you need to run Milvus is installed under `[Milvus root path]/core/milvus`.
#### Launch Milvus server
```shell
$ cd [Milvus root path]/core/milvus
```
Add the `lib/` directory to `LD_LIBRARY_PATH`:
```shell
$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/milvus/lib
```
Then start Milvus server:
```shell
$ cd scripts
$ ./start_server.sh
```
To stop Milvus server, run:
```shell
$ ./stop_server.sh
```
To learn how to edit Milvus settings in `conf/server_config.yaml` and `conf/log_config.conf`, read [Milvus Configuration](https://github.com/milvus-io/docs/blob/master/reference/milvus_config.md).
### Try your first Milvus program
#### Run Python example code
Make sure [Python 3.5](https://www.python.org/downloads/) or higher is already installed and in use.
Install Milvus Python SDK.
```shell
# Install Milvus Python SDK
$ pip install pymilvus==0.2.3
```
Create a new file `example.py`, and add [Python example code](https://github.com/milvus-io/pymilvus/blob/master/examples/AdvancedExample.py) to it.
Run the example code.
```shell
# Run Milvus Python example
$ python3 example.py
```
#### Run C++ example code
```shell
# Run Milvus C++ example
$ cd [Milvus root path]/core/milvus/bin
$ ./sdk_simple
```
#### Run Java example code
Make sure Java 8 or higher is already installed.
Refer to [this link](https://github.com/milvus-io/milvus-sdk-java/tree/master/examples) for the example code.
## Contribution guidelines
Contributions are welcome and greatly appreciated. If you want to contribute to Milvus, please read our [contribution guidelines](CONTRIBUTING.md). This project adheres to the [code of conduct](CODE_OF_CONDUCT.md) of Milvus. By participating, you are expected to uphold this code.
We use [GitHub issues](https://github.com/milvus-io/milvus/issues/new/choose) to track issues and bugs. For general questions and public discussions, please join our community.
## Join the Milvus community
To connect with other users and contributors, you are welcome to join our [Slack channel](https://join.slack.com/t/milvusio/shared_invite/enQtNzY1OTQ0NDI3NjMzLWNmYmM1NmNjOTQ5MGI5NDhhYmRhMGU5M2NhNzhhMDMzY2MzNDdlYjM5ODQ5MmE3ODFlYzU3YjJkNmVlNDQ2ZTk).
## Milvus Roadmap
Please read our [roadmap](https://milvus.io/docs/en/roadmap/) to learn about upcoming features.
## Resources
[Milvus official website](https://www.milvus.io)
[Milvus docs](https://www.milvus.io/docs/en/userguide/install_milvus/)
[Milvus bootcamp](https://github.com/milvus-io/bootcamp)
[Milvus blog](https://www.milvus.io/blog/)
[Milvus CSDN](https://zilliz.blog.csdn.net/)
[Milvus roadmap](https://milvus.io/docs/en/roadmap/)
## License
[Apache 2.0 license](LICENSE)
=======
![LICENSE](https://img.shields.io/badge/license-Apache--2.0-brightgreen.svg)
![Language](https://img.shields.io/badge/language-C%2B%2B-blue.svg)
![Release](https://img.shields.io/badge/Release-v0.5.0-orange.svg)
![Release date](https://img.shields.io/badge/release__date-October-yellowgreen)
# Milvus is coming soon!

@@ -0,0 +1,10 @@
def FileTransfer (sourceFiles, remoteDirectory, remoteIP, protocol = "ftp", makeEmptyDirs = true) {
if (protocol == "ftp") {
ftpPublisher masterNodeName: '', paramPublish: [parameterName: ''], alwaysPublishFromMaster: false, continueOnError: false, failOnError: true, publishers: [
[configName: "${remoteIP}", transfers: [
[asciiMode: false, cleanRemote: false, excludes: '', flatten: false, makeEmptyDirs: "${makeEmptyDirs}", noDefaultExcludes: false, patternSeparator: '[, ]+', remoteDirectory: "${remoteDirectory}", remoteDirectorySDF: false, removePrefix: '', sourceFiles: "${sourceFiles}"]], usePromotionTimestamp: true, useWorkspaceInPromotion: false, verbose: true
]
]
}
}
return this

ci/jenkins/Jenkinsfile vendored Normal file
@@ -0,0 +1,152 @@
pipeline {
agent none
options {
timestamps()
}
parameters{
choice choices: ['Release', 'Debug'], description: '', name: 'BUILD_TYPE'
string defaultValue: 'cf1434e7-5a4b-4d25-82e8-88d667aef9e5', description: 'GIT CREDENTIALS ID', name: 'GIT_CREDENTIALS_ID', trim: true
string defaultValue: 'registry.zilliz.com', description: 'DOCKER REGISTRY URL', name: 'DOKCER_REGISTRY_URL', trim: true
string defaultValue: 'ba070c98-c8cc-4f7c-b657-897715f359fc', description: 'DOCKER CREDENTIALS ID', name: 'DOCKER_CREDENTIALS_ID', trim: true
string defaultValue: 'http://192.168.1.202/artifactory/milvus', description: 'JFROG ARTFACTORY URL', name: 'JFROG_ARTFACTORY_URL', trim: true
string defaultValue: '1a527823-d2b7-44fd-834b-9844350baf14', description: 'JFROG CREDENTIALS ID', name: 'JFROG_CREDENTIALS_ID', trim: true
}
environment {
PROJECT_NAME = "milvus"
LOWER_BUILD_TYPE = params.BUILD_TYPE.toLowerCase()
SEMVER = "${BRANCH_NAME}"
JOBNAMES = env.JOB_NAME.split('/')
PIPELINE_NAME = "${JOBNAMES[0]}"
}
stages {
stage("Ubuntu 18.04") {
environment {
OS_NAME = "ubuntu18.04"
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-ubuntu18.04-x86_64-${BUILD_DATE_FORMATTED, "yyyyMMdd"}-${BUILDS_TODAY}'
]);
DOCKER_VERSION = "${SEMVER}-${OS_NAME}-${LOWER_BUILD_TYPE}"
}
stages {
stage("Run Build") {
agent {
kubernetes {
label 'build'
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/milvus-build-env-pod.yaml'
}
}
stages {
stage('Build') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/build.groovy"
}
}
}
}
stage('Code Coverage') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/coverage.groovy"
}
}
}
}
stage('Upload Package') {
steps {
container('milvus-build-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/package.groovy"
}
}
}
}
}
}
stage("Publish docker images") {
agent {
kubernetes {
label 'publish'
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/docker-pod.yaml'
}
}
stages {
stage('Publish') {
steps {
container('publish-images'){
script {
load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/publishImages.groovy"
}
}
}
}
}
}
stage("Deploy to Development") {
agent {
kubernetes {
label 'dev-test'
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
}
}
stages {
stage("Deploy to Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/deploySingle2Dev.groovy"
}
}
}
}
stage("Dev Test") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/singleDevTest.groovy"
}
}
}
}
stage ("Cleanup Dev") {
steps {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"
}
}
}
}
}
post {
unsuccessful {
container('milvus-test-env') {
script {
load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"
}
}
}
}
}
}
}
}
}

@@ -0,0 +1,9 @@
timeout(time: 60, unit: 'MINUTES') {
dir ("ci/jenkins/scripts") {
sh "./build.sh -l"
withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' && export JFROG_USER_NAME='${USERNAME}' && export JFROG_PASSWORD='${PASSWORD}' && ./build.sh -t ${params.BUILD_TYPE} -o /opt/milvus -d /opt/milvus -j -u -c"
}
}
}

@@ -0,0 +1,9 @@
try {
sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
} catch (exc) {
def helmResult = sh script: "helm status ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu", returnStatus: true
if (!helmResult) {
sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
}
throw exc
}

@@ -0,0 +1,10 @@
timeout(time: 60, unit: 'MINUTES') {
dir ("ci/jenkins/scripts") {
sh "./coverage.sh -o /opt/milvus -u root -p 123456 -t \$POD_IP"
// Set some env variables so codecov detection script works correctly
withCredentials([[$class: 'StringBinding', credentialsId: "${env.PIPELINE_NAME}-codecov-token", variable: 'CODECOV_TOKEN']]) {
sh 'curl -s https://codecov.io/bash | bash -s - -f output_new.info || echo "Codecov did not collect coverage reports"'
}
}
}

@@ -0,0 +1,14 @@
try {
sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo update'
dir ('milvus-helm') {
checkout([$class: 'GitSCM', branches: [[name: "0.5.0"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_CREDENTIALS_ID}", url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.0:refs/remotes/origin/0.5.0"]]])
dir ("milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/values.yaml --namespace milvus ."
}
}
} catch (exc) {
echo 'Helm running failed!'
sh "helm del --purge ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu"
throw exc
}

@@ -0,0 +1,9 @@
timeout(time: 5, unit: 'MINUTES') {
sh "tar -zcvf ./${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz -C /opt/ milvus"
withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
def uploadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -T ./${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz ${params.JFROG_ARTFACTORY_URL}/milvus/package/${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz")
if (uploadStatus != 0) {
error("\" ${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz \" upload to \" ${params.JFROG_ARTFACTORY_URL}/milvus/package/${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz \" failed!")
}
}
}

@@ -0,0 +1,47 @@
container('publish-images') {
timeout(time: 15, unit: 'MINUTES') {
dir ("docker/deploy/${OS_NAME}") {
def binaryPackage = "${PROJECT_NAME}-${PACKAGE_VERSION}.tar.gz"
withCredentials([usernamePassword(credentialsId: "${params.JFROG_CREDENTIALS_ID}", usernameVariable: 'JFROG_USERNAME', passwordVariable: 'JFROG_PASSWORD')]) {
def downloadStatus = sh(returnStatus: true, script: "curl -u${JFROG_USERNAME}:${JFROG_PASSWORD} -O ${params.JFROG_ARTFACTORY_URL}/milvus/package/${binaryPackage}")
if (downloadStatus != 0) {
error("\" Download \" ${params.JFROG_ARTFACTORY_URL}/milvus/package/${binaryPackage} \" failed!")
}
}
sh "tar zxvf ${binaryPackage}"
def imageName = "${PROJECT_NAME}/engine:${DOCKER_VERSION}"
try {
def isExistSourceImage = sh(returnStatus: true, script: "docker inspect --type=image ${imageName} > /dev/null 2>&1")
if (isExistSourceImage == 0) {
def removeSourceImageStatus = sh(returnStatus: true, script: "docker rmi ${imageName}")
}
def customImage = docker.build("${imageName}")
def isExistTargeImage = sh(returnStatus: true, script: "docker inspect --type=image ${params.DOKCER_REGISTRY_URL}/${imageName} > /dev/null 2>&1")
if (isExistTargeImage == 0) {
def removeTargeImageStatus = sh(returnStatus: true, script: "docker rmi ${params.DOKCER_REGISTRY_URL}/${imageName}")
}
docker.withRegistry("https://${params.DOKCER_REGISTRY_URL}", "${params.DOCKER_CREDENTIALS_ID}") {
customImage.push()
}
} catch (exc) {
throw exc
} finally {
def isExistSourceImage = sh(returnStatus: true, script: "docker inspect --type=image ${imageName} > /dev/null 2>&1")
if (isExistSourceImage == 0) {
def removeSourceImageStatus = sh(returnStatus: true, script: "docker rmi ${imageName}")
}
def isExistTargeImage = sh(returnStatus: true, script: "docker inspect --type=image ${params.DOKCER_REGISTRY_URL}/${imageName} > /dev/null 2>&1")
if (isExistTargeImage == 0) {
def removeTargeImageStatus = sh(returnStatus: true, script: "docker rmi ${params.DOKCER_REGISTRY_URL}/${imageName}")
}
}
}
}
}

@@ -0,0 +1,22 @@
timeout(time: 30, unit: 'MINUTES') {
dir ("tests/milvus_python_test") {
sh 'python3 -m pip install -r requirements.txt'
sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
}
// mysql database backend test
load "${env.WORKSPACE}/ci/jenkins/jenkinsfile/cleanupSingleDev.groovy"
if (!fileExists('milvus-helm')) {
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "0.5.0"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_CREDENTIALS_ID}", url: "https://github.com/milvus-io/milvus-helm.git", name: 'origin', refspec: "+refs/heads/0.5.0:refs/remotes/origin/0.5.0"]]])
}
}
dir ("milvus-helm") {
dir ("milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu -f ci/db_backend/mysql_values.yaml --namespace milvus ."
}
}
dir ("tests/milvus_python_test") {
sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.PIPELINE_NAME}-${env.BUILD_NUMBER}-single-gpu-milvus-gpu-engine.milvus.svc.cluster.local"
}
}

@@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: publish
component: docker
spec:
containers:
- name: publish-images
image: registry.zilliz.com/library/docker:v1.0.0
securityContext:
privileged: true
command:
- cat
tty: true
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
volumes:
- name: docker-sock
hostPath:
path: /var/run/docker.sock

@@ -0,0 +1,35 @@
apiVersion: v1
kind: Pod
metadata:
name: milvus-build-env
labels:
app: milvus
component: build-env
spec:
containers:
- name: milvus-build-env
image: registry.zilliz.com/milvus/milvus-build-env:v0.5.0-ubuntu18.04
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- cat
tty: true
resources:
limits:
memory: "32Gi"
cpu: "8.0"
nvidia.com/gpu: 1
requests:
memory: "16Gi"
cpu: "4.0"
- name: milvus-mysql
image: mysql:5.6
env:
- name: MYSQL_ROOT_PASSWORD
value: "123456"
ports:
- containerPort: 3306
name: mysql

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
component: test-env
spec:
containers:
- name: milvus-test-env
image: registry.zilliz.com/milvus/milvus-test-env:v0.1
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config

ci/jenkins/scripts/build.sh Executable file
@@ -0,0 +1,142 @@
#!/bin/bash
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
CMAKE_BUILD_DIR="${SCRIPTS_DIR}/../../../core/cmake_build"
BUILD_TYPE="Debug"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX="/opt/milvus"
BUILD_COVERAGE="OFF"
DB_PATH="/opt/milvus"
PROFILING="OFF"
USE_JFROG_CACHE="OFF"
RUN_CPPLINT="OFF"
CUSTOMIZATION="OFF" # default use ori faiss
CUDA_COMPILER=/usr/local/cuda/bin/nvcc
CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}"
# enable the customized FAISS build only if the download URL is reachable
wget -q --method HEAD "${CUSTOMIZED_FAISS_URL}"
if [ $? -eq 0 ]; then
CUSTOMIZATION="ON"
else
CUSTOMIZATION="OFF"
fi
while getopts "o:d:t:ulcgjhx" arg
do
case $arg in
o)
INSTALL_PREFIX=$OPTARG
;;
d)
DB_PATH=$OPTARG
;;
t)
BUILD_TYPE=$OPTARG # BUILD_TYPE
;;
u)
echo "Build and run unittest cases" ;
BUILD_UNITTEST="ON";
;;
l)
RUN_CPPLINT="ON"
;;
c)
BUILD_COVERAGE="ON"
;;
g)
PROFILING="ON"
;;
j)
USE_JFROG_CACHE="ON"
;;
x)
CUSTOMIZATION="OFF" # force use ori faiss
;;
h) # help
echo "
parameter:
-o: install prefix(default: /opt/milvus)
-d: db data path(default: /opt/milvus)
-t: build type(default: Debug)
-u: building unit test options(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-c: code coverage(default: OFF)
-g: profiling(default: OFF)
-j: use jfrog cache build directory(default: OFF)
-x: force to use original FAISS(default: OFF)
-h: help
usage:
./build.sh -o \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-c] [-g] [-j] [-x] [-h]
"
exit 0
;;
?)
echo "ERROR! unknown argument"
exit 1
;;
esac
done
if [[ ! -d ${CMAKE_BUILD_DIR} ]]; then
mkdir ${CMAKE_BUILD_DIR}
fi
cd ${CMAKE_BUILD_DIR}
# remove the make cache since build.sh -l uses default variables;
# force the variables to be updated each time (skip on a fresh build directory)
if [[ -f Makefile ]]; then
make rebuild_cache
fi
CMAKE_CMD="cmake \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DMILVUS_DB_PATH=${DB_PATH} \
-DMILVUS_ENABLE_PROFILING=${PROFILING} \
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
-DCUSTOMIZATION=${CUSTOMIZATION} \
-DFAISS_URL=${CUSTOMIZED_FAISS_URL} \
.."
echo ${CMAKE_CMD}
${CMAKE_CMD}
if [[ ${RUN_CPPLINT} == "ON" ]]; then
# cpplint check
make lint
if [ $? -ne 0 ]; then
echo "ERROR! cpplint check failed"
exit 1
fi
echo "cpplint check passed!"
# clang-format check
make check-clang-format
if [ $? -ne 0 ]; then
echo "ERROR! clang-format check failed"
exit 1
fi
echo "clang-format check passed!"
# # clang-tidy check
# make check-clang-tidy
# if [ $? -ne 0 ]; then
# echo "ERROR! clang-tidy check failed"
# rm -f CMakeCache.txt
# exit 1
# fi
# echo "clang-tidy check passed!"
else
# compile and build
make -j8 || exit 1
make install || exit 1
fi

ci/jenkins/scripts/coverage.sh Executable file
@@ -0,0 +1,138 @@
#!/bin/bash
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
INSTALL_PREFIX="/opt/milvus"
CMAKE_BUILD_DIR="${SCRIPTS_DIR}/../../../core/cmake_build"
MYSQL_USER_NAME=root
MYSQL_PASSWORD=123456
MYSQL_HOST='127.0.0.1'
MYSQL_PORT='3306'
while getopts "o:u:p:t:h" arg
do
case $arg in
o)
INSTALL_PREFIX=$OPTARG
;;
u)
MYSQL_USER_NAME=$OPTARG
;;
p)
MYSQL_PASSWORD=$OPTARG
;;
t)
MYSQL_HOST=$OPTARG
;;
h) # help
echo "
parameter:
-o: milvus install prefix(default: /opt/milvus)
-u: mysql account
-p: mysql password
-t: mysql host
-h: help
usage:
./coverage.sh -o \${INSTALL_PREFIX} -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h]
"
exit 0
;;
?)
echo "ERROR! unknown argument"
exit 1
;;
esac
done
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${INSTALL_PREFIX}/lib
LCOV_CMD="lcov"
# LCOV_GEN_CMD="genhtml"
FILE_INFO_BASE="base.info"
FILE_INFO_MILVUS="server.info"
FILE_INFO_OUTPUT="output.info"
FILE_INFO_OUTPUT_NEW="output_new.info"
DIR_LCOV_OUTPUT="lcov_out"
DIR_GCNO="${CMAKE_BUILD_DIR}"
DIR_UNITTEST="${INSTALL_PREFIX}/unittest"
# delete old code coverage info files
rm -rf lcov_out
rm -f ${FILE_INFO_BASE} ${FILE_INFO_MILVUS} ${FILE_INFO_OUTPUT} ${FILE_INFO_OUTPUT_NEW}
MYSQL_DB_NAME=milvus_`date +%s%N`
function mysql_exc()
{
cmd=$1
mysql -h${MYSQL_HOST} -u${MYSQL_USER_NAME} -p${MYSQL_PASSWORD} -e "${cmd}"
if [ $? -ne 0 ]; then
echo "mysql $cmd run failed"
fi
}
mysql_exc "CREATE DATABASE IF NOT EXISTS ${MYSQL_DB_NAME};"
mysql_exc "GRANT ALL PRIVILEGES ON ${MYSQL_DB_NAME}.* TO '${MYSQL_USER_NAME}'@'%';"
mysql_exc "FLUSH PRIVILEGES;"
mysql_exc "USE ${MYSQL_DB_NAME};"
# get baseline
${LCOV_CMD} -c -i -d ${DIR_GCNO} -o "${FILE_INFO_BASE}"
if [ $? -ne 0 ]; then
echo "gen baseline coverage run failed"
exit -1
fi
for test in `ls ${DIR_UNITTEST}`; do
echo $test
case ${test} in
test_db)
# set run args for test_db
args="mysql://${MYSQL_USER_NAME}:${MYSQL_PASSWORD}@${MYSQL_HOST}:${MYSQL_PORT}/${MYSQL_DB_NAME}"
;;
*test_*)
args=""
;;
esac
# run unittest
${DIR_UNITTEST}/${test} "${args}"
if [ $? -ne 0 ]; then
echo ${args}
echo ${DIR_UNITTEST}/${test} "run failed"
fi
done
mysql_exc "DROP DATABASE IF EXISTS ${MYSQL_DB_NAME};"
# gen code coverage
${LCOV_CMD} -d ${DIR_GCNO} -o "${FILE_INFO_MILVUS}" -c
# merge coverage
${LCOV_CMD} -a ${FILE_INFO_BASE} -a ${FILE_INFO_MILVUS} -o "${FILE_INFO_OUTPUT}"
# remove third party from tracefiles
${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
"/usr/*" \
"*/boost/*" \
"*/cmake_build/*_ep-prefix/*" \
"*/src/index/cmake_build*" \
"*/src/index/thirdparty*" \
"*/src/grpc*" \
"*/src/metrics/MetricBase.h" \
"*/src/server/Server.cpp" \
"*/src/server/DBWrapper.cpp" \
"*/src/server/grpc_impl/GrpcServer.cpp" \
"*/src/utils/easylogging++.h" \
"*/src/utils/easylogging++.cc"
# gen html report
# ${LCOV_GEN_CMD} "${FILE_INFO_OUTPUT_NEW}" --output-directory ${DIR_LCOV_OUTPUT}/

@@ -0,0 +1,13 @@
try {
def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
if (!result) {
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
}
} catch (exc) {
def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
if (!result) {
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
}
throw exc
}

@@ -0,0 +1,13 @@
try {
def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
if (!result) {
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
}
} catch (exc) {
def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}", returnStatus: true
if (!result) {
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
}
throw exc
}

@@ -0,0 +1,13 @@
try {
def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster", returnStatus: true
if (!result) {
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
}
} catch (exc) {
def result = sh script: "helm status ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster", returnStatus: true
if (!result) {
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
}
throw exc
}

@@ -0,0 +1,24 @@
try {
sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
sh 'helm repo update'
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("milvus/milvus-cluster") {
sh "helm install --wait --timeout 300 --set roServers.image.tag=${DOCKER_VERSION} --set woServers.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP -f ci/values.yaml --name ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster --namespace milvus-cluster --version 0.5.0 . "
}
}
/*
timeout(time: 2, unit: 'MINUTES') {
waitUntil {
def result = sh script: "nc -z -w 3 ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster-milvus-cluster-proxy.milvus-cluster.svc.cluster.local 19530", returnStatus: true
return !result
}
}
*/
} catch (exc) {
echo 'Helm install failed!'
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster"
throw exc
}

View File

@ -0,0 +1,12 @@
timeout(time: 25, unit: 'MINUTES') {
try {
dir ("${PROJECT_NAME}_test") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
sh 'python3 -m pip install -r requirements_cluster.txt'
sh "pytest . --alluredir=cluster_test_out --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-cluster-milvus-cluster-proxy.milvus-cluster.svc.cluster.local"
}
} catch (exc) {
echo 'Milvus Cluster Test Failed !'
throw exc
}
}

View File

@ -0,0 +1,16 @@
try {
sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
sh 'helm repo update'
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("milvus/milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/values.yaml --namespace milvus-1 --version 0.5.0 ."
}
}
} catch (exc) {
echo 'Helm install failed!'
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
throw exc
}

View File

@ -0,0 +1,16 @@
try {
sh 'helm init --client-only --skip-refresh --stable-repo-url https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts'
sh 'helm repo add milvus https://registry.zilliz.com/chartrepo/milvus'
sh 'helm repo update'
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("milvus/milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.repository=\"zilliz.azurecr.cn/milvus/engine\" --set engine.image.tag=${DOCKER_VERSION} --set expose.type=loadBalancer --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/values.yaml --namespace milvus-1 --version 0.5.0 ."
}
}
} catch (exc) {
echo 'Helm install failed!'
sh "helm del --purge ${env.JOB_NAME}-${env.BUILD_NUMBER}"
throw exc
}

View File

@ -0,0 +1,28 @@
timeout(time: 30, unit: 'MINUTES') {
try {
dir ("${PROJECT_NAME}_test") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
sh 'python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com'
sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --level=1 --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-1.svc.cluster.local"
}
// mysql database backend test
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
if (!fileExists('milvus-helm')) {
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
}
}
dir ("milvus-helm") {
dir ("milvus/milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/db_backend/mysql_values.yaml --namespace milvus-2 --version 0.5.0 ."
}
}
dir ("${PROJECT_NAME}_test") {
sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --level=1 --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-2.svc.cluster.local"
}
} catch (exc) {
echo 'Milvus Test Failed !'
throw exc
}
}

View File

@ -0,0 +1,29 @@
timeout(time: 60, unit: 'MINUTES') {
try {
dir ("${PROJECT_NAME}_test") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
sh 'python3 -m pip install -r requirements.txt -i http://pypi.douban.com/simple --trusted-host pypi.douban.com'
sh "pytest . --alluredir=\"test_out/dev/single/sqlite\" --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-1.svc.cluster.local"
}
// mysql database backend test
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
if (!fileExists('milvus-helm')) {
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
}
}
dir ("milvus-helm") {
dir ("milvus/milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.tag=${DOCKER_VERSION} --set expose.type=clusterIP --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/db_backend/mysql_values.yaml --namespace milvus-2 --version 0.4.0 ."
}
}
dir ("${PROJECT_NAME}_test") {
sh "pytest . --alluredir=\"test_out/dev/single/mysql\" --ip ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine.milvus-2.svc.cluster.local"
}
} catch (exc) {
echo 'Milvus Test Failed !'
throw exc
}
}

View File

@ -0,0 +1,30 @@
container('milvus-build-env') {
timeout(time: 120, unit: 'MINUTES') {
gitlabCommitStatus(name: 'Build Engine') {
dir ("milvus_engine") {
try {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption',disableSubmodules: false,parentCredentials: true,recursiveSubmodules: true,reference: '',trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("core") {
sh "git config --global user.email \"test@zilliz.com\""
sh "git config --global user.name \"test\""
withCredentials([usernamePassword(credentialsId: "${params.JFROG_USER}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
sh "./build.sh -l"
sh "rm -rf cmake_build"
sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' \
&& export JFROG_USER_NAME='${USERNAME}' \
&& export JFROG_PASSWORD='${PASSWORD}' \
&& export FAISS_URL='http://192.168.1.105:6060/jinhai/faiss/-/archive/branch-0.2.1/faiss-branch-0.2.1.tar.gz' \
&& ./build.sh -t ${params.BUILD_TYPE} -d /opt/milvus -j -u -c"
sh "./coverage.sh -u root -p 123456 -t \$POD_IP"
}
}
} catch (exc) {
updateGitlabCommitStatus name: 'Build Engine', state: 'failed'
throw exc
}
}
}
}
}

View File

@ -0,0 +1,28 @@
container('milvus-build-env') {
timeout(time: 120, unit: 'MINUTES') {
gitlabCommitStatus(name: 'Build Engine') {
dir ("milvus_engine") {
try {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [[$class: 'SubmoduleOption',disableSubmodules: false,parentCredentials: true,recursiveSubmodules: true,reference: '',trackingSubmodules: false]], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("core") {
sh "git config --global user.email \"test@zilliz.com\""
sh "git config --global user.name \"test\""
withCredentials([usernamePassword(credentialsId: "${params.JFROG_USER}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {
sh "./build.sh -l"
sh "rm -rf cmake_build"
sh "export JFROG_ARTFACTORY_URL='${params.JFROG_ARTFACTORY_URL}' \
&& export JFROG_USER_NAME='${USERNAME}' \
&& export JFROG_PASSWORD='${PASSWORD}' \
&& export FAISS_URL='http://192.168.1.105:6060/jinhai/faiss/-/archive/branch-0.2.1/faiss-branch-0.2.1.tar.gz' \
&& ./build.sh -t ${params.BUILD_TYPE} -j -d /opt/milvus"
}
}
} catch (exc) {
updateGitlabCommitStatus name: 'Build Engine', state: 'failed'
throw exc
}
}
}
}
}

View File

@ -0,0 +1,38 @@
container('publish-docker') {
timeout(time: 15, unit: 'MINUTES') {
gitlabCommitStatus(name: 'Publish Engine Docker') {
try {
dir ("${PROJECT_NAME}_build") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:build/milvus_build.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("docker/deploy/ubuntu16.04/free_version") {
sh "curl -O -u anonymous: ftp://192.168.1.126/data/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
sh "tar zxvf ${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
try {
def customImage = docker.build("${PROJECT_NAME}/engine:${DOCKER_VERSION}")
docker.withRegistry('https://registry.zilliz.com', "${params.DOCKER_PUBLISH_USER}") {
customImage.push()
}
docker.withRegistry('https://zilliz.azurecr.cn', "${params.AZURE_DOCKER_PUBLISH_USER}") {
customImage.push()
}
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'success'
echo "Docker Pull Command: docker pull registry.zilliz.com/${PROJECT_NAME}/engine:${DOCKER_VERSION}"
}
} catch (exc) {
updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'canceled'
throw exc
} finally {
sh "docker rmi ${PROJECT_NAME}/engine:${DOCKER_VERSION}"
}
}
}
} catch (exc) {
updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'failed'
echo 'Publish Docker failed!'
throw exc
}
}
}
}

View File

@ -0,0 +1,15 @@
def notify() {
if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
// Send an email only if the build status has changed from green/unstable to red
emailext subject: '$DEFAULT_SUBJECT',
body: '$DEFAULT_CONTENT',
recipientProviders: [
[$class: 'DevelopersRecipientProvider'],
[$class: 'RequesterRecipientProvider']
],
replyTo: '$DEFAULT_REPLYTO',
to: '$DEFAULT_RECIPIENTS'
}
}
return this
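Since the script ends with "return this", a pipeline can load it once and call notify() from a post block. The load path below follows the pattern used by the other ci/ scripts and is illustrative:

// illustrative usage from a Jenkinsfile post section
script {
    def notifier = load "${env.WORKSPACE}/ci/function/notify.groovy"
    notifier.notify()
}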

View File

@ -0,0 +1,44 @@
container('milvus-build-env') {
timeout(time: 5, unit: 'MINUTES') {
dir ("milvus_engine") {
dir ("core") {
gitlabCommitStatus(name: 'Packaged Engine') {
if (fileExists('milvus')) {
try {
sh "tar -zcvf ./${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz ./milvus"
def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
fileTransfer.FileTransfer("${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz", "${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage')
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
echo "Download Milvus Engine Binary Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz\""
}
} catch (exc) {
updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
throw exc
}
} else {
updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
error("Milvus binary directory don't exists!")
}
}
gitlabCommitStatus(name: 'Packaged Engine lcov') {
if (fileExists('lcov_out')) {
try {
def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
fileTransfer.FileTransfer("lcov_out/", "${PROJECT_NAME}/lcov/${JOB_NAME}-${BUILD_ID}", 'nas storage')
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
echo "Milvus lcov out Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/lcov/${JOB_NAME}-${BUILD_ID}/lcov_out/\""
}
} catch (exc) {
updateGitlabCommitStatus name: 'Packaged Engine lcov', state: 'failed'
throw exc
}
} else {
updateGitlabCommitStatus name: 'Packaged Engine lcov', state: 'failed'
error("Milvus lcov out directory don't exists!")
}
}
}
}
}
}

View File

@ -0,0 +1,26 @@
container('milvus-build-env') {
timeout(time: 5, unit: 'MINUTES') {
dir ("milvus_engine") {
dir ("core") {
gitlabCommitStatus(name: 'Packaged Engine') {
if (fileExists('milvus')) {
try {
sh "tar -zcvf ./${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz ./milvus"
def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
fileTransfer.FileTransfer("${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz", "${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}", 'nas storage')
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
echo "Download Milvus Engine Binary Viewer \"http://192.168.1.126:8080/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz\""
}
} catch (exc) {
updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
throw exc
}
} else {
updateGitlabCommitStatus name: 'Packaged Engine', state: 'failed'
error("Milvus binary directory don't exists!")
}
}
}
}
}
}

View File

@ -0,0 +1,35 @@
container('publish-docker') {
timeout(time: 15, unit: 'MINUTES') {
gitlabCommitStatus(name: 'Publish Engine Docker') {
try {
dir ("${PROJECT_NAME}_build") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:build/milvus_build.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
dir ("docker/deploy/ubuntu16.04/free_version") {
sh "curl -O -u anonymous: ftp://192.168.1.126/data/${PROJECT_NAME}/engine/${JOB_NAME}-${BUILD_ID}/${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
sh "tar zxvf ${PROJECT_NAME}-engine-${PACKAGE_VERSION}.tar.gz"
try {
def customImage = docker.build("${PROJECT_NAME}/engine:${DOCKER_VERSION}")
docker.withRegistry('https://registry.zilliz.com', "${params.DOCKER_PUBLISH_USER}") {
customImage.push()
}
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'success'
echo "Docker Pull Command: docker pull registry.zilliz.com/${PROJECT_NAME}/engine:${DOCKER_VERSION}"
}
} catch (exc) {
updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'canceled'
throw exc
} finally {
sh "docker rmi ${PROJECT_NAME}/engine:${DOCKER_VERSION}"
}
}
}
} catch (exc) {
updateGitlabCommitStatus name: 'Publish Engine Docker', state: 'failed'
echo 'Publish Docker failed!'
throw exc
}
}
}
}

View File

@ -0,0 +1,31 @@
timeout(time: 40, unit: 'MINUTES') {
try {
dir ("${PROJECT_NAME}_test") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:Test/milvus_test.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
sh 'python3 -m pip install -r requirements.txt'
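// resolve the LoadBalancer ingress IP of the engine service published in namespace milvus-1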
def service_ip = sh (script: "kubectl get svc --namespace milvus-1 ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine --template \"{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}\"",returnStdout: true).trim()
sh "pytest . --alluredir=\"test_out/staging/single/sqlite\" --ip ${service_ip}"
}
// mysql database backend test
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_staging.groovy"
if (!fileExists('milvus-helm')) {
dir ("milvus-helm") {
checkout([$class: 'GitSCM', branches: [[name: "${SEMVER}"]], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[credentialsId: "${params.GIT_USER}", url: "git@192.168.1.105:megasearch/milvus-helm.git", name: 'origin', refspec: "+refs/heads/${SEMVER}:refs/remotes/origin/${SEMVER}"]]])
}
}
dir ("milvus-helm") {
dir ("milvus/milvus-gpu") {
sh "helm install --wait --timeout 300 --set engine.image.repository=\"zilliz.azurecr.cn/milvus/engine\" --set engine.image.tag=${DOCKER_VERSION} --set expose.type=loadBalancer --name ${env.JOB_NAME}-${env.BUILD_NUMBER} -f ci/db_backend/mysql_values.yaml --namespace milvus-2 --version 0.5.0 ."
}
}
dir ("${PROJECT_NAME}_test") {
def service_ip = sh (script: "kubectl get svc --namespace milvus-2 ${env.JOB_NAME}-${env.BUILD_NUMBER}-milvus-gpu-engine --template \"{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}\"",returnStdout: true).trim()
sh "pytest . --alluredir=\"test_out/staging/single/mysql\" --ip ${service_ip}"
}
} catch (exc) {
echo 'Milvus Test Failed !'
throw exc
}
}

View File

@ -0,0 +1,14 @@
timeout(time: 5, unit: 'MINUTES') {
dir ("${PROJECT_NAME}_test") {
if (fileExists('cluster_test_out')) {
def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
fileTransfer.FileTransfer("cluster_test_out/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
}
} else {
error("Milvus Dev Test Out directory don't exists!")
}
}
}

View File

@ -0,0 +1,13 @@
timeout(time: 5, unit: 'MINUTES') {
dir ("${PROJECT_NAME}_test") {
if (fileExists('test_out/dev')) {
def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
fileTransfer.FileTransfer("test_out/dev/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
}
} else {
error("Milvus Dev Test Out directory don't exists!")
}
}
}

View File

@ -0,0 +1,13 @@
timeout(time: 5, unit: 'MINUTES') {
dir ("${PROJECT_NAME}_test") {
if (fileExists('test_out/staging')) {
def fileTransfer = load "${env.WORKSPACE}/ci/function/file_transfer.groovy"
fileTransfer.FileTransfer("test_out/staging/", "${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}", 'nas storage')
if (currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
echo "Milvus Dev Test Out Viewer \"ftp://192.168.1.126/data/${PROJECT_NAME}/test/${JOB_NAME}-${BUILD_ID}\""
}
} else {
error("Milvus Dev Test Out directory don't exists!")
}
}
}

396
ci/main_jenkinsfile Normal file
View File

@ -0,0 +1,396 @@
pipeline {
agent none
options {
timestamps()
}
environment {
PROJECT_NAME = "milvus"
LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
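// SEMVER: use the GitLab source branch when commit-triggered, otherwise the leaf of params.ENGINE_BRANCH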
SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
DOCKER_VERSION_STR = "${env.gitlabAfter == null ? "${SEMVER}-${LOWER_BUILD_TYPE}" : "${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}"}"
}
stages {
stage("Ubuntu 16.04") {
environment {
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
]);
DOCKER_VERSION = VersionNumber([
versionNumberString : '${DOCKER_VERSION_STR}'
]);
}
stages {
stage("Run Build") {
agent {
kubernetes {
cloud 'build-kubernetes'
label 'build'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
name: milvus-build-env
labels:
app: milvus
component: build-env
spec:
containers:
- name: milvus-build-env
image: registry.zilliz.com/milvus/milvus-build-env:v0.13
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- cat
tty: true
resources:
limits:
memory: "28Gi"
cpu: "10.0"
nvidia.com/gpu: 1
requests:
memory: "14Gi"
cpu: "5.0"
- name: milvus-mysql
image: mysql:5.6
env:
- name: MYSQL_ROOT_PASSWORD
value: "123456"
ports:
- containerPort: 3306
name: mysql
"""
}
}
stages {
stage('Build') {
steps {
gitlabCommitStatus(name: 'Build') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus.groovy"
}
}
}
}
}
post {
aborted {
script {
updateGitlabCommitStatus name: 'Build', state: 'canceled'
echo "Milvus Build aborted !"
}
}
failure {
script {
updateGitlabCommitStatus name: 'Build', state: 'failed'
echo "Milvus Build failure !"
}
}
}
}
stage("Publish docker and helm") {
agent {
kubernetes {
label 'publish'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: publish
component: docker
spec:
containers:
- name: publish-docker
image: registry.zilliz.com/library/zilliz_docker:v1.0.0
securityContext:
privileged: true
command:
- cat
tty: true
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
volumes:
- name: docker-sock
hostPath:
path: /var/run/docker.sock
"""
}
}
stages {
stage('Publish Docker') {
steps {
gitlabCommitStatus(name: 'Publish Docker') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/publish_docker.groovy"
}
}
}
}
}
post {
aborted {
script {
updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
echo "Milvus Publish Docker aborted !"
}
}
failure {
script {
updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
echo "Milvus Publish Docker failure !"
}
}
}
}
stage("Deploy to Development") {
parallel {
stage("Single Node") {
agent {
kubernetes {
label 'dev-test'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
component: test
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.2
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
"""
}
}
stages {
stage("Deploy to Dev") {
steps {
gitlabCommitStatus(name: 'Deploy to Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
}
}
}
}
}
stage("Dev Test") {
steps {
gitlabCommitStatus(name: 'Deploy Test') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
}
}
}
}
}
stage ("Cleanup Dev") {
steps {
gitlabCommitStatus(name: 'Cleanup Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
}
}
}
}
}
post {
always {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
}
}
success {
script {
echo "Milvus Single Node CI/CD success !"
}
}
aborted {
script {
echo "Milvus Single Node CI/CD aborted !"
}
}
failure {
script {
echo "Milvus Single Node CI/CD failure !"
}
}
}
}
// stage("Cluster") {
// agent {
// kubernetes {
// label 'dev-test'
// defaultContainer 'jnlp'
// yaml """
// apiVersion: v1
// kind: Pod
// metadata:
// labels:
// app: milvus
// component: test
// spec:
// containers:
// - name: milvus-testframework
// image: registry.zilliz.com/milvus/milvus-test:v0.2
// command:
// - cat
// tty: true
// volumeMounts:
// - name: kubeconf
// mountPath: /root/.kube/
// readOnly: true
// volumes:
// - name: kubeconf
// secret:
// secretName: test-cluster-config
// """
// }
// }
// stages {
// stage("Deploy to Dev") {
// steps {
// gitlabCommitStatus(name: 'Deploy to Dev') {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
// }
// }
// }
// }
// }
// stage("Dev Test") {
// steps {
// gitlabCommitStatus(name: 'Deploy Test') {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
// load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
// }
// }
// }
// }
// }
// stage ("Cleanup Dev") {
// steps {
// gitlabCommitStatus(name: 'Cleanup Dev') {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
// }
// }
// }
// }
// }
// }
// post {
// always {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
// }
// }
// }
// success {
// script {
// echo "Milvus Cluster CI/CD success !"
// }
// }
// aborted {
// script {
// echo "Milvus Cluster CI/CD aborted !"
// }
// }
// failure {
// script {
// echo "Milvus Cluster CI/CD failure !"
// }
// }
// }
// }
}
}
}
}
}
post {
always {
script {
if (env.gitlabAfter != null) {
if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
// Send an email only if the build status has changed from green/unstable to red
emailext subject: '$DEFAULT_SUBJECT',
body: '$DEFAULT_CONTENT',
recipientProviders: [
[$class: 'DevelopersRecipientProvider'],
[$class: 'RequesterRecipientProvider']
],
replyTo: '$DEFAULT_REPLYTO',
to: '$DEFAULT_RECIPIENTS'
}
}
}
}
success {
script {
updateGitlabCommitStatus name: 'CI/CD', state: 'success'
echo "Milvus CI/CD success !"
}
}
aborted {
script {
updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
echo "Milvus CI/CD aborted !"
}
}
failure {
script {
updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
echo "Milvus CI/CD failure !"
}
}
}
}

396
ci/main_jenkinsfile_no_ut Normal file
View File

@ -0,0 +1,396 @@
pipeline {
agent none
options {
timestamps()
}
environment {
PROJECT_NAME = "milvus"
LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
DOCKER_VERSION_STR = "${env.gitlabAfter == null ? "${SEMVER}-${LOWER_BUILD_TYPE}" : "${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}"}"
}
stages {
stage("Ubuntu 16.04") {
environment {
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
]);
DOCKER_VERSION = VersionNumber([
versionNumberString : '${DOCKER_VERSION_STR}'
]);
}
stages {
stage("Run Build") {
agent {
kubernetes {
cloud 'build-kubernetes'
label 'build'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
name: milvus-build-env
labels:
app: milvus
component: build-env
spec:
containers:
- name: milvus-build-env
image: registry.zilliz.com/milvus/milvus-build-env:v0.13
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command:
- cat
tty: true
resources:
limits:
memory: "28Gi"
cpu: "10.0"
nvidia.com/gpu: 1
requests:
memory: "14Gi"
cpu: "5.0"
- name: milvus-mysql
image: mysql:5.6
env:
- name: MYSQL_ROOT_PASSWORD
value: "123456"
ports:
- containerPort: 3306
name: mysql
"""
}
}
stages {
stage('Build') {
steps {
gitlabCommitStatus(name: 'Build') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build_no_ut.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus_no_ut.groovy"
}
}
}
}
}
post {
aborted {
script {
updateGitlabCommitStatus name: 'Build', state: 'canceled'
echo "Milvus Build aborted !"
}
}
failure {
script {
updateGitlabCommitStatus name: 'Build', state: 'failed'
echo "Milvus Build failure !"
}
}
}
}
stage("Publish docker and helm") {
agent {
kubernetes {
label 'publish'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: publish
component: docker
spec:
containers:
- name: publish-docker
image: registry.zilliz.com/library/zilliz_docker:v1.0.0
securityContext:
privileged: true
command:
- cat
tty: true
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
volumes:
- name: docker-sock
hostPath:
path: /var/run/docker.sock
"""
}
}
stages {
stage('Publish Docker') {
steps {
gitlabCommitStatus(name: 'Publish Docker') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/publish_docker.groovy"
}
}
}
}
}
post {
aborted {
script {
updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
echo "Milvus Publish Docker aborted !"
}
}
failure {
script {
updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
echo "Milvus Publish Docker failure !"
}
}
}
}
stage("Deploy to Development") {
parallel {
stage("Single Node") {
agent {
kubernetes {
label 'dev-test'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
component: test
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.2
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
"""
}
}
stages {
stage("Deploy to Dev") {
steps {
gitlabCommitStatus(name: 'Deploy to Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
}
}
}
}
}
stage("Dev Test") {
steps {
gitlabCommitStatus(name: 'Deploy Test') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/dev_test.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
}
}
}
}
}
stage ("Cleanup Dev") {
steps {
gitlabCommitStatus(name: 'Cleanup Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
}
}
}
}
}
post {
always {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
}
}
success {
script {
echo "Milvus Single Node CI/CD success !"
}
}
aborted {
script {
echo "Milvus Single Node CI/CD aborted !"
}
}
failure {
script {
echo "Milvus Single Node CI/CD failure !"
}
}
}
}
// stage("Cluster") {
// agent {
// kubernetes {
// label 'dev-test'
// defaultContainer 'jnlp'
// yaml """
// apiVersion: v1
// kind: Pod
// metadata:
// labels:
// app: milvus
// component: test
// spec:
// containers:
// - name: milvus-testframework
// image: registry.zilliz.com/milvus/milvus-test:v0.2
// command:
// - cat
// tty: true
// volumeMounts:
// - name: kubeconf
// mountPath: /root/.kube/
// readOnly: true
// volumes:
// - name: kubeconf
// secret:
// secretName: test-cluster-config
// """
// }
// }
// stages {
// stage("Deploy to Dev") {
// steps {
// gitlabCommitStatus(name: 'Deploy to Dev') {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
// }
// }
// }
// }
// }
// stage("Dev Test") {
// steps {
// gitlabCommitStatus(name: 'Deploy Test') {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
// load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
// }
// }
// }
// }
// }
// stage ("Cleanup Dev") {
// steps {
// gitlabCommitStatus(name: 'Cleanup Dev') {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
// }
// }
// }
// }
// }
// }
// post {
// always {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
// }
// }
// }
// success {
// script {
// echo "Milvus Cluster CI/CD success !"
// }
// }
// aborted {
// script {
// echo "Milvus Cluster CI/CD aborted !"
// }
// }
// failure {
// script {
// echo "Milvus Cluster CI/CD failure !"
// }
// }
// }
// }
}
}
}
}
}
post {
always {
script {
if (env.gitlabAfter != null) {
if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
// Send an email only if the build status has changed from green/unstable to red
emailext subject: '$DEFAULT_SUBJECT',
body: '$DEFAULT_CONTENT',
recipientProviders: [
[$class: 'DevelopersRecipientProvider'],
[$class: 'RequesterRecipientProvider']
],
replyTo: '$DEFAULT_REPLYTO',
to: '$DEFAULT_RECIPIENTS'
}
}
}
}
success {
script {
updateGitlabCommitStatus name: 'CI/CD', state: 'success'
echo "Milvus CI/CD success !"
}
}
aborted {
script {
updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
echo "Milvus CI/CD aborted !"
}
}
failure {
script {
updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
echo "Milvus CI/CD failure !"
}
}
}
}

478
ci/nightly_main_jenkinsfile Normal file
View File

@ -0,0 +1,478 @@
pipeline {
agent none
options {
timestamps()
}
environment {
PROJECT_NAME = "milvus"
LOWER_BUILD_TYPE = BUILD_TYPE.toLowerCase()
SEMVER = "${env.gitlabSourceBranch == null ? params.ENGINE_BRANCH.substring(params.ENGINE_BRANCH.lastIndexOf('/') + 1) : env.gitlabSourceBranch}"
GITLAB_AFTER_COMMIT = "${env.gitlabAfter == null ? null : env.gitlabAfter}"
SUFFIX_VERSION_NAME = "${env.gitlabAfter == null ? null : env.gitlabAfter.substring(0, 6)}"
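// nightly (non-commit) builds tag images with the build date; commit-triggered builds append the short commit hash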
DOCKER_VERSION_STR = "${env.gitlabAfter == null ? '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, \"yyyyMMdd\"}' : '${SEMVER}-${LOWER_BUILD_TYPE}-${SUFFIX_VERSION_NAME}'}"
}
stages {
stage("Ubuntu 16.04") {
environment {
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-${LOWER_BUILD_TYPE}-${BUILD_DATE_FORMATTED, "yyyyMMdd"}'
]);
DOCKER_VERSION = VersionNumber([
versionNumberString : '${DOCKER_VERSION_STR}'
]);
}
stages {
stage("Run Build") {
agent {
kubernetes {
cloud 'build-kubernetes'
label 'build'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
name: milvus-build-env
labels:
app: milvus
component: build-env
spec:
containers:
- name: milvus-build-env
image: registry.zilliz.com/milvus/milvus-build-env:v0.13
command:
- cat
tty: true
resources:
limits:
memory: "28Gi"
cpu: "10.0"
nvidia.com/gpu: 1
requests:
memory: "14Gi"
cpu: "5.0"
"""
}
}
stages {
stage('Build') {
steps {
gitlabCommitStatus(name: 'Build') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/milvus_build.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/packaged_milvus.groovy"
}
}
}
}
}
post {
aborted {
script {
updateGitlabCommitStatus name: 'Build', state: 'canceled'
echo "Milvus Build aborted !"
}
}
failure {
script {
updateGitlabCommitStatus name: 'Build', state: 'failed'
echo "Milvus Build failure !"
}
}
}
}
stage("Publish docker and helm") {
agent {
kubernetes {
label 'publish'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: publish
component: docker
spec:
containers:
- name: publish-docker
image: registry.zilliz.com/library/zilliz_docker:v1.0.0
securityContext:
privileged: true
command:
- cat
tty: true
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
volumes:
- name: docker-sock
hostPath:
path: /var/run/docker.sock
"""
}
}
stages {
stage('Publish Docker') {
steps {
gitlabCommitStatus(name: 'Publish Docker') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/nightly_publish_docker.groovy"
}
}
}
}
}
post {
aborted {
script {
updateGitlabCommitStatus name: 'Publish Docker', state: 'canceled'
echo "Milvus Publish Docker aborted !"
}
}
failure {
script {
updateGitlabCommitStatus name: 'Publish Docker', state: 'failed'
echo "Milvus Publish Docker failure !"
}
}
}
}
stage("Deploy to Development") {
parallel {
stage("Single Node") {
agent {
kubernetes {
label 'dev-test'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
component: test
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.2
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
"""
}
}
stages {
stage("Deploy to Dev") {
steps {
gitlabCommitStatus(name: 'Deploy to Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/deploy2dev.groovy"
}
}
}
}
}
stage("Dev Test") {
steps {
gitlabCommitStatus(name: 'Deploy Test') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/dev_test_all.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_test_out.groovy"
}
}
}
}
}
stage ("Cleanup Dev") {
steps {
gitlabCommitStatus(name: 'Cleanup Dev') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
}
}
}
}
}
post {
always {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_dev.groovy"
}
}
}
success {
script {
echo "Milvus Deploy to Dev Single Node CI/CD success !"
}
}
aborted {
script {
echo "Milvus Deploy to Dev Single Node CI/CD aborted !"
}
}
failure {
script {
echo "Milvus Deploy to Dev Single Node CI/CD failure !"
}
}
}
}
// stage("Cluster") {
// agent {
// kubernetes {
// label 'dev-test'
// defaultContainer 'jnlp'
// yaml """
// apiVersion: v1
// kind: Pod
// metadata:
// labels:
// app: milvus
// component: test
// spec:
// containers:
// - name: milvus-testframework
// image: registry.zilliz.com/milvus/milvus-test:v0.2
// command:
// - cat
// tty: true
// volumeMounts:
// - name: kubeconf
// mountPath: /root/.kube/
// readOnly: true
// volumes:
// - name: kubeconf
// secret:
// secretName: test-cluster-config
// """
// }
// }
// stages {
// stage("Deploy to Dev") {
// steps {
// gitlabCommitStatus(name: 'Deploy to Dev') {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_deploy2dev.groovy"
// }
// }
// }
// }
// }
// stage("Dev Test") {
// steps {
// gitlabCommitStatus(name: 'Deploy Test') {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_dev_test.groovy"
// load "${env.WORKSPACE}/ci/jenkinsfile/upload_dev_cluster_test_out.groovy"
// }
// }
// }
// }
// }
// stage ("Cleanup Dev") {
// steps {
// gitlabCommitStatus(name: 'Cleanup Dev') {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
// }
// }
// }
// }
// }
// }
// post {
// always {
// container('milvus-testframework') {
// script {
// load "${env.WORKSPACE}/ci/jenkinsfile/cluster_cleanup_dev.groovy"
// }
// }
// }
// success {
// script {
// echo "Milvus Deploy to Dev Cluster CI/CD success !"
// }
// }
// aborted {
// script {
// echo "Milvus Deploy to Dev Cluster CI/CD aborted !"
// }
// }
// failure {
// script {
// echo "Milvus Deploy to Dev Cluster CI/CD failure !"
// }
// }
// }
// }
}
}
stage("Deploy to Staging") {
parallel {
stage("Single Node") {
agent {
kubernetes {
label 'dev-test'
defaultContainer 'jnlp'
yaml """
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
component: test
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.2
command:
- cat
tty: true
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: aks-gpu-cluster-config
"""
}
}
stages {
stage("Deploy to Staging") {
steps {
gitlabCommitStatus(name: 'Deploy to Staging') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/deploy2staging.groovy"
}
}
}
}
}
stage("Staging Test") {
steps {
gitlabCommitStatus(name: 'Staging Test') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/staging_test.groovy"
load "${env.WORKSPACE}/ci/jenkinsfile/upload_staging_test_out.groovy"
}
}
}
}
}
stage ("Cleanup Staging") {
steps {
gitlabCommitStatus(name: 'Cleanup Staging') {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_staging.groovy"
}
}
}
}
}
}
post {
always {
container('milvus-testframework') {
script {
load "${env.WORKSPACE}/ci/jenkinsfile/cleanup_staging.groovy"
}
}
}
success {
script {
echo "Milvus Deploy to Staging Single Node CI/CD success !"
}
}
aborted {
script {
echo "Milvus Deploy to Staging Single Node CI/CD aborted !"
}
}
failure {
script {
echo "Milvus Deploy to Staging Single Node CI/CD failure !"
}
}
}
}
}
}
}
}
}
post {
always {
script {
if (!currentBuild.resultIsBetterOrEqualTo('SUCCESS')) {
// Send an email only if the build status has changed from green/unstable to red
emailext subject: '$DEFAULT_SUBJECT',
body: '$DEFAULT_CONTENT',
recipientProviders: [
[$class: 'DevelopersRecipientProvider'],
[$class: 'RequesterRecipientProvider']
],
replyTo: '$DEFAULT_REPLYTO',
to: '$DEFAULT_RECIPIENTS'
}
}
}
success {
script {
updateGitlabCommitStatus name: 'CI/CD', state: 'success'
echo "Milvus CI/CD success !"
}
}
aborted {
script {
updateGitlabCommitStatus name: 'CI/CD', state: 'canceled'
echo "Milvus CI/CD aborted !"
}
}
failure {
script {
updateGitlabCommitStatus name: 'CI/CD', state: 'failed'
echo "Milvus CI/CD failure !"
}
}
}
}

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
component: build-env
spec:
containers:
- name: milvus-build-env
image: registry.zilliz.com/milvus/milvus-build-env:v0.9
command:
- cat
tty: true

View File

@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
component: testframework
spec:
containers:
- name: milvus-testframework
image: registry.zilliz.com/milvus/milvus-test:v0.1
command:
- cat
tty: true

View File

@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: publish
component: docker
spec:
containers:
- name: publish-docker
image: registry.zilliz.com/library/zilliz_docker:v1.0.0
securityContext:
privileged: true
command:
- cat
tty: true
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
volumes:
- name: docker-sock
hostPath:
path: /var/run/docker.sock

14
codecov.yaml Normal file
View File

@ -0,0 +1,14 @@
# Configuration file for Codecov
coverage:
precision: 2
round: down
range: "70...100"
status:
project: on
patch: yes
changes: no
comment:
layout: "header, diff, changes, tree"
behavior: default

10
core/.gitignore vendored Normal file
View File

@ -0,0 +1,10 @@
milvus/
conf/server_config.yaml
conf/log_config.conf
version.h
lcov_out/
base.info
output.info
output_new.info
server.info
*.pyc

262
core/CMakeLists.txt Normal file
View File

@ -0,0 +1,262 @@
#-------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#-------------------------------------------------------------------------------
cmake_minimum_required(VERSION 3.14)
message(STATUS "Building using CMake version: ${CMAKE_VERSION}")
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
MACRO (GET_CURRENT_TIME CURRENT_TIME)
execute_process(COMMAND "date" +"%Y-%m-%d %H:%M.%S" OUTPUT_VARIABLE ${CURRENT_TIME})
ENDMACRO (GET_CURRENT_TIME)
GET_CURRENT_TIME(BUILD_TIME)
string(REGEX REPLACE "\n" "" BUILD_TIME ${BUILD_TIME})
message(STATUS "Build time = ${BUILD_TIME}")
MACRO (GET_GIT_BRANCH_NAME GIT_BRANCH_NAME)
execute_process(COMMAND "git" symbolic-ref --short HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
ENDMACRO (GET_GIT_BRANCH_NAME)
GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME)
if(NOT GIT_BRANCH_NAME STREQUAL "")
string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME})
endif()
set(MILVUS_VERSION "${GIT_BRANCH_NAME}")
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" MILVUS_VERSION "${MILVUS_VERSION}")
find_package(ClangTools)
set(BUILD_SUPPORT_DIR "${CMAKE_SOURCE_DIR}/build-support")
if(CMAKE_BUILD_TYPE STREQUAL "Release")
set(BUILD_TYPE "Release")
else()
set(BUILD_TYPE "Debug")
endif()
message(STATUS "Build type = ${BUILD_TYPE}")
project(milvus VERSION "${MILVUS_VERSION}")
project(milvus_engine LANGUAGES CUDA CXX)
unset(CMAKE_EXPORT_COMPILE_COMMANDS CACHE)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(MILVUS_VERSION_MAJOR "${milvus_VERSION_MAJOR}")
set(MILVUS_VERSION_MINOR "${milvus_VERSION_MINOR}")
set(MILVUS_VERSION_PATCH "${milvus_VERSION_PATCH}")
if(MILVUS_VERSION_MAJOR STREQUAL ""
OR MILVUS_VERSION_MINOR STREQUAL ""
OR MILVUS_VERSION_PATCH STREQUAL "")
message(WARNING "Failed to determine Milvus version from git branch name")
set(MILVUS_VERSION "0.5.0")
endif()
message(STATUS "Build version = ${MILVUS_VERSION}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/version.h.macro ${CMAKE_CURRENT_SOURCE_DIR}/version.h)
message(STATUS "Milvus version: "
"${MILVUS_VERSION_MAJOR}.${MILVUS_VERSION_MINOR}.${MILVUS_VERSION_PATCH} "
"(full: '${MILVUS_VERSION}')")
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED on)
if(CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)")
message(STATUS "Building milvus_engine on x86 architecture")
set(MILVUS_BUILD_ARCH x86_64)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc)")
message(STATUS "Building milvus_engine on ppc architecture")
set(MILVUS_BUILD_ARCH ppc64le)
else()
message(WARNING "Unknown processor type")
message(WARNING "CMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR}")
set(MILVUS_BUILD_ARCH unknown)
endif()
find_package (Python COMPONENTS Interpreter Development)
find_package(CUDA)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda")
if(CMAKE_BUILD_TYPE STREQUAL "Release")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp")
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O3")
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp")
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -O0 -g")
endif()
# Ensure that a default make is set
if("${MAKE}" STREQUAL "")
if(NOT MSVC)
find_program(MAKE make)
endif()
endif()
find_path(MYSQL_INCLUDE_DIR
NAMES "mysql.h"
PATH_SUFFIXES "mysql")
if (${MYSQL_INCLUDE_DIR} STREQUAL "MYSQL_INCLUDE_DIR-NOTFOUND")
message(FATAL_ERROR "Could not found MySQL include directory")
else()
include_directories(${MYSQL_INCLUDE_DIR})
endif()
set(MILVUS_SOURCE_DIR ${PROJECT_SOURCE_DIR})
set(MILVUS_BINARY_DIR ${PROJECT_BINARY_DIR})
set(MILVUS_ENGINE_SRC ${PROJECT_SOURCE_DIR}/src)
include(ExternalProject)
include(DefineOptions)
include(BuildUtils)
include(ThirdPartyPackages)
config_summary()
if (CUSTOMIZATION)
add_definitions(-DCUSTOMIZATION)
endif (CUSTOMIZATION)
add_subdirectory(src)
if (BUILD_UNIT_TEST STREQUAL "ON")
if (BUILD_COVERAGE STREQUAL "ON")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
endif()
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/unittest)
endif()
add_custom_target(Clean-All COMMAND ${CMAKE_BUILD_TOOL} clean)
if("${MILVUS_DB_PATH}" STREQUAL "")
set(MILVUS_DB_PATH "/tmp/milvus")
endif()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/server_config.yaml)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.template ${CMAKE_CURRENT_SOURCE_DIR}/conf/log_config.conf)
install(DIRECTORY scripts/
DESTINATION scripts
FILE_PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
GROUP_EXECUTE GROUP_READ
WORLD_EXECUTE WORLD_READ
FILES_MATCHING PATTERN "*.sh")
install(FILES
conf/server_config.yaml
conf/log_config.conf
DESTINATION
conf)
#
# "make lint" target
#
if(NOT MILVUS_VERBOSE_LINT)
set(MILVUS_LINT_QUIET "--quiet")
endif()
if(NOT LINT_EXCLUSIONS_FILE)
# source files matching a glob from a line in this file
# will be excluded from linting (cpplint, clang-tidy, clang-format)
set(LINT_EXCLUSIONS_FILE ${BUILD_SUPPORT_DIR}/lint_exclusions.txt)
endif()
find_program(CPPLINT_BIN NAMES cpplint cpplint.py HINTS ${BUILD_SUPPORT_DIR})
message(STATUS "Found cpplint executable at ${CPPLINT_BIN}")
#
# "make lint" targets
#
add_custom_target(lint
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_cpplint.py
--cpplint_binary
${CPPLINT_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}
${MILVUS_LINT_QUIET})
#
# "make clang-format" and "make check-clang-format" targets
#
if(${CLANG_FORMAT_FOUND})
# runs clang format and updates files in place.
add_custom_target(clang-format
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_format.py
--clang_format_binary
${CLANG_FORMAT_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
--fix
${MILVUS_LINT_QUIET})
# runs clang format and exits with a non-zero exit code if any files need to be reformatted
add_custom_target(check-clang-format
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_format.py
--clang_format_binary
${CLANG_FORMAT_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
${MILVUS_LINT_QUIET})
endif()
#
# "make clang-tidy" and "make check-clang-tidy" targets
#
if(${CLANG_TIDY_FOUND})
# runs clang-tidy and attempts to fix any warning automatically
add_custom_target(clang-tidy
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_tidy.py
--clang_tidy_binary
${CLANG_TIDY_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--compile_commands
${CMAKE_BINARY_DIR}/compile_commands.json
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
--fix
${MILVUS_LINT_QUIET})
# runs clang-tidy and exits with a non-zero exit code if any errors are found.
add_custom_target(check-clang-tidy
${PYTHON_EXECUTABLE}
${BUILD_SUPPORT_DIR}/run_clang_tidy.py
--clang_tidy_binary
${CLANG_TIDY_BIN}
--exclude_globs
${LINT_EXCLUSIONS_FILE}
--compile_commands
${CMAKE_BINARY_DIR}/compile_commands.json
--source_dir
${CMAKE_CURRENT_SOURCE_DIR}/src
${MILVUS_LINT_QUIET})
endif()

View File

@ -0,0 +1,38 @@
<code_scheme name="milvus" version="173">
<Objective-C>
<option name="INDENT_NAMESPACE_MEMBERS" value="0" />
<option name="INDENT_VISIBILITY_KEYWORDS" value="1" />
<option name="KEEP_STRUCTURES_IN_ONE_LINE" value="true" />
<option name="KEEP_CASE_EXPRESSIONS_IN_ONE_LINE" value="true" />
<option name="FUNCTION_NON_TOP_AFTER_RETURN_TYPE_WRAP" value="0" />
<option name="FUNCTION_TOP_AFTER_RETURN_TYPE_WRAP" value="2" />
<option name="FUNCTION_PARAMETERS_WRAP" value="5" />
<option name="FUNCTION_CALL_ARGUMENTS_WRAP" value="5" />
<option name="TEMPLATE_CALL_ARGUMENTS_WRAP" value="5" />
<option name="TEMPLATE_CALL_ARGUMENTS_ALIGN_MULTILINE" value="true" />
<option name="CLASS_CONSTRUCTOR_INIT_LIST_WRAP" value="5" />
<option name="ALIGN_INIT_LIST_IN_COLUMNS" value="false" />
<option name="SPACE_BEFORE_PROTOCOLS_BRACKETS" value="false" />
<option name="SPACE_BEFORE_POINTER_IN_DECLARATION" value="false" />
<option name="SPACE_AFTER_POINTER_IN_DECLARATION" value="true" />
<option name="SPACE_BEFORE_REFERENCE_IN_DECLARATION" value="false" />
<option name="SPACE_AFTER_REFERENCE_IN_DECLARATION" value="true" />
<option name="KEEP_BLANK_LINES_BEFORE_END" value="1" />
</Objective-C>
<codeStyleSettings language="ObjectiveC">
<option name="KEEP_BLANK_LINES_IN_DECLARATIONS" value="1" />
<option name="KEEP_BLANK_LINES_IN_CODE" value="1" />
<option name="KEEP_BLANK_LINES_BEFORE_RBRACE" value="1" />
<option name="BLANK_LINES_AROUND_CLASS" value="0" />
<option name="BLANK_LINES_AROUND_METHOD_IN_INTERFACE" value="0" />
<option name="BLANK_LINES_AFTER_CLASS_HEADER" value="1" />
<option name="SPACE_AFTER_TYPE_CAST" value="false" />
<option name="BINARY_OPERATION_SIGN_ON_NEXT_LINE" value="true" />
<option name="KEEP_SIMPLE_BLOCKS_IN_ONE_LINE" value="false" />
<option name="FOR_STATEMENT_WRAP" value="1" />
<option name="ASSIGNMENT_WRAP" value="1" />
<indentOptions>
<option name="CONTINUATION_INDENT_SIZE" value="4" />
</indentOptions>
</codeStyleSettings>
</code_scheme>

6476
core/build-support/cpplint.py vendored Executable file

File diff suppressed because it is too large

View File

@ -0,0 +1,9 @@
*cmake-build-debug*
*cmake-build-release*
*cmake_build*
*src/index/thirdparty*
*thirdparty*
*easylogging++*
*SqliteMetaImpl.cpp
*src/grpc*
*milvus/include*

110
core/build-support/lintutils.py Executable file
View File

@ -0,0 +1,110 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import multiprocessing as mp
import os
from fnmatch import fnmatch
from subprocess import Popen
def chunk(seq, n):
"""
divide a sequence into equal sized chunks
(the last chunk may be smaller, but won't be empty)
"""
chunks = []
some = []
for element in seq:
if len(some) == n:
chunks.append(some)
some = []
some.append(element)
if len(some) > 0:
chunks.append(some)
return chunks
def dechunk(chunks):
"flatten chunks into a single list"
seq = []
for chunk in chunks:
seq.extend(chunk)
return seq
def run_parallel(cmds, **kwargs):
"""
Run each of cmds (with shared **kwargs) using subprocess.Popen
then wait for all of them to complete.
Runs batches of multiprocessing.cpu_count() * 2 from cmds
returns a list of tuples containing each process'
returncode, stdout, stderr
"""
complete = []
for cmds_batch in chunk(cmds, mp.cpu_count() * 2):
procs_batch = [Popen(cmd, **kwargs) for cmd in cmds_batch]
for proc in procs_batch:
stdout, stderr = proc.communicate()
complete.append((proc.returncode, stdout, stderr))
return complete
_source_extensions = '''
.h
.cc
.cpp
'''.split()
def get_sources(source_dir, exclude_globs=[]):
sources = []
for directory, subdirs, basenames in os.walk(source_dir):
for path in [os.path.join(directory, basename)
for basename in basenames]:
# filter out non-source files
if os.path.splitext(path)[1] not in _source_extensions:
continue
path = os.path.abspath(path)
# filter out files that match the globs in the globs file
if any([fnmatch(path, glob) for glob in exclude_globs]):
continue
sources.append(path)
return sources
def stdout_pathcolonline(completed_process, filenames):
"""
given a completed process which may have reported some files as problematic
by printing the path name followed by ':' then a line number, examine
stdout and return the set of actually reported file names
"""
returncode, stdout, stderr = completed_process
bfilenames = set()
for filename in filenames:
bfilenames.add(filename.encode('utf-8') + b':')
problem_files = set()
for line in stdout.splitlines():
for filename in bfilenames:
if line.startswith(filename):
problem_files.add(filename.decode('utf-8'))
bfilenames.remove(filename)
break
return problem_files, stdout
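A minimal sketch of how these helpers compose; the source directory, exclude glob, and clang-format binary below are illustrative, not taken from the repo:

# illustrative only: format every source file under src/, 16 files per process
import lintutils
from subprocess import PIPE

sources = lintutils.get_sources("src", exclude_globs=["*thirdparty*"])
results = lintutils.run_parallel(
    [["clang-format", "-i"] + batch for batch in lintutils.chunk(sources, 16)],
    stdout=PIPE, stderr=PIPE)
for returncode, stdout, stderr in results:
    # clang-format -i writes in place; a non-zero code signals a parse error
    assert returncode == 0, stderr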

View File

@ -0,0 +1,142 @@
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import lintutils
from subprocess import PIPE
import argparse
import difflib
import multiprocessing as mp
import sys
from functools import partial
# examine the output of clang-format and if changes are
# present assemble a (unified)patch of the difference
def _check_one_file(completed_processes, filename):
with open(filename, "rb") as reader:
original = reader.read()
returncode, stdout, stderr = completed_processes[filename]
formatted = stdout
if formatted != original:
# Run the equivalent of diff -u
diff = list(difflib.unified_diff(
original.decode('utf8').splitlines(True),
formatted.decode('utf8').splitlines(True),
fromfile=filename,
tofile="{} (after clang format)".format(
filename)))
else:
diff = None
return filename, diff
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Runs clang-format on all of the source "
                    "files. If --fix is specified, enforces the format by "
                    "modifying files in place; otherwise compares the "
                    "re-formatted output with the existing file and emits "
                    "any necessary changes as a patch in unified diff format")
parser.add_argument("--clang_format_binary",
required=True,
help="Path to the clang-format binary")
parser.add_argument("--exclude_globs",
help="Filename containing globs for files "
"that should be excluded from the checks")
parser.add_argument("--source_dir",
required=True,
help="Root directory of the source code")
parser.add_argument("--fix", default=False,
action="store_true",
help="If specified, will re-format the source "
"code instead of comparing the re-formatted "
"output, defaults to %(default)s")
parser.add_argument("--quiet", default=False,
action="store_true",
help="If specified, only print errors")
arguments = parser.parse_args()
exclude_globs = []
if arguments.exclude_globs:
for line in open(arguments.exclude_globs):
exclude_globs.append(line.strip())
formatted_filenames = []
for path in lintutils.get_sources(arguments.source_dir, exclude_globs):
formatted_filenames.append(str(path))
if arguments.fix:
if not arguments.quiet:
print("\n".join(map(lambda x: "Formatting {}".format(x),
formatted_filenames)))
# Break clang-format invocations into chunks: each invocation formats
# 16 files. Wait for all processes to complete
results = lintutils.run_parallel([
[arguments.clang_format_binary, "-i"] + some
for some in lintutils.chunk(formatted_filenames, 16)
])
for returncode, stdout, stderr in results:
# if any clang-format reported a parse error, bubble it
if returncode != 0:
sys.exit(returncode)
else:
# run an instance of clang-format for each source file in parallel,
# then wait for all processes to complete
results = lintutils.run_parallel([
[arguments.clang_format_binary, filename]
for filename in formatted_filenames
], stdout=PIPE, stderr=PIPE)
for returncode, stdout, stderr in results:
# if any clang-format reported a parse error, bubble it
if returncode != 0:
sys.exit(returncode)
error = False
checker = partial(_check_one_file, {
filename: result
for filename, result in zip(formatted_filenames, results)
})
pool = mp.Pool()
try:
# check the output from each invocation of clang-format in parallel
for filename, diff in pool.imap(checker, formatted_filenames):
if not arguments.quiet:
print("Checking {}".format(filename))
if diff:
print("{} had clang-format style issues".format(filename))
# Print out the diff to stderr
error = True
# pad with a newline
print(file=sys.stderr)
diff_out = []
for diff_str in diff:
diff_out.append(diff_str.encode('raw_unicode_escape'))
sys.stderr.writelines(diff_out)
except Exception:
error = True
raise
finally:
pool.terminate()
pool.join()
sys.exit(1 if error else 0)
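
The check path in _check_one_file boils down to difflib: compare the bytes on disk with clang-format's stdout and render a unified diff. A self-contained sketch of that comparison, using toy strings in place of real files:

import difflib

original = "int main( ){return 0;}\n"
formatted = "int main() {\n    return 0;\n}\n"
diff = difflib.unified_diff(
    original.splitlines(True),
    formatted.splitlines(True),
    fromfile="example.cpp",
    tofile="example.cpp (after clang format)")
print("".join(diff), end="")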

@@ -0,0 +1,126 @@
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import multiprocessing as mp
import lintutils
from subprocess import PIPE
import sys
from functools import partial
def _get_chunk_key(filenames):
# lists are not hashable so key on the first filename in a chunk
return filenames[0]
# clang-tidy outputs complaints in '/path:line_number: complaint' format,
# so we can scan its output to get a list of files to fix
def _check_some_files(completed_processes, filenames):
result = completed_processes[_get_chunk_key(filenames)]
return lintutils.stdout_pathcolonline(result, filenames)
def _check_all(cmd, filenames):
# each clang-tidy instance will process 16 files
chunks = lintutils.chunk(filenames, 16)
cmds = [cmd + some for some in chunks]
results = lintutils.run_parallel(cmds, stderr=PIPE, stdout=PIPE)
error = False
# record completed processes (keyed by the first filename in the input
# chunk) for lookup in _check_some_files
completed_processes = {
_get_chunk_key(some): result
for some, result in zip(chunks, results)
}
checker = partial(_check_some_files, completed_processes)
pool = mp.Pool()
try:
# check output of completed clang-tidy invocations in parallel
for problem_files, stdout in pool.imap(checker, chunks):
if problem_files:
msg = "clang-tidy suggested fixes for {}"
print("\n".join(map(msg.format, problem_files)))
print(stdout)
error = True
except Exception:
error = True
raise
finally:
pool.terminate()
pool.join()
if error:
sys.exit(1)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Runs clang-tidy on all of the source files.")
parser.add_argument("--clang_tidy_binary",
required=True,
help="Path to the clang-tidy binary")
parser.add_argument("--exclude_globs",
help="Filename containing globs for files "
"that should be excluded from the checks")
parser.add_argument("--compile_commands",
required=True,
help="compile_commands.json to pass clang-tidy")
parser.add_argument("--source_dir",
required=True,
help="Root directory of the source code")
parser.add_argument("--fix", default=False,
action="store_true",
help="If specified, will attempt to fix the "
"source code instead of recommending fixes, "
"defaults to %(default)s")
parser.add_argument("--quiet", default=False,
action="store_true",
help="If specified, only print errors")
arguments = parser.parse_args()
exclude_globs = []
if arguments.exclude_globs:
for line in open(arguments.exclude_globs):
exclude_globs.append(line.strip())
linted_filenames = []
for path in lintutils.get_sources(arguments.source_dir, exclude_globs):
linted_filenames.append(path)
if not arguments.quiet:
msg = 'Tidying {}' if arguments.fix else 'Checking {}'
print("\n".join(map(msg.format, linted_filenames)))
cmd = [
arguments.clang_tidy_binary,
'-p',
arguments.compile_commands
]
if arguments.fix:
cmd.append('-fix')
results = lintutils.run_parallel(
[cmd + some for some in lintutils.chunk(linted_filenames, 16)])
for returncode, stdout, stderr in results:
if returncode != 0:
sys.exit(returncode)
else:
_check_all(cmd, linted_filenames)
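
lintutils.stdout_pathcolonline, used by this script and run_cpplint.py below, reduces to a prefix match of 'path:' against each output line. A toy reproduction with made-up paths and output:

stdout = b"/src/a.cpp:10: warning: ...\n/src/b.cpp:3: note: ...\n"
filenames = ["/src/a.cpp", "/src/c.cpp"]

prefixes = {name.encode("utf-8") + b":": name for name in filenames}
problem_files = set()
for line in stdout.splitlines():
    for prefix, name in prefixes.items():
        if line.startswith(prefix):
            problem_files.add(name)
            break

print(sorted(problem_files))  # ['/src/a.cpp']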

core/build-support/run_cpplint.py (Executable file, 132 lines)
@@ -0,0 +1,132 @@
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import lintutils
from subprocess import PIPE, STDOUT
import argparse
import multiprocessing as mp
import sys
import platform
from functools import partial
# NOTE(wesm):
#
# * readability/casting is disabled as it aggressively warns about functions
#   with names like "int32", so "int32(x)", where int32 is a function name,
#   warns with a false positive
_filters = '''
-whitespace/comments
-readability/casting
-readability/todo
-readability/alt_tokens
-build/header_guard
-build/c++11
-runtime/references
-build/include_order
'''.split()
def _get_chunk_key(filenames):
# lists are not hashable so key on the first filename in a chunk
return filenames[0]
def _check_some_files(completed_processes, filenames):
# cpplint outputs complaints in '/path:line_number: complaint' format,
# so we can scan its output to get a list of files to fix
result = completed_processes[_get_chunk_key(filenames)]
return lintutils.stdout_pathcolonline(result, filenames)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Runs cpplint on all of the source files.")
parser.add_argument("--cpplint_binary",
required=True,
help="Path to the cpplint binary")
parser.add_argument("--exclude_globs",
help="Filename containing globs for files "
"that should be excluded from the checks")
parser.add_argument("--source_dir",
required=True,
help="Root directory of the source code")
parser.add_argument("--quiet", default=False,
action="store_true",
help="If specified, only print errors")
arguments = parser.parse_args()
exclude_globs = []
if arguments.exclude_globs:
for line in open(arguments.exclude_globs):
exclude_globs.append(line.strip())
linted_filenames = []
for path in lintutils.get_sources(arguments.source_dir, exclude_globs):
linted_filenames.append(str(path))
cmd = [
arguments.cpplint_binary,
'--verbose=2',
'--linelength=120',
'--filter=' + ','.join(_filters)
]
if (arguments.cpplint_binary.endswith('.py') and
platform.system() == 'Windows'):
# Windows doesn't support executable scripts; execute with
# sys.executable
cmd.insert(0, sys.executable)
if arguments.quiet:
cmd.append('--quiet')
else:
print("\n".join(map(lambda x: "Linting {}".format(x),
linted_filenames)))
# lint files in chunks: each invocation of cpplint will process 16 files
chunks = lintutils.chunk(linted_filenames, 16)
cmds = [cmd + some for some in chunks]
results = lintutils.run_parallel(cmds, stdout=PIPE, stderr=STDOUT)
error = False
# record completed processes (keyed by the first filename in the input
# chunk) for lookup in _check_some_files
completed_processes = {
_get_chunk_key(filenames): result
for filenames, result in zip(chunks, results)
}
checker = partial(_check_some_files, completed_processes)
pool = mp.Pool()
try:
# scan the outputs of various cpplint invocations in parallel to
# distill a list of problematic files
for problem_files, stdout in pool.imap(checker, chunks):
if problem_files:
if isinstance(stdout, bytes):
stdout = stdout.decode('utf8')
print(stdout, file=sys.stderr)
error = True
except Exception:
error = True
raise
finally:
pool.terminate()
pool.join()
sys.exit(1 if error else 0)
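
All three lint drivers share one shape: run the tool in chunks, key the completed results by a chunk identifier, then scan them with functools.partial plus multiprocessing.Pool.imap. A stripped-down sketch of that pattern (names are illustrative, not from the scripts):

import multiprocessing as mp
from functools import partial

def check(completed, key):
    # look up the precomputed result for this chunk key
    return key, completed[key]

if __name__ == "__main__":
    completed = {"chunk-a": 0, "chunk-b": 1}  # pretend returncodes
    checker = partial(check, completed)
    with mp.Pool() as pool:
        for key, returncode in pool.imap(checker, sorted(completed)):
            print(key, returncode)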

core/build.sh (Executable file, 150 lines)
@@ -0,0 +1,150 @@
#!/bin/bash
BUILD_OUTPUT_DIR="cmake_build"
BUILD_TYPE="Debug"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX=$(pwd)/milvus
MAKE_CLEAN="OFF"
BUILD_COVERAGE="OFF"
DB_PATH="/tmp/milvus"
PROFILING="OFF"
USE_JFROG_CACHE="OFF"
RUN_CPPLINT="OFF"
CUSTOMIZATION="OFF" # default use ori faiss
CUDA_COMPILER=/usr/local/cuda/bin/nvcc
CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}"
wget -q --method HEAD ${CUSTOMIZED_FAISS_URL}
if [ $? -eq 0 ]; then
CUSTOMIZATION="ON"
else
CUSTOMIZATION="OFF"
fi
while getopts "p:d:t:ulrcgjhx" arg
do
case $arg in
p)
INSTALL_PREFIX=$OPTARG
;;
d)
DB_PATH=$OPTARG
;;
t)
BUILD_TYPE=$OPTARG # BUILD_TYPE
;;
u)
echo "Build and run unittest cases" ;
BUILD_UNITTEST="ON";
;;
l)
RUN_CPPLINT="ON"
;;
r)
if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
            rm -rf ./${BUILD_OUTPUT_DIR}
MAKE_CLEAN="ON"
fi
;;
c)
BUILD_COVERAGE="ON"
;;
g)
PROFILING="ON"
;;
j)
USE_JFROG_CACHE="ON"
;;
x)
CUSTOMIZATION="OFF" # force use ori faiss
;;
h) # help
echo "
parameter:
-p: install prefix(default: $(pwd)/milvus)
-d: db data path(default: /tmp/milvus)
-t: build type(default: Debug)
-u: building unit test options(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-r: remove previous build directory(default: OFF)
-c: code coverage(default: OFF)
-g: profiling(default: OFF)
-j: use jfrog cache build directory(default: OFF)
-h: help
usage:
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-g] [-j] [-h]
"
exit 0
;;
?)
echo "ERROR! unknown argument"
exit 1
;;
esac
done
if [[ ! -d ${BUILD_OUTPUT_DIR} ]]; then
mkdir ${BUILD_OUTPUT_DIR}
fi
cd ${BUILD_OUTPUT_DIR}
# remove make cache since build.sh -l use default variables
# force update the variables each time
make rebuild_cache > /dev/null 2>&1
CMAKE_CMD="cmake \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DMILVUS_DB_PATH=${DB_PATH} \
-DMILVUS_ENABLE_PROFILING=${PROFILING} \
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
-DCUSTOMIZATION=${CUSTOMIZATION} \
-DFAISS_URL=${CUSTOMIZED_FAISS_URL} \
../"
echo ${CMAKE_CMD}
${CMAKE_CMD}
if [[ ${MAKE_CLEAN} == "ON" ]]; then
make clean
fi
if [[ ${RUN_CPPLINT} == "ON" ]]; then
# cpplint check
make lint
if [ $? -ne 0 ]; then
echo "ERROR! cpplint check failed"
exit 1
fi
echo "cpplint check passed!"
# clang-format check
make check-clang-format
if [ $? -ne 0 ]; then
echo "ERROR! clang-format check failed"
exit 1
fi
echo "clang-format check passed!"
# # clang-tidy check
# make check-clang-tidy
# if [ $? -ne 0 ]; then
# echo "ERROR! clang-tidy check failed"
# exit 1
# fi
# echo "clang-tidy check passed!"
else
# strip binary symbol
if [[ ${BUILD_TYPE} != "Debug" ]]; then
strip src/milvus_server
fi
# compile and build
make -j 8 install || exit 1
fi

core/cmake/BuildUtils.cmake (Normal file, 204 lines)
@@ -0,0 +1,204 @@
# Define a function that checks the last file modification
function(Check_Last_Modify cache_check_lists_file_path working_dir last_modified_commit_id)
if(EXISTS "${working_dir}")
if(EXISTS "${cache_check_lists_file_path}")
set(GIT_LOG_SKIP_NUM 0)
set(_MATCH_ALL ON CACHE BOOL "Match all")
set(_LOOP_STATUS ON CACHE BOOL "Whether out of loop")
file(STRINGS ${cache_check_lists_file_path} CACHE_IGNORE_TXT)
while(_LOOP_STATUS)
foreach(_IGNORE_ENTRY ${CACHE_IGNORE_TXT})
if(NOT _IGNORE_ENTRY MATCHES "^[^#]+")
continue()
endif()
set(_MATCH_ALL OFF)
execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --name-status --pretty= WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE CHANGE_FILES)
if(NOT CHANGE_FILES STREQUAL "")
string(REPLACE "\n" ";" _CHANGE_FILES ${CHANGE_FILES})
foreach(_FILE_ENTRY ${_CHANGE_FILES})
string(REGEX MATCH "[^ \t]+$" _FILE_NAME ${_FILE_ENTRY})
execute_process(COMMAND sh -c "echo ${_FILE_NAME} | grep ${_IGNORE_ENTRY}" RESULT_VARIABLE return_code)
if (return_code EQUAL 0)
execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --pretty=%H WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE LAST_MODIFIED_COMMIT_ID)
set (${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
set(_LOOP_STATUS OFF)
endif()
endforeach()
else()
set(_LOOP_STATUS OFF)
endif()
endforeach()
if(_MATCH_ALL)
execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --pretty=%H WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE LAST_MODIFIED_COMMIT_ID)
set (${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
set(_LOOP_STATUS OFF)
endif()
math(EXPR GIT_LOG_SKIP_NUM "${GIT_LOG_SKIP_NUM} + 1")
endwhile(_LOOP_STATUS)
else()
execute_process(COMMAND git log --no-merges -1 --skip=${GIT_LOG_SKIP_NUM} --pretty=%H WORKING_DIRECTORY ${working_dir} OUTPUT_VARIABLE LAST_MODIFIED_COMMIT_ID)
set (${last_modified_commit_id} ${LAST_MODIFIED_COMMIT_ID} PARENT_SCOPE)
endif()
else()
message(FATAL_ERROR "The directory ${working_dir} does not exist")
endif()
endfunction()
# Define a function that extracts a cached package
function(ExternalProject_Use_Cache project_name package_file install_path)
message(STATUS "Will use cached package file: ${package_file}")
ExternalProject_Add(${project_name}
DOWNLOAD_COMMAND ${CMAKE_COMMAND} -E echo
"No download step needed (using cached package)"
CONFIGURE_COMMAND ${CMAKE_COMMAND} -E echo
"No configure step needed (using cached package)"
BUILD_COMMAND ${CMAKE_COMMAND} -E echo
"No build step needed (using cached package)"
INSTALL_COMMAND ${CMAKE_COMMAND} -E echo
"No install step needed (using cached package)"
)
# We want our tar files to contain the Install/<package> prefix (not for any
# very special reason, only for consistency and so that we can identify them
# in the extraction logs) which means that we must extract them in the
# binary (top-level build) directory to have them installed in the right
# place for subsequent ExternalProjects to pick them up. It seems that the
# only way to control the working directory is with Add_Step!
ExternalProject_Add_Step(${project_name} extract
ALWAYS 1
COMMAND
${CMAKE_COMMAND} -E echo
"Extracting ${package_file} to ${install_path}"
COMMAND
${CMAKE_COMMAND} -E tar xzvf ${package_file} ${install_path}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
ExternalProject_Add_StepTargets(${project_name} extract)
endfunction()
# Define a function that creates a new cached package
function(ExternalProject_Create_Cache project_name package_file install_path cache_username cache_password cache_path)
if(EXISTS ${package_file})
message(STATUS "Removing existing package file: ${package_file}")
file(REMOVE ${package_file})
endif()
string(REGEX REPLACE "(.+)/.+$" "\\1" package_dir ${package_file})
if(NOT EXISTS ${package_dir})
file(MAKE_DIRECTORY ${package_dir})
endif()
message(STATUS "Will create cached package file: ${package_file}")
ExternalProject_Add_Step(${project_name} package
DEPENDEES install
BYPRODUCTS ${package_file}
COMMAND ${CMAKE_COMMAND} -E echo "Updating cached package file: ${package_file}"
COMMAND ${CMAKE_COMMAND} -E tar czvf ${package_file} ${install_path}
COMMAND ${CMAKE_COMMAND} -E echo "Uploading package file ${package_file} to ${cache_path}"
COMMAND curl -u${cache_username}:${cache_password} -T ${package_file} ${cache_path}
)
ExternalProject_Add_StepTargets(${project_name} package)
endfunction()
function(ADD_THIRDPARTY_LIB LIB_NAME)
set(options)
set(one_value_args SHARED_LIB STATIC_LIB)
set(multi_value_args DEPS INCLUDE_DIRECTORIES)
cmake_parse_arguments(ARG
"${options}"
"${one_value_args}"
"${multi_value_args}"
${ARGN})
if(ARG_UNPARSED_ARGUMENTS)
message(SEND_ERROR "Error: unrecognized arguments: ${ARG_UNPARSED_ARGUMENTS}")
endif()
if(ARG_STATIC_LIB AND ARG_SHARED_LIB)
if(NOT ARG_STATIC_LIB)
message(FATAL_ERROR "No static or shared library provided for ${LIB_NAME}")
endif()
set(AUG_LIB_NAME "${LIB_NAME}_static")
add_library(${AUG_LIB_NAME} STATIC IMPORTED)
set_target_properties(${AUG_LIB_NAME}
PROPERTIES IMPORTED_LOCATION "${ARG_STATIC_LIB}")
if(ARG_DEPS)
set_target_properties(${AUG_LIB_NAME}
PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
endif()
message(STATUS "Added static library dependency ${AUG_LIB_NAME}: ${ARG_STATIC_LIB}")
if(ARG_INCLUDE_DIRECTORIES)
set_target_properties(${AUG_LIB_NAME}
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
"${ARG_INCLUDE_DIRECTORIES}")
endif()
set(AUG_LIB_NAME "${LIB_NAME}_shared")
add_library(${AUG_LIB_NAME} SHARED IMPORTED)
if(WIN32)
# Mark the ".lib" location as part of a Windows DLL
set_target_properties(${AUG_LIB_NAME}
PROPERTIES IMPORTED_IMPLIB "${ARG_SHARED_LIB}")
else()
set_target_properties(${AUG_LIB_NAME}
PROPERTIES IMPORTED_LOCATION "${ARG_SHARED_LIB}")
endif()
if(ARG_DEPS)
set_target_properties(${AUG_LIB_NAME}
PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
endif()
message(STATUS "Added shared library dependency ${AUG_LIB_NAME}: ${ARG_SHARED_LIB}")
if(ARG_INCLUDE_DIRECTORIES)
set_target_properties(${AUG_LIB_NAME}
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
"${ARG_INCLUDE_DIRECTORIES}")
endif()
elseif(ARG_STATIC_LIB)
set(AUG_LIB_NAME "${LIB_NAME}_static")
add_library(${AUG_LIB_NAME} STATIC IMPORTED)
set_target_properties(${AUG_LIB_NAME}
PROPERTIES IMPORTED_LOCATION "${ARG_STATIC_LIB}")
if(ARG_DEPS)
set_target_properties(${AUG_LIB_NAME}
PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
endif()
message(STATUS "Added static library dependency ${AUG_LIB_NAME}: ${ARG_STATIC_LIB}")
if(ARG_INCLUDE_DIRECTORIES)
set_target_properties(${AUG_LIB_NAME}
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
"${ARG_INCLUDE_DIRECTORIES}")
endif()
elseif(ARG_SHARED_LIB)
set(AUG_LIB_NAME "${LIB_NAME}_shared")
add_library(${AUG_LIB_NAME} SHARED IMPORTED)
if(WIN32)
# Mark the ".lib" location as part of a Windows DLL
set_target_properties(${AUG_LIB_NAME}
PROPERTIES IMPORTED_IMPLIB "${ARG_SHARED_LIB}")
else()
set_target_properties(${AUG_LIB_NAME}
PROPERTIES IMPORTED_LOCATION "${ARG_SHARED_LIB}")
endif()
message(STATUS "Added shared library dependency ${AUG_LIB_NAME}: ${ARG_SHARED_LIB}")
if(ARG_DEPS)
set_target_properties(${AUG_LIB_NAME}
PROPERTIES INTERFACE_LINK_LIBRARIES "${ARG_DEPS}")
endif()
if(ARG_INCLUDE_DIRECTORIES)
set_target_properties(${AUG_LIB_NAME}
PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
"${ARG_INCLUDE_DIRECTORIES}")
endif()
else()
message(FATAL_ERROR "No static or shared library provided for ${LIB_NAME}")
endif()
endfunction()

@@ -0,0 +1,189 @@
macro(set_option_category name)
set(MILVUS_OPTION_CATEGORY ${name})
list(APPEND "MILVUS_OPTION_CATEGORIES" ${name})
endmacro()
macro(define_option name description default)
option(${name} ${description} ${default})
list(APPEND "MILVUS_${MILVUS_OPTION_CATEGORY}_OPTION_NAMES" ${name})
set("${name}_OPTION_DESCRIPTION" ${description})
set("${name}_OPTION_DEFAULT" ${default})
set("${name}_OPTION_TYPE" "bool")
endmacro()
function(list_join lst glue out)
if("${${lst}}" STREQUAL "")
set(${out} "" PARENT_SCOPE)
return()
endif()
list(GET ${lst} 0 joined)
list(REMOVE_AT ${lst} 0)
foreach(item ${${lst}})
set(joined "${joined}${glue}${item}")
endforeach()
set(${out} ${joined} PARENT_SCOPE)
endfunction()
macro(define_option_string name description default)
set(${name} ${default} CACHE STRING ${description})
list(APPEND "MILVUS_${MILVUS_OPTION_CATEGORY}_OPTION_NAMES" ${name})
set("${name}_OPTION_DESCRIPTION" ${description})
set("${name}_OPTION_DEFAULT" "\"${default}\"")
set("${name}_OPTION_TYPE" "string")
set("${name}_OPTION_ENUM" ${ARGN})
list_join("${name}_OPTION_ENUM" "|" "${name}_OPTION_ENUM")
if(NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
set_property(CACHE ${name} PROPERTY STRINGS ${ARGN})
endif()
endmacro()
#----------------------------------------------------------------------
set_option_category("Thirdparty")
set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "AUTO")
define_option_string(MILVUS_DEPENDENCY_SOURCE
"Method to use for acquiring MILVUS's build dependencies"
"${MILVUS_DEPENDENCY_SOURCE_DEFAULT}"
"AUTO"
"BUNDLED"
"SYSTEM")
define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD
"Show output from ExternalProjects rather than just logging to files" ON)
define_option(MILVUS_BOOST_VENDORED "Use vendored Boost instead of existing Boost. \
Note that this requires linking Boost statically" OFF)
define_option(MILVUS_BOOST_HEADER_ONLY "Use only BOOST headers" OFF)
define_option(MILVUS_WITH_BZ2 "Build with BZ2 compression" ON)
define_option(MILVUS_WITH_EASYLOGGINGPP "Build with Easylogging++ library" ON)
define_option(MILVUS_WITH_LZ4 "Build with lz4 compression" ON)
define_option(MILVUS_WITH_PROMETHEUS "Build with PROMETHEUS library" ON)
define_option(MILVUS_WITH_SNAPPY "Build with Snappy compression" ON)
define_option(MILVUS_WITH_SQLITE "Build with SQLite library" ON)
define_option(MILVUS_WITH_SQLITE_ORM "Build with SQLite ORM library" ON)
define_option(MILVUS_WITH_MYSQLPP "Build with MySQL++" ON)
define_option(MILVUS_WITH_YAMLCPP "Build with yaml-cpp library" ON)
define_option(MILVUS_WITH_ZLIB "Build with zlib compression" ON)
if(CMAKE_VERSION VERSION_LESS 3.7)
set(MILVUS_WITH_ZSTD_DEFAULT OFF)
else()
# ExternalProject_Add(SOURCE_SUBDIR) is available since CMake 3.7.
set(MILVUS_WITH_ZSTD_DEFAULT ON)
endif()
define_option(MILVUS_WITH_ZSTD "Build with zstd compression" ${MILVUS_WITH_ZSTD_DEFAULT})
if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
define_option(MILVUS_WITH_LIBUNWIND "Build with libunwind" ON)
define_option(MILVUS_WITH_GPERFTOOLS "Build with gperftools" ON)
endif()
define_option(MILVUS_WITH_GRPC "Build with GRPC" ON)
#----------------------------------------------------------------------
if(MSVC)
set_option_category("MSVC")
define_option(MSVC_LINK_VERBOSE
"Pass verbose linking options when linking libraries and executables"
OFF)
define_option(MILVUS_USE_STATIC_CRT "Build MILVUS with statically linked CRT" OFF)
endif()
#----------------------------------------------------------------------
set_option_category("Test and benchmark")
unset(MILVUS_BUILD_TESTS CACHE)
if (BUILD_UNIT_TEST)
define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" ON)
else()
define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" OFF)
endif(BUILD_UNIT_TEST)
#----------------------------------------------------------------------
macro(config_summary)
message(STATUS "---------------------------------------------------------------------")
message(STATUS "MILVUS version: ${MILVUS_VERSION}")
message(STATUS)
message(STATUS "Build configuration summary:")
message(STATUS " Generator: ${CMAKE_GENERATOR}")
message(STATUS " Build type: ${CMAKE_BUILD_TYPE}")
message(STATUS " Source directory: ${CMAKE_CURRENT_SOURCE_DIR}")
if(${CMAKE_EXPORT_COMPILE_COMMANDS})
message(
STATUS " Compile commands: ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json")
endif()
foreach(category ${MILVUS_OPTION_CATEGORIES})
message(STATUS)
message(STATUS "${category} options:")
set(option_names ${MILVUS_${category}_OPTION_NAMES})
set(max_value_length 0)
foreach(name ${option_names})
string(LENGTH "\"${${name}}\"" value_length)
if(${max_value_length} LESS ${value_length})
set(max_value_length ${value_length})
endif()
endforeach()
foreach(name ${option_names})
if("${${name}_OPTION_TYPE}" STREQUAL "string")
set(value "\"${${name}}\"")
else()
set(value "${${name}}")
endif()
set(default ${${name}_OPTION_DEFAULT})
set(description ${${name}_OPTION_DESCRIPTION})
string(LENGTH ${description} description_length)
if(${description_length} LESS 70)
string(
SUBSTRING
" "
${description_length} -1 description_padding)
else()
set(description_padding "
")
endif()
set(comment "[${name}]")
if("${value}" STREQUAL "${default}")
set(comment "[default] ${comment}")
endif()
if(NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
set(comment "${comment} [${${name}_OPTION_ENUM}]")
endif()
string(
SUBSTRING "${value} "
0 ${max_value_length} value)
message(STATUS " ${description} ${description_padding} ${value} ${comment}")
endforeach()
endforeach()
endmacro()

@@ -0,0 +1,109 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tries to find the clang-tidy and clang-format modules
#
# Usage of this module as follows:
#
# find_package(ClangTools)
#
# Variables used by this module; they can change the default behaviour and need
# to be set before calling find_package:
#
# ClangToolsBin_HOME -
# When set, this path is inspected instead of standard library binary locations
# to find clang-tidy and clang-format
#
# This module defines
# CLANG_TIDY_BIN, The path to the clang tidy binary
# CLANG_TIDY_FOUND, Whether clang tidy was found
# CLANG_FORMAT_BIN, The path to the clang format binary
#  CLANG_FORMAT_FOUND, Whether clang format was found
find_program(CLANG_TIDY_BIN
NAMES
clang-tidy-7.0
clang-tidy-6.0
clang-tidy-5.0
clang-tidy-4.0
clang-tidy-3.9
clang-tidy-3.8
clang-tidy-3.7
clang-tidy-3.6
clang-tidy
PATHS ${ClangTools_PATH} $ENV{CLANG_TOOLS_PATH} /usr/local/bin /usr/bin
NO_DEFAULT_PATH
)
if ( "${CLANG_TIDY_BIN}" STREQUAL "CLANG_TIDY_BIN-NOTFOUND" )
set(CLANG_TIDY_FOUND 0)
message("clang-tidy not found")
else()
set(CLANG_TIDY_FOUND 1)
message("clang-tidy found at ${CLANG_TIDY_BIN}")
endif()
if (CLANG_FORMAT_VERSION)
find_program(CLANG_FORMAT_BIN
NAMES clang-format-${CLANG_FORMAT_VERSION}
PATHS
${ClangTools_PATH}
$ENV{CLANG_TOOLS_PATH}
/usr/local/bin /usr/bin
NO_DEFAULT_PATH
)
# If not found yet, search alternative locations
if (("${CLANG_FORMAT_BIN}" STREQUAL "CLANG_FORMAT_BIN-NOTFOUND") AND APPLE)
# Homebrew ships older LLVM versions in /usr/local/opt/llvm@version/
STRING(REGEX REPLACE "^([0-9]+)\\.[0-9]+" "\\1" CLANG_FORMAT_MAJOR_VERSION "${CLANG_FORMAT_VERSION}")
STRING(REGEX REPLACE "^[0-9]+\\.([0-9]+)" "\\1" CLANG_FORMAT_MINOR_VERSION "${CLANG_FORMAT_VERSION}")
if ("${CLANG_FORMAT_MINOR_VERSION}" STREQUAL "0")
find_program(CLANG_FORMAT_BIN
NAMES clang-format
PATHS /usr/local/opt/llvm@${CLANG_FORMAT_MAJOR_VERSION}/bin
NO_DEFAULT_PATH
)
else()
find_program(CLANG_FORMAT_BIN
NAMES clang-format
PATHS /usr/local/opt/llvm@${CLANG_FORMAT_VERSION}/bin
NO_DEFAULT_PATH
)
endif()
endif()
else()
find_program(CLANG_FORMAT_BIN
NAMES
clang-format-7.0
clang-format-6.0
clang-format-5.0
clang-format-4.0
clang-format-3.9
clang-format-3.8
clang-format-3.7
clang-format-3.6
clang-format
PATHS ${ClangTools_PATH} $ENV{CLANG_TOOLS_PATH} /usr/local/bin /usr/bin
NO_DEFAULT_PATH
)
endif()
if ( "${CLANG_FORMAT_BIN}" STREQUAL "CLANG_FORMAT_BIN-NOTFOUND" )
set(CLANG_FORMAT_FOUND 0)
message("clang-format not found")
else()
set(CLANG_FORMAT_FOUND 1)
message("clang-format found at ${CLANG_FORMAT_BIN}")
endif()

File diff suppressed because it is too large
@@ -0,0 +1,27 @@
* GLOBAL:
FORMAT = "%datetime | %level | %logger | %msg"
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-global.log"
ENABLED = true
TO_FILE = true
TO_STANDARD_OUTPUT = false
SUBSECOND_PRECISION = 3
PERFORMANCE_TRACKING = false
MAX_LOG_FILE_SIZE = 209715200 ## Throw log files away after 200MB
* DEBUG:
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-debug.log"
ENABLED = true
* WARNING:
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-warning.log"
* TRACE:
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-trace.log"
* VERBOSE:
FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg"
TO_FILE = false
TO_STANDARD_OUTPUT = false
## Error logs
* ERROR:
ENABLED = true
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-error.log"
* FATAL:
ENABLED = true
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-fatal.log"

@@ -0,0 +1,43 @@
# Default values are used when you make no changes to the following parameters.
server_config:
address: 0.0.0.0 # milvus server ip address (IPv4)
port: 19530 # port range: 1025 ~ 65534
deploy_mode: single # deployment type: single, cluster_readonly, cluster_writable
time_zone: UTC+8
db_config:
primary_path: @MILVUS_DB_PATH@ # path used to store data and meta
secondary_path: # path used to store data only, split by semicolon
backend_url: sqlite://:@:/ # URI format: dialect://username:password@host:port/database
# Keep 'dialect://:@:/', and replace other texts with real values
# Replace 'dialect' with 'mysql' or 'sqlite'
insert_buffer_size: 4 # GB, maximum insert buffer size allowed
# sum of insert_buffer_size and cpu_cache_capacity cannot exceed total memory
preload_table: # preload data at startup, '*' means load all tables, empty value means no preload
# you can specify preload tables like this: table1,table2,table3
metric_config:
enable_monitor: false # enable monitoring or not
collector: prometheus # prometheus
prometheus_config:
port: 8080 # port prometheus uses to fetch metrics
cache_config:
cpu_cache_capacity: 16 # GB, CPU memory used for cache
cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered
gpu_cache_capacity: 4 # GB, GPU memory used for cache
gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered
cache_insert_data: false # whether to load inserted data into cache
engine_config:
  use_blas_threshold: 20            # if nq < use_blas_threshold, use SSE: faster, but with fluctuating response times
                                    # if nq >= use_blas_threshold, use OpenBLAS: slower, but with stable response times
resource_config:
search_resources: # define the GPUs used for search computation, valid value: gpux
- gpu0
index_build_device: gpu0 # GPU used for building index
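
The comments above pin down a constraint worth checking mechanically: insert_buffer_size plus cpu_cache_capacity must not exceed total memory. A minimal sketch of such a check, assuming PyYAML is installed, a Linux host, and that the @...@ placeholders have already been substituted (this helper is not part of Milvus itself):

import os
import yaml  # assumes PyYAML is available

def check_memory_budget(path="server_config.yaml"):
    with open(path) as handle:
        conf = yaml.safe_load(handle)
    budget_gb = (conf["db_config"]["insert_buffer_size"]
                 + conf["cache_config"]["cpu_cache_capacity"])
    total_gb = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024 ** 3
    if budget_gb > total_gb:
        raise ValueError("insert_buffer_size + cpu_cache_capacity exceeds physical memory")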

core/coverage.sh (Executable file, 128 lines)
@@ -0,0 +1,128 @@
#!/bin/bash
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/milvus/lib
MYSQL_USER_NAME=root
MYSQL_PASSWORD=123456
MYSQL_HOST='127.0.0.1'
MYSQL_PORT='3306'
while getopts "u:p:t:h" arg
do
case $arg in
u)
MYSQL_USER_NAME=$OPTARG
;;
p)
MYSQL_PASSWORD=$OPTARG
;;
t)
MYSQL_HOST=$OPTARG
;;
h) # help
echo "
parameter:
-u: mysql account
-p: mysql password
-t: mysql host
-h: help
usage:
./coverage.sh -u \${MYSQL_USER} -p \${MYSQL_PASSWORD} -t \${MYSQL_HOST} [-h]
"
exit 0
;;
?)
echo "ERROR! unknown argument"
exit 1
;;
esac
done
LCOV_CMD="lcov"
LCOV_GEN_CMD="genhtml"
FILE_INFO_BASE="base.info"
FILE_INFO_MILVUS="server.info"
FILE_INFO_OUTPUT="output.info"
FILE_INFO_OUTPUT_NEW="output_new.info"
DIR_LCOV_OUTPUT="lcov_out"
DIR_GCNO="cmake_build"
DIR_UNITTEST="milvus/unittest"
# delete old code coverage info files
rm -f ${FILE_INFO_BASE} ${FILE_INFO_MILVUS} ${FILE_INFO_OUTPUT} ${FILE_INFO_OUTPUT_NEW}
rm -rf ${DIR_LCOV_OUTPUT}
MYSQL_DB_NAME=milvus_`date +%s%N`
function mysql_exc()
{
cmd=$1
mysql -h${MYSQL_HOST} -u${MYSQL_USER_NAME} -p${MYSQL_PASSWORD} -e "${cmd}"
if [ $? -ne 0 ]; then
echo "mysql $cmd run failed"
fi
}
mysql_exc "CREATE DATABASE IF NOT EXISTS ${MYSQL_DB_NAME};"
mysql_exc "GRANT ALL PRIVILEGES ON ${MYSQL_DB_NAME}.* TO '${MYSQL_USER_NAME}'@'%';"
mysql_exc "FLUSH PRIVILEGES;"
mysql_exc "USE ${MYSQL_DB_NAME};"
# get baseline
${LCOV_CMD} -c -i -d ${DIR_GCNO} -o "${FILE_INFO_BASE}"
if [ $? -ne 0 ]; then
echo "gen baseline coverage run failed"
    exit 1
fi
for test in `ls ${DIR_UNITTEST}`; do
echo $test
case ${test} in
test_db)
# set run args for test_db
args="mysql://${MYSQL_USER_NAME}:${MYSQL_PASSWORD}@${MYSQL_HOST}:${MYSQL_PORT}/${MYSQL_DB_NAME}"
;;
*test_*)
args=""
;;
esac
# run unittest
./${DIR_UNITTEST}/${test} "${args}"
if [ $? -ne 0 ]; then
echo ${args}
echo ${DIR_UNITTEST}/${test} "run failed"
fi
done
mysql_exc "DROP DATABASE IF EXISTS ${MYSQL_DB_NAME};"
# gen code coverage
${LCOV_CMD} -d ${DIR_GCNO} -o "${FILE_INFO_MILVUS}" -c
# merge coverage
${LCOV_CMD} -a ${FILE_INFO_BASE} -a ${FILE_INFO_MILVUS} -o "${FILE_INFO_OUTPUT}"
# remove third party from tracefiles
${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
"/usr/*" \
"*/boost/*" \
"*/cmake_build/*_ep-prefix/*" \
"*/src/index/cmake_build*" \
"*/src/index/thirdparty*" \
"*/src/grpc*" \
"*/src/metrics/MetricBase.h" \
"*/src/server/Server.cpp" \
"*/src/server/DBWrapper.cpp" \
"*/src/server/grpc_impl/GrpcServer.cpp" \
"*/src/utils/easylogging++.h" \
"*/src/utils/easylogging++.cc"
# gen html report
${LCOV_GEN_CMD} "${FILE_INFO_OUTPUT_NEW}" --output-directory ${DIR_LCOV_OUTPUT}/

core/scripts/start_server.sh (Executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/bin/bash
../bin/milvus_server -c ../conf/server_config.yaml -l ../conf/log_config.conf

core/scripts/stop_server.sh (Executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/bin/bash
function kill_progress()
{
    kill -s SIGUSR2 $(pgrep $1)
    if [ $? -ne 0 ]; then
        echo "false"
        return
    fi
    sleep 2
    echo "true"
}
STATUS=$(kill_progress "milvus_server")
if [[ ${STATUS} == "false" ]];then
echo "Milvus server closed abnormally!"
else
echo "Milvus server closed successfully!"
fi

core/src/CMakeLists.txt (Normal file, 191 lines)
@@ -0,0 +1,191 @@
#-------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#-------------------------------------------------------------------------------
include_directories(${MILVUS_SOURCE_DIR})
include_directories(${MILVUS_ENGINE_SRC})
include_directories(${CUDA_TOOLKIT_ROOT_DIR}/include)
include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-status)
include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-milvus)
# this statement must be placed here, since INDEX_INCLUDE_DIRS is defined in index/CMakeLists.txt
add_subdirectory(index)
set(INDEX_INCLUDE_DIRS ${INDEX_INCLUDE_DIRS} PARENT_SCOPE)
foreach (dir ${INDEX_INCLUDE_DIRS})
include_directories(${dir})
endforeach ()
aux_source_directory(${MILVUS_ENGINE_SRC}/cache cache_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/config config_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/metrics metrics_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db db_main_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/engine db_engine_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/insert db_insert_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/db/meta db_meta_files)
set(grpc_service_files
${MILVUS_ENGINE_SRC}/grpc/gen-milvus/milvus.grpc.pb.cc
${MILVUS_ENGINE_SRC}/grpc/gen-milvus/milvus.pb.cc
${MILVUS_ENGINE_SRC}/grpc/gen-status/status.grpc.pb.cc
${MILVUS_ENGINE_SRC}/grpc/gen-status/status.pb.cc
)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler scheduler_main_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/action scheduler_action_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/event scheduler_event_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/job scheduler_job_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/optimizer scheduler_optimizer_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/resource scheduler_resource_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/scheduler/task scheduler_task_files)
set(scheduler_files
${scheduler_main_files}
${scheduler_action_files}
${scheduler_event_files}
${scheduler_job_files}
${scheduler_optimizer_files}
${scheduler_resource_files}
${scheduler_task_files}
)
aux_source_directory(${MILVUS_ENGINE_SRC}/server server_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/server/grpc_impl grpc_server_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/utils utils_files)
aux_source_directory(${MILVUS_ENGINE_SRC}/wrapper wrapper_files)
set(engine_files
${CMAKE_CURRENT_SOURCE_DIR}/main.cpp
${cache_files}
${db_main_files}
${db_engine_files}
${db_insert_files}
${db_meta_files}
${metrics_files}
${utils_files}
${wrapper_files}
)
set(client_grpc_lib
grpcpp_channelz
grpc++
grpc
grpc_protobuf
grpc_protoc
)
set(prometheus_lib
prometheus-cpp-push
prometheus-cpp-pull
prometheus-cpp-core
)
set(boost_lib
libboost_system.a
libboost_filesystem.a
libboost_serialization.a
)
set(cuda_lib
${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
cudart
cublas
)
set(third_party_libs
sqlite
${client_grpc_lib}
yaml-cpp
${prometheus_lib}
${boost_lib}
bzip2
lz4
snappy
zlib
zstd
${cuda_lib}
mysqlpp
)
if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
set(third_party_libs ${third_party_libs}
gperftools
libunwind
)
endif ()
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
set(engine_libs
pthread
libgomp.a
libgfortran.a
)
if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
set(engine_libs
${engine_libs}
libquadmath.a
)
endif ()
cuda_add_library(milvus_engine STATIC ${engine_files})
target_link_libraries(milvus_engine
knowhere
${engine_libs}
${third_party_libs}
)
add_library(metrics STATIC ${metrics_files})
set(metrics_lib
yaml-cpp
${prometheus_lib}
)
target_link_libraries(metrics ${metrics_lib})
set(server_libs
milvus_engine
pthread
dl
metrics
)
add_executable(milvus_server
${config_files}
${metrics_files}
${scheduler_files}
${server_files}
${grpc_server_files}
${grpc_service_files}
${utils_files}
)
target_link_libraries(milvus_server
${server_libs}
)
install(TARGETS milvus_server DESTINATION bin)
install(FILES
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3
${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3.2.4
DESTINATION lib)
add_subdirectory(sdk)

core/src/cache/Cache.h (vendored Normal file, 91 lines)
@@ -0,0 +1,91 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "LRU.h"
#include "utils/Log.h"
#include <atomic>
#include <mutex>
#include <set>
#include <string>
namespace milvus {
namespace cache {
template <typename ItemObj>
class Cache {
public:
// mem_capacity, units:GB
Cache(int64_t capacity_gb, uint64_t cache_max_count);
~Cache() = default;
int64_t
usage() const {
return usage_;
}
int64_t
capacity() const {
return capacity_;
} // unit: BYTE
void
set_capacity(int64_t capacity); // unit: BYTE
double
freemem_percent() const {
return freemem_percent_;
}
void
set_freemem_percent(double percent) {
freemem_percent_ = percent;
}
size_t
size() const;
bool
exists(const std::string& key);
ItemObj
get(const std::string& key);
void
insert(const std::string& key, const ItemObj& item);
void
erase(const std::string& key);
void
print();
void
clear();
private:
void
free_memory();
private:
int64_t usage_;
int64_t capacity_;
double freemem_percent_;
LRU<std::string, ItemObj> lru_;
mutable std::mutex mutex_;
};
} // namespace cache
} // namespace milvus
#include "cache/Cache.inl"

core/src/cache/Cache.inl (vendored Normal file, 194 lines)
@@ -0,0 +1,194 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
namespace milvus {
namespace cache {
constexpr double DEFAULT_THRESHOLD_PERCENT = 0.85;
template<typename ItemObj>
Cache<ItemObj>::Cache(int64_t capacity, uint64_t cache_max_count)
: usage_(0),
capacity_(capacity),
      freemem_percent_(DEFAULT_THRESHOLD_PERCENT),
lru_(cache_max_count) {
// AGENT_LOG_DEBUG << "Construct Cache with capacity " << std::to_string(mem_capacity)
}
template<typename ItemObj>
void
Cache<ItemObj>::set_capacity(int64_t capacity) {
if (capacity > 0) {
capacity_ = capacity;
free_memory();
}
}
template<typename ItemObj>
size_t
Cache<ItemObj>::size() const {
std::lock_guard<std::mutex> lock(mutex_);
return lru_.size();
}
template<typename ItemObj>
bool
Cache<ItemObj>::exists(const std::string &key) {
std::lock_guard<std::mutex> lock(mutex_);
return lru_.exists(key);
}
template<typename ItemObj>
ItemObj
Cache<ItemObj>::get(const std::string &key) {
std::lock_guard<std::mutex> lock(mutex_);
if (!lru_.exists(key)) {
return nullptr;
}
return lru_.get(key);
}
template<typename ItemObj>
void
Cache<ItemObj>::insert(const std::string &key, const ItemObj &item) {
if (item == nullptr) {
return;
}
// if(item->size() > capacity_) {
// SERVER_LOG_ERROR << "Item size " << item->size()
// << " is too large to insert into cache, capacity " << capacity_;
// return;
// }
    // calculate usage
    {
        std::lock_guard<std::mutex> lock(mutex_);
        // if the key already exists, subtract the old item's size
        if (lru_.exists(key)) {
            const ItemObj &old_item = lru_.get(key);
            usage_ -= old_item->Size();
        }
        // add the new item's size
        usage_ += item->Size();
    }
    // if usage exceeds capacity, free some items
if (usage_ > capacity_) {
SERVER_LOG_DEBUG << "Current usage " << usage_
<< " exceeds cache capacity " << capacity_
<< ", start free memory";
free_memory();
}
    // insert the new item
{
std::lock_guard<std::mutex> lock(mutex_);
lru_.put(key, item);
SERVER_LOG_DEBUG << "Insert " << key << " size:" << item->Size()
<< " bytes into cache, usage: " << usage_ << " bytes";
}
}
template<typename ItemObj>
void
Cache<ItemObj>::erase(const std::string &key) {
std::lock_guard<std::mutex> lock(mutex_);
if (!lru_.exists(key)) {
return;
}
const ItemObj &old_item = lru_.get(key);
usage_ -= old_item->Size();
SERVER_LOG_DEBUG << "Erase " << key << " size: " << old_item->Size();
lru_.erase(key);
}
template<typename ItemObj>
void
Cache<ItemObj>::clear() {
std::lock_guard<std::mutex> lock(mutex_);
lru_.clear();
usage_ = 0;
SERVER_LOG_DEBUG << "Clear cache !";
}
/* free memory space when CACHE occupation exceed its capacity */
template<typename ItemObj>
void
Cache<ItemObj>::free_memory() {
if (usage_ <= capacity_) return;
    int64_t threshold = capacity_ * freemem_percent_;
    int64_t delta_size = usage_ - threshold;
    if (delta_size <= 0) {
        delta_size = 1;  // ensure at least one item is erased
}
std::set<std::string> key_array;
int64_t released_size = 0;
{
std::lock_guard<std::mutex> lock(mutex_);
auto it = lru_.rbegin();
while (it != lru_.rend() && released_size < delta_size) {
auto &key = it->first;
auto &obj_ptr = it->second;
key_array.emplace(key);
released_size += obj_ptr->Size();
++it;
}
}
SERVER_LOG_DEBUG << "to be released memory size: " << released_size;
for (auto &key : key_array) {
erase(key);
}
print();
}
template<typename ItemObj>
void
Cache<ItemObj>::print() {
size_t cache_count = 0;
{
std::lock_guard<std::mutex> lock(mutex_);
cache_count = lru_.size();
}
SERVER_LOG_DEBUG << "[Cache item count]: " << cache_count;
SERVER_LOG_DEBUG << "[Cache usage]: " << usage_ << " bytes";
SERVER_LOG_DEBUG << "[Cache capacity]: " << capacity_ << " bytes";
}
} // namespace cache
} // namespace milvus
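
free_memory above evicts from the LRU tail until usage would fall to capacity_ * freemem_percent_, always evicting at least one item. The same policy sketched in Python, with an OrderedDict standing in for the LRU:

from collections import OrderedDict

def free_memory(lru, usage, capacity, freemem_percent=0.85):
    """Evict least-recently-used entries until usage <= capacity * freemem_percent."""
    threshold = capacity * freemem_percent
    while usage > threshold and lru:
        key, size = lru.popitem(last=False)  # oldest entry first
        usage -= size
    return usage

lru = OrderedDict([("a", 40), ("b", 30), ("c", 50)])  # key -> size in bytes
print(free_memory(lru, usage=120, capacity=100))      # 80, after evicting "a"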

core/src/cache/CacheMgr.h (vendored Normal file, 73 lines)
@@ -0,0 +1,73 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "Cache.h"
#include "metrics/Metrics.h"
#include "utils/Log.h"
#include <memory>
#include <string>
namespace milvus {
namespace cache {
template <typename ItemObj>
class CacheMgr {
public:
virtual uint64_t
ItemCount() const;
virtual bool
ItemExists(const std::string& key);
virtual ItemObj
GetItem(const std::string& key);
virtual void
InsertItem(const std::string& key, const ItemObj& data);
virtual void
EraseItem(const std::string& key);
virtual void
PrintInfo();
virtual void
ClearCache();
int64_t
CacheUsage() const;
int64_t
CacheCapacity() const;
void
SetCapacity(int64_t capacity);
protected:
CacheMgr();
virtual ~CacheMgr();
protected:
using CachePtr = std::shared_ptr<Cache<ItemObj>>;
CachePtr cache_;
};
} // namespace cache
} // namespace milvus
#include "cache/CacheMgr.inl"

core/src/cache/CacheMgr.inl (vendored Normal file, 145 lines)
@@ -0,0 +1,145 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
namespace milvus {
namespace cache {
template<typename ItemObj>
CacheMgr<ItemObj>::CacheMgr() {
}
template<typename ItemObj>
CacheMgr<ItemObj>::~CacheMgr() {
}
template<typename ItemObj>
uint64_t
CacheMgr<ItemObj>::ItemCount() const {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
return 0;
}
return (uint64_t) (cache_->size());
}
template<typename ItemObj>
bool
CacheMgr<ItemObj>::ItemExists(const std::string &key) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
return false;
}
return cache_->exists(key);
}
template<typename ItemObj>
ItemObj
CacheMgr<ItemObj>::GetItem(const std::string &key) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
return nullptr;
}
server::Metrics::GetInstance().CacheAccessTotalIncrement();
return cache_->get(key);
}
template<typename ItemObj>
void
CacheMgr<ItemObj>::InsertItem(const std::string &key, const ItemObj &data) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
return;
}
cache_->insert(key, data);
server::Metrics::GetInstance().CacheAccessTotalIncrement();
}
template<typename ItemObj>
void
CacheMgr<ItemObj>::EraseItem(const std::string &key) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
return;
}
cache_->erase(key);
server::Metrics::GetInstance().CacheAccessTotalIncrement();
}
template<typename ItemObj>
void
CacheMgr<ItemObj>::PrintInfo() {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
return;
}
cache_->print();
}
template<typename ItemObj>
void
CacheMgr<ItemObj>::ClearCache() {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
return;
}
cache_->clear();
}
template<typename ItemObj>
int64_t
CacheMgr<ItemObj>::CacheUsage() const {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
return 0;
}
return cache_->usage();
}
template<typename ItemObj>
int64_t
CacheMgr<ItemObj>::CacheCapacity() const {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
return 0;
}
return cache_->capacity();
}
template<typename ItemObj>
void
CacheMgr<ItemObj>::SetCapacity(int64_t capacity) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
return;
}
cache_->set_capacity(capacity);
}
} // namespace cache
} // namespace milvus

core/src/cache/CpuCacheMgr.cpp (vendored Normal file, 69 lines)
@@ -0,0 +1,69 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cache/CpuCacheMgr.h"
#include "server/Config.h"
#include "utils/Log.h"
#include <utility>
namespace milvus {
namespace cache {
namespace {
constexpr int64_t unit = 1024 * 1024 * 1024;
}
CpuCacheMgr::CpuCacheMgr() {
server::Config& config = server::Config::GetInstance();
Status s;
int64_t cpu_cache_cap;
s = config.GetCacheConfigCpuCacheCapacity(cpu_cache_cap);
if (!s.ok()) {
SERVER_LOG_ERROR << s.message();
}
int64_t cap = cpu_cache_cap * unit;
cache_ = std::make_shared<Cache<DataObjPtr>>(cap, 1UL << 32);
float cpu_cache_threshold;
s = config.GetCacheConfigCpuCacheThreshold(cpu_cache_threshold);
if (!s.ok()) {
SERVER_LOG_ERROR << s.message();
}
if (cpu_cache_threshold > 0.0 && cpu_cache_threshold <= 1.0) {
cache_->set_freemem_percent(cpu_cache_threshold);
} else {
SERVER_LOG_ERROR << "Invalid cpu_cache_threshold: " << cpu_cache_threshold << ", by default set to "
<< cache_->freemem_percent();
}
}
CpuCacheMgr*
CpuCacheMgr::GetInstance() {
static CpuCacheMgr s_mgr;
return &s_mgr;
}
DataObjPtr
CpuCacheMgr::GetIndex(const std::string& key) {
DataObjPtr obj = GetItem(key);
return obj;
}
} // namespace cache
} // namespace milvus

core/src/cache/CpuCacheMgr.h (vendored Normal file, 43 lines)
@@ -0,0 +1,43 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "CacheMgr.h"
#include "DataObj.h"
#include <memory>
#include <string>
namespace milvus {
namespace cache {
class CpuCacheMgr : public CacheMgr<DataObjPtr> {
private:
CpuCacheMgr();
public:
// TODO(myh): use smart pointer instead
static CpuCacheMgr*
GetInstance();
DataObjPtr
GetIndex(const std::string& key);
};
} // namespace cache
} // namespace milvus

core/src/cache/DataObj.h (vendored Normal file, 34 lines)
@@ -0,0 +1,34 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <memory>
namespace milvus {
namespace cache {
class DataObj {
public:
virtual int64_t
Size() = 0;
};
using DataObjPtr = std::shared_ptr<DataObj>;
} // namespace cache
} // namespace milvus

core/src/cache/GpuCacheMgr.cpp (vendored Normal file, 81 lines)
@@ -0,0 +1,81 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cache/GpuCacheMgr.h"
#include "server/Config.h"
#include "utils/Log.h"
#include <sstream>
#include <utility>
namespace milvus {
namespace cache {
std::mutex GpuCacheMgr::mutex_;
std::unordered_map<uint64_t, GpuCacheMgrPtr> GpuCacheMgr::instance_;
namespace {
constexpr int64_t G_BYTE = 1024 * 1024 * 1024;
}  // namespace
GpuCacheMgr::GpuCacheMgr() {
server::Config& config = server::Config::GetInstance();
Status s;
int64_t gpu_cache_cap;
s = config.GetCacheConfigGpuCacheCapacity(gpu_cache_cap);
if (!s.ok()) {
SERVER_LOG_ERROR << s.message();
}
int64_t cap = gpu_cache_cap * G_BYTE;
cache_ = std::make_shared<Cache<DataObjPtr>>(cap, 1UL << 32);
float gpu_mem_threshold;
s = config.GetCacheConfigGpuCacheThreshold(gpu_mem_threshold);
if (!s.ok()) {
SERVER_LOG_ERROR << s.message();
}
if (gpu_mem_threshold > 0.0 && gpu_mem_threshold <= 1.0) {
cache_->set_freemem_percent(gpu_mem_threshold);
} else {
SERVER_LOG_ERROR << "Invalid gpu_mem_threshold: " << gpu_mem_threshold << ", keeping the default value "
<< cache_->freemem_percent();
}
}
GpuCacheMgr*
GpuCacheMgr::GetInstance(uint64_t gpu_id) {
// Take the lock before probing the instance map; an unlocked find would
// race with a concurrent insert from another thread.
std::lock_guard<std::mutex> lock(mutex_);
auto iter = instance_.find(gpu_id);
if (iter == instance_.end()) {
iter = instance_.emplace(gpu_id, std::make_shared<GpuCacheMgr>()).first;
}
return iter->second.get();
}
DataObjPtr
GpuCacheMgr::GetIndex(const std::string& key) {
return GetItem(key);
}
} // namespace cache
} // namespace milvus
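
Unlike the CPU cache, the GPU cache keeps one manager per device in a static map, so each GPU's memory budget is tracked independently. A sketch of per-device use (device id illustrative; CacheCapacity/CacheUsage assumed from the CacheMgr base class, as used by DBImpl.cpp below):

auto* gpu0_cache = milvus::cache::GpuCacheMgr::GetInstance(0);
int64_t available = gpu0_cache->CacheCapacity() - gpu0_cache->CacheUsage();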

47
core/src/cache/GpuCacheMgr.h vendored Normal file

@ -0,0 +1,47 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "CacheMgr.h"
#include "DataObj.h"
#include <memory>
#include <string>
#include <unordered_map>
namespace milvus {
namespace cache {
class GpuCacheMgr;
using GpuCacheMgrPtr = std::shared_ptr<GpuCacheMgr>;
class GpuCacheMgr : public CacheMgr<DataObjPtr> {
public:
GpuCacheMgr();
static GpuCacheMgr*
GetInstance(uint64_t gpu_id);
DataObjPtr
GetIndex(const std::string& key);
private:
static std::mutex mutex_;
static std::unordered_map<uint64_t, GpuCacheMgrPtr> instance_;
};
} // namespace cache
} // namespace milvus

122
core/src/cache/LRU.h vendored Normal file

@ -0,0 +1,122 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <cstddef>
#include <list>
#include <stdexcept>
#include <unordered_map>
#include <utility>
namespace milvus {
namespace cache {
template <typename key_t, typename value_t>
class LRU {
public:
typedef typename std::pair<key_t, value_t> key_value_pair_t;
typedef typename std::list<key_value_pair_t>::iterator list_iterator_t;
typedef typename std::list<key_value_pair_t>::reverse_iterator reverse_list_iterator_t;
explicit LRU(size_t max_size) : max_size_(max_size) {
}
void
put(const key_t& key, const value_t& value) {
auto it = cache_items_map_.find(key);
cache_items_list_.push_front(key_value_pair_t(key, value));
if (it != cache_items_map_.end()) {
cache_items_list_.erase(it->second);
cache_items_map_.erase(it);
}
cache_items_map_[key] = cache_items_list_.begin();
if (cache_items_map_.size() > max_size_) {
auto last = cache_items_list_.end();
last--;
cache_items_map_.erase(last->first);
cache_items_list_.pop_back();
}
}
const value_t&
get(const key_t& key) {
auto it = cache_items_map_.find(key);
if (it == cache_items_map_.end()) {
throw std::range_error("There is no such key in cache");
} else {
cache_items_list_.splice(cache_items_list_.begin(), cache_items_list_, it->second);
return it->second->second;
}
}
void
erase(const key_t& key) {
auto it = cache_items_map_.find(key);
if (it != cache_items_map_.end()) {
cache_items_list_.erase(it->second);
cache_items_map_.erase(it);
}
}
bool
exists(const key_t& key) const {
return cache_items_map_.find(key) != cache_items_map_.end();
}
size_t
size() const {
return cache_items_map_.size();
}
list_iterator_t
begin() {
return cache_items_list_.begin();
}
list_iterator_t
end() {
return cache_items_list_.end();
}
reverse_list_iterator_t
rbegin() {
return cache_items_list_.rbegin();
}
reverse_list_iterator_t
rend() {
return cache_items_list_.rend();
}
void
clear() {
cache_items_list_.clear();
cache_items_map_.clear();
}
private:
std::list<key_value_pair_t> cache_items_list_;
std::unordered_map<key_t, list_iterator_t> cache_items_map_;
size_t max_size_;
};
} // namespace cache
} // namespace milvus
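
A self-contained sketch of the eviction behavior above: put() inserts at the front, get() refreshes recency, and overflowing max_size_ drops the back of the list.

#include <cassert>
#include <string>
// #include "cache/LRU.h"  // include path assumed

int
main() {
    milvus::cache::LRU<std::string, int> lru(2);  // hold at most two entries
    lru.put("a", 1);
    lru.put("b", 2);
    lru.get("a");     // touch "a": it becomes most-recently-used
    lru.put("c", 3);  // over capacity: evicts "b", the least-recently-used
    assert(lru.exists("a") && lru.exists("c") && !lru.exists("b"));
    return 0;
}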

47
core/src/config/ConfigMgr.h Normal file

@ -0,0 +1,47 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <string>
#include "ConfigNode.h"
#include "utils/Status.h"
namespace milvus {
namespace server {
class ConfigMgr {
public:
virtual Status
LoadConfigFile(const std::string& filename) = 0;
virtual void
Print() const = 0; // will be deleted
virtual std::string
DumpString() const = 0;
virtual const ConfigNode&
GetRootNode() const = 0;
virtual ConfigNode&
GetRootNode() = 0;
};
} // namespace server
} // namespace milvus

235
core/src/config/ConfigNode.cpp Normal file

@ -0,0 +1,235 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "config/ConfigNode.h"
#include "utils/Error.h"
#include "utils/Log.h"
#include <algorithm>
#include <cstdlib>
#include <sstream>
#include <string>
namespace milvus {
namespace server {
void
ConfigNode::Combine(const ConfigNode& target) {
const std::map<std::string, std::string>& kv = target.GetConfig();
for (auto itr = kv.begin(); itr != kv.end(); ++itr) {
config_[itr->first] = itr->second;
}
const std::map<std::string, std::vector<std::string> >& sequences = target.GetSequences();
for (auto itr = sequences.begin(); itr != sequences.end(); ++itr) {
sequences_[itr->first] = itr->second;
}
const std::map<std::string, ConfigNode>& children = target.GetChildren();
for (auto itr = children.begin(); itr != children.end(); ++itr) {
children_[itr->first] = itr->second;
}
}
// key/value pair config
void
ConfigNode::SetValue(const std::string& key, const std::string& value) {
config_[key] = value;
}
std::string
ConfigNode::GetValue(const std::string& param_key, const std::string& default_val) const {
auto ref = config_.find(param_key);
if (ref != config_.end()) {
return ref->second;
}
// THROW_UNEXPECTED_ERROR("Can't find parameter key: " + param_key);
return default_val;
}
bool
ConfigNode::GetBoolValue(const std::string& param_key, bool default_val) const {
std::string val = GetValue(param_key);
if (!val.empty()) {
std::transform(val.begin(), val.end(), val.begin(), ::tolower);
return (val == "true" || val == "on" || val == "yes" || val == "1");
} else {
return default_val;
}
}
int32_t
ConfigNode::GetInt32Value(const std::string& param_key, int32_t default_val) const {
std::string val = GetValue(param_key);
if (!val.empty()) {
return (int32_t)std::strtol(val.c_str(), nullptr, 10);
} else {
return default_val;
}
}
int64_t
ConfigNode::GetInt64Value(const std::string& param_key, int64_t default_val) const {
std::string val = GetValue(param_key);
if (!val.empty()) {
return std::strtoll(val.c_str(), nullptr, 10);
} else {
return default_val;
}
}
float
ConfigNode::GetFloatValue(const std::string& param_key, float default_val) const {
std::string val = GetValue(param_key);
if (!val.empty()) {
return std::strtof(val.c_str(), nullptr);
} else {
return default_val;
}
}
double
ConfigNode::GetDoubleValue(const std::string& param_key, double default_val) const {
std::string val = GetValue(param_key);
if (!val.empty()) {
return std::strtod(val.c_str(), nullptr);
} else {
return default_val;
}
}
const std::map<std::string, std::string>&
ConfigNode::GetConfig() const {
return config_;
}
void
ConfigNode::ClearConfig() {
config_.clear();
}
// key/object config
void
ConfigNode::AddChild(const std::string& type_name, const ConfigNode& config) {
children_[type_name] = config;
}
ConfigNode
ConfigNode::GetChild(const std::string& type_name) const {
auto ref = children_.find(type_name);
if (ref != children_.end()) {
return ref->second;
}
ConfigNode nc;
return nc;
}
ConfigNode&
ConfigNode::GetChild(const std::string& type_name) {
return children_[type_name];
}
void
ConfigNode::GetChildren(ConfigNodeArr& arr) const {
arr.clear();
for (auto ref : children_) {
arr.push_back(ref.second);
}
}
const std::map<std::string, ConfigNode>&
ConfigNode::GetChildren() const {
return children_;
}
void
ConfigNode::ClearChildren() {
children_.clear();
}
// key/sequence config
void
ConfigNode::AddSequenceItem(const std::string& key, const std::string& item) {
sequences_[key].push_back(item);
}
std::vector<std::string>
ConfigNode::GetSequence(const std::string& key) const {
auto itr = sequences_.find(key);
if (itr != sequences_.end()) {
return itr->second;
} else {
std::vector<std::string> temp;
return temp;
}
}
const std::map<std::string, std::vector<std::string> >&
ConfigNode::GetSequences() const {
return sequences_;
}
void
ConfigNode::ClearSequences() {
sequences_.clear();
}
void
ConfigNode::PrintAll(const std::string& prefix) const {
for (auto& elem : config_) {
SERVER_LOG_INFO << prefix << elem.first << ": " << elem.second;
}
for (auto& elem : sequences_) {
SERVER_LOG_INFO << prefix << elem.first << ": ";
for (auto& str : elem.second) {
SERVER_LOG_INFO << prefix << " - " << str;
}
}
for (auto& elem : children_) {
SERVER_LOG_INFO << prefix << elem.first << ": ";
elem.second.PrintAll(prefix + " ");
}
}
std::string
ConfigNode::DumpString(const std::string& prefix) const {
std::stringstream str_buffer;
const std::string endl = "\n";
for (auto& elem : config_) {
str_buffer << prefix << elem.first << ": " << elem.second << endl;
}
for (auto& elem : sequences_) {
str_buffer << prefix << elem.first << ": " << endl;
for (auto& str : elem.second) {
str_buffer << prefix + " - " << str << endl;
}
}
for (auto& elem : children_) {
str_buffer << prefix << elem.first << ": " << endl;
str_buffer << elem.second.DumpString(prefix + " ") << endl;
}
return str_buffer.str();
}
} // namespace server
} // namespace milvus
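
A short sketch of the typed getters above. GetBoolValue lowercases the stored string and accepts "true"/"on"/"yes"/"1"; every getter returns its default when the key is absent (key names illustrative):

milvus::server::ConfigNode node;
node.SetValue("use_gpu", "ON");
bool use_gpu = node.GetBoolValue("use_gpu");                   // true: "on", case-insensitive
int64_t cap = node.GetInt64Value("cpu_cache_capacity", 16);    // 16: key absent, default used
float thr = node.GetFloatValue("cpu_cache_threshold", 0.85f);  // 0.85f: same fallback rule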

95
core/src/config/ConfigNode.h Normal file

@ -0,0 +1,95 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <map>
#include <string>
#include <vector>
namespace milvus {
namespace server {
class ConfigNode;
typedef std::vector<ConfigNode> ConfigNodeArr;
class ConfigNode {
public:
void
Combine(const ConfigNode& target);
// key/value pair config
void
SetValue(const std::string& key, const std::string& value);
std::string
GetValue(const std::string& param_key, const std::string& default_val = "") const;
bool
GetBoolValue(const std::string& param_key, bool default_val = false) const;
int32_t
GetInt32Value(const std::string& param_key, int32_t default_val = 0) const;
int64_t
GetInt64Value(const std::string& param_key, int64_t default_val = 0) const;
float
GetFloatValue(const std::string& param_key, float default_val = 0.0) const;
double
GetDoubleValue(const std::string& param_key, double default_val = 0.0) const;
const std::map<std::string, std::string>&
GetConfig() const;
void
ClearConfig();
// key/object config
void
AddChild(const std::string& type_name, const ConfigNode& config);
ConfigNode
GetChild(const std::string& type_name) const;
ConfigNode&
GetChild(const std::string& type_name);
void
GetChildren(ConfigNodeArr& arr) const;
const std::map<std::string, ConfigNode>&
GetChildren() const;
void
ClearChildren();
// key/sequence config
void
AddSequenceItem(const std::string& key, const std::string& item);
std::vector<std::string>
GetSequence(const std::string& key) const;
const std::map<std::string, std::vector<std::string> >&
GetSequences() const;
void
ClearSequences();
void
PrintAll(const std::string& prefix = "") const;
std::string
DumpString(const std::string& prefix = "") const;
private:
std::map<std::string, std::string> config_;
std::map<std::string, ConfigNode> children_;
std::map<std::string, std::vector<std::string> > sequences_;
};
} // namespace server
} // namespace milvus

108
core/src/config/YamlConfigMgr.cpp Normal file

@ -0,0 +1,108 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "config/YamlConfigMgr.h"
#include "utils/Log.h"
namespace milvus {
namespace server {
Status
YamlConfigMgr::LoadConfigFile(const std::string& filename) {
try {
node_ = YAML::LoadFile(filename);
LoadConfigNode(node_, config_);
} catch (YAML::Exception& e) {
std::string str = "Exception: load config file fail: " + std::string(e.what());
return Status(SERVER_UNEXPECTED_ERROR, str);
}
return Status::OK();
}
void
YamlConfigMgr::Print() const {
SERVER_LOG_INFO << "System config content:";
config_.PrintAll();
}
std::string
YamlConfigMgr::DumpString() const {
return config_.DumpString("");
}
const ConfigNode&
YamlConfigMgr::GetRootNode() const {
return config_;
}
ConfigNode&
YamlConfigMgr::GetRootNode() {
return config_;
}
bool
YamlConfigMgr::SetConfigValue(const YAML::Node& node, const std::string& key, ConfigNode& config) {
if (node[key].IsDefined()) {
config.SetValue(key, node[key].as<std::string>());
return true;
}
return false;
}
bool
YamlConfigMgr::SetChildConfig(const YAML::Node& node, const std::string& child_name, ConfigNode& config) {
if (node[child_name].IsDefined()) {
ConfigNode sub_config;
LoadConfigNode(node[child_name], sub_config);
config.AddChild(child_name, sub_config);
return true;
}
return false;
}
bool
YamlConfigMgr::SetSequence(const YAML::Node& node, const std::string& child_name, ConfigNode& config) {
if (node[child_name].IsDefined()) {
size_t cnt = node[child_name].size();
for (size_t i = 0; i < cnt; i++) {
config.AddSequenceItem(child_name, node[child_name][i].as<std::string>());
}
return true;
}
return false;
}
void
YamlConfigMgr::LoadConfigNode(const YAML::Node& node, ConfigNode& config) {
std::string key;
for (YAML::const_iterator it = node.begin(); it != node.end(); ++it) {
if (!it->first.IsNull()) {
key = it->first.as<std::string>();
}
if (node[key].IsScalar()) {
SetConfigValue(node, key, config);
} else if (node[key].IsMap()) {
SetChildConfig(node, key, config);
} else if (node[key].IsSequence()) {
SetSequence(node, key, config);
}
}
}
} // namespace server
} // namespace milvus
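
Putting the loader together: scalars become key/value pairs, maps become child nodes, and sequences become string lists. A sketch with an illustrative file path and key names (the YAML shape mirrors the cache settings read in cache/*.cpp above):

// server_config.yaml (illustrative):
//   cache_config:
//     cpu_cache_capacity: 16
//     cpu_cache_threshold: 0.85
milvus::server::ConfigMgr* mgr = milvus::server::YamlConfigMgr::GetInstance();
auto status = mgr->LoadConfigFile("/path/to/server_config.yaml");
if (status.ok()) {
    const milvus::server::ConfigNode& root = mgr->GetRootNode();
    int64_t cap = root.GetChild("cache_config").GetInt64Value("cpu_cache_capacity", 16);
}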

71
core/src/config/YamlConfigMgr.h Normal file

@ -0,0 +1,71 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <yaml-cpp/yaml.h>
#include <string>
#include "ConfigMgr.h"
#include "utils/Status.h"
namespace milvus {
namespace server {
class YamlConfigMgr : public ConfigMgr {
public:
static ConfigMgr*
GetInstance() {
static YamlConfigMgr mgr;
return &mgr;
}
virtual Status
LoadConfigFile(const std::string& filename);
virtual void
Print() const;
virtual std::string
DumpString() const;
virtual const ConfigNode&
GetRootNode() const;
virtual ConfigNode&
GetRootNode();
private:
bool
SetConfigValue(const YAML::Node& node, const std::string& key, ConfigNode& config);
bool
SetChildConfig(const YAML::Node& node, const std::string& child_name, ConfigNode& config);
bool
SetSequence(const YAML::Node& node, const std::string& child_name, ConfigNode& config);
void
LoadConfigNode(const YAML::Node& node, ConfigNode& config);
private:
YAML::Node node_;
ConfigNode config_;
};
} // namespace server
} // namespace milvus

39
core/src/db/Constants.h Normal file

@ -0,0 +1,39 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <stdint.h>
namespace milvus {
namespace engine {
constexpr uint64_t K = 1024UL;
constexpr uint64_t M = K * K;
constexpr uint64_t G = K * M;
constexpr uint64_t T = K * G;
constexpr uint64_t MAX_TABLE_FILE_MEM = 128 * M;
constexpr int VECTOR_TYPE_SIZE = sizeof(float);
static constexpr uint64_t ONE_KB = K;
static constexpr uint64_t ONE_MB = ONE_KB * ONE_KB;
static constexpr uint64_t ONE_GB = ONE_KB * ONE_MB;
} // namespace engine
} // namespace milvus
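
A compile-time sanity check of the constants above; for instance MAX_TABLE_FILE_MEM works out to 128 * 1024 * 1024 = 134,217,728 bytes:

static_assert(milvus::engine::M == 1024ULL * 1024ULL, "M is one mebibyte");
static_assert(milvus::engine::MAX_TABLE_FILE_MEM == 134217728ULL, "128 MB per table file");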

97
core/src/db/DB.h Normal file

@ -0,0 +1,97 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "Options.h"
#include "Types.h"
#include "meta/Meta.h"
#include "utils/Status.h"
#include <memory>
#include <string>
#include <vector>
namespace milvus {
namespace engine {
class Env;
class DB {
public:
DB() = default;
DB(const DB&) = delete;
DB&
operator=(const DB&) = delete;
virtual ~DB() = default;
virtual Status
Start() = 0;
virtual Status
Stop() = 0;
virtual Status
CreateTable(meta::TableSchema& table_schema) = 0;
virtual Status
DeleteTable(const std::string& table_id, const meta::DatesT& dates) = 0;
virtual Status
DescribeTable(meta::TableSchema& table_schema) = 0;
virtual Status
HasTable(const std::string& table_id, bool& has_or_not) = 0;
virtual Status
AllTables(std::vector<meta::TableSchema>& table_schema_array) = 0;
virtual Status
GetTableRowCount(const std::string& table_id, uint64_t& row_count) = 0;
virtual Status
PreloadTable(const std::string& table_id) = 0;
virtual Status
UpdateTableFlag(const std::string& table_id, int64_t flag) = 0;
virtual Status
InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) = 0;
virtual Status
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
QueryResults& results) = 0;
virtual Status
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
const meta::DatesT& dates, QueryResults& results) = 0;
virtual Status
Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) = 0;
virtual Status
Size(uint64_t& result) = 0;
virtual Status
CreateIndex(const std::string& table_id, const TableIndex& index) = 0;
virtual Status
DescribeIndex(const std::string& table_id, TableIndex& index) = 0;
virtual Status
DropIndex(const std::string& table_id) = 0;
virtual Status
DropAll() = 0;
}; // DB
using DBPtr = std::shared_ptr<DB>;
} // namespace engine
} // namespace milvus

48
core/src/db/DBFactory.cpp Normal file

@ -0,0 +1,48 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "db/DBFactory.h"
#include "DBImpl.h"
#include "meta/MetaFactory.h"
#include "meta/MySQLMetaImpl.h"
#include "meta/SqliteMetaImpl.h"
#include "utils/Exception.h"
#include <stdlib.h>
#include <time.h>
#include <cstdlib>
#include <sstream>
#include <string>
namespace milvus {
namespace engine {
DBOptions
DBFactory::BuildOption() {
auto meta = MetaFactory::BuildOption();
DBOptions options;
options.meta_ = meta;
return options;
}
DBPtr
DBFactory::Build(const DBOptions& options) {
return std::make_shared<DBImpl>(options);
}
} // namespace engine
} // namespace milvus
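
The factory is the embedding entry point: build options, build the DB, then drive it through the DB interface declared above. A sketch with illustrative table name and dimensions (a valid meta/storage configuration is assumed to be in place):

auto options = milvus::engine::DBFactory::BuildOption();  // meta options via MetaFactory
auto db = milvus::engine::DBFactory::Build(options);      // DBPtr backed by DBImpl

std::vector<float> vectors(10 * 128);  // 10 vectors of dimension 128
milvus::engine::IDNumbers ids;
auto status = db->InsertVectors("demo_table", 10, vectors.data(), ids);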

39
core/src/db/DBFactory.h Normal file

@ -0,0 +1,39 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "DB.h"
#include "Options.h"
#include <memory>
#include <string>
namespace milvus {
namespace engine {
class DBFactory {
public:
static DBOptions
BuildOption();
static DBPtr
Build(const DBOptions& options);
};
} // namespace engine
} // namespace milvus

776
core/src/db/DBImpl.cpp Normal file

@ -0,0 +1,776 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "db/DBImpl.h"
#include "Utils.h"
#include "cache/CpuCacheMgr.h"
#include "cache/GpuCacheMgr.h"
#include "engine/EngineFactory.h"
#include "insert/MemMenagerFactory.h"
#include "meta/MetaConsts.h"
#include "meta/MetaFactory.h"
#include "meta/SqliteMetaImpl.h"
#include "metrics/Metrics.h"
#include "scheduler/SchedInst.h"
#include "scheduler/job/BuildIndexJob.h"
#include "scheduler/job/DeleteJob.h"
#include "scheduler/job/SearchJob.h"
#include "utils/Log.h"
#include "utils/TimeRecorder.h"
#include <assert.h>
#include <algorithm>
#include <boost/filesystem.hpp>
#include <chrono>
#include <cstring>
#include <iostream>
#include <thread>
namespace milvus {
namespace engine {
namespace {
constexpr uint64_t METRIC_ACTION_INTERVAL = 1;
constexpr uint64_t COMPACT_ACTION_INTERVAL = 1;
constexpr uint64_t INDEX_ACTION_INTERVAL = 1;
} // namespace
DBImpl::DBImpl(const DBOptions& options)
: options_(options), shutting_down_(true), compact_thread_pool_(1, 1), index_thread_pool_(1, 1) {
meta_ptr_ = MetaFactory::Build(options.meta_, options.mode_);
mem_mgr_ = MemManagerFactory::Build(meta_ptr_, options_);
Start();
}
DBImpl::~DBImpl() {
Stop();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// external api
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
Status
DBImpl::Start() {
if (!shutting_down_.load(std::memory_order_acquire)) {
return Status::OK();
}
ENGINE_LOG_TRACE << "DB service start";
shutting_down_.store(false, std::memory_order_release);
// for distribute version, some nodes are read only
if (options_.mode_ != DBOptions::MODE::CLUSTER_READONLY) {
ENGINE_LOG_TRACE << "StartTimerTasks";
bg_timer_thread_ = std::thread(&DBImpl::BackgroundTimerTask, this);
}
return Status::OK();
}
Status
DBImpl::Stop() {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status::OK();
}
shutting_down_.store(true, std::memory_order_release);
// make sure all in-memory data is serialized
MemSerialize();
// wait compaction/buildindex finish
bg_timer_thread_.join();
if (options_.mode_ != DBOptions::MODE::CLUSTER_READONLY) {
meta_ptr_->CleanUp();
}
ENGINE_LOG_TRACE << "DB service stop";
return Status::OK();
}
Status
DBImpl::DropAll() {
return meta_ptr_->DropAll();
}
Status
DBImpl::CreateTable(meta::TableSchema& table_schema) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
meta::TableSchema temp_schema = table_schema;
temp_schema.index_file_size_ *= ONE_MB;  // convert from MB to bytes for storage
return meta_ptr_->CreateTable(temp_schema);
}
Status
DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
// a non-empty dates list deletes only part of the table's files; that path is not fully supported yet
ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id;
if (dates.empty()) {
mem_mgr_->EraseMemVector(table_id); // not allow insert
meta_ptr_->DeleteTable(table_id); // soft delete table
// scheduler will determine when to delete table files
auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(0, table_id, meta_ptr_, nres);
scheduler::JobMgrInst::GetInstance()->Put(job);
job->WaitAndDelete();
} else {
meta_ptr_->DropPartitionsByDates(table_id, dates);
}
return Status::OK();
}
Status
DBImpl::DescribeTable(meta::TableSchema& table_schema) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
auto stat = meta_ptr_->DescribeTable(table_schema);
table_schema.index_file_size_ /= ONE_MB; // return as MB
return stat;
}
Status
DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
return meta_ptr_->HasTable(table_id, has_or_not);
}
Status
DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
return meta_ptr_->AllTables(table_schema_array);
}
Status
DBImpl::PreloadTable(const std::string& table_id) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
meta::DatePartionedTableFilesSchema files;
meta::DatesT dates;
std::vector<size_t> ids;
auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files);
if (!status.ok()) {
return status;
}
int64_t size = 0;
int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheCapacity();
int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage();
int64_t available_size = cache_total - cache_usage;
for (auto& day_files : files) {
for (auto& file : day_files.second) {
ExecutionEnginePtr engine =
EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_,
(MetricType)file.metric_type_, file.nlist_);
if (engine == nullptr) {
ENGINE_LOG_ERROR << "Invalid engine type";
return Status(DB_ERROR, "Invalid engine type");
}
size += engine->PhysicalSize();
if (size > available_size) {
return Status(SERVER_CACHE_FULL, "Cache is full");
} else {
try {
// step 1: load index
engine->Load(true);
} catch (std::exception& ex) {
std::string msg = "Pre-load table encounter exception: " + std::string(ex.what());
ENGINE_LOG_ERROR << msg;
return Status(DB_ERROR, msg);
}
}
}
}
return Status::OK();
}
Status
DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
return meta_ptr_->UpdateTableFlag(table_id, flag);
}
Status
DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
return meta_ptr_->Count(table_id, row_count);
}
Status
DBImpl::InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) {
// ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
Status status;
milvus::server::CollectInsertMetrics metrics(n, status);
status = mem_mgr_->InsertVectors(table_id, n, vectors, vector_ids);
return status;
}
Status
DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
{
std::unique_lock<std::mutex> lock(build_index_mutex_);
// step 1: check index difference
TableIndex old_index;
auto status = DescribeIndex(table_id, old_index);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to get table index info for table: " << table_id;
return status;
}
// step 2: update index info
TableIndex new_index = index;
new_index.metric_type_ = old_index.metric_type_;  // don't change metric type; it was defined by CreateTable
if (!utils::IsSameIndex(old_index, new_index)) {
DropIndex(table_id);
status = meta_ptr_->UpdateTableIndex(table_id, new_index);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id;
return status;
}
}
}
// step 3: let merge file thread finish
// to avoid duplicate data bug
WaitMergeFileFinish();
// step 4: wait and build index
// for IDMAP type, only wait all NEW file converted to RAW file
// for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files
std::vector<int> file_types;
if (index.engine_type_ == static_cast<int32_t>(EngineType::FAISS_IDMAP)) {
file_types = {
static_cast<int32_t>(meta::TableFileSchema::NEW),
static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
};
} else {
file_types = {
static_cast<int32_t>(meta::TableFileSchema::RAW),
static_cast<int32_t>(meta::TableFileSchema::NEW),
static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
static_cast<int32_t>(meta::TableFileSchema::NEW_INDEX),
static_cast<int32_t>(meta::TableFileSchema::TO_INDEX),
};
}
std::vector<std::string> file_ids;
auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
int times = 1;
while (!file_ids.empty()) {
ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
if (index.engine_type_ != (int)EngineType::FAISS_IDMAP) {
status = meta_ptr_->UpdateTableFilesToIndex(table_id);
}
std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
times++;
}
return Status::OK();
}
Status
DBImpl::DescribeIndex(const std::string& table_id, TableIndex& index) {
return meta_ptr_->DescribeTableIndex(table_id, index);
}
Status
DBImpl::DropIndex(const std::string& table_id) {
ENGINE_LOG_DEBUG << "Drop index for table: " << table_id;
return meta_ptr_->DropTableIndex(table_id);
}
Status
DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
QueryResults& results) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
meta::DatesT dates = {utils::GetDate()};
Status result = Query(table_id, k, nq, nprobe, vectors, dates, results);
return result;
}
Status
DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
const meta::DatesT& dates, QueryResults& results) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
ENGINE_LOG_DEBUG << "Query by dates for table: " << table_id << " date range count: " << dates.size();
// get all table files from table
meta::DatePartionedTableFilesSchema files;
std::vector<size_t> ids;
auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files);
if (!status.ok()) {
return status;
}
meta::TableFilesSchema file_id_array;
for (auto& day_files : files) {
for (auto& file : day_files.second) {
file_id_array.push_back(file);
}
}
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query
status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, results);
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after query
return status;
}
Status
DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
ENGINE_LOG_DEBUG << "Query by file ids for table: " << table_id << " date range count: " << dates.size();
// get specified files
std::vector<size_t> ids;
for (auto& id : file_ids) {
meta::TableFileSchema table_file;
table_file.table_id_ = table_id;
std::string::size_type sz;
ids.push_back(std::stoul(id, &sz));
}
meta::DatePartionedTableFilesSchema files_array;
auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files_array);
if (!status.ok()) {
return status;
}
meta::TableFilesSchema file_id_array;
for (auto& day_files : files_array) {
for (auto& file : day_files.second) {
file_id_array.push_back(file);
}
}
if (file_id_array.empty()) {
return Status(DB_ERROR, "Invalid file id");
}
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query
status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, results);
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after query
return status;
}
Status
DBImpl::Size(uint64_t& result) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
}
return meta_ptr_->Size(result);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// internal methods
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
Status
DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, QueryResults& results) {
server::CollectQueryMetrics metrics(nq);
TimeRecorder rc("");
// step 1: construct a search job over the given files
ENGINE_LOG_DEBUG << "Engine query begin, index file count: " << files.size();
scheduler::SearchJobPtr job = std::make_shared<scheduler::SearchJob>(0, k, nq, nprobe, vectors);
for (auto& file : files) {
scheduler::TableFileSchemaPtr file_ptr = std::make_shared<meta::TableFileSchema>(file);
job->AddIndexFile(file_ptr);
}
// step 2: put search task to scheduler
scheduler::JobMgrInst::GetInstance()->Put(job);
job->WaitResult();
if (!job->GetStatus().ok()) {
return job->GetStatus();
}
// step 3: construct results
results = job->GetResult();
rc.ElapseFromBegin("Engine query totally cost");
return Status::OK();
}
void
DBImpl::BackgroundTimerTask() {
Status status;
server::SystemInfo::GetInstance().Init();
while (true) {
if (shutting_down_.load(std::memory_order_acquire)) {
WaitMergeFileFinish();
WaitBuildIndexFinish();
ENGINE_LOG_DEBUG << "DB background thread exit";
break;
}
std::this_thread::sleep_for(std::chrono::seconds(1));
StartMetricTask();
StartCompactionTask();
StartBuildIndexTask();
}
}
void
DBImpl::WaitMergeFileFinish() {
std::lock_guard<std::mutex> lck(compact_result_mutex_);
for (auto& iter : compact_thread_results_) {
iter.wait();
}
}
void
DBImpl::WaitBuildIndexFinish() {
std::lock_guard<std::mutex> lck(index_result_mutex_);
for (auto& iter : index_thread_results_) {
iter.wait();
}
}
void
DBImpl::StartMetricTask() {
static uint64_t metric_clock_tick = 0;
metric_clock_tick++;
if (metric_clock_tick % METRIC_ACTION_INTERVAL != 0) {
return;
}
ENGINE_LOG_TRACE << "Start metric task";
server::Metrics::GetInstance().KeepingAliveCounterIncrement(METRIC_ACTION_INTERVAL);
int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage();
int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheCapacity();
if (cache_total > 0) {
double cache_usage_double = cache_usage;
server::Metrics::GetInstance().CpuCacheUsageGaugeSet(cache_usage_double * 100 / cache_total);
} else {
server::Metrics::GetInstance().CpuCacheUsageGaugeSet(0);
}
server::Metrics::GetInstance().GpuCacheUsageGaugeSet();
uint64_t size = 0;
Size(size);
server::Metrics::GetInstance().DataFileSizeGaugeSet(size);
server::Metrics::GetInstance().CPUUsagePercentSet();
server::Metrics::GetInstance().RAMUsagePercentSet();
server::Metrics::GetInstance().GPUPercentGaugeSet();
server::Metrics::GetInstance().GPUMemoryUsageGaugeSet();
server::Metrics::GetInstance().OctetsSet();
server::Metrics::GetInstance().CPUCoreUsagePercentSet();
server::Metrics::GetInstance().GPUTemperature();
server::Metrics::GetInstance().CPUTemperature();
ENGINE_LOG_TRACE << "Metric task finished";
}
Status
DBImpl::MemSerialize() {
std::lock_guard<std::mutex> lck(mem_serialize_mutex_);
std::set<std::string> temp_table_ids;
mem_mgr_->Serialize(temp_table_ids);
for (auto& id : temp_table_ids) {
compact_table_ids_.insert(id);
}
if (!temp_table_ids.empty()) {
SERVER_LOG_DEBUG << "Insert cache serialized";
}
return Status::OK();
}
void
DBImpl::StartCompactionTask() {
static uint64_t compact_clock_tick = 0;
compact_clock_tick++;
if (compact_clock_tick % COMPACT_ACTION_INTERVAL != 0) {
return;
}
// serialize memory data
MemSerialize();
// has the previous compaction task finished?
{
std::lock_guard<std::mutex> lck(compact_result_mutex_);
if (!compact_thread_results_.empty()) {
std::chrono::milliseconds span(10);
if (compact_thread_results_.back().wait_for(span) == std::future_status::ready) {
compact_thread_results_.pop_back();
}
}
}
// add new compaction task
{
std::lock_guard<std::mutex> lck(compact_result_mutex_);
if (compact_thread_results_.empty()) {
compact_thread_results_.push_back(
compact_thread_pool_.enqueue(&DBImpl::BackgroundCompaction, this, compact_table_ids_));
compact_table_ids_.clear();
}
}
}
Status
DBImpl::MergeFiles(const std::string& table_id, const meta::DateT& date, const meta::TableFilesSchema& files) {
ENGINE_LOG_DEBUG << "Merge files for table: " << table_id;
// step 1: create table file
meta::TableFileSchema table_file;
table_file.table_id_ = table_id;
table_file.date_ = date;
table_file.file_type_ = meta::TableFileSchema::NEW_MERGE;
Status status = meta_ptr_->CreateTableFile(table_file);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to create table: " << status.ToString();
return status;
}
// step 2: merge files
ExecutionEnginePtr index =
EngineFactory::Build(table_file.dimension_, table_file.location_, (EngineType)table_file.engine_type_,
(MetricType)table_file.metric_type_, table_file.nlist_);
meta::TableFilesSchema updated;
int64_t index_size = 0;
for (auto& file : files) {
server::CollectMergeFilesMetrics metrics;
index->Merge(file.location_);
auto file_schema = file;
file_schema.file_type_ = meta::TableFileSchema::TO_DELETE;
updated.push_back(file_schema);
ENGINE_LOG_DEBUG << "Merging file " << file_schema.file_id_;
index_size = index->Size();
if (index_size >= file_schema.index_file_size_) {
break;
}
}
// step 3: serialize to disk
try {
index->Serialize();
} catch (std::exception& ex) {
// typical errors: out of disk space or permission denied
std::string msg = "Serialize merged index encounter exception: " + std::string(ex.what());
ENGINE_LOG_ERROR << msg;
table_file.file_type_ = meta::TableFileSchema::TO_DELETE;
status = meta_ptr_->UpdateTableFile(table_file);
ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << table_file.file_id_ << " to to_delete";
std::cout << "ERROR: failed to persist merged index file: " << table_file.location_
<< ", possible out of disk space" << std::endl;
return Status(DB_ERROR, msg);
}
// step 4: update table files state
// if index type isn't IDMAP, set file type to TO_INDEX if file size exceeds index_file_size
// else set file type to RAW, no need to build index
if (table_file.engine_type_ != (int)EngineType::FAISS_IDMAP) {
table_file.file_type_ = (index->PhysicalSize() >= table_file.index_file_size_) ? meta::TableFileSchema::TO_INDEX
: meta::TableFileSchema::RAW;
} else {
table_file.file_type_ = meta::TableFileSchema::RAW;
}
table_file.file_size_ = index->PhysicalSize();
table_file.row_count_ = index->Count();
updated.push_back(table_file);
status = meta_ptr_->UpdateTableFiles(updated);
ENGINE_LOG_DEBUG << "New merged file " << table_file.file_id_ << " of size " << index->PhysicalSize() << " bytes";
if (options_.insert_cache_immediately_) {
index->Cache();
}
return status;
}
Status
DBImpl::BackgroundMergeFiles(const std::string& table_id) {
meta::DatePartionedTableFilesSchema raw_files;
auto status = meta_ptr_->FilesToMerge(table_id, raw_files);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to get merge files for table: " << table_id;
return status;
}
for (auto& kv : raw_files) {
auto files = kv.second;
if (files.size() < options_.merge_trigger_number_) {
ENGINE_LOG_DEBUG << "Files number not greater equal than merge trigger number, skip merge action";
continue;
}
MergeFiles(table_id, kv.first, kv.second);
if (shutting_down_.load(std::memory_order_acquire)) {
ENGINE_LOG_DEBUG << "Server will shutdown, skip merge action for table: " << table_id;
break;
}
}
return Status::OK();
}
void
DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
ENGINE_LOG_TRACE << " Background compaction thread start";
Status status;
for (auto& table_id : table_ids) {
status = BackgroundMergeFiles(table_id);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Merge files for table " << table_id << " failed: " << status.ToString();
}
if (shutting_down_.load(std::memory_order_acquire)) {
ENGINE_LOG_DEBUG << "Server will shutdown, skip merge action";
break;
}
}
meta_ptr_->Archive();
int ttl = 5 * meta::M_SEC; // default: file will be deleted after 5 minutes
if (options_.mode_ == DBOptions::MODE::CLUSTER_WRITABLE) {
ttl = meta::D_SEC;
}
meta_ptr_->CleanUpFilesWithTTL(ttl);
ENGINE_LOG_TRACE << " Background compaction thread exit";
}
void
DBImpl::StartBuildIndexTask(bool force) {
static uint64_t index_clock_tick = 0;
index_clock_tick++;
if (!force && (index_clock_tick % INDEX_ACTION_INTERVAL != 0)) {
return;
}
// build index has been finished?
{
std::lock_guard<std::mutex> lck(index_result_mutex_);
if (!index_thread_results_.empty()) {
std::chrono::milliseconds span(10);
if (index_thread_results_.back().wait_for(span) == std::future_status::ready) {
index_thread_results_.pop_back();
}
}
}
// add new build index task
{
std::lock_guard<std::mutex> lck(index_result_mutex_);
if (index_thread_results_.empty()) {
index_thread_results_.push_back(index_thread_pool_.enqueue(&DBImpl::BackgroundBuildIndex, this));
}
}
}
void
DBImpl::BackgroundBuildIndex() {
ENGINE_LOG_TRACE << "Background build index thread start";
std::unique_lock<std::mutex> lock(build_index_mutex_);
meta::TableFilesSchema to_index_files;
meta_ptr_->FilesToIndex(to_index_files);
if (!to_index_files.empty()) {
scheduler::BuildIndexJobPtr job = std::make_shared<scheduler::BuildIndexJob>(0, meta_ptr_, options_);
// step 2: put build index task to scheduler
for (auto& file : to_index_files) {
scheduler::TableFileSchemaPtr file_ptr = std::make_shared<meta::TableFileSchema>(file);
job->AddToIndexFiles(file_ptr);
}
scheduler::JobMgrInst::GetInstance()->Put(job);
job->WaitBuildIndexFinish();
if (!job->GetStatus().ok()) {
Status status = job->GetStatus();
ENGINE_LOG_ERROR << "Building index failed: " << status.ToString();
}
}
ENGINE_LOG_TRACE << "Background build index thread exit";
}
} // namespace engine
} // namespace milvus
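
StartCompactionTask and StartBuildIndexTask above share one scheduling idiom: a single-slot thread pool whose pending future is reaped with a short wait_for before a new task may be enqueued, so at most one compaction and one index build are ever in flight. Distilled (pool and task are placeholders; ThreadPool::enqueue is assumed from utils/ThreadPool.h):

std::list<std::future<void>> results;  // guarded by a mutex in DBImpl
// reap a finished task, waiting at most 10 ms so the timer thread never blocks
if (!results.empty() &&
    results.back().wait_for(std::chrono::milliseconds(10)) == std::future_status::ready) {
    results.pop_back();
}
if (results.empty()) {
    results.push_back(pool.enqueue(task));  // at most one task in flight
}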

163
core/src/db/DBImpl.h Normal file

@ -0,0 +1,163 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "DB.h"
#include "Types.h"
#include "src/db/insert/MemManager.h"
#include "utils/ThreadPool.h"
#include <atomic>
#include <condition_variable>
#include <list>
#include <memory>
#include <mutex>
#include <set>
#include <string>
#include <thread>
#include <vector>
namespace milvus {
namespace engine {
class Env;
namespace meta {
class Meta;
}
class DBImpl : public DB {
public:
explicit DBImpl(const DBOptions& options);
~DBImpl();
Status
Start() override;
Status
Stop() override;
Status
DropAll() override;
Status
CreateTable(meta::TableSchema& table_schema) override;
Status
DeleteTable(const std::string& table_id, const meta::DatesT& dates) override;
Status
DescribeTable(meta::TableSchema& table_schema) override;
Status
HasTable(const std::string& table_id, bool& has_or_not) override;
Status
AllTables(std::vector<meta::TableSchema>& table_schema_array) override;
Status
PreloadTable(const std::string& table_id) override;
Status
UpdateTableFlag(const std::string& table_id, int64_t flag) override;
Status
GetTableRowCount(const std::string& table_id, uint64_t& row_count) override;
Status
InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) override;
Status
CreateIndex(const std::string& table_id, const TableIndex& index) override;
Status
DescribeIndex(const std::string& table_id, TableIndex& index) override;
Status
DropIndex(const std::string& table_id) override;
Status
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
QueryResults& results) override;
Status
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
const meta::DatesT& dates, QueryResults& results) override;
Status
Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) override;
Status
Size(uint64_t& result) override;
private:
Status
QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, QueryResults& results);
void
BackgroundTimerTask();
void
WaitMergeFileFinish();
void
WaitBuildIndexFinish();
void
StartMetricTask();
void
StartCompactionTask();
Status
MergeFiles(const std::string& table_id, const meta::DateT& date, const meta::TableFilesSchema& files);
Status
BackgroundMergeFiles(const std::string& table_id);
void
BackgroundCompaction(std::set<std::string> table_ids);
void
StartBuildIndexTask(bool force = false);
void
BackgroundBuildIndex();
Status
MemSerialize();
private:
const DBOptions options_;
std::atomic<bool> shutting_down_;
std::thread bg_timer_thread_;
meta::MetaPtr meta_ptr_;
MemManagerPtr mem_mgr_;
std::mutex mem_serialize_mutex_;
ThreadPool compact_thread_pool_;
std::mutex compact_result_mutex_;
std::list<std::future<void>> compact_thread_results_;
std::set<std::string> compact_table_ids_;
ThreadPool index_thread_pool_;
std::mutex index_result_mutex_;
std::list<std::future<void>> index_thread_results_;
std::mutex build_index_mutex_;
}; // DBImpl
} // namespace engine
} // namespace milvus

65
core/src/db/IDGenerator.cpp Normal file

@ -0,0 +1,65 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "db/IDGenerator.h"
#include <assert.h>
#include <chrono>
#include <iostream>
namespace milvus {
namespace engine {
IDGenerator::~IDGenerator() = default;
constexpr size_t SimpleIDGenerator::MAX_IDS_PER_MICRO;
IDNumber
SimpleIDGenerator::GetNextIDNumber() {
auto now = std::chrono::system_clock::now();
auto micros = std::chrono::duration_cast<std::chrono::microseconds>(now.time_since_epoch()).count();
return micros * MAX_IDS_PER_MICRO;
}
void
SimpleIDGenerator::NextIDNumbers(size_t n, IDNumbers& ids) {
if (n > MAX_IDS_PER_MICRO) {
NextIDNumbers(n - MAX_IDS_PER_MICRO, ids);
NextIDNumbers(MAX_IDS_PER_MICRO, ids);
return;
}
if (n == 0) {
return;
}
auto now = std::chrono::system_clock::now();
auto micros = std::chrono::duration_cast<std::chrono::microseconds>(now.time_since_epoch()).count();
micros *= MAX_IDS_PER_MICRO;
for (size_t pos = 0; pos < n; ++pos) {
ids.push_back(micros + pos);
}
}
void
SimpleIDGenerator::GetNextIDNumbers(size_t n, IDNumbers& ids) {
ids.clear();
NextIDNumbers(n, ids);
}
} // namespace engine
} // namespace milvus
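
IDs are derived from the microsecond clock, so they are unique only while no more than MAX_IDS_PER_MICRO ids are drawn per microsecond and the clock never moves backwards. Typical use:

milvus::engine::SimpleIDGenerator gen;
milvus::engine::IDNumbers ids;
gen.GetNextIDNumbers(3, ids);  // three ids sharing one timestamp prefix
milvus::engine::IDNumber one = gen.GetNextIDNumber();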

57
core/src/db/IDGenerator.h Normal file

@ -0,0 +1,57 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "Types.h"
#include <cstddef>
#include <vector>
namespace milvus {
namespace engine {
class IDGenerator {
public:
virtual IDNumber
GetNextIDNumber() = 0;
virtual void
GetNextIDNumbers(size_t n, IDNumbers& ids) = 0;
virtual ~IDGenerator() = 0;
}; // IDGenerator
class SimpleIDGenerator : public IDGenerator {
public:
~SimpleIDGenerator() override = default;
IDNumber
GetNextIDNumber() override;
void
GetNextIDNumbers(size_t n, IDNumbers& ids) override;
private:
void
NextIDNumbers(size_t n, IDNumbers& ids);
static constexpr size_t MAX_IDS_PER_MICRO = 1000;
}; // SimpleIDGenerator
} // namespace engine
} // namespace milvus

92
core/src/db/Options.cpp Normal file

@ -0,0 +1,92 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "db/Options.h"
#include "utils/Exception.h"
#include "utils/Log.h"
#include <assert.h>
#include <stdlib.h>
#include <boost/algorithm/string.hpp>
namespace milvus {
namespace engine {
ArchiveConf::ArchiveConf(const std::string& type, const std::string& criterias) {
ParseType(type);
    ParseCriterias(criterias);
}
void
ArchiveConf::SetCriterias(const ArchiveConf::CriteriaT& criteria) {
    for (auto& pair : criteria) {
criterias_[pair.first] = pair.second;
}
}
void
ArchiveConf::ParseCriterias(const std::string& criterias) {
    std::vector<std::string> tokens;
    boost::algorithm::split(tokens, criterias, boost::is_any_of(";"));
    if (tokens.empty()) {
return;
}
for (auto& token : tokens) {
if (token.empty()) {
continue;
}
std::vector<std::string> kv;
boost::algorithm::split(kv, token, boost::is_any_of(":"));
if (kv.size() != 2) {
ENGINE_LOG_WARNING << "Invalid ArchiveConf Criterias: " << token << " Ignore!";
continue;
}
if (kv[0] != "disk" && kv[0] != "days") {
ENGINE_LOG_WARNING << "Invalid ArchiveConf Criterias: " << token << " Ignore!";
continue;
}
try {
auto value = std::stoi(kv[1]);
criterias_[kv[0]] = value;
} catch (std::out_of_range&) {
std::string msg = "Out of range: '" + kv[1] + "'";
ENGINE_LOG_ERROR << msg;
throw InvalidArgumentException(msg);
} catch (...) {
std::string msg = "Invalid argument: '" + kv[1] + "'";
ENGINE_LOG_ERROR << msg;
throw InvalidArgumentException(msg);
}
}
}
void
ArchiveConf::ParseType(const std::string& type) {
if (type != "delete" && type != "swap") {
std::string msg = "Invalid argument: type='" + type + "'";
throw InvalidArgumentException(msg);
}
type_ = type;
}
} // namespace engine
} // namespace milvus
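
A brief usage sketch (editor's illustration, assuming the header that follows): a criteria string is a semicolon-separated list of key:value pairs in which only the keys disk and days are accepted; unknown keys are skipped with a warning, while a bad type or a non-integer value raises InvalidArgumentException.

#include "db/Options.h"

#include <iostream>

int
main() {
    using milvus::engine::ArchiveConf;

    ArchiveConf conf("delete", "disk:512;days:30");
    for (const auto& pair : conf.GetCriterias()) {
        std::cout << pair.first << " -> " << pair.second << std::endl;
    }

    try {
        ArchiveConf bad("keep");  // type must be "delete" or "swap"
    } catch (...) {
        std::cout << "rejected: invalid archive type" << std::endl;
    }
    return 0;
}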

82
core/src/db/Options.h Normal file

@ -0,0 +1,82 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "Constants.h"
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace milvus {
namespace engine {
class Env;
static const char* ARCHIVE_CONF_DISK = "disk";
static const char* ARCHIVE_CONF_DAYS = "days";
struct ArchiveConf {
using CriteriaT = std::map<std::string, int>;
explicit ArchiveConf(const std::string& type, const std::string& criterias = std::string());
const std::string&
GetType() const {
return type_;
}
    const CriteriaT&
    GetCriterias() const {
        return criterias_;
    }
    void
    SetCriterias(const ArchiveConf::CriteriaT& criteria);
private:
void
    ParseCriterias(const std::string& criterias);
void
ParseType(const std::string& type);
std::string type_;
CriteriaT criterias_;
};
struct DBMetaOptions {
std::string path_;
std::vector<std::string> slave_paths_;
std::string backend_uri_;
ArchiveConf archive_conf_ = ArchiveConf("delete");
}; // DBMetaOptions
struct DBOptions {
typedef enum { SINGLE = 0, CLUSTER_READONLY, CLUSTER_WRITABLE } MODE;
uint16_t merge_trigger_number_ = 2;
DBMetaOptions meta_;
int mode_ = MODE::SINGLE;
size_t insert_buffer_size_ = 4 * ONE_GB;
bool insert_cache_immediately_ = false;
}; // DBOptions
} // namespace engine
} // namespace milvus
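
For context, one hypothetical way a caller might populate these options (every value below is invented for illustration):

#include "db/Options.h"

int
main() {
    milvus::engine::DBOptions options;
    options.meta_.path_ = "/tmp/milvus_demo";  // invented path
    options.meta_.archive_conf_ = milvus::engine::ArchiveConf("swap", "days:7");
    options.merge_trigger_number_ = 4;   // invented merge-trigger value
    options.mode_ = milvus::engine::DBOptions::SINGLE;
    return 0;
}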

43
core/src/db/Types.h Normal file

@ -0,0 +1,43 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "db/engine/ExecutionEngine.h"
#include <stdint.h>
#include <utility>
#include <vector>
namespace milvus {
namespace engine {
typedef int64_t IDNumber;
typedef IDNumber* IDNumberPtr;
typedef std::vector<IDNumber> IDNumbers;
typedef std::vector<std::pair<IDNumber, double>> QueryResult;
typedef std::vector<QueryResult> QueryResults;
struct TableIndex {
    int32_t engine_type_ = static_cast<int32_t>(EngineType::FAISS_IDMAP);
    int32_t nlist_ = 16384;
    int32_t metric_type_ = static_cast<int32_t>(MetricType::L2);
};
} // namespace engine
} // namespace milvus
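
Finally, a small sketch of how these aliases compose (editor's illustration): QueryResult is a list of (id, distance) pairs for one query vector, QueryResults holds one such list per query, and TableIndex carries the defaults shown above.

#include "db/Types.h"

#include <iostream>

int
main() {
    milvus::engine::QueryResult hits = {{100, 0.12}, {257, 0.48}};
    milvus::engine::QueryResults all = {hits};

    milvus::engine::TableIndex index;  // defaults: FAISS_IDMAP, nlist 16384, L2
    std::cout << "engine=" << index.engine_type_ << " nlist=" << index.nlist_
              << " metric=" << index.metric_type_ << std::endl;

    for (const auto& hit : all[0]) {
        std::cout << "id=" << hit.first << " distance=" << hit.second << std::endl;
    }
    return 0;
}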

Some files were not shown because too many files have changed in this diff.