mirror of https://github.com/milvus-io/milvus.git
Resolve merge conflicts

Former-commit-id: a3a201d31004f08432c1af0cd15e7db4c3a51119
pull/191/head
commit 2aa3975371

@@ -1,7 +1,3 @@
third_party/thrift-0.12.0/
third_party/faiss-1.5.1/
third_party/bzip2-1.0.6/
third_party/sqlite3/
megasearch/
milvus/
conf/server_config.yaml
version.h

@@ -2,11 +2,15 @@
Please mark all change in change log and use the ticket from JIRA.

# MegaSearch 0.3.0 (TBD)
# Milvus 0.3.0 (TBD)

## Bug
- MS-80 - Fix server hang issue

## Improvement
- MS-82 - Update server startup welcome message
- MS-83 - Update vecwise to Milvus
- MS-77 - Performance issue of post-search action

## New Feature

@@ -14,15 +18,30 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-56 - Add version information when server is started
- MS-64 - Different table can have different index type
- MS-52 - Return search score
- MS-66 - Support time range query
- MS-68 - Remove rocksdb from third-party
- MS-70 - cmake: remove redundant libs in src
- MS-71 - cmake: fix faiss dependency
- MS-72 - cmake: change prometheus source to git
- MS-73 - cmake: delete civetweb
- MS-65 - Implement GetTableRowCount interface
- MS-45 - Implement DeleteTable interface
- MS-75 - cmake: change faiss version to 1.5.2; add CUDA gencode
- MS-81 - fix faiss ptx issue; change cuda gencode
- MS-84 - cmake: add arrow, jemalloc and jsoncons third party; default build option OFF
- MS-85 - add NetIO metric

## Task
- MS-74 - Change README.md in cpp

# MegaSearch 0.2.0 (2019-05-31)
# Milvus 0.2.0 (2019-05-31)

## Bug

- MS-32 - Fix thrift error
- MS-34 - Fix prometheus-cpp thirdparty
- MS-67 - Fix license check bug
- MS-76 - Fix pipeline crash bug

## Improvement

@@ -42,6 +61,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-37 - Add query, cache usage, disk write speed and file data size metrics
- MS-30 - Use faiss v1.5.2
- MS-54 - cmake: Change Thrift third party URL to github.com
- MS-69 - prometheus: add all proposed metrics

## Task

@@ -20,11 +20,12 @@ MACRO (GET_GIT_BRANCH_NAME GIT_BRANCH_NAME)
ENDMACRO (GET_GIT_BRANCH_NAME)

GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME)
string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME})
if(NOT GIT_BRANCH_NAME STREQUAL "")
string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME})
endif()

set(MEGASEARCH_VERSION "${GIT_BRANCH_NAME}")
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]" MEGASEARCH_VERSION "${MEGASEARCH_VERSION}")
message(STATUS "Build version = ${MEGASEARCH_VERSION}")
set(MILVUS_VERSION "${GIT_BRANCH_NAME}")
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]" MILVUS_VERSION "${MILVUS_VERSION}")

if(CMAKE_BUILD_TYPE STREQUAL "Release")
set(BUILD_TYPE "release")
@@ -33,27 +34,36 @@ else()
endif()
message(STATUS "Build type = ${BUILD_TYPE}")

configure_file(${CMAKE_CURRENT_SOURCE_DIR}/version.h.macro ${CMAKE_CURRENT_SOURCE_DIR}/version.h)
project(milvus VERSION "${MILVUS_VERSION}")
project(milvus_engine LANGUAGES CUDA CXX)

project(megasearch VERSION "${MEGASEARCH_VERSION}")
project(vecwise_engine LANGUAGES CUDA CXX)

set(MEGASEARCH_VERSION_MAJOR "${megasearch_VERSION_MAJOR}")
set(MEGASEARCH_VERSION_MINOR "${megasearch_VERSION_MINOR}")
set(MEGASEARCH_VERSION_PATCH "${megasearch_VERSION_PATCH}")

if(MEGASEARCH_VERSION_MAJOR STREQUAL ""
OR MEGASEARCH_VERSION_MINOR STREQUAL ""
OR MEGASEARCH_VERSION_PATCH STREQUAL "")
message(FATAL_ERROR "Failed to determine MegaSearch version from '${MEGASEARCH_VERSION}'")
# Ensure that a default make is set
if("${MAKE}" STREQUAL "")
if(NOT MSVC)
find_program(MAKE make)
endif()
endif()

message(STATUS "MegaSearch version: "
"${MEGASEARCH_VERSION_MAJOR}.${MEGASEARCH_VERSION_MINOR}.${MEGASEARCH_VERSION_PATCH} "
"(full: '${MEGASEARCH_VERSION}')")
set(MILVUS_VERSION_MAJOR "${milvus_VERSION_MAJOR}")
set(MILVUS_VERSION_MINOR "${milvus_VERSION_MINOR}")
set(MILVUS_VERSION_PATCH "${milvus_VERSION_PATCH}")

set(MEGASEARCH_SOURCE_DIR ${PROJECT_SOURCE_DIR})
set(MEGASEARCH_BINARY_DIR ${PROJECT_BINARY_DIR})
if(MILVUS_VERSION_MAJOR STREQUAL ""
OR MILVUS_VERSION_MINOR STREQUAL ""
OR MILVUS_VERSION_PATCH STREQUAL "")
message(WARNING "Failed to determine Milvus version from '${MILVUS_VERSION}'")
set(MILVUS_VERSION "unknown")
endif()

message(STATUS "Build version = ${MILVUS_VERSION}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/version.h.macro ${CMAKE_CURRENT_SOURCE_DIR}/version.h)

message(STATUS "Milvus version: "
"${MILVUS_VERSION_MAJOR}.${MILVUS_VERSION_MINOR}.${MILVUS_VERSION_PATCH} "
"(full: '${MILVUS_VERSION}')")

set(MILVUS_SOURCE_DIR ${PROJECT_SOURCE_DIR})
set(MILVUS_BINARY_DIR ${PROJECT_BINARY_DIR})

find_package(CUDA)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES -arch sm_60 --expt-extended-lambda")
@@ -69,23 +79,23 @@ set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED on)

if(CMAKE_SYSTEM_PROCESSOR MATCHES "(x86)|(X86)|(amd64)|(AMD64)")
message("building vecwise_engine on x86 architecture")
set(VECWISE_BUILD_ARCH x86_64)
message("building milvus_engine on x86 architecture")
set(MILVUS_BUILD_ARCH x86_64)
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "(ppc)")
message("building vecwise_engine on ppc architecture")
set(VECWISE_BUILD_ARCH ppc64le)
message("building milvus_engine on ppc architecture")
set(MILVUS_BUILD_ARCH ppc64le)
else()
message("unknown processor type")
message("CMAKE_SYSTEM_PROCESSOR=${CMAKE_SYSTEM_PROCESSOR}")
set(VECWISE_BUILD_ARCH unknown)
set(MILVUS_BUILD_ARCH unknown)
endif()

if(CMAKE_BUILD_TYPE STREQUAL "Release")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -fPIC -DELPP_THREAD_SAFE -fopenmp")
# if (GPU_VERSION STREQUAL "ON")
#     set(ENABLE_LICENSE "ON")
#     add_definitions("-DENABLE_LICENSE")
# endif ()
if (GPU_VERSION STREQUAL "ON")
set(ENABLE_LICENSE "ON")
add_definitions("-DENABLE_LICENSE")
endif ()
else()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -fPIC -DELPP_THREAD_SAFE -fopenmp")
endif()
@@ -94,34 +104,30 @@ endif()

set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake")

#if (BUILD_UNIT_TEST)
#    option(MEGASEARCH_BUILD_TESTS "Build the megasearch test suite" ON)
#endif(BUILD_UNIT_TEST)

include(ExternalProject)
include(DefineOptions)
include(BuildUtils)
include(ThirdPartyPackages)

include_directories(${MEGASEARCH_SOURCE_DIR})
link_directories(${MEGASEARCH_BINARY_DIR})
include_directories(${MILVUS_SOURCE_DIR})
link_directories(${MILVUS_BINARY_DIR})

## Following should be check

set(VECWISE_ENGINE_INCLUDE ${PROJECT_SOURCE_DIR}/include)
set(VECWISE_ENGINE_SRC ${PROJECT_SOURCE_DIR}/src)
#set(VECWISE_THIRD_PARTY ${CMAKE_CURRENT_SOURCE_DIR}/third_party)
#set(VECWISE_THIRD_PARTY_BUILD ${CMAKE_CURRENT_SOURCE_DIR}/third_party/build)
set(MILVUS_ENGINE_INCLUDE ${PROJECT_SOURCE_DIR}/include)
set(MILVUS_ENGINE_SRC ${PROJECT_SOURCE_DIR}/src)
#set(MILVUS_THIRD_PARTY ${CMAKE_CURRENT_SOURCE_DIR}/third_party)
#set(MILVUS_THIRD_PARTY_BUILD ${CMAKE_CURRENT_SOURCE_DIR}/third_party/build)

add_compile_definitions(PROFILER=${PROFILER})

include_directories(${VECWISE_ENGINE_INCLUDE})
include_directories(${VECWISE_ENGINE_SRC})
#include_directories(${VECWISE_THIRD_PARTY_BUILD}/include)
include_directories(${MILVUS_ENGINE_INCLUDE})
include_directories(${MILVUS_ENGINE_SRC})
#include_directories(${MILVUS_THIRD_PARTY_BUILD}/include)

link_directories(${CMAKE_CURRRENT_BINARY_DIR})
#link_directories(${VECWISE_THIRD_PARTY_BUILD}/lib)
#link_directories(${VECWISE_THIRD_PARTY_BUILD}/lib64)
#link_directories(${MILVUS_THIRD_PARTY_BUILD}/lib)
#link_directories(${MILVUS_THIRD_PARTY_BUILD}/lib64)
#execute_process(COMMAND bash build.sh
#                WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/third_party)

@@ -141,7 +147,7 @@ install(FILES
scripts)
install(FILES
conf/server_config.yaml
conf/vecwise_engine_log.conf
conf/log_config.conf
DESTINATION
conf)

@@ -2,41 +2,44 @@
#### Step 1: install necessary tools

centos7:
yum install gfortran libsqlite3-dev libsnappy-dev libzstd-dev bzip2
yum install gfortran flex bison

ubuntu16.04:
sudo apt-get install gfortran libsqlite3-dev libsnappy-dev libzstd-dev bzip2 liblz4-dev
sudo apt-get install gfortran flex bison

#### Step 2: build third-parties
Note: If you want to debug into third-parties, you can build debug with CXXFLAGS='-g -O0' with option
: -t Debug
#### Step 2: build (output to the cmake_build folder)
cmake_build/src/milvus_server is the server

cd [sourcecode path]/cpp/third_party
./build.sh -t Debug
./build.sh -t Release

#### Step 3: build (output to the cmake_build folder)
cmake_build/src/vecwise_server is the server

cmake_build/src/libvecwise_engine.a is the static library
cmake_build/src/libmilvus_engine.a is the static library

cd [sourcecode path]/cpp
./build.sh -t Debug
./build.sh -t Release
./build.sh -g # Build GPU version

If you encounter the following error when building:
`protocol https not supported or disabled in libcurl`

1. Install libcurl4-openssl-dev

2. Install cmake 3.14:

```
./bootstrap --system-curl
make
sudo make install
```

#### To build unittest:

./build.sh -u
or
./build.sh --unittest

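The steps above can be chained together for a first-time build; a minimal sketch, assuming the repository layout and build.sh options described above:

```sh
cd [sourcecode path]/cpp
./build.sh -t Release                  # CPU-only build; add -g for the GPU version
./build.sh -t Release -u               # optional: rebuild with unit tests enabled
ls cmake_build/src/milvus_server       # server binary, as noted above
ls cmake_build/src/libmilvus_engine.a  # engine static library
```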

### Launch server
Set config in cpp/conf/server_config.yaml

Then launch server with config:

cd [build output path]
start_server.sh
stop_server.sh
@@ -44,7 +47,7 @@ Then launch server with config:
### Launch test_client (only for debug)
If you want to test the remote API, you can build test_client.
test_client uses the same config file as the server:

cd [build output path]/test_client
test_client -c [sourcecode path]/cpp/conf/server_config.yaml

@@ -3,7 +3,7 @@
BUILD_TYPE="Debug"
BUILD_UNITTEST="off"
BUILD_GPU="OFF"
INSTALL_PREFIX=$(pwd)/megasearch
INSTALL_PREFIX=$(pwd)/milvus
MAKE_CLEAN="OFF"

while getopts "p:t:uhgr" arg

@@ -75,7 +75,7 @@ fi
make -j 4 || exit 1

if [[ ${BUILD_TYPE} != "Debug" ]]; then
strip src/vecwise_server
strip src/milvus_server
fi

make install

@@ -1,12 +1,12 @@

macro(set_option_category name)
set(MEGASEARCH_OPTION_CATEGORY ${name})
list(APPEND "MEGASEARCH_OPTION_CATEGORIES" ${name})
set(MILVUS_OPTION_CATEGORY ${name})
list(APPEND "MILVUS_OPTION_CATEGORIES" ${name})
endmacro()

macro(define_option name description default)
option(${name} ${description} ${default})
list(APPEND "MEGASEARCH_${MEGASEARCH_OPTION_CATEGORY}_OPTION_NAMES" ${name})
list(APPEND "MILVUS_${MILVUS_OPTION_CATEGORY}_OPTION_NAMES" ${name})
set("${name}_OPTION_DESCRIPTION" ${description})
set("${name}_OPTION_DEFAULT" ${default})
set("${name}_OPTION_TYPE" "bool")

@@ -28,7 +28,7 @@ endfunction()

macro(define_option_string name description default)
set(${name} ${default} CACHE STRING ${description})
list(APPEND "MEGASEARCH_${MEGASEARCH_OPTION_CATEGORY}_OPTION_NAMES" ${name})
list(APPEND "MILVUS_${MILVUS_OPTION_CATEGORY}_OPTION_NAMES" ${name})
set("${name}_OPTION_DESCRIPTION" ${description})
set("${name}_OPTION_DEFAULT" "\"${default}\"")
set("${name}_OPTION_TYPE" "string")

@@ -43,65 +43,69 @@ endmacro()
#----------------------------------------------------------------------
set_option_category("Thirdparty")

set(MEGASEARCH_DEPENDENCY_SOURCE_DEFAULT "AUTO")
set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "AUTO")

define_option_string(MEGASEARCH_DEPENDENCY_SOURCE
"Method to use for acquiring MEGASEARCH's build dependencies"
"${MEGASEARCH_DEPENDENCY_SOURCE_DEFAULT}"
define_option_string(MILVUS_DEPENDENCY_SOURCE
"Method to use for acquiring MILVUS's build dependencies"
"${MILVUS_DEPENDENCY_SOURCE_DEFAULT}"
"AUTO"
"BUNDLED"
"SYSTEM")

define_option(MEGASEARCH_VERBOSE_THIRDPARTY_BUILD
define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD
"Show output from ExternalProjects rather than just logging to files" ON)

define_option(MEGASEARCH_BOOST_USE_SHARED "Rely on boost shared libraries where relevant" OFF)
define_option(MILVUS_WITH_ARROW "Build with ARROW" OFF)

define_option(MEGASEARCH_BOOST_VENDORED "Use vendored Boost instead of existing Boost. \
define_option(MILVUS_BOOST_USE_SHARED "Rely on boost shared libraries where relevant" OFF)

define_option(MILVUS_BOOST_VENDORED "Use vendored Boost instead of existing Boost. \
Note that this requires linking Boost statically" ON)

define_option(MEGASEARCH_BOOST_HEADER_ONLY "Use only BOOST headers" OFF)
define_option(MILVUS_BOOST_HEADER_ONLY "Use only BOOST headers" OFF)

define_option(MEGASEARCH_WITH_BZ2 "Build with BZ2 compression" ON)
define_option(MILVUS_WITH_BZ2 "Build with BZ2 compression" ON)

define_option(MEGASEARCH_WITH_EASYLOGGINGPP "Build with Easylogging++ library" ON)
define_option(MILVUS_WITH_EASYLOGGINGPP "Build with Easylogging++ library" ON)

define_option(MEGASEARCH_WITH_FAISS "Build with FAISS library" ON)
define_option(MILVUS_WITH_FAISS "Build with FAISS library" ON)

define_option(MEGASEARCH_WITH_FAISS_GPU_VERSION "Build with FAISS GPU version" ON)
define_option(MILVUS_WITH_FAISS_GPU_VERSION "Build with FAISS GPU version" ON)

define_option_string(MEGASEARCH_FAISS_GPU_ARCH "Specifying which GPU architectures to build against"
"-gencode=arch=compute_61,code=sm_61")
#define_option_string(MILVUS_FAISS_GPU_ARCH "Specifying which GPU architectures to build against"
#        "-gencode=arch=compute_35,code=compute_35 -gencode=arch=compute_52,code=compute_52 -gencode=arch=compute_60,code=compute_60 -gencode=arch=compute_61,code=compute_61")

define_option(MEGASEARCH_WITH_LAPACK "Build with LAPACK library" ON)
define_option(MILVUS_WITH_LAPACK "Build with LAPACK library" ON)

define_option(MEGASEARCH_WITH_LZ4 "Build with lz4 compression" ON)
define_option(MILVUS_WITH_LZ4 "Build with lz4 compression" ON)

define_option(MEGASEARCH_WITH_OPENBLAS "Build with OpenBLAS library" ON)
define_option(MILVUS_WITH_JSONCONS "Build with JSONCONS" OFF)

define_option(MEGASEARCH_WITH_PROMETHEUS "Build with PROMETHEUS library" ON)
define_option(MILVUS_WITH_OPENBLAS "Build with OpenBLAS library" ON)

define_option(MEGASEARCH_WITH_ROCKSDB "Build with RocksDB library" ON)
define_option(MILVUS_WITH_PROMETHEUS "Build with PROMETHEUS library" ON)

define_option(MEGASEARCH_WITH_SNAPPY "Build with Snappy compression" ON)
define_option(MILVUS_WITH_ROCKSDB "Build with RocksDB library" OFF)

define_option(MEGASEARCH_WITH_SQLITE "Build with SQLite library" ON)
define_option(MILVUS_WITH_SNAPPY "Build with Snappy compression" ON)

define_option(MEGASEARCH_WITH_SQLITE_ORM "Build with SQLite ORM library" ON)
define_option(MILVUS_WITH_SQLITE "Build with SQLite library" ON)

define_option(MEGASEARCH_WITH_THRIFT "Build with Apache Thrift library" ON)
define_option(MILVUS_WITH_SQLITE_ORM "Build with SQLite ORM library" ON)

define_option(MEGASEARCH_WITH_YAMLCPP "Build with yaml-cpp library" ON)
define_option(MILVUS_WITH_THRIFT "Build with Apache Thrift library" ON)

define_option(MEGASEARCH_WITH_ZLIB "Build with zlib compression" ON)
define_option(MILVUS_WITH_YAMLCPP "Build with yaml-cpp library" ON)

define_option(MILVUS_WITH_ZLIB "Build with zlib compression" ON)

if(CMAKE_VERSION VERSION_LESS 3.7)
set(MEGASEARCH_WITH_ZSTD_DEFAULT OFF)
set(MILVUS_WITH_ZSTD_DEFAULT OFF)
else()
# ExternalProject_Add(SOURCE_SUBDIR) is available since CMake 3.7.
set(MEGASEARCH_WITH_ZSTD_DEFAULT ON)
set(MILVUS_WITH_ZSTD_DEFAULT ON)
endif()
define_option(MEGASEARCH_WITH_ZSTD "Build with zstd compression" ${MEGASEARCH_WITH_ZSTD_DEFAULT})
define_option(MILVUS_WITH_ZSTD "Build with zstd compression" ${MILVUS_WITH_ZSTD_DEFAULT})
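All of the options above are ordinary CMake cache variables, so they can be toggled at configure time; a hedged sketch (invoking cmake directly rather than through build.sh is an assumption here, option names are taken from the list above):

```sh
# Sketch: override selected third-party options when configuring.
mkdir -p cmake_build && cd cmake_build
cmake -DCMAKE_BUILD_TYPE=Release \
      -DMILVUS_WITH_PROMETHEUS=ON \
      -DMILVUS_WITH_ROCKSDB=OFF \
      -DMILVUS_VERBOSE_THIRDPARTY_BUILD=ON \
      ..
make -j 4
```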

#----------------------------------------------------------------------
if(MSVC)

@@ -111,7 +115,7 @@ if(MSVC)
"Pass verbose linking options when linking libraries and executables"
OFF)

define_option(MEGASEARCH_USE_STATIC_CRT "Build MEGASEARCH with statically linked CRT" OFF)
define_option(MILVUS_USE_STATIC_CRT "Build MILVUS with statically linked CRT" OFF)
endif()

@@ -119,15 +123,15 @@ endif()
set_option_category("Test and benchmark")

if (BUILD_UNIT_TEST)
define_option(MEGASEARCH_BUILD_TESTS "Build the MEGASEARCH googletest unit tests" ON)
define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" ON)
else()
define_option(MEGASEARCH_BUILD_TESTS "Build the MEGASEARCH googletest unit tests" OFF)
define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" OFF)
endif(BUILD_UNIT_TEST)

#----------------------------------------------------------------------
macro(config_summary)
message(STATUS "---------------------------------------------------------------------")
message(STATUS "MEGASEARCH version: ${MEGASEARCH_VERSION}")
message(STATUS "MILVUS version: ${MILVUS_VERSION}")
message(STATUS)
message(STATUS "Build configuration summary:")

@@ -139,12 +143,12 @@ macro(config_summary)
STATUS "  Compile commands: ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json")
endif()

foreach(category ${MEGASEARCH_OPTION_CATEGORIES})
foreach(category ${MILVUS_OPTION_CATEGORIES})

message(STATUS)
message(STATUS "${category} options:")

set(option_names ${MEGASEARCH_${category}_OPTION_NAMES})
set(option_names ${MILVUS_${category}_OPTION_NAMES})

set(max_value_length 0)
foreach(name ${option_names})

@@ -15,13 +15,15 @@
# specific language governing permissions and limitations
# under the License.

set(MEGASEARCH_THIRDPARTY_DEPENDENCIES
set(MILVUS_THIRDPARTY_DEPENDENCIES

ARROW
BOOST
BZip2
Easylogging++
FAISS
GTest
JSONCONS
LAPACK
Lz4
OpenBLAS

@@ -35,17 +37,19 @@ set(MEGASEARCH_THIRDPARTY_DEPENDENCIES
ZLIB
ZSTD)

message(STATUS "Using ${MEGASEARCH_DEPENDENCY_SOURCE} approach to find dependencies")
message(STATUS "Using ${MILVUS_DEPENDENCY_SOURCE} approach to find dependencies")

# For each dependency, set dependency source to global default, if unset
foreach(DEPENDENCY ${MEGASEARCH_THIRDPARTY_DEPENDENCIES})
foreach(DEPENDENCY ${MILVUS_THIRDPARTY_DEPENDENCIES})
if("${${DEPENDENCY}_SOURCE}" STREQUAL "")
set(${DEPENDENCY}_SOURCE ${MEGASEARCH_DEPENDENCY_SOURCE})
set(${DEPENDENCY}_SOURCE ${MILVUS_DEPENDENCY_SOURCE})
endif()
endforeach()

macro(build_dependency DEPENDENCY_NAME)
if("${DEPENDENCY_NAME}" STREQUAL "BZip2")
if("${DEPENDENCY_NAME}" STREQUAL "ARROW")
build_arrow()
elseif("${DEPENDENCY_NAME}" STREQUAL "BZip2")
build_bzip2()
elseif("${DEPENDENCY_NAME}" STREQUAL "Easylogging++")
build_easyloggingpp()

@@ -57,6 +61,8 @@ macro(build_dependency DEPENDENCY_NAME)
build_lz4()
elseif ("${DEPENDENCY_NAME}" STREQUAL "GTest")
build_gtest()
elseif ("${DEPENDENCY_NAME}" STREQUAL "JSONCONS")
build_jsoncons()
elseif ("${DEPENDENCY_NAME}" STREQUAL "OpenBLAS")
build_openblas()
elseif ("${DEPENDENCY_NAME}" STREQUAL "Prometheus")
@@ -139,7 +145,7 @@ set(EP_COMMON_CMAKE_ARGS
-DCMAKE_CXX_FLAGS=${EP_CXX_FLAGS}
-DCMAKE_CXX_FLAGS_${UPPERCASE_BUILD_TYPE}=${EP_CXX_FLAGS})

if(NOT MEGASEARCH_VERBOSE_THIRDPARTY_BUILD)
if(NOT MILVUS_VERBOSE_THIRDPARTY_BUILD)
set(EP_LOG_OPTIONS LOG_CONFIGURE 1 LOG_BUILD 1 LOG_INSTALL 1 LOG_DOWNLOAD 1)
else()
set(EP_LOG_OPTIONS)

@@ -155,7 +161,6 @@ endif()
set(MAKE_BUILD_ARGS "-j2")

## Using make -j in sub-make is fragile
## see discussion https://github.com/apache/MEGASEARCH/pull/2779
#if(${CMAKE_GENERATOR} MATCHES "Makefiles")
#    set(MAKE_BUILD_ARGS "")
#else()

@@ -174,7 +179,7 @@ find_package(Threads REQUIRED)
# offline builds

# Read toolchain versions from cpp/thirdparty/versions.txt
set(THIRDPARTY_DIR "${MEGASEARCH_SOURCE_DIR}/thirdparty")
set(THIRDPARTY_DIR "${MILVUS_SOURCE_DIR}/thirdparty")
file(STRINGS "${THIRDPARTY_DIR}/versions.txt" TOOLCHAIN_VERSIONS_TXT)
foreach(_VERSION_ENTRY ${TOOLCHAIN_VERSIONS_TXT})
# Exclude comments

@@ -196,8 +201,16 @@ foreach(_VERSION_ENTRY ${TOOLCHAIN_VERSIONS_TXT})
set(${_LIB_NAME} "${_LIB_VERSION}")
endforeach()

if(DEFINED ENV{MEGASEARCH_BOOST_URL})
set(BOOST_SOURCE_URL "$ENV{MEGASEARCH_BOOST_URL}")
if(DEFINED ENV{MILVUS_ARROW_URL})
set(ARROW_SOURCE_URL "$ENV{MILVUS_ARROW_URL}")
else()
set(ARROW_SOURCE_URL
"https://github.com/youny626/arrow.git"
)
endif()

if(DEFINED ENV{MILVUS_BOOST_URL})
set(BOOST_SOURCE_URL "$ENV{MILVUS_BOOST_URL}")
else()
string(REPLACE "." "_" BOOST_VERSION_UNDERSCORES ${BOOST_VERSION})
set(BOOST_SOURCE_URL
@ -205,115 +218,210 @@ else()
|
|||
)
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_BZIP2_URL})
|
||||
set(BZIP2_SOURCE_URL "$ENV{MEGASEARCH_BZIP2_URL}")
|
||||
if(DEFINED ENV{MILVUS_BZIP2_URL})
|
||||
set(BZIP2_SOURCE_URL "$ENV{MILVUS_BZIP2_URL}")
|
||||
else()
|
||||
set(BZIP2_SOURCE_URL "https://fossies.org/linux/misc/bzip2-${BZIP2_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_EASYLOGGINGPP_URL})
|
||||
set(EASYLOGGINGPP_SOURCE_URL "$ENV{MEGASEARCH_EASYLOGGINGPP_URL}")
|
||||
if(DEFINED ENV{MILVUS_EASYLOGGINGPP_URL})
|
||||
set(EASYLOGGINGPP_SOURCE_URL "$ENV{MILVUS_EASYLOGGINGPP_URL}")
|
||||
else()
|
||||
set(EASYLOGGINGPP_SOURCE_URL "https://github.com/zuhd-org/easyloggingpp/archive/${EASYLOGGINGPP_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_FAISS_URL})
|
||||
set(FAISS_SOURCE_URL "$ENV{MEGASEARCH_FAISS_URL}")
|
||||
if(DEFINED ENV{MILVUS_FAISS_URL})
|
||||
set(FAISS_SOURCE_URL "$ENV{MILVUS_FAISS_URL}")
|
||||
else()
|
||||
set(FAISS_SOURCE_URL "https://github.com/JinHai-CN/faiss/archive/${FAISS_VERSION}.tar.gz")
|
||||
set(FAISS_SOURCE_URL "https://github.com/facebookresearch/faiss/archive/${FAISS_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if (DEFINED ENV{MEGASEARCH_GTEST_URL})
|
||||
set(GTEST_SOURCE_URL "$ENV{MEGASEARCH_GTEST_URL}")
|
||||
if (DEFINED ENV{MILVUS_GTEST_URL})
|
||||
set(GTEST_SOURCE_URL "$ENV{MILVUS_GTEST_URL}")
|
||||
else ()
|
||||
set(GTEST_SOURCE_URL
|
||||
"https://github.com/google/googletest/archive/release-${GTEST_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_LAPACK_URL})
|
||||
set(LAPACK_SOURCE_URL "$ENV{MEGASEARCH_LAPACK_URL}")
|
||||
if (DEFINED ENV{MILVUS_JSONCONS_URL})
|
||||
set(JSONCONS_SOURCE_URL "$ENV{MILVUS_JSONCONS_URL}")
|
||||
else ()
|
||||
set(JSONCONS_SOURCE_URL
|
||||
"https://github.com/danielaparker/jsoncons/archive/v${JSONCONS_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MILVUS_LAPACK_URL})
|
||||
set(LAPACK_SOURCE_URL "$ENV{MILVUS_LAPACK_URL}")
|
||||
else()
|
||||
set(LAPACK_SOURCE_URL "https://github.com/Reference-LAPACK/lapack/archive/${LAPACK_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_LZ4_URL})
|
||||
set(LZ4_SOURCE_URL "$ENV{MEGASEARCH_LZ4_URL}")
|
||||
if(DEFINED ENV{MILVUS_LZ4_URL})
|
||||
set(LZ4_SOURCE_URL "$ENV{MILVUS_LZ4_URL}")
|
||||
else()
|
||||
set(LZ4_SOURCE_URL "https://github.com/lz4/lz4/archive/${LZ4_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if (DEFINED ENV{MEGASEARCH_OPENBLAS_URL})
|
||||
set(OPENBLAS_SOURCE_URL "$ENV{MEGASEARCH_OPENBLAS_URL}")
|
||||
if (DEFINED ENV{MILVUS_OPENBLAS_URL})
|
||||
set(OPENBLAS_SOURCE_URL "$ENV{MILVUS_OPENBLAS_URL}")
|
||||
else ()
|
||||
set(OPENBLAS_SOURCE_URL
|
||||
"https://github.com/xianyi/OpenBLAS/archive/${OPENBLAS_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if (DEFINED ENV{MEGASEARCH_PROMETHEUS_URL})
|
||||
if (DEFINED ENV{MILVUS_PROMETHEUS_URL})
|
||||
set(PROMETHEUS_SOURCE_URL "$ENV{PROMETHEUS_OPENBLAS_URL}")
|
||||
else ()
|
||||
set(PROMETHEUS_SOURCE_URL
|
||||
"https://github.com/JinHai-CN/prometheus-cpp/archive/${PROMETHEUS_VERSION}.tar.gz")
|
||||
#"https://github.com/JinHai-CN/prometheus-cpp/archive/${PROMETHEUS_VERSION}.tar.gz"
|
||||
https://github.com/jupp0r/prometheus-cpp.git)
|
||||
endif()
|
||||
|
||||
if (DEFINED ENV{MEGASEARCH_ROCKSDB_URL})
|
||||
set(ROCKSDB_SOURCE_URL "$ENV{MEGASEARCH_ROCKSDB_URL}")
|
||||
if (DEFINED ENV{MILVUS_ROCKSDB_URL})
|
||||
set(ROCKSDB_SOURCE_URL "$ENV{MILVUS_ROCKSDB_URL}")
|
||||
else ()
|
||||
set(ROCKSDB_SOURCE_URL
|
||||
"https://github.com/facebook/rocksdb/archive/${ROCKSDB_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_SNAPPY_URL})
|
||||
set(SNAPPY_SOURCE_URL "$ENV{MEGASEARCH_SNAPPY_URL}")
|
||||
if(DEFINED ENV{MILVUS_SNAPPY_URL})
|
||||
set(SNAPPY_SOURCE_URL "$ENV{MILVUS_SNAPPY_URL}")
|
||||
else()
|
||||
set(SNAPPY_SOURCE_URL
|
||||
"https://github.com/google/snappy/archive/${SNAPPY_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_SQLITE_URL})
|
||||
set(SQLITE_SOURCE_URL "$ENV{MEGASEARCH_SQLITE_URL}")
|
||||
if(DEFINED ENV{MILVUS_SQLITE_URL})
|
||||
set(SQLITE_SOURCE_URL "$ENV{MILVUS_SQLITE_URL}")
|
||||
else()
|
||||
set(SQLITE_SOURCE_URL
|
||||
"https://www.sqlite.org/2019/sqlite-autoconf-${SQLITE_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_SQLITE_ORM_URL})
|
||||
set(SQLITE_ORM_SOURCE_URL "$ENV{MEGASEARCH_SQLITE_ORM_URL}")
|
||||
if(DEFINED ENV{MILVUS_SQLITE_ORM_URL})
|
||||
set(SQLITE_ORM_SOURCE_URL "$ENV{MILVUS_SQLITE_ORM_URL}")
|
||||
else()
|
||||
set(SQLITE_ORM_SOURCE_URL
|
||||
"https://github.com/fnc12/sqlite_orm/archive/${SQLITE_ORM_VERSION}.zip")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_THRIFT_URL})
|
||||
set(THRIFT_SOURCE_URL "$ENV{MEGASEARCH_THRIFT_URL}")
|
||||
if(DEFINED ENV{MILVUS_THRIFT_URL})
|
||||
set(THRIFT_SOURCE_URL "$ENV{MILVUS_THRIFT_URL}")
|
||||
else()
|
||||
set(THRIFT_SOURCE_URL
|
||||
"https://github.com/apache/thrift/archive/${THRIFT_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_YAMLCPP_URL})
|
||||
set(YAMLCPP_SOURCE_URL "$ENV{MEGASEARCH_YAMLCPP_URL}")
|
||||
if(DEFINED ENV{MILVUS_YAMLCPP_URL})
|
||||
set(YAMLCPP_SOURCE_URL "$ENV{MILVUS_YAMLCPP_URL}")
|
||||
else()
|
||||
set(YAMLCPP_SOURCE_URL "https://github.com/jbeder/yaml-cpp/archive/yaml-cpp-${YAMLCPP_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_ZLIB_URL})
|
||||
set(ZLIB_SOURCE_URL "$ENV{MEGASEARCH_ZLIB_URL}")
|
||||
if(DEFINED ENV{MILVUS_ZLIB_URL})
|
||||
set(ZLIB_SOURCE_URL "$ENV{MILVUS_ZLIB_URL}")
|
||||
else()
|
||||
set(ZLIB_SOURCE_URL "https://github.com/madler/zlib/archive/${ZLIB_VERSION}.tar.gz")
|
||||
endif()
|
||||
|
||||
if(DEFINED ENV{MEGASEARCH_ZSTD_URL})
|
||||
set(ZSTD_SOURCE_URL "$ENV{MEGASEARCH_ZSTD_URL}")
|
||||
if(DEFINED ENV{MILVUS_ZSTD_URL})
|
||||
set(ZSTD_SOURCE_URL "$ENV{MILVUS_ZSTD_URL}")
|
||||
else()
|
||||
set(ZSTD_SOURCE_URL "https://github.com/facebook/zstd/archive/${ZSTD_VERSION}.tar.gz")
|
||||
endif()
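Every dependency above follows the same pattern: its download location can be overridden through a MILVUS_<NAME>_URL environment variable before configuring, which helps with offline or mirrored builds. A hedged sketch (the mirror paths below are hypothetical):

```sh
# Sketch: point selected dependencies at a local mirror before building.
export MILVUS_BOOST_URL=file:///opt/mirrors/boost.tar.gz        # hypothetical path
export MILVUS_FAISS_URL=file:///opt/mirrors/faiss-1.5.2.tar.gz  # hypothetical path
./build.sh -t Release
```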
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# ARROW
|
||||
|
||||
macro(build_arrow)
|
||||
message(STATUS "Building Apache ARROW-${ARROW_VERSION} from source")
|
||||
set(ARROW_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/arrow_ep-prefix/src/arrow_ep/cpp")
|
||||
set(ARROW_STATIC_LIB_NAME arrow)
|
||||
# set(ARROW_CUDA_STATIC_LIB_NAME arrow_cuda)
|
||||
set(ARROW_STATIC_LIB
|
||||
"${ARROW_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${ARROW_STATIC_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}"
|
||||
)
|
||||
# set(ARROW_CUDA_STATIC_LIB
|
||||
# "${ARROW_PREFIX}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${ARROW_CUDA_STATIC_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}"
|
||||
# )
|
||||
set(ARROW_INCLUDE_DIR "${ARROW_PREFIX}/include")
|
||||
|
||||
set(ARROW_CMAKE_ARGS
|
||||
${EP_COMMON_CMAKE_ARGS}
|
||||
# "-DARROW_THRIFT_URL=${THRIFT_SOURCE_URL}"
|
||||
#"env ARROW_THRIFT_URL=${THRIFT_SOURCE_URL}"
|
||||
-DARROW_BUILD_STATIC=ON
|
||||
-DARROW_BUILD_SHARED=OFF
|
||||
-DARROW_PARQUET=ON
|
||||
-DARROW_USE_GLOG=OFF
|
||||
-DCMAKE_INSTALL_PREFIX=${ARROW_PREFIX}
|
||||
"-DCMAKE_LIBRARY_PATH=${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs"
|
||||
-DCMAKE_BUILD_TYPE=Release)
|
||||
|
||||
# set($ENV{ARROW_THRIFT_URL} ${THRIFT_SOURCE_URL})
|
||||
|
||||
externalproject_add(arrow_ep
|
||||
GIT_REPOSITORY
|
||||
${ARROW_SOURCE_URL}
|
||||
GIT_TAG
|
||||
${ARROW_VERSION}
|
||||
GIT_SHALLOW
|
||||
TRUE
|
||||
# SOURCE_DIR
|
||||
# ${ARROW_PREFIX}
|
||||
# BINARY_DIR
|
||||
# ${ARROW_PREFIX}
|
||||
SOURCE_SUBDIR
|
||||
cpp
|
||||
# COMMAND
|
||||
# "export \"ARROW_THRIFT_URL=${THRIFT_SOURCE_URL}\""
|
||||
${EP_LOG_OPTIONS}
|
||||
CMAKE_ARGS
|
||||
${ARROW_CMAKE_ARGS}
|
||||
BUILD_COMMAND
|
||||
${MAKE}
|
||||
${MAKE_BUILD_ARGS}
|
||||
INSTALL_COMMAND
|
||||
${MAKE} install
|
||||
# BUILD_IN_SOURCE
|
||||
# 1
|
||||
BUILD_BYPRODUCTS
|
||||
"${ARROW_STATIC_LIB}"
|
||||
# "${ARROW_CUDA_STATIC_LIB}"
|
||||
)
|
||||
|
||||
# ExternalProject_Add_StepDependencies(arrow_ep build thrift_ep)
|
||||
|
||||
file(MAKE_DIRECTORY "${ARROW_PREFIX}/include")
|
||||
add_library(arrow STATIC IMPORTED)
|
||||
set_target_properties(arrow
|
||||
PROPERTIES IMPORTED_LOCATION "${ARROW_STATIC_LIB}"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${ARROW_INCLUDE_DIR}")
|
||||
# INTERFACE_LINK_LIBRARIES thrift)
|
||||
add_dependencies(arrow arrow_ep)
|
||||
|
||||
set(JEMALLOC_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/arrow_ep-prefix/src/arrow_ep-build/jemalloc_ep-prefix/src/jemalloc_ep")
|
||||
|
||||
add_custom_command(TARGET arrow_ep POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${ARROW_PREFIX}/lib/
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${JEMALLOC_PREFIX}/lib/libjemalloc_pic.a ${ARROW_PREFIX}/lib/
|
||||
DEPENDS ${JEMALLOC_PREFIX}/lib/libjemalloc_pic.a)
|
||||
|
||||
endmacro()
|
||||
|
||||
if(MILVUS_WITH_ARROW)
|
||||
|
||||
resolve_dependency(ARROW)
|
||||
|
||||
link_directories(SYSTEM ${ARROW_PREFIX}/lib/)
|
||||
include_directories(SYSTEM ${ARROW_INCLUDE_DIR})
|
||||
endif()
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Add Boost dependencies (code adapted from Apache Kudu (incubating))
|
||||
|
||||
set(Boost_USE_MULTITHREADED ON)
|
||||
if(MSVC AND MEGASEARCH_USE_STATIC_CRT)
|
||||
if(MSVC AND MILVUS_USE_STATIC_CRT)
|
||||
set(Boost_USE_STATIC_RUNTIME ON)
|
||||
endif()
|
||||
set(Boost_ADDITIONAL_VERSIONS
|
||||
|
@ -340,7 +448,7 @@ set(Boost_ADDITIONAL_VERSIONS
|
|||
"1.60.0"
|
||||
"1.60")
|
||||
|
||||
if(MEGASEARCH_BOOST_VENDORED)
|
||||
if(MILVUS_BOOST_VENDORED)
|
||||
set(BOOST_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/boost_ep-prefix/src/boost_ep")
|
||||
set(BOOST_LIB_DIR "${BOOST_PREFIX}/stage/lib")
|
||||
set(BOOST_BUILD_LINK "static")
|
||||
|
@ -357,7 +465,7 @@ if(MEGASEARCH_BOOST_VENDORED)
|
|||
set(BOOST_FILESYSTEM_LIBRARY boost_filesystem_static)
|
||||
set(BOOST_SERIALIZATION_LIBRARY boost_serialization_static)
|
||||
|
||||
if(MEGASEARCH_BOOST_HEADER_ONLY)
|
||||
if(MILVUS_BOOST_HEADER_ONLY)
|
||||
set(BOOST_BUILD_PRODUCTS)
|
||||
set(BOOST_CONFIGURE_COMMAND "")
|
||||
set(BOOST_BUILD_COMMAND "")
|
||||
|
@ -383,7 +491,7 @@ if(MEGASEARCH_BOOST_VENDORED)
|
|||
|
||||
add_thirdparty_lib(boost_serialization STATIC_LIB "${BOOST_STATIC_SERIALIZATION_LIBRARY}")
|
||||
|
||||
set(MEGASEARCH_BOOST_LIBS ${BOOST_SYSTEM_LIBRARY} ${BOOST_FILESYSTEM_LIBRARY} ${BOOST_STATIC_SERIALIZATION_LIBRARY})
|
||||
set(MILVUS_BOOST_LIBS ${BOOST_SYSTEM_LIBRARY} ${BOOST_FILESYSTEM_LIBRARY} ${BOOST_STATIC_SERIALIZATION_LIBRARY})
|
||||
endif()
|
||||
externalproject_add(boost_ep
|
||||
URL
|
||||
|
@ -417,7 +525,7 @@ else()
|
|||
# set(Boost_NO_SYSTEM_PATHS ON)
|
||||
# endif()
|
||||
|
||||
if(MEGASEARCH_BOOST_USE_SHARED)
|
||||
if(MILVUS_BOOST_USE_SHARED)
|
||||
# Find shared Boost libraries.
|
||||
set(Boost_USE_STATIC_LIBS OFF)
|
||||
set(BUILD_SHARED_LIBS_KEEP ${BUILD_SHARED_LIBS})
|
||||
|
@ -428,14 +536,14 @@ else()
|
|||
add_definitions(-DBOOST_ALL_DYN_LINK)
|
||||
endif()
|
||||
|
||||
if(MEGASEARCH_BOOST_HEADER_ONLY)
|
||||
if(MILVUS_BOOST_HEADER_ONLY)
|
||||
find_package(Boost REQUIRED)
|
||||
else()
|
||||
find_package(Boost COMPONENTS serialization system filesystem REQUIRED)
|
||||
set(BOOST_SYSTEM_LIBRARY Boost::system)
|
||||
set(BOOST_FILESYSTEM_LIBRARY Boost::filesystem)
|
||||
set(BOOST_SERIALIZATION_LIBRARY Boost::serialization)
|
||||
set(MEGASEARCH_BOOST_LIBS ${BOOST_SYSTEM_LIBRARY} ${BOOST_FILESYSTEM_LIBRARY})
|
||||
set(MILVUS_BOOST_LIBS ${BOOST_SYSTEM_LIBRARY} ${BOOST_FILESYSTEM_LIBRARY})
|
||||
endif()
|
||||
set(BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS_KEEP})
|
||||
unset(BUILD_SHARED_LIBS_KEEP)
|
||||
|
@ -443,14 +551,14 @@ else()
|
|||
# Find static boost headers and libs
|
||||
# TODO Differentiate here between release and debug builds
|
||||
set(Boost_USE_STATIC_LIBS ON)
|
||||
if(MEGASEARCH_BOOST_HEADER_ONLY)
|
||||
if(MILVUS_BOOST_HEADER_ONLY)
|
||||
find_package(Boost REQUIRED)
|
||||
else()
|
||||
find_package(Boost COMPONENTS serialization system filesystem REQUIRED)
|
||||
set(BOOST_SYSTEM_LIBRARY Boost::system)
|
||||
set(BOOST_FILESYSTEM_LIBRARY Boost::filesystem)
|
||||
set(BOOST_SERIALIZATION_LIBRARY Boost::serialization)
|
||||
set(MEGASEARCH_BOOST_LIBS ${BOOST_SYSTEM_LIBRARY} ${BOOST_FILESYSTEM_LIBRARY})
|
||||
set(MILVUS_BOOST_LIBS ${BOOST_SYSTEM_LIBRARY} ${BOOST_FILESYSTEM_LIBRARY})
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
@ -503,7 +611,7 @@ macro(build_bzip2)
|
|||
add_dependencies(bzip2 bzip2_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_BZ2)
|
||||
if(MILVUS_WITH_BZ2)
|
||||
resolve_dependency(BZip2)
|
||||
|
||||
if(NOT TARGET bzip2)
|
||||
|
@ -555,7 +663,7 @@ macro(build_easyloggingpp)
|
|||
add_dependencies(easyloggingpp easyloggingpp_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_EASYLOGGINGPP)
|
||||
if(MILVUS_WITH_EASYLOGGINGPP)
|
||||
resolve_dependency(Easylogging++)
|
||||
|
||||
get_target_property(EASYLOGGINGPP_INCLUDE_DIR easyloggingpp INTERFACE_INCLUDE_DIRECTORIES)
|
||||
|
@ -601,7 +709,7 @@ macro(build_openblas)
|
|||
add_dependencies(openblas openblas_ep)
|
||||
endmacro()
|
||||
|
||||
#if(MEGASEARCH_WITH_OPENBLAS)
|
||||
#if(MILVUS_WITH_OPENBLAS)
|
||||
# resolve_dependency(OpenBLAS)
|
||||
#
|
||||
# get_target_property(OPENBLAS_INCLUDE_DIR openblas INTERFACE_INCLUDE_DIRECTORIES)
|
||||
|
@ -645,7 +753,7 @@ macro(build_lapack)
|
|||
add_dependencies(lapack lapack_ep)
|
||||
endmacro()
|
||||
|
||||
#if(MEGASEARCH_WITH_LAPACK)
|
||||
#if(MILVUS_WITH_LAPACK)
|
||||
# resolve_dependency(LAPACK)
|
||||
#
|
||||
# get_target_property(LAPACK_INCLUDE_DIR lapack INTERFACE_INCLUDE_DIRECTORIES)
|
||||
|
@ -682,10 +790,18 @@ macro(build_faiss)
|
|||
# endif()
|
||||
# set(FAISS_DEPENDENCIES ${FAISS_DEPENDENCIES} ${OPENBLAS_LIBRARY})
|
||||
|
||||
if(${MEGASEARCH_WITH_FAISS_GPU_VERSION} STREQUAL "ON")
|
||||
if(${MILVUS_WITH_FAISS_GPU_VERSION} STREQUAL "ON")
|
||||
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS}
|
||||
"--with-cuda=${CUDA_TOOLKIT_ROOT_DIR}"
|
||||
"--with-cuda-arch=${MEGASEARCH_FAISS_GPU_ARCH}")
|
||||
# "with_cuda_arch=\"-gencode=arch=compute_35,code=compute_35 \\
|
||||
# -gencode=arch=compute_52,code=compute_52 \\
|
||||
# -gencode=arch=compute_60,code=compute_60 \\
|
||||
# -gencode=arch=compute_61,code=compute_61\""
|
||||
"--with-cuda-arch=\"-gencode=arch=compute_35,code=compute_35\""
|
||||
"--with-cuda-arch=\"-gencode=arch=compute_52,code=compute_52\""
|
||||
"--with-cuda-arch=\"-gencode=arch=compute_60,code=compute_60\""
|
||||
"--with-cuda-arch=\"-gencode=arch=compute_61,code=compute_61\""
|
||||
)
|
||||
else()
|
||||
set(FAISS_CONFIGURE_ARGS ${FAISS_CONFIGURE_ARGS} --without-cuda)
|
||||
endif()
|
||||
|
@ -719,20 +835,26 @@ macro(build_faiss)
|
|||
${FAISS_STATIC_LIB})
|
||||
# DEPENDS
|
||||
# ${faiss_dependencies})
|
||||
ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep)
|
||||
ExternalProject_Add_StepDependencies(faiss_ep build lapack_ep)
|
||||
|
||||
ExternalProject_Add_StepDependencies(faiss_ep build openblas_ep lapack_ep)
|
||||
|
||||
file(MAKE_DIRECTORY "${FAISS_INCLUDE_DIR}")
|
||||
add_library(faiss STATIC IMPORTED)
|
||||
set_target_properties(
|
||||
faiss
|
||||
PROPERTIES IMPORTED_LOCATION "${FAISS_STATIC_LIB}"
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}")
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${FAISS_INCLUDE_DIR}"
|
||||
INTERFACE_LINK_LIBRARIES "openblas;lapack" )
|
||||
|
||||
add_dependencies(faiss faiss_ep)
|
||||
#add_dependencies(faiss openblas_ep)
|
||||
#add_dependencies(faiss lapack_ep)
|
||||
#target_link_libraries(faiss ${OPENBLAS_PREFIX}/lib)
|
||||
#target_link_libraries(faiss ${LAPACK_PREFIX}/lib)
|
||||
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_FAISS)
|
||||
if(MILVUS_WITH_FAISS)
|
||||
|
||||
resolve_dependency(OpenBLAS)
|
||||
get_target_property(OPENBLAS_INCLUDE_DIR openblas INTERFACE_INCLUDE_DIRECTORIES)
|
||||
|
@ -827,7 +949,7 @@ macro(build_gtest)
|
|||
|
||||
endmacro()
|
||||
|
||||
if (MEGASEARCH_BUILD_TESTS)
|
||||
if (MILVUS_BUILD_TESTS)
|
||||
#message(STATUS "Resolving gtest dependency")
|
||||
resolve_dependency(GTest)
|
||||
|
||||
|
@ -840,6 +962,30 @@ if (MEGASEARCH_BUILD_TESTS)
|
|||
include_directories(SYSTEM ${GTEST_INCLUDE_DIR})
|
||||
endif()
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# JSONCONS
|
||||
|
||||
macro(build_jsoncons)
|
||||
message(STATUS "Building JSONCONS-${JSONCONS_VERSION} from source")
|
||||
|
||||
set(JSONCONS_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/jsoncons_ep-prefix")
|
||||
set(JSONCONS_TAR_NAME "${JSONCONS_PREFIX}/jsoncons-${JSONCONS_VERSION}.tar.gz")
|
||||
set(JSONCONS_INCLUDE_DIR "${JSONCONS_PREFIX}/jsoncons-${JSONCONS_VERSION}/include")
|
||||
if (NOT EXISTS ${JSONCONS_INCLUDE_DIR})
|
||||
file(MAKE_DIRECTORY ${JSONCONS_PREFIX})
|
||||
file(DOWNLOAD ${JSONCONS_SOURCE_URL}
|
||||
${JSONCONS_TAR_NAME})
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E tar -xf ${JSONCONS_TAR_NAME}
|
||||
WORKING_DIRECTORY ${JSONCONS_PREFIX})
|
||||
|
||||
endif ()
|
||||
endmacro()
|
||||
|
||||
if(MILVUS_WITH_JSONCONS)
|
||||
resolve_dependency(JSONCONS)
|
||||
include_directories(SYSTEM "${JSONCONS_INCLUDE_DIR}")
|
||||
endif()
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# lz4
|
||||
|
||||
|
@ -849,7 +995,7 @@ macro(build_lz4)
|
|||
set(LZ4_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/lz4_ep-prefix/")
|
||||
|
||||
if(MSVC)
|
||||
if(MEGASEARCH_USE_STATIC_CRT)
|
||||
if(MILVUS_USE_STATIC_CRT)
|
||||
if(${UPPERCASE_BUILD_TYPE} STREQUAL "DEBUG")
|
||||
set(LZ4_RUNTIME_LIBRARY_LINKAGE "/p:RuntimeLibrary=MultiThreadedDebug")
|
||||
else()
|
||||
|
@ -905,7 +1051,7 @@ macro(build_lz4)
|
|||
add_dependencies(lz4 lz4_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_LZ4)
|
||||
if(MILVUS_WITH_LZ4)
|
||||
resolve_dependency(Lz4)
|
||||
|
||||
# TODO: Don't use global includes but rather target_include_directories
|
||||
|
@ -935,11 +1081,20 @@ macro(build_prometheus)
|
|||
${EP_COMMON_CMAKE_ARGS}
|
||||
-DCMAKE_INSTALL_LIBDIR=lib
|
||||
-DBUILD_SHARED_LIBS=OFF
|
||||
"-DCMAKE_INSTALL_PREFIX=${PROMETHEUS_PREFIX}")
|
||||
"-DCMAKE_INSTALL_PREFIX=${PROMETHEUS_PREFIX}"
|
||||
-DCMAKE_BUILD_TYPE=Release)
|
||||
|
||||
externalproject_add(prometheus_ep
|
||||
URL
|
||||
GIT_REPOSITORY
|
||||
${PROMETHEUS_SOURCE_URL}
|
||||
GIT_TAG
|
||||
${PROMETHEUS_VERSION}
|
||||
GIT_SHALLOW
|
||||
TRUE
|
||||
# GIT_CONFIG
|
||||
# recurse-submodules=true
|
||||
# URL
|
||||
# ${PROMETHEUS_SOURCE_URL}
|
||||
${EP_LOG_OPTIONS}
|
||||
CMAKE_ARGS
|
||||
${PROMETHEUS_CMAKE_ARGS}
|
||||
|
@ -979,7 +1134,7 @@ macro(build_prometheus)
|
|||
add_dependencies(prometheus-cpp-core prometheus_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_PROMETHEUS)
|
||||
if(MILVUS_WITH_PROMETHEUS)
|
||||
|
||||
resolve_dependency(Prometheus)
|
||||
|
||||
|
@ -997,7 +1152,7 @@ if(MEGASEARCH_WITH_PROMETHEUS)
|
|||
link_directories(SYSTEM ${PROMETHEUS_PREFIX}/core/)
|
||||
include_directories(SYSTEM ${PROMETHEUS_PREFIX}/core/include)
|
||||
|
||||
link_directories(${PROMETHEUS_PREFIX}/civetweb_ep-prefix/src/civetweb_ep)
|
||||
#link_directories(${PROMETHEUS_PREFIX}/civetweb_ep-prefix/src/civetweb_ep)
|
||||
endif()
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
|
@ -1041,7 +1196,7 @@ macro(build_rocksdb)
|
|||
add_dependencies(rocksdb rocksdb_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_ROCKSDB)
|
||||
if(MILVUS_WITH_ROCKSDB)
|
||||
|
||||
resolve_dependency(RocksDB)
|
||||
|
||||
|
@ -1094,7 +1249,7 @@ macro(build_snappy)
|
|||
add_dependencies(snappy snappy_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_SNAPPY)
|
||||
if(MILVUS_WITH_SNAPPY)
|
||||
# if(Snappy_SOURCE STREQUAL "AUTO")
|
||||
# # Normally *Config.cmake files reside in /usr/lib/cmake but Snappy
|
||||
# # errornously places them in ${CMAKE_ROOT}/Modules/
|
||||
|
@ -1170,7 +1325,7 @@ macro(build_sqlite)
|
|||
add_dependencies(sqlite sqlite_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_SQLITE)
|
||||
if(MILVUS_WITH_SQLITE)
|
||||
resolve_dependency(SQLite)
|
||||
include_directories(SYSTEM "${SQLITE_INCLUDE_DIR}")
|
||||
link_directories(SYSTEM ${SQLITE_PREFIX}/lib/)
|
||||
|
@ -1183,16 +1338,16 @@ macro(build_sqlite_orm)
|
|||
message(STATUS "Building SQLITE_ORM-${SQLITE_ORM_VERSION} from source")
|
||||
|
||||
set(SQLITE_ORM_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/sqlite_orm_ep-prefix")
|
||||
set(SQLITE_ORM_TAR_NAME "${SQLITE_ORM_PREFIX}/sqlite_orm-${SQLITE_ORM_VERSION}.tar.gz") #sqlite_orm-${SQLITE_ORM_VERSION}.tar.gz
|
||||
if (NOT EXISTS ${SQLITE_ORM_TAR_NAME})
|
||||
set(SQLITE_ORM_TAR_NAME "${SQLITE_ORM_PREFIX}/sqlite_orm-${SQLITE_ORM_VERSION}.tar.gz")
|
||||
set(SQLITE_ORM_INCLUDE_DIR "${SQLITE_ORM_PREFIX}/sqlite_orm-${SQLITE_ORM_VERSION}/include/sqlite_orm")
|
||||
if (NOT EXISTS ${SQLITE_ORM_INCLUDE_DIR})
|
||||
file(MAKE_DIRECTORY ${SQLITE_ORM_PREFIX})
|
||||
file(DOWNLOAD https://github.com/fnc12/sqlite_orm/archive/${SQLITE_ORM_VERSION}.tar.gz
|
||||
file(DOWNLOAD ${SQLITE_ORM_SOURCE_URL}
|
||||
${SQLITE_ORM_TAR_NAME})
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E tar -xf ${SQLITE_ORM_TAR_NAME}
|
||||
WORKING_DIRECTORY ${SQLITE_ORM_PREFIX})
|
||||
|
||||
endif ()
|
||||
set(SQLITE_ORM_INCLUDE_DIR "${SQLITE_ORM_PREFIX}/sqlite_orm-${SQLITE_ORM_VERSION}/include/sqlite_orm")
|
||||
|
||||
#set(SQLITE_ORM_PREFIX "${CMAKE_CURRENT_BINARY_DIR}/sqlite_orm_ep-prefix/src/sqlite_orm_ep")
|
||||
#set(SQLITE_ORM_INCLUDE_DIR "${SQLITE_ORM_PREFIX}/include/sqlite_orm")
|
||||
|
@ -1257,7 +1412,7 @@ macro(build_sqlite_orm)
|
|||
# add_dependencies(sqlite_orm sqlite_orm_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_SQLITE_ORM)
|
||||
if(MILVUS_WITH_SQLITE_ORM)
|
||||
resolve_dependency(SQLite_ORM)
|
||||
# ExternalProject_Get_Property(sqlite_orm_ep source_dir)
|
||||
# set(SQLITE_ORM_INCLUDE_DIR ${source_dir}/sqlite_orm_ep)
|
||||
|
@ -1303,7 +1458,7 @@ macro(build_thrift)
|
|||
|
||||
set(THRIFT_STATIC_LIB_NAME "${CMAKE_STATIC_LIBRARY_PREFIX}thrift")
|
||||
if(MSVC)
|
||||
if(MEGASEARCH_USE_STATIC_CRT)
|
||||
if(MILVUS_USE_STATIC_CRT)
|
||||
set(THRIFT_STATIC_LIB_NAME "${THRIFT_STATIC_LIB_NAME}")
|
||||
set(THRIFT_CMAKE_ARGS ${THRIFT_CMAKE_ARGS} "-DWITH_MT=ON")
|
||||
else()
|
||||
|
@ -1404,7 +1559,7 @@ macro(build_thrift)
|
|||
add_dependencies(thrift thrift_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_THRIFT)
|
||||
if(MILVUS_WITH_THRIFT)
|
||||
resolve_dependency(Thrift)
|
||||
# TODO: Don't use global includes but rather target_include_directories
|
||||
# MESSAGE(STATUS ${THRIFT_PREFIX}/lib/)
|
||||
|
@ -1451,7 +1606,7 @@ macro(build_yamlcpp)
|
|||
add_dependencies(yaml-cpp yaml-cpp_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_YAMLCPP)
|
||||
if(MILVUS_WITH_YAMLCPP)
|
||||
resolve_dependency(yaml-cpp)
|
||||
|
||||
# TODO: Don't use global includes but rather target_include_directories
|
||||
|
@ -1501,7 +1656,7 @@ macro(build_zlib)
|
|||
add_dependencies(zlib zlib_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_ZLIB)
|
||||
if(MILVUS_WITH_ZLIB)
|
||||
resolve_dependency(ZLIB)
|
||||
|
||||
# TODO: Don't use global includes but rather target_include_directories
|
||||
|
@ -1528,7 +1683,7 @@ macro(build_zstd)
|
|||
|
||||
if(MSVC)
|
||||
set(ZSTD_STATIC_LIB "${ZSTD_PREFIX}/lib/zstd_static.lib")
|
||||
if(MEGASEARCH_USE_STATIC_CRT)
|
||||
if(MILVUS_USE_STATIC_CRT)
|
||||
set(ZSTD_CMAKE_ARGS ${ZSTD_CMAKE_ARGS} "-DZSTD_USE_STATIC_RUNTIME=on")
|
||||
endif()
|
||||
else()
|
||||
|
@ -1573,7 +1728,7 @@ macro(build_zstd)
|
|||
add_dependencies(zstd zstd_ep)
|
||||
endmacro()
|
||||
|
||||
if(MEGASEARCH_WITH_ZSTD)
|
||||
if(MILVUS_WITH_ZSTD)
|
||||
resolve_dependency(ZSTD)
|
||||
|
||||
# TODO: Don't use global includes but rather target_include_directories
|
||||
|
|
|

@@ -1,6 +1,6 @@
* GLOBAL:
FORMAT = "%datetime | %level | %logger | %msg"
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-global.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-global.log"
ENABLED = true
TO_FILE = true
TO_STANDARD_OUTPUT = true

@@ -8,12 +8,12 @@
PERFORMANCE_TRACKING = false
MAX_LOG_FILE_SIZE = 2097152 ## Throw log files away after 2MB
* DEBUG:
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-debug.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-debug.log"
ENABLED = true
* WARNING:
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-warning.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-warning.log"
* TRACE:
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-trace.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-trace.log"
* VERBOSE:
FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg"
TO_FILE = false

@@ -21,7 +21,7 @@
## Error logs
* ERROR:
ENABLED = false
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-error.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-error.log"
* FATAL:
ENABLED = false
FILENAME = "/tmp/vecwise/logs/vecwise_engine-%datetime{%H:%m}-fatal.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-fatal.log"

@@ -6,16 +6,16 @@ server_config:
  gpu_index: 0 #which gpu to be used

db_config:
  db_path: /tmp/vecwise
  db_path: /tmp/milvus
  db_backend_url: http://127.0.0.1
  db_flush_interval: 5 #unit: second
  idmapper_max_open_file: 128
  db_flush_interval: 5 #flush cache data into disk at intervals, unit: second
  index_building_threshold: 1024 #build index file when raw data file size larger than this value, unit: MB

metric_config:
  is_startup: true # true is on, false is off
  collector: prometheus # prometheus, now we only have prometheus
  prometheus_config:
    collect_type: pull # pull means prometheus pull the message from megasearch, push means megasearch push metric to push gateway
    collect_type: pull # pull means prometheus pulls metrics from the server, push means the server pushes metrics to the push gateway
    port: 8080
    push_gateway_ip_address: 127.0.0.1
    push_gateway_port: 9091
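With collect_type set to pull, the server is expected to expose its metrics over HTTP on the configured port for Prometheus to scrape; a quick check (the /metrics path is the prometheus-cpp default and an assumption here):

```sh
# Sketch: verify the metrics endpoint once the server is running.
curl http://127.0.0.1:8080/metrics | head
```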

@@ -24,5 +24,4 @@ license_config:
  license_path: "/tmp/system.license"

cache_config:
  cpu_cache_capacity: 16 # unit: GB
  gpu_cache_capacity: 2 # unit: GB
  cpu_cache_capacity: 16 # memory pool to hold index data, unit: GB

@@ -1,19 +0,0 @@
server_config:
  address: 0.0.0.0
  port: 33001
  transfer_protocol: binary #optional: binary, compact, json
  server_mode: thread_pool #optional: simple, thread_pool
  gpu_index: 0 #which gpu to be used

db_config:
  db_path: /tmp/vecwise
  db_backend_url: http://127.0.0.1
  db_flush_interval: 5 #unit: second
  idmapper_max_open_file: 128

license_config:
  license_path: "/tmp/system.license"

cache_config:
  cpu_cache_capacity: 16 # unit: GB
  gpu_cache_capacity: 2 # unit: GB

@@ -1,4 +1,4 @@
#!/bin/bash

../bin/vecwise_server -c ../conf/server_config.yaml -l ../conf/vecwise_engine_log.conf
../bin/milvus_server -c ../conf/server_config.yaml -l ../conf/log_config.conf

@@ -7,10 +7,10 @@ function kill_progress()
sleep 2
}

STATUS=$(kill_progress "vecwise_server" )
STATUS=$(kill_progress "milvus_server" )

if [[ ${STATUS} == "false" ]];then
echo "vecwise_server closed abnormally!"
echo "Milvus server closed abnormally!"
else
echo "vecwise_server closed successfully!"
echo "Milvus server closed successfully!"
fi

@@ -31,12 +31,16 @@ set(license_generator_files
)

set(service_files
thrift/gen-cpp/MegasearchService.cpp
thrift/gen-cpp/megasearch_constants.cpp
thrift/gen-cpp/megasearch_types.cpp
thrift/gen-cpp/MilvusService.cpp
thrift/gen-cpp/milvus_constants.cpp
thrift/gen-cpp/milvus_types.cpp
metrics/SystemInfo.cpp
metrics/SystemInfo.h
server/ThreadPoolServer.cpp
server/ThreadPoolServer.h
)

set(vecwise_engine_files
set(engine_files
${CMAKE_CURRENT_SOURCE_DIR}/main.cpp
${cache_files}
${db_files}

@@ -50,32 +54,12 @@ set(get_sys_info_files
license/GetSysInfo.cpp)

include_directories(/usr/include)
include_directories(/usr/local/cuda/include)
include_directories("${CUDA_TOOLKIT_ROOT_DIR}/include")
include_directories(thrift/gen-cpp)

#target_link_libraries(megasearch boost_system_static)
#target_link_libraries(megasearch boost_filesystem_static)
#target_link_libraries(megasearch boost_serialization_static)
#target_link_libraries(megasearch bzip2)
#target_link_libraries(megasearch easyloggingpp)
#target_link_libraries(megasearch faiss)
#target_link_libraries(megasearch gtest)
#target_link_libraries(megasearch lapack)
#target_link_libraries(megasearch lz4)
#target_link_libraries(megasearch openblas)
#target_link_libraries(megasearch rocksdb)
#target_link_libraries(megasearch snappy)
#target_link_libraries(megasearch sqlite)
#target_link_libraries(megasearch sqlite_orm)
#target_link_libraries(megasearch thrift)
#target_link_libraries(megasearch yaml-cpp)
#target_link_libraries(megasearch zlib)
#target_link_libraries(megasearch zstd)

set(third_party_libs
easyloggingpp
sqlite
# sqlite_orm
thrift
yaml-cpp
faiss

@@ -84,7 +68,6 @@ set(third_party_libs
prometheus-cpp-push
prometheus-cpp-pull
prometheus-cpp-core
civetweb
boost_system_static
boost_filesystem_static
boost_serialization_static
@@ -93,46 +76,36 @@ set(third_party_libs
snappy
zlib
zstd
${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
)
if (MEGASEARCH_WITH_ARROW STREQUAL "ON")
set(third_party_libs ${third_party_libs} arrow)
endif()

if (GPU_VERSION STREQUAL "ON")
link_directories(/usr/local/cuda/lib64)
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
set(engine_libs
pthread
libfaiss.a
libgpufaiss.a
libgomp.a
libopenblas.a
libgfortran.a
cudart
cublas
libsqlite3.a
libprometheus-cpp-push.a
libprometheus-cpp-pull.a
libprometheus-cpp-core.a
${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
)
else()
set(engine_libs
pthread
libfaiss.a
libgomp.a
libopenblas.a
libgfortran.a
libsqlite3.a
libprometheus-cpp-push.a
libprometheus-cpp-pull.a
libprometheus-cpp-core.a
${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
)
endif ()

if (ENABLE_LICENSE STREQUAL "ON")
link_directories(/usr/local/cuda/lib64/stubs)
link_directories(/usr/local/cuda/lib64)
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs")
link_directories("${CUDA_TOOLKIT_ROOT_DIR}/lib64")
set(license_libs
nvidia-ml
libboost_system.a
libboost_filesystem.a
libboost_serialization.a
crypto
cudart
cublas
@@ -141,43 +114,32 @@ endif ()

cuda_add_library(vecwise_engine STATIC ${vecwise_engine_files})
target_link_libraries(vecwise_engine ${engine_libs} ${third_party_libs})
cuda_add_library(milvus_engine STATIC ${engine_files})
target_link_libraries(milvus_engine ${engine_libs} ${third_party_libs})

add_library(metrics STATIC ${metrics_files})

if (ENABLE_LICENSE STREQUAL "ON")
add_library(vecwise_license STATIC ${license_check_files})
target_link_libraries(vecwise_license ${license_libs} ${third_party_libs})
add_library(license_check STATIC ${license_check_files})
target_link_libraries(license_check ${license_libs} ${third_party_libs})
endif ()

#set(metrics_lib
#        libprometheus-cpp-push.a
#        libprometheus-cpp-pull.a
#        libprometheus-cpp-core.a
#        )

#add_library(vecwise_engine STATIC ${metrics_files} )
#target_link_libraries(metrics ${metrics_lib})

set(server_libs
vecwise_engine
libthrift.a
pthread
libyaml-cpp.a
libboost_system.a
libboost_filesystem.a
libsnappy.a
libbz2.a
libz.a
libzstd.a
liblz4.a
dl
metrics

set(metrics_lib
prometheus-cpp-push
prometheus-cpp-pull
prometheus-cpp-core
)

add_executable(vecwise_server
target_link_libraries(metrics ${metrics_lib})

set(server_libs
milvus_engine
pthread
dl
metrics
)

add_executable(milvus_server
${config_files}
${server_files}
${utils_files}

@@ -187,23 +149,23 @@ add_executable(vecwise_server
)

if (ENABLE_LICENSE STREQUAL "ON")
target_link_libraries(vecwise_server ${server_libs} vecwise_license ${third_party_libs})
target_link_libraries(milvus_server ${server_libs} license_check ${third_party_libs})
else ()
target_link_libraries(vecwise_server ${server_libs} ${third_party_libs})
target_link_libraries(milvus_server ${server_libs} ${third_party_libs})
endif()

if (ENABLE_LICENSE STREQUAL "ON")
add_executable(get_sys_info ${get_sys_info_files})
add_executable(license_generator ${license_generator_files})

target_link_libraries(get_sys_info ${license_libs} vecwise_license ${third_party_libs})
target_link_libraries(get_sys_info ${license_libs} license_check ${third_party_libs})
target_link_libraries(license_generator ${license_libs} ${third_party_libs})

install(TARGETS get_sys_info DESTINATION bin)
install(TARGETS license_generator DESTINATION bin)
endif ()

install(TARGETS vecwise_server DESTINATION bin)
install(TARGETS milvus_server DESTINATION bin)

add_subdirectory(sdk)
#target_link_libraries(
@ -10,7 +10,7 @@
|
|||
#include <set>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
Cache::Cache(int64_t capacity, uint64_t cache_max_count)
|
||||
|
@ -218,6 +218,6 @@ void Cache::print() {
|
|||
}
|
||||
|
||||
} // cache
|
||||
} // vecwise
|
||||
} // milvus
|
||||
} // zilliz
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
#include "DataObj.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
const std::string SWAP_DIR = ".CACHE";
|
||||
|
@ -65,6 +65,6 @@ private:
|
|||
using CachePtr = std::shared_ptr<Cache>;
|
||||
|
||||
} // cache
|
||||
} // vecwise
|
||||
} // milvus
|
||||
} // zilliz
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include "metrics/Metrics.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
CacheMgr::CacheMgr() {
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include "Cache.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
class CacheMgr {
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include "server/ServerConfig.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
CpuCacheMgr::CpuCacheMgr() {
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include "CacheMgr.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
class CpuCacheMgr : public CacheMgr {
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
#include <memory>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
class DataObj {
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include "server/ServerConfig.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
GpuCacheMgr::GpuCacheMgr() {
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
#include "CacheMgr.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
class GpuCacheMgr : public CacheMgr {
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <stdexcept>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace cache {
|
||||
|
||||
template<typename key_t, typename value_t>
|
||||
|
@ -97,6 +97,6 @@ private:
|
|||
};
|
||||
|
||||
} // cache
|
||||
} // vecwise
|
||||
} // milvus
|
||||
} // zilliz
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <algorithm>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
void ConfigNode::Combine(const ConfigNode& target) {
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
#include <map>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
class ConfigNode;
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
#include "YamlConfigMgr.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
IConfigMgr * IConfigMgr::GetInstance() {
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include "ConfigNode.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
// this class can parse nested config file and return config item
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include <sys/stat.h>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
ServerError YamlConfigMgr::LoadConfigFile(const std::string &filename) {
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <yaml-cpp/yaml.h>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
class YamlConfigMgr : public IConfigMgr {
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
#include "Factories.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
DB::~DB() {}
|
||||
|
@ -21,5 +21,5 @@ void DB::Open(const Options& options, DB** dbptr) {
|
|||
}
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
#include <string>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class Env;
|
||||
|
@ -23,19 +23,22 @@ public:
    static void Open(const Options& options, DB** dbptr);

    virtual Status CreateTable(meta::TableSchema& table_schema_) = 0;
    virtual Status DeleteTable(const std::string& table_id, const meta::DatesT& dates) = 0;
    virtual Status DescribeTable(meta::TableSchema& table_schema_) = 0;
    virtual Status HasTable(const std::string& table_id_, bool& has_or_not_) = 0;
    virtual Status HasTable(const std::string& table_id, bool& has_or_not_) = 0;
    virtual Status AllTables(std::vector<meta::TableSchema>& table_schema_array) = 0;
    virtual Status GetTableRowCount(const std::string& table_id, uint64_t& row_count) = 0;

    virtual Status InsertVectors(const std::string& table_id_,
            size_t n, const float* vectors, IDNumbers& vector_ids_) = 0;
            uint64_t n, const float* vectors, IDNumbers& vector_ids_) = 0;

    virtual Status Query(const std::string& table_id, size_t k, size_t nq,
    virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
            const float* vectors, QueryResults& results) = 0;

    virtual Status Query(const std::string& table_id, size_t k, size_t nq,
    virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
            const float* vectors, const meta::DatesT& dates, QueryResults& results) = 0;

    virtual Status Size(long& result) = 0;
    virtual Status Size(uint64_t& result) = 0;

    virtual Status DropAll() = 0;

@ -47,5 +50,5 @@ public:
}; // DB

} // namespace engine
} // namespace vecwise
} // namespace milvus
} // namespace zilliz

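For orientation only, a hypothetical caller of the DB interface above would follow the CreateTable / InsertVectors / Query sequence sketched below; the table name, dimension and counts are illustrative, and the schema field names (table_id_, dimension_) are taken from other hunks in this diff:

    #include <vector>

    using namespace zilliz::milvus::engine;

    void ExampleUsage(DB* db) {                      // db obtained beforehand via DB::Open(options, &db)
        meta::TableSchema schema;
        schema.table_id_ = "demo_table";             // illustrative table name
        schema.dimension_ = 128;
        db->CreateTable(schema);

        std::vector<float> vectors(10 * 128, 0.5f);  // ten 128-dimensional vectors
        IDNumbers ids;                               // filled with generated vector ids
        db->InsertVectors("demo_table", 10, vectors.data(), ids);

        QueryResults results;                        // one scored result set per query vector
        db->Query("demo_table", 5, 1, vectors.data(), results);  // top-5 for a single query vector
    }
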
|
@ -6,20 +6,22 @@
|
|||
#include "DBImpl.h"
|
||||
#include "DBMetaImpl.h"
|
||||
#include "Env.h"
|
||||
#include "Log.h"
|
||||
#include "EngineFactory.h"
|
||||
#include "metrics/Metrics.h"
|
||||
#include "scheduler/SearchScheduler.h"
|
||||
#include "utils/TimeRecorder.h"
|
||||
|
||||
#include <assert.h>
|
||||
#include <chrono>
|
||||
#include <thread>
|
||||
#include <iostream>
|
||||
#include <cstring>
|
||||
#include <easylogging++.h>
|
||||
#include <cache/CpuCacheMgr.h>
|
||||
#include <boost/filesystem.hpp>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
namespace {
|
||||
|
@ -70,6 +72,55 @@ void CollectFileMetrics(int file_type, size_t file_size, double total_time) {
    }
}

void CalcScore(uint64_t vector_count,
               const float *vectors_data,
               uint64_t dimension,
               const SearchContext::ResultSet &result_src,
               SearchContext::ResultSet &result_target) {
    result_target.clear();
    if(result_src.empty()){
        return;
    }

    server::TimeRecorder rc("Calculate Score");
    int vec_index = 0;
    for(auto& result : result_src) {
        const float * vec_data = vectors_data + vec_index*dimension;
        double vec_len = 0;
        for(uint64_t i = 0; i < dimension; i++) {
            vec_len += vec_data[i]*vec_data[i];
        }
        vec_index++;

        double max_score = 0.0;
        for(auto& pair : result) {
            if(max_score < pair.second) {
                max_score = pair.second;
            }
        }

        //make sure score is less than 100
        if(max_score > vec_len) {
            vec_len = max_score;
        }

        //avoid division by zero
        static constexpr double TOLERANCE = std::numeric_limits<float>::epsilon();
        if(vec_len < TOLERANCE) {
            vec_len = TOLERANCE;
        }

        SearchContext::Id2ScoreMap score_array;
        double vec_len_inverse = 1.0/vec_len;
        for(auto& pair : result) {
            score_array.push_back(std::make_pair(pair.first, (1 - pair.second*vec_len_inverse)*100.0));
        }
        result_target.emplace_back(score_array);
    }

    rc.Elapse("totally cost");
}

}

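A quick summary of the arithmetic in the CalcScore body above (read from the code, not stated elsewhere in this diff): for each query vector q, every raw distance d_i in its result set is normalized against the squared length of q, clamped from below by the largest distance in the set and by a small epsilon, so scores stay in the 0~100 range:

    \mathrm{score}_i = \left(1 - \frac{d_i}{\max\left(\lVert q\rVert^{2},\; \max_j d_j,\; \varepsilon\right)}\right) \times 100,
    \qquad \varepsilon = \text{std::numeric\_limits<float>::epsilon()}

An exact match (d_i = 0) therefore scores 100, and the worst distance in the set never drops below 0.
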
@ -88,6 +139,34 @@ Status DBImpl::CreateTable(meta::TableSchema& table_schema) {
    return pMeta_->CreateTable(table_schema);
}

Status DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) {
    meta::DatePartionedTableFilesSchema files;
    auto status = pMeta_->FilesToDelete(table_id, dates, files);
    if (!status.ok()) { return status; }

    for (auto &day_files : files) {
        for (auto &file : day_files.second) {
            boost::filesystem::remove(file.location_);
        }
    }

    //dates empty means delete all files of the table
    if(dates.empty()) {
        meta::TableSchema table_schema;
        table_schema.table_id_ = table_id;
        status = DescribeTable(table_schema);

        pMeta_->DeleteTable(table_id);
        boost::system::error_code ec;
        boost::filesystem::remove_all(table_schema.location_, ec);
        if(ec.failed()) {
            ENGINE_LOG_WARNING << "Failed to remove table folder";
        }
    }

    return Status::OK();
}

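As a hypothetical call site (not part of this diff), the two modes of DeleteTable above would look like:

    db->DeleteTable("demo_table", {meta::Meta::GetDate()});   // drop only files of the given date partition
    db->DeleteTable("demo_table", {});                        // empty dates: drop all files, the meta record and the table folder

The table name is illustrative; meta::Meta::GetDate() is the same helper DBImpl::Query uses later in this file.
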
Status DBImpl::DescribeTable(meta::TableSchema& table_schema) {
|
||||
return pMeta_->DescribeTable(table_schema);
|
||||
}
|
||||
|
@ -96,8 +175,16 @@ Status DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
|
|||
return pMeta_->HasTable(table_id, has_or_not);
|
||||
}
|
||||
|
||||
Status DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
|
||||
return pMeta_->AllTables(table_schema_array);
|
||||
}
|
||||
|
||||
Status DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) {
|
||||
return pMeta_->Count(table_id, row_count);
|
||||
}
|
||||
|
||||
Status DBImpl::InsertVectors(const std::string& table_id_,
|
||||
size_t n, const float* vectors, IDNumbers& vector_ids_) {
|
||||
uint64_t n, const float* vectors, IDNumbers& vector_ids_) {
|
||||
|
||||
auto start_time = METRICS_NOW_TIME;
|
||||
Status status = pMemMgr_->InsertVectors(table_id_, n, vectors, vector_ids_);
|
||||
|
@ -108,9 +195,10 @@ Status DBImpl::InsertVectors(const std::string& table_id_,
|
|||
|
||||
CollectInsertMetrics(total_time, n, status.ok());
|
||||
return status;
|
||||
|
||||
}
|
||||
|
||||
Status DBImpl::Query(const std::string &table_id, size_t k, size_t nq,
|
||||
Status DBImpl::Query(const std::string &table_id, uint64_t k, uint64_t nq,
|
||||
const float *vectors, QueryResults &results) {
|
||||
auto start_time = METRICS_NOW_TIME;
|
||||
meta::DatesT dates = {meta::Meta::GetDate()};
|
||||
|
@ -119,10 +207,11 @@ Status DBImpl::Query(const std::string &table_id, size_t k, size_t nq,
|
|||
auto total_time = METRICS_MICROSECONDS(start_time,end_time);
|
||||
|
||||
CollectQueryMetrics(total_time, nq);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
Status DBImpl::Query(const std::string& table_id, size_t k, size_t nq,
|
||||
Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq,
|
||||
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
|
||||
#if 0
|
||||
return QuerySync(table_id, k, nq, vectors, dates, results);
|
||||
|
@ -131,13 +220,13 @@ Status DBImpl::Query(const std::string& table_id, size_t k, size_t nq,
|
|||
#endif
|
||||
}
|
||||
|
||||
Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
|
||||
Status DBImpl::QuerySync(const std::string& table_id, uint64_t k, uint64_t nq,
|
||||
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
|
||||
meta::DatePartionedTableFilesSchema files;
|
||||
auto status = pMeta_->FilesToSearch(table_id, dates, files);
|
||||
if (!status.ok()) { return status; }
|
||||
|
||||
LOG(DEBUG) << "Search DateT Size=" << files.size();
|
||||
ENGINE_LOG_DEBUG << "Search DateT Size = " << files.size();
|
||||
|
||||
meta::TableFilesSchema index_files;
|
||||
meta::TableFilesSchema raw_files;
|
||||
|
@ -154,7 +243,7 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
|
|||
} else if (!raw_files.empty()) {
|
||||
dim = raw_files[0].dimension_;
|
||||
} else {
|
||||
LOG(DEBUG) << "no files to search";
|
||||
ENGINE_LOG_DEBUG << "no files to search";
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
|
@ -190,7 +279,7 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
|
|||
auto file_size = index->PhysicalSize();
|
||||
search_set_size += file_size;
|
||||
|
||||
LOG(DEBUG) << "Search file_type " << file.file_type_ << " Of Size: "
|
||||
ENGINE_LOG_DEBUG << "Search file_type " << file.file_type_ << " Of Size: "
|
||||
<< file_size/(1024*1024) << " M";
|
||||
|
||||
int inner_k = index->Count() < k ? index->Count() : k;
|
||||
|
@ -252,7 +341,7 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
|
|||
search_in_index(raw_files);
|
||||
search_in_index(index_files);
|
||||
|
||||
LOG(DEBUG) << "Search Overall Set Size=" << search_set_size << " M";
|
||||
ENGINE_LOG_DEBUG << "Search Overall Set Size = " << search_set_size << " M";
|
||||
cluster_topk();
|
||||
|
||||
free(output_distence);
|
||||
|
@ -262,10 +351,15 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
|
|||
if (results.empty()) {
|
||||
return Status::NotFound("Group " + table_id + ", search result not found!");
|
||||
}
|
||||
|
||||
QueryResults temp_results;
|
||||
CalcScore(nq, vectors, dim, results, temp_results);
|
||||
results.swap(temp_results);
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status DBImpl::QueryAsync(const std::string& table_id, size_t k, size_t nq,
|
||||
Status DBImpl::QueryAsync(const std::string& table_id, uint64_t k, uint64_t nq,
|
||||
const float* vectors, const meta::DatesT& dates, QueryResults& results) {
|
||||
|
||||
//step 1: get files to search
|
||||
|
@ -273,7 +367,7 @@ Status DBImpl::QueryAsync(const std::string& table_id, size_t k, size_t nq,
|
|||
auto status = pMeta_->FilesToSearch(table_id, dates, files);
|
||||
if (!status.ok()) { return status; }
|
||||
|
||||
LOG(DEBUG) << "Search DateT Size=" << files.size();
|
||||
ENGINE_LOG_DEBUG << "Search DateT Size=" << files.size();
|
||||
|
||||
SearchContextPtr context = std::make_shared<SearchContext>(k, nq, vectors);
|
||||
|
||||
|
@ -290,9 +384,13 @@ Status DBImpl::QueryAsync(const std::string& table_id, size_t k, size_t nq,
|
|||
|
||||
context->WaitResult();
|
||||
|
||||
//step 3: construct results
|
||||
//step 3: construct results, calculate score between 0 ~ 100
|
||||
auto& context_result = context->GetResult();
|
||||
results.swap(context_result);
|
||||
meta::TableSchema table_schema;
|
||||
table_schema.table_id_ = table_id;
|
||||
pMeta_->DescribeTable(table_schema);
|
||||
|
||||
CalcScore(context->nq(), context->vectors(), table_schema.dimension_, context_result, results);
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
@ -304,17 +402,25 @@ void DBImpl::StartTimerTasks(int interval) {

void DBImpl::BackgroundTimerTask(int interval) {
    Status status;
    server::SystemInfo::GetInstance().Init();
    while (true) {
        if (!bg_error_.ok()) break;
        if (shutting_down_.load(std::memory_order_acquire)) break;

        std::this_thread::sleep_for(std::chrono::seconds(interval));
        int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheUsage();
        LOG(DEBUG) << "Cache usage " << cache_total;
        server::Metrics::GetInstance().CacheUsageGaugeSet(static_cast<double>(cache_total));
        long size;

        server::Metrics::GetInstance().KeepingAliveCounterIncrement(interval);
        int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage();
        int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheCapacity();
        server::Metrics::GetInstance().CacheUsageGaugeSet(cache_usage*100/cache_total);
        uint64_t size;
        Size(size);
        server::Metrics::GetInstance().DataFileSizeGaugeSet(size);
        server::Metrics::GetInstance().CPUUsagePercentSet();
        server::Metrics::GetInstance().RAMUsagePercentSet();
        server::Metrics::GetInstance().GPUPercentGaugeSet();
        server::Metrics::GetInstance().GPUMemoryUsageGaugeSet();
        server::Metrics::GetInstance().OctetsSet();
        TrySchedule();
    }
}

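One arithmetic note on the new gauge above: cache_usage*100/cache_total is evaluated in int64_t, so the published value is an integer percentage. Assuming both calls return byte counts, a usage of 3 GB against a 16 GB capacity publishes 18 rather than 18.75; the figures here are illustrative only.
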
|
@ -509,7 +615,7 @@ Status DBImpl::DropAll() {
|
|||
return pMeta_->DropAll();
|
||||
}
|
||||
|
||||
Status DBImpl::Size(long& result) {
|
||||
Status DBImpl::Size(uint64_t& result) {
|
||||
return pMeta_->Size(result);
|
||||
}
|
||||
|
||||
|
@ -534,5 +640,5 @@ DBImpl::~DBImpl() {
|
|||
}
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
#include <thread>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class Env;
|
||||
|
@ -33,29 +33,32 @@ public:
|
|||
DBImpl(const Options& options);
|
||||
|
||||
virtual Status CreateTable(meta::TableSchema& table_schema) override;
|
||||
virtual Status DeleteTable(const std::string& table_id, const meta::DatesT& dates) override;
|
||||
virtual Status DescribeTable(meta::TableSchema& table_schema) override;
|
||||
virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
|
||||
virtual Status AllTables(std::vector<meta::TableSchema>& table_schema_array) override;
|
||||
virtual Status GetTableRowCount(const std::string& table_id, uint64_t& row_count) override;
|
||||
|
||||
virtual Status InsertVectors(const std::string& table_id,
|
||||
size_t n, const float* vectors, IDNumbers& vector_ids) override;
|
||||
uint64_t n, const float* vectors, IDNumbers& vector_ids) override;
|
||||
|
||||
virtual Status Query(const std::string& table_id, size_t k, size_t nq,
|
||||
virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
|
||||
const float* vectors, QueryResults& results) override;
|
||||
|
||||
virtual Status Query(const std::string& table_id, size_t k, size_t nq,
|
||||
virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
|
||||
const float* vectors, const meta::DatesT& dates, QueryResults& results) override;
|
||||
|
||||
virtual Status DropAll() override;
|
||||
|
||||
virtual Status Size(long& result) override;
|
||||
virtual Status Size(uint64_t& result) override;
|
||||
|
||||
virtual ~DBImpl();
|
||||
|
||||
private:
|
||||
Status QuerySync(const std::string& table_id, size_t k, size_t nq,
|
||||
Status QuerySync(const std::string& table_id, uint64_t k, uint64_t nq,
|
||||
const float* vectors, const meta::DatesT& dates, QueryResults& results);
|
||||
|
||||
Status QueryAsync(const std::string& table_id, size_t k, size_t nq,
|
||||
Status QueryAsync(const std::string& table_id, uint64_t k, uint64_t nq,
|
||||
const float* vectors, const meta::DatesT& dates, QueryResults& results);
|
||||
|
||||
|
||||
|
@ -97,5 +100,5 @@ private:
|
|||
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
#include "DBMetaImpl.h"
|
||||
#include "IDGenerator.h"
|
||||
#include "Utils.h"
|
||||
#include "Log.h"
|
||||
#include "MetaConsts.h"
|
||||
#include "Factories.h"
|
||||
#include "metrics/Metrics.h"
|
||||
|
@ -17,16 +18,24 @@
|
|||
#include <chrono>
|
||||
#include <fstream>
|
||||
#include <sqlite_orm.h>
|
||||
#include <easylogging++.h>
|
||||
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
namespace meta {
|
||||
|
||||
using namespace sqlite_orm;
|
||||
|
||||
namespace {
|
||||
|
||||
void HandleException(std::exception &e) {
|
||||
ENGINE_LOG_DEBUG << "Engine meta exception: " << e.what();
|
||||
throw e;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
inline auto StoragePrototype(const std::string &path) {
|
||||
return make_storage(path,
|
||||
make_table("Table",
|
||||
|
@ -100,7 +109,7 @@ Status DBMetaImpl::Initialize() {
|
|||
if (!boost::filesystem::is_directory(options_.path)) {
|
||||
auto ret = boost::filesystem::create_directory(options_.path);
|
||||
if (!ret) {
|
||||
LOG(ERROR) << "Create directory " << options_.path << " Error";
|
||||
ENGINE_LOG_ERROR << "Create directory " << options_.path << " Error";
|
||||
}
|
||||
assert(ret);
|
||||
}
|
||||
|
@ -148,8 +157,7 @@ Status DBMetaImpl::DropPartitionsByDates(const std::string &table_id,
|
|||
in(&TableFileSchema::date_, dates)
|
||||
));
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
|
@ -175,12 +183,12 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
|
|||
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
|
||||
server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
|
||||
|
||||
auto group_path = GetTablePath(table_schema.table_id_);
|
||||
|
||||
if (!boost::filesystem::is_directory(group_path)) {
|
||||
auto ret = boost::filesystem::create_directories(group_path);
|
||||
auto table_path = GetTablePath(table_schema.table_id_);
|
||||
table_schema.location_ = table_path;
|
||||
if (!boost::filesystem::is_directory(table_path)) {
|
||||
auto ret = boost::filesystem::create_directories(table_path);
|
||||
if (!ret) {
|
||||
LOG(ERROR) << "Create directory " << group_path << " Error";
|
||||
ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
|
||||
}
|
||||
assert(ret);
|
||||
}
|
||||
|
@ -188,6 +196,21 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
|
|||
return Status::OK();
|
||||
}
|
||||
|
||||
Status DBMetaImpl::DeleteTable(const std::string& table_id) {
|
||||
try {
|
||||
//drop the table from meta
|
||||
auto tables = ConnectorPtr->select(columns(&TableSchema::id_),
|
||||
where(c(&TableSchema::table_id_) == table_id));
|
||||
for (auto &table : tables) {
|
||||
ConnectorPtr->remove<TableSchema>(std::get<0>(table));
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
HandleException(e);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status DBMetaImpl::DescribeTable(TableSchema &table_schema) {
|
||||
try {
|
||||
server::Metrics::GetInstance().MetaAccessTotalIncrement();
|
||||
|
@ -212,9 +235,12 @@ Status DBMetaImpl::DescribeTable(TableSchema &table_schema) {
|
|||
} else {
|
||||
return Status::NotFound("Table " + table_schema.table_id_ + " not found");
|
||||
}
|
||||
|
||||
auto table_path = GetTablePath(table_schema.table_id_);
|
||||
table_schema.location_ = table_path;
|
||||
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
|
@ -237,12 +263,42 @@ Status DBMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) {
|
|||
has_or_not = false;
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status DBMetaImpl::AllTables(std::vector<TableSchema>& table_schema_array) {
|
||||
try {
|
||||
server::Metrics::GetInstance().MetaAccessTotalIncrement();
|
||||
auto start_time = METRICS_NOW_TIME;
|
||||
auto selected = ConnectorPtr->select(columns(&TableSchema::id_,
|
||||
&TableSchema::table_id_,
|
||||
&TableSchema::files_cnt_,
|
||||
&TableSchema::dimension_,
|
||||
&TableSchema::engine_type_,
|
||||
&TableSchema::store_raw_data_));
|
||||
auto end_time = METRICS_NOW_TIME;
|
||||
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
|
||||
server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
|
||||
for (auto &table : selected) {
|
||||
TableSchema schema;
|
||||
schema.id_ = std::get<0>(table);
|
||||
schema.table_id_ = std::get<1>(table);
|
||||
schema.files_cnt_ = std::get<2>(table);
|
||||
schema.dimension_ = std::get<3>(table);
|
||||
schema.engine_type_ = std::get<4>(table);
|
||||
schema.store_raw_data_ = std::get<5>(table);
|
||||
|
||||
table_schema_array.emplace_back(schema);
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
HandleException(e);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status DBMetaImpl::CreateTableFile(TableFileSchema &file_schema) {
|
||||
if (file_schema.date_ == EmptyDate) {
|
||||
file_schema.date_ = Meta::GetDate();
|
||||
|
@ -282,7 +338,7 @@ Status DBMetaImpl::CreateTableFile(TableFileSchema &file_schema) {
|
|||
if (!boost::filesystem::is_directory(partition_path)) {
|
||||
auto ret = boost::filesystem::create_directory(partition_path);
|
||||
if (!ret) {
|
||||
LOG(ERROR) << "Create directory " << partition_path << " Error";
|
||||
ENGINE_LOG_ERROR << "Create directory " << partition_path << " Error";
|
||||
}
|
||||
assert(ret);
|
||||
}
|
||||
|
@ -336,8 +392,7 @@ Status DBMetaImpl::FilesToIndex(TableFilesSchema &files) {
|
|||
files.push_back(table_file);
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
|
@ -400,7 +455,8 @@ Status DBMetaImpl::FilesToSearch(const std::string &table_id,
|
|||
&TableFileSchema::file_id_,
|
||||
&TableFileSchema::file_type_,
|
||||
&TableFileSchema::size_,
|
||||
&TableFileSchema::date_),
|
||||
&TableFileSchema::date_,
|
||||
&TableFileSchema::engine_type_),
|
||||
where(c(&TableFileSchema::table_id_) == table_id and
|
||||
in(&TableFileSchema::date_, partition) and
|
||||
(c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW or
|
||||
|
@ -427,6 +483,7 @@ Status DBMetaImpl::FilesToSearch(const std::string &table_id,
|
|||
table_file.file_type_ = std::get<3>(file);
|
||||
table_file.size_ = std::get<4>(file);
|
||||
table_file.date_ = std::get<5>(file);
|
||||
table_file.engine_type_ = std::get<6>(file);
|
||||
table_file.dimension_ = table_schema.dimension_;
|
||||
GetTableFilePath(table_file);
|
||||
auto dateItr = files.find(table_file.date_);
|
||||
|
@ -438,8 +495,7 @@ Status DBMetaImpl::FilesToSearch(const std::string &table_id,
|
|||
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
|
@ -459,7 +515,8 @@ Status DBMetaImpl::FilesToMerge(const std::string &table_id,
|
|||
&TableFileSchema::size_,
|
||||
&TableFileSchema::date_),
|
||||
where(c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW and
|
||||
c(&TableFileSchema::table_id_) == table_id));
|
||||
c(&TableFileSchema::table_id_) == table_id),
|
||||
order_by(&TableFileSchema::size_).desc());
|
||||
auto end_time = METRICS_NOW_TIME;
|
||||
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
|
||||
server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
|
||||
|
@ -488,8 +545,79 @@ Status DBMetaImpl::FilesToMerge(const std::string &table_id,
|
|||
files[table_file.date_].push_back(table_file);
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status DBMetaImpl::FilesToDelete(const std::string& table_id,
                                 const DatesT& partition,
                                 DatePartionedTableFilesSchema& files) {
    auto now = utils::GetMicroSecTimeStamp();
    try {
        if(partition.empty()) {
            //step 1: get all table files of the table
            auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_,
                                                         &TableFileSchema::table_id_,
                                                         &TableFileSchema::file_id_,
                                                         &TableFileSchema::size_,
                                                         &TableFileSchema::date_),
                                                 where(c(&TableFileSchema::file_type_) !=
                                                       (int) TableFileSchema::TO_DELETE
                                                       and c(&TableFileSchema::table_id_) == table_id));

            //step 2: erase table files from meta
            for (auto &file : selected) {
                TableFileSchema table_file;
                table_file.id_ = std::get<0>(file);
                table_file.table_id_ = std::get<1>(file);
                table_file.file_id_ = std::get<2>(file);
                table_file.size_ = std::get<3>(file);
                table_file.date_ = std::get<4>(file);
                GetTableFilePath(table_file);
                auto dateItr = files.find(table_file.date_);
                if (dateItr == files.end()) {
                    files[table_file.date_] = TableFilesSchema();
                }
                files[table_file.date_].push_back(table_file);

                ConnectorPtr->remove<TableFileSchema>(std::get<0>(file));
            }

        } else {
            //step 1: get table files by date
            auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_,
                                                         &TableFileSchema::table_id_,
                                                         &TableFileSchema::file_id_,
                                                         &TableFileSchema::size_,
                                                         &TableFileSchema::date_),
                                                 where(c(&TableFileSchema::file_type_) !=
                                                       (int) TableFileSchema::TO_DELETE
                                                       and in(&TableFileSchema::date_, partition)
                                                       and c(&TableFileSchema::table_id_) == table_id));

            //step 2: erase table files from meta
            for (auto &file : selected) {
                TableFileSchema table_file;
                table_file.id_ = std::get<0>(file);
                table_file.table_id_ = std::get<1>(file);
                table_file.file_id_ = std::get<2>(file);
                table_file.size_ = std::get<3>(file);
                table_file.date_ = std::get<4>(file);
                GetTableFilePath(table_file);
                auto dateItr = files.find(table_file.date_);
                if (dateItr == files.end()) {
                    files[table_file.date_] = TableFilesSchema();
                }
                files[table_file.date_].push_back(table_file);

                ConnectorPtr->remove<TableFileSchema>(std::get<0>(file));
            }
        }

    } catch (std::exception &e) {
        HandleException(e);
    }

    return Status::OK();

|
@ -520,8 +648,7 @@ Status DBMetaImpl::GetTableFile(TableFileSchema &file_schema) {
|
|||
" File:" + file_schema.file_id_ + " not found");
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
|
@ -550,12 +677,11 @@ Status DBMetaImpl::Archive() {
|
|||
c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE
|
||||
));
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
}
|
||||
if (criteria == "disk") {
|
||||
long sum = 0;
|
||||
uint64_t sum = 0;
|
||||
Size(sum);
|
||||
|
||||
auto to_delete = (sum - limit * G);
|
||||
|
@ -566,7 +692,7 @@ Status DBMetaImpl::Archive() {
|
|||
return Status::OK();
|
||||
}
|
||||
|
||||
Status DBMetaImpl::Size(long &result) {
|
||||
Status DBMetaImpl::Size(uint64_t &result) {
|
||||
result = 0;
|
||||
try {
|
||||
auto selected = ConnectorPtr->select(columns(sum(&TableFileSchema::size_)),
|
||||
|
@ -578,11 +704,10 @@ Status DBMetaImpl::Size(long &result) {
|
|||
if (!std::get<0>(sub_query)) {
|
||||
continue;
|
||||
}
|
||||
result += (long) (*std::get<0>(sub_query));
|
||||
result += (uint64_t) (*std::get<0>(sub_query));
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
|
@ -609,7 +734,8 @@ Status DBMetaImpl::DiscardFiles(long to_discard_size) {
|
|||
table_file.id_ = std::get<0>(file);
|
||||
table_file.size_ = std::get<1>(file);
|
||||
ids.push_back(table_file.id_);
|
||||
LOG(DEBUG) << "Discard table_file.id=" << table_file.file_id_ << " table_file.size=" << table_file.size_;
|
||||
ENGINE_LOG_DEBUG << "Discard table_file.id=" << table_file.file_id_
|
||||
<< " table_file.size=" << table_file.size_;
|
||||
to_discard_size -= table_file.size_;
|
||||
}
|
||||
|
||||
|
@ -626,11 +752,9 @@ Status DBMetaImpl::DiscardFiles(long to_discard_size) {
|
|||
));
|
||||
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
|
||||
|
||||
return DiscardFiles(to_discard_size);
|
||||
}
|
||||
|
||||
|
@ -644,9 +768,8 @@ Status DBMetaImpl::UpdateTableFile(TableFileSchema &file_schema) {
|
|||
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
|
||||
server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
LOG(DEBUG) << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_;
|
||||
throw e;
|
||||
ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_;
|
||||
HandleException(e);
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
|
@ -669,8 +792,7 @@ Status DBMetaImpl::UpdateTableFiles(TableFilesSchema &files) {
|
|||
return Status::DBTransactionError("Update files Error");
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
|
@ -708,8 +830,7 @@ Status DBMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
|
|||
/* LOG(DEBUG) << "Removing deleted id=" << table_file.id << " location=" << table_file.location << std::endl; */
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
|
@ -747,14 +868,13 @@ Status DBMetaImpl::CleanUp() {
|
|||
/* LOG(DEBUG) << "Removing id=" << table_file.id << " location=" << table_file.location << std::endl; */
|
||||
}
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status DBMetaImpl::Count(const std::string &table_id, long &result) {
|
||||
Status DBMetaImpl::Count(const std::string &table_id, uint64_t &result) {
|
||||
|
||||
try {
|
||||
|
||||
|
@ -785,10 +905,10 @@ Status DBMetaImpl::Count(const std::string &table_id, long &result) {
|
|||
}
|
||||
|
||||
result /= table_schema.dimension_;
|
||||
result /= sizeof(float);
|
||||
|
||||
} catch (std::exception &e) {
|
||||
LOG(DEBUG) << e.what();
|
||||
throw e;
|
||||
HandleException(e);
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
|
@ -806,5 +926,5 @@ DBMetaImpl::~DBMetaImpl() {
|
|||
|
||||
} // namespace meta
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include "Options.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
namespace meta {
|
||||
|
||||
|
@ -20,8 +20,10 @@ public:
|
|||
DBMetaImpl(const DBMetaOptions& options_);
|
||||
|
||||
virtual Status CreateTable(TableSchema& table_schema) override;
|
||||
virtual Status DeleteTable(const std::string& table_id) override;
|
||||
virtual Status DescribeTable(TableSchema& group_info_) override;
|
||||
virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
|
||||
virtual Status AllTables(std::vector<TableSchema>& table_schema_array) override;
|
||||
|
||||
virtual Status CreateTableFile(TableFileSchema& file_schema) override;
|
||||
virtual Status DropPartitionsByDates(const std::string& table_id,
|
||||
|
@ -40,11 +42,15 @@ public:
|
|||
virtual Status FilesToMerge(const std::string& table_id,
|
||||
DatePartionedTableFilesSchema& files) override;
|
||||
|
||||
virtual Status FilesToDelete(const std::string& table_id,
|
||||
const DatesT& partition,
|
||||
DatePartionedTableFilesSchema& files) override;
|
||||
|
||||
virtual Status FilesToIndex(TableFilesSchema&) override;
|
||||
|
||||
virtual Status Archive() override;
|
||||
|
||||
virtual Status Size(long& result) override;
|
||||
virtual Status Size(uint64_t& result) override;
|
||||
|
||||
virtual Status CleanUp() override;
|
||||
|
||||
|
@ -52,7 +58,7 @@ public:
|
|||
|
||||
virtual Status DropAll() override;
|
||||
|
||||
virtual Status Count(const std::string& table_id, long& result) override;
|
||||
virtual Status Count(const std::string& table_id, uint64_t& result) override;
|
||||
|
||||
virtual ~DBMetaImpl();
|
||||
|
||||
|
@ -70,5 +76,5 @@ private:
|
|||
|
||||
} // namespace meta
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include "Log.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
ExecutionEnginePtr
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include "ExecutionEngine.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class EngineFactory {
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include "Env.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
Env::Env()
|
||||
|
@ -83,5 +83,5 @@ Env* Env::Default() {
|
|||
}
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <atomic>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class Env {
|
||||
|
@ -52,5 +52,5 @@ protected:
|
|||
}; // Env
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include <string>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class Exception : public std::exception {
|
||||
|
@ -50,5 +50,5 @@ public:
|
|||
};
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include <easylogging++.h>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
Status ExecutionEngine::AddWithIds(const std::vector<float>& vectors, const std::vector<long>& vector_ids) {
|
||||
|
@ -23,5 +23,5 @@ Status ExecutionEngine::AddWithIds(const std::vector<float>& vectors, const std:
|
|||
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
#include <memory>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
enum class EngineType {
|
||||
|
@ -57,5 +57,5 @@ using ExecutionEnginePtr = std::shared_ptr<ExecutionEngine>;
|
|||
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
#include <easylogging++.h>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
DBMetaOptions DBMetaOptionsFactory::Build(const std::string& path) {
|
||||
|
@ -54,5 +54,5 @@ DB* DBFactory::Build(const Options& options) {
|
|||
}
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
#include <memory>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
struct DBMetaOptionsFactory {
|
||||
|
@ -35,5 +35,5 @@ struct DBFactory {
|
|||
};
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
|
||||
|
@ -68,7 +68,7 @@ Status FaissExecutionEngine::Serialize() {
|
|||
}
|
||||
|
||||
Status FaissExecutionEngine::Load() {
|
||||
auto index = zilliz::vecwise::cache::CpuCacheMgr::GetInstance()->GetIndex(location_);
|
||||
auto index = zilliz::milvus::cache::CpuCacheMgr::GetInstance()->GetIndex(location_);
|
||||
bool to_cache = false;
|
||||
auto start_time = METRICS_NOW_TIME;
|
||||
if (!index) {
|
||||
|
@ -98,7 +98,7 @@ Status FaissExecutionEngine::Merge(const std::string& location) {
|
|||
if (location == location_) {
|
||||
return Status::Error("Cannot Merge Self");
|
||||
}
|
||||
auto to_merge = zilliz::vecwise::cache::CpuCacheMgr::GetInstance()->GetIndex(location);
|
||||
auto to_merge = zilliz::milvus::cache::CpuCacheMgr::GetInstance()->GetIndex(location);
|
||||
if (!to_merge) {
|
||||
to_merge = read_index(location);
|
||||
}
|
||||
|
@ -131,13 +131,16 @@ Status FaissExecutionEngine::Search(long n,
|
|||
long k,
|
||||
float *distances,
|
||||
long *labels) const {
|
||||
|
||||
auto start_time = METRICS_NOW_TIME;
|
||||
pIndex_->search(n, data, k, distances, labels);
|
||||
auto end_time = METRICS_NOW_TIME;
|
||||
auto total_time = METRICS_MICROSECONDS(start_time,end_time);
|
||||
server::Metrics::GetInstance().QueryIndexTypePerSecondSet(build_index_type_, double(n)/double(total_time));
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status FaissExecutionEngine::Cache() {
|
||||
zilliz::vecwise::cache::CpuCacheMgr::GetInstance(
|
||||
zilliz::milvus::cache::CpuCacheMgr::GetInstance(
|
||||
)->InsertItem(location_, std::make_shared<Index>(pIndex_));
|
||||
|
||||
return Status::OK();
|
||||
|
@ -145,5 +148,5 @@ Status FaissExecutionEngine::Cache() {
|
|||
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -15,7 +15,7 @@ namespace faiss {
|
|||
}
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
|
||||
|
@ -68,5 +68,5 @@ protected:
|
|||
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
#include <iostream>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
IDGenerator::~IDGenerator() {}
|
||||
|
@ -49,5 +49,5 @@ void SimpleIDGenerator::GetNextIDNumbers(size_t n, IDNumbers& ids) {
|
|||
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
#include <vector>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class IDGenerator {
|
||||
|
@ -37,5 +37,5 @@ private:
|
|||
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include <easylogging++.h>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
#define ENGINE_DOMAIN_NAME "[ENGINE] "
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
#include <easylogging++.h>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
MemVectors::MemVectors(const std::shared_ptr<meta::Meta>& meta_ptr,
|
||||
|
@ -27,9 +27,14 @@ MemVectors::MemVectors(const std::shared_ptr<meta::Meta>& meta_ptr,
|
|||
pEE_(EngineFactory::Build(schema_.dimension_, schema_.location_, (EngineType)schema_.engine_type_)) {
|
||||
}
|
||||
|
||||
|
||||
void MemVectors::Add(size_t n_, const float* vectors_, IDNumbers& vector_ids_) {
|
||||
auto start_time = METRICS_NOW_TIME;
|
||||
pIdGenerator_->GetNextIDNumbers(n_, vector_ids_);
|
||||
pEE_->AddWithIds(n_, vectors_, vector_ids_.data());
|
||||
auto end_time = METRICS_NOW_TIME;
|
||||
auto total_time = METRICS_MICROSECONDS(start_time, end_time);
|
||||
server::Metrics::GetInstance().AddVectorsPerSecondGaugeSet(static_cast<int>(n_), static_cast<int>(schema_.dimension_), total_time);
|
||||
}
|
||||
|
||||
size_t MemVectors::Total() const {
|
||||
|
@ -97,6 +102,7 @@ Status MemManager::InsertVectors(const std::string& table_id_,
|
|||
const float* vectors_,
|
||||
IDNumbers& vector_ids_) {
|
||||
std::unique_lock<std::mutex> lock(mutex_);
|
||||
|
||||
return InsertVectorsNoLock(table_id_, n_, vectors_, vector_ids_);
|
||||
}
|
||||
|
||||
|
@ -138,5 +144,5 @@ Status MemManager::Serialize(std::vector<std::string>& table_ids) {
|
|||
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
#include <mutex>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
namespace meta {
|
||||
|
@ -92,5 +92,5 @@ private:
|
|||
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include <stdio.h>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
namespace meta {
|
||||
|
||||
|
@ -44,5 +44,5 @@ DateT Meta::GetDate() {
|
|||
|
||||
} // namespace meta
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
#include <memory>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
namespace meta {
|
||||
|
||||
|
@ -24,8 +24,10 @@ public:
|
|||
using Ptr = std::shared_ptr<Meta>;
|
||||
|
||||
virtual Status CreateTable(TableSchema& table_schema) = 0;
|
||||
virtual Status DeleteTable(const std::string& table_id) = 0;
|
||||
virtual Status DescribeTable(TableSchema& table_schema) = 0;
|
||||
virtual Status HasTable(const std::string& table_id, bool& has_or_not) = 0;
|
||||
virtual Status AllTables(std::vector<TableSchema>& table_schema_array) = 0;
|
||||
|
||||
virtual Status CreateTableFile(TableFileSchema& file_schema) = 0;
|
||||
virtual Status DropPartitionsByDates(const std::string& table_id,
|
||||
|
@ -43,7 +45,11 @@ public:
|
|||
virtual Status FilesToMerge(const std::string& table_id,
|
||||
DatePartionedTableFilesSchema& files) = 0;
|
||||
|
||||
virtual Status Size(long& result) = 0;
|
||||
virtual Status FilesToDelete(const std::string& table_id,
|
||||
const DatesT& partition,
|
||||
DatePartionedTableFilesSchema& files) = 0;
|
||||
|
||||
virtual Status Size(uint64_t& result) = 0;
|
||||
|
||||
virtual Status Archive() = 0;
|
||||
|
||||
|
@ -54,7 +60,7 @@ public:
|
|||
|
||||
virtual Status DropAll() = 0;
|
||||
|
||||
virtual Status Count(const std::string& table_id, long& result) = 0;
|
||||
virtual Status Count(const std::string& table_id, uint64_t& result) = 0;
|
||||
|
||||
static DateT GetDate(const std::time_t& t, int day_delta = 0);
|
||||
static DateT GetDate();
|
||||
|
@ -64,5 +70,5 @@ public:
|
|||
|
||||
} // namespace meta
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
#pragma once
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
namespace meta {
|
||||
|
||||
|
@ -28,5 +28,5 @@ const size_t W_SEC = 7*D_SEC;
|
|||
|
||||
} // namespace meta
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <string>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
namespace meta {
|
||||
|
||||
|
@ -58,5 +58,5 @@ typedef std::map<DateT, TableFilesSchema> DatePartionedTableFilesSchema;
|
|||
|
||||
} // namespace meta
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
#include "Exception.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
Options::Options()
|
||||
|
@ -71,5 +71,5 @@ void ArchiveConf::ParseType(const std::string& type) {
|
|||
}
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -10,11 +10,15 @@
|
|||
#include <map>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class Env;
|
||||
|
||||
static constexpr uint64_t ONE_KB = 1024;
|
||||
static constexpr uint64_t ONE_MB = ONE_KB*ONE_KB;
|
||||
static constexpr uint64_t ONE_GB = ONE_KB*ONE_MB;
|
||||
|
||||
struct ArchiveConf {
|
||||
using CriteriaT = std::map<std::string, int>;
|
||||
|
||||
|
@ -40,14 +44,14 @@ struct DBMetaOptions {
|
|||
|
||||
struct Options {
|
||||
Options();
|
||||
uint16_t memory_sync_interval = 1;
|
||||
uint16_t memory_sync_interval = 1; //unit: second
|
||||
uint16_t merge_trigger_number = 2;
|
||||
size_t index_trigger_size = 1024*1024*1024;
|
||||
size_t index_trigger_size = ONE_GB; //unit: byte
|
||||
Env* env;
|
||||
DBMetaOptions meta;
|
||||
}; // Options
|
||||
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include "Status.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
const char* Status::CopyState(const char* state) {
|
||||
|
@ -63,5 +63,5 @@ std::string Status::ToString() const {
|
|||
}
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include <string>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class Status {
|
||||
|
@ -90,5 +90,5 @@ inline Status& Status::operator=(Status&& rhs) noexcept {
|
|||
}
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include <vector>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
typedef long IDNumber;
|
||||
|
@ -20,5 +20,5 @@ typedef std::vector<QueryResult> QueryResults;
|
|||
|
||||
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include <chrono>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
namespace utils {
|
||||
|
||||
|
@ -22,5 +22,5 @@ long GetMicroSecTimeStamp() {
|
|||
|
||||
} // namespace utils
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
namespace utils {
|
||||
|
||||
|
@ -15,5 +15,5 @@ long GetMicroSecTimeStamp();
|
|||
|
||||
} // namespace utils
|
||||
} // namespace engine
|
||||
} // namespace vecwise
|
||||
} // namespace milvus
|
||||
} // namespace zilliz
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include "SearchContext.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class IScheduleStrategy {
|
||||
|
|
|
@ -10,15 +10,9 @@
|
|||
#include "utils/Log.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
IndexLoaderQueue&
|
||||
IndexLoaderQueue::GetInstance() {
|
||||
static IndexLoaderQueue instance;
|
||||
return instance;
|
||||
}
|
||||
|
||||
void
|
||||
IndexLoaderQueue::Put(const SearchContextPtr &search_context) {
|
||||
std::unique_lock <std::mutex> lock(mtx);
|
||||
|
@ -26,6 +20,7 @@ IndexLoaderQueue::Put(const SearchContextPtr &search_context) {
|
|||
|
||||
if(search_context == nullptr) {
|
||||
queue_.push_back(nullptr);
|
||||
empty_.notify_all();
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@
|
|||
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
|
||||
|
@ -26,18 +26,15 @@ public:
|
|||
using IndexLoaderContextPtr = std::shared_ptr<IndexLoaderContext>;
|
||||
|
||||
class IndexLoaderQueue {
|
||||
private:
|
||||
public:
|
||||
IndexLoaderQueue() : mtx(), full_(), empty_() {}
|
||||
|
||||
IndexLoaderQueue(const IndexLoaderQueue &rhs) = delete;
|
||||
|
||||
IndexLoaderQueue &operator=(const IndexLoaderQueue &rhs) = delete;
|
||||
|
||||
public:
|
||||
using LoaderQueue = std::list<IndexLoaderContextPtr>;
|
||||
|
||||
static IndexLoaderQueue& GetInstance();
|
||||
|
||||
void Put(const SearchContextPtr &search_context);
|
||||
|
||||
IndexLoaderContextPtr Take();
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
#include "utils/Log.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class MemScheduleStrategy : public IScheduleStrategy {
|
||||
|
@ -38,7 +38,7 @@ public:
|
|||
new_loader->search_contexts_.push_back(search_context);
|
||||
new_loader->file_ = pair.second;
|
||||
|
||||
auto index = zilliz::vecwise::cache::CpuCacheMgr::GetInstance()->GetIndex(pair.second->location_);
|
||||
auto index = zilliz::milvus::cache::CpuCacheMgr::GetInstance()->GetIndex(pair.second->location_);
|
||||
if(index != nullptr) {
|
||||
//if the index file has been in memory, increase its priority
|
||||
loader_list.push_front(new_loader);
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include "IScheduleStrategy.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class StrategyFactory {
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
#include <chrono>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
SearchContext::SearchContext(uint64_t topk, uint64_t nq, const float* vectors)
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
#include <condition_variable>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
using TableFileSchemaPtr = std::shared_ptr<meta::TableFileSchema>;
|
||||
|
|
|
@@ -13,7 +13,7 @@
#include "db/EngineFactory.h"

namespace zilliz {
-namespace vecwise {
+namespace milvus {
namespace engine {

namespace {

@@ -55,8 +55,7 @@ void CollectDurationMetrics(int index_type, double total_time) {
}

SearchScheduler::SearchScheduler()
-    : thread_pool_(2),
-      stopped_(true) {
+    : stopped_(true) {
    Start();
}

@@ -75,8 +74,13 @@ SearchScheduler::Start() {
        return true;
    }

-    thread_pool_.enqueue(&SearchScheduler::IndexLoadWorker, this);
-    thread_pool_.enqueue(&SearchScheduler::SearchWorker, this);
    stopped_ = false;

+    search_queue_.SetCapacity(2);
+
+    index_load_thread_ = std::make_shared<std::thread>(&SearchScheduler::IndexLoadWorker, this);
+    search_thread_ = std::make_shared<std::thread>(&SearchScheduler::SearchWorker, this);

    return true;
}

@@ -86,29 +90,34 @@ SearchScheduler::Stop() {
        return true;
    }

-    IndexLoaderQueue& index_queue = IndexLoaderQueue::GetInstance();
-    index_queue.Put(nullptr);
+    if(index_load_thread_) {
+        index_load_queue_.Put(nullptr);
+        index_load_thread_->join();
+        index_load_thread_ = nullptr;
+    }

-    SearchTaskQueue& search_queue = SearchTaskQueue::GetInstance();
-    search_queue.Put(nullptr);
+    if(search_thread_) {
+        search_queue_.Put(nullptr);
+        search_thread_->join();
+        search_thread_ = nullptr;
+    }

    stopped_ = true;

    return true;
}

bool
SearchScheduler::ScheduleSearchTask(SearchContextPtr& search_context) {
-    IndexLoaderQueue& index_queue = IndexLoaderQueue::GetInstance();
-    index_queue.Put(search_context);
+    index_load_queue_.Put(search_context);

    return true;
}

bool
SearchScheduler::IndexLoadWorker() {
-    IndexLoaderQueue& index_queue = IndexLoaderQueue::GetInstance();
-    SearchTaskQueue& search_queue = SearchTaskQueue::GetInstance();
    while(true) {
-        IndexLoaderContextPtr context = index_queue.Take();
+        IndexLoaderContextPtr context = index_load_queue_.Take();
        if(context == nullptr) {
            SERVER_LOG_INFO << "Stop thread for index loading";
            break;//exit

@@ -137,7 +146,7 @@ SearchScheduler::IndexLoadWorker() {
        task_ptr->index_type_ = context->file_->file_type_;
        task_ptr->index_engine_ = index_ptr;
        task_ptr->search_contexts_.swap(context->search_contexts_);
-        search_queue.Put(task_ptr);
+        search_queue_.Put(task_ptr);
    }

    return true;

@@ -145,9 +154,8 @@ SearchScheduler::IndexLoadWorker() {

bool
SearchScheduler::SearchWorker() {
-    SearchTaskQueue& search_queue = SearchTaskQueue::GetInstance();
    while(true) {
-        SearchTaskPtr task_ptr = search_queue.Take();
+        SearchTaskPtr task_ptr = search_queue_.Take();
        if(task_ptr == nullptr) {
            SERVER_LOG_INFO << "Stop thread for searching";
            break;//exit

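For readers skimming the diff: the scheduler no longer shares a ThreadPool. Each stage gets its own std::thread fed by a member queue, and shutdown works by pushing a nullptr sentinel into the queue and joining the thread. A minimal, self-contained sketch of that poison-pill pattern follows; SimpleBlockingQueue and Task below are illustrative stand-ins, not the project's server::BlockingQueue or real task type.

#include <condition_variable>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>

// Minimal blocking queue: Put wakes one waiter, Take blocks until an item arrives.
template <typename T>
class SimpleBlockingQueue {
 public:
    void Put(const T& item) {
        { std::lock_guard<std::mutex> lock(mutex_); queue_.push(item); }
        cv_.notify_one();
    }
    T Take() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !queue_.empty(); });
        T item = queue_.front();
        queue_.pop();
        return item;
    }
 private:
    std::mutex mutex_;
    std::condition_variable cv_;
    std::queue<T> queue_;
};

struct Task { /* payload */ };
using TaskPtr = std::shared_ptr<Task>;

int main() {
    SimpleBlockingQueue<TaskPtr> queue;
    std::thread worker([&queue] {
        while (true) {
            TaskPtr task = queue.Take();
            if (task == nullptr) break;   // nullptr is the stop sentinel
            // ... process task ...
        }
    });
    queue.Put(std::make_shared<Task>());
    queue.Put(nullptr);                   // poison pill, same as Stop() above
    worker.join();                        // then join and reset the thread
    return 0;
}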
@@ -6,10 +6,11 @@
#pragma once

#include "SearchContext.h"
-#include "utils/ThreadPool.h"
+#include "IndexLoaderQueue.h"
+#include "SearchTaskQueue.h"

namespace zilliz {
-namespace vecwise {
+namespace milvus {
namespace engine {

class SearchScheduler {

@@ -30,7 +31,12 @@ private:
    bool SearchWorker();

private:
-    server::ThreadPool thread_pool_;
+    std::shared_ptr<std::thread> index_load_thread_;
+    std::shared_ptr<std::thread> search_thread_;
+
+    IndexLoaderQueue index_load_queue_;
+    SearchTaskQueue search_queue_;

    bool stopped_ = true;
};

@@ -8,7 +8,7 @@
#include "utils/TimeRecorder.h"

namespace zilliz {
-namespace vecwise {
+namespace milvus {
namespace engine {

namespace {

@@ -18,10 +18,15 @@ void ClusterResult(const std::vector<long> &output_ids,
                   uint64_t topk,
                   SearchContext::ResultSet &result_set) {
    result_set.clear();
    result_set.reserve(nq);
    for (auto i = 0; i < nq; i++) {
        SearchContext::Id2ScoreMap id_score;
        id_score.reserve(topk);
        for (auto k = 0; k < topk; k++) {
-            uint64_t index = i * nq + k;
+            uint64_t index = i * topk + k;
            if(output_ids[index] < 0) {
                continue;
            }
            id_score.push_back(std::make_pair(output_ids[index], output_distence[index]));
        }
        result_set.emplace_back(id_score);
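The one-token change above matters: the engine returns a flat buffer of nq*topk results, so neighbour k of query i lives at offset i*topk + k; the old i*nq + k read the wrong slots whenever nq differs from topk. A minimal sketch of the corrected grouping, where GroupByQuery is an illustrative helper and not part of the patch:

#include <cstdint>
#include <utility>
#include <vector>

// Group a flat (nq * topk) result buffer per query.
// Neighbour k of query i lives at offset i * topk + k.
std::vector<std::vector<std::pair<long, float>>>
GroupByQuery(const std::vector<long>& ids, const std::vector<float>& distances,
             uint64_t nq, uint64_t topk) {
    std::vector<std::vector<std::pair<long, float>>> result(nq);
    for (uint64_t i = 0; i < nq; ++i) {
        result[i].reserve(topk);
        for (uint64_t k = 0; k < topk; ++k) {
            uint64_t index = i * topk + k;       // the corrected offset
            if (ids[index] < 0) continue;        // missing hits are padded with -1
            result[i].emplace_back(ids[index], distances[index]);
        }
    }
    return result;
}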
@@ -29,20 +34,60 @@ void ClusterResult(const std::vector<long> &output_ids,
}

void MergeResult(SearchContext::Id2ScoreMap &score_src,
-                 SearchContext::Id2ScoreMap &score_target,
-                 uint64_t topk) {
-    for (auto& pair_src : score_src) {
-        for (auto iter = score_target.begin(); iter != score_target.end(); ++iter) {
-            if(pair_src.second > iter->second) {
-                score_target.insert(iter, pair_src);
+                 SearchContext::Id2ScoreMap &score_target,
+                 uint64_t topk) {
+    //Note: the score_src and score_target are already arranged by score in ascending order
+    if(score_src.empty()) {
+        return;
+    }
+
+    if(score_target.empty()) {
+        score_target.swap(score_src);
+        return;
+    }
+
+    size_t src_count = score_src.size();
+    size_t target_count = score_target.size();
+    SearchContext::Id2ScoreMap score_merged;
+    score_merged.reserve(topk);
+    size_t src_index = 0, target_index = 0;
+    while(true) {
+        //all score_src items are merged, if score_merged.size() still less than topk
+        //move items from score_target to score_merged until score_merged.size() equal topk
+        if(src_index >= src_count - 1) {
+            for(size_t i = target_index; i < target_count && score_merged.size() < topk; ++i) {
+                score_merged.push_back(score_target[i]);
+            }
+            break;
+        }
+
+        //all score_target items are merged, if score_merged.size() still less than topk
+        //move items from score_src to score_merged until score_merged.size() equal topk
+        if(target_index >= target_count - 1) {
+            for(size_t i = src_index; i < src_count && score_merged.size() < topk; ++i) {
+                score_merged.push_back(score_src[i]);
+            }
+            break;
+        }
+
+        //compare score, put smallest score to score_merged one by one
+        auto& src_pair = score_src[src_index];
+        auto& target_pair = score_target[target_index];
+        if(src_pair.second > target_pair.second) {
+            score_merged.push_back(target_pair);
+            target_index++;
+        } else {
+            score_merged.push_back(src_pair);
+            src_index++;
+        }
+
+        //score_merged.size() already equal topk
+        if(score_merged.size() >= topk) {
+            break;
+        }
+    }
+
+    //remove unused items
+    while (score_target.size() > topk) {
+        score_target.pop_back();
+    }
+    score_target.swap(score_merged);
+}

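The rewritten MergeResult is a two-pointer merge of two distance-ascending lists, truncated at topk. The same idea in a stand-alone form, illustrative only and using a plain vector of id/score pairs instead of SearchContext::Id2ScoreMap:

#include <cstdint>
#include <utility>
#include <vector>

using Id2Score = std::vector<std::pair<int64_t, float>>;

// Merge two ascending-by-score lists, keeping at most topk entries.
Id2Score MergeTopk(const Id2Score& a, const Id2Score& b, uint64_t topk) {
    Id2Score merged;
    merged.reserve(topk);
    size_t i = 0, j = 0;
    while (merged.size() < topk && (i < a.size() || j < b.size())) {
        // take from a when b is exhausted or a's head is not worse
        if (j >= b.size() || (i < a.size() && a[i].second <= b[j].second)) {
            merged.push_back(a[i++]);
        } else {
            merged.push_back(b[j++]);
        }
    }
    return merged;
}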
void TopkResult(SearchContext::ResultSet &result_src,
|
||||
|
@ -65,45 +110,6 @@ void TopkResult(SearchContext::ResultSet &result_src,
|
|||
}
|
||||
}
|
||||
|
||||
void CalcScore(uint64_t vector_count,
|
||||
const float *vectors_data,
|
||||
uint64_t dimension,
|
||||
const SearchContext::ResultSet &result_src,
|
||||
SearchContext::ResultSet &result_target) {
|
||||
result_target.clear();
|
||||
if(result_src.empty()){
|
||||
return;
|
||||
}
|
||||
|
||||
int vec_index = 0;
|
||||
for(auto& result : result_src) {
|
||||
const float * vec_data = vectors_data + vec_index*dimension;
|
||||
double vec_len = 0;
|
||||
for(uint64_t i = 0; i < dimension; i++) {
|
||||
vec_len += vec_data[i]*vec_data[i];
|
||||
}
|
||||
vec_index++;
|
||||
|
||||
SearchContext::Id2ScoreMap score_array;
|
||||
for(auto& pair : result) {
|
||||
score_array.push_back(std::make_pair(pair.first, (1 - pair.second/vec_len)*100.0));
|
||||
}
|
||||
result_target.emplace_back(score_array);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
SearchTaskQueue::SearchTaskQueue() {
|
||||
SetCapacity(4);
|
||||
}
|
||||
|
||||
|
||||
SearchTaskQueue&
|
||||
SearchTaskQueue::GetInstance() {
|
||||
static SearchTaskQueue instance;
|
||||
return instance;
|
||||
}
|
||||
|
||||
bool SearchTask::DoSearch() {
|
||||
|
@ -111,7 +117,7 @@ bool SearchTask::DoSearch() {
|
|||
return false;
|
||||
}
|
||||
|
||||
server::TimeRecorder rc("DoSearch");
|
||||
server::TimeRecorder rc("DoSearch index(" + std::to_string(index_id_) + ")");
|
||||
|
||||
std::vector<long> output_ids;
|
||||
std::vector<float> output_distence;
|
||||
|
@ -121,33 +127,29 @@ bool SearchTask::DoSearch() {
|
|||
output_ids.resize(inner_k*context->nq());
|
||||
output_distence.resize(inner_k*context->nq());
|
||||
|
||||
//step 2: search
|
||||
try {
|
||||
//step 2: search
|
||||
index_engine_->Search(context->nq(), context->vectors(), inner_k, output_distence.data(),
|
||||
output_ids.data());
|
||||
|
||||
rc.Record("do search");
|
||||
|
||||
//step 3: cluster result
|
||||
SearchContext::ResultSet result_set;
|
||||
ClusterResult(output_ids, output_distence, context->nq(), inner_k, result_set);
|
||||
rc.Record("cluster result");
|
||||
|
||||
//step 4: pick up topk result
|
||||
TopkResult(result_set, inner_k, context->GetResult());
|
||||
rc.Record("reduce topk");
|
||||
|
||||
} catch (std::exception& ex) {
|
||||
SERVER_LOG_ERROR << "SearchTask encounter exception: " << ex.what();
|
||||
context->IndexSearchDone(index_id_);//mark as done avoid dead lock, even search failed
|
||||
continue;
|
||||
}
|
||||
|
||||
rc.Record("do search");
|
||||
|
||||
//step 3: cluster result
|
||||
SearchContext::ResultSet result_set;
|
||||
ClusterResult(output_ids, output_distence, context->nq(), inner_k, result_set);
|
||||
rc.Record("cluster result");
|
||||
|
||||
//step 4: pick up topk result
|
||||
TopkResult(result_set, inner_k, context->GetResult());
|
||||
rc.Record("reduce topk");
|
||||
|
||||
//step 5: calculate score between 0 ~ 100
|
||||
CalcScore(context->nq(), context->vectors(), index_engine_->Dimension(), context->GetResult(), result_set);
|
||||
context->GetResult().swap(result_set);
|
||||
rc.Record("calculate score");
|
||||
|
||||
//step 6: notify to send result to client
|
||||
//step 5: notify to send result to client
|
||||
context->IndexSearchDone(index_id_);
|
||||
}
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
#include <memory>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace engine {
|
||||
|
||||
class SearchTask {
|
||||
|
@ -27,21 +27,7 @@ public:
|
|||
};
|
||||
|
||||
using SearchTaskPtr = std::shared_ptr<SearchTask>;
|
||||
|
||||
class SearchTaskQueue : public server::BlockingQueue<SearchTaskPtr> {
|
||||
private:
|
||||
SearchTaskQueue();
|
||||
|
||||
SearchTaskQueue(const SearchTaskQueue &rhs) = delete;
|
||||
|
||||
SearchTaskQueue &operator=(const SearchTaskQueue &rhs) = delete;
|
||||
|
||||
public:
|
||||
static SearchTaskQueue& GetInstance();
|
||||
|
||||
private:
|
||||
|
||||
};
|
||||
using SearchTaskQueue = server::BlockingQueue<SearchTaskPtr>;
|
||||
|
||||
|
||||
}
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
#include <getopt.h>
|
||||
#include <memory.h>
|
||||
// Not provide path: current work path will be used and system.info.
|
||||
using namespace zilliz::vecwise;
|
||||
using namespace zilliz::milvus;
|
||||
|
||||
void
|
||||
print_usage(const std::string &app_name) {
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
#include "LicenseCheck.h"
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
|
||||
#include <boost/archive/binary_oarchive.hpp>
|
||||
#include <boost/archive/binary_iarchive.hpp>
|
||||
//#include <boost/foreach.hpp>
|
||||
|
@ -7,14 +9,21 @@
|
|||
#include <boost/filesystem/path.hpp>
|
||||
#include <boost/serialization/map.hpp>
|
||||
#include <boost/filesystem/operations.hpp>
|
||||
#include <boost/thread.hpp>
|
||||
#include <boost/date_time/posix_time/posix_time.hpp>
|
||||
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
LicenseCheck::LicenseCheck() {
|
||||
|
||||
// Part 1: Legality check
|
||||
}
|
||||
|
||||
LicenseCheck::~LicenseCheck() {
|
||||
StopCountingDown();
|
||||
}
|
||||
|
||||
ServerError
|
||||
LicenseCheck::LegalityCheck(const std::string &license_file_path) {
|
||||
|
@ -69,14 +78,16 @@ LicenseCheck::AlterFile(const std::string &license_file_path,
|
|||
const boost::system::error_code &ec,
|
||||
boost::asio::deadline_timer *pt) {
|
||||
|
||||
ServerError err = LegalityCheck(license_file_path);
|
||||
if(err!=SERVER_SUCCESS)
|
||||
{
|
||||
ServerError err = LicenseCheck::LegalityCheck(license_file_path);
|
||||
if(err!=SERVER_SUCCESS) {
|
||||
printf("license file check error\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
printf("---runing---\n");
|
||||
pt->expires_at(pt->expires_at() + boost::posix_time::hours(1));
|
||||
pt->async_wait(boost::bind(AlterFile, license_file_path, boost::asio::placeholders::error, pt));
|
||||
pt->async_wait(boost::bind(LicenseCheck::AlterFile, license_file_path, boost::asio::placeholders::error, pt));
|
||||
|
||||
return SERVER_SUCCESS;
|
||||
|
||||
}
|
||||
|
@ -84,11 +95,34 @@ LicenseCheck::AlterFile(const std::string &license_file_path,
|
|||
ServerError
|
||||
LicenseCheck::StartCountingDown(const std::string &license_file_path) {
|
||||
|
||||
if (!LicenseLibrary::IsFileExistent(license_file_path)) return SERVER_LICENSE_FILE_NOT_EXIST;
|
||||
boost::asio::io_service io;
|
||||
boost::asio::deadline_timer t(io, boost::posix_time::hours(1));
|
||||
t.async_wait(boost::bind(AlterFile, license_file_path, boost::asio::placeholders::error, &t));
|
||||
io.run();
|
||||
if (!LicenseLibrary::IsFileExistent(license_file_path)) {
|
||||
printf("license file not exist\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
//create a thread to run AlterFile
|
||||
if(counting_thread_ == nullptr) {
|
||||
counting_thread_ = std::make_shared<std::thread>([&]() {
|
||||
boost::asio::deadline_timer t(io_service_, boost::posix_time::hours(1));
|
||||
t.async_wait(boost::bind(LicenseCheck::AlterFile, license_file_path, boost::asio::placeholders::error, &t));
|
||||
io_service_.run();//this thread will block here
|
||||
});
|
||||
}
|
||||
|
||||
return SERVER_SUCCESS;
|
||||
}
|
||||
|
||||
ServerError
|
||||
LicenseCheck::StopCountingDown() {
|
||||
if(!io_service_.stopped()) {
|
||||
io_service_.stop();
|
||||
}
|
||||
|
||||
if(counting_thread_ != nullptr) {
|
||||
counting_thread_->join();
|
||||
counting_thread_ = nullptr;
|
||||
}
|
||||
|
||||
return SERVER_SUCCESS;
|
||||
}
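StartCountingDown now runs the hourly license re-check on its own thread: AlterFile pushes the timer's expiry forward and re-arms itself, while StopCountingDown stops the io_service and joins the thread. A compressed, stand-alone sketch of that re-arming timer pattern, assuming Boost.Asio and a one-second period purely for illustration:

#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>

#include <iostream>

// Re-arming periodic timer: the same shape as AlterFile/StartCountingDown above.
static void Tick(const boost::system::error_code &ec, boost::asio::deadline_timer *timer) {
    if (ec) return;                                    // e.g. operation_aborted after cancellation
    std::cout << "periodic check" << std::endl;        // stands in for LegalityCheck()
    timer->expires_at(timer->expires_at() + boost::posix_time::seconds(1));
    timer->async_wait(boost::bind(Tick, boost::asio::placeholders::error, timer));
}

int main() {
    boost::asio::io_service io;
    boost::asio::deadline_timer timer(io, boost::posix_time::seconds(1));
    timer.async_wait(boost::bind(Tick, boost::asio::placeholders::error, &timer));
    io.run();    // blocks until io.stop() is called from another thread, as StopCountingDown does
    return 0;
}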
|
||||
|
||||
|
|
|
@ -4,40 +4,44 @@
|
|||
#include "LicenseLibrary.h"
|
||||
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/thread.hpp>
|
||||
#include <boost/date_time/posix_time/posix_time.hpp>
|
||||
|
||||
#include <thread>
|
||||
#include <memory>
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
class LicenseCheck {
|
||||
public:
|
||||
private:
|
||||
LicenseCheck();
|
||||
~LicenseCheck();
|
||||
|
||||
public:
|
||||
static LicenseCheck &
|
||||
GetInstance() {
|
||||
static LicenseCheck instance;
|
||||
return instance;
|
||||
};
|
||||
|
||||
|
||||
// Part 1: Legality check
|
||||
static ServerError
|
||||
LegalityCheck(const std::string &license_file_path);
|
||||
|
||||
ServerError
|
||||
StartCountingDown(const std::string &license_file_path);
|
||||
|
||||
// Part 2: Timing check license
|
||||
ServerError
|
||||
StopCountingDown();
|
||||
|
||||
private:
|
||||
static ServerError
|
||||
AlterFile(const std::string &license_file_path,
|
||||
const boost::system::error_code &ec,
|
||||
boost::asio::deadline_timer *pt);
|
||||
|
||||
|
||||
static ServerError
|
||||
StartCountingDown(const std::string &license_file_path);
|
||||
|
||||
private:
|
||||
|
||||
private:
|
||||
boost::asio::io_service io_service_;
|
||||
std::shared_ptr<std::thread> counting_thread_;
|
||||
|
||||
};
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include "utils/Error.h"
|
||||
|
||||
|
||||
using namespace zilliz::vecwise;
|
||||
using namespace zilliz::milvus;
|
||||
// Not provide path: current work path will be used and system.info.
|
||||
|
||||
void
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
constexpr int LicenseLibrary::sha256_length_;
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
class LicenseLibrary {
|
||||
|
|
|
@@ -22,12 +22,12 @@ INITIALIZE_EASYLOGGINGPP

void print_help(const std::string &app_name);

-using namespace zilliz::vecwise;
+using namespace zilliz::milvus;

int
main(int argc, char *argv[]) {
-    printf("Megasearch %s version: v%s built at %s\n", BUILD_TYPE, MEGASEARCH_VERSION, BUILD_TIME);
-    printf("Megasearch server start...\n");
+    printf("\nWelcome to use Milvus by Zilliz!\n");
+    printf("Milvus %s version: v%s built at %s\n", BUILD_TYPE, MILVUS_VERSION, BUILD_TIME);

    signal(SIGINT, server::SignalUtil::HandleSignal);
    signal(SIGSEGV, server::SignalUtil::HandleSignal);

@@ -53,7 +53,7 @@ main(int argc, char *argv[]) {

    if(argc < 2) {
        print_help(app_name);
-        printf("Vecwise engine server exit...\n");
+        printf("Milvus server exit...\n");
        return EXIT_FAILURE;
    }

@@ -98,7 +98,7 @@ main(int argc, char *argv[]) {
        }
    }

-    zilliz::vecwise::server::InitLog(log_config_file);
+    zilliz::milvus::server::InitLog(log_config_file);

    server::Server* server_ptr = server::Server::Instance();
    server_ptr->Init(start_daemonized, pid_filename, config_filename);

@ -8,9 +8,10 @@
|
|||
|
||||
#include "utils/Error.h"
|
||||
#include "server/ServerConfig.h"
|
||||
#include "SystemInfo.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
class MetricsBase{
|
||||
public:
|
||||
|
@ -71,6 +72,17 @@ class MetricsBase{
|
|||
virtual void AddVectorsFailGaugeSet(double value) {};
|
||||
virtual void QueryVectorResponseSummaryObserve(double value, int count = 1) {};
|
||||
virtual void QueryVectorResponsePerSecondGaugeSet(double value) {};
|
||||
virtual void CPUUsagePercentSet() {};
|
||||
virtual void RAMUsagePercentSet() {};
|
||||
virtual void QueryResponsePerSecondGaugeSet(double value) {};
|
||||
virtual void GPUPercentGaugeSet() {};
|
||||
virtual void GPUMemoryUsageGaugeSet() {};
|
||||
virtual void AddVectorsPerSecondGaugeSet(int num_vector, int dim, double time) {};
|
||||
virtual void QueryIndexTypePerSecondSet(std::string type, double value) {};
|
||||
virtual void ConnectionGaugeIncrement() {};
|
||||
virtual void ConnectionGaugeDecrement() {};
|
||||
virtual void KeepingAliveCounterIncrement(double value = 1) {};
|
||||
virtual void OctetsSet() {};
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include "PrometheusMetrics.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
MetricsBase &
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
//#include "PrometheusMetrics.h"
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
#define METRICS_NOW_TIME std::chrono::system_clock::now()
|
||||
|
|
|
@ -5,30 +5,163 @@
|
|||
******************************************************************************/
|
||||
|
||||
#include "PrometheusMetrics.h"
|
||||
#include "utils/Log.h"
|
||||
#include "SystemInfo.h"
|
||||
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
ServerError
|
||||
PrometheusMetrics::Init() {
|
||||
ConfigNode& configNode = ServerConfig::GetInstance().GetConfig(CONFIG_METRIC);
|
||||
startup_ = configNode.GetValue(CONFIG_METRIC_IS_STARTUP) == "true" ? true:false;
|
||||
// Following should be read from config file.
|
||||
const std::string bind_address = configNode.GetChild(CONFIG_PROMETHEUS).GetValue(CONFIG_METRIC_PROMETHEUS_PORT);
|
||||
const std::string uri = std::string("/metrics");
|
||||
const std::size_t num_threads = 2;
|
||||
try {
|
||||
ConfigNode &configNode = ServerConfig::GetInstance().GetConfig(CONFIG_METRIC);
|
||||
startup_ = configNode.GetValue(CONFIG_METRIC_IS_STARTUP) == "true" ? true : false;
|
||||
// Following should be read from config file.
|
||||
const std::string bind_address = configNode.GetChild(CONFIG_PROMETHEUS).GetValue(CONFIG_METRIC_PROMETHEUS_PORT);
|
||||
const std::string uri = std::string("/metrics");
|
||||
const std::size_t num_threads = 2;
|
||||
|
||||
// Init Exposer
|
||||
exposer_ptr_ = std::make_shared<prometheus::Exposer>(bind_address, uri, num_threads);
|
||||
// Init Exposer
|
||||
exposer_ptr_ = std::make_shared<prometheus::Exposer>(bind_address, uri, num_threads);
|
||||
|
||||
// Exposer Registry
|
||||
exposer_ptr_->RegisterCollectable(registry_);
|
||||
// Exposer Registry
|
||||
exposer_ptr_->RegisterCollectable(registry_);
|
||||
} catch (std::exception& ex) {
|
||||
SERVER_LOG_ERROR << "Failed to connect prometheus server: " << std::string(ex.what());
|
||||
return SERVER_UNEXPECTED_ERROR;
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
return SERVER_SUCCESS;
|
||||
|
||||
}
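For reference, the prometheus-cpp wiring inside the try block above boils down to three pieces: an Exposer bound to an address, a shared Registry, and metric families registered against it. A minimal sketch along those lines; the bind address and metric value are placeholders (in the server they come from ServerConfig and SystemInfo), and header names/signatures follow the prometheus-cpp version this tree vendors, which may differ in newer releases:

#include <prometheus/exposer.h>
#include <prometheus/gauge.h>
#include <prometheus/registry.h>

#include <memory>

int main() {
    // Expose /metrics over HTTP; the server reads the real port from CONFIG_METRIC_PROMETHEUS_PORT.
    prometheus::Exposer exposer("0.0.0.0:8080", "/metrics", 2);

    auto registry = std::make_shared<prometheus::Registry>();
    exposer.RegisterCollectable(registry);

    // One gauge family and one label-less gauge, mirroring CPU_usage_percent_ above.
    auto &cpu_family = prometheus::BuildGauge()
            .Name("CPU_usage_percent")
            .Help("CPU usage percent by this process")
            .Register(*registry);
    prometheus::Gauge &cpu_gauge = cpu_family.Add({});
    cpu_gauge.Set(42.0);    // in PrometheusMetrics this value comes from SystemInfo::CPUPercent()
    return 0;
}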
|
||||
|
||||
|
||||
void
|
||||
PrometheusMetrics::CPUUsagePercentSet() {
|
||||
if(!startup_) return ;
|
||||
double usage_percent = server::SystemInfo::GetInstance().CPUPercent();
|
||||
CPU_usage_percent_.Set(usage_percent);
|
||||
}
|
||||
|
||||
void
|
||||
PrometheusMetrics::RAMUsagePercentSet() {
|
||||
if(!startup_) return ;
|
||||
double usage_percent = server::SystemInfo::GetInstance().MemoryPercent();
|
||||
RAM_usage_percent_.Set(usage_percent);
|
||||
}
|
||||
|
||||
void
|
||||
PrometheusMetrics::GPUPercentGaugeSet() {
|
||||
if(!startup_) return;
|
||||
int numDevide = server::SystemInfo::GetInstance().num_device();
|
||||
std::vector<unsigned int> values = server::SystemInfo::GetInstance().GPUPercent();
|
||||
// for (int i = 0; i < numDevide; ++i) {
|
||||
// GPU_percent_gauges_[i].Set(static_cast<double>(values[i]));
|
||||
// }
|
||||
if(numDevide >= 1) GPU0_percent_gauge_.Set(static_cast<double>(values[0]));
|
||||
if(numDevide >= 2) GPU1_percent_gauge_.Set(static_cast<double>(values[1]));
|
||||
if(numDevide >= 3) GPU2_percent_gauge_.Set(static_cast<double>(values[2]));
|
||||
if(numDevide >= 4) GPU3_percent_gauge_.Set(static_cast<double>(values[3]));
|
||||
if(numDevide >= 5) GPU4_percent_gauge_.Set(static_cast<double>(values[4]));
|
||||
if(numDevide >= 6) GPU5_percent_gauge_.Set(static_cast<double>(values[5]));
|
||||
if(numDevide >= 7) GPU6_percent_gauge_.Set(static_cast<double>(values[6]));
|
||||
if(numDevide >= 8) GPU7_percent_gauge_.Set(static_cast<double>(values[7]));
|
||||
|
||||
// to do
|
||||
}
|
||||
|
||||
void PrometheusMetrics::GPUMemoryUsageGaugeSet() {
|
||||
if(!startup_) return;
|
||||
int numDevide = server::SystemInfo::GetInstance().num_device();
|
||||
std::vector<unsigned long long> values = server::SystemInfo::GetInstance().GPUMemoryUsed();
|
||||
constexpr unsigned long long MtoB = 1024*1024;
|
||||
int numDevice = values.size();
|
||||
// for (int i = 0; i < numDevice; ++i) {
|
||||
// GPU_memory_usage_gauges_[i].Set(values[i]/MtoB);
|
||||
// }
|
||||
if(numDevice >=1) GPU0_memory_usage_gauge_.Set(values[0]/MtoB);
|
||||
if(numDevice >=2) GPU1_memory_usage_gauge_.Set(values[1]/MtoB);
|
||||
if(numDevice >=3) GPU2_memory_usage_gauge_.Set(values[2]/MtoB);
|
||||
if(numDevice >=4) GPU3_memory_usage_gauge_.Set(values[3]/MtoB);
|
||||
if(numDevice >=5) GPU4_memory_usage_gauge_.Set(values[4]/MtoB);
|
||||
if(numDevice >=6) GPU5_memory_usage_gauge_.Set(values[5]/MtoB);
|
||||
if(numDevice >=7) GPU6_memory_usage_gauge_.Set(values[6]/MtoB);
|
||||
if(numDevice >=8) GPU7_memory_usage_gauge_.Set(values[7]/MtoB);
|
||||
|
||||
// to do
|
||||
}
|
||||
void PrometheusMetrics::AddVectorsPerSecondGaugeSet(int num_vector, int dim, double time) {
|
||||
// MB/s
|
||||
if(!startup_) return;
|
||||
|
||||
long long MtoB = 1024*1024;
|
||||
long long size = num_vector * dim * 4;
|
||||
add_vectors_per_second_gauge_.Set(size/time/MtoB);
|
||||
|
||||
}
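In other words the gauge reports ingest throughput in MB per unit of the supplied time: num_vector * dim * 4 bytes (the hard-coded 4 assumes float32 components), divided by time and by 1024*1024. As a worked example, 100000 vectors of dimension 512 added in 2 time units give 100000 * 512 * 4 / 2 / 1048576 ≈ 97.7.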
|
||||
void PrometheusMetrics::QueryIndexTypePerSecondSet(std::string type, double value) {
|
||||
if(!startup_) return;
|
||||
if(type == "IVF"){
|
||||
query_index_IVF_type_per_second_gauge_.Set(value);
|
||||
} else if(type == "IDMap"){
|
||||
query_index_IDMAP_type_per_second_gauge_.Set(value);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void PrometheusMetrics::ConnectionGaugeIncrement() {
|
||||
if(!startup_) return;
|
||||
connection_gauge_.Increment();
|
||||
}
|
||||
|
||||
void PrometheusMetrics::ConnectionGaugeDecrement() {
|
||||
if(!startup_) return;
|
||||
connection_gauge_.Decrement();
|
||||
}
|
||||
|
||||
void PrometheusMetrics::OctetsSet() {
|
||||
if(!startup_) return;
|
||||
|
||||
// get old stats and reset them
|
||||
unsigned long long old_inoctets = SystemInfo::GetInstance().get_inoctets();
|
||||
unsigned long long old_outoctets = SystemInfo::GetInstance().get_octets();
|
||||
auto old_time = SystemInfo::GetInstance().get_nettime();
|
||||
std::pair<unsigned long long, unsigned long long> in_and_out_octets = SystemInfo::GetInstance().Octets();
|
||||
SystemInfo::GetInstance().set_inoctets(in_and_out_octets.first);
|
||||
SystemInfo::GetInstance().set_outoctets(in_and_out_octets.second);
|
||||
SystemInfo::GetInstance().set_nettime();
|
||||
|
||||
//
|
||||
constexpr double micro_to_second = 1e-6;
|
||||
auto now_time = std::chrono::system_clock::now();
|
||||
auto total_microsecond = METRICS_MICROSECONDS(old_time, now_time);
|
||||
auto total_second = total_microsecond*micro_to_second;
|
||||
if(total_second == 0) return;
|
||||
inoctets_gauge_.Set((in_and_out_octets.first-old_inoctets)/total_second);
|
||||
outoctets_gauge_.Set((in_and_out_octets.second-old_outoctets)/total_second);
|
||||
}
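The two gauges therefore report bytes per second: the difference between the freshly read octet counters and the previously stored ones, divided by the elapsed wall-clock time (METRICS_MICROSECONDS scaled by 1e-6). For example, if InOctets grew from 1,000,000 to 4,000,000 over 2,000,000 microseconds, inoctets_gauge_ is set to 3,000,000 / 2 = 1,500,000 B/s.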
|
||||
|
||||
//void PrometheusMetrics::GpuPercentInit() {
|
||||
// int num_device = SystemInfo::GetInstance().num_device();
|
||||
// constexpr char device_number[] = "DeviceNum";
|
||||
// for(int i = 0; i < num_device; ++ i) {
|
||||
// GPU_percent_gauges_.emplace_back(GPU_percent_.Add({{device_number,std::to_string(i)}}));
|
||||
// }
|
||||
//
|
||||
//}
|
||||
//void PrometheusMetrics::GpuMemoryInit() {
|
||||
// int num_device = SystemInfo::GetInstance().num_device();
|
||||
// constexpr char device_number[] = "DeviceNum";
|
||||
// for(int i = 0; i < num_device; ++ i) {
|
||||
// GPU_memory_usage_gauges_.emplace_back(GPU_memory_usage_.Add({{device_number,std::to_string(i)}}));
|
||||
// }
|
||||
//}
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
|
||||
|
||||
namespace zilliz {
|
||||
namespace vecwise {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
|
||||
|
@ -49,6 +49,8 @@ class PrometheusMetrics: public MetricsBase {
|
|||
std::shared_ptr<prometheus::Exposer> exposer_ptr_;
|
||||
std::shared_ptr<prometheus::Registry> registry_ = std::make_shared<prometheus::Registry>();
|
||||
bool startup_ = false;
|
||||
// void GpuPercentInit();
|
||||
// void GpuMemoryInit();
|
||||
public:
|
||||
|
||||
void AddGroupSuccessTotalIncrement(double value = 1.0) override { if(startup_) add_group_success_total_.Increment(value);};
|
||||
|
@ -104,51 +106,21 @@ class PrometheusMetrics: public MetricsBase {
|
|||
void AddVectorsFailGaugeSet(double value) override { if(startup_) add_vectors_fail_gauge_.Set(value);};
|
||||
void QueryVectorResponseSummaryObserve(double value, int count = 1) override { if (startup_) for(int i = 0 ; i < count ; ++i) query_vector_response_summary_.Observe(value);};
|
||||
void QueryVectorResponsePerSecondGaugeSet(double value) override {if (startup_) query_vector_response_per_second_gauge_.Set(value);};
|
||||
void CPUUsagePercentSet() override ;
|
||||
void RAMUsagePercentSet() override ;
|
||||
void QueryResponsePerSecondGaugeSet(double value) override {if(startup_) query_response_per_second_gauge.Set(value);};
|
||||
void GPUPercentGaugeSet() override ;
|
||||
void GPUMemoryUsageGaugeSet() override ;
|
||||
void AddVectorsPerSecondGaugeSet(int num_vector, int dim, double time) override ;
|
||||
void QueryIndexTypePerSecondSet(std::string type, double value) override ;
|
||||
void ConnectionGaugeIncrement() override ;
|
||||
void ConnectionGaugeDecrement() override ;
|
||||
void KeepingAliveCounterIncrement(double value = 1) override {if(startup_) keeping_alive_counter_.Increment(value);};
|
||||
void OctetsSet() override ;
|
||||
|
||||
|
||||
|
||||
|
||||
// prometheus::Counter &connection_total() {return connection_total_; }
|
||||
//
|
||||
// prometheus::Counter &add_group_success_total() { return add_group_success_total_; }
|
||||
// prometheus::Counter &add_group_fail_total() { return add_group_fail_total_; }
|
||||
//
|
||||
// prometheus::Counter &get_group_success_total() { return get_group_success_total_;}
|
||||
// prometheus::Counter &get_group_fail_total() { return get_group_fail_total_;}
|
||||
//
|
||||
// prometheus::Counter &has_group_success_total() { return has_group_success_total_;}
|
||||
// prometheus::Counter &has_group_fail_total() { return has_group_fail_total_;}
|
||||
//
|
||||
// prometheus::Counter &get_group_files_success_total() { return get_group_files_success_total_;};
|
||||
// prometheus::Counter &get_group_files_fail_total() { return get_group_files_fail_total_;}
|
||||
//
|
||||
// prometheus::Counter &add_vectors_success_total() { return add_vectors_success_total_; }
|
||||
// prometheus::Counter &add_vectors_fail_total() { return add_vectors_fail_total_; }
|
||||
//
|
||||
// prometheus::Histogram &add_vectors_duration_histogram() { return add_vectors_duration_histogram_;}
|
||||
//
|
||||
// prometheus::Counter &search_success_total() { return search_success_total_; }
|
||||
// prometheus::Counter &search_fail_total() { return search_fail_total_; }
|
||||
//
|
||||
// prometheus::Histogram &search_duration_histogram() { return search_duration_histogram_; }
|
||||
// prometheus::Histogram &raw_files_size_histogram() { return raw_files_size_histogram_; }
|
||||
// prometheus::Histogram &index_files_size_histogram() { return index_files_size_histogram_; }
|
||||
//
|
||||
// prometheus::Histogram &build_index_duration_seconds_histogram() { return build_index_duration_seconds_histogram_; }
|
||||
//
|
||||
// prometheus::Histogram &all_build_index_duration_seconds_histogram() { return all_build_index_duration_seconds_histogram_; }
|
||||
//
|
||||
// prometheus::Gauge &cache_usage_gauge() { return cache_usage_gauge_; }
|
||||
//
|
||||
// prometheus::Counter &meta_visit_total() { return meta_visit_total_; }
|
||||
//
|
||||
// prometheus::Histogram &meta_visit_duration_seconds_histogram() { return meta_visit_duration_seconds_histogram_; }
|
||||
//
|
||||
// prometheus::Gauge &mem_usage_percent_gauge() { return mem_usage_percent_gauge_; }
|
||||
//
|
||||
// prometheus::Gauge &mem_usage_total_gauge() { return mem_usage_total_gauge_; }
|
||||
|
||||
|
||||
|
||||
|
||||
std::shared_ptr<prometheus::Exposer> &exposer_ptr() {return exposer_ptr_; }
|
||||
|
@ -273,7 +245,7 @@ class PrometheusMetrics: public MetricsBase {
|
|||
.Name("build_index_duration_microseconds")
|
||||
.Help("histogram of processing time for building index")
|
||||
.Register(*registry_);
|
||||
prometheus::Histogram &build_index_duration_seconds_histogram_ = build_index_duration_seconds_.Add({}, BucketBoundaries{2e6, 4e6, 6e6, 8e6, 1e7});
|
||||
prometheus::Histogram &build_index_duration_seconds_histogram_ = build_index_duration_seconds_.Add({}, BucketBoundaries{5e5, 2e6, 4e6, 6e6, 8e6, 1e7});
|
||||
|
||||
|
||||
//record processing time for all building index
|
||||
|
@ -414,6 +386,12 @@ class PrometheusMetrics: public MetricsBase {
|
|||
.Register(*registry_);
|
||||
prometheus::Gauge &query_vector_response_per_second_gauge_ = query_vector_response_per_second_.Add({});
|
||||
|
||||
prometheus::Family<prometheus::Gauge> &query_response_per_second_ = prometheus::BuildGauge()
|
||||
.Name("query_response_per_microsecond")
|
||||
.Help("the number of queries can be processed every microsecond")
|
||||
.Register(*registry_);
|
||||
prometheus::Gauge &query_response_per_second_gauge = query_response_per_second_.Add({});
|
||||
|
||||
prometheus::Family<prometheus::Gauge> &disk_store_IO_speed_ = prometheus::BuildGauge()
|
||||
.Name("disk_store_IO_speed_bytes_per_microseconds")
|
||||
.Help("disk_store_IO_speed")
|
||||
|
@ -433,6 +411,84 @@ class PrometheusMetrics: public MetricsBase {
|
|||
prometheus::Gauge &add_vectors_success_gauge_ = add_vectors_.Add({{"outcome", "success"}});
|
||||
prometheus::Gauge &add_vectors_fail_gauge_ = add_vectors_.Add({{"outcome", "fail"}});
|
||||
|
||||
prometheus::Family<prometheus::Gauge> &add_vectors_per_second_ = prometheus::BuildGauge()
|
||||
.Name("add_vectors_throughput_per_microsecond")
|
||||
.Help("add vectors throughput per microsecond")
|
||||
.Register(*registry_);
|
||||
prometheus::Gauge &add_vectors_per_second_gauge_ = add_vectors_per_second_.Add({});
|
||||
|
||||
prometheus::Family<prometheus::Gauge> &CPU_ = prometheus::BuildGauge()
|
||||
.Name("CPU_usage_percent")
|
||||
.Help("CPU usage percent by this this process")
|
||||
.Register(*registry_);
|
||||
prometheus::Gauge &CPU_usage_percent_ = CPU_.Add({});
|
||||
|
||||
prometheus::Family<prometheus::Gauge> &RAM_ = prometheus::BuildGauge()
|
||||
.Name("RAM_usage_percent")
|
||||
.Help("RAM usage percent by this process")
|
||||
.Register(*registry_);
|
||||
prometheus::Gauge &RAM_usage_percent_ = RAM_.Add({});
|
||||
|
||||
//GPU Usage Percent
|
||||
prometheus::Family<prometheus::Gauge> &GPU_percent_ = prometheus::BuildGauge()
|
||||
.Name("Gpu_usage_percent")
|
||||
.Help("GPU_usage_percent ")
|
||||
.Register(*registry_);
|
||||
prometheus::Gauge &GPU0_percent_gauge_ = GPU_percent_.Add({{"DeviceNum", "0"}});
|
||||
prometheus::Gauge &GPU1_percent_gauge_ = GPU_percent_.Add({{"DeviceNum", "1"}});
|
||||
prometheus::Gauge &GPU2_percent_gauge_ = GPU_percent_.Add({{"DeviceNum", "2"}});
|
||||
prometheus::Gauge &GPU3_percent_gauge_ = GPU_percent_.Add({{"DeviceNum", "3"}});
|
||||
prometheus::Gauge &GPU4_percent_gauge_ = GPU_percent_.Add({{"DeviceNum", "4"}});
|
||||
prometheus::Gauge &GPU5_percent_gauge_ = GPU_percent_.Add({{"DeviceNum", "5"}});
|
||||
prometheus::Gauge &GPU6_percent_gauge_ = GPU_percent_.Add({{"DeviceNum", "6"}});
|
||||
prometheus::Gauge &GPU7_percent_gauge_ = GPU_percent_.Add({{"DeviceNum", "7"}});
|
||||
// std::vector<prometheus::Gauge> GPU_percent_gauges_;
|
||||
|
||||
|
||||
|
||||
|
||||
//GPU memory used
|
||||
prometheus::Family<prometheus::Gauge> &GPU_memory_usage_ = prometheus::BuildGauge()
|
||||
.Name("GPU_memory_usage_total")
|
||||
.Help("GPU memory usage total ")
|
||||
.Register(*registry_);
|
||||
prometheus::Gauge &GPU0_memory_usage_gauge_ = GPU_memory_usage_.Add({{"DeviceNum", "0"}});
|
||||
prometheus::Gauge &GPU1_memory_usage_gauge_ = GPU_memory_usage_.Add({{"DeviceNum", "1"}});
|
||||
prometheus::Gauge &GPU2_memory_usage_gauge_ = GPU_memory_usage_.Add({{"DeviceNum", "2"}});
|
||||
prometheus::Gauge &GPU3_memory_usage_gauge_ = GPU_memory_usage_.Add({{"DeviceNum", "3"}});
|
||||
prometheus::Gauge &GPU4_memory_usage_gauge_ = GPU_memory_usage_.Add({{"DeviceNum", "4"}});
|
||||
prometheus::Gauge &GPU5_memory_usage_gauge_ = GPU_memory_usage_.Add({{"DeviceNum", "5"}});
|
||||
prometheus::Gauge &GPU6_memory_usage_gauge_ = GPU_memory_usage_.Add({{"DeviceNum", "6"}});
|
||||
prometheus::Gauge &GPU7_memory_usage_gauge_ = GPU_memory_usage_.Add({{"DeviceNum", "7"}});
|
||||
// std::vector<prometheus::Gauge> GPU_memory_usage_gauges_;
|
||||
|
||||
prometheus::Family<prometheus::Gauge> &query_index_type_per_second_ = prometheus::BuildGauge()
|
||||
.Name("query_index_throughtout_per_microsecond")
|
||||
.Help("query index throughtout per microsecond")
|
||||
.Register(*registry_);
|
||||
prometheus::Gauge &query_index_IVF_type_per_second_gauge_ = query_index_type_per_second_.Add({{"IndexType","IVF"}});
|
||||
prometheus::Gauge &query_index_IDMAP_type_per_second_gauge_ = query_index_type_per_second_.Add({{"IndexType","IDMAP"}});
|
||||
|
||||
prometheus::Family<prometheus::Gauge> &connection_ = prometheus::BuildGauge()
|
||||
.Name("connection_number")
|
||||
.Help("the number of connections")
|
||||
.Register(*registry_);
|
||||
prometheus::Gauge &connection_gauge_ = connection_.Add({});
|
||||
|
||||
prometheus::Family<prometheus::Counter> &keeping_alive_ = prometheus::BuildCounter()
|
||||
.Name("keeping_alive_seconds_total")
|
||||
.Help("total seconds of the serve alive")
|
||||
.Register(*registry_);
|
||||
prometheus::Counter &keeping_alive_counter_ = keeping_alive_.Add({});
|
||||
|
||||
prometheus::Family<prometheus::Gauge> &octets_ = prometheus::BuildGauge()
|
||||
.Name("octets_bytes_per_second")
|
||||
.Help("octets bytes per second")
|
||||
.Register(*registry_);
|
||||
prometheus::Gauge &inoctets_gauge_ = octets_.Add({{"type", "inoctets"}});
|
||||
prometheus::Gauge &outoctets_gauge_ = octets_.Add({{"type", "outoctets"}});
|
||||
|
||||
|
||||
|
||||
};
|
||||
|
||||
|
|
|
@ -0,0 +1,248 @@
|
|||
/*******************************************************************************
|
||||
* Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
|
||||
* Unauthorized copying of this file, via any medium is strictly prohibited.
|
||||
* Proprietary and confidential.
|
||||
******************************************************************************/
|
||||
|
||||
#include "SystemInfo.h"
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include "nvml.h"
|
||||
//#include <mutex>
|
||||
//
|
||||
//std::mutex mutex;
|
||||
|
||||
|
||||
namespace zilliz {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
void SystemInfo::Init() {
|
||||
if(initialized_) return;
|
||||
|
||||
initialized_ = true;
|
||||
|
||||
// initialize CPU information
|
||||
FILE* file;
|
||||
struct tms time_sample;
|
||||
char line[128];
|
||||
last_cpu_ = times(&time_sample);
|
||||
last_sys_cpu_ = time_sample.tms_stime;
|
||||
last_user_cpu_ = time_sample.tms_utime;
|
||||
file = fopen("/proc/cpuinfo", "r");
|
||||
num_processors_ = 0;
|
||||
while(fgets(line, 128, file) != NULL){
|
||||
if (strncmp(line, "processor", 9) == 0) num_processors_++;
|
||||
}
|
||||
total_ram_ = GetPhysicalMemory();
|
||||
fclose(file);
|
||||
|
||||
//initialize GPU information
|
||||
nvmlReturn_t nvmlresult;
|
||||
nvmlresult = nvmlInit();
|
||||
if(NVML_SUCCESS != nvmlresult) {
|
||||
printf("System information initilization failed");
|
||||
return ;
|
||||
}
|
||||
nvmlresult = nvmlDeviceGetCount(&num_device_);
|
||||
if(NVML_SUCCESS != nvmlresult) {
|
||||
printf("Unable to get devidce number");
|
||||
return ;
|
||||
}
|
||||
|
||||
//initialize network traffic information
|
||||
std::pair<unsigned long long, unsigned long long> in_and_out_octets = Octets();
|
||||
in_octets_ = in_and_out_octets.first;
|
||||
out_octets_ = in_and_out_octets.second;
|
||||
net_time_ = std::chrono::system_clock::now();
|
||||
}
|
||||
|
||||
long long
|
||||
SystemInfo::ParseLine(char *line) {
|
||||
// This assumes that a digit will be found and the line ends in " Kb".
|
||||
int i = strlen(line);
|
||||
const char *p = line;
|
||||
while (*p < '0' || *p > '9') p++;
|
||||
line[i - 3] = '\0';
|
||||
i = atoi(p);
|
||||
return static_cast<long long>(i);
|
||||
}
|
||||
|
||||
unsigned long
|
||||
SystemInfo::GetPhysicalMemory() {
|
||||
struct sysinfo memInfo;
|
||||
sysinfo (&memInfo);
|
||||
unsigned long totalPhysMem = memInfo.totalram;
|
||||
//Multiply in next statement to avoid int overflow on right hand side...
|
||||
totalPhysMem *= memInfo.mem_unit;
|
||||
return totalPhysMem;
|
||||
}
|
||||
|
||||
unsigned long
|
||||
SystemInfo::GetProcessUsedMemory() {
|
||||
//Note: this value is in KB!
|
||||
FILE* file = fopen("/proc/self/status", "r");
|
||||
constexpr int64_t line_length = 128;
|
||||
long long result = -1;
|
||||
constexpr int64_t KB_SIZE = 1024;
|
||||
char line[line_length];
|
||||
|
||||
while (fgets(line, line_length, file) != NULL){
|
||||
if (strncmp(line, "VmRSS:", 6) == 0){
|
||||
result = ParseLine(line);
|
||||
break;
|
||||
}
|
||||
}
|
||||
fclose(file);
|
||||
// return value in Byte
|
||||
return (result*KB_SIZE);
|
||||
|
||||
}
|
||||
|
||||
double
|
||||
SystemInfo::MemoryPercent() {
|
||||
if (!initialized_) Init();
|
||||
return GetProcessUsedMemory()*100/total_ram_;
|
||||
}
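MemoryPercent reads the VmRSS line (resident set size, in kB) from /proc/self/status, converts it to bytes, and divides by the total physical RAM obtained from sysinfo(). Note that GetProcessUsedMemory()*100/total_ram_ is evaluated in unsigned integer arithmetic, so the percentage is truncated to a whole number before the implicit conversion to double: a 2 GiB resident set on a 64 GiB machine reports 3, not 3.125.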
|
||||
|
||||
double
|
||||
SystemInfo::CPUPercent() {
|
||||
if (!initialized_) Init();
|
||||
struct tms time_sample;
|
||||
clock_t now;
|
||||
double percent;
|
||||
|
||||
now = times(&time_sample);
|
||||
if (now <= last_cpu_ || time_sample.tms_stime < last_sys_cpu_ ||
|
||||
time_sample.tms_utime < last_user_cpu_){
|
||||
//Overflow detection. Just skip this value.
|
||||
percent = -1.0;
|
||||
}
|
||||
else{
|
||||
percent = (time_sample.tms_stime - last_sys_cpu_) +
|
||||
(time_sample.tms_utime - last_user_cpu_);
|
||||
percent /= (now - last_cpu_);
|
||||
percent /= num_processors_;
|
||||
percent *= 100;
|
||||
}
|
||||
last_cpu_ = now;
|
||||
last_sys_cpu_ = time_sample.tms_stime;
|
||||
last_user_cpu_ = time_sample.tms_utime;
|
||||
|
||||
return percent;
|
||||
}
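CPUPercent follows the usual times() recipe: percent = ((stime - last_stime) + (utime - last_utime)) / (now - last_now) / num_processors * 100, i.e. the fraction of the whole machine's CPU capacity this process consumed since the previous call. For example, 150 system plus 250 user ticks over 1000 elapsed ticks on an 8-core machine gives 400 / 1000 / 8 * 100 = 5.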
|
||||
|
||||
//std::unordered_map<int,std::vector<double>>
|
||||
//SystemInfo::GetGPUMemPercent(){
|
||||
// // return GPUID: MEM%
|
||||
//
|
||||
// //write GPU info to a file
|
||||
// system("nvidia-smi pmon -c 1 > GPUInfo.txt");
|
||||
// int pid = (int)getpid();
|
||||
//
|
||||
// //parse line
|
||||
// std::ifstream read_file;
|
||||
// read_file.open("GPUInfo.txt");
|
||||
// std::string line;
|
||||
// while(getline(read_file, line)){
|
||||
// std::vector<std::string> words = split(line);
|
||||
// // 0 1 2 3 4 5 6 7
|
||||
// //words stand for gpuindex, pid, type, sm, mem, enc, dec, command respectively
|
||||
// if(std::stoi(words[1]) != pid) continue;
|
||||
// int GPUindex = std::stoi(words[0]);
|
||||
// double sm_percent = std::stod(words[3]);
|
||||
// double mem_percent = std::stod(words[4]);
|
||||
//
|
||||
// }
|
||||
//
|
||||
//}
|
||||
|
||||
//std::vector<std::string>
|
||||
//SystemInfo::split(std::string input) {
|
||||
// std::vector<std::string> words;
|
||||
// input += " ";
|
||||
// int word_start = 0;
|
||||
// for (int i = 0; i < input.size(); ++i) {
|
||||
// if(input[i] != ' ') continue;
|
||||
// if(input[i] == ' ') {
|
||||
// word_start = i + 1;
|
||||
// continue;
|
||||
// }
|
||||
// words.push_back(input.substr(word_start,i-word_start));
|
||||
// }
|
||||
// return words;
|
||||
//}
|
||||
|
||||
std::vector<unsigned int>
|
||||
SystemInfo::GPUPercent() {
|
||||
// get GPU usage percent
|
||||
if(!initialized_) Init();
|
||||
std::vector<unsigned int> result;
|
||||
nvmlUtilization_t utilization;
|
||||
for (int i = 0; i < num_device_; ++i) {
|
||||
nvmlDevice_t device;
|
||||
nvmlDeviceGetHandleByIndex(i, &device);
|
||||
nvmlDeviceGetUtilizationRates(device, &utilization);
|
||||
result.push_back(utilization.gpu);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
std::vector<unsigned long long>
|
||||
SystemInfo::GPUMemoryUsed() {
|
||||
// get GPU memory used
|
||||
if(!initialized_) Init();
|
||||
|
||||
std::vector<unsigned long long int> result;
|
||||
nvmlMemory_t nvmlMemory;
|
||||
for (int i = 0; i < num_device_; ++i) {
|
||||
nvmlDevice_t device;
|
||||
nvmlDeviceGetHandleByIndex(i, &device);
|
||||
nvmlDeviceGetMemoryInfo(device, &nvmlMemory);
|
||||
result.push_back(nvmlMemory.used);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
std::pair<unsigned long long , unsigned long long >
|
||||
SystemInfo::Octets(){
|
||||
pid_t pid = getpid();
|
||||
// const std::string filename = "/proc/"+std::to_string(pid)+"/net/netstat";
|
||||
const std::string filename = "/proc/net/netstat";
|
||||
std::ifstream file(filename);
|
||||
std::string lastline = "";
|
||||
std::string line = "";
|
||||
while(file){
|
||||
getline(file, line);
|
||||
if(file.fail()){
|
||||
break;
|
||||
}
|
||||
lastline = line;
|
||||
}
|
||||
std::vector<size_t> space_position;
|
||||
size_t space_pos = lastline.find(" ");
|
||||
while(space_pos != std::string::npos){
|
||||
space_position.push_back(space_pos);
|
||||
space_pos = lastline.find(" ",space_pos+1);
|
||||
}
|
||||
// InOctets is between 6th and 7th " " and OutOctets is between 7th and 8th " "
|
||||
size_t inoctets_begin = space_position[6]+1;
|
||||
size_t inoctets_length = space_position[7]-inoctets_begin;
|
||||
size_t outoctets_begin = space_position[7]+1;
|
||||
size_t outoctets_length = space_position[8]-outoctets_begin;
|
||||
std::string inoctets = lastline.substr(inoctets_begin,inoctets_length);
|
||||
std::string outoctets = lastline.substr(outoctets_begin,outoctets_length);
|
||||
|
||||
|
||||
unsigned long long inoctets_bytes = std::stoull(inoctets);
|
||||
unsigned long long outoctets_bytes = std::stoull(outoctets);
|
||||
std::pair<unsigned long long , unsigned long long > res(inoctets_bytes, outoctets_bytes);
|
||||
return res;
|
||||
}
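Octets() relies on the layout of /proc/net/netstat: on typical kernels the file ends with the IpExt value line, whose 7th and 8th whitespace-separated fields after the "IpExt:" tag are the cumulative InOctets and OutOctets counters in bytes, which is exactly what the two substrings between the 7th/8th and 8th/9th spaces extract. These are system-wide counters rather than per-process ones; the per-PID path is left commented out above.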
|
||||
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
/*******************************************************************************
|
||||
* Copyright 上海赜睿信息科技有限公司(Zilliz) - All Rights Reserved
|
||||
* Unauthorized copying of this file, via any medium is strictly prohibited.
|
||||
* Proprietary and confidential.
|
||||
******************************************************************************/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "sys/types.h"
|
||||
#include "sys/sysinfo.h"
|
||||
#include "stdlib.h"
|
||||
#include "stdio.h"
|
||||
#include "string.h"
|
||||
#include "sys/times.h"
|
||||
#include "sys/vtimes.h"
|
||||
#include <chrono>
|
||||
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
|
||||
|
||||
namespace zilliz {
|
||||
namespace milvus {
|
||||
namespace server {
|
||||
|
||||
class SystemInfo {
|
||||
private:
|
||||
unsigned long total_ram_ = 0;
|
||||
clock_t last_cpu_ = clock_t();
|
||||
clock_t last_sys_cpu_ = clock_t();
|
||||
clock_t last_user_cpu_ = clock_t();
|
||||
std::chrono::system_clock::time_point net_time_ = std::chrono::system_clock::now();
|
||||
int num_processors_ = 0;
|
||||
//number of GPU
|
||||
unsigned int num_device_ = 0;
|
||||
unsigned long long in_octets_ = 0;
|
||||
unsigned long long out_octets_ = 0;
|
||||
bool initialized_ = false;
|
||||
|
||||
public:
|
||||
static SystemInfo &
|
||||
GetInstance(){
|
||||
static SystemInfo instance;
|
||||
return instance;
|
||||
}
|
||||
|
||||
void Init();
|
||||
int num_device() const {return num_device_;};
|
||||
unsigned long long get_inoctets() { return in_octets_;};
|
||||
unsigned long long get_octets() { return out_octets_;};
|
||||
std::chrono::system_clock::time_point get_nettime() { return net_time_;};
|
||||
void set_inoctets(unsigned long long value) { in_octets_ = value;};
|
||||
void set_outoctets(unsigned long long value) { out_octets_ = value;};
|
||||
void set_nettime() {net_time_ = std::chrono::system_clock::now();};
|
||||
long long ParseLine(char* line);
|
||||
unsigned long GetPhysicalMemory();
|
||||
unsigned long GetProcessUsedMemory();
|
||||
double MemoryPercent();
|
||||
double CPUPercent();
|
||||
std::pair<unsigned long long , unsigned long long > Octets();
|
||||
// std::unordered_map<int,std::vector<double>> GetGPUMemPercent() {};
|
||||
// std::vector<std::string> split(std::string input) {};
|
||||
std::vector<unsigned int> GPUPercent();
|
||||
std::vector<unsigned long long> GPUMemoryUsed();
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -14,20 +14,19 @@ include_directories(/usr/include)
include_directories(${CMAKE_SOURCE_DIR}/src/thrift/gen-cpp)

set(service_files
-        ${CMAKE_SOURCE_DIR}/src/thrift/gen-cpp/MegasearchService.cpp
-        ${CMAKE_SOURCE_DIR}/src/thrift/gen-cpp/megasearch_constants.cpp
-        ${CMAKE_SOURCE_DIR}/src/thrift/gen-cpp/megasearch_types.cpp
+        ${CMAKE_SOURCE_DIR}/src/thrift/gen-cpp/MilvusService.cpp
+        ${CMAKE_SOURCE_DIR}/src/thrift/gen-cpp/milvus_constants.cpp
+        ${CMAKE_SOURCE_DIR}/src/thrift/gen-cpp/milvus_types.cpp
        )

-add_library(megasearch_sdk STATIC
+add_library(milvus_sdk STATIC
        ${interface_files}
        ${client_files}
        ${util_files}
        ${service_files}
        )

link_directories(../../third_party/build/lib)
-target_link_libraries(megasearch_sdk
+target_link_libraries(milvus_sdk
        ${third_party_libs}
        )

@@ -7,9 +7,9 @@
aux_source_directory(src src_files)

include_directories(src)
-include_directories(../../megasearch_sdk/include)
+include_directories(../../include)

-link_directories(${CMAKE_BINARY_DIR}/megasearch_sdk)
+link_directories(${CMAKE_BINARY_DIR})

add_executable(sdk_simple
        ./main.cpp

@@ -17,6 +17,6 @@ add_executable(sdk_simple
        )

target_link_libraries(sdk_simple
-        megasearch_sdk
+        milvus_sdk
        pthread
        )

@ -4,13 +4,13 @@
|
|||
* Proprietary and confidential.
|
||||
******************************************************************************/
|
||||
#include "ClientTest.h"
|
||||
#include "MegaSearch.h"
|
||||
#include "MilvusApi.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <time.h>
|
||||
#include <unistd.h>
|
||||
|
||||
using namespace megasearch;
|
||||
using namespace ::milvus;
|
||||
|
||||
namespace {
|
||||
std::string GetTableName();
|
||||
|
@ -20,15 +20,16 @@ namespace {
|
|||
static constexpr int64_t TOTAL_ROW_COUNT = 100000;
|
||||
static constexpr int64_t TOP_K = 10;
|
||||
static constexpr int64_t SEARCH_TARGET = 5000; //change this value, result is different
|
||||
static constexpr int64_t ADD_VECTOR_LOOP = 1;
|
||||
|
||||
#define BLOCK_SPLITER std::cout << "===========================================" << std::endl;
|
||||
|
||||
void PrintTableSchema(const megasearch::TableSchema& tb_schema) {
|
||||
void PrintTableSchema(const TableSchema& tb_schema) {
|
||||
BLOCK_SPLITER
|
||||
std::cout << "Table name: " << tb_schema.table_name << std::endl;
|
||||
std::cout << "Table index type: " << (int)tb_schema.index_type << std::endl;
|
||||
std::cout << "Table dimension: " << tb_schema.dimension << std::endl;
|
||||
std::cout << "Table store raw data: " << tb_schema.store_raw_vector << std::endl;
|
||||
std::cout << "Table store raw data: " << (tb_schema.store_raw_vector ? "true" : "false") << std::endl;
|
||||
BLOCK_SPLITER
|
||||
}
|
||||
|
||||
|
@ -75,6 +76,18 @@ namespace {
|
|||
return str;
|
||||
}
|
||||
|
||||
std::string CurrentTmDate() {
|
||||
time_t tt;
|
||||
time( &tt );
|
||||
tt = tt + 8*3600;
|
||||
tm* t= gmtime( &tt );
|
||||
|
||||
std::string str = std::to_string(t->tm_year + 1900) + "-" + std::to_string(t->tm_mon + 1)
|
||||
+ "-" + std::to_string(t->tm_mday);
|
||||
|
||||
return str;
|
||||
}
|
||||
|
||||
std::string GetTableName() {
|
||||
static std::string s_id(CurrentTime());
|
||||
return s_id;
|
||||
|
@ -122,7 +135,7 @@ ClientTest::Test(const std::string& address, const std::string& port) {
|
|||
|
||||
{//server version
|
||||
std::string version = conn->ServerVersion();
|
||||
std::cout << "MegaSearch server version: " << version << std::endl;
|
||||
std::cout << "Server version: " << version << std::endl;
|
||||
}
|
||||
|
||||
{//sdk version
|
||||
|
@ -136,15 +149,17 @@ ClientTest::Test(const std::string& address, const std::string& port) {
|
|||
std::cout << "ShowTables function call status: " << stat.ToString() << std::endl;
|
||||
std::cout << "All tables: " << std::endl;
|
||||
for(auto& table : tables) {
|
||||
std::cout << "\t" << table << std::endl;
|
||||
int64_t row_count = 0;
|
||||
stat = conn->GetTableRowCount(table, row_count);
|
||||
std::cout << "\t" << table << "(" << row_count << " rows)" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
{//create table
|
||||
TableSchema tb_schema = BuildTableSchema();
|
||||
PrintTableSchema(tb_schema);
|
||||
Status stat = conn->CreateTable(tb_schema);
|
||||
std::cout << "CreateTable function call status: " << stat.ToString() << std::endl;
|
||||
PrintTableSchema(tb_schema);
|
||||
}
|
||||
|
||||
{//describe table
|
||||
|
@ -154,9 +169,9 @@ ClientTest::Test(const std::string& address, const std::string& port) {
|
|||
PrintTableSchema(tb_schema);
|
||||
}
|
||||
|
||||
{//add vectors
|
||||
for(int i = 0; i < ADD_VECTOR_LOOP; i++){//add vectors
|
||||
std::vector<RowRecord> record_array;
|
||||
BuildVectors(0, TOTAL_ROW_COUNT, record_array);
|
||||
BuildVectors(i*TOTAL_ROW_COUNT, (i+1)*TOTAL_ROW_COUNT, record_array);
|
||||
std::vector<int64_t> record_ids;
|
||||
Status stat = conn->AddVector(TABLE_NAME, record_array, record_ids);
|
||||
std::cout << "AddVector function call status: " << stat.ToString() << std::endl;
|
||||
|
@ -170,6 +185,10 @@ ClientTest::Test(const std::string& address, const std::string& port) {
|
|||
BuildVectors(SEARCH_TARGET, SEARCH_TARGET + 10, record_array);
|
||||
|
||||
std::vector<Range> query_range_array;
|
||||
Range rg;
|
||||
rg.start_value = CurrentTmDate();
|
||||
rg.end_value = CurrentTmDate();
|
||||
query_range_array.emplace_back(rg);
|
||||
std::vector<TopKQueryResult> topk_query_result_array;
|
||||
Status stat = conn->SearchVector(TABLE_NAME, record_array, query_range_array, TOP_K, topk_query_result_array);
|
||||
std::cout << "SearchVector function call status: " << stat.ToString() << std::endl;
|
||||
|
|
|
@ -6,9 +6,9 @@
|
|||
#include <vector>
|
||||
#include <memory>
|
||||
|
||||
/** \brief MegaSearch SDK namespace
|
||||
/** \brief Milvus SDK namespace
|
||||
*/
|
||||
namespace megasearch {
|
||||
namespace milvus {
|
||||
|
||||
|
||||
/**
|
||||
|
@ -119,7 +119,7 @@ public:
|
|||
* Connect function should be called before any operations
|
||||
* Server will be connected after Connect return OK
|
||||
*
|
||||
* @param uri, use to provide server information, example: megasearch://ipaddress:port
|
||||
* @param uri, use to provide server information, example: milvus://ipaddress:port
|
||||
*
|
||||
* @return Indicate if connect is successful
|
||||
*/
|
|
@ -3,9 +3,9 @@
|
|||
#include <string>
|
||||
#include <sstream>
|
||||
|
||||
/** \brief MegaSearch SDK namespace
|
||||
/** \brief Milvus SDK namespace
|
||||
*/
|
||||
namespace megasearch {
|
||||
namespace milvus {
|
||||
|
||||
/**
|
||||
* @brief Status Code for SDK interface return
|
||||
|
@ -72,7 +72,7 @@ class Status {
|
|||
* @return, the status is assigned.
|
||||
*
|
||||
*/
|
||||
inline Status &operator=(const Status &s);
|
||||
Status &operator=(const Status &s);
|
||||
|
||||
/**
|
||||
* @brief Status
|
||||
|
@ -93,7 +93,7 @@ class Status {
|
|||
* @return, the status is moved.
|
||||
*
|
||||
*/
|
||||
inline Status &operator=(Status &&s) noexcept;
|
||||
Status &operator=(Status &&s) noexcept;
|
||||
|
||||
/**
|
||||
* @brief Status
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
#include "ClientProxy.h"
|
||||
#include "util/ConvertUtil.h"
|
||||
|
||||
namespace megasearch {
|
||||
namespace milvus {
|
||||
|
||||
std::shared_ptr<ThriftClient>&
|
||||
ClientProxy::ClientPtr() const {
|
||||
|
@ -77,7 +77,7 @@ ClientProxy::Disconnect() {
|
|||
|
||||
std::string
|
||||
ClientProxy::ClientVersion() const {
|
||||
return std::string("v1.0");
|
||||
return "";
|
||||
}
|
||||
|
||||
Status
|
||||
|
@ -159,6 +159,8 @@ ClientProxy::SearchVector(const std::string &table_name,
|
|||
}
|
||||
|
||||
try {
|
||||
|
||||
//step 1: convert vectors data
|
||||
std::vector<thrift::RowRecord> thrift_records;
|
||||
for(auto& record : query_record_array) {
|
||||
thrift::RowRecord thrift_record;
|
||||
|
@@ -172,10 +174,21 @@ ClientProxy::SearchVector(const std::string &table_name,
 thrift_records.emplace_back(thrift_record);
 }

+//step 2: convert range array
+std::vector<thrift::Range> thrift_ranges;
+for(auto& range : query_range_array) {
+thrift::Range thrift_range;
+thrift_range.__set_start_value(range.start_value);
+thrift_range.__set_end_value(range.end_value);
+
+thrift_ranges.emplace_back(thrift_range);
+}

 //step 3: search vectors
 std::vector<thrift::TopKQueryResult> result_array;
+ClientPtr()->interface()->SearchVector(result_array, table_name, thrift_records, thrift_ranges, topk);

 //step 4: convert result array
 for(auto& thrift_topk_result : result_array) {
 TopKQueryResult result;
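The new step 2 copies the SDK-level Range values into the generated thrift::Range objects before the RPC in step 3. If that conversion were factored into a helper (hypothetical, not part of this diff), it would look roughly like this:

    // Hypothetical helper, not in the diff: mirrors "step 2" above.
    // thrift::Range and its __set_* mutators come from the generated milvus_types code.
    static std::vector<thrift::Range>
    ConvertRanges(const std::vector<Range>& query_range_array) {
        std::vector<thrift::Range> thrift_ranges;
        thrift_ranges.reserve(query_range_array.size());
        for (auto& range : query_range_array) {
            thrift::Range thrift_range;
            thrift_range.__set_start_value(range.start_value);
            thrift_range.__set_end_value(range.end_value);
            thrift_ranges.emplace_back(thrift_range);
        }
        return thrift_ranges;
    }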
@@ -208,6 +221,8 @@ ClientProxy::DescribeTable(const std::string &table_name, TableSchema &table_sch

 table_schema.table_name = thrift_schema.table_name;
 table_schema.index_type = (IndexType)thrift_schema.index_type;
 table_schema.dimension = thrift_schema.dimension;
 table_schema.store_raw_vector = thrift_schema.store_raw_vector;

 } catch ( std::exception& ex) {
 return Status(StatusCode::UnknownError, "failed to describe table: " + std::string(ex.what()));
@@ -5,10 +5,10 @@
 ******************************************************************************/
 #pragma once

-#include "MegaSearch.h"
+#include "MilvusApi.h"
 #include "ThriftClient.h"

-namespace megasearch {
+namespace milvus {

 class ClientProxy : public Connection {
 public:
@@ -5,8 +5,8 @@
 ******************************************************************************/
 #include "ThriftClient.h"

-#include "megasearch_types.h"
-#include "megasearch_constants.h"
+#include "milvus_types.h"
+#include "milvus_constants.h"

 #include <exception>
@@ -21,7 +21,7 @@
 #include <thrift/transport/TBufferTransports.h>
 #include <thrift/concurrency/PosixThreadFactory.h>

-namespace megasearch {
+namespace milvus {

 using namespace ::apache::thrift;
 using namespace ::apache::thrift::protocol;
@@ -36,7 +36,7 @@ ThriftClient::~ThriftClient() {

 }

-MegasearchServiceClientPtr
+ServiceClientPtr
 ThriftClient::interface() {
 if(client_ == nullptr) {
 throw std::exception();
@@ -62,10 +62,10 @@ ThriftClient::Connect(const std::string& address, int32_t port, const std::strin
 }

 transport_ptr->open();
-client_ = std::make_shared<thrift::MegasearchServiceClient>(protocol_ptr);
+client_ = std::make_shared<thrift::MilvusServiceClient>(protocol_ptr);
 } catch ( std::exception& ex) {
 //CLIENT_LOG_ERROR << "connect encounter exception: " << ex.what();
-return Status(StatusCode::NotConnected, "failed to connect megasearch server" + std::string(ex.what()));
+return Status(StatusCode::NotConnected, "failed to connect server" + std::string(ex.what()));
 }

 return Status::OK();
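ThriftClient::Connect now instantiates thrift::MilvusServiceClient and no longer names the old product in its failure message. A hedged sketch of how a caller might drive it, using the Connect signature and THRIFT_PROTOCOL_BINARY constant declared in the header below; host and port are placeholders:

    #include "ThriftClient.h"   // SDK header from this diff
    #include <iostream>

    void ConnectDemo() {
        // Sketch only: host and port are placeholders.
        milvus::ThriftClient client;
        milvus::Status stat = client.Connect("127.0.0.1", 19530, milvus::THRIFT_PROTOCOL_BINARY);
        std::cout << "Connect: " << stat.ToString() << std::endl;
        // RPCs would go through client.interface(), the generated MilvusServiceClient.
        client.Disconnect();
    }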
@@ -5,14 +5,14 @@
 ******************************************************************************/
 #pragma once

-#include "MegasearchService.h"
+#include "MilvusService.h"
 #include "Status.h"

 #include <memory>

-namespace megasearch {
+namespace milvus {

-using MegasearchServiceClientPtr = std::shared_ptr<megasearch::thrift::MegasearchServiceClient>;
+using ServiceClientPtr = std::shared_ptr<::milvus::thrift::MilvusServiceClient>;

 static const std::string THRIFT_PROTOCOL_JSON = "json";
 static const std::string THRIFT_PROTOCOL_BINARY = "binary";
@@ -23,13 +23,13 @@ public:
 ThriftClient();
 virtual ~ThriftClient();

-MegasearchServiceClientPtr interface();
+ServiceClientPtr interface();

 Status Connect(const std::string& address, int32_t port, const std::string& protocol);
 Status Disconnect();

 private:
-MegasearchServiceClientPtr client_;
+ServiceClientPtr client_;
 };


@@ -4,8 +4,9 @@
 * Proprietary and confidential.
 ******************************************************************************/
 #include "ConnectionImpl.h"
+#include "version.h"

-namespace megasearch {
+namespace milvus {

 std::shared_ptr<Connection>
 Connection::Create() {
@@ -13,7 +14,7 @@ Connection::Create() {
 }

 Status
-Connection::Destroy(std::shared_ptr<megasearch::Connection> connection_ptr) {
+Connection::Destroy(std::shared_ptr<milvus::Connection> connection_ptr) {
 if(connection_ptr != nullptr) {
 return connection_ptr->Disconnect();
 }
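Connection::Destroy keeps its null check and simply disconnects the instance it is handed. A short, hedged sketch of the create/use/destroy lifecycle these two functions imply:

    #include "MilvusApi.h"   // assumed SDK header
    #include <iostream>
    #include <memory>

    void LifecycleDemo() {
        std::shared_ptr<milvus::Connection> conn = milvus::Connection::Create();
        // ... Connect, AddVector, SearchVector, etc. ...
        milvus::Status stat = milvus::Connection::Destroy(conn);  // calls Disconnect() when non-null
        std::cout << "Destroy: " << stat.ToString() << std::endl;
    }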
@@ -47,7 +48,7 @@ ConnectionImpl::Disconnect() {

 std::string
 ConnectionImpl::ClientVersion() const {
-return client_proxy_->ClientVersion();
+return MILVUS_VERSION;
 }

 Status
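ClientVersion() now reports the MILVUS_VERSION macro from the generated version.h instead of asking the proxy. The header's exact contents are not shown in this diff; an illustrative guess at what the configure step produces:

    // Illustrative only: generated at configure time; the actual version string may differ.
    #pragma once
    #define MILVUS_VERSION "0.3.0"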
@@ -5,10 +5,10 @@
 ******************************************************************************/
 #pragma once

-#include "MegaSearch.h"
+#include "MilvusApi.h"
 #include "client/ClientProxy.h"

-namespace megasearch {
+namespace milvus {

 class ConnectionImpl : public Connection {
 public:
@@ -6,7 +6,7 @@
 #include "Status.h"


-namespace megasearch {
+namespace milvus {

 Status::~Status() noexcept {
 if (state_ != nullptr) {
@@ -8,15 +8,15 @@

 #include <map>

-namespace megasearch {
+namespace milvus {

 static const std::string INDEX_RAW = "raw";
 static const std::string INDEX_IVFFLAT = "ivfflat";

-std::string ConvertUtil::IndexType2Str(megasearch::IndexType index) {
-static const std::map<megasearch::IndexType, std::string> s_index2str = {
-{megasearch::IndexType::cpu_idmap, INDEX_RAW},
-{megasearch::IndexType::gpu_ivfflat, INDEX_IVFFLAT}
+std::string ConvertUtil::IndexType2Str(IndexType index) {
+static const std::map<IndexType, std::string> s_index2str = {
+{IndexType::cpu_idmap, INDEX_RAW},
+{IndexType::gpu_ivfflat, INDEX_IVFFLAT}
 };

 const auto& iter = s_index2str.find(index);
@@ -27,10 +27,10 @@ std::string ConvertUtil::IndexType2Str(megasearch::IndexType index) {
 return iter->second;
 }

-megasearch::IndexType ConvertUtil::Str2IndexType(const std::string& type) {
-static const std::map<std::string, megasearch::IndexType> s_str2index = {
-{INDEX_RAW, megasearch::IndexType::cpu_idmap},
-{INDEX_IVFFLAT, megasearch::IndexType::gpu_ivfflat}
+IndexType ConvertUtil::Str2IndexType(const std::string& type) {
+static const std::map<std::string, IndexType> s_str2index = {
+{INDEX_RAW, IndexType::cpu_idmap},
+{INDEX_IVFFLAT, IndexType::gpu_ivfflat}
 };

 const auto& iter = s_str2index.find(type);
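With the explicit megasearch:: qualifiers gone, the two converters remain inverses of each other for the supported index types. A small, hedged round-trip example; the converters are assumed to be static members, and the behavior for an unknown string is not shown in this diff, so only known values are used:

    #include "util/ConvertUtil.h"   // include path taken from the ClientProxy hunk above
    #include <cassert>
    #include <string>

    void IndexTypeRoundTrip() {
        std::string name = milvus::ConvertUtil::IndexType2Str(milvus::IndexType::gpu_ivfflat);  // "ivfflat"
        milvus::IndexType type = milvus::ConvertUtil::Str2IndexType(name);
        assert(type == milvus::IndexType::gpu_ivfflat);
    }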
Some files were not shown because too many files have changed in this diff.