Merge remote-tracking branch 'upstream/0.6.0' into 0.6.0

pull/304/head
quicksilver 2019-11-11 10:52:39 +08:00
commit 8e78c8dfda
131 changed files with 14653 additions and 5514 deletions

3
.gitignore vendored
View File

@ -26,3 +26,6 @@ cmake_build
*.lo
*.tar.gz
*.log
.coverage
*.pyc
cov_html/

View File

@ -5,9 +5,12 @@ Please mark all change in change log and use the ticket from JIRA.
# Milvus 0.6.0 (TODO)
## Bug
- \#246 - Exclude src/external folder from code coverage for jenkin ci
## Feature
- \#12 - Pure CPU version for Milvus
- \#77 - Support table partition
- \#226 - Experimental shards middleware for Milvus
## Improvement
@ -84,7 +87,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-658 - Fix SQ8 Hybrid can't search
- MS-665 - IVF_SQ8H search crash when no GPU resource in search_resources
- \#9 - Change default gpu_cache_capacity to 4
- \#20 - C++ sdk example get grpc error
- \#20 - C++ sdk example get grpc error
- \#23 - Add unittest to improve code coverage
- \#31 - make clang-format failed after run build.sh -l
- \#39 - Create SQ8H index hang if using github server version
@ -136,7 +139,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-635 - Add compile option to support customized faiss
- MS-660 - add ubuntu_build_deps.sh
- \#18 - Add all test cases
# Milvus 0.4.0 (2019-09-12)
## Bug
@ -345,11 +348,11 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-82 - Update server startup welcome message
- MS-83 - Update vecwise to Milvus
- MS-77 - Performance issue of post-search action
- MS-22 - Enhancement for MemVector size control
- MS-22 - Enhancement for MemVector size control
- MS-92 - Unify behavior of debug and release build
- MS-98 - Install all unit test to installation directory
- MS-115 - Change is_startup of metric_config switch from true to on
- MS-122 - Archive criteria config
- MS-122 - Archive criteria config
- MS-124 - HasTable interface
- MS-126 - Add more error code
- MS-128 - Change default db path

View File

@ -132,8 +132,7 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
"*/src/server/Server.cpp" \
"*/src/server/DBWrapper.cpp" \
"*/src/server/grpc_impl/GrpcServer.cpp" \
"*/src/external/easyloggingpp/easylogging++.h" \
"*/src/external/easyloggingpp/easylogging++.cc"
"*/src/external/*"
if [ $? -ne 0 ]; then
echo "gen ${FILE_INFO_OUTPUT_NEW} failed"

View File

@ -31,16 +31,16 @@ GET_CURRENT_TIME(BUILD_TIME)
string(REGEX REPLACE "\n" "" BUILD_TIME ${BUILD_TIME})
message(STATUS "Build time = ${BUILD_TIME}")
MACRO (GET_GIT_BRANCH_NAME GIT_BRANCH_NAME)
MACRO(GET_GIT_BRANCH_NAME GIT_BRANCH_NAME)
execute_process(COMMAND "git" rev-parse --abbrev-ref HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
if(GIT_BRANCH_NAME STREQUAL "")
if (GIT_BRANCH_NAME STREQUAL "")
execute_process(COMMAND "git" symbolic-ref --short -q HEAD OUTPUT_VARIABLE ${GIT_BRANCH_NAME})
endif()
ENDMACRO (GET_GIT_BRANCH_NAME)
endif ()
ENDMACRO(GET_GIT_BRANCH_NAME)
GET_GIT_BRANCH_NAME(GIT_BRANCH_NAME)
message(STATUS "GIT_BRANCH_NAME = ${GIT_BRANCH_NAME}")
if(NOT GIT_BRANCH_NAME STREQUAL "")
if (NOT GIT_BRANCH_NAME STREQUAL "")
string(REGEX REPLACE "\n" "" GIT_BRANCH_NAME ${GIT_BRANCH_NAME})
endif ()
@ -69,7 +69,7 @@ if (MILVUS_VERSION_MAJOR STREQUAL ""
OR MILVUS_VERSION_PATCH STREQUAL "")
message(WARNING "Failed to determine Milvus version from git branch name")
set(MILVUS_VERSION "0.6.0")
endif()
endif ()
message(STATUS "Build version = ${MILVUS_VERSION}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/config.h.in ${CMAKE_CURRENT_SOURCE_DIR}/src/config.h @ONLY)
@ -118,17 +118,17 @@ include(DefineOptions)
include(BuildUtils)
include(ThirdPartyPackages)
set(MILVUS_GPU_VERSION false)
if (MILVUS_CPU_VERSION)
message(STATUS "Building Milvus CPU version")
add_compile_definitions("MILVUS_CPU_VERSION")
else ()
set(MILVUS_CPU_VERSION false)
if (MILVUS_GPU_VERSION)
message(STATUS "Building Milvus GPU version")
set(MILVUS_GPU_VERSION true)
add_compile_definitions("MILVUS_GPU_VERSION")
enable_language(CUDA)
find_package(CUDA 10 REQUIRED)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda")
else ()
message(STATUS "Building Milvus CPU version")
set(MILVUS_CPU_VERSION true)
add_compile_definitions("MILVUS_CPU_VERSION")
endif ()
if (CMAKE_BUILD_TYPE STREQUAL "Release")

View File

@ -12,7 +12,7 @@ USE_JFROG_CACHE="OFF"
RUN_CPPLINT="OFF"
CUSTOMIZATION="OFF" # default use ori faiss
CUDA_COMPILER=/usr/local/cuda/bin/nvcc
CPU_VERSION="OFF"
GPU_VERSION="OFF" #defaults to CPU version
WITH_MKL="OFF"
CUSTOMIZED_FAISS_URL="${FAISS_URL:-NONE}"
@ -51,7 +51,7 @@ do
c)
BUILD_COVERAGE="ON"
;;
g)
z)
PROFILING="ON"
;;
j)
@ -60,8 +60,8 @@ do
x)
CUSTOMIZATION="OFF" # force use ori faiss
;;
z)
CPU_VERSION="ON"
g)
GPU_VERSION="ON"
;;
m)
WITH_MKL="ON"
@ -77,14 +77,14 @@ parameter:
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-r: remove previous build directory(default: OFF)
-c: code coverage(default: OFF)
-g: profiling(default: OFF)
-z: profiling(default: OFF)
-j: use jfrog cache build directory(default: OFF)
-z: build pure CPU version(default: OFF)
-g: build GPU version(default: OFF)
-m: build with MKL(default: OFF)
-h: help
usage:
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-g] [-j] [-z] [-m] [-h]
./build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-c] [-z] [-j] [-g] [-m] [-h]
"
exit 0
;;
@ -116,7 +116,7 @@ CMAKE_CMD="cmake \
-DUSE_JFROG_CACHE=${USE_JFROG_CACHE} \
-DCUSTOMIZATION=${CUSTOMIZATION} \
-DFAISS_URL=${CUSTOMIZED_FAISS_URL} \
-DMILVUS_CPU_VERSION=${CPU_VERSION} \
-DMILVUS_GPU_VERSION=${GPU_VERSION} \
-DBUILD_FAISS_WITH_MKL=${WITH_MKL} \
../"
echo ${CMAKE_CMD}

View File

@ -13,16 +13,16 @@ macro(define_option name description default)
endmacro()
function(list_join lst glue out)
if("${${lst}}" STREQUAL "")
if ("${${lst}}" STREQUAL "")
set(${out} "" PARENT_SCOPE)
return()
endif()
endif ()
list(GET ${lst} 0 joined)
list(REMOVE_AT ${lst} 0)
foreach(item ${${lst}})
foreach (item ${${lst}})
set(joined "${joined}${glue}${item}")
endforeach()
endforeach ()
set(${out} ${joined} PARENT_SCOPE)
endfunction()
@ -35,15 +35,15 @@ macro(define_option_string name description default)
set("${name}_OPTION_ENUM" ${ARGN})
list_join("${name}_OPTION_ENUM" "|" "${name}_OPTION_ENUM")
if(NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
if (NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
set_property(CACHE ${name} PROPERTY STRINGS ${ARGN})
endif()
endif ()
endmacro()
#----------------------------------------------------------------------
set_option_category("CPU version")
set_option_category("GPU version")
define_option(MILVUS_CPU_VERSION "Build CPU version only" OFF)
define_option(MILVUS_GPU_VERSION "Build GPU version" OFF)
#----------------------------------------------------------------------
set_option_category("Thirdparty")
@ -51,11 +51,11 @@ set_option_category("Thirdparty")
set(MILVUS_DEPENDENCY_SOURCE_DEFAULT "AUTO")
define_option_string(MILVUS_DEPENDENCY_SOURCE
"Method to use for acquiring MILVUS's build dependencies"
"${MILVUS_DEPENDENCY_SOURCE_DEFAULT}"
"AUTO"
"BUNDLED"
"SYSTEM")
"Method to use for acquiring MILVUS's build dependencies"
"${MILVUS_DEPENDENCY_SOURCE_DEFAULT}"
"AUTO"
"BUNDLED"
"SYSTEM")
define_option(MILVUS_VERBOSE_THIRDPARTY_BUILD
"Show output from ExternalProjects rather than just logging to files" ON)
@ -75,33 +75,21 @@ define_option(MILVUS_WITH_YAMLCPP "Build with yaml-cpp library" ON)
if (MILVUS_ENABLE_PROFILING STREQUAL "ON")
define_option(MILVUS_WITH_LIBUNWIND "Build with libunwind" ON)
define_option(MILVUS_WITH_GPERFTOOLS "Build with gperftools" ON)
endif()
endif ()
define_option(MILVUS_WITH_GRPC "Build with GRPC" ON)
define_option(MILVUS_WITH_ZLIB "Build with zlib compression" ON)
#----------------------------------------------------------------------
if(MSVC)
set_option_category("MSVC")
define_option(MSVC_LINK_VERBOSE
"Pass verbose linking options when linking libraries and executables"
OFF)
define_option(MILVUS_USE_STATIC_CRT "Build MILVUS with statically linked CRT" OFF)
endif()
#----------------------------------------------------------------------
set_option_category("Test and benchmark")
unset(MILVUS_BUILD_TESTS CACHE)
if (BUILD_UNIT_TEST)
define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" ON)
else()
else ()
define_option(MILVUS_BUILD_TESTS "Build the MILVUS googletest unit tests" OFF)
endif(BUILD_UNIT_TEST)
endif (BUILD_UNIT_TEST)
#----------------------------------------------------------------------
macro(config_summary)
@ -113,12 +101,12 @@ macro(config_summary)
message(STATUS " Generator: ${CMAKE_GENERATOR}")
message(STATUS " Build type: ${CMAKE_BUILD_TYPE}")
message(STATUS " Source directory: ${CMAKE_CURRENT_SOURCE_DIR}")
if(${CMAKE_EXPORT_COMPILE_COMMANDS})
if (${CMAKE_EXPORT_COMPILE_COMMANDS})
message(
STATUS " Compile commands: ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json")
endif()
endif ()
foreach(category ${MILVUS_OPTION_CATEGORIES})
foreach (category ${MILVUS_OPTION_CATEGORIES})
message(STATUS)
message(STATUS "${category} options:")
@ -126,50 +114,50 @@ macro(config_summary)
set(option_names ${MILVUS_${category}_OPTION_NAMES})
set(max_value_length 0)
foreach(name ${option_names})
foreach (name ${option_names})
string(LENGTH "\"${${name}}\"" value_length)
if(${max_value_length} LESS ${value_length})
if (${max_value_length} LESS ${value_length})
set(max_value_length ${value_length})
endif()
endforeach()
endif ()
endforeach ()
foreach(name ${option_names})
if("${${name}_OPTION_TYPE}" STREQUAL "string")
foreach (name ${option_names})
if ("${${name}_OPTION_TYPE}" STREQUAL "string")
set(value "\"${${name}}\"")
else()
else ()
set(value "${${name}}")
endif()
endif ()
set(default ${${name}_OPTION_DEFAULT})
set(description ${${name}_OPTION_DESCRIPTION})
string(LENGTH ${description} description_length)
if(${description_length} LESS 70)
if (${description_length} LESS 70)
string(
SUBSTRING
" "
${description_length} -1 description_padding)
else()
else ()
set(description_padding "
")
endif()
endif ()
set(comment "[${name}]")
if("${value}" STREQUAL "${default}")
if ("${value}" STREQUAL "${default}")
set(comment "[default] ${comment}")
endif()
endif ()
if(NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
if (NOT ("${${name}_OPTION_ENUM}" STREQUAL ""))
set(comment "${comment} [${${name}_OPTION_ENUM}]")
endif()
endif ()
string(
SUBSTRING "${value} "
0 ${max_value_length} value)
message(STATUS " ${description} ${description_padding} ${value} ${comment}")
endforeach()
endforeach ()
endforeach()
endforeach ()
endmacro()

View File

@ -32,9 +32,9 @@ cache_config:
cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0]
cache_insert_data: false # whether to load inserted data into cache, must be a boolean
# Skip the following config if you are using GPU version
gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer
gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0]
#Uncomment the following config if you are using GPU version
# gpu_cache_capacity: 4 # GB, GPU memory used for cache, must be a positive integer
# gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered, must be in range (0.0, 1.0]
engine_config:
use_blas_threshold: 1100 # if nq < use_blas_threshold, use SSE, faster with fluctuated response times
@ -44,5 +44,4 @@ engine_config:
resource_config:
search_resources: # define the devices used for search computation, must be in format: cpu or gpux
- cpu
- gpu0
index_build_device: gpu0 # CPU / GPU used for building index, must be in format: cpu / gpux
index_build_device: cpu # CPU / GPU used for building index, must be in format: cpu / gpux

View File

@ -122,8 +122,6 @@ ${LCOV_CMD} -r "${FILE_INFO_OUTPUT}" -o "${FILE_INFO_OUTPUT_NEW}" \
"*/src/server/Server.cpp" \
"*/src/server/DBWrapper.cpp" \
"*/src/server/grpc_impl/GrpcServer.cpp" \
"*/easylogging++.h" \
"*/easylogging++.cc" \
"*/src/external/*"
if [ $? -ne 0 ]; then

28
core/migration/README.md Normal file
View File

@ -0,0 +1,28 @@
## Data Migration
####0.3.x
legacy data is not migrate-able for later versions
####0.4.x
legacy data can be reused directly by 0.5.x
legacy data can be migrated to 0.6.x
####0.5.x
legacy data can be migrated to 0.6.x
####0.6.x
how to migrate legacy 0.4.x/0.5.x data
for sqlite meta:
```shell
$ sqlite3 [parth_to]/meta.sqlite < sqlite_4_to_6.sql
```
for mysql meta:
```shell
$ mysql -h127.0.0.1 -uroot -p123456 -Dmilvus < mysql_4_to_6.sql
```

View File

@ -0,0 +1,4 @@
alter table Tables add column owner_table VARCHAR(255) DEFAULT '' NOT NULL;
alter table Tables add column partition_tag VARCHAR(255) DEFAULT '' NOT NULL;
alter table Tables add column version VARCHAR(64) DEFAULT '0.6.0' NOT NULL;
update Tables set version='0.6.0';

View File

@ -0,0 +1,4 @@
alter table Tables add column 'owner_table' TEXT DEFAULT '' NOT NULL;
alter table Tables add column 'partition_tag' TEXT DEFAULT '' NOT NULL;
alter table Tables add column 'version' TEXT DEFAULT '0.6.0' NOT NULL;
update Tables set version='0.6.0';

View File

@ -24,6 +24,9 @@ include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-status)
include_directories(${MILVUS_ENGINE_SRC}/grpc/gen-milvus)
add_subdirectory(index)
if (BUILD_FAISS_WITH_MKL)
add_compile_definitions("WITH_MKL")
endif ()
set(INDEX_INCLUDE_DIRS ${INDEX_INCLUDE_DIRS} PARENT_SCOPE)
foreach (dir ${INDEX_INCLUDE_DIRS})

View File

@ -47,43 +47,68 @@ class DB {
virtual Status
CreateTable(meta::TableSchema& table_schema_) = 0;
virtual Status
DeleteTable(const std::string& table_id, const meta::DatesT& dates) = 0;
DropTable(const std::string& table_id, const meta::DatesT& dates) = 0;
virtual Status
DescribeTable(meta::TableSchema& table_schema_) = 0;
virtual Status
HasTable(const std::string& table_id, bool& has_or_not_) = 0;
virtual Status
AllTables(std::vector<meta::TableSchema>& table_schema_array) = 0;
virtual Status
GetTableRowCount(const std::string& table_id, uint64_t& row_count) = 0;
virtual Status
PreloadTable(const std::string& table_id) = 0;
virtual Status
UpdateTableFlag(const std::string& table_id, int64_t flag) = 0;
virtual Status
InsertVectors(const std::string& table_id_, uint64_t n, const float* vectors, IDNumbers& vector_ids_) = 0;
CreatePartition(const std::string& table_id, const std::string& partition_name,
const std::string& partition_tag) = 0;
virtual Status
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
QueryResults& results) = 0;
DropPartition(const std::string& partition_name) = 0;
virtual Status
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
const meta::DatesT& dates, QueryResults& results) = 0;
DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) = 0;
virtual Status
Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) = 0;
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) = 0;
virtual Status
InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors,
IDNumbers& vector_ids_) = 0;
virtual Status
Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) = 0;
virtual Status
Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
ResultDistances& result_distances) = 0;
virtual Status
QueryByFileID(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
ResultDistances& result_distances) = 0;
virtual Status
Size(uint64_t& result) = 0;
virtual Status
CreateIndex(const std::string& table_id, const TableIndex& index) = 0;
virtual Status
DescribeIndex(const std::string& table_id, TableIndex& index) = 0;
virtual Status
DropIndex(const std::string& table_id) = 0;

View File

@ -30,6 +30,7 @@
#include "scheduler/job/DeleteJob.h"
#include "scheduler/job/SearchJob.h"
#include "utils/Log.h"
#include "utils/StringHelpFunctions.h"
#include "utils/TimeRecorder.h"
#include <assert.h>
@ -38,6 +39,7 @@
#include <chrono>
#include <cstring>
#include <iostream>
#include <set>
#include <thread>
namespace milvus {
@ -49,6 +51,17 @@ constexpr uint64_t METRIC_ACTION_INTERVAL = 1;
constexpr uint64_t COMPACT_ACTION_INTERVAL = 1;
constexpr uint64_t INDEX_ACTION_INTERVAL = 1;
static const Status SHUTDOWN_ERROR = Status(DB_ERROR, "Milsvus server is shutdown!");
void
TraverseFiles(const meta::DatePartionedTableFilesSchema& date_files, meta::TableFilesSchema& files_array) {
for (auto& day_files : date_files) {
for (auto& file : day_files.second) {
files_array.push_back(file);
}
}
}
} // namespace
DBImpl::DBImpl(const DBOptions& options)
@ -113,7 +126,7 @@ DBImpl::DropAll() {
Status
DBImpl::CreateTable(meta::TableSchema& table_schema) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
meta::TableSchema temp_schema = table_schema;
@ -122,34 +135,18 @@ DBImpl::CreateTable(meta::TableSchema& table_schema) {
}
Status
DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) {
DBImpl::DropTable(const std::string& table_id, const meta::DatesT& dates) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
// dates partly delete files of the table but currently we don't support
ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id;
if (dates.empty()) {
mem_mgr_->EraseMemVector(table_id); // not allow insert
meta_ptr_->DeleteTable(table_id); // soft delete table
// scheduler will determine when to delete table files
auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(table_id, meta_ptr_, nres);
scheduler::JobMgrInst::GetInstance()->Put(job);
job->WaitAndDelete();
} else {
meta_ptr_->DropPartitionsByDates(table_id, dates);
}
return Status::OK();
return DropTableRecursively(table_id, dates);
}
Status
DBImpl::DescribeTable(meta::TableSchema& table_schema) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
auto stat = meta_ptr_->DescribeTable(table_schema);
@ -160,7 +157,7 @@ DBImpl::DescribeTable(meta::TableSchema& table_schema) {
Status
DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
return meta_ptr_->HasTable(table_id, has_or_not);
@ -169,7 +166,7 @@ DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
Status
DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
return meta_ptr_->AllTables(table_schema_array);
@ -178,55 +175,59 @@ DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
Status
DBImpl::PreloadTable(const std::string& table_id) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
meta::DatePartionedTableFilesSchema files;
meta::DatesT dates;
// get all table files from parent table
std::vector<size_t> ids;
auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files);
meta::TableFilesSchema files_array;
auto status = GetFilesToSearch(table_id, ids, files_array);
if (!status.ok()) {
return status;
}
// get files from partition tables
std::vector<meta::TableSchema> partiton_array;
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
for (auto& schema : partiton_array) {
status = GetFilesToSearch(schema.table_id_, ids, files_array);
}
int64_t size = 0;
int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheCapacity();
int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage();
int64_t available_size = cache_total - cache_usage;
for (auto& day_files : files) {
for (auto& file : day_files.second) {
ExecutionEnginePtr engine =
EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_,
(MetricType)file.metric_type_, file.nlist_);
if (engine == nullptr) {
ENGINE_LOG_ERROR << "Invalid engine type";
return Status(DB_ERROR, "Invalid engine type");
}
for (auto& file : files_array) {
ExecutionEnginePtr engine = EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_,
(MetricType)file.metric_type_, file.nlist_);
if (engine == nullptr) {
ENGINE_LOG_ERROR << "Invalid engine type";
return Status(DB_ERROR, "Invalid engine type");
}
size += engine->PhysicalSize();
if (size > available_size) {
return Status(SERVER_CACHE_FULL, "Cache is full");
} else {
try {
// step 1: load index
engine->Load(true);
} catch (std::exception& ex) {
std::string msg = "Pre-load table encounter exception: " + std::string(ex.what());
ENGINE_LOG_ERROR << msg;
return Status(DB_ERROR, msg);
}
size += engine->PhysicalSize();
if (size > available_size) {
return Status(SERVER_CACHE_FULL, "Cache is full");
} else {
try {
// step 1: load index
engine->Load(true);
} catch (std::exception& ex) {
std::string msg = "Pre-load table encounter exception: " + std::string(ex.what());
ENGINE_LOG_ERROR << msg;
return Status(DB_ERROR, msg);
}
}
}
return Status::OK();
}
Status
DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
return meta_ptr_->UpdateTableFlag(table_id, flag);
@ -235,34 +236,96 @@ DBImpl::UpdateTableFlag(const std::string& table_id, int64_t flag) {
Status
DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
return meta_ptr_->Count(table_id, row_count);
return GetTableRowCountRecursively(table_id, row_count);
}
Status
DBImpl::InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) {
// ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
DBImpl::CreatePartition(const std::string& table_id, const std::string& partition_name,
const std::string& partition_tag) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
return meta_ptr_->CreatePartition(table_id, partition_name, partition_tag);
}
Status
DBImpl::DropPartition(const std::string& partition_name) {
if (shutting_down_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
auto status = mem_mgr_->EraseMemVector(partition_name); // not allow insert
status = meta_ptr_->DropPartition(partition_name); // soft delete table
// scheduler will determine when to delete table files
auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(partition_name, meta_ptr_, nres);
scheduler::JobMgrInst::GetInstance()->Put(job);
job->WaitAndDelete();
return Status::OK();
}
Status
DBImpl::DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) {
if (shutting_down_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
std::string partition_name;
auto status = meta_ptr_->GetPartitionName(table_id, partition_tag, partition_name);
return DropPartition(partition_name);
}
Status
DBImpl::ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) {
if (shutting_down_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
return meta_ptr_->ShowPartitions(table_id, partiton_schema_array);
}
Status
DBImpl::InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors,
IDNumbers& vector_ids) {
// ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
if (shutting_down_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
// if partition is specified, use partition as target table
Status status;
std::string target_table_name = table_id;
if (!partition_tag.empty()) {
std::string partition_name;
status = meta_ptr_->GetPartitionName(table_id, partition_tag, target_table_name);
}
// insert vectors into target table
milvus::server::CollectInsertMetrics metrics(n, status);
status = mem_mgr_->InsertVectors(table_id, n, vectors, vector_ids);
status = mem_mgr_->InsertVectors(target_table_name, n, vectors, vector_ids);
return status;
}
Status
DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
if (shutting_down_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
Status status;
{
std::unique_lock<std::mutex> lock(build_index_mutex_);
// step 1: check index difference
TableIndex old_index;
auto status = DescribeIndex(table_id, old_index);
status = DescribeIndex(table_id, old_index);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to get table index info for table: " << table_id;
return status;
@ -272,11 +335,8 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
TableIndex new_index = index;
new_index.metric_type_ = old_index.metric_type_; // dont change metric type, it was defined by CreateTable
if (!utils::IsSameIndex(old_index, new_index)) {
DropIndex(table_id);
status = meta_ptr_->UpdateTableIndex(table_id, new_index);
status = UpdateTableIndexRecursively(table_id, new_index);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id;
return status;
}
}
@ -287,101 +347,91 @@ DBImpl::CreateIndex(const std::string& table_id, const TableIndex& index) {
WaitMergeFileFinish();
// step 4: wait and build index
// for IDMAP type, only wait all NEW file converted to RAW file
// for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files
std::vector<int> file_types;
if (index.engine_type_ == static_cast<int32_t>(EngineType::FAISS_IDMAP)) {
file_types = {
static_cast<int32_t>(meta::TableFileSchema::NEW),
static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
};
} else {
file_types = {
static_cast<int32_t>(meta::TableFileSchema::RAW),
static_cast<int32_t>(meta::TableFileSchema::NEW),
static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
static_cast<int32_t>(meta::TableFileSchema::NEW_INDEX),
static_cast<int32_t>(meta::TableFileSchema::TO_INDEX),
};
}
status = BuildTableIndexRecursively(table_id, index);
std::vector<std::string> file_ids;
auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
int times = 1;
while (!file_ids.empty()) {
ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
if (index.engine_type_ != (int)EngineType::FAISS_IDMAP) {
status = meta_ptr_->UpdateTableFilesToIndex(table_id);
}
std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
times++;
}
return Status::OK();
return status;
}
Status
DBImpl::DescribeIndex(const std::string& table_id, TableIndex& index) {
if (shutting_down_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
return meta_ptr_->DescribeTableIndex(table_id, index);
}
Status
DBImpl::DropIndex(const std::string& table_id) {
if (shutting_down_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
ENGINE_LOG_DEBUG << "Drop index for table: " << table_id;
return meta_ptr_->DropTableIndex(table_id);
return DropTableIndexRecursively(table_id);
}
Status
DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
QueryResults& results) {
DBImpl::Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
meta::DatesT dates = {utils::GetDate()};
Status result = Query(table_id, k, nq, nprobe, vectors, dates, results);
Status result = Query(table_id, partition_tags, k, nq, nprobe, vectors, dates, result_ids, result_distances);
return result;
}
Status
DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
const meta::DatesT& dates, QueryResults& results) {
DBImpl::Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
ResultDistances& result_distances) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
ENGINE_LOG_DEBUG << "Query by dates for table: " << table_id << " date range count: " << dates.size();
// get all table files from table
meta::DatePartionedTableFilesSchema files;
Status status;
std::vector<size_t> ids;
auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files);
if (!status.ok()) {
return status;
}
meta::TableFilesSchema files_array;
meta::TableFilesSchema file_id_array;
for (auto& day_files : files) {
for (auto& file : day_files.second) {
file_id_array.push_back(file);
if (partition_tags.empty()) {
// no partition tag specified, means search in whole table
// get all table files from parent table
status = GetFilesToSearch(table_id, ids, files_array);
if (!status.ok()) {
return status;
}
std::vector<meta::TableSchema> partiton_array;
status = meta_ptr_->ShowPartitions(table_id, partiton_array);
for (auto& schema : partiton_array) {
status = GetFilesToSearch(schema.table_id_, ids, files_array);
}
} else {
// get files from specified partitions
std::set<std::string> partition_name_array;
GetPartitionsByTags(table_id, partition_tags, partition_name_array);
for (auto& partition_name : partition_name_array) {
status = GetFilesToSearch(partition_name, ids, files_array);
}
}
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query
status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, results);
status = QueryAsync(table_id, files_array, k, nq, nprobe, vectors, result_ids, result_distances);
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after query
return status;
}
Status
DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) {
DBImpl::QueryByFileID(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
ResultDistances& result_distances) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
ENGINE_LOG_DEBUG << "Query by file ids for table: " << table_id << " date range count: " << dates.size();
@ -395,25 +445,18 @@ DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_
ids.push_back(std::stoul(id, &sz));
}
meta::DatePartionedTableFilesSchema files_array;
auto status = meta_ptr_->FilesToSearch(table_id, ids, dates, files_array);
meta::TableFilesSchema files_array;
auto status = GetFilesToSearch(table_id, ids, files_array);
if (!status.ok()) {
return status;
}
meta::TableFilesSchema file_id_array;
for (auto& day_files : files_array) {
for (auto& file : day_files.second) {
file_id_array.push_back(file);
}
}
if (file_id_array.empty()) {
if (files_array.empty()) {
return Status(DB_ERROR, "Invalid file id");
}
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info before query
status = QueryAsync(table_id, file_id_array, k, nq, nprobe, vectors, results);
status = QueryAsync(table_id, files_array, k, nq, nprobe, vectors, result_ids, result_distances);
cache::CpuCacheMgr::GetInstance()->PrintInfo(); // print cache info after query
return status;
}
@ -421,7 +464,7 @@ DBImpl::Query(const std::string& table_id, const std::vector<std::string>& file_
Status
DBImpl::Size(uint64_t& result) {
if (shutting_down_.load(std::memory_order_acquire)) {
return Status(DB_ERROR, "Milsvus server is shutdown!");
return SHUTDOWN_ERROR;
}
return meta_ptr_->Size(result);
@ -432,7 +475,7 @@ DBImpl::Size(uint64_t& result) {
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////
Status
DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, QueryResults& results) {
uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) {
server::CollectQueryMetrics metrics(nq);
TimeRecorder rc("");
@ -453,7 +496,8 @@ DBImpl::QueryAsync(const std::string& table_id, const meta::TableFilesSchema& fi
}
// step 3: construct results
results = job->GetResult();
result_ids = job->GetResultIds();
result_distances = job->GetResultDistances();
rc.ElapseFromBegin("Engine query totally cost");
return Status::OK();
@ -772,5 +816,183 @@ DBImpl::BackgroundBuildIndex() {
ENGINE_LOG_TRACE << "Background build index thread exit";
}
// Collect the files of `table_id` that are eligible for a search.
//
// Resolves the given meta file ids through the meta layer using an empty
// date filter (i.e. no date restriction) and flattens the date-partitioned
// result into a plain file list via TraverseFiles.
Status
DBImpl::GetFilesToSearch(const std::string& table_id, const std::vector<size_t>& file_ids,
                         meta::TableFilesSchema& files) {
    meta::DatesT no_date_filter;  // empty set: do not restrict by date
    meta::DatePartionedTableFilesSchema files_by_date;

    auto status = meta_ptr_->FilesToSearch(table_id, file_ids, no_date_filter, files_by_date);
    if (status.ok()) {
        // flatten the per-date grouping into a single flat schema list
        TraverseFiles(files_by_date, files);
        return Status::OK();
    }
    return status;
}
// Resolve partition tags to internal partition table names.
//
// Each tag is matched as a regular expression against every partition's
// stored tag; the table ids of matching partitions are inserted into
// `partition_name_array` (the std::set naturally de-duplicates multiple
// matches). Returns the meta-layer error if the partition list cannot be
// fetched.
Status
DBImpl::GetPartitionsByTags(const std::string& table_id, const std::vector<std::string>& partition_tags,
                            std::set<std::string>& partition_name_array) {
    std::vector<meta::TableSchema> partition_array;
    auto status = meta_ptr_->ShowPartitions(table_id, partition_array);
    if (!status.ok()) {
        // fix: previously a ShowPartitions failure was ignored and an empty
        // result was silently reported as success
        return status;
    }

    for (auto& tag : partition_tags) {
        for (auto& schema : partition_array) {
            if (server::StringHelpFunctions::IsRegexMatch(schema.partition_tag_, tag)) {
                partition_name_array.insert(schema.table_id_);
            }
        }
    }

    return Status::OK();
}
// Drop a table and, recursively, every partition table that belongs to it.
//
// With an empty `dates` the table is fully dropped: inserts are blocked by
// erasing its mem-vectors, the table is soft-deleted in meta, and a delete
// job is handed to the scheduler (which decides when to remove the files).
// A non-empty `dates` only removes the data of those dates.
Status
DBImpl::DropTableRecursively(const std::string& table_id, const meta::DatesT& dates) {
    // dates partly delete files of the table but currently we don't support
    ENGINE_LOG_DEBUG << "Prepare to delete table " << table_id;

    Status status;
    if (dates.empty()) {
        status = mem_mgr_->EraseMemVector(table_id);  // not allow insert (best effort)
        status = meta_ptr_->DropTable(table_id);      // soft delete table
        if (!status.ok()) {
            // fix: a failed soft-delete used to be silently ignored
            return status;
        }

        // scheduler will determine when to delete table files
        auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
        scheduler::DeleteJobPtr job = std::make_shared<scheduler::DeleteJob>(table_id, meta_ptr_, nres);
        scheduler::JobMgrInst::GetInstance()->Put(job);
        job->WaitAndDelete();
    } else {
        status = meta_ptr_->DropDataByDate(table_id, dates);
        if (!status.ok()) {
            // fix: a failed date-range drop used to be silently ignored
            return status;
        }
    }

    // recurse into each partition (a partition is itself a table in meta)
    std::vector<meta::TableSchema> partition_array;
    status = meta_ptr_->ShowPartitions(table_id, partition_array);
    if (!status.ok()) {
        return status;
    }
    for (auto& schema : partition_array) {
        status = DropTableRecursively(schema.table_id_, dates);
        if (!status.ok()) {
            return status;
        }
    }

    return Status::OK();
}
// Update the index descriptor of a table and of all its partitions.
//
// Drops the existing index first, then writes the new index info into meta,
// then recurses into each partition table. Fails fast on the first error.
Status
DBImpl::UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index) {
    DropIndex(table_id);

    auto status = meta_ptr_->UpdateTableIndex(table_id, index);
    if (!status.ok()) {
        ENGINE_LOG_ERROR << "Failed to update table index info for table: " << table_id;
        return status;
    }

    std::vector<meta::TableSchema> partition_array;
    status = meta_ptr_->ShowPartitions(table_id, partition_array);
    if (!status.ok()) {
        // fix: a ShowPartitions failure used to be ignored, skipping partitions
        return status;
    }
    for (auto& schema : partition_array) {
        status = UpdateTableIndexRecursively(schema.table_id_, index);
        if (!status.ok()) {
            return status;
        }
    }

    return Status::OK();
}
// Wait until the index of a table (and of all its partitions) is built.
//
// Polls the meta layer for files that are not yet in their final state and
// sleeps between rounds with a linear back-off capped at 10 seconds. For
// IDMAP tables only NEW/NEW_MERGE files are awaited (they become RAW); for
// every other engine type all non-INDEX states are awaited.
Status
DBImpl::BuildTableIndexRecursively(const std::string& table_id, const TableIndex& index) {
    // for IDMAP type, only wait all NEW file converted to RAW file
    // for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files
    std::vector<int> file_types;
    if (index.engine_type_ == static_cast<int32_t>(EngineType::FAISS_IDMAP)) {
        file_types = {
            static_cast<int32_t>(meta::TableFileSchema::NEW),
            static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
        };
    } else {
        file_types = {
            static_cast<int32_t>(meta::TableFileSchema::RAW),
            static_cast<int32_t>(meta::TableFileSchema::NEW),
            static_cast<int32_t>(meta::TableFileSchema::NEW_MERGE),
            static_cast<int32_t>(meta::TableFileSchema::NEW_INDEX),
            static_cast<int32_t>(meta::TableFileSchema::TO_INDEX),
        };
    }

    // get files to build index
    std::vector<std::string> file_ids;
    auto status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
    if (!status.ok()) {
        // fix: a meta failure used to be ignored here
        return status;
    }

    int times = 1;
    while (!file_ids.empty()) {
        ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
        if (index.engine_type_ != static_cast<int32_t>(EngineType::FAISS_IDMAP)) {
            status = meta_ptr_->UpdateTableFilesToIndex(table_id);
        }

        // linear back-off between polls, capped at 10 seconds
        std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
        status = meta_ptr_->FilesByType(table_id, file_types, file_ids);
        if (!status.ok()) {
            // fix: a persistent meta failure used to keep this loop spinning
            // on the stale file_ids from the previous round
            return status;
        }
        times++;
    }

    // build index for partition
    std::vector<meta::TableSchema> partition_array;
    status = meta_ptr_->ShowPartitions(table_id, partition_array);
    if (!status.ok()) {
        return status;
    }
    for (auto& schema : partition_array) {
        status = BuildTableIndexRecursively(schema.table_id_, index);
        if (!status.ok()) {
            return status;
        }
    }

    return Status::OK();
}
// Drop the index of a table and of all its partitions.
//
// Removes the index metadata for `table_id`, then recurses into every
// partition table. Fails fast on the first error.
Status
DBImpl::DropTableIndexRecursively(const std::string& table_id) {
    ENGINE_LOG_DEBUG << "Drop index for table: " << table_id;
    auto status = meta_ptr_->DropTableIndex(table_id);
    if (!status.ok()) {
        return status;
    }

    // drop partition index
    std::vector<meta::TableSchema> partition_array;
    status = meta_ptr_->ShowPartitions(table_id, partition_array);
    if (!status.ok()) {
        // fix: a ShowPartitions failure used to be ignored, leaving partition
        // indexes in place while reporting success
        return status;
    }
    for (auto& schema : partition_array) {
        status = DropTableIndexRecursively(schema.table_id_);
        if (!status.ok()) {
            return status;
        }
    }

    return Status::OK();
}
// Compute the total row count of a table including all its partitions.
//
// `row_count` is reset to 0, set to the parent table's own count, then each
// partition's (recursively computed) count is added. Fails fast on the
// first meta error, leaving `row_count` partially accumulated.
Status
DBImpl::GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count) {
    row_count = 0;
    auto status = meta_ptr_->Count(table_id, row_count);
    if (!status.ok()) {
        return status;
    }

    // get partition row count
    std::vector<meta::TableSchema> partition_array;
    status = meta_ptr_->ShowPartitions(table_id, partition_array);
    if (!status.ok()) {
        // fix: a ShowPartitions failure used to be ignored, silently
        // under-reporting the total row count
        return status;
    }
    for (auto& schema : partition_array) {
        uint64_t partition_row_count = 0;
        status = GetTableRowCountRecursively(schema.table_id_, partition_row_count);
        if (!status.ok()) {
            return status;
        }

        row_count += partition_row_count;
    }

    return Status::OK();
}
} // namespace engine
} // namespace milvus

View File

@ -57,7 +57,7 @@ class DBImpl : public DB {
CreateTable(meta::TableSchema& table_schema) override;
Status
DeleteTable(const std::string& table_id, const meta::DatesT& dates) override;
DropTable(const std::string& table_id, const meta::DatesT& dates) override;
Status
DescribeTable(meta::TableSchema& table_schema) override;
@ -78,7 +78,21 @@ class DBImpl : public DB {
GetTableRowCount(const std::string& table_id, uint64_t& row_count) override;
Status
InsertVectors(const std::string& table_id, uint64_t n, const float* vectors, IDNumbers& vector_ids) override;
CreatePartition(const std::string& table_id, const std::string& partition_name,
const std::string& partition_tag) override;
Status
DropPartition(const std::string& partition_name) override;
Status
DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) override;
Status
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) override;
Status
InsertVectors(const std::string& table_id, const std::string& partition_tag, uint64_t n, const float* vectors,
IDNumbers& vector_ids) override;
Status
CreateIndex(const std::string& table_id, const TableIndex& index) override;
@ -90,16 +104,18 @@ class DBImpl : public DB {
DropIndex(const std::string& table_id) override;
Status
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
QueryResults& results) override;
Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances) override;
Status
Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
const meta::DatesT& dates, QueryResults& results) override;
Query(const std::string& table_id, const std::vector<std::string>& partition_tags, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
ResultDistances& result_distances) override;
Status
Query(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, QueryResults& results) override;
QueryByFileID(const std::string& table_id, const std::vector<std::string>& file_ids, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, const meta::DatesT& dates, ResultIds& result_ids,
ResultDistances& result_distances) override;
Status
Size(uint64_t& result) override;
@ -107,7 +123,7 @@ class DBImpl : public DB {
private:
Status
QueryAsync(const std::string& table_id, const meta::TableFilesSchema& files, uint64_t k, uint64_t nq,
uint64_t nprobe, const float* vectors, QueryResults& results);
uint64_t nprobe, const float* vectors, ResultIds& result_ids, ResultDistances& result_distances);
void
BackgroundTimerTask();
@ -136,6 +152,28 @@ class DBImpl : public DB {
Status
MemSerialize();
Status
GetFilesToSearch(const std::string& table_id, const std::vector<size_t>& file_ids, meta::TableFilesSchema& files);
Status
GetPartitionsByTags(const std::string& table_id, const std::vector<std::string>& partition_tags,
std::set<std::string>& partition_name_array);
Status
DropTableRecursively(const std::string& table_id, const meta::DatesT& dates);
Status
UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index);
Status
BuildTableIndexRecursively(const std::string& table_id, const TableIndex& index);
Status
DropTableIndexRecursively(const std::string& table_id);
Status
GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count);
private:
const DBOptions options_;

View File

@ -19,6 +19,7 @@
#include "db/engine/ExecutionEngine.h"
#include <faiss/Index.h>
#include <stdint.h>
#include <utility>
#include <vector>
@ -30,8 +31,8 @@ typedef int64_t IDNumber;
typedef IDNumber* IDNumberPtr;
typedef std::vector<IDNumber> IDNumbers;
typedef std::vector<std::pair<IDNumber, double>> QueryResult;
typedef std::vector<QueryResult> QueryResults;
typedef std::vector<faiss::Index::idx_t> ResultIds;
typedef std::vector<faiss::Index::distance_t> ResultDistances;
struct TableIndex {
int32_t engine_type_ = (int)EngineType::FAISS_IDMAP;

View File

@ -50,14 +50,11 @@ class Meta {
virtual Status
AllTables(std::vector<TableSchema>& table_schema_array) = 0;
virtual Status
UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0;
virtual Status
UpdateTableFlag(const std::string& table_id, int64_t flag) = 0;
virtual Status
DeleteTable(const std::string& table_id) = 0;
DropTable(const std::string& table_id) = 0;
virtual Status
DeleteTableFiles(const std::string& table_id) = 0;
@ -66,20 +63,41 @@ class Meta {
CreateTableFile(TableFileSchema& file_schema) = 0;
virtual Status
DropPartitionsByDates(const std::string& table_id, const DatesT& dates) = 0;
DropDataByDate(const std::string& table_id, const DatesT& dates) = 0;
virtual Status
GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) = 0;
virtual Status
UpdateTableFilesToIndex(const std::string& table_id) = 0;
virtual Status
UpdateTableFile(TableFileSchema& file_schema) = 0;
virtual Status
UpdateTableFiles(TableFilesSchema& files) = 0;
virtual Status
UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0;
virtual Status
UpdateTableFilesToIndex(const std::string& table_id) = 0;
virtual Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0;
virtual Status
DropTableIndex(const std::string& table_id) = 0;
virtual Status
CreatePartition(const std::string& table_name, const std::string& partition_name, const std::string& tag) = 0;
virtual Status
DropPartition(const std::string& partition_name) = 0;
virtual Status
ShowPartitions(const std::string& table_name, std::vector<meta::TableSchema>& partiton_schema_array) = 0;
virtual Status
GetPartitionName(const std::string& table_name, const std::string& tag, std::string& partition_name) = 0;
virtual Status
FilesToSearch(const std::string& table_id, const std::vector<size_t>& ids, const DatesT& dates,
DatePartionedTableFilesSchema& files) = 0;
@ -87,12 +105,6 @@ class Meta {
virtual Status
FilesToMerge(const std::string& table_id, DatePartionedTableFilesSchema& files) = 0;
virtual Status
Size(uint64_t& result) = 0;
virtual Status
Archive() = 0;
virtual Status
FilesToIndex(TableFilesSchema&) = 0;
@ -101,10 +113,10 @@ class Meta {
std::vector<std::string>& file_ids) = 0;
virtual Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0;
Size(uint64_t& result) = 0;
virtual Status
DropTableIndex(const std::string& table_id) = 0;
Archive() = 0;
virtual Status
CleanUp() = 0;

View File

@ -19,6 +19,7 @@
#include "db/Constants.h"
#include "db/engine/ExecutionEngine.h"
#include "src/config.h"
#include <map>
#include <memory>
@ -33,6 +34,7 @@ constexpr int32_t DEFAULT_ENGINE_TYPE = (int)EngineType::FAISS_IDMAP;
constexpr int32_t DEFAULT_NLIST = 16384;
constexpr int32_t DEFAULT_METRIC_TYPE = (int)MetricType::L2;
constexpr int32_t DEFAULT_INDEX_FILE_SIZE = ONE_GB;
constexpr char CURRENT_VERSION[] = MILVUS_VERSION;
constexpr int64_t FLAG_MASK_NO_USERID = 0x1;
constexpr int64_t FLAG_MASK_HAS_USERID = 0x1 << 1;
@ -57,6 +59,9 @@ struct TableSchema {
int32_t engine_type_ = DEFAULT_ENGINE_TYPE;
int32_t nlist_ = DEFAULT_NLIST;
int32_t metric_type_ = DEFAULT_METRIC_TYPE;
std::string owner_table_;
std::string partition_tag_;
std::string version_ = CURRENT_VERSION;
}; // TableSchema
struct TableFileSchema {

File diff suppressed because it is too large Load Diff

View File

@ -49,7 +49,7 @@ class MySQLMetaImpl : public Meta {
AllTables(std::vector<TableSchema>& table_schema_array) override;
Status
DeleteTable(const std::string& table_id) override;
DropTable(const std::string& table_id) override;
Status
DeleteTableFiles(const std::string& table_id) override;
@ -58,27 +58,17 @@ class MySQLMetaImpl : public Meta {
CreateTableFile(TableFileSchema& file_schema) override;
Status
DropPartitionsByDates(const std::string& table_id, const DatesT& dates) override;
DropDataByDate(const std::string& table_id, const DatesT& dates) override;
Status
GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) override;
Status
FilesByType(const std::string& table_id, const std::vector<int>& file_types,
std::vector<std::string>& file_ids) override;
Status
UpdateTableIndex(const std::string& table_id, const TableIndex& index) override;
Status
UpdateTableFlag(const std::string& table_id, int64_t flag) override;
Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) override;
Status
DropTableIndex(const std::string& table_id) override;
Status
UpdateTableFile(TableFileSchema& file_schema) override;
@ -88,6 +78,24 @@ class MySQLMetaImpl : public Meta {
Status
UpdateTableFiles(TableFilesSchema& files) override;
Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) override;
Status
DropTableIndex(const std::string& table_id) override;
Status
CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) override;
Status
DropPartition(const std::string& partition_name) override;
Status
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) override;
Status
GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override;
Status
FilesToSearch(const std::string& table_id, const std::vector<size_t>& ids, const DatesT& dates,
DatePartionedTableFilesSchema& files) override;
@ -98,6 +106,10 @@ class MySQLMetaImpl : public Meta {
Status
FilesToIndex(TableFilesSchema&) override;
Status
FilesByType(const std::string& table_id, const std::vector<int>& file_types,
std::vector<std::string>& file_ids) override;
Status
Archive() override;

File diff suppressed because it is too large Load Diff

View File

@ -49,7 +49,7 @@ class SqliteMetaImpl : public Meta {
AllTables(std::vector<TableSchema>& table_schema_array) override;
Status
DeleteTable(const std::string& table_id) override;
DropTable(const std::string& table_id) override;
Status
DeleteTableFiles(const std::string& table_id) override;
@ -58,21 +58,26 @@ class SqliteMetaImpl : public Meta {
CreateTableFile(TableFileSchema& file_schema) override;
Status
DropPartitionsByDates(const std::string& table_id, const DatesT& dates) override;
DropDataByDate(const std::string& table_id, const DatesT& dates) override;
Status
GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) override;
Status
FilesByType(const std::string& table_id, const std::vector<int>& file_types,
std::vector<std::string>& file_ids) override;
Status
UpdateTableIndex(const std::string& table_id, const TableIndex& index) override;
Status
UpdateTableFlag(const std::string& table_id, int64_t flag) override;
Status
UpdateTableFile(TableFileSchema& file_schema) override;
Status
UpdateTableFilesToIndex(const std::string& table_id) override;
Status
UpdateTableFiles(TableFilesSchema& files) override;
Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) override;
@ -80,13 +85,16 @@ class SqliteMetaImpl : public Meta {
DropTableIndex(const std::string& table_id) override;
Status
UpdateTableFilesToIndex(const std::string& table_id) override;
CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag) override;
Status
UpdateTableFile(TableFileSchema& file_schema) override;
DropPartition(const std::string& partition_name) override;
Status
UpdateTableFiles(TableFilesSchema& files) override;
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partiton_schema_array) override;
Status
GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override;
Status
FilesToSearch(const std::string& table_id, const std::vector<size_t>& ids, const DatesT& dates,
@ -99,11 +107,15 @@ class SqliteMetaImpl : public Meta {
FilesToIndex(TableFilesSchema&) override;
Status
Archive() override;
FilesByType(const std::string& table_id, const std::vector<int>& file_types,
std::vector<std::string>& file_ids) override;
Status
Size(uint64_t& result) override;
Status
Archive() override;
Status
CleanUp() override;

View File

@ -22,19 +22,22 @@ namespace grpc {
static const char* MilvusService_method_names[] = {
"/milvus.grpc.MilvusService/CreateTable",
"/milvus.grpc.MilvusService/HasTable",
"/milvus.grpc.MilvusService/DropTable",
"/milvus.grpc.MilvusService/CreateIndex",
"/milvus.grpc.MilvusService/Insert",
"/milvus.grpc.MilvusService/Search",
"/milvus.grpc.MilvusService/SearchInFiles",
"/milvus.grpc.MilvusService/DescribeTable",
"/milvus.grpc.MilvusService/CountTable",
"/milvus.grpc.MilvusService/ShowTables",
"/milvus.grpc.MilvusService/Cmd",
"/milvus.grpc.MilvusService/DeleteByRange",
"/milvus.grpc.MilvusService/PreloadTable",
"/milvus.grpc.MilvusService/DropTable",
"/milvus.grpc.MilvusService/CreateIndex",
"/milvus.grpc.MilvusService/DescribeIndex",
"/milvus.grpc.MilvusService/DropIndex",
"/milvus.grpc.MilvusService/CreatePartition",
"/milvus.grpc.MilvusService/ShowPartitions",
"/milvus.grpc.MilvusService/DropPartition",
"/milvus.grpc.MilvusService/Insert",
"/milvus.grpc.MilvusService/Search",
"/milvus.grpc.MilvusService/SearchInFiles",
"/milvus.grpc.MilvusService/Cmd",
"/milvus.grpc.MilvusService/DeleteByDate",
"/milvus.grpc.MilvusService/PreloadTable",
};
std::unique_ptr< MilvusService::Stub> MilvusService::NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) {
@ -46,19 +49,22 @@ std::unique_ptr< MilvusService::Stub> MilvusService::NewStub(const std::shared_p
MilvusService::Stub::Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel)
: channel_(channel), rpcmethod_CreateTable_(MilvusService_method_names[0], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_HasTable_(MilvusService_method_names[1], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_DropTable_(MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_CreateIndex_(MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_Insert_(MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_Search_(MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_SearchInFiles_(MilvusService_method_names[6], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_DescribeTable_(MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_CountTable_(MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_ShowTables_(MilvusService_method_names[9], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_Cmd_(MilvusService_method_names[10], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_DeleteByRange_(MilvusService_method_names[11], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_PreloadTable_(MilvusService_method_names[12], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_DescribeIndex_(MilvusService_method_names[13], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_DropIndex_(MilvusService_method_names[14], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_DescribeTable_(MilvusService_method_names[2], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_CountTable_(MilvusService_method_names[3], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_ShowTables_(MilvusService_method_names[4], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_DropTable_(MilvusService_method_names[5], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_CreateIndex_(MilvusService_method_names[6], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_DescribeIndex_(MilvusService_method_names[7], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_DropIndex_(MilvusService_method_names[8], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_CreatePartition_(MilvusService_method_names[9], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_ShowPartitions_(MilvusService_method_names[10], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_DropPartition_(MilvusService_method_names[11], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_Insert_(MilvusService_method_names[12], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_Search_(MilvusService_method_names[13], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_SearchInFiles_(MilvusService_method_names[14], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_Cmd_(MilvusService_method_names[15], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_DeleteByDate_(MilvusService_method_names[16], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
, rpcmethod_PreloadTable_(MilvusService_method_names[17], ::grpc::internal::RpcMethod::NORMAL_RPC, channel)
{}
::grpc::Status MilvusService::Stub::CreateTable(::grpc::ClientContext* context, const ::milvus::grpc::TableSchema& request, ::milvus::grpc::Status* response) {
@ -117,146 +123,6 @@ void MilvusService::Stub::experimental_async::HasTable(::grpc::ClientContext* co
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::BoolReply>::Create(channel_.get(), cq, rpcmethod_HasTable_, context, request, false);
}
::grpc::Status MilvusService::Stub::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropTable_, context, request, response);
}
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
}
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
}
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
}
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
}
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, true);
}
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, false);
}
::grpc::Status MilvusService::Stub::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreateIndex_, context, request, response);
}
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
}
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
}
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
}
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
}
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, true);
}
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, false);
}
::grpc::Status MilvusService::Stub::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Insert_, context, request, response);
}
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
}
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
}
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
}
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
}
::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, true);
}
::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, false);
}
// Generated stubs for the Search RPC (SearchParam -> TopKQueryResultList).
// Synchronous blocking unary Search.
::grpc::Status MilvusService::Stub::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Search_, context, request, response);
}
// Completion-callback async Search with a typed request.
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
}
// Completion-callback async Search with a pre-serialized ByteBuffer request.
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
}
// Reactor-based async Search with a typed request.
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
}
// Reactor-based async Search with a ByteBuffer request.
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
}
// CQ-based async Search; starts immediately (`true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, true);
}
// CQ-based async Search with deferred start (`false`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, false);
}
// Generated stubs for the SearchInFiles RPC (SearchInFilesParam -> TopKQueryResultList).
// Synchronous blocking unary SearchInFiles.
::grpc::Status MilvusService::Stub::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_SearchInFiles_, context, request, response);
}
// Completion-callback async variant with a typed request.
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
}
// Completion-callback async variant with a pre-serialized ByteBuffer request.
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
}
// Reactor-based async variant with a typed request.
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
}
// Reactor-based async variant with a ByteBuffer request.
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
}
// CQ-based async call; starts immediately (`true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, true);
}
// CQ-based async call with deferred start (`false`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, false);
}
// Synchronous blocking unary DescribeTable (TableName -> TableSchema). Generated stub.
::grpc::Status MilvusService::Stub::DescribeTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::TableSchema* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DescribeTable_, context, request, response);
}
@ -341,88 +207,60 @@ void MilvusService::Stub::experimental_async::ShowTables(::grpc::ClientContext*
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TableNameList>::Create(channel_.get(), cq, rpcmethod_ShowTables_, context, request, false);
}
::grpc::Status MilvusService::Stub::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Cmd_, context, request, response);
::grpc::Status MilvusService::Stub::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropTable_, context, request, response);
}
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
}
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, std::move(f));
}
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
}
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
void MilvusService::Stub::experimental_async::DropTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropTable_, context, request, response, reactor);
}
::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, true);
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, true);
}
::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, false);
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropTable_, context, request, false);
}
::grpc::Status MilvusService::Stub::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::milvus::grpc::Status* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DeleteByRange_, context, request, response);
::grpc::Status MilvusService::Stub::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::milvus::grpc::Status* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreateIndex_, context, request, response);
}
void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, std::move(f));
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
}
void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, std::move(f));
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, std::move(f));
}
void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, reactor);
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
}
void MilvusService::Stub::experimental_async::DeleteByRange(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByRange_, context, request, response, reactor);
void MilvusService::Stub::experimental_async::CreateIndex(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreateIndex_, context, request, response, reactor);
}
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByRange_, context, request, true);
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, true);
}
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDeleteByRangeRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByRangeParam& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByRange_, context, request, false);
}
// Generated stubs for the PreloadTable RPC (TableName -> Status).
// Synchronous blocking unary PreloadTable.
::grpc::Status MilvusService::Stub::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_PreloadTable_, context, request, response);
}
// Completion-callback async variant with a typed request.
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
}
// Completion-callback async variant with a pre-serialized ByteBuffer request.
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
}
// Reactor-based async variant with a typed request.
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
}
// Reactor-based async variant with a ByteBuffer request.
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
}
// CQ-based async call; starts immediately (`true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, true);
}
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, false);
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreateIndexRaw(::grpc::ClientContext* context, const ::milvus::grpc::IndexParam& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreateIndex_, context, request, false);
}
::grpc::Status MilvusService::Stub::DescribeIndex(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::IndexParam* response) {
@ -481,6 +319,258 @@ void MilvusService::Stub::experimental_async::DropIndex(::grpc::ClientContext* c
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropIndex_, context, request, false);
}
// Generated stubs for the CreatePartition RPC (PartitionParam -> Status).
// Synchronous blocking unary CreatePartition.
::grpc::Status MilvusService::Stub::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_CreatePartition_, context, request, response);
}
// Completion-callback async variant with a typed request.
void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, std::move(f));
}
// Completion-callback async variant with a pre-serialized ByteBuffer request.
void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, std::move(f));
}
// Reactor-based async variant with a typed request.
void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, reactor);
}
// Reactor-based async variant with a ByteBuffer request.
void MilvusService::Stub::experimental_async::CreatePartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_CreatePartition_, context, request, response, reactor);
}
// CQ-based async call; starts immediately (`true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreatePartition_, context, request, true);
}
// CQ-based async call with deferred start (`false`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncCreatePartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_CreatePartition_, context, request, false);
}
// Generated stubs for the ShowPartitions RPC (TableName -> PartitionList).
// Synchronous blocking unary ShowPartitions.
::grpc::Status MilvusService::Stub::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::PartitionList* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_ShowPartitions_, context, request, response);
}
// Completion-callback async variant with a typed request.
void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, std::move(f));
}
// Completion-callback async variant with a pre-serialized ByteBuffer request.
void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, std::move(f));
}
// Reactor-based async variant with a typed request.
void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, reactor);
}
// Reactor-based async variant with a ByteBuffer request.
void MilvusService::Stub::experimental_async::ShowPartitions(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::PartitionList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_ShowPartitions_, context, request, response, reactor);
}
// CQ-based async call; starts immediately (`true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* MilvusService::Stub::AsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::PartitionList>::Create(channel_.get(), cq, rpcmethod_ShowPartitions_, context, request, true);
}
// CQ-based async call with deferred start (`false`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::PartitionList>* MilvusService::Stub::PrepareAsyncShowPartitionsRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::PartitionList>::Create(channel_.get(), cq, rpcmethod_ShowPartitions_, context, request, false);
}
// Generated stubs for the DropPartition RPC (PartitionParam -> Status).
// Synchronous blocking unary DropPartition.
::grpc::Status MilvusService::Stub::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::milvus::grpc::Status* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DropPartition_, context, request, response);
}
// Completion-callback async variant with a typed request.
void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, std::move(f));
}
// Completion-callback async variant with a pre-serialized ByteBuffer request.
void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, std::move(f));
}
// Reactor-based async variant with a typed request.
void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, reactor);
}
// Reactor-based async variant with a ByteBuffer request.
void MilvusService::Stub::experimental_async::DropPartition(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DropPartition_, context, request, response, reactor);
}
// CQ-based async call; starts immediately (`true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropPartition_, context, request, true);
}
// CQ-based async call with deferred start (`false`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDropPartitionRaw(::grpc::ClientContext* context, const ::milvus::grpc::PartitionParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DropPartition_, context, request, false);
}
// Generated stubs for the Insert RPC (InsertParam -> VectorIds).
// Synchronous blocking unary Insert.
::grpc::Status MilvusService::Stub::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::milvus::grpc::VectorIds* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Insert_, context, request, response);
}
// Completion-callback async variant with a typed request.
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
}
// Completion-callback async variant with a pre-serialized ByteBuffer request.
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, std::move(f));
}
// Reactor-based async variant with a typed request.
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
}
// Reactor-based async variant with a ByteBuffer request.
void MilvusService::Stub::experimental_async::Insert(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::VectorIds* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Insert_, context, request, response, reactor);
}
// CQ-based async call; starts immediately (`true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::AsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, true);
}
// CQ-based async call with deferred start (`false`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::VectorIds>* MilvusService::Stub::PrepareAsyncInsertRaw(::grpc::ClientContext* context, const ::milvus::grpc::InsertParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::VectorIds>::Create(channel_.get(), cq, rpcmethod_Insert_, context, request, false);
}
// Generated stubs for the Search RPC (SearchParam -> TopKQueryResultList).
// Synchronous blocking unary Search.
::grpc::Status MilvusService::Stub::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::milvus::grpc::TopKQueryResultList* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Search_, context, request, response);
}
// Completion-callback async variant with a typed request.
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
}
// Completion-callback async variant with a pre-serialized ByteBuffer request.
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, std::move(f));
}
// Reactor-based async variant with a typed request.
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
}
// Reactor-based async variant with a ByteBuffer request.
void MilvusService::Stub::experimental_async::Search(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Search_, context, request, response, reactor);
}
// CQ-based async call; starts immediately (`true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, true);
}
// CQ-based async call with deferred start (`false`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::PrepareAsyncSearchRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_Search_, context, request, false);
}
// Generated stubs for the SearchInFiles RPC (SearchInFilesParam -> TopKQueryResultList).
// Synchronous blocking unary SearchInFiles.
::grpc::Status MilvusService::Stub::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::milvus::grpc::TopKQueryResultList* response) {
  return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_SearchInFiles_, context, request, response);
}
// Completion-callback async variant with a typed request.
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
}
// Completion-callback async variant with a pre-serialized ByteBuffer request.
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, std::function<void(::grpc::Status)> f) {
  ::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, std::move(f));
}
// Reactor-based async variant with a typed request.
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
}
// Reactor-based async variant with a ByteBuffer request.
void MilvusService::Stub::experimental_async::SearchInFiles(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::TopKQueryResultList* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
  ::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_SearchInFiles_, context, request, response, reactor);
}
// CQ-based async call; starts immediately (`true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::AsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, true);
}
// CQ-based async call with deferred start (`false`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::TopKQueryResultList>* MilvusService::Stub::PrepareAsyncSearchInFilesRaw(::grpc::ClientContext* context, const ::milvus::grpc::SearchInFilesParam& request, ::grpc::CompletionQueue* cq) {
  return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::TopKQueryResultList>::Create(channel_.get(), cq, rpcmethod_SearchInFiles_, context, request, false);
}
// -- Cmd client stubs (generated-style; regenerate, don't edit) --
// Synchronous unary call: blocks until the server replies or the RPC fails.
::grpc::Status MilvusService::Stub::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::milvus::grpc::StringReply* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_Cmd_, context, request, response);
}
// Callback-style async call with a typed request message.
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
}
// Callback-style async call taking an already-serialized request buffer.
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, std::move(f));
}
// Reactor-style async call (typed request).
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
}
// Reactor-style async call with a pre-serialized request buffer.
void MilvusService::Stub::experimental_async::Cmd(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::StringReply* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_Cmd_, context, request, response, reactor);
}
// CQ-based async call, started immediately (trailing `true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::AsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, true);
}
// CQ-based async call, prepared only (trailing `false`) — needs StartCall().
::grpc::ClientAsyncResponseReader< ::milvus::grpc::StringReply>* MilvusService::Stub::PrepareAsyncCmdRaw(::grpc::ClientContext* context, const ::milvus::grpc::Command& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::StringReply>::Create(channel_.get(), cq, rpcmethod_Cmd_, context, request, false);
}
// -- DeleteByDate client stubs (generated-style; regenerate, don't edit) --
// Synchronous unary call: blocks until the server replies or the RPC fails.
::grpc::Status MilvusService::Stub::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::milvus::grpc::Status* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_DeleteByDate_, context, request, response);
}
// Callback-style async call with a typed request message.
void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, std::move(f));
}
// Callback-style async call taking an already-serialized request buffer.
void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, std::move(f));
}
// Reactor-style async call (typed request).
void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, reactor);
}
// Reactor-style async call with a pre-serialized request buffer.
void MilvusService::Stub::experimental_async::DeleteByDate(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_DeleteByDate_, context, request, response, reactor);
}
// CQ-based async call, started immediately (trailing `true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByDate_, context, request, true);
}
// CQ-based async call, prepared only (trailing `false`) — needs StartCall().
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncDeleteByDateRaw(::grpc::ClientContext* context, const ::milvus::grpc::DeleteByDateParam& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_DeleteByDate_, context, request, false);
}
// -- PreloadTable client stubs (generated-style; regenerate, don't edit) --
// Synchronous unary call: blocks until the server replies or the RPC fails.
::grpc::Status MilvusService::Stub::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::milvus::grpc::Status* response) {
return ::grpc::internal::BlockingUnaryCall(channel_.get(), rpcmethod_PreloadTable_, context, request, response);
}
// Callback-style async call with a typed request message.
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
}
// Callback-style async call taking an already-serialized request buffer.
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, std::function<void(::grpc::Status)> f) {
::grpc_impl::internal::CallbackUnaryCall(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, std::move(f));
}
// Reactor-style async call (typed request).
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
}
// Reactor-style async call with a pre-serialized request buffer.
void MilvusService::Stub::experimental_async::PreloadTable(::grpc::ClientContext* context, const ::grpc::ByteBuffer* request, ::milvus::grpc::Status* response, ::grpc::experimental::ClientUnaryReactor* reactor) {
::grpc_impl::internal::ClientCallbackUnaryFactory::Create(stub_->channel_.get(), stub_->rpcmethod_PreloadTable_, context, request, response, reactor);
}
// CQ-based async call, started immediately (trailing `true`).
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::AsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, true);
}
// CQ-based async call, prepared only (trailing `false`) — needs StartCall().
::grpc::ClientAsyncResponseReader< ::milvus::grpc::Status>* MilvusService::Stub::PrepareAsyncPreloadTableRaw(::grpc::ClientContext* context, const ::milvus::grpc::TableName& request, ::grpc::CompletionQueue* cq) {
return ::grpc_impl::internal::ClientAsyncResponseReaderFactory< ::milvus::grpc::Status>::Create(channel_.get(), cq, rpcmethod_PreloadTable_, context, request, false);
}
MilvusService::Service::Service() {
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[0],
@ -495,68 +585,83 @@ MilvusService::Service::Service() {
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[2],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
std::mem_fn(&MilvusService::Service::DropTable), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[3],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::IndexParam, ::milvus::grpc::Status>(
std::mem_fn(&MilvusService::Service::CreateIndex), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[4],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>(
std::mem_fn(&MilvusService::Service::Insert), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[5],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>(
std::mem_fn(&MilvusService::Service::Search), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[6],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>(
std::mem_fn(&MilvusService::Service::SearchInFiles), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[7],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::TableSchema>(
std::mem_fn(&MilvusService::Service::DescribeTable), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[8],
MilvusService_method_names[3],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::TableRowCount>(
std::mem_fn(&MilvusService::Service::CountTable), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[9],
MilvusService_method_names[4],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::TableNameList>(
std::mem_fn(&MilvusService::Service::ShowTables), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[10],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::StringReply>(
std::mem_fn(&MilvusService::Service::Cmd), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[11],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::DeleteByRangeParam, ::milvus::grpc::Status>(
std::mem_fn(&MilvusService::Service::DeleteByRange), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[12],
MilvusService_method_names[5],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
std::mem_fn(&MilvusService::Service::PreloadTable), this)));
std::mem_fn(&MilvusService::Service::DropTable), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[13],
MilvusService_method_names[6],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::IndexParam, ::milvus::grpc::Status>(
std::mem_fn(&MilvusService::Service::CreateIndex), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[7],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::IndexParam>(
std::mem_fn(&MilvusService::Service::DescribeIndex), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[14],
MilvusService_method_names[8],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
std::mem_fn(&MilvusService::Service::DropIndex), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[9],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>(
std::mem_fn(&MilvusService::Service::CreatePartition), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[10],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::PartitionList>(
std::mem_fn(&MilvusService::Service::ShowPartitions), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[11],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::PartitionParam, ::milvus::grpc::Status>(
std::mem_fn(&MilvusService::Service::DropPartition), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[12],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::InsertParam, ::milvus::grpc::VectorIds>(
std::mem_fn(&MilvusService::Service::Insert), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[13],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchParam, ::milvus::grpc::TopKQueryResultList>(
std::mem_fn(&MilvusService::Service::Search), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[14],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::SearchInFilesParam, ::milvus::grpc::TopKQueryResultList>(
std::mem_fn(&MilvusService::Service::SearchInFiles), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[15],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::Command, ::milvus::grpc::StringReply>(
std::mem_fn(&MilvusService::Service::Cmd), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[16],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::DeleteByDateParam, ::milvus::grpc::Status>(
std::mem_fn(&MilvusService::Service::DeleteByDate), this)));
AddMethod(new ::grpc::internal::RpcServiceMethod(
MilvusService_method_names[17],
::grpc::internal::RpcMethod::NORMAL_RPC,
new ::grpc::internal::RpcMethodHandler< MilvusService::Service, ::milvus::grpc::TableName, ::milvus::grpc::Status>(
std::mem_fn(&MilvusService::Service::PreloadTable), this)));
}
MilvusService::Service::~Service() {
@ -576,41 +681,6 @@ MilvusService::Service::~Service() {
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
// Default base-class handlers: each RPC returns UNIMPLEMENTED until a
// subclass overrides it. The (void) casts suppress unused-parameter
// warnings — the arguments are intentionally ignored here.
::grpc::Status MilvusService::Service::DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::TableSchema* response) {
(void) context;
(void) request;
@ -632,21 +702,14 @@ MilvusService::Service::~Service() {
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) {
// Default base-class handlers: report UNIMPLEMENTED until overridden;
// parameters are deliberately unused (hence the (void) casts).
::grpc::Status MilvusService::Service::DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request, ::milvus::grpc::Status* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
::grpc::Status MilvusService::Service::CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request, ::milvus::grpc::Status* response) {
(void) context;
(void) request;
(void) response;
@ -667,6 +730,69 @@ MilvusService::Service::~Service() {
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
// Default base-class handlers for the remaining RPCs: each returns
// UNIMPLEMENTED until a subclass overrides it; the (void) casts silence
// unused-parameter warnings for the intentionally ignored arguments.
::grpc::Status MilvusService::Service::CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::PartitionList* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request, ::milvus::grpc::Status* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request, ::milvus::grpc::VectorIds* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request, ::milvus::grpc::TopKQueryResultList* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request, ::milvus::grpc::TopKQueryResultList* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request, ::milvus::grpc::StringReply* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request, ::milvus::grpc::Status* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
::grpc::Status MilvusService::Service::PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request, ::milvus::grpc::Status* response) {
(void) context;
(void) request;
(void) response;
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
}
} // namespace milvus
} // namespace grpc

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -5,14 +5,21 @@ import "status.proto";
package milvus.grpc;
/**
* @brief Table Name
* @brief Table name
*/
message TableName {
string table_name = 1;
}
/**
* @brief Table Name List
* @brief Partition name
*/
message PartitionName {
string partition_name = 1;
}
/**
* @brief Table name list
*/
message TableNameList {
Status status = 1;
@ -20,7 +27,7 @@ message TableNameList {
}
/**
* @brief Table Schema
* @brief Table schema
*/
message TableSchema {
Status status = 1;
@ -31,7 +38,24 @@ message TableSchema {
}
/**
* @brief Range Schema
* @brief Params of partition
*/
message PartitionParam {
string table_name = 1;
string partition_name = 2;
string tag = 3;
}
/**
* @brief Partition list
*/
message PartitionList {
Status status = 1;
repeated PartitionParam partition_array = 2;
}
/**
* @brief Range schema
*/
message Range {
string start_value = 1;
@ -46,12 +70,13 @@ message RowRecord {
}
/**
* @brief params to be inserted
* @brief Params to be inserted
*/
message InsertParam {
string table_name = 1;
repeated RowRecord row_record_array = 2;
repeated int64 row_id_array = 3; //optional
string partition_tag = 4;
}
/**
@ -63,7 +88,7 @@ message VectorIds {
}
/**
* @brief params for searching vector
* @brief Params for searching vector
*/
message SearchParam {
string table_name = 1;
@ -71,10 +96,11 @@ message SearchParam {
repeated Range query_range_array = 3;
int64 topk = 4;
int64 nprobe = 5;
repeated string partition_tag_array = 6;
}
/**
* @brief params for searching vector in files
* @brief Params for searching vector in files
*/
message SearchInFilesParam {
repeated string file_id_array = 1;
@ -105,7 +131,7 @@ message TopKQueryResultList {
}
/**
* @brief Server String Reply
* @brief Server string Reply
*/
message StringReply {
Status status = 1;
@ -129,7 +155,7 @@ message TableRowCount {
}
/**
* @brief Give Server Command
* @brief Give server Command
*/
message Command {
string cmd = 1;
@ -155,169 +181,173 @@ message IndexParam {
}
/**
* @brief table name and range for DeleteByRange
* @brief table name and range for DeleteByDate
*/
message DeleteByRangeParam {
message DeleteByDateParam {
Range range = 1;
string table_name = 2;
}
service MilvusService {
/**
* @brief Create table method
* @brief This method is used to create table
*
* This method is used to create table
*
* @param param, use to provide table information to be created.
* @param TableSchema, use to provide table information to be created.
*
* @return Status
*/
rpc CreateTable(TableSchema) returns (Status){}
/**
* @brief Test table existence method
* @brief This method is used to test table existence.
*
* This method is used to test table existence.
*
* @param table_name, table name is going to be tested.
* @param TableName, table name is going to be tested.
*
* @return BoolReply
*/
rpc HasTable(TableName) returns (BoolReply) {}
/**
* @brief Delete table method
* @brief This method is used to get table schema.
*
* This method is used to delete table.
* @param TableName, target table name.
*
* @param table_name, table name is going to be deleted.
*
*/
rpc DropTable(TableName) returns (Status) {}
/**
* @brief Build index by table method
*
* This method is used to build index by table in sync mode.
*
* @param table_name, table is going to be built index.
*
*/
rpc CreateIndex(IndexParam) returns (Status) {}
/**
* @brief Add vector array to table
*
* This method is used to add vector array to table.
*
* @param table_name, table_name is inserted.
* @param record_array, vector array is inserted.
*
* @return vector id array
*/
rpc Insert(InsertParam) returns (VectorIds) {}
/**
* @brief Query vector
*
* This method is used to query vector in table.
*
* @param table_name, table_name is queried.
* @param query_record_array, all vector are going to be queried.
* @param query_range_array, optional ranges for conditional search. If not specified, search whole table
* @param topk, how many similarity vectors will be searched.
*
* @return query result array.
*/
rpc Search(SearchParam) returns (TopKQueryResultList) {}
/**
* @brief Internal use query interface
*
* This method is used to query vector in specified files.
*
* @param file_id_array, specified files id array, queried.
* @param query_record_array, all vector are going to be queried.
* @param query_range_array, optional ranges for conditional search. If not specified, search whole table
* @param topk, how many similarity vectors will be searched.
*
* @return query result array.
*/
rpc SearchInFiles(SearchInFilesParam) returns (TopKQueryResultList) {}
/**
* @brief Get table schema
*
* This method is used to get table schema.
*
* @param table_name, target table name.
*
* @return table schema
* @return TableSchema
*/
rpc DescribeTable(TableName) returns (TableSchema) {}
/**
* @brief Get table schema
* @brief This method is used to get table schema.
*
* This method is used to get table schema.
* @param TableName, target table name.
*
* @param table_name, target table name.
*
* @return table schema
* @return TableRowCount
*/
rpc CountTable(TableName) returns (TableRowCount) {}
/**
* @brief List all tables in database
* @brief This method is used to list all tables.
*
* This method is used to list all tables.
* @param Command, dummy parameter.
*
*
* @return table names.
* @return TableNameList
*/
rpc ShowTables(Command) returns (TableNameList) {}
/**
* @brief Give the server status
* @brief This method is used to delete table.
*
* This method is used to give the server status.
* @param TableName, table name is going to be deleted.
*
* @return Server status.
* @return Status
*/
rpc DropTable(TableName) returns (Status) {}
/**
* @brief This method is used to build index by table in sync mode.
*
* @param IndexParam, index parameters.
*
* @return Status
*/
rpc CreateIndex(IndexParam) returns (Status) {}
/**
* @brief This method is used to describe index
*
* @param TableName, target table name.
*
* @return IndexParam
*/
rpc DescribeIndex(TableName) returns (IndexParam) {}
/**
* @brief This method is used to drop index
*
* @param TableName, target table name.
*
* @return Status
*/
rpc DropIndex(TableName) returns (Status) {}
/**
* @brief This method is used to create partition
*
* @param PartitionParam, partition parameters.
*
* @return Status
*/
rpc CreatePartition(PartitionParam) returns (Status) {}
/**
* @brief This method is used to show partition information
*
* @param TableName, target table name.
*
* @return PartitionList
*/
rpc ShowPartitions(TableName) returns (PartitionList) {}
/**
* @brief This method is used to drop partition
*
* @param PartitionParam, target partition.
*
* @return Status
*/
rpc DropPartition(PartitionParam) returns (Status) {}
/**
* @brief This method is used to add vector array to table.
*
* @param InsertParam, insert parameters.
*
* @return VectorIds
*/
rpc Insert(InsertParam) returns (VectorIds) {}
/**
* @brief This method is used to query vector in table.
*
* @param SearchParam, search parameters.
*
* @return TopKQueryResultList
*/
rpc Search(SearchParam) returns (TopKQueryResultList) {}
/**
* @brief This method is used to query vector in specified files.
*
* @param SearchInFilesParam, search-in-files parameters.
*
* @return TopKQueryResultList
*/
rpc SearchInFiles(SearchInFilesParam) returns (TopKQueryResultList) {}
/**
* @brief This method is used to give the server status.
*
* @param Command, command string
*
* @return StringReply
*/
rpc Cmd(Command) returns (StringReply) {}
/**
* @brief delete table by range
* @brief This method is used to delete vector by date range
*
* This method is used to delete vector by range
* @param DeleteByDateParam, delete parameters.
*
* @return rpc status.
* @return Status
*/
rpc DeleteByRange(DeleteByRangeParam) returns (Status) {}
rpc DeleteByDate(DeleteByDateParam) returns (Status) {}
/**
* @brief preload table
* @brief This method is used to preload table
*
* This method is used to preload table
* @param TableName, target table name.
*
* @return Status.
* @return Status
*/
rpc PreloadTable(TableName) returns (Status) {}
/**
* @brief describe index
*
* This method is used to describe index
*
* @return Status.
*/
rpc DescribeIndex(TableName) returns (IndexParam) {}
/**
* @brief drop index
*
* This method is used to drop index
*
* @return Status.
*/
rpc DropIndex(TableName) returns (Status) {}
}

View File

@ -22,7 +22,7 @@ cmake_minimum_required(VERSION 3.12)
message(STATUS "------------------------------KNOWHERE-----------------------------------")
message(STATUS "Building using CMake version: ${CMAKE_VERSION}")
set(KNOWHERE_VERSION "0.5.0")
set(KNOWHERE_VERSION "0.6.0")
string(REGEX MATCH "^[0-9]+\\.[0-9]+\\.[0-9]+" KNOWHERE_BASE_VERSION "${KNOWHERE_VERSION}")
project(knowhere VERSION "${KNOWHERE_BASE_VERSION}" LANGUAGES C CXX)
set(CMAKE_CXX_STANDARD 14)
@ -72,17 +72,17 @@ include(ExternalProject)
include(DefineOptionsCore)
include(BuildUtilsCore)
set(KNOWHERE_GPU_VERSION false)
if (MILVUS_CPU_VERSION OR KNOWHERE_CPU_VERSION)
message(STATUS "Building Knowhere CPU version")
add_compile_definitions("MILVUS_CPU_VERSION")
else ()
set(KNOWHERE_CPU_VERSION false)
if (MILVUS_GPU_VERSION OR KNOWHERE_GPU_VERSION)
message(STATUS "Building Knowhere GPU version")
add_compile_definitions("MILVUS_GPU_VERSION")
set(KNOWHERE_GPU_VERSION true)
enable_language(CUDA)
find_package(CUDA 10 REQUIRED)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda")
else ()
message(STATUS "Building Knowhere CPU version")
set(KNOWHERE_CPU_VERSION true)
add_compile_definitions("MILVUS_CPU_VERSION")
endif ()
include(ThirdPartyPackagesCore)

View File

@ -41,12 +41,12 @@ macro(define_option_string name description default)
endmacro()
#----------------------------------------------------------------------
set_option_category("CPU version")
set_option_category("GPU version")
if (MILVUS_CPU_VERSION)
define_option(KNOWHERE_CPU_VERSION "Build CPU version only" ON)
if (MILVUS_GPU_VERSION)
define_option(KNOWHERE_GPU_VERSION "Build GPU version" ON)
else ()
define_option(KNOWHERE_CPU_VERSION "Build CPU version only" OFF)
define_option(KNOWHERE_GPU_VERSION "Build GPU version" OFF)
endif ()
#----------------------------------------------------------------------
@ -81,17 +81,6 @@ define_option(KNOWHERE_WITH_FAISS_GPU_VERSION "Build with FAISS GPU version" ON)
define_option(BUILD_FAISS_WITH_MKL "Build FAISS with MKL" OFF)
#----------------------------------------------------------------------
if (MSVC)
set_option_category("MSVC")
define_option(MSVC_LINK_VERBOSE
"Pass verbose linking options when linking libraries and executables"
OFF)
define_option(KNOWHERE_USE_STATIC_CRT "Build KNOWHERE with statically linked CRT" OFF)
endif ()
#----------------------------------------------------------------------
set_option_category("Test and benchmark")

View File

@ -51,7 +51,13 @@ print_banner() {
std::cout << " /_/ /_/___/____/___/\\____/___/ " << std::endl;
std::cout << std::endl;
std::cout << "Welcome to Milvus!" << std::endl;
std::cout << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME << std::endl;
std::cout << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME << ", with "
#ifdef WITH_MKL
<< "MKL"
#else
<< "OpenBLAS"
#endif
<< " library." << std::endl;
#ifdef MILVUS_CPU_VERSION
std::cout << "You are using Milvus CPU version" << std::endl;
#else
@ -137,7 +143,7 @@ main(int argc, char* argv[]) {
s = server.Start();
if (s.ok()) {
std::cout << "Milvus server start successfully." << std::endl;
std::cout << "Milvus server started successfully!" << std::endl;
} else {
goto FAIL;
}

View File

@ -49,13 +49,21 @@ void
SearchJob::SearchDone(size_t index_id) {
std::unique_lock<std::mutex> lock(mutex_);
index_files_.erase(index_id);
cv_.notify_all();
if (index_files_.empty()) {
cv_.notify_all();
}
SERVER_LOG_DEBUG << "SearchJob " << id() << " finish index file: " << index_id;
}
ResultSet&
SearchJob::GetResult() {
return result_;
ResultIds&
SearchJob::GetResultIds() {
return result_ids_;
}
ResultDistances&
SearchJob::GetResultDistances() {
return result_distances_;
}
Status&

View File

@ -29,6 +29,7 @@
#include <vector>
#include "Job.h"
#include "db/Types.h"
#include "db/meta/MetaTypes.h"
namespace milvus {
@ -37,9 +38,9 @@ namespace scheduler {
using engine::meta::TableFileSchemaPtr;
using Id2IndexMap = std::unordered_map<size_t, TableFileSchemaPtr>;
using IdDistPair = std::pair<int64_t, double>;
using Id2DistVec = std::vector<IdDistPair>;
using ResultSet = std::vector<Id2DistVec>;
using ResultIds = engine::ResultIds;
using ResultDistances = engine::ResultDistances;
class SearchJob : public Job {
public:
@ -55,8 +56,11 @@ class SearchJob : public Job {
void
SearchDone(size_t index_id);
ResultSet&
GetResult();
ResultIds&
GetResultIds();
ResultDistances&
GetResultDistances();
Status&
GetStatus();
@ -90,6 +94,11 @@ class SearchJob : public Job {
return index_files_;
}
std::mutex&
mutex() {
return mutex_;
}
private:
uint64_t topk_ = 0;
uint64_t nq_ = 0;
@ -99,7 +108,8 @@ class SearchJob : public Job {
Id2IndexMap index_files_;
// TODO: column-base better ?
ResultSet result_;
ResultIds result_ids_;
ResultDistances result_distances_;
Status status_;
std::mutex mutex_;

View File

@ -219,8 +219,11 @@ XSearchTask::Execute() {
// step 3: pick up topk result
auto spec_k = index_engine_->Count() < topk ? index_engine_->Count() : topk;
XSearchTask::MergeTopkToResultSet(output_ids, output_distance, spec_k, nq, topk, metric_l2,
search_job->GetResult());
{
std::unique_lock<std::mutex> lock(search_job->mutex());
XSearchTask::MergeTopkToResultSet(output_ids, output_distance, spec_k, nq, topk, metric_l2,
search_job->GetResultIds(), search_job->GetResultDistances());
}
span = rc.RecordSection(hdr + ", reduce topk");
// search_job->AccumReduceCost(span);
@ -240,71 +243,69 @@ XSearchTask::Execute() {
}
void
XSearchTask::MergeTopkToResultSet(const std::vector<int64_t>& input_ids, const std::vector<float>& input_distance,
uint64_t input_k, uint64_t nq, uint64_t topk, bool ascending,
scheduler::ResultSet& result) {
if (result.empty()) {
result.resize(nq);
XSearchTask::MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const scheduler::ResultDistances& src_distances,
size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids,
scheduler::ResultDistances& tar_distances) {
if (src_ids.empty()) {
return;
}
size_t tar_k = tar_ids.size() / nq;
size_t buf_k = std::min(topk, src_k + tar_k);
scheduler::ResultIds buf_ids(nq * buf_k, -1);
scheduler::ResultDistances buf_distances(nq * buf_k, 0.0);
for (uint64_t i = 0; i < nq; i++) {
scheduler::Id2DistVec result_buf;
auto& result_i = result[i];
size_t buf_k_j = 0, src_k_j = 0, tar_k_j = 0;
size_t buf_idx, src_idx, tar_idx;
if (result[i].empty()) {
result_buf.resize(input_k, scheduler::IdDistPair(-1, 0.0));
uint64_t input_k_multi_i = topk * i;
for (auto k = 0; k < input_k; ++k) {
uint64_t idx = input_k_multi_i + k;
auto& result_buf_item = result_buf[k];
result_buf_item.first = input_ids[idx];
result_buf_item.second = input_distance[idx];
size_t buf_k_multi_i = buf_k * i;
size_t src_k_multi_i = topk * i;
size_t tar_k_multi_i = tar_k * i;
while (buf_k_j < buf_k && src_k_j < src_k && tar_k_j < tar_k) {
src_idx = src_k_multi_i + src_k_j;
tar_idx = tar_k_multi_i + tar_k_j;
buf_idx = buf_k_multi_i + buf_k_j;
if ((ascending && src_distances[src_idx] < tar_distances[tar_idx]) ||
(!ascending && src_distances[src_idx] > tar_distances[tar_idx])) {
buf_ids[buf_idx] = src_ids[src_idx];
buf_distances[buf_idx] = src_distances[src_idx];
src_k_j++;
} else {
buf_ids[buf_idx] = tar_ids[tar_idx];
buf_distances[buf_idx] = tar_distances[tar_idx];
tar_k_j++;
}
} else {
size_t tar_size = result_i.size();
uint64_t output_k = std::min(topk, input_k + tar_size);
result_buf.resize(output_k, scheduler::IdDistPair(-1, 0.0));
size_t buf_k = 0, src_k = 0, tar_k = 0;
uint64_t src_idx;
uint64_t input_k_multi_i = topk * i;
while (buf_k < output_k && src_k < input_k && tar_k < tar_size) {
src_idx = input_k_multi_i + src_k;
auto& result_buf_item = result_buf[buf_k];
auto& result_item = result_i[tar_k];
if ((ascending && input_distance[src_idx] < result_item.second) ||
(!ascending && input_distance[src_idx] > result_item.second)) {
result_buf_item.first = input_ids[src_idx];
result_buf_item.second = input_distance[src_idx];
src_k++;
} else {
result_buf_item = result_item;
tar_k++;
buf_k_j++;
}
if (buf_k_j < buf_k) {
if (src_k_j < src_k) {
while (buf_k_j < buf_k && src_k_j < src_k) {
buf_idx = buf_k_multi_i + buf_k_j;
src_idx = src_k_multi_i + src_k_j;
buf_ids[buf_idx] = src_ids[src_idx];
buf_distances[buf_idx] = src_distances[src_idx];
src_k_j++;
buf_k_j++;
}
buf_k++;
}
if (buf_k < output_k) {
if (src_k < input_k) {
while (buf_k < output_k && src_k < input_k) {
src_idx = input_k_multi_i + src_k;
auto& result_buf_item = result_buf[buf_k];
result_buf_item.first = input_ids[src_idx];
result_buf_item.second = input_distance[src_idx];
src_k++;
buf_k++;
}
} else {
while (buf_k < output_k && tar_k < tar_size) {
result_buf[buf_k] = result_i[tar_k];
tar_k++;
buf_k++;
}
} else {
while (buf_k_j < buf_k && tar_k_j < tar_k) {
buf_idx = buf_k_multi_i + buf_k_j;
tar_idx = tar_k_multi_i + tar_k_j;
buf_ids[buf_idx] = tar_ids[tar_idx];
buf_distances[buf_idx] = tar_distances[tar_idx];
tar_k_j++;
buf_k_j++;
}
}
}
result_i.swap(result_buf);
}
tar_ids.swap(buf_ids);
tar_distances.swap(buf_distances);
}
// void

View File

@ -39,8 +39,9 @@ class XSearchTask : public Task {
public:
static void
MergeTopkToResultSet(const std::vector<int64_t>& input_ids, const std::vector<float>& input_distance,
uint64_t input_k, uint64_t nq, uint64_t topk, bool ascending, scheduler::ResultSet& result);
MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const scheduler::ResultDistances& src_distances,
size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids,
scheduler::ResultDistances& tar_distances);
// static void
// MergeTopkArray(std::vector<int64_t>& tar_ids, std::vector<float>& tar_distance, uint64_t& tar_input_k,

View File

@ -17,5 +17,7 @@
# under the License.
#-------------------------------------------------------------------------------
aux_source_directory(${MILVUS_SOURCE_DIR}/src/sdk/examples/utils util_files)
add_subdirectory(grpcsimple)
add_subdirectory(simple)
add_subdirectory(partition)

View File

@ -1,371 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "sdk/examples/grpcsimple/src/ClientTest.h"
#include "MilvusApi.h"
#include <time.h>
#include <unistd.h>
#include <chrono>
#include <iostream>
#include <memory>
#include <thread>
#include <utility>
#include <vector>
//#define SET_VECTOR_IDS;
namespace {
const std::string&
GetTableName();
const char* TABLE_NAME = GetTableName().c_str();
constexpr int64_t TABLE_DIMENSION = 512;
constexpr int64_t TABLE_INDEX_FILE_SIZE = 1024;
constexpr int64_t BATCH_ROW_COUNT = 100000;
constexpr int64_t NQ = 5;
constexpr int64_t TOP_K = 10;
constexpr int64_t SEARCH_TARGET = 5000; // change this value, result is different
constexpr int64_t ADD_VECTOR_LOOP = 5;
constexpr int64_t SECONDS_EACH_HOUR = 3600;
constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8;
constexpr int32_t N_LIST = 15000;
#define BLOCK_SPLITER std::cout << "===========================================" << std::endl;
void
PrintTableSchema(const milvus::TableSchema& tb_schema) {
BLOCK_SPLITER
std::cout << "Table name: " << tb_schema.table_name << std::endl;
std::cout << "Table dimension: " << tb_schema.dimension << std::endl;
BLOCK_SPLITER
}
void
PrintSearchResult(const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array,
const std::vector<milvus::TopKQueryResult>& topk_query_result_array) {
BLOCK_SPLITER
std::cout << "Returned result count: " << topk_query_result_array.size() << std::endl;
int32_t index = 0;
for (auto& result : topk_query_result_array) {
auto search_id = search_record_array[index].first;
index++;
std::cout << "No." << std::to_string(index) << " vector " << std::to_string(search_id) << " top "
<< std::to_string(result.query_result_arrays.size()) << " search result:" << std::endl;
for (auto& item : result.query_result_arrays) {
std::cout << "\t" << std::to_string(item.id) << "\tdistance:" << std::to_string(item.distance);
std::cout << std::endl;
}
}
BLOCK_SPLITER
}
std::string
CurrentTime() {
time_t tt;
time(&tt);
tt = tt + 8 * SECONDS_EACH_HOUR;
tm t;
gmtime_r(&tt, &t);
std::string str = std::to_string(t.tm_year + 1900) + "_" + std::to_string(t.tm_mon + 1) + "_" +
std::to_string(t.tm_mday) + "_" + std::to_string(t.tm_hour) + "_" + std::to_string(t.tm_min) +
"_" + std::to_string(t.tm_sec);
return str;
}
std::string
CurrentTmDate(int64_t offset_day = 0) {
time_t tt;
time(&tt);
tt = tt + 8 * SECONDS_EACH_HOUR;
tt = tt + 24 * SECONDS_EACH_HOUR * offset_day;
tm t;
gmtime_r(&tt, &t);
std::string str =
std::to_string(t.tm_year + 1900) + "-" + std::to_string(t.tm_mon + 1) + "-" + std::to_string(t.tm_mday);
return str;
}
const std::string&
GetTableName() {
static std::string s_id("tbl_" + CurrentTime());
return s_id;
}
milvus::TableSchema
BuildTableSchema() {
milvus::TableSchema tb_schema;
tb_schema.table_name = TABLE_NAME;
tb_schema.dimension = TABLE_DIMENSION;
tb_schema.index_file_size = TABLE_INDEX_FILE_SIZE;
tb_schema.metric_type = milvus::MetricType::L2;
return tb_schema;
}
void
BuildVectors(int64_t from, int64_t to, std::vector<milvus::RowRecord>& vector_record_array) {
if (to <= from) {
return;
}
vector_record_array.clear();
for (int64_t k = from; k < to; k++) {
milvus::RowRecord record;
record.data.resize(TABLE_DIMENSION);
for (int64_t i = 0; i < TABLE_DIMENSION; i++) {
record.data[i] = (float)(k % (i + 1));
}
vector_record_array.emplace_back(record);
}
}
void
Sleep(int seconds) {
std::cout << "Waiting " << seconds << " seconds ..." << std::endl;
sleep(seconds);
}
class TimeRecorder {
public:
explicit TimeRecorder(const std::string& title) : title_(title) {
start_ = std::chrono::system_clock::now();
}
~TimeRecorder() {
std::chrono::system_clock::time_point end = std::chrono::system_clock::now();
int64_t span = (std::chrono::duration_cast<std::chrono::milliseconds>(end - start_)).count();
std::cout << title_ << " totally cost: " << span << " ms" << std::endl;
}
private:
std::string title_;
std::chrono::system_clock::time_point start_;
};
void
CheckResult(const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array,
const std::vector<milvus::TopKQueryResult>& topk_query_result_array) {
BLOCK_SPLITER
int64_t index = 0;
for (auto& result : topk_query_result_array) {
auto result_id = result.query_result_arrays[0].id;
auto search_id = search_record_array[index++].first;
if (result_id != search_id) {
std::cout << "The top 1 result is wrong: " << result_id << " vs. " << search_id << std::endl;
} else {
std::cout << "Check result sucessfully" << std::endl;
}
}
BLOCK_SPLITER
}
void
DoSearch(std::shared_ptr<milvus::Connection> conn,
const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array, const std::string& phase_name) {
std::vector<milvus::Range> query_range_array;
milvus::Range rg;
rg.start_value = CurrentTmDate();
rg.end_value = CurrentTmDate(1);
query_range_array.emplace_back(rg);
std::vector<milvus::RowRecord> record_array;
for (auto& pair : search_record_array) {
record_array.push_back(pair.second);
}
auto start = std::chrono::high_resolution_clock::now();
std::vector<milvus::TopKQueryResult> topk_query_result_array;
{
TimeRecorder rc(phase_name);
milvus::Status stat =
conn->Search(TABLE_NAME, record_array, query_range_array, TOP_K, 32, topk_query_result_array);
std::cout << "SearchVector function call status: " << stat.message() << std::endl;
}
auto finish = std::chrono::high_resolution_clock::now();
std::cout << "SEARCHVECTOR COST: "
<< std::chrono::duration_cast<std::chrono::duration<double>>(finish - start).count() << "s\n";
PrintSearchResult(search_record_array, topk_query_result_array);
CheckResult(search_record_array, topk_query_result_array);
}
} // namespace
void
ClientTest::Test(const std::string& address, const std::string& port) {
std::shared_ptr<milvus::Connection> conn = milvus::Connection::Create();
{ // connect server
milvus::ConnectParam param = {address, port};
milvus::Status stat = conn->Connect(param);
std::cout << "Connect function call status: " << stat.message() << std::endl;
}
{ // server version
std::string version = conn->ServerVersion();
std::cout << "Server version: " << version << std::endl;
}
{ // sdk version
std::string version = conn->ClientVersion();
std::cout << "SDK version: " << version << std::endl;
}
{
std::vector<std::string> tables;
milvus::Status stat = conn->ShowTables(tables);
std::cout << "ShowTables function call status: " << stat.message() << std::endl;
std::cout << "All tables: " << std::endl;
for (auto& table : tables) {
int64_t row_count = 0;
// conn->DropTable(table);
stat = conn->CountTable(table, row_count);
std::cout << "\t" << table << "(" << row_count << " rows)" << std::endl;
}
}
{ // create table
milvus::TableSchema tb_schema = BuildTableSchema();
milvus::Status stat = conn->CreateTable(tb_schema);
std::cout << "CreateTable function call status: " << stat.message() << std::endl;
PrintTableSchema(tb_schema);
bool has_table = conn->HasTable(tb_schema.table_name);
if (has_table) {
std::cout << "Table is created" << std::endl;
}
}
{ // describe table
milvus::TableSchema tb_schema;
milvus::Status stat = conn->DescribeTable(TABLE_NAME, tb_schema);
std::cout << "DescribeTable function call status: " << stat.message() << std::endl;
PrintTableSchema(tb_schema);
}
std::vector<std::pair<int64_t, milvus::RowRecord>> search_record_array;
{ // insert vectors
for (int i = 0; i < ADD_VECTOR_LOOP; i++) { // add vectors
std::vector<milvus::RowRecord> record_array;
int64_t begin_index = i * BATCH_ROW_COUNT;
BuildVectors(begin_index, begin_index + BATCH_ROW_COUNT, record_array);
#ifdef SET_VECTOR_IDS
record_ids.resize(ADD_VECTOR_LOOP * BATCH_ROW_COUNT);
for (auto j = begin_index; j < begin_index + BATCH_ROW_COUNT; j++) {
record_ids[i * BATCH_ROW_COUNT + j] = i * BATCH_ROW_COUNT + j;
}
#endif
std::vector<int64_t> record_ids;
// generate user defined ids
for (int k = 0; k < BATCH_ROW_COUNT; k++) {
record_ids.push_back(i * BATCH_ROW_COUNT + k);
}
auto start = std::chrono::high_resolution_clock::now();
milvus::Status stat = conn->Insert(TABLE_NAME, record_array, record_ids);
auto finish = std::chrono::high_resolution_clock::now();
std::cout << "InsertVector cost: "
<< std::chrono::duration_cast<std::chrono::duration<double>>(finish - start).count() << "s\n";
std::cout << "InsertVector function call status: " << stat.message() << std::endl;
std::cout << "Returned id array count: " << record_ids.size() << std::endl;
if (search_record_array.size() < NQ) {
search_record_array.push_back(std::make_pair(record_ids[SEARCH_TARGET], record_array[SEARCH_TARGET]));
}
}
}
{ // search vectors without index
Sleep(2);
int64_t row_count = 0;
milvus::Status stat = conn->CountTable(TABLE_NAME, row_count);
std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl;
// DoSearch(conn, search_record_array, "Search without index");
}
{ // wait unit build index finish
std::cout << "Wait until create all index done" << std::endl;
milvus::IndexParam index;
index.table_name = TABLE_NAME;
index.index_type = INDEX_TYPE;
index.nlist = N_LIST;
milvus::Status stat = conn->CreateIndex(index);
std::cout << "CreateIndex function call status: " << stat.message() << std::endl;
milvus::IndexParam index2;
stat = conn->DescribeIndex(TABLE_NAME, index2);
std::cout << "DescribeIndex function call status: " << stat.message() << std::endl;
}
{ // preload table
milvus::Status stat = conn->PreloadTable(TABLE_NAME);
std::cout << "PreloadTable function call status: " << stat.message() << std::endl;
}
{ // search vectors after build index finish
for (uint64_t i = 0; i < 5; ++i) {
DoSearch(conn, search_record_array, "Search after build index finish");
}
// std::cout << conn->DumpTaskTables() << std::endl;
}
{ // delete index
milvus::Status stat = conn->DropIndex(TABLE_NAME);
std::cout << "DropIndex function call status: " << stat.message() << std::endl;
int64_t row_count = 0;
stat = conn->CountTable(TABLE_NAME, row_count);
std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl;
}
{ // delete by range
milvus::Range rg;
rg.start_value = CurrentTmDate(-3);
rg.end_value = CurrentTmDate(-2);
milvus::Status stat = conn->DeleteByRange(rg, TABLE_NAME);
std::cout << "DeleteByRange function call status: " << stat.message() << std::endl;
}
{
// delete table
// Status stat = conn->DropTable(TABLE_NAME);
// std::cout << "DeleteTable function call status: " << stat.message() << std::endl;
}
{ // server status
std::string status = conn->ServerStatus();
std::cout << "Server status before disconnect: " << status << std::endl;
}
milvus::Connection::Destroy(conn);
{ // server status
std::string status = conn->ServerStatus();
std::cout << "Server status after disconnect: " << status << std::endl;
}
}

View File

@ -0,0 +1,34 @@
#-------------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#-------------------------------------------------------------------------------
aux_source_directory(src src_files)
add_executable(sdk_partition
main.cpp
${src_files}
${util_files}
)
target_link_libraries(sdk_partition
milvus_sdk
pthread
)
install(TARGETS sdk_partition DESTINATION bin)

View File

@ -0,0 +1,79 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <getopt.h>
#include <libgen.h>
#include <cstring>
#include <string>
#include "sdk/examples/partition/src/ClientTest.h"
void
print_help(const std::string& app_name);
int
main(int argc, char* argv[]) {
printf("Client start...\n");
std::string app_name = basename(argv[0]);
static struct option long_options[] = {{"server", optional_argument, nullptr, 's'},
{"port", optional_argument, nullptr, 'p'},
{"help", no_argument, nullptr, 'h'},
{nullptr, 0, nullptr, 0}};
int option_index = 0;
std::string address = "127.0.0.1", port = "19530";
app_name = argv[0];
int value;
while ((value = getopt_long(argc, argv, "s:p:h", long_options, &option_index)) != -1) {
switch (value) {
case 's': {
char* address_ptr = strdup(optarg);
address = address_ptr;
free(address_ptr);
break;
}
case 'p': {
char* port_ptr = strdup(optarg);
port = port_ptr;
free(port_ptr);
break;
}
case 'h':
default:
print_help(app_name);
return EXIT_SUCCESS;
}
}
ClientTest test;
test.Test(address, port);
printf("Client stop...\n");
return 0;
}
void
print_help(const std::string& app_name) {
printf("\n Usage: %s [OPTIONS]\n\n", app_name.c_str());
printf(" Options:\n");
printf(" -s --server Server address, default 127.0.0.1\n");
printf(" -p --port Server port, default 19530\n");
printf(" -h --help Print help information\n");
printf("\n");
}

View File

@ -0,0 +1,205 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "sdk/examples/partition/src/ClientTest.h"
#include "MilvusApi.h"
#include "sdk/examples/utils/Utils.h"
#include <src/sdk/examples/utils/TimeRecorder.h>
#include <time.h>
#include <unistd.h>
#include <chrono>
#include <iostream>
#include <memory>
#include <thread>
#include <utility>
#include <vector>
namespace {
const char* TABLE_NAME = milvus_sdk::Utils::GenTableName().c_str();
constexpr int64_t TABLE_DIMENSION = 512;
constexpr int64_t TABLE_INDEX_FILE_SIZE = 1024;
constexpr milvus::MetricType TABLE_METRIC_TYPE = milvus::MetricType::L2;
constexpr int64_t BATCH_ROW_COUNT = 10000;
constexpr int64_t NQ = 5;
constexpr int64_t TOP_K = 10;
constexpr int64_t NPROBE = 32;
constexpr int64_t SEARCH_TARGET = 5000; // change this value, result is different
constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8;
constexpr int32_t N_LIST = 15000;
constexpr int32_t PARTITION_COUNT = 5;
constexpr int32_t TARGET_PARTITION = 3;
milvus::TableSchema
BuildTableSchema() {
milvus::TableSchema tb_schema = {TABLE_NAME, TABLE_DIMENSION, TABLE_INDEX_FILE_SIZE, TABLE_METRIC_TYPE};
return tb_schema;
}
milvus::PartitionParam
BuildPartitionParam(int32_t index) {
std::string tag = std::to_string(index);
std::string partition_name = std::string(TABLE_NAME) + "_" + tag;
milvus::PartitionParam partition_param = {TABLE_NAME, partition_name, tag};
return partition_param;
}
milvus::IndexParam
BuildIndexParam() {
milvus::IndexParam index_param = {TABLE_NAME, INDEX_TYPE, N_LIST};
return index_param;
}
} // namespace
void
ClientTest::Test(const std::string& address, const std::string& port) {
std::shared_ptr<milvus::Connection> conn = milvus::Connection::Create();
milvus::Status stat;
{ // connect server
milvus::ConnectParam param = {address, port};
stat = conn->Connect(param);
std::cout << "Connect function call status: " << stat.message() << std::endl;
}
{ // create table
milvus::TableSchema tb_schema = BuildTableSchema();
stat = conn->CreateTable(tb_schema);
std::cout << "CreateTable function call status: " << stat.message() << std::endl;
milvus_sdk::Utils::PrintTableSchema(tb_schema);
}
{ // create partition
for (int32_t i = 0; i < PARTITION_COUNT; i++) {
milvus::PartitionParam partition_param = BuildPartitionParam(i);
stat = conn->CreatePartition(partition_param);
std::cout << "CreatePartition function call status: " << stat.message() << std::endl;
milvus_sdk::Utils::PrintPartitionParam(partition_param);
}
}
{ // insert vectors
milvus_sdk::TimeRecorder rc("All vectors");
for (int i = 0; i < PARTITION_COUNT * 5; i++) {
std::vector<milvus::RowRecord> record_array;
std::vector<int64_t> record_ids;
int64_t begin_index = i * BATCH_ROW_COUNT;
{ // generate vectors
milvus_sdk::TimeRecorder rc("Build vectors No." + std::to_string(i));
milvus_sdk::Utils::BuildVectors(begin_index, begin_index + BATCH_ROW_COUNT, record_array, record_ids,
TABLE_DIMENSION);
}
std::string title = "Insert " + std::to_string(record_array.size()) + " vectors No." + std::to_string(i);
milvus_sdk::TimeRecorder rc(title);
stat = conn->Insert(TABLE_NAME, std::to_string(i % PARTITION_COUNT), record_array, record_ids);
}
}
std::vector<std::pair<int64_t, milvus::RowRecord>> search_record_array;
{ // build search vectors
std::vector<milvus::RowRecord> record_array;
std::vector<int64_t> record_ids;
int64_t index = TARGET_PARTITION * BATCH_ROW_COUNT + SEARCH_TARGET;
milvus_sdk::Utils::BuildVectors(index, index + 1, record_array, record_ids, TABLE_DIMENSION);
search_record_array.push_back(std::make_pair(record_ids[0], record_array[0]));
}
milvus_sdk::Utils::Sleep(3);
{ // table row count
int64_t row_count = 0;
stat = conn->CountTable(TABLE_NAME, row_count);
std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl;
}
{ // search vectors
std::cout << "Search in correct partition" << std::endl;
std::vector<std::string> partiton_tags = {std::to_string(TARGET_PARTITION)};
std::vector<milvus::TopKQueryResult> topk_query_result_array;
milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array,
topk_query_result_array);
std::cout << "Search in wrong partition" << std::endl;
partiton_tags = {"0"};
milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array,
topk_query_result_array);
std::cout << "Search by regex matched partition tag" << std::endl;
partiton_tags = {"\\d"};
milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array,
topk_query_result_array);
}
{ // wait unit build index finish
std::cout << "Wait until create all index done" << std::endl;
milvus::IndexParam index1 = BuildIndexParam();
milvus_sdk::Utils::PrintIndexParam(index1);
stat = conn->CreateIndex(index1);
std::cout << "CreateIndex function call status: " << stat.message() << std::endl;
milvus::IndexParam index2;
stat = conn->DescribeIndex(TABLE_NAME, index2);
std::cout << "DescribeIndex function call status: " << stat.message() << std::endl;
milvus_sdk::Utils::PrintIndexParam(index2);
}
{ // table row count
int64_t row_count = 0;
stat = conn->CountTable(TABLE_NAME, row_count);
std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl;
}
{ // drop partition
milvus::PartitionParam param1 = {TABLE_NAME, "", std::to_string(TARGET_PARTITION)};
milvus_sdk::Utils::PrintPartitionParam(param1);
stat = conn->DropPartition(param1);
std::cout << "DropPartition function call status: " << stat.message() << std::endl;
}
{ // table row count
int64_t row_count = 0;
stat = conn->CountTable(TABLE_NAME, row_count);
std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl;
}
{ // search vectors
std::cout << "Search in whole table" << std::endl;
std::vector<std::string> partiton_tags;
std::vector<milvus::TopKQueryResult> topk_query_result_array;
milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array,
topk_query_result_array);
}
{ // drop index
stat = conn->DropIndex(TABLE_NAME);
std::cout << "DropIndex function call status: " << stat.message() << std::endl;
int64_t row_count = 0;
stat = conn->CountTable(TABLE_NAME, row_count);
std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl;
}
{ // drop table
stat = conn->DropTable(TABLE_NAME);
std::cout << "DropTable function call status: " << stat.message() << std::endl;
}
milvus::Connection::Destroy(conn);
}

View File

@ -17,12 +17,12 @@
# under the License.
#-------------------------------------------------------------------------------
aux_source_directory(src src_files)
add_executable(sdk_simple
main.cpp
${src_files}
${util_files}
)
target_link_libraries(sdk_simple

View File

@ -20,7 +20,7 @@
#include <cstring>
#include <string>
#include "src/ClientTest.h"
#include "sdk/examples/simple/src/ClientTest.h"
void
print_help(const std::string& app_name);

View File

@ -0,0 +1,209 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "sdk/examples/simple/src/ClientTest.h"
#include "MilvusApi.h"
#include "sdk/examples/utils/TimeRecorder.h"
#include "sdk/examples/utils/Utils.h"
#include <time.h>
#include <unistd.h>
#include <iostream>
#include <memory>
#include <thread>
#include <utility>
#include <vector>
namespace {

// Tuning knobs for the example run.
// NOTE(review): TABLE_NAME keeps a pointer into the string returned by
// GenTableName(); that string is a function-local static, so the pointer stays
// valid for the whole process lifetime — confirm GenTableName() keeps returning
// a reference to a static if it is ever changed.
const char* TABLE_NAME = milvus_sdk::Utils::GenTableName().c_str();

constexpr int64_t TABLE_DIMENSION = 512;              // vector dimension of the demo table
constexpr int64_t TABLE_INDEX_FILE_SIZE = 1024;       // index file size (MB)
constexpr milvus::MetricType TABLE_METRIC_TYPE = milvus::MetricType::L2;
constexpr int64_t BATCH_ROW_COUNT = 100000;           // rows inserted per batch
constexpr int64_t NQ = 5;                             // number of query vectors
constexpr int64_t TOP_K = 10;                         // neighbours requested per query
constexpr int64_t NPROBE = 32;                        // IVF cells probed during search
constexpr int64_t SEARCH_TARGET = 5000;               // change this value, result is different
constexpr int64_t ADD_VECTOR_LOOP = 5;                // number of insert batches
constexpr milvus::IndexType INDEX_TYPE = milvus::IndexType::gpu_ivfsq8;
constexpr int32_t N_LIST = 15000;                     // IVF cluster count for the index

// Builds the schema used to create the demo table from the constants above.
milvus::TableSchema
BuildTableSchema() {
    milvus::TableSchema tb_schema = {TABLE_NAME, TABLE_DIMENSION, TABLE_INDEX_FILE_SIZE, TABLE_METRIC_TYPE};
    return tb_schema;
}

// Builds the index description (type + nlist) for the demo table.
milvus::IndexParam
BuildIndexParam() {
    milvus::IndexParam index_param = {TABLE_NAME, INDEX_TYPE, N_LIST};
    return index_param;
}

}  // namespace
// Runs the end-to-end SDK example against the server at address:port.
// Every step prints the returned status so failures are visible but do not
// abort the flow; the generated table is dropped again before returning.
void
ClientTest::Test(const std::string& address, const std::string& port) {
    std::shared_ptr<milvus::Connection> conn = milvus::Connection::Create();
    milvus::Status stat;
    {  // connect server
        milvus::ConnectParam param = {address, port};
        stat = conn->Connect(param);
        std::cout << "Connect function call status: " << stat.message() << std::endl;
    }
    {  // server version
        std::string version = conn->ServerVersion();
        std::cout << "Server version: " << version << std::endl;
    }
    {  // sdk version
        std::string version = conn->ClientVersion();
        std::cout << "SDK version: " << version << std::endl;
    }
    {  // show all existing tables and their row counts
        std::vector<std::string> tables;
        stat = conn->ShowTables(tables);
        std::cout << "ShowTables function call status: " << stat.message() << std::endl;
        std::cout << "All tables: " << std::endl;
        for (auto& table : tables) {
            int64_t row_count = 0;
            stat = conn->CountTable(table, row_count);
            std::cout << "\t" << table << "(" << row_count << " rows)" << std::endl;
        }
    }
    {  // create table and verify it exists afterwards
        milvus::TableSchema tb_schema = BuildTableSchema();
        stat = conn->CreateTable(tb_schema);
        std::cout << "CreateTable function call status: " << stat.message() << std::endl;
        milvus_sdk::Utils::PrintTableSchema(tb_schema);
        bool has_table = conn->HasTable(tb_schema.table_name);
        if (has_table) {
            std::cout << "Table is created" << std::endl;
        }
    }
    {  // describe table (round-trips the schema from the server)
        milvus::TableSchema tb_schema;
        stat = conn->DescribeTable(TABLE_NAME, tb_schema);
        std::cout << "DescribeTable function call status: " << stat.message() << std::endl;
        milvus_sdk::Utils::PrintTableSchema(tb_schema);
    }
    {  // insert vectors: ADD_VECTOR_LOOP batches of BATCH_ROW_COUNT rows each
        for (int i = 0; i < ADD_VECTOR_LOOP; i++) {
            std::vector<milvus::RowRecord> record_array;
            std::vector<int64_t> record_ids;
            int64_t begin_index = i * BATCH_ROW_COUNT;
            {  // generate vectors (timed separately from the insert itself)
                milvus_sdk::TimeRecorder rc("Build vectors No." + std::to_string(i));
                milvus_sdk::Utils::BuildVectors(begin_index, begin_index + BATCH_ROW_COUNT, record_array, record_ids,
                                                TABLE_DIMENSION);
            }
            std::string title = "Insert " + std::to_string(record_array.size()) + " vectors No." + std::to_string(i);
            milvus_sdk::TimeRecorder rc(title);
            // Empty partition tag: insert into the table's default partition.
            stat = conn->Insert(TABLE_NAME, "", record_array, record_ids);
            std::cout << "InsertVector function call status: " << stat.message() << std::endl;
            std::cout << "Returned id array count: " << record_ids.size() << std::endl;
        }
    }
    std::vector<std::pair<int64_t, milvus::RowRecord>> search_record_array;
    {  // build search vectors: pick vector SEARCH_TARGET out of each inserted batch
        for (int64_t i = 0; i < NQ; i++) {
            std::vector<milvus::RowRecord> record_array;
            std::vector<int64_t> record_ids;
            int64_t index = i * BATCH_ROW_COUNT + SEARCH_TARGET;
            milvus_sdk::Utils::BuildVectors(index, index + 1, record_array, record_ids, TABLE_DIMENSION);
            search_record_array.push_back(std::make_pair(record_ids[0], record_array[0]));
        }
    }
    // Give the server a moment before searching freshly inserted vectors.
    milvus_sdk::Utils::Sleep(3);
    {  // search vectors (before any index is built)
        std::vector<std::string> partiton_tags;
        std::vector<milvus::TopKQueryResult> topk_query_result_array;
        milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array,
                                    topk_query_result_array);
    }
    {  // create index and wait until the build finishes, then read it back
        std::cout << "Wait until create all index done" << std::endl;
        milvus::IndexParam index1 = BuildIndexParam();
        milvus_sdk::Utils::PrintIndexParam(index1);
        stat = conn->CreateIndex(index1);
        std::cout << "CreateIndex function call status: " << stat.message() << std::endl;
        milvus::IndexParam index2;
        stat = conn->DescribeIndex(TABLE_NAME, index2);
        std::cout << "DescribeIndex function call status: " << stat.message() << std::endl;
        milvus_sdk::Utils::PrintIndexParam(index2);
    }
    {  // preload table
        stat = conn->PreloadTable(TABLE_NAME);
        std::cout << "PreloadTable function call status: " << stat.message() << std::endl;
    }
    {  // search vectors again, this time through the freshly built index
        std::vector<std::string> partiton_tags;
        std::vector<milvus::TopKQueryResult> topk_query_result_array;
        milvus_sdk::Utils::DoSearch(conn, TABLE_NAME, partiton_tags, TOP_K, NPROBE, search_record_array,
                                    topk_query_result_array);
    }
    {  // drop index, then verify the row count is unaffected
        stat = conn->DropIndex(TABLE_NAME);
        std::cout << "DropIndex function call status: " << stat.message() << std::endl;
        int64_t row_count = 0;
        stat = conn->CountTable(TABLE_NAME, row_count);
        std::cout << TABLE_NAME << "(" << row_count << " rows)" << std::endl;
    }
    {  // delete data by date range: from 3 days ago to 2 days ago
        milvus::Range rg;
        rg.start_value = milvus_sdk::Utils::CurrentTmDate(-3);
        rg.end_value = milvus_sdk::Utils::CurrentTmDate(-2);
        stat = conn->DeleteByDate(TABLE_NAME, rg);
        std::cout << "DeleteByDate function call status: " << stat.message() << std::endl;
    }
    {  // drop table
        stat = conn->DropTable(TABLE_NAME);
        std::cout << "DropTable function call status: " << stat.message() << std::endl;
    }
    {  // server status while still connected
        std::string status = conn->ServerStatus();
        std::cout << "Server status before disconnect: " << status << std::endl;
    }
    milvus::Connection::Destroy(conn);
    {  // server status after Destroy — expected to report the connection is gone
        std::string status = conn->ServerStatus();
        std::cout << "Server status after disconnect: " << status << std::endl;
    }
}

View File

@ -0,0 +1,26 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <string>
// Smoke-test driver for the Milvus C++ SDK examples: exercises connect,
// table management, insert, index, search and delete against a live server.
class ClientTest {
 public:
    // Runs the whole example flow against the server at address:port;
    // progress and call statuses are written to stdout.
    void
    Test(const std::string& address, const std::string& port);
};

View File

@ -0,0 +1,35 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "sdk/examples/utils/TimeRecorder.h"
#include <iostream>
namespace milvus_sdk {

// Announce the start of the timed section and remember when it began.
TimeRecorder::TimeRecorder(const std::string& title) : title_(title) {
    start_ = std::chrono::system_clock::now();
    std::cout << title_ << " begin..." << std::endl;
}

// Report the wall-clock time elapsed since construction, in milliseconds.
TimeRecorder::~TimeRecorder() {
    auto finish = std::chrono::system_clock::now();
    auto elapsed_ms = std::chrono::duration_cast<std::chrono::milliseconds>(finish - start_).count();
    std::cout << title_ << " totally cost: " << elapsed_ms << " ms" << std::endl;
}

}  // namespace milvus_sdk

View File

@ -0,0 +1,36 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <chrono>
#include <string>
namespace milvus_sdk {

// Scope-based wall-clock timer used by the SDK examples: construction records
// the start time (and prints the title), destruction prints the elapsed time,
// so wrapping a code block in a TimeRecorder instance times it.
class TimeRecorder {
 public:
    // title: label printed with the timing messages.
    explicit TimeRecorder(const std::string& title);

    ~TimeRecorder();

 private:
    std::string title_;                            // label for log output
    std::chrono::system_clock::time_point start_;  // construction timestamp
};

}  // namespace milvus_sdk

View File

@ -0,0 +1,223 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "sdk/examples/utils/Utils.h"
#include "sdk/examples/utils/TimeRecorder.h"

#include <time.h>
#include <unistd.h>

#include <chrono>
#include <iostream>
#include <memory>
#include <thread>
#include <utility>
#include <vector>
namespace milvus_sdk {
constexpr int64_t SECONDS_EACH_HOUR = 3600;
#define BLOCK_SPLITER std::cout << "===========================================" << std::endl;
// Returns the current time formatted as "YYYY_M_D_H_M_S" (no zero padding).
// NOTE(review): adding 8h to the epoch before gmtime_r hardcodes a UTC+8
// timezone instead of using localtime_r — confirm this is intentional for
// deployments outside UTC+8.
std::string
Utils::CurrentTime() {
    time_t tt;
    time(&tt);
    tt = tt + 8 * SECONDS_EACH_HOUR;  // shift to UTC+8 before UTC decomposition
    tm t;
    gmtime_r(&tt, &t);  // thread-safe variant of gmtime
    // tm_year counts from 1900, tm_mon is 0-based.
    std::string str = std::to_string(t.tm_year + 1900) + "_" + std::to_string(t.tm_mon + 1) + "_" +
                      std::to_string(t.tm_mday) + "_" + std::to_string(t.tm_hour) + "_" + std::to_string(t.tm_min) +
                      "_" + std::to_string(t.tm_sec);
    return str;
}
// Returns the date `offset_day` days from today as "YYYY-M-D" (no zero
// padding); offset_day may be negative for past dates.
// NOTE(review): shares the hardcoded UTC+8 shift with CurrentTime() — confirm.
std::string
Utils::CurrentTmDate(int64_t offset_day) {
    time_t tt;
    time(&tt);
    tt = tt + 8 * SECONDS_EACH_HOUR;             // shift to UTC+8
    tt = tt + 24 * SECONDS_EACH_HOUR * offset_day;  // move by whole days
    tm t;
    gmtime_r(&tt, &t);
    std::string str =
        std::to_string(t.tm_year + 1900) + "-" + std::to_string(t.tm_mon + 1) + "-" + std::to_string(t.tm_mday);
    return str;
}
// Blocks the calling thread for `seconds` seconds, announcing the wait on
// stdout. Uses std::this_thread::sleep_for instead of POSIX sleep() so the
// helper is portable and not cut short by signal delivery.
void
Utils::Sleep(int seconds) {
    std::cout << "Waiting " << seconds << " seconds ..." << std::endl;
    if (seconds <= 0) {
        return;  // nothing to wait for
    }
    std::this_thread::sleep_for(std::chrono::seconds(seconds));
}
// Returns a process-wide unique table name derived from the time of first
// call. The function-local static is initialized exactly once (thread-safe
// since C++11), so every subsequent call returns the same name.
const std::string&
Utils::GenTableName() {
    static const std::string unique_name = "tbl_" + CurrentTime();
    return unique_name;
}
// Human-readable name for a milvus::MetricType value.
std::string
Utils::MetricTypeName(const milvus::MetricType& metric_type) {
    if (metric_type == milvus::MetricType::L2) {
        return "L2 distance";
    }
    if (metric_type == milvus::MetricType::IP) {
        return "Inner product";
    }
    return "Unknown metric type";
}
// Human-readable name for a milvus::IndexType value.
std::string
Utils::IndexTypeName(const milvus::IndexType& index_type) {
    if (index_type == milvus::IndexType::cpu_idmap) {
        return "cpu idmap";
    }
    if (index_type == milvus::IndexType::gpu_ivfflat) {
        return "gpu ivflat";
    }
    if (index_type == milvus::IndexType::gpu_ivfsq8) {
        return "gpu ivfsq8";
    }
    if (index_type == milvus::IndexType::mix_nsg) {
        return "mix nsg";
    }
    return "Unknown index type";
}
// Dumps a table schema to stdout, framed by separator lines.
void
Utils::PrintTableSchema(const milvus::TableSchema& tb_schema) {
    BLOCK_SPLITER
    std::cout << "Table name: " << tb_schema.table_name << std::endl
              << "Table dimension: " << tb_schema.dimension << std::endl
              << "Table index file size: " << tb_schema.index_file_size << std::endl
              << "Table metric type: " << MetricTypeName(tb_schema.metric_type) << std::endl;
    BLOCK_SPLITER
}
// Dumps a partition parameter set to stdout, framed by separator lines.
void
Utils::PrintPartitionParam(const milvus::PartitionParam& partition_param) {
    BLOCK_SPLITER
    std::cout << "Table name: " << partition_param.table_name << std::endl
              << "Partition name: " << partition_param.partition_name << std::endl
              << "Partition tag: " << partition_param.partition_tag << std::endl;
    BLOCK_SPLITER
}
// Dumps an index parameter set to stdout, framed by separator lines.
void
Utils::PrintIndexParam(const milvus::IndexParam& index_param) {
    BLOCK_SPLITER
    std::cout << "Index table name: " << index_param.table_name << std::endl
              << "Index type: " << IndexTypeName(index_param.index_type) << std::endl
              << "Index nlist: " << index_param.nlist << std::endl;
    BLOCK_SPLITER
}
// Fills vector_record_array / record_ids with deterministic test vectors for
// ids in [from, to); each vector has `dimension` elements where element i of
// vector k is (float)(k % (i + 1)). Previous contents of both outputs are
// discarded. No-op when the range is empty (to <= from).
void
Utils::BuildVectors(int64_t from, int64_t to, std::vector<milvus::RowRecord>& vector_record_array,
                    std::vector<int64_t>& record_ids, int64_t dimension) {
    if (to <= from) {
        return;
    }

    vector_record_array.clear();
    record_ids.clear();
    // Reserve once up front to avoid repeated reallocation on large batches.
    vector_record_array.reserve(static_cast<size_t>(to - from));
    record_ids.reserve(static_cast<size_t>(to - from));

    for (int64_t k = from; k < to; k++) {
        milvus::RowRecord record;
        record.data.resize(dimension);
        for (int64_t i = 0; i < dimension; i++) {
            record.data[i] = (float)(k % (i + 1));
        }
        vector_record_array.emplace_back(std::move(record));  // avoid copying the data buffer
        record_ids.push_back(k);
    }
}
// Dumps every TopKQueryResult to stdout, pairing result i with the i-th
// queried vector in search_record_array.
void
Utils::PrintSearchResult(const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array,
                         const std::vector<milvus::TopKQueryResult>& topk_query_result_array) {
    BLOCK_SPLITER
    std::cout << "Returned result count: " << topk_query_result_array.size() << std::endl;
    for (size_t i = 0; i < topk_query_result_array.size(); ++i) {
        const auto& result = topk_query_result_array[i];
        auto search_id = search_record_array[i].first;
        std::cout << "No." << std::to_string(i + 1) << " vector " << std::to_string(search_id) << " top "
                  << std::to_string(result.query_result_arrays.size()) << " search result:" << std::endl;
        for (const auto& item : result.query_result_arrays) {
            std::cout << "\t" << std::to_string(item.id) << "\tdistance:" << std::to_string(item.distance);
            std::cout << std::endl;
        }
    }
    BLOCK_SPLITER
}
// Verifies that the top-1 hit of every query equals the id of the vector that
// was queried (the nearest neighbour of an inserted vector should be itself).
// One log line is printed per query. Unlike the original, empty result lists
// and a result array longer than the query array are tolerated instead of
// causing out-of-range access.
void
Utils::CheckSearchResult(const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array,
                         const std::vector<milvus::TopKQueryResult>& topk_query_result_array) {
    BLOCK_SPLITER
    int64_t index = 0;
    for (auto& result : topk_query_result_array) {
        if (static_cast<size_t>(index) >= search_record_array.size()) {
            std::cout << "More results than query vectors, stop checking" << std::endl;
            break;
        }
        auto search_id = search_record_array[index++].first;
        if (result.query_result_arrays.empty()) {
            std::cout << "Empty result for vector " << search_id << std::endl;
            continue;
        }
        auto result_id = result.query_result_arrays[0].id;
        if (result_id != search_id) {
            std::cout << "The top 1 result is wrong: " << result_id << " vs. " << search_id << std::endl;
        } else {
            std::cout << "Check result successfully" << std::endl;
        }
    }
    BLOCK_SPLITER
}
// Searches `table_name` for every vector in search_record_array, restricted
// to a [today, tomorrow) date range, then prints and verifies the results.
// topk_query_result_array receives the server's answers.
void
Utils::DoSearch(std::shared_ptr<milvus::Connection> conn, const std::string& table_name,
                const std::vector<std::string>& partiton_tags, int64_t top_k, int64_t nprobe,
                const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array,
                std::vector<milvus::TopKQueryResult>& topk_query_result_array) {
    topk_query_result_array.clear();

    // Limit the search to data inserted today.
    milvus::Range today_range;
    today_range.start_value = CurrentTmDate();
    today_range.end_value = CurrentTmDate(1);
    std::vector<milvus::Range> date_ranges;
    date_ranges.emplace_back(today_range);

    // Strip the expected ids: the server only needs the raw vectors.
    std::vector<milvus::RowRecord> query_records;
    for (const auto& id_and_record : search_record_array) {
        query_records.push_back(id_and_record.second);
    }

    {
        BLOCK_SPLITER
        milvus_sdk::TimeRecorder rc("search");
        milvus::Status stat =
            conn->Search(table_name, partiton_tags, query_records, date_ranges, top_k, nprobe,
                         topk_query_result_array);
        std::cout << "SearchVector function call status: " << stat.message() << std::endl;
        BLOCK_SPLITER
    }

    PrintSearchResult(search_record_array, topk_query_result_array);
    CheckSearchResult(search_record_array, topk_query_result_array);
}
} // namespace milvus_sdk

View File

@ -0,0 +1,77 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include "MilvusApi.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
namespace milvus_sdk {
// Stateless helper collection for the SDK example programs: time/date
// formatting, unique table-name generation, pretty-printing of schemas,
// params and results, and a convenience wrapper around Connection::Search.
class Utils {
 public:
    // Current time formatted as "YYYY_M_D_H_M_S" (UTC+8 in the implementation).
    static std::string
    CurrentTime();

    // Date `offset_day` days from today, formatted as "YYYY-M-D".
    static std::string
    CurrentTmDate(int64_t offset_day = 0);

    // Process-wide unique table name ("tbl_" + time of first call);
    // returns the same value on every call.
    static const std::string&
    GenTableName();

    // Sleep for `seconds` seconds, logging the wait to stdout.
    static void
    Sleep(int seconds);

    // Human-readable name of a metric type.
    static std::string
    MetricTypeName(const milvus::MetricType& metric_type);

    // Human-readable name of an index type.
    static std::string
    IndexTypeName(const milvus::IndexType& index_type);

    // Print a table schema to stdout.
    static void
    PrintTableSchema(const milvus::TableSchema& tb_schema);

    // Print a partition parameter set to stdout.
    static void
    PrintPartitionParam(const milvus::PartitionParam& partition_param);

    // Print an index parameter set to stdout.
    static void
    PrintIndexParam(const milvus::IndexParam& index_param);

    // Fill vector_record_array/record_ids with deterministic vectors for ids
    // [from, to); each vector has `dimension` elements. Prior contents are cleared.
    static void
    BuildVectors(int64_t from, int64_t to, std::vector<milvus::RowRecord>& vector_record_array,
                 std::vector<int64_t>& record_ids, int64_t dimension);

    // Print each query's top-k result list to stdout.
    static void
    PrintSearchResult(const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array,
                      const std::vector<milvus::TopKQueryResult>& topk_query_result_array);

    // Verify the top-1 hit of every query matches the queried vector's id.
    static void
    CheckSearchResult(const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array,
                      const std::vector<milvus::TopKQueryResult>& topk_query_result_array);

    // Run a search for the given vectors (date range [today, tomorrow)),
    // then print and verify the results via the two helpers above.
    static void
    DoSearch(std::shared_ptr<milvus::Connection> conn, const std::string& table_name,
             const std::vector<std::string>& partiton_tags, int64_t top_k, int64_t nprobe,
             const std::vector<std::pair<int64_t, milvus::RowRecord>>& search_record_array,
             std::vector<milvus::TopKQueryResult>& topk_query_result_array);
};
} // namespace milvus_sdk

View File

@ -138,8 +138,8 @@ ClientProxy::CreateIndex(const IndexParam& index_param) {
}
Status
ClientProxy::Insert(const std::string& table_name, const std::vector<RowRecord>& record_array,
std::vector<int64_t>& id_array) {
ClientProxy::Insert(const std::string& table_name, const std::string& partition_tag,
const std::vector<RowRecord>& record_array, std::vector<int64_t>& id_array) {
Status status = Status::OK();
try {
////////////////////////////////////////////////////////////////////////////
@ -185,6 +185,7 @@ ClientProxy::Insert(const std::string& table_name, const std::vector<RowRecord>&
#else
::milvus::grpc::InsertParam insert_param;
insert_param.set_table_name(table_name);
insert_param.set_partition_tag(partition_tag);
for (auto& record : record_array) {
::milvus::grpc::RowRecord* grpc_record = insert_param.add_row_record_array();
@ -215,15 +216,18 @@ ClientProxy::Insert(const std::string& table_name, const std::vector<RowRecord>&
}
Status
ClientProxy::Search(const std::string& table_name, const std::vector<RowRecord>& query_record_array,
const std::vector<Range>& query_range_array, int64_t topk, int64_t nprobe,
std::vector<TopKQueryResult>& topk_query_result_array) {
ClientProxy::Search(const std::string& table_name, const std::vector<std::string>& partiton_tags,
const std::vector<RowRecord>& query_record_array, const std::vector<Range>& query_range_array,
int64_t topk, int64_t nprobe, std::vector<TopKQueryResult>& topk_query_result_array) {
try {
// step 1: convert vectors data
::milvus::grpc::SearchParam search_param;
search_param.set_table_name(table_name);
search_param.set_topk(topk);
search_param.set_nprobe(nprobe);
for (auto& tag : partiton_tags) {
search_param.add_partition_tag_array(tag);
}
for (auto& record : query_record_array) {
::milvus::grpc::RowRecord* row_record = search_param.add_query_record_array();
for (auto& rec : record.data) {
@ -349,13 +353,13 @@ ClientProxy::DumpTaskTables() const {
}
Status
ClientProxy::DeleteByRange(milvus::Range& range, const std::string& table_name) {
ClientProxy::DeleteByDate(const std::string& table_name, const milvus::Range& range) {
try {
::milvus::grpc::DeleteByRangeParam delete_by_range_param;
::milvus::grpc::DeleteByDateParam delete_by_range_param;
delete_by_range_param.set_table_name(table_name);
delete_by_range_param.mutable_range()->set_start_value(range.start_value);
delete_by_range_param.mutable_range()->set_end_value(range.end_value);
return client_ptr_->DeleteByRange(delete_by_range_param);
return client_ptr_->DeleteByDate(delete_by_range_param);
} catch (std::exception& ex) {
return Status(StatusCode::UnknownError, "fail to delete by range: " + std::string(ex.what()));
}
@ -401,4 +405,51 @@ ClientProxy::DropIndex(const std::string& table_name) const {
}
}
// Creates a partition described by partition_param via the CreatePartition
// RPC. Returns the server's status; any client-side exception is converted
// into an UnknownError status.
Status
ClientProxy::CreatePartition(const PartitionParam& partition_param) {
    try {
        // Map the SDK-level PartitionParam onto its gRPC counterpart.
        ::milvus::grpc::PartitionParam grpc_partition_param;
        grpc_partition_param.set_table_name(partition_param.table_name);
        grpc_partition_param.set_partition_name(partition_param.partition_name);
        grpc_partition_param.set_tag(partition_param.partition_tag);
        Status status = client_ptr_->CreatePartition(grpc_partition_param);
        return status;
    } catch (std::exception& ex) {
        return Status(StatusCode::UnknownError, "fail to create partition: " + std::string(ex.what()));
    }
}
// Lists all partitions of `table_name` via the ShowPartitions RPC and copies
// them into `partition_array` as SDK-level PartitionParam entries. Returns
// the server's status; any client-side exception becomes UnknownError.
Status
ClientProxy::ShowPartitions(const std::string& table_name, PartitionList& partition_array) const {
    try {
        ::milvus::grpc::TableName grpc_table_name;
        grpc_table_name.set_table_name(table_name);
        ::milvus::grpc::PartitionList grpc_partition_list;
        Status status = client_ptr_->ShowPartitions(grpc_table_name, grpc_partition_list);
        // partition_array_size() returns int; use a matching signed index to
        // avoid the signed/unsigned comparison of the original loop, and hoist
        // the invariant size call out of the loop condition.
        const int partition_count = grpc_partition_list.partition_array_size();
        partition_array.resize(partition_count);
        for (int i = 0; i < partition_count; ++i) {
            partition_array[i].table_name = grpc_partition_list.partition_array(i).table_name();
            partition_array[i].partition_name = grpc_partition_list.partition_array(i).partition_name();
            partition_array[i].partition_tag = grpc_partition_list.partition_array(i).tag();
        }
        return status;
    } catch (std::exception& ex) {
        return Status(StatusCode::UnknownError, "fail to show partitions: " + std::string(ex.what()));
    }
}
// Drops the partition described by partition_param via the DropPartition RPC.
// Returns the server's status; any client-side exception is converted into
// an UnknownError status.
Status
ClientProxy::DropPartition(const PartitionParam& partition_param) {
    try {
        // Map the SDK-level PartitionParam onto its gRPC counterpart.
        ::milvus::grpc::PartitionParam grpc_partition_param;
        grpc_partition_param.set_table_name(partition_param.table_name);
        grpc_partition_param.set_partition_name(partition_param.partition_name);
        grpc_partition_param.set_tag(partition_param.partition_tag);
        Status status = client_ptr_->DropPartition(grpc_partition_param);
        return status;
    } catch (std::exception& ex) {
        return Status(StatusCode::UnknownError, "fail to drop partition: " + std::string(ex.what()));
    }
}
} // namespace milvus

View File

@ -54,13 +54,13 @@ class ClientProxy : public Connection {
CreateIndex(const IndexParam& index_param) override;
Status
Insert(const std::string& table_name, const std::vector<RowRecord>& record_array,
Insert(const std::string& table_name, const std::string& partition_tag, const std::vector<RowRecord>& record_array,
std::vector<int64_t>& id_array) override;
Status
Search(const std::string& table_name, const std::vector<RowRecord>& query_record_array,
const std::vector<Range>& query_range_array, int64_t topk, int64_t nprobe,
std::vector<TopKQueryResult>& topk_query_result_array) override;
Search(const std::string& table_name, const std::vector<std::string>& partiton_tags,
const std::vector<RowRecord>& query_record_array, const std::vector<Range>& query_range_array, int64_t topk,
int64_t nprobe, std::vector<TopKQueryResult>& topk_query_result_array) override;
Status
DescribeTable(const std::string& table_name, TableSchema& table_schema) override;
@ -84,7 +84,7 @@ class ClientProxy : public Connection {
DumpTaskTables() const override;
Status
DeleteByRange(Range& range, const std::string& table_name) override;
DeleteByDate(const std::string& table_name, const Range& range) override;
Status
PreloadTable(const std::string& table_name) const override;
@ -95,6 +95,15 @@ class ClientProxy : public Connection {
Status
DropIndex(const std::string& table_name) const override;
Status
CreatePartition(const PartitionParam& partition_param) override;
Status
ShowPartitions(const std::string& table_name, PartitionList& partition_array) const override;
Status
DropPartition(const PartitionParam& partition_param) override;
private:
std::shared_ptr<::grpc::Channel> channel_;

View File

@ -259,13 +259,13 @@ GrpcClient::PreloadTable(milvus::grpc::TableName& table_name) {
}
Status
GrpcClient::DeleteByRange(grpc::DeleteByRangeParam& delete_by_range_param) {
GrpcClient::DeleteByDate(grpc::DeleteByDateParam& delete_by_range_param) {
ClientContext context;
::milvus::grpc::Status response;
::grpc::Status grpc_status = stub_->DeleteByRange(&context, delete_by_range_param, &response);
::grpc::Status grpc_status = stub_->DeleteByDate(&context, delete_by_range_param, &response);
if (!grpc_status.ok()) {
std::cerr << "DeleteByRange gRPC failed!" << std::endl;
std::cerr << "DeleteByDate gRPC failed!" << std::endl;
return Status(StatusCode::RPCFailed, grpc_status.error_message());
}
@ -317,4 +317,57 @@ GrpcClient::DropIndex(grpc::TableName& table_name) {
return Status::OK();
}
// Issues the CreatePartition RPC. Returns RPCFailed when the transport-level
// call fails, ServerFailed when the server answers with a non-SUCCESS code,
// otherwise OK. Failure reasons are also echoed to stderr.
Status
GrpcClient::CreatePartition(const grpc::PartitionParam& partition_param) {
    ClientContext context;
    ::milvus::grpc::Status response;
    ::grpc::Status grpc_status = stub_->CreatePartition(&context, partition_param, &response);

    if (!grpc_status.ok()) {
        // Transport-level failure (connection, deadline, ...).
        std::cerr << "CreatePartition gRPC failed!" << std::endl;
        return Status(StatusCode::RPCFailed, grpc_status.error_message());
    }

    if (response.error_code() != grpc::SUCCESS) {
        // The RPC arrived, but the server rejected the request.
        std::cerr << response.reason() << std::endl;
        return Status(StatusCode::ServerFailed, response.reason());
    }
    return Status::OK();
}
// Issues the ShowPartitions RPC, filling partition_array with the server's
// reply. Returns RPCFailed when the transport-level call fails, ServerFailed
// when the embedded status is non-SUCCESS, otherwise OK.
Status
GrpcClient::ShowPartitions(const grpc::TableName& table_name, grpc::PartitionList& partition_array) const {
    ClientContext context;
    ::grpc::Status grpc_status = stub_->ShowPartitions(&context, table_name, &partition_array);

    if (!grpc_status.ok()) {
        // Transport-level failure (connection, deadline, ...).
        std::cerr << "ShowPartitions gRPC failed!" << std::endl;
        return Status(StatusCode::RPCFailed, grpc_status.error_message());
    }

    if (partition_array.status().error_code() != grpc::SUCCESS) {
        // The RPC arrived, but the server reported an error in the payload.
        std::cerr << partition_array.status().reason() << std::endl;
        return Status(StatusCode::ServerFailed, partition_array.status().reason());
    }
    return Status::OK();
}
// Issues the DropPartition RPC. Returns RPCFailed when the transport-level
// call fails, ServerFailed when the server answers with a non-SUCCESS code,
// otherwise OK. Failure reasons are also echoed to stderr.
Status
GrpcClient::DropPartition(const ::milvus::grpc::PartitionParam& partition_param) {
    ClientContext context;
    ::milvus::grpc::Status response;
    ::grpc::Status grpc_status = stub_->DropPartition(&context, partition_param, &response);

    if (!grpc_status.ok()) {
        // Transport-level failure (connection, deadline, ...).
        std::cerr << "DropPartition gRPC failed!" << std::endl;
        return Status(StatusCode::RPCFailed, grpc_status.error_message());
    }

    if (response.error_code() != grpc::SUCCESS) {
        // The RPC arrived, but the server rejected the request.
        std::cerr << response.reason() << std::endl;
        return Status(StatusCode::ServerFailed, response.reason());
    }
    return Status::OK();
}
} // namespace milvus

View File

@ -72,7 +72,7 @@ class GrpcClient {
Cmd(std::string& result, const std::string& cmd);
Status
DeleteByRange(grpc::DeleteByRangeParam& delete_by_range_param);
DeleteByDate(grpc::DeleteByDateParam& delete_by_range_param);
Status
PreloadTable(grpc::TableName& table_name);
@ -83,6 +83,15 @@ class GrpcClient {
Status
DropIndex(grpc::TableName& table_name);
Status
CreatePartition(const grpc::PartitionParam& partition_param);
Status
ShowPartitions(const grpc::TableName& table_name, grpc::PartitionList& partition_array) const;
Status
DropPartition(const ::milvus::grpc::PartitionParam& partition_param);
Status
Disconnect();

View File

@ -64,7 +64,7 @@ struct TableSchema {
/**
* @brief Range information
* for DATE partition, the format is like: 'year-month-day'
* for DATE range, the format is like: 'year-month-day'
*/
struct Range {
std::string start_value; ///< Range start
@ -102,6 +102,17 @@ struct IndexParam {
int32_t nlist;
};
/**
* @brief partition parameters
*/
struct PartitionParam {
std::string table_name;
std::string partition_name;
std::string partition_tag;
};
using PartitionList = std::vector<PartitionParam>;
/**
* @brief SDK main class
*/
@ -195,7 +206,7 @@ class Connection {
*
* This method is used to create table
*
* @param table_name, table name is going to be tested.
* @param table_name, target table's name.
*
 * @return Indicate if the table exists.
*/
@ -205,9 +216,9 @@ class Connection {
/**
* @brief Delete table method
*
* This method is used to delete table.
* This method is used to delete table(and its partitions).
*
* @param table_name, table name is going to be deleted.
* @param table_name, target table's name.
*
* @return Indicate if table is delete successfully.
*/
@ -217,7 +228,7 @@ class Connection {
/**
* @brief Create index method
*
* This method is used to create index for whole table
* This method is used to create index for whole table(and its partitions).
*
* @param IndexParam
* table_name, table name is going to be create index.
@ -235,14 +246,15 @@ class Connection {
*
* This method is used to add vector array to table.
*
* @param table_name, table_name is inserted.
* @param table_name, target table's name.
* @param partition_tag, target partition's tag, keep empty if no partition.
* @param record_array, vector array is inserted.
* @param id_array, after inserted every vector is given a id.
*
* @return Indicate if vector array are inserted successfully
*/
virtual Status
Insert(const std::string& table_name, const std::vector<RowRecord>& record_array,
Insert(const std::string& table_name, const std::string& partition_tag, const std::vector<RowRecord>& record_array,
std::vector<int64_t>& id_array) = 0;
/**
@ -250,7 +262,8 @@ class Connection {
*
* This method is used to query vector in table.
*
* @param table_name, table_name is queried.
 * @param table_name, target table's name.
 * @param partition_tags, target partitions, keep empty if no partition is specified.
* @param query_record_array, all vector are going to be queried.
* @param query_range_array, time ranges, if not specified, will search in whole table
* @param topk, how many similarity vectors will be searched.
@ -259,16 +272,16 @@ class Connection {
* @return Indicate if query is successful.
*/
virtual Status
Search(const std::string& table_name, const std::vector<RowRecord>& query_record_array,
const std::vector<Range>& query_range_array, int64_t topk, int64_t nprobe,
std::vector<TopKQueryResult>& topk_query_result_array) = 0;
Search(const std::string& table_name, const std::vector<std::string>& partiton_tags,
const std::vector<RowRecord>& query_record_array, const std::vector<Range>& query_range_array, int64_t topk,
int64_t nprobe, std::vector<TopKQueryResult>& topk_query_result_array) = 0;
/**
* @brief Show table description
*
* This method is used to show table information.
*
* @param table_name, which table is show.
* @param table_name, target table's name.
* @param table_schema, table_schema is given when operation is successful.
*
* @return Indicate if this operation is successful.
@ -281,8 +294,8 @@ class Connection {
*
* This method is used to get table row count.
*
* @param table_name, table's name.
* @param row_count, table total row count.
* @param table_name, target table's name.
* @param row_count, table total row count(including partitions).
*
* @return Indicate if this operation is successful.
*/
@ -331,21 +344,28 @@ class Connection {
virtual std::string
ServerStatus() const = 0;
/**
* @brief dump server tasks information
*
* This method is internal used.
*
* @return Server status.
*/
virtual std::string
DumpTaskTables() const = 0;
/**
* @brief delete tables by range
* @brief delete tables by date range
*
* This method is used to delete tables by range.
* This method is used to delete table data by date range.
*
* @param table_name, target table's name.
* @param Range, table range to delete.
* @param table_name
*
* @return Indicate if this operation is successful.
*/
virtual Status
DeleteByRange(Range& range, const std::string& table_name) = 0;
DeleteByDate(const std::string& table_name, const Range& range) = 0;
/**
* @brief preload table
@ -364,9 +384,10 @@ class Connection {
*
* This method is used to describe index
*
* @param table_name
* @param table_name, target table's name.
* @param index_param, returned index information.
*
* @return index informations and indicate if this operation is successful.
* @return Indicate if this operation is successful.
*/
virtual Status
DescribeIndex(const std::string& table_name, IndexParam& index_param) const = 0;
@ -374,14 +395,53 @@ class Connection {
/**
* @brief drop index
*
* This method is used to drop index
* This method is used to drop index of table(and its partitions)
*
* @param table_name
* @param table_name, target table's name.
*
* @return Indicate if this operation is successful.
*/
virtual Status
DropIndex(const std::string& table_name) const = 0;
/**
* @brief Create partition method
*
* This method is used to create table partition
*
* @param param, use to provide partition information to be created.
*
* @return Indicate if partition is created successfully
*/
virtual Status
CreatePartition(const PartitionParam& param) = 0;
/**
* @brief Test table existence method
*
* This method is used to create table
*
* @param table_name, table name is going to be tested.
* @param partition_array, partition array of the table.
*
* @return Indicate if this operation is successful
*/
virtual Status
ShowPartitions(const std::string& table_name, PartitionList& partition_array) const = 0;
/**
* @brief Delete partition method
*
* This method is used to delete table partition.
*
* @param param, target partition to be deleted.
* NOTE: if param.table_name is empty, you must specify param.partition_name,
* else you can specify param.table_name and param.tag and let the param.partition_name be empty
*
* @return Indicate if partition is delete successfully.
*/
virtual Status
DropPartition(const PartitionParam& param) = 0;
};
} // namespace milvus

View File

@ -83,16 +83,16 @@ ConnectionImpl::CreateIndex(const IndexParam& index_param) {
}
Status
ConnectionImpl::Insert(const std::string& table_name, const std::vector<RowRecord>& record_array,
std::vector<int64_t>& id_array) {
return client_proxy_->Insert(table_name, record_array, id_array);
ConnectionImpl::Insert(const std::string& table_name, const std::string& partition_tag,
const std::vector<RowRecord>& record_array, std::vector<int64_t>& id_array) {
return client_proxy_->Insert(table_name, partition_tag, record_array, id_array);
}
Status
ConnectionImpl::Search(const std::string& table_name, const std::vector<RowRecord>& query_record_array,
const std::vector<Range>& query_range_array, int64_t topk, int64_t nprobe,
std::vector<TopKQueryResult>& topk_query_result_array) {
return client_proxy_->Search(table_name, query_record_array, query_range_array, topk, nprobe,
ConnectionImpl::Search(const std::string& table_name, const std::vector<std::string>& partiton_tags,
const std::vector<RowRecord>& query_record_array, const std::vector<Range>& query_range_array,
int64_t topk, int64_t nprobe, std::vector<TopKQueryResult>& topk_query_result_array) {
return client_proxy_->Search(table_name, partiton_tags, query_record_array, query_range_array, topk, nprobe,
topk_query_result_array);
}
@ -127,8 +127,8 @@ ConnectionImpl::DumpTaskTables() const {
}
Status
ConnectionImpl::DeleteByRange(Range& range, const std::string& table_name) {
return client_proxy_->DeleteByRange(range, table_name);
ConnectionImpl::DeleteByDate(const std::string& table_name, const Range& range) {
return client_proxy_->DeleteByDate(table_name, range);
}
Status
@ -146,4 +146,19 @@ ConnectionImpl::DropIndex(const std::string& table_name) const {
return client_proxy_->DropIndex(table_name);
}
Status
ConnectionImpl::CreatePartition(const PartitionParam& param) {
return client_proxy_->CreatePartition(param);
}
Status
ConnectionImpl::ShowPartitions(const std::string& table_name, PartitionList& partition_array) const {
return client_proxy_->ShowPartitions(table_name, partition_array);
}
Status
ConnectionImpl::DropPartition(const PartitionParam& param) {
return client_proxy_->DropPartition(param);
}
} // namespace milvus

View File

@ -56,13 +56,13 @@ class ConnectionImpl : public Connection {
CreateIndex(const IndexParam& index_param) override;
Status
Insert(const std::string& table_name, const std::vector<RowRecord>& record_array,
Insert(const std::string& table_name, const std::string& partition_tag, const std::vector<RowRecord>& record_array,
std::vector<int64_t>& id_array) override;
Status
Search(const std::string& table_name, const std::vector<RowRecord>& query_record_array,
const std::vector<Range>& query_range_array, int64_t topk, int64_t nprobe,
std::vector<TopKQueryResult>& topk_query_result_array) override;
Search(const std::string& table_name, const std::vector<std::string>& partiton_tags,
const std::vector<RowRecord>& query_record_array, const std::vector<Range>& query_range_array, int64_t topk,
int64_t nprobe, std::vector<TopKQueryResult>& topk_query_result_array) override;
Status
DescribeTable(const std::string& table_name, TableSchema& table_schema) override;
@ -86,7 +86,7 @@ class ConnectionImpl : public Connection {
DumpTaskTables() const override;
Status
DeleteByRange(Range& range, const std::string& table_name) override;
DeleteByDate(const std::string& table_name, const Range& range) override;
Status
PreloadTable(const std::string& table_name) const override;
@ -97,6 +97,15 @@ class ConnectionImpl : public Connection {
Status
DropIndex(const std::string& table_name) const override;
Status
CreatePartition(const PartitionParam& param) override;
Status
ShowPartitions(const std::string& table_name, PartitionList& partition_array) const override;
Status
DropPartition(const PartitionParam& param) override;
private:
std::shared_ptr<ClientProxy> client_proxy_;
};

View File

@ -25,6 +25,7 @@
#include "config/YamlConfigMgr.h"
#include "server/Config.h"
#include "utils/CommonUtil.h"
#include "utils/StringHelpFunctions.h"
#include "utils/ValidationUtil.h"
namespace milvus {
@ -306,6 +307,7 @@ Config::ResetDefaultConfig() {
return s;
}
#ifdef MILVUS_GPU_VERSION
s = SetCacheConfigGpuCacheCapacity(CONFIG_CACHE_GPU_CACHE_CAPACITY_DEFAULT);
if (!s.ok()) {
return s;
@ -315,6 +317,7 @@ Config::ResetDefaultConfig() {
if (!s.ok()) {
return s;
}
#endif
s = SetCacheConfigCacheInsertData(CONFIG_CACHE_CACHE_INSERT_DATA_DEFAULT);
if (!s.ok()) {
@ -343,6 +346,11 @@ Config::ResetDefaultConfig() {
return s;
}
s = SetResourceConfigSearchResources(CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT);
if (!s.ok()) {
return s;
}
s = SetResourceConfigIndexBuildDevice(CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT);
if (!s.ok()) {
return s;
@ -403,8 +411,7 @@ Status
Config::CheckServerConfigDeployMode(const std::string& value) {
if (value != "single" && value != "cluster_readonly" && value != "cluster_writable") {
return Status(SERVER_INVALID_ARGUMENT,
"server_config.deploy_mode is not one of "
"single, cluster_readonly, and cluster_writable.");
"server_config.deploy_mode is not one of single, cluster_readonly, and cluster_writable.");
}
return Status::OK();
}
@ -592,15 +599,15 @@ Config::CheckCacheConfigGpuCacheCapacity(const std::string& value) {
return Status(SERVER_INVALID_ARGUMENT, msg);
} else {
uint64_t gpu_cache_capacity = std::stoi(value) * GB;
int gpu_index;
Status s = GetResourceConfigIndexBuildDevice(gpu_index);
int device_id;
Status s = GetResourceConfigIndexBuildDevice(device_id);
if (!s.ok()) {
return s;
}
size_t gpu_memory;
if (!ValidationUtil::GetGpuMemory(gpu_index, gpu_memory).ok()) {
std::string msg = "Fail to get GPU memory for GPU device: " + std::to_string(gpu_index);
if (!ValidationUtil::GetGpuMemory(device_id, gpu_memory).ok()) {
std::string msg = "Fail to get GPU memory for GPU device: " + std::to_string(device_id);
return Status(SERVER_UNEXPECTED_ERROR, msg);
} else if (gpu_cache_capacity >= gpu_memory) {
std::string msg = "Invalid gpu cache capacity: " + value +
@ -689,29 +696,33 @@ Config::CheckResourceConfigMode(const std::string& value) {
}
Status
CheckGpuDevice(const std::string& value) {
CheckResource(const std::string& value) {
std::string s = value;
std::transform(s.begin(), s.end(), s.begin(), ::tolower);
#ifdef MILVUS_CPU_VERSION
if (s != "cpu") {
return Status(SERVER_INVALID_ARGUMENT, "Invalid CPU resource: " + s);
}
#else
const std::regex pat("gpu(\\d+)");
std::cmatch m;
if (!std::regex_match(value.c_str(), m, pat)) {
std::string msg = "Invalid gpu device: " + value +
". Possible reason: resource_config.search_resources does not match your hardware.";
const std::regex pat("cpu|gpu(\\d+)");
std::smatch m;
if (!std::regex_match(s, m, pat)) {
std::string msg = "Invalid search resource: " + value +
". Possible reason: resource_config.search_resources is not in the format of cpux or gpux";
return Status(SERVER_INVALID_ARGUMENT, msg);
}
int32_t gpu_index = std::stoi(value.substr(3));
if (!ValidationUtil::ValidateGpuIndex(gpu_index).ok()) {
std::string msg = "Invalid gpu device: " + value +
". Possible reason: resource_config.search_resources does not match your hardware.";
return Status(SERVER_INVALID_ARGUMENT, msg);
if (s.compare(0, 3, "gpu") == 0) {
int32_t gpu_index = std::stoi(s.substr(3));
if (!ValidationUtil::ValidateGpuIndex(gpu_index).ok()) {
std::string msg = "Invalid search resource: " + value +
". Possible reason: resource_config.search_resources does not match your hardware.";
return Status(SERVER_INVALID_ARGUMENT, msg);
}
}
#endif
return Status::OK();
}
@ -724,38 +735,20 @@ Config::CheckResourceConfigSearchResources(const std::vector<std::string>& value
return Status(SERVER_INVALID_ARGUMENT, msg);
}
bool cpu_found = false, gpu_found = false;
for (auto& device : value) {
if (device == "cpu") {
cpu_found = true;
continue;
for (auto& resource : value) {
auto status = CheckResource(resource);
if (!status.ok()) {
return Status(SERVER_INVALID_ARGUMENT, status.message());
}
if (CheckGpuDevice(device).ok()) {
gpu_found = true;
} else {
std::string msg = "Invalid search resource: " + device +
". Possible reason: resource_config.search_resources does not match your hardware.";
return Status(SERVER_INVALID_ARGUMENT, msg);
}
}
if (cpu_found && !gpu_found) {
std::string msg =
"Invalid search resource. Possible reason: resource_config.search_resources has only CPU resource.";
return Status(SERVER_INVALID_ARGUMENT, msg);
}
return Status::OK();
}
Status
Config::CheckResourceConfigIndexBuildDevice(const std::string& value) {
// if (value == "cpu") {
// return Status::OK();
// }
if (!CheckGpuDevice(value).ok()) {
std::string msg = "Invalid index build device: " + value +
". Possible reason: resource_config.index_build_device does not match your hardware.";
return Status(SERVER_INVALID_ARGUMENT, msg);
auto status = CheckResource(value);
if (!status.ok()) {
return Status(SERVER_INVALID_ARGUMENT, status.message());
}
return Status::OK();
}
@ -796,6 +789,22 @@ Config::GetConfigStr(const std::string& parent_key, const std::string& child_key
return value;
}
std::string
Config::GetConfigSequenceStr(const std::string& parent_key, const std::string& child_key, const std::string& delim,
const std::string& default_value) {
std::string value;
if (!GetConfigValueInMem(parent_key, child_key, value).ok()) {
std::vector<std::string> sequence = GetConfigNode(parent_key).GetSequence(child_key);
if (sequence.empty()) {
value = default_value;
} else {
server::StringHelpFunctions::MergeStringWithDelimeter(sequence, delim, value);
}
SetConfigValueInMem(parent_key, child_key, value);
}
return value;
}
Status
Config::GetServerConfigAddress(std::string& value) {
value = GetConfigStr(CONFIG_SERVER, CONFIG_SERVER_ADDRESS, CONFIG_SERVER_ADDRESS_DEFAULT);
@ -1019,8 +1028,10 @@ Config::GetResourceConfigMode(std::string& value) {
Status
Config::GetResourceConfigSearchResources(std::vector<std::string>& value) {
ConfigNode resource_config = GetConfigNode(CONFIG_RESOURCE);
value = resource_config.GetSequence(CONFIG_RESOURCE_SEARCH_RESOURCES);
std::string str =
GetConfigSequenceStr(CONFIG_RESOURCE, CONFIG_RESOURCE_SEARCH_RESOURCES,
CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT);
server::StringHelpFunctions::SplitStringByDelimeter(str, CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, value);
return CheckResourceConfigSearchResources(value);
}
@ -1033,10 +1044,10 @@ Config::GetResourceConfigIndexBuildDevice(int32_t& value) {
return s;
}
if (str != "cpu") {
value = std::stoi(str.substr(3));
if (str == "cpu") {
value = CPU_DEVICE_ID;
} else {
value = -1;
value = std::stoi(str.substr(3));
}
return Status::OK();
@ -1163,7 +1174,7 @@ Config::SetMetricConfigEnableMonitor(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_METRIC_ENABLE_MONITOR, value);
SetConfigValueInMem(CONFIG_METRIC, CONFIG_METRIC_ENABLE_MONITOR, value);
return Status::OK();
}
@ -1174,7 +1185,7 @@ Config::SetMetricConfigCollector(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_METRIC_COLLECTOR, value);
SetConfigValueInMem(CONFIG_METRIC, CONFIG_METRIC_COLLECTOR, value);
return Status::OK();
}
@ -1185,7 +1196,7 @@ Config::SetMetricConfigPrometheusPort(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_METRIC_PROMETHEUS_PORT, value);
SetConfigValueInMem(CONFIG_METRIC, CONFIG_METRIC_PROMETHEUS_PORT, value);
return Status::OK();
}
@ -1197,7 +1208,7 @@ Config::SetCacheConfigCpuCacheCapacity(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_CPU_CACHE_CAPACITY, value);
SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_CPU_CACHE_CAPACITY, value);
return Status::OK();
}
@ -1208,7 +1219,7 @@ Config::SetCacheConfigCpuCacheThreshold(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_CPU_CACHE_THRESHOLD, value);
SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_CPU_CACHE_THRESHOLD, value);
return Status::OK();
}
@ -1219,7 +1230,7 @@ Config::SetCacheConfigGpuCacheCapacity(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_GPU_CACHE_CAPACITY, value);
SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_GPU_CACHE_CAPACITY, value);
return Status::OK();
}
@ -1230,7 +1241,7 @@ Config::SetCacheConfigGpuCacheThreshold(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_GPU_CACHE_THRESHOLD, value);
SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_GPU_CACHE_THRESHOLD, value);
return Status::OK();
}
@ -1241,7 +1252,7 @@ Config::SetCacheConfigCacheInsertData(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_CACHE_CACHE_INSERT_DATA, value);
SetConfigValueInMem(CONFIG_CACHE, CONFIG_CACHE_CACHE_INSERT_DATA, value);
return Status::OK();
}
@ -1253,7 +1264,7 @@ Config::SetEngineConfigUseBlasThreshold(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_ENGINE_USE_BLAS_THRESHOLD, value);
SetConfigValueInMem(CONFIG_ENGINE, CONFIG_ENGINE_USE_BLAS_THRESHOLD, value);
return Status::OK();
}
@ -1264,7 +1275,7 @@ Config::SetEngineConfigOmpThreadNum(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_ENGINE_OMP_THREAD_NUM, value);
SetConfigValueInMem(CONFIG_ENGINE, CONFIG_ENGINE_OMP_THREAD_NUM, value);
return Status::OK();
}
@ -1275,7 +1286,7 @@ Config::SetEngineConfigGpuSearchThreshold(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_ENGINE_GPU_SEARCH_THRESHOLD, value);
SetConfigValueInMem(CONFIG_ENGINE, CONFIG_ENGINE_GPU_SEARCH_THRESHOLD, value);
return Status::OK();
}
@ -1287,7 +1298,21 @@ Config::SetResourceConfigMode(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_RESOURCE_MODE, value);
SetConfigValueInMem(CONFIG_RESOURCE, CONFIG_RESOURCE_MODE, value);
return Status::OK();
}
Status
Config::SetResourceConfigSearchResources(const std::string& value) {
std::vector<std::string> res_vec;
server::StringHelpFunctions::SplitStringByDelimeter(value, CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, res_vec);
Status s = CheckResourceConfigSearchResources(res_vec);
if (!s.ok()) {
return s;
}
SetConfigValueInMem(CONFIG_RESOURCE, CONFIG_RESOURCE_SEARCH_RESOURCES, value);
return Status::OK();
}
@ -1298,7 +1323,7 @@ Config::SetResourceConfigIndexBuildDevice(const std::string& value) {
return s;
}
SetConfigValueInMem(CONFIG_DB, CONFIG_RESOURCE_INDEX_BUILD_DEVICE, value);
SetConfigValueInMem(CONFIG_RESOURCE, CONFIG_RESOURCE_INDEX_BUILD_DEVICE, value);
return Status::OK();
}

View File

@ -92,12 +92,19 @@ static const char* CONFIG_RESOURCE = "resource_config";
static const char* CONFIG_RESOURCE_MODE = "mode";
static const char* CONFIG_RESOURCE_MODE_DEFAULT = "simple";
static const char* CONFIG_RESOURCE_SEARCH_RESOURCES = "search_resources";
static const char* CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER = ",";
#ifdef MILVUS_CPU_VERSION
static const char* CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT = "cpu";
#else
static const char* CONFIG_RESOURCE_SEARCH_RESOURCES_DEFAULT = "cpu,gpu0";
#endif
static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE = "index_build_device";
#ifdef MILVUS_CPU_VERSION
static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT = "cpu";
#else
static const char* CONFIG_RESOURCE_INDEX_BUILD_DEVICE_DEFAULT = "gpu0";
#endif
const int32_t CPU_DEVICE_ID = -1;
class Config {
public:
@ -185,6 +192,9 @@ class Config {
std::string
GetConfigStr(const std::string& parent_key, const std::string& child_key, const std::string& default_value = "");
std::string
GetConfigSequenceStr(const std::string& parent_key, const std::string& child_key, const std::string& delim = ",",
const std::string& default_value = "");
public:
/* server config */
@ -306,6 +316,8 @@ class Config {
Status
SetResourceConfigMode(const std::string& value);
Status
SetResourceConfigSearchResources(const std::string& value);
Status
SetResourceConfigIndexBuildDevice(const std::string& value);
private:

View File

@ -150,9 +150,9 @@ GrpcRequestHandler::Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Co
}
::grpc::Status
GrpcRequestHandler::DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request,
::milvus::grpc::Status* response) {
BaseTaskPtr task_ptr = DeleteByRangeTask::Create(request);
GrpcRequestHandler::DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request,
::milvus::grpc::Status* response) {
BaseTaskPtr task_ptr = DeleteByDateTask::Create(request);
::milvus::grpc::Status grpc_status;
GrpcRequestScheduler::ExecTask(task_ptr, &grpc_status);
response->set_error_code(grpc_status.error_code());
@ -193,6 +193,36 @@ GrpcRequestHandler::DropIndex(::grpc::ServerContext* context, const ::milvus::gr
return ::grpc::Status::OK;
}
::grpc::Status
GrpcRequestHandler::CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request,
::milvus::grpc::Status* response) {
BaseTaskPtr task_ptr = CreatePartitionTask::Create(request);
GrpcRequestScheduler::ExecTask(task_ptr, response);
return ::grpc::Status::OK;
}
::grpc::Status
GrpcRequestHandler::ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::PartitionList* response) {
BaseTaskPtr task_ptr = ShowPartitionsTask::Create(request->table_name(), response);
::milvus::grpc::Status grpc_status;
GrpcRequestScheduler::ExecTask(task_ptr, &grpc_status);
response->mutable_status()->set_reason(grpc_status.reason());
response->mutable_status()->set_error_code(grpc_status.error_code());
return ::grpc::Status::OK;
}
::grpc::Status
GrpcRequestHandler::DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request,
::milvus::grpc::Status* response) {
BaseTaskPtr task_ptr = DropPartitionTask::Create(request);
::milvus::grpc::Status grpc_status;
GrpcRequestScheduler::ExecTask(task_ptr, &grpc_status);
response->set_reason(grpc_status.reason());
response->set_error_code(grpc_status.error_code());
return ::grpc::Status::OK;
}
} // namespace grpc
} // namespace server
} // namespace milvus

View File

@ -28,296 +28,168 @@ namespace server {
namespace grpc {
class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service {
public:
/**
* @brief Create table method
*
* This method is used to create table
*
* @param context, add context for every RPC
* @param request, used to provide table information to be created.
* @param response, used to get the status
*
* @return status
*
* @param request
* @param response
* @param context
*/
// *
// @brief This method is used to create table
//
// @param TableSchema, use to provide table information to be created.
//
// @return Status
::grpc::Status
CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request,
::milvus::grpc::Status* response) override;
/**
* @brief Test table existence method
*
* This method is used to test table existence.
*
* @param context, add context for every RPC
* @param request, table name is going to be tested.
* @param response, get the bool reply of hastable
*
* @return status
*
* @param request
* @param response
* @param context
*/
// *
// @brief This method is used to test table existence.
//
// @param TableName, table name is going to be tested.
//
// @return BoolReply
::grpc::Status
HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::BoolReply* response) override;
/**
* @brief Drop table method
*
* This method is used to drop table.
*
* @param context, add context for every RPC
* @param request, table name is going to be deleted.
* @param response, get the status of droptable
*
* @return status
*
* @param request
* @param response
* @param context
*/
::grpc::Status
DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::Status* response) override;
/**
* @brief build index by table method
*
* This method is used to build index by table in sync.
*
* @param context, add context for every RPC
* @param request, table name is going to be built index.
* @param response, get the status of buildindex
*
* @return status
*
* @param request
* @param response
* @param context
*/
::grpc::Status
CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request,
::milvus::grpc::Status* response) override;
/**
* @brief Insert vector array to table
*
* This method is used to insert vector array to table.
*
* @param context, add context for every RPC
* @param request, table_name is inserted.
* @param response, vector array is inserted.
*
* @return status
*
* @param context
* @param request
* @param response
*/
::grpc::Status
Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request,
::milvus::grpc::VectorIds* response) override;
/**
* @brief Query vector
*
* This method is used to query vector in table.
*
* @param context, add context for every RPC
* @param request:
* table_name, table_name is queried.
* query_record_array, all vector are going to be queried.
* query_range_array, optional ranges for conditional search. If not specified, search whole table
* topk, how many similarity vectors will be searched.
*
* @param writer, write query result array.
*
* @return status
*
* @param context
* @param request
* @param writer
*/
::grpc::Status
Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request,
::milvus::grpc::TopKQueryResultList* response) override;
/**
* @brief Internal use query interface
*
* This method is used to query vector in specified files.
*
* @param context, add context for every RPC
* @param request:
* file_id_array, specified files id array, queried.
* query_record_array, all vector are going to be queried.
* query_range_array, optional ranges for conditional search. If not specified, search whole table
* topk, how many similarity vectors will be searched.
*
* @param writer, write query result array.
*
* @return status
*
* @param context
* @param request
* @param writer
*/
::grpc::Status
SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request,
::milvus::grpc::TopKQueryResultList* response) override;
/**
* @brief Get table schema
*
* This method is used to get table schema.
*
* @param context, add context for every RPC
* @param request, target table name.
* @param response, table schema
*
* @return status
*
* @param context
* @param request
* @param response
*/
// *
// @brief This method is used to get table schema.
//
// @param TableName, target table name.
//
// @return TableSchema
::grpc::Status
DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::TableSchema* response) override;
/**
* @brief Get table row count
*
* This method is used to get table row count.
*
* @param context, add context for every RPC
* @param request, target table name.
* @param response, table row count
*
* @return table row count
*
* @param request
* @param response
* @param context
*/
// *
// @brief This method is used to get table schema.
//
// @param TableName, target table name.
//
// @return TableRowCount
::grpc::Status
CountTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::TableRowCount* response) override;
/**
* @brief List all tables in database
*
* This method is used to list all tables.
*
* @param context, add context for every RPC
* @param request, show table command, usually not use
* @param writer, write tables to client
*
* @return status
*
* @param context
* @param request
* @param writer
*/
// *
// @brief This method is used to list all tables.
//
// @param Command, dummy parameter.
//
// @return TableNameList
::grpc::Status
ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request,
::milvus::grpc::TableNameList* response) override;
/**
* @brief Give the server status
*
*
* This method is used to give the server status.
* @param context, add context for every RPC
* @param request, give server command
* @param response, server status
*
* @return status
*
* @param context
* @param request
* @param response
*/
// *
// @brief This method is used to delete table.
//
// @param TableName, table name is going to be deleted.
//
// @return TableNameList
::grpc::Status
Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request,
::milvus::grpc::StringReply* response) override;
/**
* @brief delete table by range
*
* This method is used to delete table by range.
* @param context, add context for every RPC
* @param request, table name and range
* @param response, status
*
* @return status
*
* @param context
* @param request
* @param response
*/
DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::Status* response) override;
// *
// @brief This method is used to build index by table in sync mode.
//
// @param IndexParam, index paramters.
//
// @return Status
::grpc::Status
DeleteByRange(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByRangeParam* request,
::milvus::grpc::Status* response) override;
/**
* @brief preload table
*
* This method is used to preload table.
* @param context, add context for every RPC
* @param request, table name
* @param response, status
*
* @return status
*
* @param context
* @param request
* @param response
*/
::grpc::Status
PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::Status* response) override;
/**
* @brief Describe index
*
* This method is used to describe index.
* @param context, add context for every RPC
* @param request, table name
* @param response, index informations
*
* @return status
*
* @param context
* @param request
* @param response
*/
CreateIndex(::grpc::ServerContext* context, const ::milvus::grpc::IndexParam* request,
::milvus::grpc::Status* response) override;
// *
// @brief This method is used to describe index
//
// @param TableName, target table name.
//
// @return IndexParam
::grpc::Status
DescribeIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::IndexParam* response) override;
/**
* @brief Drop index
*
* This method is used to drop index.
* @param context, add context for every RPC
* @param request, table name
* @param response, status
*
* @return status
*
* @param context
* @param request
* @param response
*/
// *
// @brief This method is used to drop index
//
// @param TableName, target table name.
//
// @return Status
::grpc::Status
DropIndex(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::Status* response) override;
// *
// @brief This method is used to create partition
//
// @param PartitionParam, partition parameters.
//
// @return Status
::grpc::Status
CreatePartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request,
::milvus::grpc::Status* response) override;
// *
// @brief This method is used to show partition information
//
// @param TableName, target table name.
//
// @return PartitionList
::grpc::Status
ShowPartitions(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::PartitionList* response) override;
// *
// @brief This method is used to drop partition
//
// @param PartitionName, target partition name.
//
// @return Status
::grpc::Status
DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request,
::milvus::grpc::Status* response) override;
// *
// @brief This method is used to add vector array to table.
//
// @param InsertParam, insert parameters.
//
// @return VectorIds
::grpc::Status
Insert(::grpc::ServerContext* context, const ::milvus::grpc::InsertParam* request,
::milvus::grpc::VectorIds* response) override;
// *
// @brief This method is used to query vector in table.
//
// @param SearchParam, search parameters.
//
// @return TopKQueryResultList
::grpc::Status
Search(::grpc::ServerContext* context, const ::milvus::grpc::SearchParam* request,
::milvus::grpc::TopKQueryResultList* response) override;
// *
// @brief This method is used to query vector in specified files.
//
// @param SearchInFilesParam, search in files paremeters.
//
// @return TopKQueryResultList
::grpc::Status
SearchInFiles(::grpc::ServerContext* context, const ::milvus::grpc::SearchInFilesParam* request,
::milvus::grpc::TopKQueryResultList* response) override;
// *
// @brief This method is used to give the server status.
//
// @param Command, command string
//
// @return StringReply
::grpc::Status
Cmd(::grpc::ServerContext* context, const ::milvus::grpc::Command* request,
::milvus::grpc::StringReply* response) override;
// *
// @brief This method is used to delete vector by date range
//
// @param DeleteByDateParam, delete parameters.
//
// @return status
::grpc::Status
DeleteByDate(::grpc::ServerContext* context, const ::milvus::grpc::DeleteByDateParam* request,
::milvus::grpc::Status* response) override;
// *
// @brief This method is used to preload table
//
// @param TableName, target table name.
//
// @return Status
::grpc::Status
PreloadTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::Status* response) override;
};
} // namespace grpc

View File

@ -366,7 +366,7 @@ DropTableTask::OnExecute() {
// step 3: Drop table
std::vector<DB_DATE> dates;
status = DBWrapper::DB()->DeleteTable(table_name_, dates);
status = DBWrapper::DB()->DropTable(table_name_, dates);
if (!status.ok()) {
return status;
}
@ -505,7 +505,8 @@ InsertTask::OnExecute() {
memcpy(target_data, src_data, static_cast<size_t>(sizeof(int64_t) * insert_param_->row_id_array_size()));
}
status = DBWrapper::DB()->InsertVectors(insert_param_->table_name(), vec_count, vec_f.data(), vec_ids);
status = DBWrapper::DB()->InsertVectors(insert_param_->table_name(), insert_param_->partition_tag(), vec_count,
vec_f.data(), vec_ids);
rc.ElapseFromBegin("add vectors to engine");
if (!status.ok()) {
return status;
@ -637,7 +638,8 @@ SearchTask::OnExecute() {
rc.RecordSection("prepare vector data");
// step 6: search vectors
engine::QueryResults results;
engine::ResultIds result_ids;
engine::ResultDistances result_distances;
auto record_count = (uint64_t)search_param_->query_record_array().size();
#ifdef MILVUS_ENABLE_PROFILING
@ -647,11 +649,21 @@ SearchTask::OnExecute() {
#endif
if (file_id_array_.empty()) {
status =
DBWrapper::DB()->Query(table_name_, (size_t)top_k, record_count, nprobe, vec_f.data(), dates, results);
std::vector<std::string> partition_tags;
for (size_t i = 0; i < search_param_->partition_tag_array_size(); i++) {
partition_tags.emplace_back(search_param_->partition_tag_array(i));
}
status = ValidationUtil::ValidatePartitionTags(partition_tags);
if (!status.ok()) {
return status;
}
status = DBWrapper::DB()->Query(table_name_, partition_tags, (size_t)top_k, record_count, nprobe,
vec_f.data(), dates, result_ids, result_distances);
} else {
status = DBWrapper::DB()->Query(table_name_, file_id_array_, (size_t)top_k, record_count, nprobe,
vec_f.data(), dates, results);
status = DBWrapper::DB()->QueryByFileID(table_name_, file_id_array_, (size_t)top_k, record_count, nprobe,
vec_f.data(), dates, result_ids, result_distances);
}
#ifdef MILVUS_ENABLE_PROFILING
@ -663,23 +675,20 @@ SearchTask::OnExecute() {
return status;
}
if (results.empty()) {
if (result_ids.empty()) {
return Status::OK(); // empty table
}
if (results.size() != record_count) {
std::string msg = "Search " + std::to_string(record_count) + " vectors but only return " +
std::to_string(results.size()) + " results";
return Status(SERVER_ILLEGAL_SEARCH_RESULT, msg);
}
size_t result_k = result_ids.size() / record_count;
// step 7: construct result array
for (auto& result : results) {
for (size_t i = 0; i < record_count; i++) {
::milvus::grpc::TopKQueryResult* topk_query_result = topk_result_list->add_topk_query_result();
for (auto& pair : result) {
for (size_t j = 0; j < result_k; j++) {
::milvus::grpc::QueryResult* grpc_result = topk_query_result->add_query_result_arrays();
grpc_result->set_id(pair.first);
grpc_result->set_distance(pair.second);
size_t idx = i * result_k + j;
grpc_result->set_id(result_ids[idx]);
grpc_result->set_distance(result_distances[idx]);
}
}
@ -759,22 +768,22 @@ CmdTask::OnExecute() {
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DeleteByRangeTask::DeleteByRangeTask(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param)
DeleteByDateTask::DeleteByDateTask(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param)
: GrpcBaseTask(DDL_DML_TASK_GROUP), delete_by_range_param_(delete_by_range_param) {
}
BaseTaskPtr
DeleteByRangeTask::Create(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param) {
DeleteByDateTask::Create(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param) {
if (delete_by_range_param == nullptr) {
SERVER_LOG_ERROR << "grpc input is null!";
return nullptr;
}
return std::shared_ptr<GrpcBaseTask>(new DeleteByRangeTask(delete_by_range_param));
return std::shared_ptr<GrpcBaseTask>(new DeleteByDateTask(delete_by_range_param));
}
Status
DeleteByRangeTask::OnExecute() {
DeleteByDateTask::OnExecute() {
try {
TimeRecorder rc("DeleteByRangeTask");
@ -815,7 +824,7 @@ DeleteByRangeTask::OnExecute() {
std::string fname = "/tmp/search_nq_" + this->delete_by_range_param_->table_name() + ".profiling";
ProfilerStart(fname.c_str());
#endif
status = DBWrapper::DB()->DeleteTable(table_name, dates);
status = DBWrapper::DB()->DropTable(table_name, dates);
if (!status.ok()) {
return status;
}
@ -946,6 +955,119 @@ DropIndexTask::OnExecute() {
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CreatePartitionTask::CreatePartitionTask(const ::milvus::grpc::PartitionParam* partition_param)
: GrpcBaseTask(DDL_DML_TASK_GROUP), partition_param_(partition_param) {
}
BaseTaskPtr
CreatePartitionTask::Create(const ::milvus::grpc::PartitionParam* partition_param) {
if (partition_param == nullptr) {
SERVER_LOG_ERROR << "grpc input is null!";
return nullptr;
}
return std::shared_ptr<GrpcBaseTask>(new CreatePartitionTask(partition_param));
}
Status
CreatePartitionTask::OnExecute() {
TimeRecorder rc("CreatePartitionTask");
try {
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(partition_param_->table_name());
if (!status.ok()) {
return status;
}
status = ValidationUtil::ValidateTableName(partition_param_->partition_name());
if (!status.ok()) {
return status;
}
status = ValidationUtil::ValidatePartitionTags({partition_param_->tag()});
if (!status.ok()) {
return status;
}
// step 2: create partition
status = DBWrapper::DB()->CreatePartition(partition_param_->table_name(), partition_param_->partition_name(),
partition_param_->tag());
if (!status.ok()) {
// partition could exist
if (status.code() == DB_ALREADY_EXIST) {
return Status(SERVER_INVALID_TABLE_NAME, status.message());
}
return status;
}
} catch (std::exception& ex) {
return Status(SERVER_UNEXPECTED_ERROR, ex.what());
}
rc.ElapseFromBegin("totally cost");
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
ShowPartitionsTask::ShowPartitionsTask(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list)
: GrpcBaseTask(INFO_TASK_GROUP), table_name_(table_name), partition_list_(partition_list) {
}
BaseTaskPtr
ShowPartitionsTask::Create(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list) {
return std::shared_ptr<GrpcBaseTask>(new ShowPartitionsTask(table_name, partition_list));
}
Status
ShowPartitionsTask::OnExecute() {
std::vector<engine::meta::TableSchema> schema_array;
auto statuts = DBWrapper::DB()->ShowPartitions(table_name_, schema_array);
if (!statuts.ok()) {
return statuts;
}
for (auto& schema : schema_array) {
::milvus::grpc::PartitionParam* param = partition_list_->add_partition_array();
param->set_table_name(schema.owner_table_);
param->set_partition_name(schema.table_id_);
param->set_tag(schema.partition_tag_);
}
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
DropPartitionTask::DropPartitionTask(const ::milvus::grpc::PartitionParam* partition_param)
: GrpcBaseTask(DDL_DML_TASK_GROUP), partition_param_(partition_param) {
}
BaseTaskPtr
DropPartitionTask::Create(const ::milvus::grpc::PartitionParam* partition_param) {
return std::shared_ptr<GrpcBaseTask>(new DropPartitionTask(partition_param));
}
Status
DropPartitionTask::OnExecute() {
if (!partition_param_->partition_name().empty()) {
auto status = ValidationUtil::ValidateTableName(partition_param_->partition_name());
if (!status.ok()) {
return status;
}
return DBWrapper::DB()->DropPartition(partition_param_->partition_name());
} else {
auto status = ValidationUtil::ValidateTableName(partition_param_->table_name());
if (!status.ok()) {
return status;
}
status = ValidationUtil::ValidatePartitionTags({partition_param_->tag()});
if (!status.ok()) {
return status;
}
return DBWrapper::DB()->DropPartitionByTag(partition_param_->table_name(), partition_param_->tag());
}
}
} // namespace grpc
} // namespace server
} // namespace milvus

View File

@ -203,19 +203,19 @@ class CmdTask : public GrpcBaseTask {
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class DeleteByRangeTask : public GrpcBaseTask {
class DeleteByDateTask : public GrpcBaseTask {
public:
static BaseTaskPtr
Create(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param);
Create(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param);
protected:
explicit DeleteByRangeTask(const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param);
explicit DeleteByDateTask(const ::milvus::grpc::DeleteByDateParam* delete_by_range_param);
Status
OnExecute() override;
private:
const ::milvus::grpc::DeleteByRangeParam* delete_by_range_param_;
const ::milvus::grpc::DeleteByDateParam* delete_by_range_param_;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -267,6 +267,55 @@ class DropIndexTask : public GrpcBaseTask {
std::string table_name_;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class CreatePartitionTask : public GrpcBaseTask {
public:
static BaseTaskPtr
Create(const ::milvus::grpc::PartitionParam* partition_param);
protected:
explicit CreatePartitionTask(const ::milvus::grpc::PartitionParam* partition_param);
Status
OnExecute() override;
private:
const ::milvus::grpc::PartitionParam* partition_param_;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class ShowPartitionsTask : public GrpcBaseTask {
public:
static BaseTaskPtr
Create(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list);
protected:
ShowPartitionsTask(const std::string& table_name, ::milvus::grpc::PartitionList* partition_list);
Status
OnExecute() override;
private:
std::string table_name_;
::milvus::grpc::PartitionList* partition_list_;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class DropPartitionTask : public GrpcBaseTask {
public:
static BaseTaskPtr
Create(const ::milvus::grpc::PartitionParam* partition_param);
protected:
explicit DropPartitionTask(const ::milvus::grpc::PartitionParam* partition_param);
Status
OnExecute() override;
private:
const ::milvus::grpc::PartitionParam* partition_param_;
};
} // namespace grpc
} // namespace server
} // namespace milvus

View File

@ -17,6 +17,7 @@
#include "utils/StringHelpFunctions.h"
#include <regex>
#include <string>
namespace milvus {
@ -39,39 +40,53 @@ StringHelpFunctions::TrimStringQuote(std::string& string, const std::string& qou
}
}
Status
void
StringHelpFunctions::SplitStringByDelimeter(const std::string& str, const std::string& delimeter,
std::vector<std::string>& result) {
if (str.empty()) {
return Status::OK();
return;
}
size_t last = 0;
size_t index = str.find_first_of(delimeter, last);
while (index != std::string::npos) {
result.emplace_back(str.substr(last, index - last));
last = index + 1;
index = str.find_first_of(delimeter, last);
size_t prev = 0, pos = 0;
while (true) {
pos = str.find_first_of(delimeter, prev);
if (pos == std::string::npos) {
result.emplace_back(str.substr(prev));
break;
} else {
result.emplace_back(str.substr(prev, pos - prev));
prev = pos + 1;
}
}
if (index - last > 0) {
std::string temp = str.substr(last);
result.emplace_back(temp);
}
void
StringHelpFunctions::MergeStringWithDelimeter(const std::vector<std::string>& strs, const std::string& delimeter,
std::string& result) {
if (strs.empty()) {
result = "";
return;
}
return Status::OK();
result = strs[0];
for (size_t i = 1; i < strs.size(); i++) {
result = result + delimeter + strs[i];
}
}
Status
StringHelpFunctions::SplitStringByQuote(const std::string& str, const std::string& delimeter, const std::string& quote,
std::vector<std::string>& result) {
if (quote.empty()) {
return SplitStringByDelimeter(str, delimeter, result);
SplitStringByDelimeter(str, delimeter, result);
return Status::OK();
}
size_t last = 0;
size_t index = str.find_first_of(quote, last);
if (index == std::string::npos) {
return SplitStringByDelimeter(str, delimeter, result);
SplitStringByDelimeter(str, delimeter, result);
return Status::OK();
}
std::string process_str = str;
@ -116,11 +131,28 @@ StringHelpFunctions::SplitStringByQuote(const std::string& str, const std::strin
}
if (!process_str.empty()) {
return SplitStringByDelimeter(process_str, delimeter, result);
SplitStringByDelimeter(process_str, delimeter, result);
}
return Status::OK();
}
bool
StringHelpFunctions::IsRegexMatch(const std::string& target_str, const std::string& pattern_str) {
// if target_str equals pattern_str, return true
if (target_str == pattern_str) {
return true;
}
// regex match
std::regex pattern(pattern_str);
std::smatch results;
if (std::regex_search(target_str, results, pattern)) {
return true;
} else {
return false;
}
}
} // namespace server
} // namespace milvus

View File

@ -43,9 +43,12 @@ class StringHelpFunctions {
// ,b, | b |
// ,, | |
// a a
static Status
static void
SplitStringByDelimeter(const std::string& str, const std::string& delimeter, std::vector<std::string>& result);
static void
MergeStringWithDelimeter(const std::vector<std::string>& strs, const std::string& delimeter, std::string& result);
// assume the table has two columns, quote='\"', delimeter=','
// a,b a | b
// "aa,gg,yy",b aa,gg,yy | b
@ -56,6 +59,11 @@ class StringHelpFunctions {
static Status
SplitStringByQuote(const std::string& str, const std::string& delimeter, const std::string& quote,
std::vector<std::string>& result);
// std regex match function
// regex grammar reference: http://www.cplusplus.com/reference/regex/ECMAScript/
static bool
IsRegexMatch(const std::string& target_str, const std::string& pattern);
};
} // namespace server

View File

@ -168,6 +168,19 @@ ValidationUtil::ValidateSearchNprobe(int64_t nprobe, const engine::meta::TableSc
return Status::OK();
}
Status
ValidationUtil::ValidatePartitionTags(const std::vector<std::string>& partition_tags) {
for (auto& tag : partition_tags) {
if (tag.empty()) {
std::string msg = "Invalid partition tag: " + tag + ". " + "Partition tag should not be empty.";
SERVER_LOG_ERROR << msg;
return Status(SERVER_INVALID_NPROBE, msg);
}
}
return Status::OK();
}
Status
ValidationUtil::ValidateGpuIndex(uint32_t gpu_index) {
#ifdef MILVUS_GPU_VERSION

View File

@ -21,6 +21,7 @@
#include "utils/Status.h"
#include <string>
#include <vector>
namespace milvus {
namespace server {
@ -54,6 +55,9 @@ class ValidationUtil {
static Status
ValidateSearchNprobe(int64_t nprobe, const engine::meta::TableSchema& table_schema);
static Status
ValidatePartitionTags(const std::vector<std::string>& partition_tags);
static Status
ValidateGpuIndex(uint32_t gpu_index);

View File

@ -77,6 +77,7 @@ set(helper_files
${MILVUS_ENGINE_SRC}/utils/CommonUtil.cpp
${MILVUS_ENGINE_SRC}/utils/TimeRecorder.cpp
${MILVUS_ENGINE_SRC}/utils/Status.cpp
${MILVUS_ENGINE_SRC}/utils/StringHelpFunctions.cpp
${MILVUS_ENGINE_SRC}/utils/ValidationUtil.cpp
${MILVUS_ENGINE_SRC}/external/easyloggingpp/easylogging++.cc
)

View File

@ -171,7 +171,8 @@ TEST_F(DBTest, DB_TEST) {
BuildVectors(qb, qxb);
std::thread search([&]() {
milvus::engine::QueryResults results;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
int k = 10;
std::this_thread::sleep_for(std::chrono::seconds(2));
@ -186,17 +187,19 @@ TEST_F(DBTest, DB_TEST) {
prev_count = count;
START_TIMER;
stat = db_->Query(TABLE_NAME, k, qb, 10, qxb.data(), results);
std::vector<std::string> tags;
stat = db_->Query(TABLE_NAME, tags, k, qb, 10, qxb.data(), result_ids, result_distances);
ss << "Search " << j << " With Size " << count / milvus::engine::M << " M";
STOP_TIMER(ss.str());
ASSERT_TRUE(stat.ok());
for (auto k = 0; k < qb; ++k) {
ASSERT_EQ(results[k][0].first, target_ids[k]);
for (auto i = 0; i < qb; ++i) {
ASSERT_EQ(result_ids[i*k], target_ids[i]);
ss.str("");
ss << "Result [" << k << "]:";
for (auto result : results[k]) {
ss << result.first << " ";
ss << "Result [" << i << "]:";
for (auto t = 0; t < k; t++) {
ss << result_ids[i * k + t] << " ";
}
/* LOG(DEBUG) << ss.str(); */
}
@ -209,10 +212,10 @@ TEST_F(DBTest, DB_TEST) {
for (auto i = 0; i < loop; ++i) {
if (i == 40) {
db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids);
db_->InsertVectors(TABLE_NAME, "", qb, qxb.data(), target_ids);
ASSERT_EQ(target_ids.size(), qb);
} else {
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids);
}
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
@ -270,7 +273,7 @@ TEST_F(DBTest, SEARCH_TEST) {
// insert data
const int batch_size = 100;
for (int j = 0; j < nb / batch_size; ++j) {
stat = db_->InsertVectors(TABLE_NAME, batch_size, xb.data() + batch_size * j * TABLE_DIM, ids);
stat = db_->InsertVectors(TABLE_NAME, "", batch_size, xb.data() + batch_size * j * TABLE_DIM, ids);
if (j == 200) {
sleep(1);
}
@ -282,16 +285,19 @@ TEST_F(DBTest, SEARCH_TEST) {
db_->CreateIndex(TABLE_NAME, index); // wait until build index finish
{
milvus::engine::QueryResults results;
stat = db_->Query(TABLE_NAME, k, nq, 10, xq.data(), results);
std::vector<std::string> tags;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(TABLE_NAME, tags, k, nq, 10, xq.data(), result_ids, result_distances);
ASSERT_TRUE(stat.ok());
}
{ // search by specify index file
milvus::engine::meta::DatesT dates;
std::vector<std::string> file_ids = {"1", "2", "3", "4", "5", "6"};
milvus::engine::QueryResults results;
stat = db_->Query(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, results);
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->QueryByFileID(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
}
@ -340,7 +346,7 @@ TEST_F(DBTest, PRELOADTABLE_TEST) {
int loop = 5;
for (auto i = 0; i < loop; ++i) {
milvus::engine::IDNumbers vector_ids;
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids);
ASSERT_EQ(vector_ids.size(), nb);
}
@ -370,7 +376,7 @@ TEST_F(DBTest, SHUTDOWN_TEST) {
ASSERT_FALSE(stat.ok());
milvus::engine::IDNumbers ids;
stat = db_->InsertVectors(table_info.table_id_, 0, nullptr, ids);
stat = db_->InsertVectors(table_info.table_id_, "", 0, nullptr, ids);
ASSERT_FALSE(stat.ok());
stat = db_->PreloadTable(table_info.table_id_);
@ -387,15 +393,17 @@ TEST_F(DBTest, SHUTDOWN_TEST) {
stat = db_->DescribeIndex(table_info.table_id_, index);
ASSERT_FALSE(stat.ok());
std::vector<std::string> tags;
milvus::engine::meta::DatesT dates;
milvus::engine::QueryResults results;
stat = db_->Query(table_info.table_id_, 1, 1, 1, nullptr, dates, results);
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(table_info.table_id_, tags, 1, 1, 1, nullptr, dates, result_ids, result_distances);
ASSERT_FALSE(stat.ok());
std::vector<std::string> file_ids;
stat = db_->Query(table_info.table_id_, file_ids, 1, 1, 1, nullptr, dates, results);
stat = db_->QueryByFileID(table_info.table_id_, file_ids, 1, 1, 1, nullptr, dates, result_ids, result_distances);
ASSERT_FALSE(stat.ok());
stat = db_->DeleteTable(table_info.table_id_, dates);
stat = db_->DropTable(table_info.table_id_, dates);
ASSERT_FALSE(stat.ok());
}
@ -408,7 +416,7 @@ TEST_F(DBTest, INDEX_TEST) {
BuildVectors(nb, xb);
milvus::engine::IDNumbers vector_ids;
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids);
ASSERT_EQ(vector_ids.size(), nb);
milvus::engine::TableIndex index;
@ -438,6 +446,106 @@ TEST_F(DBTest, INDEX_TEST) {
ASSERT_TRUE(stat.ok());
}
TEST_F(DBTest, PARTITION_TEST) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
ASSERT_TRUE(stat.ok());
// create partition and insert data
const int64_t PARTITION_COUNT = 5;
const int64_t INSERT_BATCH = 2000;
std::string table_name = TABLE_NAME;
for (int64_t i = 0; i < PARTITION_COUNT; i++) {
std::string partition_tag = std::to_string(i);
std::string partition_name = table_name + "_" + partition_tag;
stat = db_->CreatePartition(table_name, partition_name, partition_tag);
ASSERT_TRUE(stat.ok());
std::vector<float> xb;
BuildVectors(INSERT_BATCH, xb);
milvus::engine::IDNumbers vector_ids;
vector_ids.resize(INSERT_BATCH);
for (int64_t k = 0; k < INSERT_BATCH; k++) {
vector_ids[k] = i*INSERT_BATCH + k;
}
db_->InsertVectors(table_name, partition_tag, INSERT_BATCH, xb.data(), vector_ids);
ASSERT_EQ(vector_ids.size(), INSERT_BATCH);
}
//duplicated partition is not allowed
stat = db_->CreatePartition(table_name, "", "0");
ASSERT_FALSE(stat.ok());
std::vector<milvus::engine::meta::TableSchema> partiton_schema_array;
stat = db_->ShowPartitions(table_name, partiton_schema_array);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(partiton_schema_array.size(), PARTITION_COUNT);
for (int64_t i = 0; i < PARTITION_COUNT; i++) {
ASSERT_EQ(partiton_schema_array[i].table_id_, table_name + "_" + std::to_string(i));
}
{ // build index
milvus::engine::TableIndex index;
index.engine_type_ = (int) milvus::engine::EngineType::FAISS_IVFFLAT;
index.metric_type_ = (int) milvus::engine::MetricType::L2;
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_TRUE(stat.ok());
uint64_t row_count = 0;
stat = db_->GetTableRowCount(TABLE_NAME, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, INSERT_BATCH*PARTITION_COUNT);
}
{ // search
const int64_t nq = 5;
const int64_t topk = 10;
const int64_t nprobe = 10;
std::vector<float> xq;
BuildVectors(nq, xq);
// specify partition tags
std::vector<std::string> tags = {"0", std::to_string(PARTITION_COUNT - 1)};
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(TABLE_NAME, tags, topk, nq, nprobe, xq.data(), result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size()/topk, nq);
// search in whole table
tags.clear();
result_ids.clear();
result_distances.clear();
stat = db_->Query(TABLE_NAME, tags, topk, nq, nprobe, xq.data(), result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size()/topk, nq);
// search in all partitions(tag regex match)
tags.push_back("\\d");
result_ids.clear();
result_distances.clear();
stat = db_->Query(TABLE_NAME, tags, topk, nq, nprobe, xq.data(), result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size()/topk, nq);
}
stat = db_->DropPartition(table_name + "_0");
ASSERT_TRUE(stat.ok());
stat = db_->DropPartitionByTag(table_name, "1");
ASSERT_TRUE(stat.ok());
stat = db_->DropIndex(table_name);
ASSERT_TRUE(stat.ok());
milvus::engine::meta::DatesT dates;
stat = db_->DropTable(table_name, dates);
ASSERT_TRUE(stat.ok());
}
TEST_F(DBTest2, ARHIVE_DISK_CHECK) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
@ -470,7 +578,7 @@ TEST_F(DBTest2, ARHIVE_DISK_CHECK) {
int loop = INSERT_LOOP;
for (auto i = 0; i < loop; ++i) {
milvus::engine::IDNumbers vector_ids;
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids);
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
@ -502,12 +610,12 @@ TEST_F(DBTest2, DELETE_TEST) {
BuildVectors(nb, xb);
milvus::engine::IDNumbers vector_ids;
stat = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
stat = db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids);
milvus::engine::TableIndex index;
stat = db_->CreateIndex(TABLE_NAME, index);
std::vector<milvus::engine::meta::DateT> dates;
stat = db_->DeleteTable(TABLE_NAME, dates);
stat = db_->DropTable(TABLE_NAME, dates);
std::this_thread::sleep_for(std::chrono::seconds(2));
ASSERT_TRUE(stat.ok());
@ -537,7 +645,7 @@ TEST_F(DBTest2, DELETE_BY_RANGE_TEST) {
BuildVectors(nb, xb);
milvus::engine::IDNumbers vector_ids;
stat = db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
stat = db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids);
milvus::engine::TableIndex index;
stat = db_->CreateIndex(TABLE_NAME, index);
@ -549,7 +657,7 @@ TEST_F(DBTest2, DELETE_BY_RANGE_TEST) {
std::string end_value = CurrentTmDate(1);
ConvertTimeRangeToDBDates(start_value, end_value, dates);
stat = db_->DeleteTable(TABLE_NAME, dates);
stat = db_->DropTable(TABLE_NAME, dates);
ASSERT_TRUE(stat.ok());
uint64_t row_count = 0;

View File

@ -77,11 +77,12 @@ TEST_F(MySqlDBTest, DB_TEST) {
std::vector<float> qxb;
BuildVectors(qb, qxb);
db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids);
db_->InsertVectors(TABLE_NAME, "", qb, qxb.data(), target_ids);
ASSERT_EQ(target_ids.size(), qb);
std::thread search([&]() {
milvus::engine::QueryResults results;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
int k = 10;
std::this_thread::sleep_for(std::chrono::seconds(5));
@ -96,25 +97,26 @@ TEST_F(MySqlDBTest, DB_TEST) {
prev_count = count;
START_TIMER;
stat = db_->Query(TABLE_NAME, k, qb, 10, qxb.data(), results);
std::vector<std::string> tags;
stat = db_->Query(TABLE_NAME, tags, k, qb, 10, qxb.data(), result_ids, result_distances);
ss << "Search " << j << " With Size " << count / milvus::engine::M << " M";
STOP_TIMER(ss.str());
ASSERT_TRUE(stat.ok());
for (auto k = 0; k < qb; ++k) {
// std::cout << results[k][0].first << " " << target_ids[k] << std::endl;
// ASSERT_EQ(results[k][0].first, target_ids[k]);
for (auto i = 0; i < qb; ++i) {
// std::cout << results[k][0].first << " " << target_ids[k] << std::endl;
// ASSERT_EQ(results[k][0].first, target_ids[k]);
bool exists = false;
for (auto& result : results[k]) {
if (result.first == target_ids[k]) {
for (auto t = 0; t < k; t++) {
if (result_ids[i * k + t] == target_ids[i]) {
exists = true;
}
}
ASSERT_TRUE(exists);
ss.str("");
ss << "Result [" << k << "]:";
for (auto result : results[k]) {
ss << result.first << " ";
ss << "Result [" << i << "]:";
for (auto t = 0; t < k; t++) {
ss << result_ids[i * k + t] << " ";
}
/* LOG(DEBUG) << ss.str(); */
}
@ -128,13 +130,13 @@ TEST_F(MySqlDBTest, DB_TEST) {
int loop = INSERT_LOOP;
for (auto i = 0; i < loop; ++i) {
// if (i==10) {
// db_->InsertVectors(TABLE_NAME, qb, qxb.data(), target_ids);
// ASSERT_EQ(target_ids.size(), qb);
// } else {
// db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
// }
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
// if (i==10) {
// db_->InsertVectors(TABLE_NAME, "", qb, qxb.data(), target_ids);
// ASSERT_EQ(target_ids.size(), qb);
// } else {
// db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids);
// }
db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids);
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
@ -181,17 +183,17 @@ TEST_F(MySqlDBTest, SEARCH_TEST) {
// insert data
const int batch_size = 100;
for (int j = 0; j < nb / batch_size; ++j) {
stat = db_->InsertVectors(TABLE_NAME, batch_size, xb.data() + batch_size * j * TABLE_DIM, ids);
if (j == 200) {
sleep(1);
}
stat = db_->InsertVectors(TABLE_NAME, "", batch_size, xb.data() + batch_size * j * TABLE_DIM, ids);
if (j == 200) { sleep(1); }
ASSERT_TRUE(stat.ok());
}
sleep(2); // wait until build index finish
milvus::engine::QueryResults results;
stat = db_->Query(TABLE_NAME, k, nq, 10, xq.data(), results);
std::vector<std::string> tags;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(TABLE_NAME, tags, k, nq, 10, xq.data(), result_ids, result_distances);
ASSERT_TRUE(stat.ok());
}
@ -229,7 +231,7 @@ TEST_F(MySqlDBTest, ARHIVE_DISK_CHECK) {
int loop = INSERT_LOOP;
for (auto i = 0; i < loop; ++i) {
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids);
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
@ -265,17 +267,117 @@ TEST_F(MySqlDBTest, DELETE_TEST) {
int loop = 20;
for (auto i = 0; i < loop; ++i) {
db_->InsertVectors(TABLE_NAME, nb, xb.data(), vector_ids);
db_->InsertVectors(TABLE_NAME, "", nb, xb.data(), vector_ids);
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
// std::vector<engine::meta::DateT> dates;
// stat = db_->DeleteTable(TABLE_NAME, dates);
//// std::cout << "5 sec start" << std::endl;
// std::this_thread::sleep_for(std::chrono::seconds(5));
//// std::cout << "5 sec finish" << std::endl;
// ASSERT_TRUE(stat.ok());
//
// db_->HasTable(TABLE_NAME, has_table);
// ASSERT_FALSE(has_table);
// std::vector<engine::meta::DateT> dates;
// stat = db_->DropTable(TABLE_NAME, dates);
//// std::cout << "5 sec start" << std::endl;
// std::this_thread::sleep_for(std::chrono::seconds(5));
//// std::cout << "5 sec finish" << std::endl;
// ASSERT_TRUE(stat.ok());
//
// db_->HasTable(TABLE_NAME, has_table);
// ASSERT_FALSE(has_table);
}
TEST_F(MySqlDBTest, PARTITION_TEST) {
milvus::engine::meta::TableSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
ASSERT_TRUE(stat.ok());
// create partition and insert data
const int64_t PARTITION_COUNT = 5;
const int64_t INSERT_BATCH = 2000;
std::string table_name = TABLE_NAME;
for (int64_t i = 0; i < PARTITION_COUNT; i++) {
std::string partition_tag = std::to_string(i);
std::string partition_name = table_name + "_" + partition_tag;
stat = db_->CreatePartition(table_name, partition_name, partition_tag);
ASSERT_TRUE(stat.ok());
std::vector<float> xb;
BuildVectors(INSERT_BATCH, xb);
milvus::engine::IDNumbers vector_ids;
vector_ids.resize(INSERT_BATCH);
for (int64_t k = 0; k < INSERT_BATCH; k++) {
vector_ids[k] = i*INSERT_BATCH + k;
}
db_->InsertVectors(table_name, partition_tag, INSERT_BATCH, xb.data(), vector_ids);
ASSERT_EQ(vector_ids.size(), INSERT_BATCH);
}
//duplicated partition is not allowed
stat = db_->CreatePartition(table_name, "", "0");
ASSERT_FALSE(stat.ok());
std::vector<milvus::engine::meta::TableSchema> partiton_schema_array;
stat = db_->ShowPartitions(table_name, partiton_schema_array);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(partiton_schema_array.size(), PARTITION_COUNT);
for (int64_t i = 0; i < PARTITION_COUNT; i++) {
ASSERT_EQ(partiton_schema_array[i].table_id_, table_name + "_" + std::to_string(i));
}
{ // build index
milvus::engine::TableIndex index;
index.engine_type_ = (int) milvus::engine::EngineType::FAISS_IVFFLAT;
index.metric_type_ = (int) milvus::engine::MetricType::L2;
stat = db_->CreateIndex(table_info.table_id_, index);
ASSERT_TRUE(stat.ok());
uint64_t row_count = 0;
stat = db_->GetTableRowCount(TABLE_NAME, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, INSERT_BATCH*PARTITION_COUNT);
}
{ // search
const int64_t nq = 5;
const int64_t topk = 10;
const int64_t nprobe = 10;
std::vector<float> xq;
BuildVectors(nq, xq);
// specify partition tags
std::vector<std::string> tags = {"0", std::to_string(PARTITION_COUNT - 1)};
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(TABLE_NAME, tags, 10, nq, 10, xq.data(), result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size()/topk, nq);
// search in whole table
tags.clear();
result_ids.clear();
result_distances.clear();
stat = db_->Query(TABLE_NAME, tags, 10, nq, 10, xq.data(), result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size()/topk, nq);
// search in all partitions(tag regex match)
tags.push_back("\\d");
result_ids.clear();
result_distances.clear();
stat = db_->Query(TABLE_NAME, tags, 10, nq, 10, xq.data(), result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size()/topk, nq);
}
stat = db_->DropPartition(table_name + "_0");
ASSERT_TRUE(stat.ok());
stat = db_->DropPartitionByTag(table_name, "1");
ASSERT_TRUE(stat.ok());
stat = db_->DropIndex(table_name);
ASSERT_TRUE(stat.ok());
milvus::engine::meta::DatesT dates;
stat = db_->DropTable(table_name, dates);
ASSERT_TRUE(stat.ok());
}

View File

@ -231,7 +231,7 @@ TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) {
vector_ids.push_back(i);
}
stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids);
stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids);
ASSERT_TRUE(stat.ok());
std::this_thread::sleep_for(std::chrono::seconds(3)); // ensure raw data write to disk
@ -254,10 +254,13 @@ TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) {
int topk = 10, nprobe = 10;
for (auto& pair : search_vectors) {
auto& search = pair.second;
milvus::engine::QueryResults results;
stat = db_->Query(GetTableName(), topk, 1, nprobe, search.data(), results);
ASSERT_EQ(results[0][0].first, pair.first);
ASSERT_LT(results[0][0].second, 1e-4);
std::vector<std::string> tags;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(GetTableName(), tags, topk, 1, nprobe, search.data(), result_ids, result_distances);
ASSERT_EQ(result_ids[0], pair.first);
ASSERT_LT(result_distances[0], 1e-4);
}
}
@ -279,7 +282,7 @@ TEST_F(MemManagerTest2, INSERT_TEST) {
std::vector<float> xb;
BuildVectors(nb, xb);
milvus::engine::IDNumbers vector_ids;
stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids);
stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids);
ASSERT_TRUE(stat.ok());
}
auto end_time = METRICS_NOW_TIME;
@ -309,7 +312,8 @@ TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) {
BuildVectors(qb, qxb);
std::thread search([&]() {
milvus::engine::QueryResults results;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
int k = 10;
std::this_thread::sleep_for(std::chrono::seconds(2));
@ -324,17 +328,19 @@ TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) {
prev_count = count;
START_TIMER;
stat = db_->Query(GetTableName(), k, qb, 10, qxb.data(), results);
std::vector<std::string> tags;
stat = db_->Query(GetTableName(), tags, k, qb, 10, qxb.data(), result_ids, result_distances);
ss << "Search " << j << " With Size " << count / milvus::engine::M << " M";
STOP_TIMER(ss.str());
ASSERT_TRUE(stat.ok());
for (auto k = 0; k < qb; ++k) {
ASSERT_EQ(results[k][0].first, target_ids[k]);
for (auto i = 0; i < qb; ++i) {
ASSERT_EQ(result_ids[i * k], target_ids[i]);
ss.str("");
ss << "Result [" << k << "]:";
for (auto result : results[k]) {
ss << result.first << " ";
ss << "Result [" << i << "]:";
for (auto t = 0; t < k; t++) {
ss << result_ids[i * k + t] << " ";
}
/* LOG(DEBUG) << ss.str(); */
}
@ -347,10 +353,10 @@ TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) {
for (auto i = 0; i < loop; ++i) {
if (i == 0) {
db_->InsertVectors(GetTableName(), qb, qxb.data(), target_ids);
db_->InsertVectors(GetTableName(), "", qb, qxb.data(), target_ids);
ASSERT_EQ(target_ids.size(), qb);
} else {
db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids);
db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids);
}
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
@ -379,7 +385,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) {
vector_ids[i] = i;
}
stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids);
stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids);
ASSERT_EQ(vector_ids[0], 0);
ASSERT_TRUE(stat.ok());
@ -391,7 +397,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) {
for (auto i = 0; i < nb; i++) {
vector_ids[i] = i + nb;
}
stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids);
stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids);
ASSERT_EQ(vector_ids[0], nb);
ASSERT_TRUE(stat.ok());
@ -403,7 +409,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) {
for (auto i = 0; i < nb; i++) {
vector_ids[i] = i + nb / 2;
}
stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids);
stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids);
ASSERT_EQ(vector_ids[0], nb / 2);
ASSERT_TRUE(stat.ok());
@ -411,7 +417,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) {
xb.clear();
BuildVectors(nb, xb);
vector_ids.clear();
stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids);
stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids);
ASSERT_TRUE(stat.ok());
nb = 100;
@ -422,7 +428,7 @@ TEST_F(MemManagerTest2, VECTOR_IDS_TEST) {
for (auto i = 0; i < nb; i++) {
vector_ids[i] = i + nb;
}
stat = db_->InsertVectors(GetTableName(), nb, xb.data(), vector_ids);
stat = db_->InsertVectors(GetTableName(), "", nb, xb.data(), vector_ids);
for (auto i = 0; i < nb; i++) {
ASSERT_EQ(vector_ids[i], i + nb);
}

View File

@ -84,14 +84,14 @@ TEST_F(MetaTest, TABLE_FILE_TEST) {
milvus::engine::meta::DatesT dates;
dates.push_back(milvus::engine::utils::GetDate());
status = impl_->DropPartitionsByDates(table_file.table_id_, dates);
status = impl_->DropDataByDate(table_file.table_id_, dates);
ASSERT_TRUE(status.ok());
dates.clear();
for (auto i = 2; i < 10; ++i) {
dates.push_back(milvus::engine::utils::GetDateWithDelta(-1 * i));
}
status = impl_->DropPartitionsByDates(table_file.table_id_, dates);
status = impl_->DropDataByDate(table_file.table_id_, dates);
ASSERT_TRUE(status.ok());
table_file.date_ = milvus::engine::utils::GetDateWithDelta(-2);
@ -102,7 +102,7 @@ TEST_F(MetaTest, TABLE_FILE_TEST) {
dates.clear();
dates.push_back(table_file.date_);
status = impl_->DropPartitionsByDates(table_file.table_id_, dates);
status = impl_->DropDataByDate(table_file.table_id_, dates);
ASSERT_TRUE(status.ok());
std::vector<size_t> ids = {table_file.id_};
@ -332,7 +332,7 @@ TEST_F(MetaTest, TABLE_FILES_TEST) {
status = impl_->CleanUp();
ASSERT_TRUE(status.ok());
status = impl_->DeleteTable(table_id);
status = impl_->DropTable(table_id);
ASSERT_TRUE(status.ok());
status = impl_->CleanUpFilesWithTTL(1UL);

View File

@ -74,7 +74,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) {
milvus::engine::meta::DatesT dates;
dates.push_back(milvus::engine::utils::GetDate());
status = impl_->DropPartitionsByDates(table_file.table_id_, dates);
status = impl_->DropDataByDate(table_file.table_id_, dates);
ASSERT_TRUE(status.ok());
uint64_t cnt = 0;
@ -95,7 +95,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) {
for (auto i = 2; i < 10; ++i) {
dates.push_back(milvus::engine::utils::GetDateWithDelta(-1 * i));
}
status = impl_->DropPartitionsByDates(table_file.table_id_, dates);
status = impl_->DropDataByDate(table_file.table_id_, dates);
ASSERT_TRUE(status.ok());
table_file.date_ = milvus::engine::utils::GetDateWithDelta(-2);
@ -106,7 +106,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) {
dates.clear();
dates.push_back(table_file.date_);
status = impl_->DropPartitionsByDates(table_file.table_id_, dates);
status = impl_->DropDataByDate(table_file.table_id_, dates);
ASSERT_TRUE(status.ok());
std::vector<size_t> ids = {table_file.id_};
@ -346,7 +346,7 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) {
status = impl_->DeleteTableFiles(table_id);
ASSERT_TRUE(status.ok());
status = impl_->DeleteTable(table_id);
status = impl_->DropTable(table_id);
ASSERT_TRUE(status.ok());
status = impl_->CleanUpFilesWithTTL(0UL);

View File

@ -19,73 +19,97 @@
#include <cmath>
#include <vector>
#include "scheduler/job/SearchJob.h"
#include "scheduler/task/SearchTask.h"
#include "utils/ThreadPool.h"
#include "utils/TimeRecorder.h"
#include "utils/ThreadPool.h"
namespace {
namespace ms = milvus::scheduler;
void
BuildResult(std::vector<int64_t>& output_ids, std::vector<float>& output_distance, uint64_t input_k, uint64_t topk,
uint64_t nq, bool ascending) {
BuildResult(ms::ResultIds& output_ids,
ms::ResultDistances & output_distances,
size_t input_k,
size_t topk,
size_t nq,
bool ascending) {
output_ids.clear();
output_ids.resize(nq * topk);
output_distance.clear();
output_distance.resize(nq * topk);
output_distances.clear();
output_distances.resize(nq * topk);
for (uint64_t i = 0; i < nq; i++) {
// insert valid items
for (uint64_t j = 0; j < input_k; j++) {
for (size_t i = 0; i < nq; i++) {
//insert valid items
for (size_t j = 0; j < input_k; j++) {
output_ids[i * topk + j] = (int64_t)(drand48() * 100000);
output_distance[i * topk + j] = ascending ? (j + drand48()) : ((input_k - j) + drand48());
output_distances[i * topk + j] = ascending ? (j + drand48()) : ((input_k - j) + drand48());
}
// insert invalid items
for (uint64_t j = input_k; j < topk; j++) {
//insert invalid items
for (size_t j = input_k; j < topk; j++) {
output_ids[i * topk + j] = -1;
output_distance[i * topk + j] = -1.0;
output_distances[i * topk + j] = -1.0;
}
}
}
void
CopyResult(std::vector<int64_t>& output_ids, std::vector<float>& output_distance, uint64_t output_topk,
std::vector<int64_t>& input_ids, std::vector<float>& input_distance, uint64_t input_topk, uint64_t nq) {
CopyResult(ms::ResultIds& output_ids,
ms::ResultDistances& output_distances,
size_t output_topk,
ms::ResultIds& input_ids,
ms::ResultDistances& input_distances,
size_t input_topk,
size_t nq) {
ASSERT_TRUE(input_ids.size() >= nq * input_topk);
ASSERT_TRUE(input_distance.size() >= nq * input_topk);
ASSERT_TRUE(input_distances.size() >= nq * input_topk);
ASSERT_TRUE(output_topk <= input_topk);
output_ids.clear();
output_ids.resize(nq * output_topk);
output_distance.clear();
output_distance.resize(nq * output_topk);
output_distances.clear();
output_distances.resize(nq * output_topk);
for (uint64_t i = 0; i < nq; i++) {
for (uint64_t j = 0; j < output_topk; j++) {
for (size_t i = 0; i < nq; i++) {
for (size_t j = 0; j < output_topk; j++) {
output_ids[i * output_topk + j] = input_ids[i * input_topk + j];
output_distance[i * output_topk + j] = input_distance[i * input_topk + j];
output_distances[i * output_topk + j] = input_distances[i * input_topk + j];
}
}
}
void
CheckTopkResult(const std::vector<int64_t>& input_ids_1, const std::vector<float>& input_distance_1,
const std::vector<int64_t>& input_ids_2, const std::vector<float>& input_distance_2, uint64_t topk,
uint64_t nq, bool ascending, const milvus::scheduler::ResultSet& result) {
ASSERT_EQ(result.size(), nq);
ASSERT_EQ(input_ids_1.size(), input_distance_1.size());
ASSERT_EQ(input_ids_2.size(), input_distance_2.size());
CheckTopkResult(const ms::ResultIds& input_ids_1,
const ms::ResultDistances& input_distances_1,
size_t input_k_1,
const ms::ResultIds& input_ids_2,
const ms::ResultDistances& input_distances_2,
size_t input_k_2,
size_t topk,
size_t nq,
bool ascending,
const ms::ResultIds& result_ids,
const ms::ResultDistances& result_distances) {
ASSERT_EQ(result_ids.size(), result_distances.size());
ASSERT_EQ(input_ids_1.size(), input_distances_1.size());
ASSERT_EQ(input_ids_2.size(), input_distances_2.size());
for (int64_t i = 0; i < nq; i++) {
std::vector<float> src_vec(input_distance_1.begin() + i * topk, input_distance_1.begin() + (i + 1) * topk);
src_vec.insert(src_vec.end(), input_distance_2.begin() + i * topk, input_distance_2.begin() + (i + 1) * topk);
size_t result_k = result_distances.size() / nq;
ASSERT_EQ(result_k, std::min(topk, input_k_1 + input_k_2));
for (size_t i = 0; i < nq; i++) {
std::vector<float>
src_vec(input_distances_1.begin() + i * topk, input_distances_1.begin() + (i + 1) * topk);
src_vec.insert(src_vec.end(),
input_distances_2.begin() + i * topk,
input_distances_2.begin() + (i + 1) * topk);
if (ascending) {
std::sort(src_vec.begin(), src_vec.end());
} else {
std::sort(src_vec.begin(), src_vec.end(), std::greater<float>());
}
// erase invalid items
//erase invalid items
std::vector<float>::iterator iter;
for (iter = src_vec.begin(); iter != src_vec.end();) {
if (*iter < 0.0)
@ -94,36 +118,38 @@ CheckTopkResult(const std::vector<int64_t>& input_ids_1, const std::vector<float
++iter;
}
uint64_t n = std::min(topk, result[i].size());
for (uint64_t j = 0; j < n; j++) {
if (result[i][j].first < 0) {
size_t n = std::min(topk, result_ids.size() / nq);
for (size_t j = 0; j < n; j++) {
size_t idx = i * n + j;
if (result_ids[idx] < 0) {
continue;
}
if (src_vec[j] != result[i][j].second) {
std::cout << src_vec[j] << " " << result[i][j].second << std::endl;
if (src_vec[j] != result_distances[idx]) {
std::cout << src_vec[j] << " " << result_distances[idx] << std::endl;
}
ASSERT_TRUE(src_vec[j] == result[i][j].second);
ASSERT_TRUE(src_vec[j] == result_distances[idx]);
}
}
}
} // namespace
} // namespace
void
MergeTopkToResultSetTest(uint64_t topk_1, uint64_t topk_2, uint64_t nq, uint64_t topk, bool ascending) {
std::vector<int64_t> ids1, ids2;
std::vector<float> dist1, dist2;
ms::ResultSet result;
MergeTopkToResultSetTest(size_t topk_1, size_t topk_2, size_t nq, size_t topk, bool ascending) {
ms::ResultIds ids1, ids2;
ms::ResultDistances dist1, dist2;
ms::ResultIds result_ids;
ms::ResultDistances result_distances;
BuildResult(ids1, dist1, topk_1, topk, nq, ascending);
BuildResult(ids2, dist2, topk_2, topk, nq, ascending);
ms::XSearchTask::MergeTopkToResultSet(ids1, dist1, topk_1, nq, topk, ascending, result);
ms::XSearchTask::MergeTopkToResultSet(ids2, dist2, topk_2, nq, topk, ascending, result);
CheckTopkResult(ids1, dist1, ids2, dist2, topk, nq, ascending, result);
ms::XSearchTask::MergeTopkToResultSet(ids1, dist1, topk_1, nq, topk, ascending, result_ids, result_distances);
ms::XSearchTask::MergeTopkToResultSet(ids2, dist2, topk_2, nq, topk, ascending, result_ids, result_distances);
CheckTopkResult(ids1, dist1, topk_1, ids2, dist2, topk_2, topk, nq, ascending, result_ids, result_distances);
}
TEST(DBSearchTest, MERGE_RESULT_SET_TEST) {
uint64_t NQ = 15;
uint64_t TOP_K = 64;
size_t NQ = 15;
size_t TOP_K = 64;
/* test1, id1/dist1 valid, id2/dist2 empty */
MergeTopkToResultSetTest(TOP_K, 0, NQ, TOP_K, true);
@ -142,21 +168,21 @@ TEST(DBSearchTest, MERGE_RESULT_SET_TEST) {
MergeTopkToResultSetTest(TOP_K / 2, TOP_K / 3, NQ, TOP_K, false);
}
// void MergeTopkArrayTest(uint64_t topk_1, uint64_t topk_2, uint64_t nq, uint64_t topk, bool ascending) {
//void MergeTopkArrayTest(size_t topk_1, size_t topk_2, size_t nq, size_t topk, bool ascending) {
// std::vector<int64_t> ids1, ids2;
// std::vector<float> dist1, dist2;
// ms::ResultSet result;
// BuildResult(ids1, dist1, topk_1, topk, nq, ascending);
// BuildResult(ids2, dist2, topk_2, topk, nq, ascending);
// uint64_t result_topk = std::min(topk, topk_1 + topk_2);
// size_t result_topk = std::min(topk, topk_1 + topk_2);
// ms::XSearchTask::MergeTopkArray(ids1, dist1, topk_1, ids2, dist2, topk_2, nq, topk, ascending);
// if (ids1.size() != result_topk * nq) {
// std::cout << ids1.size() << " " << result_topk * nq << std::endl;
// }
// ASSERT_TRUE(ids1.size() == result_topk * nq);
// ASSERT_TRUE(dist1.size() == result_topk * nq);
// for (uint64_t i = 0; i < nq; i++) {
// for (uint64_t k = 1; k < result_topk; k++) {
// for (size_t i = 0; i < nq; i++) {
// for (size_t k = 1; k < result_topk; k++) {
// float f0 = dist1[i * topk + k - 1];
// float f1 = dist1[i * topk + k];
// if (ascending) {
@ -174,9 +200,9 @@ TEST(DBSearchTest, MERGE_RESULT_SET_TEST) {
// }
//}
// TEST(DBSearchTest, MERGE_ARRAY_TEST) {
// uint64_t NQ = 15;
// uint64_t TOP_K = 64;
//TEST(DBSearchTest, MERGE_ARRAY_TEST) {
// size_t NQ = 15;
// size_t TOP_K = 64;
//
// /* test1, id1/dist1 valid, id2/dist2 empty */
// MergeTopkArrayTest(TOP_K, 0, NQ, TOP_K, true);
@ -202,26 +228,26 @@ TEST(DBSearchTest, MERGE_RESULT_SET_TEST) {
//}
TEST(DBSearchTest, REDUCE_PERF_TEST) {
int32_t index_file_num = 478; /* sift1B dataset, index files num */
int32_t index_file_num = 478; /* sift1B dataset, index files num */
bool ascending = true;
std::vector<int32_t> thread_vec = {4, 8};
std::vector<int32_t> nq_vec = {1, 10, 100};
std::vector<int32_t> topk_vec = {1, 4, 16, 64};
int32_t NQ = nq_vec[nq_vec.size() - 1];
int32_t TOPK = topk_vec[topk_vec.size() - 1];
std::vector<size_t> thread_vec = {4};
std::vector<size_t> nq_vec = {1000};
std::vector<size_t> topk_vec = {64};
size_t NQ = nq_vec[nq_vec.size() - 1];
size_t TOPK = topk_vec[topk_vec.size() - 1];
std::vector<std::vector<int64_t>> id_vec;
std::vector<std::vector<float>> dist_vec;
std::vector<int64_t> input_ids;
std::vector<float> input_distance;
std::vector<ms::ResultIds> id_vec;
std::vector<ms::ResultDistances> dist_vec;
ms::ResultIds input_ids;
ms::ResultDistances input_distances;
int32_t i, k, step;
/* generate testing data */
for (i = 0; i < index_file_num; i++) {
BuildResult(input_ids, input_distance, TOPK, TOPK, NQ, ascending);
BuildResult(input_ids, input_distances, TOPK, TOPK, NQ, ascending);
id_vec.push_back(input_ids);
dist_vec.push_back(input_distance);
dist_vec.push_back(input_distances);
}
for (int32_t max_thread_num : thread_vec) {
@ -230,136 +256,144 @@ TEST(DBSearchTest, REDUCE_PERF_TEST) {
for (int32_t nq : nq_vec) {
for (int32_t top_k : topk_vec) {
ms::ResultSet final_result, final_result_2, final_result_3;
ms::ResultIds final_result_ids, final_result_ids_2, final_result_ids_3;
ms::ResultDistances final_result_distances, final_result_distances_2, final_result_distances_3;
std::vector<std::vector<int64_t>> id_vec_1(index_file_num);
std::vector<std::vector<float>> dist_vec_1(index_file_num);
std::vector<ms::ResultIds> id_vec_1(index_file_num);
std::vector<ms::ResultDistances> dist_vec_1(index_file_num);
for (i = 0; i < index_file_num; i++) {
CopyResult(id_vec_1[i], dist_vec_1[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
}
std::string str1 = "Method-1 " + std::to_string(max_thread_num) + " " + std::to_string(nq) + " " +
std::to_string(top_k);
std::string str1 = "Method-1 " + std::to_string(max_thread_num) + " " +
std::to_string(nq) + " " + std::to_string(top_k);
milvus::TimeRecorder rc1(str1);
///////////////////////////////////////////////////////////////////////////////////////
/* method-1 */
for (i = 0; i < index_file_num; i++) {
ms::XSearchTask::MergeTopkToResultSet(id_vec_1[i], dist_vec_1[i], top_k, nq, top_k, ascending,
final_result);
ASSERT_EQ(final_result.size(), nq);
ms::XSearchTask::MergeTopkToResultSet(id_vec_1[i],
dist_vec_1[i],
top_k,
nq,
top_k,
ascending,
final_result_ids,
final_result_distances);
ASSERT_EQ(final_result_ids.size(), nq * top_k);
ASSERT_EQ(final_result_distances.size(), nq * top_k);
}
rc1.RecordSection("reduce done");
// ///////////////////////////////////////////////////////////////////////////////////////
// /* method-2 */
// std::vector<std::vector<int64_t>> id_vec_2(index_file_num);
// std::vector<std::vector<float>> dist_vec_2(index_file_num);
// std::vector<uint64_t> k_vec_2(index_file_num);
// for (i = 0; i < index_file_num; i++) {
// CopyResult(id_vec_2[i], dist_vec_2[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
// k_vec_2[i] = top_k;
// }
//
// std::string str2 = "Method-2 " + std::to_string(max_thread_num) + " " +
// std::to_string(nq) + " " + std::to_string(top_k);
// milvus::TimeRecorder rc2(str2);
//
// for (step = 1; step < index_file_num; step *= 2) {
// for (i = 0; i + step < index_file_num; i += step * 2) {
// ms::XSearchTask::MergeTopkArray(id_vec_2[i], dist_vec_2[i], k_vec_2[i],
// id_vec_2[i + step], dist_vec_2[i + step],
// k_vec_2[i + step], nq, top_k, ascending);
// }
// }
// ms::XSearchTask::MergeTopkToResultSet(id_vec_2[0],
// dist_vec_2[0],
// k_vec_2[0],
// nq,
// top_k,
// ascending,
// final_result_2);
// ASSERT_EQ(final_result_2.size(), nq);
//
// rc2.RecordSection("reduce done");
//
// for (i = 0; i < nq; i++) {
// ASSERT_EQ(final_result[i].size(), final_result_2[i].size());
// for (k = 0; k < final_result[i].size(); k++) {
// if (final_result[i][k].first != final_result_2[i][k].first) {
// std::cout << i << " " << k << std::endl;
// }
// ASSERT_EQ(final_result[i][k].first, final_result_2[i][k].first);
// ASSERT_EQ(final_result[i][k].second, final_result_2[i][k].second);
// }
// }
//
// ///////////////////////////////////////////////////////////////////////////////////////
// /* method-3 parallel */
// std::vector<std::vector<int64_t>> id_vec_3(index_file_num);
// std::vector<std::vector<float>> dist_vec_3(index_file_num);
// std::vector<uint64_t> k_vec_3(index_file_num);
// for (i = 0; i < index_file_num; i++) {
// CopyResult(id_vec_3[i], dist_vec_3[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
// k_vec_3[i] = top_k;
// }
//
// std::string str3 = "Method-3 " + std::to_string(max_thread_num) + " " +
// std::to_string(nq) + " " + std::to_string(top_k);
// milvus::TimeRecorder rc3(str3);
//
// for (step = 1; step < index_file_num; step *= 2) {
// for (i = 0; i + step < index_file_num; i += step * 2) {
// threads_list.push_back(
// threadPool.enqueue(ms::XSearchTask::MergeTopkArray,
// std::ref(id_vec_3[i]),
// std::ref(dist_vec_3[i]),
// std::ref(k_vec_3[i]),
// std::ref(id_vec_3[i + step]),
// std::ref(dist_vec_3[i + step]),
// std::ref(k_vec_3[i + step]),
// nq,
// top_k,
// ascending));
// }
//
// while (threads_list.size() > 0) {
// int nready = 0;
// for (auto it = threads_list.begin(); it != threads_list.end(); it = it) {
// auto &p = *it;
// std::chrono::milliseconds span(0);
// if (p.wait_for(span) == std::future_status::ready) {
// threads_list.erase(it++);
// ++nready;
// } else {
// ++it;
// }
// }
//
// if (nready == 0) {
// std::this_thread::yield();
// }
// }
// }
// ms::XSearchTask::MergeTopkToResultSet(id_vec_3[0],
// dist_vec_3[0],
// k_vec_3[0],
// nq,
// top_k,
// ascending,
// final_result_3);
// ASSERT_EQ(final_result_3.size(), nq);
//
// rc3.RecordSection("reduce done");
//
// for (i = 0; i < nq; i++) {
// ASSERT_EQ(final_result[i].size(), final_result_3[i].size());
// for (k = 0; k < final_result[i].size(); k++) {
// ASSERT_EQ(final_result[i][k].first, final_result_3[i][k].first);
// ASSERT_EQ(final_result[i][k].second, final_result_3[i][k].second);
// }
// }
// ///////////////////////////////////////////////////////////////////////////////////////
// /* method-2 */
// std::vector<std::vector<int64_t>> id_vec_2(index_file_num);
// std::vector<std::vector<float>> dist_vec_2(index_file_num);
// std::vector<size_t> k_vec_2(index_file_num);
// for (i = 0; i < index_file_num; i++) {
// CopyResult(id_vec_2[i], dist_vec_2[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
// k_vec_2[i] = top_k;
// }
//
// std::string str2 = "Method-2 " + std::to_string(max_thread_num) + " " +
// std::to_string(nq) + " " + std::to_string(top_k);
// milvus::TimeRecorder rc2(str2);
//
// for (step = 1; step < index_file_num; step *= 2) {
// for (i = 0; i + step < index_file_num; i += step * 2) {
// ms::XSearchTask::MergeTopkArray(id_vec_2[i], dist_vec_2[i], k_vec_2[i],
// id_vec_2[i + step], dist_vec_2[i + step], k_vec_2[i + step],
// nq, top_k, ascending);
// }
// }
// ms::XSearchTask::MergeTopkToResultSet(id_vec_2[0],
// dist_vec_2[0],
// k_vec_2[0],
// nq,
// top_k,
// ascending,
// final_result_2);
// ASSERT_EQ(final_result_2.size(), nq);
//
// rc2.RecordSection("reduce done");
//
// for (i = 0; i < nq; i++) {
// ASSERT_EQ(final_result[i].size(), final_result_2[i].size());
// for (k = 0; k < final_result[i].size(); k++) {
// if (final_result[i][k].first != final_result_2[i][k].first) {
// std::cout << i << " " << k << std::endl;
// }
// ASSERT_EQ(final_result[i][k].first, final_result_2[i][k].first);
// ASSERT_EQ(final_result[i][k].second, final_result_2[i][k].second);
// }
// }
//
// ///////////////////////////////////////////////////////////////////////////////////////
// /* method-3 parallel */
// std::vector<std::vector<int64_t>> id_vec_3(index_file_num);
// std::vector<std::vector<float>> dist_vec_3(index_file_num);
// std::vector<size_t> k_vec_3(index_file_num);
// for (i = 0; i < index_file_num; i++) {
// CopyResult(id_vec_3[i], dist_vec_3[i], top_k, id_vec[i], dist_vec[i], TOPK, nq);
// k_vec_3[i] = top_k;
// }
//
// std::string str3 = "Method-3 " + std::to_string(max_thread_num) + " " +
// std::to_string(nq) + " " + std::to_string(top_k);
// milvus::TimeRecorder rc3(str3);
//
// for (step = 1; step < index_file_num; step *= 2) {
// for (i = 0; i + step < index_file_num; i += step * 2) {
// threads_list.push_back(
// threadPool.enqueue(ms::XSearchTask::MergeTopkArray,
// std::ref(id_vec_3[i]),
// std::ref(dist_vec_3[i]),
// std::ref(k_vec_3[i]),
// std::ref(id_vec_3[i + step]),
// std::ref(dist_vec_3[i + step]),
// std::ref(k_vec_3[i + step]),
// nq,
// top_k,
// ascending));
// }
//
// while (threads_list.size() > 0) {
// int nready = 0;
// for (auto it = threads_list.begin(); it != threads_list.end(); it = it) {
// auto &p = *it;
// std::chrono::milliseconds span(0);
// if (p.wait_for(span) == std::future_status::ready) {
// threads_list.erase(it++);
// ++nready;
// } else {
// ++it;
// }
// }
//
// if (nready == 0) {
// std::this_thread::yield();
// }
// }
// }
// ms::XSearchTask::MergeTopkToResultSet(id_vec_3[0],
// dist_vec_3[0],
// k_vec_3[0],
// nq,
// top_k,
// ascending,
// final_result_3);
// ASSERT_EQ(final_result_3.size(), nq);
//
// rc3.RecordSection("reduce done");
//
// for (i = 0; i < nq; i++) {
// ASSERT_EQ(final_result[i].size(), final_result_3[i].size());
// for (k = 0; k < final_result[i].size(); k++) {
// ASSERT_EQ(final_result[i][k].first, final_result_3[i][k].first);
// ASSERT_EQ(final_result[i][k].second, final_result_3[i][k].second);
// }
// }
}
}
}

View File

@ -15,19 +15,19 @@
// specific language governing permissions and limitations
// under the License.
#include <gtest/gtest.h>
#include <chrono>
#include <map>
#include <memory>
#include <string>
#include <thread>
#include <gtest/gtest.h>
#include "cache/CpuCacheMgr.h"
#include "db/DB.h"
#include "db/meta/SqliteMetaImpl.h"
#include "server/Config.h"
#include "metrics/Metrics.h"
#include "metrics/utils.h"
#include "server/Config.h"
#include "db/DB.h"
#include "db/meta/SqliteMetaImpl.h"
TEST_F(MetricTest, METRIC_TEST) {
milvus::server::Config::GetInstance().SetMetricConfigCollector("zabbix");
@ -36,15 +36,15 @@ TEST_F(MetricTest, METRIC_TEST) {
milvus::server::Metrics::GetInstance();
milvus::server::SystemInfo::GetInstance().Init();
// server::Metrics::GetInstance().Init();
// server::Metrics::GetInstance().exposer_ptr()->RegisterCollectable(server::Metrics::GetInstance().registry_ptr());
// server::Metrics::GetInstance().Init();
// server::Metrics::GetInstance().exposer_ptr()->RegisterCollectable(server::Metrics::GetInstance().registry_ptr());
milvus::server::Metrics::GetInstance().Init();
// server::PrometheusMetrics::GetInstance().exposer_ptr()->RegisterCollectable(server::PrometheusMetrics::GetInstance().registry_ptr());
// server::PrometheusMetrics::GetInstance().exposer_ptr()->RegisterCollectable(server::PrometheusMetrics::GetInstance().registry_ptr());
milvus::cache::CpuCacheMgr::GetInstance()->SetCapacity(1UL * 1024 * 1024 * 1024);
std::cout << milvus::cache::CpuCacheMgr::GetInstance()->CacheCapacity() << std::endl;
static const char* group_name = "test_group";
static const char *group_name = "test_group";
static const int group_dim = 256;
milvus::engine::meta::TableSchema group_info;
@ -61,21 +61,23 @@ TEST_F(MetricTest, METRIC_TEST) {
int d = 256;
int nb = 50;
float* xb = new float[d * nb];
float *xb = new float[d * nb];
for (int i = 0; i < nb; i++) {
for (int j = 0; j < d; j++) xb[d * i + j] = drand48();
xb[d * i] += i / 2000.;
}
int qb = 5;
float* qxb = new float[d * qb];
float *qxb = new float[d * qb];
for (int i = 0; i < qb; i++) {
for (int j = 0; j < d; j++) qxb[d * i + j] = drand48();
qxb[d * i] += i / 2000.;
}
std::thread search([&]() {
milvus::engine::QueryResults results;
// std::vector<std::string> tags;
// milvus::engine::ResultIds result_ids;
// milvus::engine::ResultDistances result_distances;
int k = 10;
std::this_thread::sleep_for(std::chrono::seconds(2));
@ -90,16 +92,17 @@ TEST_F(MetricTest, METRIC_TEST) {
prev_count = count;
START_TIMER;
// stat = db_->Query(group_name, k, qb, qxb, results);
ss << "Search " << j << " With Size " << (float)(count * group_dim * sizeof(float)) / (1024 * 1024) << " M";
// stat = db_->Query(group_name, tags, k, qb, qxb, result_ids, result_distances);
ss << "Search " << j << " With Size " << (float) (count * group_dim * sizeof(float)) / (1024 * 1024)
<< " M";
for (auto k = 0; k < qb; ++k) {
// ASSERT_EQ(results[k][0].first, target_ids[k]);
// ASSERT_EQ(results[k][0].first, target_ids[k]);
ss.str("");
ss << "Result [" << k << "]:";
// for (auto result : results[k]) {
// ss << result.first << " ";
// }
// for (auto result : results[k]) {
// ss << result.first << " ";
// }
}
ASSERT_TRUE(count >= prev_count);
std::this_thread::sleep_for(std::chrono::seconds(1));
@ -110,10 +113,10 @@ TEST_F(MetricTest, METRIC_TEST) {
for (auto i = 0; i < loop; ++i) {
if (i == 40) {
db_->InsertVectors(group_name, qb, qxb, target_ids);
db_->InsertVectors(group_name, "", qb, qxb, target_ids);
ASSERT_EQ(target_ids.size(), qb);
} else {
db_->InsertVectors(group_name, nb, xb, vector_ids);
db_->InsertVectors(group_name, "", nb, xb, vector_ids);
}
std::this_thread::sleep_for(std::chrono::microseconds(2000));
}
@ -152,3 +155,5 @@ TEST_F(MetricTest, COLLECTOR_METRICS_TEST) {
milvus::server::MetricCollector metric_collector();
}

View File

@ -22,6 +22,7 @@
#include "server/Config.h"
#include "server/utils.h"
#include "utils/CommonUtil.h"
#include "utils/StringHelpFunctions.h"
#include "utils/ValidationUtil.h"
namespace {
@ -98,6 +99,328 @@ TEST_F(ConfigTest, CONFIG_TEST) {
ASSERT_TRUE(seqs.empty());
}
TEST_F(ConfigTest, SERVER_CONFIG_VALID_TEST) {
std::string config_path(CONFIG_PATH);
milvus::server::Config& config = milvus::server::Config::GetInstance();
milvus::Status s;
std::string str_val;
int32_t int32_val;
int64_t int64_val;
float float_val;
bool bool_val;
/* server config */
std::string server_addr = "192.168.1.155";
s = config.SetServerConfigAddress(server_addr);
ASSERT_TRUE(s.ok());
s = config.GetServerConfigAddress(str_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(str_val == server_addr);
std::string server_port = "12345";
s = config.SetServerConfigPort(server_port);
ASSERT_TRUE(s.ok());
s = config.GetServerConfigPort(str_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(str_val == server_port);
std::string server_mode = "cluster_readonly";
s = config.SetServerConfigDeployMode(server_mode);
ASSERT_TRUE(s.ok());
s = config.GetServerConfigDeployMode(str_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(str_val == server_mode);
std::string server_time_zone = "UTC+6";
s = config.SetServerConfigTimeZone(server_time_zone);
ASSERT_TRUE(s.ok());
s = config.GetServerConfigTimeZone(str_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(str_val == server_time_zone);
/* db config */
std::string db_primary_path = "/home/zilliz";
s = config.SetDBConfigPrimaryPath(db_primary_path);
ASSERT_TRUE(s.ok());
s = config.GetDBConfigPrimaryPath(str_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(str_val == db_primary_path);
std::string db_secondary_path = "/home/zilliz";
s = config.SetDBConfigSecondaryPath(db_secondary_path);
ASSERT_TRUE(s.ok());
s = config.GetDBConfigSecondaryPath(str_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(str_val == db_secondary_path);
std::string db_backend_url = "mysql://root:123456@127.0.0.1:19530/milvus";
s = config.SetDBConfigBackendUrl(db_backend_url);
ASSERT_TRUE(s.ok());
s = config.GetDBConfigBackendUrl(str_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(str_val == db_backend_url);
int32_t db_archive_disk_threshold = 100;
s = config.SetDBConfigArchiveDiskThreshold(std::to_string(db_archive_disk_threshold));
ASSERT_TRUE(s.ok());
s = config.GetDBConfigArchiveDiskThreshold(int32_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == db_archive_disk_threshold);
int32_t db_archive_days_threshold = 365;
s = config.SetDBConfigArchiveDaysThreshold(std::to_string(db_archive_days_threshold));
ASSERT_TRUE(s.ok());
s = config.GetDBConfigArchiveDaysThreshold(int32_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == db_archive_days_threshold);
int32_t db_insert_buffer_size = 2;
s = config.SetDBConfigInsertBufferSize(std::to_string(db_insert_buffer_size));
ASSERT_TRUE(s.ok());
s = config.GetDBConfigInsertBufferSize(int32_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == db_insert_buffer_size);
/* metric config */
bool metric_enable_monitor = false;
s = config.SetMetricConfigEnableMonitor(std::to_string(metric_enable_monitor));
ASSERT_TRUE(s.ok());
s = config.GetMetricConfigEnableMonitor(bool_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(bool_val == metric_enable_monitor);
std::string metric_collector = "prometheus";
s = config.SetMetricConfigCollector(metric_collector);
ASSERT_TRUE(s.ok());
s = config.GetMetricConfigCollector(str_val);
ASSERT_TRUE(str_val == metric_collector);
std::string metric_prometheus_port = "2222";
s = config.SetMetricConfigPrometheusPort(metric_prometheus_port);
ASSERT_TRUE(s.ok());
s = config.GetMetricConfigPrometheusPort(str_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(str_val == metric_prometheus_port);
/* cache config */
int64_t cache_cpu_cache_capacity = 5;
s = config.SetCacheConfigCpuCacheCapacity(std::to_string(cache_cpu_cache_capacity));
ASSERT_TRUE(s.ok());
s = config.GetCacheConfigCpuCacheCapacity(int64_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int64_val == cache_cpu_cache_capacity);
float cache_cpu_cache_threshold = 0.1;
s = config.SetCacheConfigCpuCacheThreshold(std::to_string(cache_cpu_cache_threshold));
ASSERT_TRUE(s.ok());
s = config.GetCacheConfigCpuCacheThreshold(float_val);
ASSERT_TRUE(float_val == cache_cpu_cache_threshold);
#ifdef MILVUS_GPU_VERSION
int64_t cache_gpu_cache_capacity = 1;
s = config.SetCacheConfigGpuCacheCapacity(std::to_string(cache_gpu_cache_capacity));
ASSERT_TRUE(s.ok());
s = config.GetCacheConfigGpuCacheCapacity(int64_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int64_val == cache_gpu_cache_capacity);
float cache_gpu_cache_threshold = 0.2;
s = config.SetCacheConfigGpuCacheThreshold(std::to_string(cache_gpu_cache_threshold));
ASSERT_TRUE(s.ok());
s = config.GetCacheConfigGpuCacheThreshold(float_val);
ASSERT_TRUE(float_val == cache_gpu_cache_threshold);
#endif
bool cache_insert_data = true;
s = config.SetCacheConfigCacheInsertData(std::to_string(cache_insert_data));
ASSERT_TRUE(s.ok());
s = config.GetCacheConfigCacheInsertData(bool_val);
ASSERT_TRUE(bool_val == cache_insert_data);
/* engine config */
int32_t engine_use_blas_threshold = 50;
s = config.SetEngineConfigUseBlasThreshold(std::to_string(engine_use_blas_threshold));
ASSERT_TRUE(s.ok());
s = config.GetEngineConfigUseBlasThreshold(int32_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == engine_use_blas_threshold);
int32_t engine_omp_thread_num = 8;
s = config.SetEngineConfigOmpThreadNum(std::to_string(engine_omp_thread_num));
ASSERT_TRUE(s.ok());
s = config.GetEngineConfigOmpThreadNum(int32_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == engine_omp_thread_num);
int32_t engine_gpu_search_threshold = 800;
s = config.SetEngineConfigGpuSearchThreshold(std::to_string(engine_gpu_search_threshold));
ASSERT_TRUE(s.ok());
s = config.GetEngineConfigGpuSearchThreshold(int32_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == engine_gpu_search_threshold);
/* resource config */
std::string resource_mode = "simple";
s = config.SetResourceConfigMode(resource_mode);
ASSERT_TRUE(s.ok());
s = config.GetResourceConfigMode(str_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(str_val == resource_mode);
#ifdef MILVUS_CPU_VERSION
std::vector<std::string> search_resources = {"cpu"};
#else
std::vector<std::string> search_resources = {"cpu", "gpu0"};
#endif
std::vector<std::string> res_vec;
std::string res_str;
milvus::server::StringHelpFunctions::MergeStringWithDelimeter(
search_resources, milvus::server::CONFIG_RESOURCE_SEARCH_RESOURCES_DELIMITER, res_str);
s = config.SetResourceConfigSearchResources(res_str);
ASSERT_TRUE(s.ok());
s = config.GetResourceConfigSearchResources(res_vec);
ASSERT_TRUE(s.ok());
for (size_t i = 0; i < search_resources.size(); i++) {
ASSERT_TRUE(search_resources[i] == res_vec[i]);
}
#ifdef MILVUS_CPU_VERSION
int32_t resource_index_build_device = milvus::server::CPU_DEVICE_ID;
s = config.SetResourceConfigIndexBuildDevice("cpu");
#else
int32_t resource_index_build_device = 0;
s = config.SetResourceConfigIndexBuildDevice("gpu" + std::to_string(resource_index_build_device));
#endif
ASSERT_TRUE(s.ok());
s = config.GetResourceConfigIndexBuildDevice(int32_val);
ASSERT_TRUE(s.ok());
ASSERT_TRUE(int32_val == resource_index_build_device);
}
// Negative-path coverage for milvus::server::Config: every setter must reject
// malformed, out-of-range or ill-typed values, and LoadConfigFile must fail
// for empty / invalid / non-existent paths.
TEST_F(ConfigTest, SERVER_CONFIG_INVALID_TEST) {
    std::string config_path(CONFIG_PATH);
    milvus::server::Config& config = milvus::server::Config::GetInstance();
    milvus::Status s;
    // Loading an empty path, a syntactically invalid file, or a missing file must fail.
    s = config.LoadConfigFile("");
    ASSERT_FALSE(s.ok());
    s = config.LoadConfigFile(config_path + INVALID_CONFIG_FILE);
    ASSERT_FALSE(s.ok());
    s = config.LoadConfigFile(config_path + "dummy.yaml");
    ASSERT_FALSE(s.ok());
    /* server config */
    // Incomplete and out-of-range IPv4 addresses.
    s = config.SetServerConfigAddress("0.0.0");
    ASSERT_FALSE(s.ok());
    s = config.SetServerConfigAddress("0.0.0.256");
    ASSERT_FALSE(s.ok());
    // Non-numeric port and port above the valid range.
    s = config.SetServerConfigPort("a");
    ASSERT_FALSE(s.ok());
    s = config.SetServerConfigPort("99999");
    ASSERT_FALSE(s.ok());
    // "cluster" is not a valid deploy mode (expects single/cluster_readonly/cluster_writable).
    s = config.SetServerConfigDeployMode("cluster");
    ASSERT_FALSE(s.ok());
    // Time zone must look like "UTC" optionally followed by a signed offset.
    s = config.SetServerConfigTimeZone("GM");
    ASSERT_FALSE(s.ok());
    s = config.SetServerConfigTimeZone("GMT8");
    ASSERT_FALSE(s.ok());
    s = config.SetServerConfigTimeZone("UTCA");
    ASSERT_FALSE(s.ok());
    /* db config */
    s = config.SetDBConfigPrimaryPath("");
    ASSERT_FALSE(s.ok());
    // NOTE(review): empty secondary path is apparently allowed, hence disabled.
    // s = config.SetDBConfigSecondaryPath("");
    // ASSERT_FALSE(s.ok());
    // Backend URL must be a well-formed sqlite/mysql dialect URI.
    s = config.SetDBConfigBackendUrl("http://www.google.com");
    ASSERT_FALSE(s.ok());
    s = config.SetDBConfigBackendUrl("sqlite://:@:");
    ASSERT_FALSE(s.ok());
    s = config.SetDBConfigBackendUrl("mysql://root:123456@127.0.0.1/milvus");
    ASSERT_FALSE(s.ok());
    // Hex-looking strings are not accepted as integers.
    s = config.SetDBConfigArchiveDiskThreshold("0x10");
    ASSERT_FALSE(s.ok());
    s = config.SetDBConfigArchiveDaysThreshold("0x10");
    ASSERT_FALSE(s.ok());
    // Insert buffer size: non-numeric, zero, and too large must all fail.
    s = config.SetDBConfigInsertBufferSize("a");
    ASSERT_FALSE(s.ok());
    s = config.SetDBConfigInsertBufferSize("0");
    ASSERT_FALSE(s.ok());
    s = config.SetDBConfigInsertBufferSize("2048");
    ASSERT_FALSE(s.ok());
    /* metric config */
    // Booleans must be spelled out; single letters are rejected.
    s = config.SetMetricConfigEnableMonitor("Y");
    ASSERT_FALSE(s.ok());
    s = config.SetMetricConfigCollector("zilliz");
    ASSERT_FALSE(s.ok());
    s = config.SetMetricConfigPrometheusPort("0xff");
    ASSERT_FALSE(s.ok());
    /* cache config */
    // CPU cache capacity: non-numeric, zero, and larger than available memory.
    s = config.SetCacheConfigCpuCacheCapacity("a");
    ASSERT_FALSE(s.ok());
    s = config.SetCacheConfigCpuCacheCapacity("0");
    ASSERT_FALSE(s.ok());
    s = config.SetCacheConfigCpuCacheCapacity("2048");
    ASSERT_FALSE(s.ok());
    // Threshold is a fraction in (0, 1); 1.0 is out of range.
    s = config.SetCacheConfigCpuCacheThreshold("a");
    ASSERT_FALSE(s.ok());
    s = config.SetCacheConfigCpuCacheThreshold("1.0");
    ASSERT_FALSE(s.ok());
#ifdef MILVUS_GPU_VERSION
    // GPU cache checks mirror the CPU ones; 128 GB exceeds GPU memory.
    s = config.SetCacheConfigGpuCacheCapacity("a");
    ASSERT_FALSE(s.ok());
    s = config.SetCacheConfigGpuCacheCapacity("128");
    ASSERT_FALSE(s.ok());
    s = config.SetCacheConfigGpuCacheThreshold("a");
    ASSERT_FALSE(s.ok());
    s = config.SetCacheConfigGpuCacheThreshold("1.0");
    ASSERT_FALSE(s.ok());
#endif
    s = config.SetCacheConfigCacheInsertData("N");
    ASSERT_FALSE(s.ok());
    /* engine config */
    s = config.SetEngineConfigUseBlasThreshold("0xff");
    ASSERT_FALSE(s.ok());
    // OMP thread count: non-numeric and absurdly large values are rejected.
    s = config.SetEngineConfigOmpThreadNum("a");
    ASSERT_FALSE(s.ok());
    s = config.SetEngineConfigOmpThreadNum("10000");
    ASSERT_FALSE(s.ok());
    s = config.SetEngineConfigGpuSearchThreshold("-1");
    ASSERT_FALSE(s.ok());
    /* resource config */
    // Only "simple" is a valid resource mode here; "default" must fail.
    s = config.SetResourceConfigMode("default");
    ASSERT_FALSE(s.ok());
    // Nonexistent GPU ids and misspelled device names are rejected.
    s = config.SetResourceConfigSearchResources("gpu10");
    ASSERT_FALSE(s.ok());
    s = config.SetResourceConfigIndexBuildDevice("gup2");
    ASSERT_FALSE(s.ok());
    s = config.SetResourceConfigIndexBuildDevice("gpu16");
    ASSERT_FALSE(s.ok());
}
TEST_F(ConfigTest, SERVER_CONFIG_TEST) {
std::string config_path(CONFIG_PATH);
milvus::server::Config& config = milvus::server::Config::GetInstance();

View File

@ -380,6 +380,44 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
ASSERT_EQ(error_code, ::milvus::grpc::ErrorCode::SUCCESS);
}
// Exercises the gRPC partition lifecycle: create a table, create a partition
// on it, list partitions, then drop the partition (by tag, then by name).
TEST_F(RpcHandlerTest, PARTITION_TEST) {
    ::grpc::ServerContext context;
    ::milvus::grpc::TableSchema table_schema;
    ::milvus::grpc::Status response;
    // Create the parent table that the partition will be attached to.
    std::string str_table_name = "tbl_partition";
    table_schema.set_table_name(str_table_name);
    table_schema.set_dimension(TABLE_DIM);
    table_schema.set_index_file_size(INDEX_FILE_SIZE);
    table_schema.set_metric_type(1);
    handler->CreateTable(&context, &table_schema, &response);
    // Create one partition identified by both a name and a tag.
    ::milvus::grpc::PartitionParam partition_param;
    partition_param.set_table_name(str_table_name);
    std::string partition_name = "tbl_partition_0";
    partition_param.set_partition_name(partition_name);
    std::string partition_tag = "0";
    partition_param.set_tag(partition_tag);
    handler->CreatePartition(&context, &partition_param, &response);
    ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code());
    // The table should now report exactly one partition.
    ::milvus::grpc::TableName table_name;
    table_name.set_table_name(str_table_name);
    ::milvus::grpc::PartitionList partition_list;
    handler->ShowPartitions(&context, &table_name, &partition_list);
    ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code());
    ASSERT_EQ(partition_list.partition_array_size(), 1);
    // Drop by tag only first, then again with the partition name set.
    ::milvus::grpc::PartitionParam partition_parm;
    partition_parm.set_table_name(str_table_name);
    partition_parm.set_tag(partition_tag);
    handler->DropPartition(&context, &partition_parm, &response);
    ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code());
    partition_parm.set_partition_name(partition_name);
    handler->DropPartition(&context, &partition_parm, &response);
    ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code());
}
TEST_F(RpcHandlerTest, CMD_TEST) {
::grpc::ServerContext context;
::milvus::grpc::Command command;
@ -396,26 +434,26 @@ TEST_F(RpcHandlerTest, CMD_TEST) {
TEST_F(RpcHandlerTest, DELETE_BY_RANGE_TEST) {
::grpc::ServerContext context;
::milvus::grpc::DeleteByRangeParam request;
::milvus::grpc::DeleteByDateParam request;
::milvus::grpc::Status status;
handler->DeleteByRange(&context, nullptr, &status);
handler->DeleteByRange(&context, &request, &status);
handler->DeleteByDate(&context, nullptr, &status);
handler->DeleteByDate(&context, &request, &status);
request.set_table_name(TABLE_NAME);
request.mutable_range()->set_start_value(CurrentTmDate(-3));
request.mutable_range()->set_end_value(CurrentTmDate(-2));
::grpc::Status grpc_status = handler->DeleteByRange(&context, &request, &status);
::grpc::Status grpc_status = handler->DeleteByDate(&context, &request, &status);
int error_code = status.error_code();
// ASSERT_EQ(error_code, ::milvus::grpc::ErrorCode::SUCCESS);
request.mutable_range()->set_start_value("test6");
grpc_status = handler->DeleteByRange(&context, &request, &status);
grpc_status = handler->DeleteByDate(&context, &request, &status);
request.mutable_range()->set_start_value(CurrentTmDate(-2));
request.mutable_range()->set_end_value("test6");
grpc_status = handler->DeleteByRange(&context, &request, &status);
grpc_status = handler->DeleteByDate(&context, &request, &status);
request.mutable_range()->set_end_value(CurrentTmDate(-2));
grpc_status = handler->DeleteByRange(&context, &request, &status);
grpc_status = handler->DeleteByDate(&context, &request, &status);
}
//////////////////////////////////////////////////////////////////////

View File

@ -117,12 +117,11 @@ TEST(UtilTest, STRINGFUNCTIONS_TEST) {
str = "a,b,c";
std::vector<std::string> result;
auto status = milvus::server::StringHelpFunctions::SplitStringByDelimeter(str, ",", result);
ASSERT_TRUE(status.ok());
milvus::server::StringHelpFunctions::SplitStringByDelimeter(str, ",", result);
ASSERT_EQ(result.size(), 3UL);
result.clear();
status = milvus::server::StringHelpFunctions::SplitStringByQuote(str, ",", "\"", result);
auto status = milvus::server::StringHelpFunctions::SplitStringByQuote(str, ",", "\"", result);
ASSERT_TRUE(status.ok());
ASSERT_EQ(result.size(), 3UL);
@ -136,6 +135,10 @@ TEST(UtilTest, STRINGFUNCTIONS_TEST) {
status = milvus::server::StringHelpFunctions::SplitStringByQuote(str, ",", "\"", result);
ASSERT_TRUE(status.ok());
ASSERT_EQ(result.size(), 3UL);
ASSERT_TRUE(milvus::server::StringHelpFunctions::IsRegexMatch("abc", "abc"));
ASSERT_TRUE(milvus::server::StringHelpFunctions::IsRegexMatch("a8c", "a\\d."));
ASSERT_FALSE(milvus::server::StringHelpFunctions::IsRegexMatch("abc", "a\\dc"));
}
TEST(UtilTest, BLOCKINGQUEUE_TEST) {
@ -314,6 +317,13 @@ TEST(ValidationUtilTest, VALIDATE_NPROBE_TEST) {
ASSERT_NE(milvus::server::ValidationUtil::ValidateSearchNprobe(101, schema).code(), milvus::SERVER_SUCCESS);
}
// A non-empty tag list is valid; adding an empty-string tag must be rejected.
TEST(ValidationUtilTest, VALIDATE_PARTITION_TAGS) {
    std::vector<std::string> partition_tags = {"abc"};
    ASSERT_EQ(milvus::server::ValidationUtil::ValidatePartitionTags(partition_tags).code(), milvus::SERVER_SUCCESS);
    // An empty tag is invalid and should fail validation for the whole list.
    partition_tags.push_back("");
    ASSERT_NE(milvus::server::ValidationUtil::ValidatePartitionTags(partition_tags).code(), milvus::SERVER_SUCCESS);
}
#ifdef MILVUS_GPU_VERSION
TEST(ValidationUtilTest, VALIDATE_GPU_TEST) {
ASSERT_EQ(milvus::server::ValidationUtil::ValidateGpuIndex(0).code(), milvus::SERVER_SUCCESS);

13
shards/.dockerignore Normal file
View File

@ -0,0 +1,13 @@
.git
.gitignore
.env
.coverage
.dockerignore
cov_html/
.pytest_cache
__pycache__
*/__pycache__
*.md
*.yml
*.yaml

10
shards/Dockerfile Normal file
View File

@ -0,0 +1,10 @@
# Image for the mishards sharding middleware (pure Python service).
FROM python:3.6
# Diagnostic tools only; not required by the service itself.
RUN apt update && apt install -y \
    less \
    telnet
RUN mkdir /source
WORKDIR /source
# Install dependencies first so this layer is cached across code changes.
ADD ./requirements.txt ./
RUN pip install -r requirements.txt
COPY . .
# Default entrypoint: start the mishards proxy.
CMD python mishards/main.py

35
shards/Makefile Normal file
View File

@ -0,0 +1,35 @@
# Probe target defaults; override with `make probe host=... port=...`.
HOST=$(or $(host),127.0.0.1)
PORT=$(or $(port),19530)
# Build the mishards docker image.
build:
	docker build --network=host -t milvusdb/mishards .
# Push/pull the image to/from Docker Hub.
push:
	docker push milvusdb/mishards
pull:
	docker pull milvusdb/mishards
# Bring up the all-in-one demo (two Milvus instances + mishards + jaeger).
deploy: clean_deploy
	cd all_in_one && docker-compose -f all_in_one.yml up -d && cd -
clean_deploy:
	cd all_in_one && docker-compose -f all_in_one.yml down && cd -
# Health-check the all-in-one deployment from a throw-away container.
probe_deploy:
	docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py"
# Kubernetes demo cluster lifecycle.
cluster:
	cd kubernetes_demo;./start.sh baseup;sleep 10;./start.sh appup;cd -
clean_cluster:
	cd kubernetes_demo;./start.sh cleanup;cd -
cluster_status:
	kubectl get pods -n milvus -o wide
# Discover the proxy service address from kubectl and probe it.
probe_cluster:
	@echo
	$(shell kubectl get service -n milvus | grep milvus-proxy-servers | awk {'print $$4,$$5'} | awk -F"[: ]" {'print "docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c \"python all_in_one/probe_test.py --port="$$2" --host="$$1"\""'})
# Probe an arbitrary endpoint (see HOST/PORT above).
probe:
	docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py --port=${PORT} --host=${HOST}"
clean_coverage:
	rm -rf cov_html
clean: clean_coverage clean_deploy clean_cluster
# Lint / coverage / test entry points.
style:
	pycodestyle --config=.
coverage:
	pytest --cov-report html:cov_html --cov=mishards
test:
	pytest

147
shards/Tutorial_CN.md Normal file
View File

@ -0,0 +1,147 @@
# Mishards使用文档
---
Milvus 旨在帮助用户实现海量非结构化数据的近似检索和分析。单个 Milvus 实例可处理十亿级数据规模,而对于百亿或者千亿规模数据的需求,则需要一个 Milvus 集群实例该实例对于上层应用可以像单机实例一样使用同时满足海量数据低延迟高并发业务需求。mishards就是一个集群中间件其内部处理请求转发读写分离水平扩展动态扩容为用户提供内存和算力可以无限扩容的 Milvus 实例。
## 运行环境
---
### 单机快速启动实例
**`python >= 3.4`环境**
```
1. cd milvus/shards
2. pip install -r requirements.txt
3. nvidia-docker run --rm -d -p 19530:19530 -v /tmp/milvus/db:/opt/milvus/db milvusdb/milvus:0.5.0-d102119-ede20b
4. sudo chown -R $USER:$USER /tmp/milvus
5. cp mishards/.env.example mishards/.env
6. 在python mishards/main.py #.env配置mishards监听19532端口
7. make probe port=19532 #健康检查
```
### 容器启动实例
`all_in_one`会在服务器上开启两个milvus实例一个mishards实例一个jaeger链路追踪实例
**启动**
```
cd milvus/shards
1. 安装docker-compose
2. make build
3. make deploy #监听19531端口
4. make clean_deploy #清理服务
5. make probe_deploy #健康检查
```
**打开Jaeger UI**
```
浏览器打开 "http://127.0.0.1:16686/"
```
### kubernetes中快速启动
**准备**
```
- kubernetes集群
- 安装nvidia-docker
- 共享存储
- 安装kubectl并能访问集群
```
**步骤**
```
cd milvus/shards
1. make cluster #启动集群
2. make probe_cluster #健康检查
3. make clean_cluster #关闭集群
```
**扩容计算实例**
```
cd milvus/shards/kubernetes_demo/
./start.sh scale-ro-server 2 扩容计算实例到2
```
**扩容代理器实例**
```
cd milvus/shards/kubernetes_demo/
./start.sh scale-proxy 2 扩容代理服务器实例到2
```
**查看日志**
```
kubectl logs -f --tail=1000 -n milvus milvus-ro-servers-0 查看计算节点milvus-ro-servers-0日志
```
## 测试
**启动单元测试**
```
1. cd milvus/shards
2. make test
```
**单元测试覆盖率**
```
1. cd milvus/shards
2. make coverage
```
**代码风格检查**
```
1. cd milvus/shards
2. make style
```
## mishards配置详解
### 全局
| Name | Required | Type | Default Value | Explanation |
| --------------------------- | -------- | -------- | ------------- | ------------- |
| Debug | No | bool | True | 是否Debug工作模式 |
| TIMEZONE | No | string | "UTC" | 时区 |
| MAX_RETRY | No | int | 3 | 最大连接重试次数 |
| SERVER_PORT | No | int | 19530 | 配置服务端口 |
| WOSERVER | **Yes** | str | - | 配置后台可写Milvus实例地址。目前只支持静态设置,例"tcp://127.0.0.1:19530" |
### 元数据
| Name | Required | Type | Default Value | Explanation |
| --------------------------- | -------- | -------- | ------------- | ------------- |
| SQLALCHEMY_DATABASE_URI | **Yes** | string | - | 配置元数据存储数据库地址 |
| SQL_ECHO | No | bool | False | 是否打印Sql详细语句 |
| SQLALCHEMY_DATABASE_TEST_URI | No | string | - | 配置测试环境下元数据存储数据库地址 |
| SQL_TEST_ECHO | No | bool | False | 配置测试环境下是否打印Sql详细语句 |
### 服务发现
| Name | Required | Type | Default Value | Explanation |
| --------------------------- | -------- | -------- | ------------- | ------------- |
| DISCOVERY_PLUGIN_PATH | No | string | - | 用户自定义服务发现插件搜索路径,默认使用系统搜索路径|
| DISCOVERY_CLASS_NAME | No | string | static | 在服务发现插件搜索路径下搜索类并实例化。目前系统提供 **static****kubernetes** 两种类,默认使用 **static** |
| DISCOVERY_STATIC_HOSTS | No | list | [] | **DISCOVERY_CLASS_NAME****static** 时,配置服务地址列表,例"192.168.1.188,192.168.1.190"|
| DISCOVERY_STATIC_PORT | No | int | 19530 | **DISCOVERY_CLASS_NAME****static** 时,配置 Hosts 监听端口 |
| DISCOVERY_KUBERNETES_NAMESPACE | No | string | - | **DISCOVERY_CLASS_NAME****kubernetes** 时,配置集群 namespace |
| DISCOVERY_KUBERNETES_IN_CLUSTER | No | bool | False | **DISCOVERY_CLASS_NAME****kubernetes** 时,标明服务发现是否在集群中运行 |
| DISCOVERY_KUBERNETES_POLL_INTERVAL | No | int | 5 | **DISCOVERY_CLASS_NAME****kubernetes** 时,标明服务发现监听服务列表频率,单位 Second |
| DISCOVERY_KUBERNETES_POD_PATT | No | string | - | **DISCOVERY_CLASS_NAME****kubernetes** 时,匹配可读 Milvus 实例的正则表达式 |
| DISCOVERY_KUBERNETES_LABEL_SELECTOR | No | string | - | **SD_PROVIDER** 为**Kubernetes**时匹配可读Milvus实例的标签选择 |
### 链路追踪
| Name | Required | Type | Default Value | Explanation |
| --------------------------- | -------- | -------- | ------------- | ------------- |
| TRACER_PLUGIN_PATH | No | string | - | 用户自定义链路追踪插件搜索路径,默认使用系统搜索路径|
| TRACER_CLASS_NAME | No | string | "" | 链路追踪方案选择,目前只实现 **Jaeger**, 默认不使用|
| TRACING_SERVICE_NAME | No | string | "mishards" | **TRACING_TYPE****Jaeger** 时,链路追踪服务名 |
| TRACING_SAMPLER_TYPE | No | string | "const" | **TRACING_TYPE****Jaeger** 时,链路追踪采样类型 |
| TRACING_SAMPLER_PARAM | No | int | 1 | **TRACING_TYPE****Jaeger** 时,链路追踪采样频率 |
| TRACING_LOG_PAYLOAD | No | bool | False | **TRACING_TYPE****Jaeger** 时,链路追踪是否采集 Payload |
### 日志
| Name | Required | Type | Default Value | Explanation |
| --------------------------- | -------- | -------- | ------------- | ------------- |
| LOG_LEVEL | No | string | "DEBUG" if Debug is ON else "INFO" | 日志记录级别 |
| LOG_PATH | No | string | "/tmp/mishards" | 日志记录路径 |
| LOG_NAME | No | string | "logfile" | 日志记录名 |
### 路由
| Name | Required | Type | Default Value | Explanation |
| --------------------------- | -------- | -------- | ------------- | ------------- |
| ROUTER_PLUGIN_PATH | No | string | - | 用户自定义路由插件搜索路径,默认使用系统搜索路径|
| ROUTER_CLASS_NAME | No | string | FileBasedHashRingRouter | 处理请求路由类名, 可注册自定义类。目前系统只提供了类 **FileBasedHashRingRouter** |
| ROUTER_CLASS_TEST_NAME | No | string | FileBasedHashRingRouter | 测试环境下处理请求路由类名, 可注册自定义类 |

View File

@ -0,0 +1,53 @@
version: "2.3"
services:
milvus_wr:
runtime: nvidia
restart: always
image: milvusdb/milvus:0.5.0-d102119-ede20b
volumes:
- /tmp/milvus/db:/opt/milvus/db
milvus_ro:
runtime: nvidia
restart: always
image: milvusdb/milvus:0.5.0-d102119-ede20b
volumes:
- /tmp/milvus/db:/opt/milvus/db
- ./ro_server.yml:/opt/milvus/conf/server_config.yaml
jaeger:
restart: always
image: jaegertracing/all-in-one:1.14
ports:
- "0.0.0.0:5775:5775/udp"
- "0.0.0.0:16686:16686"
- "0.0.0.0:9441:9441"
environment:
COLLECTOR_ZIPKIN_HTTP_PORT: 9411
mishards:
restart: always
image: milvusdb/mishards
ports:
- "0.0.0.0:19531:19531"
- "0.0.0.0:19532:19532"
volumes:
- /tmp/milvus/db:/tmp/milvus/db
# - /tmp/mishards_env:/source/mishards/.env
command: ["python", "mishards/main.py"]
environment:
FROM_EXAMPLE: 'true'
DEBUG: 'true'
SERVER_PORT: 19531
WOSERVER: tcp://milvus_wr:19530
DISCOVERY_PLUGIN_PATH: static
DISCOVERY_STATIC_HOSTS: milvus_wr,milvus_ro
TRACER_CLASS_NAME: jaeger
TRACING_SERVICE_NAME: mishards-demo
TRACING_REPORTING_HOST: jaeger
TRACING_REPORTING_PORT: 5775
depends_on:
- milvus_wr
- milvus_ro
- jaeger

View File

@ -0,0 +1,25 @@
from milvus import Milvus

# ANSI SGR colour codes used to highlight the probe result.
RED = '\033[0;31m'
GREEN = '\033[0;32m'
# BUG FIX: was '' — output never reset the colour, leaving the terminal
# red/green after the probe. '\033[0m' restores default attributes.
ENDC = '\033[0m'


def test(host='127.0.0.1', port=19531):
    """Probe a mishards/Milvus endpoint.

    Attempts a single connect() against host:port and prints a coloured
    pass/fail line. Returns 0 on success, 1 on any failure (this becomes
    the process exit status when run through fire).
    """
    client = Milvus()
    try:
        status = client.connect(host=host, port=port)
        if status.OK():
            print('{}Pass: Connected{}'.format(GREEN, ENDC))
            return 0
        else:
            print('{}Error: {}{}'.format(RED, status, ENDC))
            return 1
    except Exception as exc:
        # Network/refused-connection errors end up here.
        print('{}Error: {}{}'.format(RED, exc, ENDC))
        return 1


if __name__ == '__main__':
    import fire
    fire.Fire(test)

View File

@ -0,0 +1,41 @@
server_config:
address: 0.0.0.0 # milvus server ip address (IPv4)
port: 19530 # port range: 1025 ~ 65534
deploy_mode: cluster_readonly # deployment type: single, cluster_readonly, cluster_writable
time_zone: UTC+8
db_config:
primary_path: /opt/milvus # path used to store data and meta
secondary_path: # path used to store data only, split by semicolon
backend_url: sqlite://:@:/ # URI format: dialect://username:password@host:port/database
# Keep 'dialect://:@:/', and replace other texts with real values
# Replace 'dialect' with 'mysql' or 'sqlite'
insert_buffer_size: 4 # GB, maximum insert buffer size allowed
# sum of insert_buffer_size and cpu_cache_capacity cannot exceed total memory
preload_table: # preload data at startup, '*' means load all tables, empty value means no preload
# you can specify preload tables like this: table1,table2,table3
metric_config:
enable_monitor: false # enable monitoring or not
collector: prometheus # prometheus
prometheus_config:
port: 8080 # port prometheus uses to fetch metrics
cache_config:
cpu_cache_capacity: 16 # GB, CPU memory used for cache
cpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered
gpu_cache_capacity: 4 # GB, GPU memory used for cache
gpu_cache_threshold: 0.85 # percentage of data that will be kept when cache cleanup is triggered
cache_insert_data: false # whether to load inserted data into cache
engine_config:
use_blas_threshold: 20 # if nq < use_blas_threshold, use SSE, faster with fluctuated response times
# if nq >= use_blas_threshold, use OpenBlas, slower with stable response times
resource_config:
search_resources: # define the GPUs used for search computation, valid value: gpux
- gpu0
index_build_device: gpu0 # GPU used for building index

39
shards/conftest.py Normal file
View File

@ -0,0 +1,39 @@
import os
import logging
import pytest
import grpc
import tempfile
import shutil
from mishards import settings, db, create_app

logger = logging.getLogger(__name__)

# Create a throw-away directory holding a fresh sqlite metadata database
# for the test session.
tpath = tempfile.mkdtemp()
dirpath = '{}/db'.format(tpath)
filepath = '{}/meta.sqlite'.format(dirpath)
os.makedirs(dirpath, 0o777)
# Point the test configuration at the temp database. check_same_thread=False
# allows the sqlite connection to be shared across server threads.
settings.TestingConfig.SQLALCHEMY_DATABASE_URI = 'sqlite:///{}?check_same_thread=False'.format(
    filepath)
@pytest.fixture
def app(request):
    # Fresh application instance with a clean database schema per test.
    app = create_app(settings.TestingConfig)
    db.drop_all()
    db.create_all()
    yield app
    # Teardown: wipe the schema and stop the app after the test finishes.
    db.drop_all()
    app.stop()
    # shutil.rmtree(tpath)
@pytest.fixture
def started_app(app):
    # Same as `app`, but already running and listening on the test port.
    app.on_pre_run()
    app.start(settings.SERVER_TEST_PORT)
    yield app
    app.stop()

View File

@ -0,0 +1,37 @@
import os
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
import logging
from utils import dotdict
logger = logging.getLogger(__name__)
class DiscoveryConfig(dotdict):
    """Dict-like configuration for discovery plugins, populated from
    DISCOVERY_*-prefixed environment variables and explicit overrides."""

    # Only environment variables carrying this prefix are picked up.
    CONFIG_PREFIX = 'DISCOVERY_'

    def dump(self):
        """Log every configured key/value pair for troubleshooting."""
        logger.info('----------- DiscoveryConfig -----------------')
        for key, value in self.items():
            logger.info('{}: {}'.format(key, value))
        if not len(self):
            logger.error(' Empty DiscoveryConfig Found! ')
        logger.info('---------------------------------------------')

    @classmethod
    def Create(cls, **kwargs):
        """Build a config from the environment; kwargs take precedence."""
        instance = cls()
        for name, value in os.environ.items():
            if name.startswith(cls.CONFIG_PREFIX):
                instance[name] = value
        for name, value in kwargs.items():
            instance[name] = value
        instance.dump()
        return instance

View File

@ -0,0 +1,22 @@
import logging
from discovery import DiscoveryConfig
from utils.plugins import BaseMixin
logger = logging.getLogger(__name__)
PLUGIN_PACKAGE_NAME = 'discovery.plugins'
class DiscoveryFactory(BaseMixin):
    """Plugin factory that instantiates a discovery provider from the
    `discovery.plugins` package."""

    PLUGIN_TYPE = 'Discovery'

    def __init__(self, searchpath=None):
        super().__init__(searchpath=searchpath, package_name=PLUGIN_PACKAGE_NAME)

    def _create(self, plugin_class, **kwargs):
        # A connection manager is mandatory: the provider registers the
        # discovered servers on it.
        manager = kwargs.pop('conn_mgr', None)
        if not manager:
            raise RuntimeError('Please pass conn_mgr to create discovery!')
        config = DiscoveryConfig.Create()
        return plugin_class.Create(plugin_config=config, conn_mgr=manager, **kwargs)

View File

@ -0,0 +1,346 @@
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
import re
import logging
import time
import copy
import threading
import queue
import enum
from kubernetes import client, config, watch
logger = logging.getLogger(__name__)
INCLUSTER_NAMESPACE_PATH = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
class EventType(enum.Enum):
    # Periodic liveness report produced by the heartbeat poller.
    PodHeartBeat = 1
    # Event delivered by the kubernetes watch stream.
    Watch = 2
class K8SMixin:
    # Shared setup for kubernetes-aware threads: resolves the namespace and
    # lazily constructs a CoreV1Api client unless one is injected via kwargs.
    def __init__(self, namespace, in_cluster=False, **kwargs):
        self.namespace = namespace
        self.in_cluster = in_cluster
        self.kwargs = kwargs
        # Allow an already-built client to be injected (shared across threads).
        self.v1 = kwargs.get('v1', None)
        if not self.namespace:
            # Inside a pod the namespace is mounted by the service account.
            self.namespace = open(INCLUSTER_NAMESPACE_PATH).read()
        if not self.v1:
            # Pick kube config source based on where we are running.
            config.load_incluster_config(
            ) if self.in_cluster else config.load_kube_config()
            self.v1 = client.CoreV1Api()
class K8SHeartbeatHandler(threading.Thread, K8SMixin):
    # Polling thread: periodically lists label-selected pods and pushes a
    # PodHeartBeat message (one entry per pod) onto the shared queue.
    name = 'kubernetes'
    def __init__(self,
                 message_queue,
                 namespace,
                 label_selector,
                 in_cluster=False,
                 **kwargs):
        K8SMixin.__init__(self,
                          namespace=namespace,
                          in_cluster=in_cluster,
                          **kwargs)
        threading.Thread.__init__(self)
        self.queue = message_queue
        self.terminate = False
        self.label_selector = label_selector
        # Seconds between polls; defaults to 5.
        self.poll_interval = kwargs.get('poll_interval', 5)
    def run(self):
        # Loop until stop() flips the terminate flag; a failed poll is logged
        # and retried on the next interval rather than killing the thread.
        while not self.terminate:
            try:
                pods = self.v1.list_namespaced_pod(
                    namespace=self.namespace,
                    label_selector=self.label_selector)
                event_message = {'eType': EventType.PodHeartBeat, 'events': []}
                for item in pods.items:
                    # Re-read each pod for its current status/IP.
                    pod = self.v1.read_namespaced_pod(name=item.metadata.name,
                                                      namespace=self.namespace)
                    name = pod.metadata.name
                    ip = pod.status.pod_ip
                    phase = pod.status.phase
                    reason = pod.status.reason
                    message = pod.status.message
                    # A pod counts as ready only in the Running phase.
                    ready = True if phase == 'Running' else False
                    pod_event = dict(pod=name,
                                     ip=ip,
                                     ready=ready,
                                     reason=reason,
                                     message=message)
                    event_message['events'].append(pod_event)
                self.queue.put(event_message)
            except Exception as exc:
                logger.error(exc)
            time.sleep(self.poll_interval)
    def stop(self):
        # Cooperative shutdown; takes effect after the current sleep.
        self.terminate = True
class K8SEventListener(threading.Thread, K8SMixin):
    # Watch thread: streams Pod events from the kubernetes event API and
    # forwards them to the shared queue as Watch messages.
    def __init__(self, message_queue, namespace, in_cluster=False, **kwargs):
        K8SMixin.__init__(self,
                          namespace=namespace,
                          in_cluster=in_cluster,
                          **kwargs)
        threading.Thread.__init__(self)
        self.queue = message_queue
        self.terminate = False
        # True only until the first event; consumers use it to distinguish
        # replayed startup events from live ones.
        self.at_start_up = True
        self._stop_event = threading.Event()
    def stop(self):
        self.terminate = True
        self._stop_event.set()
    def run(self):
        resource_version = ''
        w = watch.Watch()
        # Blocks on the watch stream; only Pod-related events are requested.
        for event in w.stream(self.v1.list_namespaced_event,
                              namespace=self.namespace,
                              field_selector='involvedObject.kind=Pod'):
            if self.terminate:
                break
            resource_version = int(event['object'].metadata.resource_version)
            info = dict(
                eType=EventType.Watch,
                pod=event['object'].involved_object.name,
                reason=event['object'].reason,
                message=event['object'].message,
                start_up=self.at_start_up,
            )
            self.at_start_up = False
            # logger.info('Received event: {}'.format(info))
            self.queue.put(info)
class EventHandler(threading.Thread):
    # Consumer thread: drains the shared queue and translates watch /
    # heartbeat messages into add_pod/delete_pod calls on the manager.
    def __init__(self, mgr, message_queue, namespace, pod_patt, **kwargs):
        threading.Thread.__init__(self)
        self.mgr = mgr
        self.queue = message_queue
        self.kwargs = kwargs
        self.terminate = False
        # Only pods whose names match this pattern are managed.
        self.pod_patt = re.compile(pod_patt)
        self.namespace = namespace
    def stop(self):
        self.terminate = True
    def on_drop(self, event, **kwargs):
        # Events we don't care about are silently discarded.
        pass
    def on_pod_started(self, event, **kwargs):
        # Poll up to 3 times (0.5s apart) for the pod to get an IP assigned,
        # since 'Started' can arrive before the IP is visible.
        try_cnt = 3
        pod = None
        while try_cnt > 0:
            try_cnt -= 1
            try:
                pod = self.mgr.v1.read_namespaced_pod(name=event['pod'],
                                                      namespace=self.namespace)
                if not pod.status.pod_ip:
                    time.sleep(0.5)
                    continue
                break
            except client.rest.ApiException as exc:
                time.sleep(0.5)
        if try_cnt <= 0 and not pod:
            # Pod could not be read at all; only worth logging for live events.
            if not event['start_up']:
                logger.error('Pod {} is started but cannot read pod'.format(
                    event['pod']))
            return
        elif try_cnt <= 0 and not pod.status.pod_ip:
            logger.warning('NoPodIPFoundError')
            return
        logger.info('Register POD {} with IP {}'.format(
            pod.metadata.name, pod.status.pod_ip))
        self.mgr.add_pod(name=pod.metadata.name, ip=pod.status.pod_ip)
    def on_pod_killing(self, event, **kwargs):
        logger.info('Unregister POD {}'.format(event['pod']))
        self.mgr.delete_pod(name=event['pod'])
    def on_pod_heartbeat(self, event, **kwargs):
        # Reconcile: register ready pods, drop not-ready ones, and remove any
        # registered connection that no longer appears in the heartbeat.
        names = self.mgr.conn_mgr.conn_names
        running_names = set()
        for each_event in event['events']:
            if each_event['ready']:
                self.mgr.add_pod(name=each_event['pod'], ip=each_event['ip'])
                running_names.add(each_event['pod'])
            else:
                self.mgr.delete_pod(name=each_event['pod'])
        to_delete = names - running_names
        for name in to_delete:
            self.mgr.delete_pod(name)
        logger.info(self.mgr.conn_mgr.conn_names)
    def handle_event(self, event):
        # Dispatch: heartbeats first; watch events are filtered by reason
        # (Started/Killing only) and by the pod-name pattern.
        if event['eType'] == EventType.PodHeartBeat:
            return self.on_pod_heartbeat(event)
        if not event or (event['reason'] not in ('Started', 'Killing')):
            return self.on_drop(event)
        if not re.match(self.pod_patt, event['pod']):
            return self.on_drop(event)
        logger.info('Handling event: {}'.format(event))
        if event['reason'] == 'Started':
            return self.on_pod_started(event)
        return self.on_pod_killing(event)
    def run(self):
        # Use a 1s timeout so the terminate flag is checked regularly.
        while not self.terminate:
            try:
                event = self.queue.get(timeout=1)
                self.handle_event(event)
            except queue.Empty:
                continue
class KubernetesProviderSettings:
    """Plain value object carrying the parameters the kubernetes
    discovery provider needs (namespace, pod pattern, selector, etc.)."""

    def __init__(self, namespace, pod_patt, label_selector, in_cluster,
                 poll_interval, port=None, **kwargs):
        # Store the discovery parameters verbatim.
        self.namespace = namespace
        self.pod_patt = pod_patt
        self.label_selector = label_selector
        self.in_cluster = in_cluster
        self.poll_interval = poll_interval
        # Fall back to the default Milvus port when no truthy port is given.
        if port:
            self.port = int(port)
        else:
            self.port = 19530
class KubernetesProvider(object):
    """Kubernetes-backed service discovery.

    Spawns three cooperating threads (event listener, heartbeat poller,
    event handler) that keep the connection manager's registry of readonly
    Milvus servers in sync with the pods in the configured namespace.
    """
    name = 'kubernetes'

    def __init__(self, plugin_config, conn_mgr, **kwargs):
        self.namespace = plugin_config.DISCOVERY_KUBERNETES_NAMESPACE
        self.pod_patt = plugin_config.DISCOVERY_KUBERNETES_POD_PATT
        self.label_selector = plugin_config.DISCOVERY_KUBERNETES_LABEL_SELECTOR
        # Normalize the 'true'/'false' string to a bool. The `or ''` guard
        # tolerates a missing or non-string value (the demo passes a bool,
        # which previously crashed on .lower()).
        self.in_cluster = (plugin_config.DISCOVERY_KUBERNETES_IN_CLUSTER or '')
        self.in_cluster = str(self.in_cluster).lower() == 'true'
        self.poll_interval = plugin_config.DISCOVERY_KUBERNETES_POLL_INTERVAL
        self.poll_interval = int(self.poll_interval) if self.poll_interval else 5
        self.port = plugin_config.DISCOVERY_KUBERNETES_PORT
        self.port = int(self.port) if self.port else 19530
        self.kwargs = kwargs
        self.queue = queue.Queue()
        self.conn_mgr = conn_mgr
        if not self.namespace:
            # BUG FIX: was `incluster_namespace_path`, an undefined name
            # (NameError at runtime); the module-level constant is
            # INCLUSTER_NAMESPACE_PATH.
            self.namespace = open(INCLUSTER_NAMESPACE_PATH).read()
        # Load kube config before constructing the shared API client.
        config.load_incluster_config(
        ) if self.in_cluster else config.load_kube_config()
        self.v1 = client.CoreV1Api()
        self.listener = K8SEventListener(message_queue=self.queue,
                                         namespace=self.namespace,
                                         in_cluster=self.in_cluster,
                                         v1=self.v1,
                                         **kwargs)
        self.pod_heartbeater = K8SHeartbeatHandler(
            message_queue=self.queue,
            namespace=self.namespace,
            label_selector=self.label_selector,
            in_cluster=self.in_cluster,
            v1=self.v1,
            poll_interval=self.poll_interval,
            **kwargs)
        self.event_handler = EventHandler(mgr=self,
                                          message_queue=self.queue,
                                          namespace=self.namespace,
                                          pod_patt=self.pod_patt,
                                          **kwargs)

    def add_pod(self, name, ip):
        # Register the pod under its name with a tcp:// connection URI.
        self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port))

    def delete_pod(self, name):
        self.conn_mgr.unregister(name)

    def start(self):
        # Listener runs as a daemon so a blocked watch stream cannot keep
        # the process alive on shutdown.
        self.listener.daemon = True
        self.listener.start()
        self.event_handler.start()
        self.pod_heartbeater.start()

    def stop(self):
        self.listener.stop()
        self.pod_heartbeater.stop()
        self.event_handler.stop()

    @classmethod
    def Create(cls, conn_mgr, plugin_config, **kwargs):
        """Factory hook used by DiscoveryFactory."""
        discovery = cls(plugin_config=plugin_config, conn_mgr=conn_mgr, **kwargs)
        return discovery
def setup(app):
    # Plugin entry point: announce installation, then hand the provider
    # class to the hosting application for registration.
    message = 'Plugin \'{}\' Installed In Package: {}'.format(
        __file__, app.plugin_package_name)
    logger.info(message)
    app.on_plugin_setup(KubernetesProvider)
if __name__ == '__main__':
    # Ad-hoc manual test: run the provider against a real cluster for ~200s.
    logging.basicConfig(level=logging.INFO)
    # Put the repository root and package root on sys.path so the module can
    # be run directly from its source directory.
    sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(
        os.path.abspath(__file__))))))
    sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(
        os.path.abspath(__file__)))))

    class Connect:
        # Stand-in connection manager that just logs register/unregister
        # calls instead of opening real connections.
        def register(self, name, value):
            logger.error('Register: {} - {}'.format(name, value))

        def unregister(self, name):
            logger.error('Unregister: {}'.format(name))

        @property
        def conn_names(self):
            return set()

    connect_mgr = Connect()
    from discovery import DiscoveryConfig
    settings = DiscoveryConfig(DISCOVERY_KUBERNETES_NAMESPACE='xp',
                               DISCOVERY_KUBERNETES_POD_PATT=".*-ro-servers-.*",
                               DISCOVERY_KUBERNETES_LABEL_SELECTOR='tier=ro-servers',
                               DISCOVERY_KUBERNETES_POLL_INTERVAL=5,
                               DISCOVERY_KUBERNETES_IN_CLUSTER=False)
    provider_class = KubernetesProvider
    t = provider_class(conn_mgr=connect_mgr, plugin_config=settings)
    t.start()
    cnt = 100
    # Keep the main thread alive (~200 seconds) while the workers run.
    while cnt > 0:
        time.sleep(2)
        cnt -= 1
    t.stop()

View File

@ -0,0 +1,45 @@
import os
import sys
# Allow running this module directly by putting the package root on sys.path.
if __name__ == '__main__':
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import logging
import socket
from environs import Env
logger = logging.getLogger(__name__)
# Environment reader used to pull the static host list and port.
env = Env()
class StaticDiscovery(object):
    """Discovery provider backed by a fixed, environment-supplied host list."""

    name = 'static'

    def __init__(self, config, conn_mgr, **kwargs):
        self.conn_mgr = conn_mgr
        # Hosts and port come straight from the environment; `config` is
        # accepted for plugin-interface compatibility but is not read here.
        raw_hosts = env.list('DISCOVERY_STATIC_HOSTS', [])
        self.port = env.int('DISCOVERY_STATIC_PORT', 19530)
        # Resolve every host name to an IP address once, up front.
        self.hosts = [socket.gethostbyname(h) for h in raw_hosts]

    def start(self):
        # Register every configured host; name and address are both the IP.
        for address in self.hosts:
            self.add_pod(address, address)

    def stop(self):
        # Mirror start(): unregister every configured host.
        for address in self.hosts:
            self.delete_pod(address)

    def add_pod(self, name, ip):
        self.conn_mgr.register(name, 'tcp://{}:{}'.format(ip, self.port))

    def delete_pod(self, name):
        self.conn_mgr.unregister(name)

    @classmethod
    def Create(cls, conn_mgr, plugin_config, **kwargs):
        # Factory entry point used by the plugin loader.
        return cls(config=plugin_config, conn_mgr=conn_mgr, **kwargs)
def setup(app):
    # Plugin entry point: announce installation, then hand the provider
    # class to the hosting application for registration.
    message = 'Plugin \'{}\' Installed In Package: {}'.format(
        __file__, app.plugin_package_name)
    logger.info(message)
    app.on_plugin_setup(StaticDiscovery)

View File

@ -0,0 +1,67 @@
# ClusterIP service exposing the MySQL metadata store inside the cluster.
kind: Service
apiVersion: v1
metadata:
  name: milvus-mysql
  namespace: milvus
spec:
  type: ClusterIP
  selector:
    app: milvus
    tier: mysql
  ports:
  - protocol: TCP
    port: 3306
    targetPort: 3306
    name: mysql
---
# Single-replica MySQL deployment backing Milvus metadata.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: milvus-mysql
  namespace: milvus
spec:
  selector:
    matchLabels:
      app: milvus
      tier: mysql
  replicas: 1
  template:
    metadata:
      labels:
        app: milvus
        tier: mysql
    spec:
      containers:
      - name: milvus-mysql
        image: mysql:5.7
        imagePullPolicy: IfNotPresent
        # lifecycle:
        #   postStart:
        #     exec:
        #       command: ["/bin/sh", "-c", "mysql -h milvus-mysql -uroot -p${MYSQL_ROOT_PASSWORD} -e \"CREATE DATABASE IF NOT EXISTS ${DATABASE};\"; \
        #         mysql -uroot -p${MYSQL_ROOT_PASSWORD} -e \"GRANT ALL PRIVILEGES ON ${DATABASE}.* TO 'root'@'%';\""]
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: milvusroot
        - name: DATABASE
          value: milvus
        ports:
        - name: mysql-port
          containerPort: 3306
        volumeMounts:
        # Data files live on the shared PVC under the "mysql" subpath.
        - name: milvus-mysql-disk
          mountPath: /data
          subPath: mysql
        # mysqld.cnf is projected from the configmap defined elsewhere.
        - name: milvus-mysql-configmap
          mountPath: /etc/mysql/mysql.conf.d/mysqld.cnf
          subPath: milvus_mysql_config.yml
      volumes:
      - name: milvus-mysql-disk
        persistentVolumeClaim:
          claimName: milvus-mysql-disk
      - name: milvus-mysql-configmap
        configMap:
          name: milvus-mysql-configmap

View File

@ -0,0 +1,185 @@
# mysqld configuration projected into the MySQL pod.
apiVersion: v1
kind: ConfigMap
metadata:
  name: milvus-mysql-configmap
  namespace: milvus
data:
  milvus_mysql_config.yml: |
    [mysqld]
    pid-file        = /var/run/mysqld/mysqld.pid
    socket          = /var/run/mysqld/mysqld.sock
    datadir         = /data
    log-error      = /var/log/mysql/error.log # mount out to host
    # By default we only accept connections from localhost
    bind-address   = 0.0.0.0
    # Disabling symbolic-links is recommended to prevent assorted security risks
    symbolic-links=0
    character-set-server = utf8mb4
    collation-server = utf8mb4_unicode_ci
    init_connect='SET NAMES utf8mb4'
    skip-character-set-client-handshake = true
    max_connections = 1000
    wait_timeout = 31536000
---
# Dotenv-style settings consumed by the mishards proxy at startup.
apiVersion: v1
kind: ConfigMap
metadata:
  name: milvus-proxy-configmap
  namespace: milvus
data:
  milvus_proxy_config.yml: |
    DEBUG=True
    TESTING=False
    WOSERVER=tcp://milvus-wo-servers:19530
    SERVER_PORT=19530
    DISCOVERY_CLASS_NAME=kubernetes
    DISCOVERY_KUBERNETES_NAMESPACE=milvus
    DISCOVERY_KUBERNETES_POD_PATT=.*-ro-servers-.*
    DISCOVERY_KUBERNETES_LABEL_SELECTOR=tier=ro-servers
    DISCOVERY_KUBERNETES_POLL_INTERVAL=10
    DISCOVERY_KUBERNETES_IN_CLUSTER=True
    SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:milvusroot@milvus-mysql:3306/milvus?charset=utf8mb4
    SQLALCHEMY_POOL_SIZE=50
    SQLALCHEMY_POOL_RECYCLE=7200
    LOG_PATH=/var/log/milvus
    TIMEZONE=Asia/Shanghai
---
# Server + easylogging config for the readonly milvus servers.
apiVersion: v1
kind: ConfigMap
metadata:
  name: milvus-roserver-configmap
  namespace: milvus
data:
  config.yml: |
    server_config:
      address: 0.0.0.0
      port: 19530
      mode: cluster_readonly
    db_config:
      primary_path: /var/milvus
      backend_url: mysql://root:milvusroot@milvus-mysql:3306/milvus
      insert_buffer_size: 2
    metric_config:
      enable_monitor: off # true is on, false is off
    cache_config:
      cpu_cache_capacity: 12 # memory pool to hold index data, unit: GB
      cpu_cache_free_percent: 0.85
      insert_cache_immediately: false
      # gpu_cache_capacity: 4
      # gpu_cache_free_percent: 0.85
      # gpu_ids:
      #   - 0
    engine_config:
      use_blas_threshold: 800
    resource_config:
      search_resources:
        - gpu0
  log.conf: |
    * GLOBAL:
      FORMAT                 = "%datetime | %level | %logger | %msg"
      FILENAME               = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-global.log"
      ENABLED                = true
      TO_FILE                = true
      TO_STANDARD_OUTPUT     = true
      SUBSECOND_PRECISION    = 3
      PERFORMANCE_TRACKING   = false
      MAX_LOG_FILE_SIZE      = 2097152 ## Throw log files away after 2MB
    * DEBUG:
      FILENAME               = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-debug.log"
      ENABLED                = true
    * WARNING:
      FILENAME               = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-warning.log"
    * TRACE:
      FILENAME               = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-trace.log"
    * VERBOSE:
      FORMAT                 = "%datetime{%d/%M/%y} | %level-%vlevel | %msg"
      TO_FILE                = true
      TO_STANDARD_OUTPUT     = true
    ## Error logs
    * ERROR:
      ENABLED                = true
      FILENAME               = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-error.log"
    * FATAL:
      ENABLED                = true
      FILENAME               = "/var/milvus/logs/milvus-ro-%datetime{%H:%m}-fatal.log"
---
# Server + easylogging config for the writable milvus servers.
apiVersion: v1
kind: ConfigMap
metadata:
  name: milvus-woserver-configmap
  namespace: milvus
data:
  config.yml: |
    server_config:
      address: 0.0.0.0
      port: 19530
      mode: cluster_writable
    db_config:
      primary_path: /var/milvus
      backend_url: mysql://root:milvusroot@milvus-mysql:3306/milvus
      insert_buffer_size: 2
    metric_config:
      enable_monitor: off # true is on, false is off
    cache_config:
      cpu_cache_capacity: 2 # memory pool to hold index data, unit: GB
      cpu_cache_free_percent: 0.85
      insert_cache_immediately: false
      # gpu_cache_capacity: 4
      # gpu_cache_free_percent: 0.85
      # gpu_ids:
      #   - 0
    engine_config:
      use_blas_threshold: 800
    resource_config:
      search_resources:
        - gpu0
  log.conf: |
    * GLOBAL:
      FORMAT                 = "%datetime | %level | %logger | %msg"
      FILENAME               = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-global.log"
      ENABLED                = true
      TO_FILE                = true
      TO_STANDARD_OUTPUT     = true
      SUBSECOND_PRECISION    = 3
      PERFORMANCE_TRACKING   = false
      MAX_LOG_FILE_SIZE      = 2097152 ## Throw log files away after 2MB
    * DEBUG:
      FILENAME               = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-debug.log"
      ENABLED                = true
    * WARNING:
      FILENAME               = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-warning.log"
    * TRACE:
      FILENAME               = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-trace.log"
    * VERBOSE:
      FORMAT                 = "%datetime{%d/%M/%y} | %level-%vlevel | %msg"
      TO_FILE                = true
      TO_STANDARD_OUTPUT     = true
    ## Error logs
    * ERROR:
      ENABLED                = true
      FILENAME               = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-error.log"
    * FATAL:
      ENABLED                = true
      FILENAME               = "/var/milvus/logs/milvus-wo-%datetime{%H:%m}-fatal.log"

View File

@ -0,0 +1,57 @@
# Shared storage claims. db/log/mysql use ReadWriteMany because multiple
# pods mount them; redis gets its own ReadWriteOnce claim.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: milvus-db-disk
  namespace: milvus
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: default
  resources:
    requests:
      storage: 50Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: milvus-log-disk
  namespace: milvus
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: default
  resources:
    requests:
      storage: 50Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: milvus-mysql-disk
  namespace: milvus
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: default
  resources:
    requests:
      storage: 50Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: milvus-redis-disk
  namespace: milvus
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: default
  resources:
    requests:
      storage: 5Gi

View File

@ -0,0 +1,88 @@
# External entry point: LoadBalancer service in front of the mishards proxy.
kind: Service
apiVersion: v1
metadata:
  name: milvus-proxy-servers
  namespace: milvus
spec:
  type: LoadBalancer
  selector:
    app: milvus
    tier: proxy
  ports:
  - name: tcp
    protocol: TCP
    port: 19530
    targetPort: 19530
---
# mishards proxy deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: milvus-proxy
  namespace: milvus
spec:
  selector:
    matchLabels:
      app: milvus
      tier: proxy
  replicas: 1
  template:
    metadata:
      labels:
        app: milvus
        tier: proxy
    spec:
      containers:
      - name: milvus-proxy
        image: milvusdb/mishards:0.1.0-rc0
        imagePullPolicy: Always
        command: ["python", "mishards/main.py"]
        resources:
          limits:
            memory: "3Gi"
            cpu: "4"
          requests:
            memory: "2Gi"
        ports:
        - name: tcp
          containerPort: 5000
        env:
        # - name: SQL_ECHO
        #   value: "True"
        - name: DEBUG
          value: "False"
        # Downward API: expose the pod's own name/namespace to the process.
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: MILVUS_CLIENT
          value: "False"
        - name: LOG_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: LOG_PATH
          value: /var/log/milvus
        - name: SD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: SD_ROSERVER_POD_PATT
          value: ".*-ro-servers-.*"
        volumeMounts:
        # Proxy settings file is projected from the configmap as ".env".
        - name: milvus-proxy-configmap
          mountPath: /source/mishards/.env
          subPath: milvus_proxy_config.yml
        - name: milvus-log-disk
          mountPath: /var/log/milvus
          subPath: proxylog
      # imagePullSecrets:
      #   - name: regcred
      volumes:
      - name: milvus-proxy-configmap
        configMap:
          name: milvus-proxy-configmap
      - name: milvus-log-disk
        persistentVolumeClaim:
          claimName: milvus-log-disk

View File

@ -0,0 +1,24 @@
# RBAC: let the default serviceaccount in the milvus namespace list/watch
# pods and events (needed by the kubernetes discovery plugin).
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: pods-list
rules:
- apiGroups: [""]
  resources: ["pods", "events"]
  verbs: ["list", "get", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: pods-list
subjects:
- kind: ServiceAccount
  name: default
  namespace: milvus
roleRef:
  kind: ClusterRole
  name: pods-list
  apiGroup: rbac.authorization.k8s.io
---

View File

@ -0,0 +1,68 @@
# ClusterIP service fronting the readonly milvus servers.
kind: Service
apiVersion: v1
metadata:
  name: milvus-ro-servers
  namespace: milvus
spec:
  type: ClusterIP
  selector:
    app: milvus
    tier: ro-servers
  ports:
  - protocol: TCP
    port: 19530
    targetPort: 19530
---
# StatefulSet of readonly milvus servers.
# FIX: upgraded from apps/v1beta1 (deprecated, removed in k8s 1.16) to
# apps/v1, which also requires an explicit spec.selector matching the
# template labels.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: milvus-ro-servers
  namespace: milvus
spec:
  serviceName: "milvus-ro-servers"
  replicas: 1
  selector:
    matchLabels:
      app: milvus
      tier: ro-servers
  template:
    metadata:
      labels:
        app: milvus
        tier: ro-servers
    spec:
      # Short grace period so scale-down completes quickly.
      terminationGracePeriodSeconds: 11
      containers:
      - name: milvus-ro-server
        image: milvusdb/milvus:0.5.0-d102119-ede20b
        imagePullPolicy: Always
        ports:
        - containerPort: 19530
        resources:
          limits:
            memory: "16Gi"
            cpu: "8.0"
          requests:
            memory: "14Gi"
        volumeMounts:
        - name: milvus-db-disk
          mountPath: /var/milvus
          subPath: dbdata
        - name: milvus-roserver-configmap
          mountPath: /opt/milvus/conf/server_config.yaml
          subPath: config.yml
        - name: milvus-roserver-configmap
          mountPath: /opt/milvus/conf/log_config.conf
          subPath: log.conf
      # imagePullSecrets:
      #   - name: regcred
      # tolerations:
      #   - key: "worker"
      #     operator: "Equal"
      #     value: "performance"
      #     effect: "NoSchedule"
      volumes:
      - name: milvus-roserver-configmap
        configMap:
          name: milvus-roserver-configmap
      - name: milvus-db-disk
        persistentVolumeClaim:
          claimName: milvus-db-disk

View File

@ -0,0 +1,70 @@
# ClusterIP service fronting the writable milvus servers.
kind: Service
apiVersion: v1
metadata:
  name: milvus-wo-servers
  namespace: milvus
spec:
  type: ClusterIP
  selector:
    app: milvus
    tier: wo-servers
  ports:
  - protocol: TCP
    port: 19530
    targetPort: 19530
---
# Writable milvus server deployment.
# FIX: upgraded from apps/v1beta1 (deprecated, removed in k8s 1.16) to
# apps/v1; the required spec.selector was already present.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: milvus-wo-servers
  namespace: milvus
spec:
  selector:
    matchLabels:
      app: milvus
      tier: wo-servers
  replicas: 1
  template:
    metadata:
      labels:
        app: milvus
        tier: wo-servers
    spec:
      containers:
      - name: milvus-wo-server
        image: milvusdb/milvus:0.5.0-d102119-ede20b
        imagePullPolicy: Always
        ports:
        - containerPort: 19530
        resources:
          limits:
            memory: "5Gi"
            cpu: "1.0"
          requests:
            memory: "4Gi"
        volumeMounts:
        - name: milvus-db-disk
          mountPath: /var/milvus
          subPath: dbdata
        - name: milvus-woserver-configmap
          mountPath: /opt/milvus/conf/server_config.yaml
          subPath: config.yml
        - name: milvus-woserver-configmap
          mountPath: /opt/milvus/conf/log_config.conf
          subPath: log.conf
      # imagePullSecrets:
      #   - name: regcred
      # tolerations:
      #   - key: "worker"
      #     operator: "Equal"
      #     value: "performance"
      #     effect: "NoSchedule"
      volumes:
      - name: milvus-woserver-configmap
        configMap:
          name: milvus-woserver-configmap
      - name: milvus-db-disk
        persistentVolumeClaim:
          claimName: milvus-db-disk

368
shards/kubernetes_demo/start.sh Executable file
View File

@ -0,0 +1,368 @@
#!/bin/bash
# start.sh - lifecycle helper for the Milvus kubernetes demo deployment.
# Terminal formatting codes used by the status/help output below.
UL=`tput smul`
NOUL=`tput rmul`
BOLD=`tput bold`
NORMAL=`tput sgr0`
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
ENDC='\033[0m'
# Print the top-level usage message to stderr.
# NOTE(review): "scale-worker" is advertised here but has no handler in the
# dispatch case statement at the bottom of this script - confirm intent.
function showHelpMessage () {
    echo -e "${BOLD}Usage:${NORMAL} ${RED}$0${ENDC} [option...] {cleanup${GREEN}|${ENDC}baseup${GREEN}|${ENDC}appup${GREEN}|${ENDC}appdown${GREEN}|${ENDC}allup}" >&2
    echo
    echo "   -h, --help         show help message"
    echo "   ${BOLD}cleanup,           delete all resources${NORMAL}"
    echo "   ${BOLD}baseup,            start all required base resources${NORMAL}"
    echo "   ${BOLD}appup,             start all pods${NORMAL}"
    echo "   ${BOLD}appdown,           remove all pods${NORMAL}"
    echo "   ${BOLD}allup,             start all base resources and pods${NORMAL}"
    echo "   ${BOLD}scale-proxy,       scale proxy${NORMAL}"
    echo "   ${BOLD}scale-ro-server,   scale readonly servers${NORMAL}"
    echo "   ${BOLD}scale-worker,      scale calculation workers${NORMAL}"
}
# Print usage for the scale-* subcommands ($1 is the subcommand name).
function showscaleHelpMessage () {
    echo -e "${BOLD}Usage:${NORMAL} ${RED}$0 $1${ENDC} [option...] {1|2|3|4|...}" >&2
    echo
    echo "   -h, --help         show help message"
    echo "   ${BOLD}number,            (int) target scale number"
}
# Announce a successful scale operation: $1 = old count, $2 = new count.
function PrintScaleSuccessMessage() {
    echo -e "${BLUE}${BOLD}Successfully Scaled: ${1} --> ${2}${ENDC}"
}
# Print a bolded pod-status line ($1 = message).
function PrintPodStatusMessage() {
    echo -e "${BOLD}${1}${NORMAL}"
}
# Shared timeout (seconds) for every polling loop below.
timeout=60
# Create the milvus database and grant privileges. Credentials and the DB
# name are parsed with awk out of the backend_url line of the roserver
# configmap (format: mysql://user:password@host:port/dbname).
function setUpMysql () {
    mysqlUserName=$(kubectl describe configmap -n milvus milvus-roserver-configmap |
        grep backend_url |
        awk '{print $2}' |
        awk '{split($0, level1, ":");
            split(level1[2], level2, "/");
            print level2[3]}')
    mysqlPassword=$(kubectl describe configmap -n milvus milvus-roserver-configmap |
        grep backend_url |
        awk '{print $2}' |
        awk '{split($0, level1, ":");
            split(level1[3], level3, "@");
            print level3[1]}')
    mysqlDBName=$(kubectl describe configmap -n milvus milvus-roserver-configmap |
        grep backend_url |
        awk '{print $2}' |
        awk '{split($0, level1, ":");
            split(level1[4], level4, "/");
            print level4[2]}')
    mysqlContainer=$(kubectl get pods -n milvus | grep milvus-mysql | awk '{print $1}')
    # Create the database, then poll until it is visible (or time out).
    kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "CREATE DATABASE IF NOT EXISTS $mysqlDBName;"
    checkDBExists=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SELECT schema_name FROM information_schema.schemata WHERE schema_name = '$mysqlDBName';" | grep -o $mysqlDBName | wc -l)
    counter=0
    while [ $checkDBExists -lt 1 ]; do
        sleep 1
        let counter=counter+1
        if [ $counter == $timeout ]; then
            echo "Creating MySQL database $mysqlDBName timeout"
            return 1
        fi
        checkDBExists=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SELECT schema_name FROM information_schema.schemata WHERE schema_name = '$mysqlDBName';" | grep -o $mysqlDBName | wc -l)
    done;
    # Grant privileges and poll until the grant is visible (or time out).
    kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "GRANT ALL PRIVILEGES ON $mysqlDBName.* TO '$mysqlUserName'@'%';"
    kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "FLUSH PRIVILEGES;"
    checkGrant=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SHOW GRANTS for $mysqlUserName;" | grep -o "GRANT ALL PRIVILEGES ON \`$mysqlDBName\`\.\*" | wc -l)
    counter=0
    while [ $checkGrant -lt 1 ]; do
        sleep 1
        let counter=counter+1
        if [ $counter == $timeout ]; then
            echo "Granting all privileges on $mysqlDBName to $mysqlUserName timeout"
            return 1
        fi
        checkGrant=$(kubectl exec -n milvus $mysqlContainer -- mysql -h milvus-mysql -u$mysqlUserName -p$mysqlPassword -e "SHOW GRANTS for $mysqlUserName;" | grep -o "GRANT ALL PRIVILEGES ON \`$mysqlDBName\`\.\*" | wc -l)
    done;
}
# Wait until the milvus-ro-servers statefulset reports all replicas running,
# or return 1 after $timeout seconds. (Name keeps the historical "Severs"
# typo because callers elsewhere use it.)
function checkStatefulSevers() {
    stateful_replicas=$(kubectl describe statefulset -n milvus milvus-ro-servers | grep "Replicas:" | awk '{print $2}')
    stateful_running_pods=$(kubectl describe statefulset -n milvus milvus-ro-servers | grep "Pods Status:" | awk '{print $3}')
    counter=0
    prev=$stateful_running_pods
    PrintPodStatusMessage "Running milvus-ro-servers Pods: $stateful_running_pods/$stateful_replicas"
    while [ $stateful_replicas != $stateful_running_pods ]; do
        echo -e "${YELLOW}Wait another 1 sec --- ${counter}${ENDC}"
        sleep 1;
        let counter=counter+1
        if [ $counter -eq $timeout ]; then
            return 1;
        fi
        stateful_running_pods=$(kubectl describe statefulset -n milvus milvus-ro-servers | grep "Pods Status:" | awk '{print $3}')
        # Only reprint the status line when the count actually changed.
        if [ $stateful_running_pods -ne $prev ]; then
            PrintPodStatusMessage "Running milvus-ro-servers Pods: $stateful_running_pods/$stateful_replicas"
        fi
        prev=$stateful_running_pods
    done;
    return 0;
}
# Wait until deployment $1 has all desired replicas in Running state,
# or return 1 after $timeout seconds.
function checkDeployment() {
    deployment_name=$1
    replicas=$(kubectl describe deployment -n milvus $deployment_name | grep "Replicas:" | awk '{print $2}')
    running=$(kubectl get pods -n milvus | grep $deployment_name | grep Running | wc -l)
    counter=0
    prev=$running
    PrintPodStatusMessage "Running $deployment_name Pods: $running/$replicas"
    while [ $replicas != $running ]; do
        echo -e "${YELLOW}Wait another 1 sec --- ${counter}${ENDC}"
        sleep 1;
        let counter=counter+1
        if [ $counter == $timeout ]; then
            return 1
        fi
        running=$(kubectl get pods -n milvus | grep "$deployment_name" | grep Running | wc -l)
        # Only reprint the status line when the count actually changed.
        if [ $running -ne $prev ]; then
            PrintPodStatusMessage "Running $deployment_name Pods: $running/$replicas"
        fi
        prev=$running
    done
}
# Apply the base resources (PVCs, configmaps, mysql) and wait for the four
# PVCs to bind, then for the mysql deployment to come up.
function startDependencies() {
    kubectl apply -f milvus_data_pvc.yaml
    kubectl apply -f milvus_configmap.yaml
    kubectl apply -f milvus_auxiliary.yaml
    counter=0
    while [ $(kubectl get pvc -n milvus | grep Bound | wc -l) != 4 ]; do
        sleep 1;
        let counter=counter+1
        if [ $counter == $timeout ]; then
            echo "baseup timeout"
            return 1
        fi
    done
    checkDeployment "milvus-mysql"
}
# Bring up the application pods: verify base resources exist, initialize
# mysql, then apply servers, RBAC and the proxy, waiting on each stage.
function startApps() {
    counter=0
    errmsg=""
    echo -e "${GREEN}${BOLD}Checking required resouces...${NORMAL}${ENDC}"
    # Poll until PVCs, configmaps and the mysql endpoint are all ready.
    while [ $counter -lt $timeout ]; do
        sleep 1;
        if [ $(kubectl get pvc -n milvus 2>/dev/null | grep Bound | wc -l) != 4 ]; then
            echo -e "${YELLOW}No pvc. Wait another sec... $counter${ENDC}";
            errmsg='No pvc';
            let counter=counter+1;
            continue
        fi
        if [ $(kubectl get configmap -n milvus 2>/dev/null | grep milvus | wc -l) != 4 ]; then
            echo -e "${YELLOW}No configmap. Wait another sec... $counter${ENDC}";
            errmsg='No configmap';
            let counter=counter+1;
            continue
        fi
        if [ $(kubectl get ep -n milvus 2>/dev/null | grep milvus-mysql | awk '{print $2}') == "<none>" ]; then
            echo -e "${YELLOW}No mysql. Wait another sec... $counter${ENDC}";
            errmsg='No mysql';
            let counter=counter+1;
            continue
        fi
        # if [ $(kubectl get ep -n milvus 2>/dev/null | grep milvus-redis | awk '{print $2}') == "<none>" ]; then
        #     echo -e "${NORMAL}${YELLOW}No redis. Wait another sec... $counter${ENDC}";
        #     errmsg='No redis';
        #     let counter=counter+1;
        #     continue
        # fi
        break;
    done
    if [ $counter -ge $timeout ]; then
        echo -e "${RED}${BOLD}Start APP Error: $errmsg${NORMAL}${ENDC}"
        exit 1;
    fi
    echo -e "${GREEN}${BOLD}Setup requried database ...${NORMAL}${ENDC}"
    setUpMysql
    if [ $? -ne 0 ]; then
        echo -e "${RED}${BOLD}Setup MySQL database timeout${NORMAL}${ENDC}"
        exit 1
    fi
    echo -e "${GREEN}${BOLD}Start servers ...${NORMAL}${ENDC}"
    kubectl apply -f milvus_stateful_servers.yaml
    kubectl apply -f milvus_write_servers.yaml
    checkStatefulSevers
    if [ $? -ne 0 ]; then
        echo -e "${RED}${BOLD}Starting milvus-ro-servers timeout${NORMAL}${ENDC}"
        exit 1
    fi
    checkDeployment "milvus-wo-servers"
    if [ $? -ne 0 ]; then
        echo -e "${RED}${BOLD}Starting milvus-wo-servers timeout${NORMAL}${ENDC}"
        exit 1
    fi
    echo -e "${GREEN}${BOLD}Start rolebinding ...${NORMAL}${ENDC}"
    kubectl apply -f milvus_rbac.yaml
    echo -e "${GREEN}${BOLD}Start proxies ...${NORMAL}${ENDC}"
    kubectl apply -f milvus_proxy.yaml
    checkDeployment "milvus-proxy"
    if [ $? -ne 0 ]; then
        echo -e "${RED}${BOLD}Starting milvus-proxy timeout${NORMAL}${ENDC}"
        exit 1
    fi
    # echo -e "${GREEN}${BOLD}Start flower ...${NORMAL}${ENDC}"
    # kubectl apply -f milvus_flower.yaml
    # checkDeployment "milvus-flower"
    # if [ $? -ne 0 ]; then
    #     echo -e "${RED}${BOLD}Starting milvus-flower timeout${NORMAL}${ENDC}"
    #     exit 1
    # fi
}
# Delete the application pods (servers, proxy, RBAC); base resources stay.
function removeApps () {
    # kubectl delete -f milvus_flower.yaml 2>/dev/null
    kubectl delete -f milvus_proxy.yaml 2>/dev/null
    kubectl delete -f milvus_stateful_servers.yaml 2>/dev/null
    kubectl delete -f milvus_write_servers.yaml 2>/dev/null
    kubectl delete -f milvus_rbac.yaml 2>/dev/null
    # kubectl delete -f milvus_monitor.yaml 2>/dev/null
}
# Scale deployment $1 to $3 replicas ($2 is the subcommand name for help
# output). On timeout, roll back to the previous replica count.
function scaleDeployment() {
    deployment_name=$1
    subcommand=$2
    des=$3
    case $des in
        -h|--help|"")
            showscaleHelpMessage $subcommand
            exit 3
            ;;
    esac
    # Current ready count, taken from the "READY x/y" column.
    cur=$(kubectl get deployment -n milvus $deployment_name |grep $deployment_name |awk '{split($2, status, "/"); print status[2];}')
    echo -e "${GREEN}Current Running ${BOLD}$cur ${GREEN}${deployment_name}, Scaling to ${BOLD}$des ...${ENDC}";
    scalecmd="kubectl scale deployment -n milvus ${deployment_name} --replicas=${des}"
    ${scalecmd}
    if [ $? -ne 0 ]; then
        echo -e "${RED}${BOLD}Scale Error: ${GREEN}${scalecmd}${ENDC}"
        exit 1
    fi
    checkDeployment $deployment_name
    if [ $? -ne 0 ]; then
        # Timed out waiting for the new scale - restore the old count.
        echo -e "${RED}${BOLD}Scale ${deployment_name} timeout${NORMAL}${ENDC}"
        scalecmd="kubectl scale deployment -n milvus ${deployment_name} --replicas=${cur}"
        ${scalecmd}
        if [ $? -ne 0 ]; then
            echo -e "${RED}${BOLD}Scale Rollback Error: ${GREEN}${scalecmd}${ENDC}"
            exit 2
        fi
        echo -e "${BLUE}${BOLD}Scale Rollback to ${cur}${ENDC}"
        exit 1
    fi
    PrintScaleSuccessMessage $cur $des
}
# Scale the milvus-ro-servers statefulset to $2 replicas ($1 is the
# subcommand name for help output). On timeout, roll back.
function scaleROServers() {
    subcommand=$1
    des=$2
    case $des in
        -h|--help|"")
            showscaleHelpMessage $subcommand
            exit 3
            ;;
    esac
    # Current ready count, taken from the "READY x/y" column.
    cur=$(kubectl get statefulset -n milvus milvus-ro-servers |tail -n 1 |awk '{split($2, status, "/"); print status[2];}')
    echo -e "${GREEN}Current Running ${BOLD}$cur ${GREEN}Readonly Servers, Scaling to ${BOLD}$des ...${ENDC}";
    scalecmd="kubectl scale sts milvus-ro-servers -n milvus --replicas=${des}"
    ${scalecmd}
    if [ $? -ne 0 ]; then
        echo -e "${RED}${BOLD}Scale Error: ${GREEN}${scalecmd}${ENDC}"
        exit 1
    fi
    checkStatefulSevers
    if [ $? -ne 0 ]; then
        # Timed out waiting for the new scale - restore the old count.
        echo -e "${RED}${BOLD}Scale milvus-ro-servers timeout${NORMAL}${ENDC}"
        scalecmd="kubectl scale sts milvus-ro-servers -n milvus --replicas=${cur}"
        ${scalecmd}
        if [ $? -ne 0 ]; then
            echo -e "${RED}${BOLD}Scale Rollback Error: ${GREEN}${scalecmd}${ENDC}"
            exit 2
        fi
        echo -e "${BLUE}${BOLD}Scale Rollback to ${cur}${ENDC}"
        exit 1
    fi
    PrintScaleSuccessMessage $cur $des
}
# Subcommand dispatch. Note the help text also advertises "scale-worker",
# which currently falls through to the help branch here.
case "$1" in
    cleanup)
        # Delete everything defined in this directory, pods and base alike.
        kubectl delete -f . 2>/dev/null
        echo -e "${BLUE}${BOLD}All resources are removed${NORMAL}${ENDC}"
        ;;
    appdown)
        removeApps;
        echo -e "${BLUE}${BOLD}All pods are removed${NORMAL}${ENDC}"
        ;;
    baseup)
        startDependencies;
        echo -e "${BLUE}${BOLD}All pvc, configmap and services up${NORMAL}${ENDC}"
        ;;
    appup)
        startApps;
        echo -e "${BLUE}${BOLD}All pods up${NORMAL}${ENDC}"
        ;;
    allup)
        startDependencies;
        sleep 2
        startApps;
        echo -e "${BLUE}${BOLD}All resources and pods up${NORMAL}${ENDC}"
        ;;
    scale-ro-server)
        # $1 is the subcommand name, $2 the desired replica count.
        scaleROServers $1 $2
        ;;
    scale-proxy)
        scaleDeployment "milvus-proxy" $1 $2
        ;;
    -h|--help|*)
        showHelpMessage
        ;;
esac

18
shards/manager.py Normal file
View File

@ -0,0 +1,18 @@
import fire
from mishards import db, settings
class DBHandler:
    """Thin CLI facade over the global ``db`` object, exposed via fire."""

    @classmethod
    def create_all(cls):
        # Create all tables known to the ORM metadata.
        db.create_all()

    @classmethod
    def drop_all(cls):
        # Drop all tables known to the ORM metadata.
        db.drop_all()
if __name__ == '__main__':
    # Bind the DB to the configured URI, load the model definitions so the
    # metadata is populated, then expose DBHandler as a fire CLI.
    db.init_db(settings.DefaultConfig.SQLALCHEMY_DATABASE_URI)
    from mishards import models
    fire.Fire(DBHandler)

View File

@ -0,0 +1,36 @@
# Server settings
DEBUG=True
WOSERVER=tcp://127.0.0.1:19530
SERVER_PORT=19532
SERVER_TEST_PORT=19888

# Metadata database (test URI used by the test suite)
#SQLALCHEMY_DATABASE_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4
SQLALCHEMY_DATABASE_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False
SQL_ECHO=False
#SQLALCHEMY_DATABASE_TEST_URI=mysql+pymysql://root:root@127.0.0.1:3306/milvus?charset=utf8mb4
SQLALCHEMY_DATABASE_TEST_URI=sqlite:////tmp/milvus/db/meta.sqlite?check_same_thread=False
SQL_TEST_ECHO=False

# Tracing (jaeger) settings
TRACER_PLUGIN_PATH=/tmp/plugins
# TRACING_TEST_TYPE=jaeger
TRACER_CLASS_NAME=jaeger
TRACING_SERVICE_NAME=fortest
TRACING_SAMPLER_TYPE=const
TRACING_SAMPLER_PARAM=1
TRACING_LOG_PAYLOAD=True
#TRACING_SAMPLER_TYPE=probabilistic
#TRACING_SAMPLER_PARAM=0.5

# Discovery settings: static host list by default; kubernetes values below
# are used when DISCOVERY_CLASS_NAME=kubernetes is enabled.
#DISCOVERY_PLUGIN_PATH=
#DISCOVERY_CLASS_NAME=kubernetes
DISCOVERY_STATIC_HOSTS=127.0.0.1
DISCOVERY_STATIC_PORT=19530
DISCOVERY_KUBERNETES_NAMESPACE=xp
DISCOVERY_KUBERNETES_POD_PATT=.*-ro-servers-.*
DISCOVERY_KUBERNETES_LABEL_SELECTOR=tier=ro-servers
DISCOVERY_KUBERNETES_POLL_INTERVAL=5
DISCOVERY_KUBERNETES_IN_CLUSTER=False

View File

@ -0,0 +1,40 @@
import logging
from mishards import settings
logger = logging.getLogger()

# Package-level singletons shared across mishards.
from mishards.db_base import DB
db = DB()

from mishards.server import Server
grpc_server = Server()
def create_app(testing_config=None):
    """Assemble and return the configured grpc server.

    Uses ``testing_config`` when given, otherwise the default settings.
    Wires the DB, the discovery plugin, the tracer and the router into
    the module-level ``grpc_server`` singleton.
    """
    config = testing_config if testing_config else settings.DefaultConfig
    db.init_db(uri=config.SQLALCHEMY_DATABASE_URI, echo=config.SQL_ECHO)
    from mishards.connections import ConnectionMgr
    connect_mgr = ConnectionMgr()
    from discovery.factory import DiscoveryFactory
    discover = DiscoveryFactory(config.DISCOVERY_PLUGIN_PATH).create(config.DISCOVERY_CLASS_NAME,
                                                                    conn_mgr=connect_mgr)
    from mishards.grpc_utils import GrpcSpanDecorator
    from tracer.factory import TracerFactory
    tracer = TracerFactory(config.TRACER_PLUGIN_PATH).create(config.TRACER_CLASS_NAME,
                                                             plugin_config=settings.TracingConfig,
                                                             span_decorator=GrpcSpanDecorator())
    from mishards.router.factory import RouterFactory
    router = RouterFactory(config.ROUTER_PLUGIN_PATH).create(config.ROUTER_CLASS_NAME,
                                                             conn_mgr=connect_mgr)
    grpc_server.init_app(conn_mgr=connect_mgr,
                         tracer=tracer,
                         router=router,
                         discover=discover)
    # Importing the handlers module registers the grpc exception handlers.
    from mishards import exception_handlers
    return grpc_server

View File

@ -0,0 +1,154 @@
import logging
import threading
from functools import wraps
from milvus import Milvus
from mishards import (settings, exceptions)
from utils import singleton
logger = logging.getLogger(__name__)
class Connection:
    """A named, lazily-connected wrapper around a Milvus client.

    The underlying connection is only established on demand (see
    :meth:`on_connect`), retrying up to ``max_retry`` times.
    """

    def __init__(self, name, uri, max_retry=1, error_handlers=None, **kwargs):
        self.name = name
        self.uri = uri
        self.max_retry = max_retry
        self.retried = 0
        self.conn = Milvus()
        self.error_handlers = [] if not error_handlers else error_handlers
        # Optional hook invoked on every retry attempt.
        self.on_retry_func = kwargs.get('on_retry_func', None)
        # self._connect()

    def __str__(self):
        return 'Connection:name=\"{}\";uri=\"{}\"'.format(self.name, self.uri)

    def _connect(self, metadata=None):
        try:
            self.conn.connect(uri=self.uri)
        except Exception as e:
            # With no handlers registered the failure is fatal; otherwise
            # each handler gets a chance to deal with (or re-raise) it.
            if not self.error_handlers:
                raise exceptions.ConnectionConnectError(message=str(e), metadata=metadata)
            for handler in self.error_handlers:
                handler(e, metadata=metadata)

    @property
    def can_retry(self):
        # True while the retry budget has not been exhausted.
        return self.retried < self.max_retry

    @property
    def connected(self):
        return self.conn.connected()

    def on_retry(self):
        if self.on_retry_func:
            self.on_retry_func(self)
        elif self.retried > 1:
            logger.warning('{} is retrying {}'.format(self, self.retried))

    def on_connect(self, metadata=None):
        """Connect with retries; raise ConnectionConnectError on exhaustion."""
        while not self.connected and self.can_retry:
            self.retried += 1
            self.on_retry()
            self._connect(metadata=metadata)
        if not self.can_retry and not self.connected:
            # BUGFIX: `metadata` was previously passed to str.format() (where
            # extra keyword arguments are silently ignored) instead of to the
            # exception constructor, so the raised error lost its metadata.
            raise exceptions.ConnectionConnectError(
                message='Max retry {} reached!'.format(self.max_retry),
                metadata=metadata)
        self.retried = 0

    def connect(self, func, exception_handler=None):
        """Wrap ``func`` so the connection is established before each call."""
        @wraps(func)
        def inner(*args, **kwargs):
            self.on_connect()
            try:
                return func(*args, **kwargs)
            except Exception as e:
                if exception_handler:
                    exception_handler(e)
                else:
                    raise e
        return inner
@singleton
class ConnectionMgr:
    """Thread-aware registry mapping connection names to Connection objects.

    ``metas`` maps name -> uri. ``conns`` maps name -> {thread_id: Connection}
    so each thread gets its own client instance.
    """

    def __init__(self):
        self.metas = {}
        self.conns = {}

    @property
    def conn_names(self):
        # All registered names except the write-only server alias.
        return set(self.metas.keys()) - set(['WOSERVER'])

    def conn(self, name, metadata, throw=False):
        """Return this thread's Connection for ``name``, creating it lazily.

        Returns None (or raises ConnectionNotFoundError when ``throw`` is
        set) if the name has no registered uri.
        """
        c = self.conns.get(name, None)
        if not c:
            url = self.metas.get(name, None)
            if not url:
                if not throw:
                    return None
                raise exceptions.ConnectionNotFoundError(
                    message='Connection {} not found'.format(name),
                    metadata=metadata)
            this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY)
            threaded = {
                threading.get_ident(): this_conn
            }
            self.conns[name] = threaded
            return this_conn
        tid = threading.get_ident()
        rconn = c.get(tid, None)
        if not rconn:
            url = self.metas.get(name, None)
            if not url:
                if not throw:
                    return None
                # Consistency fix: pass the message as a keyword argument,
                # matching the first raise site above.
                raise exceptions.ConnectionNotFoundError(
                    message='Connection {} not found'.format(name),
                    metadata=metadata)
            this_conn = Connection(name=name, uri=url, max_retry=settings.MAX_RETRY)
            c[tid] = this_conn
            return this_conn
        return rconn

    def on_new_meta(self, name, url):
        logger.info('Register Connection: name={};url={}'.format(name, url))
        self.metas[name] = url

    def on_duplicate_meta(self, name, url):
        # Same name seen again: dispatch on whether the url changed.
        if self.metas[name] == url:
            return self.on_same_meta(name, url)
        return self.on_diff_meta(name, url)

    def on_same_meta(self, name, url):
        # logger.warning('Register same meta: {}:{}'.format(name, url))
        pass

    def on_diff_meta(self, name, url):
        # The url changed: record it and drop every cached connection.
        logger.warning('Received {} with diff url={}'.format(name, url))
        self.metas[name] = url
        self.conns[name] = {}

    def on_unregister_meta(self, name, url):
        logger.info('Unregister name={};url={}'.format(name, url))
        self.conns.pop(name, None)

    def on_nonexisted_meta(self, name):
        logger.warning('Non-existed meta: {}'.format(name))

    def register(self, name, url):
        meta = self.metas.get(name)
        if not meta:
            return self.on_new_meta(name, url)
        else:
            return self.on_duplicate_meta(name, url)

    def unregister(self, name):
        logger.info('Unregister Connection: name={}'.format(name))
        url = self.metas.pop(name, None)
        if url is None:
            return self.on_nonexisted_meta(name)
        return self.on_unregister_meta(name, url)

Some files were not shown because too many files have changed in this diff Show More