* #1724 Remove unused unittests

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* #1724 Remove unused unittests - part2

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* #1724 Remove unused unittests - part3

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* #1724 Remove unused unittests - part4

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* #1724 Remove unused unittests - part5

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* Change table_id to collection_id

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Change table to collection

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Fix format error

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* Change table_ to collection_

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Change table_id_ to collection_id_

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Fix format error

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* #1724 Change table_name to collection_name

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Reformat

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* Change TableFile to Segment

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* Change TableSchema to CollectionSchema

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* fix compile lint

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>
Jin Hai 2020-04-01 17:37:55 +08:00 committed by GitHub
parent 6f85895340
commit 68625cbf1c
127 changed files with 2884 additions and 2832 deletions
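Before the per-file diffs, a quick orientation: besides renaming table_id parameters to collection_id, the commit renames the meta schema types. A hedged summary of that mapping, written as aliases purely for illustration (the commit renames the structs in place rather than adding aliases):

// Illustration only, not code from this commit.
// meta::TableSchema      -> meta::CollectionSchema
// meta::TableFileSchema  -> meta::SegmentSchema
// meta::TableFilesSchema -> meta::SegmentsSchema
namespace meta {
using CollectionSchema = TableSchema;
using SegmentSchema = TableFileSchema;
using SegmentsSchema = TableFilesSchema;
}  // namespace meta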


@ -788,22 +788,22 @@ Config::CheckDBConfigPreloadTable(const std::string& value) {
std::unordered_set<std::string> table_set;
for (auto& table : tables) {
if (!ValidationUtil::ValidateTableName(table).ok()) {
return Status(SERVER_INVALID_ARGUMENT, "Invalid table name: " + table);
for (auto& collection : tables) {
if (!ValidationUtil::ValidateCollectionName(collection).ok()) {
return Status(SERVER_INVALID_ARGUMENT, "Invalid collection name: " + collection);
}
bool exist = false;
auto status = DBWrapper::DB()->HasNativeTable(table, exist);
auto status = DBWrapper::DB()->HasNativeTable(collection, exist);
if (!(status.ok() && exist)) {
return Status(SERVER_TABLE_NOT_EXIST, "Table " + table + " not exist");
return Status(SERVER_TABLE_NOT_EXIST, "Collection " + collection + " not exist");
}
table_set.insert(table);
table_set.insert(collection);
}
if (table_set.size() != tables.size()) {
std::string msg =
"Invalid preload tables. "
"Possible reason: db_config.preload_table contains duplicate table.";
"Possible reason: db_config.preload_table contains duplicate collection.";
return Status(SERVER_INVALID_ARGUMENT, msg);
}
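The hunk above keeps the original duplicate check: every configured name is validated, checked for existence, and inserted into an unordered_set, and a size mismatch between the set and the input list signals a duplicate. A self-contained sketch of that pattern, with hypothetical IsValidName/CollectionExists stand-ins in place of ValidationUtil and DBWrapper:

#include <string>
#include <unordered_set>
#include <vector>

// Hypothetical stand-ins for ValidationUtil::ValidateCollectionName and
// DBWrapper::DB()->HasNativeTable used in the hunk above.
static bool IsValidName(const std::string& name) { return !name.empty(); }
static bool CollectionExists(const std::string& /*name*/) { return true; }

// Returns false and fills err on an invalid, missing, or duplicated name.
static bool CheckPreloadList(const std::vector<std::string>& collections, std::string& err) {
    std::unordered_set<std::string> seen;
    for (const auto& collection : collections) {
        if (!IsValidName(collection)) {
            err = "Invalid collection name: " + collection;
            return false;
        }
        if (!CollectionExists(collection)) {
            err = "Collection " + collection + " not exist";
            return false;
        }
        seen.insert(collection);
    }
    if (seen.size() != collections.size()) {
        err = "db_config.preload_table contains duplicate collection";
        return false;
    }
    return true;
}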


@ -44,82 +44,82 @@ class DB {
Stop() = 0;
virtual Status
CreateTable(meta::TableSchema& table_schema_) = 0;
CreateTable(meta::CollectionSchema& table_schema_) = 0;
virtual Status
DropTable(const std::string& table_id) = 0;
DropTable(const std::string& collection_id) = 0;
virtual Status
DescribeTable(meta::TableSchema& table_schema_) = 0;
DescribeTable(meta::CollectionSchema& table_schema_) = 0;
virtual Status
HasTable(const std::string& table_id, bool& has_or_not_) = 0;
HasTable(const std::string& collection_id, bool& has_or_not_) = 0;
virtual Status
HasNativeTable(const std::string& table_id, bool& has_or_not_) = 0;
HasNativeTable(const std::string& collection_id, bool& has_or_not_) = 0;
virtual Status
AllTables(std::vector<meta::TableSchema>& table_schema_array) = 0;
AllTables(std::vector<meta::CollectionSchema>& table_schema_array) = 0;
virtual Status
GetTableInfo(const std::string& table_id, TableInfo& table_info) = 0;
GetTableInfo(const std::string& collection_id, TableInfo& table_info) = 0;
virtual Status
GetTableRowCount(const std::string& table_id, uint64_t& row_count) = 0;
GetTableRowCount(const std::string& collection_id, uint64_t& row_count) = 0;
virtual Status
PreloadTable(const std::string& table_id) = 0;
PreloadTable(const std::string& collection_id) = 0;
virtual Status
UpdateTableFlag(const std::string& table_id, int64_t flag) = 0;
UpdateTableFlag(const std::string& collection_id, int64_t flag) = 0;
virtual Status
CreatePartition(const std::string& table_id, const std::string& partition_name,
CreatePartition(const std::string& collection_id, const std::string& partition_name,
const std::string& partition_tag) = 0;
virtual Status
DropPartition(const std::string& partition_name) = 0;
virtual Status
DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) = 0;
DropPartitionByTag(const std::string& collection_id, const std::string& partition_tag) = 0;
virtual Status
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) = 0;
ShowPartitions(const std::string& collection_id, std::vector<meta::CollectionSchema>& partition_schema_array) = 0;
virtual Status
InsertVectors(const std::string& table_id, const std::string& partition_tag, VectorsData& vectors) = 0;
InsertVectors(const std::string& collection_id, const std::string& partition_tag, VectorsData& vectors) = 0;
virtual Status
DeleteVector(const std::string& table_id, IDNumber vector_id) = 0;
DeleteVector(const std::string& collection_id, IDNumber vector_id) = 0;
virtual Status
DeleteVectors(const std::string& table_id, IDNumbers vector_ids) = 0;
DeleteVectors(const std::string& collection_id, IDNumbers vector_ids) = 0;
virtual Status
Flush(const std::string& table_id) = 0;
Flush(const std::string& collection_id) = 0;
virtual Status
Flush() = 0;
virtual Status
Compact(const std::string& table_id) = 0;
Compact(const std::string& collection_id) = 0;
virtual Status
GetVectorByID(const std::string& table_id, const IDNumber& vector_id, VectorsData& vector) = 0;
GetVectorByID(const std::string& collection_id, const IDNumber& vector_id, VectorsData& vector) = 0;
virtual Status
GetVectorIDs(const std::string& table_id, const std::string& segment_id, IDNumbers& vector_ids) = 0;
GetVectorIDs(const std::string& collection_id, const std::string& segment_id, IDNumbers& vector_ids) = 0;
// virtual Status
// Merge(const std::set<std::string>& table_ids) = 0;
virtual Status
QueryByID(const std::shared_ptr<server::Context>& context, const std::string& table_id,
QueryByID(const std::shared_ptr<server::Context>& context, const std::string& collection_id,
const std::vector<std::string>& partition_tags, uint64_t k, const milvus::json& extra_params,
IDNumber vector_id, ResultIds& result_ids, ResultDistances& result_distances) = 0;
virtual Status
Query(const std::shared_ptr<server::Context>& context, const std::string& table_id,
Query(const std::shared_ptr<server::Context>& context, const std::string& collection_id,
const std::vector<std::string>& partition_tags, uint64_t k, const milvus::json& extra_params,
const VectorsData& vectors, ResultIds& result_ids, ResultDistances& result_distances) = 0;
@ -132,13 +132,13 @@ class DB {
Size(uint64_t& result) = 0;
virtual Status
CreateIndex(const std::string& table_id, const TableIndex& index) = 0;
CreateIndex(const std::string& collection_id, const TableIndex& index) = 0;
virtual Status
DescribeIndex(const std::string& table_id, TableIndex& index) = 0;
DescribeIndex(const std::string& collection_id, TableIndex& index) = 0;
virtual Status
DropIndex(const std::string& table_id) = 0;
DropIndex(const std::string& collection_id) = 0;
virtual Status
DropAll() = 0;
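Only the names change in the interface above; call shapes stay the same. A hedged sketch of a caller driving the renamed methods through the DBWrapper singleton seen in the config check earlier (error handling trimmed, not a verbatim Milvus code path):

// Sketch only: assumes the DB interface declared above and DBWrapper::DB().
Status PreloadIfPresent(const std::string& collection_id) {
    bool exist = false;
    auto status = DBWrapper::DB()->HasNativeTable(collection_id, exist);
    if (!status.ok() || !exist) {
        return Status(SERVER_TABLE_NOT_EXIST, "Collection " + collection_id + " not exist");
    }
    return DBWrapper::DB()->PreloadTable(collection_id);
}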

File diff suppressed because it is too large


@ -55,91 +55,92 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi
DropAll() override;
Status
CreateTable(meta::TableSchema& table_schema) override;
CreateTable(meta::CollectionSchema& table_schema) override;
Status
DropTable(const std::string& table_id) override;
DropTable(const std::string& collection_id) override;
Status
DescribeTable(meta::TableSchema& table_schema) override;
DescribeTable(meta::CollectionSchema& table_schema) override;
Status
HasTable(const std::string& table_id, bool& has_or_not) override;
HasTable(const std::string& collection_id, bool& has_or_not) override;
Status
HasNativeTable(const std::string& table_id, bool& has_or_not_) override;
HasNativeTable(const std::string& collection_id, bool& has_or_not_) override;
Status
AllTables(std::vector<meta::TableSchema>& table_schema_array) override;
AllTables(std::vector<meta::CollectionSchema>& table_schema_array) override;
Status
GetTableInfo(const std::string& table_id, TableInfo& table_info) override;
GetTableInfo(const std::string& collection_id, TableInfo& table_info) override;
Status
PreloadTable(const std::string& table_id) override;
PreloadTable(const std::string& collection_id) override;
Status
UpdateTableFlag(const std::string& table_id, int64_t flag) override;
UpdateTableFlag(const std::string& collection_id, int64_t flag) override;
Status
GetTableRowCount(const std::string& table_id, uint64_t& row_count) override;
GetTableRowCount(const std::string& collection_id, uint64_t& row_count) override;
Status
CreatePartition(const std::string& table_id, const std::string& partition_name,
CreatePartition(const std::string& collection_id, const std::string& partition_name,
const std::string& partition_tag) override;
Status
DropPartition(const std::string& partition_name) override;
Status
DropPartitionByTag(const std::string& table_id, const std::string& partition_tag) override;
DropPartitionByTag(const std::string& collection_id, const std::string& partition_tag) override;
Status
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) override;
ShowPartitions(const std::string& collection_id,
std::vector<meta::CollectionSchema>& partition_schema_array) override;
Status
InsertVectors(const std::string& table_id, const std::string& partition_tag, VectorsData& vectors) override;
InsertVectors(const std::string& collection_id, const std::string& partition_tag, VectorsData& vectors) override;
Status
DeleteVector(const std::string& table_id, IDNumber vector_id) override;
DeleteVector(const std::string& collection_id, IDNumber vector_id) override;
Status
DeleteVectors(const std::string& table_id, IDNumbers vector_ids) override;
DeleteVectors(const std::string& collection_id, IDNumbers vector_ids) override;
Status
Flush(const std::string& table_id) override;
Flush(const std::string& collection_id) override;
Status
Flush() override;
Status
Compact(const std::string& table_id) override;
Compact(const std::string& collection_id) override;
Status
GetVectorByID(const std::string& table_id, const IDNumber& vector_id, VectorsData& vector) override;
GetVectorByID(const std::string& collection_id, const IDNumber& vector_id, VectorsData& vector) override;
Status
GetVectorIDs(const std::string& table_id, const std::string& segment_id, IDNumbers& vector_ids) override;
GetVectorIDs(const std::string& collection_id, const std::string& segment_id, IDNumbers& vector_ids) override;
// Status
// Merge(const std::set<std::string>& table_ids) override;
Status
CreateIndex(const std::string& table_id, const TableIndex& index) override;
CreateIndex(const std::string& collection_id, const TableIndex& index) override;
Status
DescribeIndex(const std::string& table_id, TableIndex& index) override;
DescribeIndex(const std::string& collection_id, TableIndex& index) override;
Status
DropIndex(const std::string& table_id) override;
DropIndex(const std::string& collection_id) override;
Status
QueryByID(const std::shared_ptr<server::Context>& context, const std::string& table_id,
QueryByID(const std::shared_ptr<server::Context>& context, const std::string& collection_id,
const std::vector<std::string>& partition_tags, uint64_t k, const milvus::json& extra_params,
IDNumber vector_id, ResultIds& result_ids, ResultDistances& result_distances) override;
Status
Query(const std::shared_ptr<server::Context>& context, const std::string& table_id,
Query(const std::shared_ptr<server::Context>& context, const std::string& collection_id,
const std::vector<std::string>& partition_tags, uint64_t k, const milvus::json& extra_params,
const VectorsData& vectors, ResultIds& result_ids, ResultDistances& result_distances) override;
@ -160,13 +161,13 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi
private:
Status
QueryAsync(const std::shared_ptr<server::Context>& context, const meta::TableFilesSchema& files, uint64_t k,
QueryAsync(const std::shared_ptr<server::Context>& context, const meta::SegmentsSchema& files, uint64_t k,
const milvus::json& extra_params, const VectorsData& vectors, ResultIds& result_ids,
ResultDistances& result_distances);
Status
GetVectorByIdHelper(const std::string& table_id, IDNumber vector_id, VectorsData& vector,
const meta::TableFilesSchema& files);
GetVectorByIdHelper(const std::string& collection_id, IDNumber vector_id, VectorsData& vector,
const meta::SegmentsSchema& files);
void
BackgroundTimerTask();
@ -184,10 +185,10 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi
StartMergeTask();
Status
MergeFiles(const std::string& table_id, const meta::TableFilesSchema& files);
MergeFiles(const std::string& collection_id, const meta::SegmentsSchema& files);
Status
BackgroundMergeFiles(const std::string& table_id);
BackgroundMergeFiles(const std::string& collection_id);
void
BackgroundMerge(std::set<std::string> table_ids);
@ -199,8 +200,8 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi
BackgroundBuildIndex();
Status
CompactFile(const std::string& table_id, const meta::TableFileSchema& file,
meta::TableFilesSchema& files_to_update);
CompactFile(const std::string& collection_id, const meta::SegmentSchema& file,
meta::SegmentsSchema& files_to_update);
/*
Status
@ -208,33 +209,33 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi
*/
Status
GetFilesToBuildIndex(const std::string& table_id, const std::vector<int>& file_types,
meta::TableFilesSchema& files);
GetFilesToBuildIndex(const std::string& collection_id, const std::vector<int>& file_types,
meta::SegmentsSchema& files);
Status
GetFilesToSearch(const std::string& table_id, meta::TableFilesSchema& files);
GetFilesToSearch(const std::string& collection_id, meta::SegmentsSchema& files);
Status
GetPartitionByTag(const std::string& table_id, const std::string& partition_tag, std::string& partition_name);
GetPartitionByTag(const std::string& collection_id, const std::string& partition_tag, std::string& partition_name);
Status
GetPartitionsByTags(const std::string& table_id, const std::vector<std::string>& partition_tags,
GetPartitionsByTags(const std::string& collection_id, const std::vector<std::string>& partition_tags,
std::set<std::string>& partition_name_array);
Status
DropTableRecursively(const std::string& table_id);
DropTableRecursively(const std::string& collection_id);
Status
UpdateTableIndexRecursively(const std::string& table_id, const TableIndex& index);
UpdateTableIndexRecursively(const std::string& collection_id, const TableIndex& index);
Status
WaitTableIndexRecursively(const std::string& table_id, const TableIndex& index);
WaitTableIndexRecursively(const std::string& collection_id, const TableIndex& index);
Status
DropTableIndexRecursively(const std::string& table_id);
DropTableIndexRecursively(const std::string& collection_id);
Status
GetTableRowCountRecursively(const std::string& table_id, uint64_t& row_count);
GetTableRowCountRecursively(const std::string& collection_id, uint64_t& row_count);
Status
ExecWalRecord(const wal::MXLogRecord& record);


@ -20,17 +20,17 @@ namespace engine {
constexpr uint64_t INDEX_FAILED_RETRY_TIME = 1;
Status
IndexFailedChecker::CleanFailedIndexFileOfTable(const std::string& table_id) {
IndexFailedChecker::CleanFailedIndexFileOfTable(const std::string& collection_id) {
std::lock_guard<std::mutex> lck(mutex_);
index_failed_files_.erase(table_id); // rebuild failed index files for this table
index_failed_files_.erase(collection_id); // rebuild failed index files for this collection
return Status::OK();
}
Status
IndexFailedChecker::GetErrMsgForTable(const std::string& table_id, std::string& err_msg) {
IndexFailedChecker::GetErrMsgForTable(const std::string& collection_id, std::string& err_msg) {
std::lock_guard<std::mutex> lck(mutex_);
auto iter = index_failed_files_.find(table_id);
auto iter = index_failed_files_.find(collection_id);
if (iter != index_failed_files_.end()) {
err_msg = iter->second.begin()->second[0];
}
@ -39,14 +39,14 @@ IndexFailedChecker::GetErrMsgForTable(const std::string& table_id, std::string&
}
Status
IndexFailedChecker::MarkFailedIndexFile(const meta::TableFileSchema& file, const std::string& err_msg) {
IndexFailedChecker::MarkFailedIndexFile(const meta::SegmentSchema& file, const std::string& err_msg) {
std::lock_guard<std::mutex> lck(mutex_);
auto iter = index_failed_files_.find(file.table_id_);
auto iter = index_failed_files_.find(file.collection_id_);
if (iter == index_failed_files_.end()) {
File2ErrArray failed_files;
failed_files.insert(std::make_pair(file.file_id_, std::vector<std::string>(1, err_msg)));
index_failed_files_.insert(std::make_pair(file.table_id_, failed_files));
index_failed_files_.insert(std::make_pair(file.collection_id_, failed_files));
} else {
auto it_failed_files = iter->second.find(file.file_id_);
if (it_failed_files != iter->second.end()) {
@ -60,14 +60,14 @@ IndexFailedChecker::MarkFailedIndexFile(const meta::TableFileSchema& file, const
}
Status
IndexFailedChecker::MarkSucceedIndexFile(const meta::TableFileSchema& file) {
IndexFailedChecker::MarkSucceedIndexFile(const meta::SegmentSchema& file) {
std::lock_guard<std::mutex> lck(mutex_);
auto iter = index_failed_files_.find(file.table_id_);
auto iter = index_failed_files_.find(file.collection_id_);
if (iter != index_failed_files_.end()) {
iter->second.erase(file.file_id_);
if (iter->second.empty()) {
index_failed_files_.erase(file.table_id_);
index_failed_files_.erase(file.collection_id_);
}
}
@ -75,14 +75,14 @@ IndexFailedChecker::MarkSucceedIndexFile(const meta::TableFileSchema& file) {
}
Status
IndexFailedChecker::IgnoreFailedIndexFiles(meta::TableFilesSchema& table_files) {
IndexFailedChecker::IgnoreFailedIndexFiles(meta::SegmentsSchema& table_files) {
std::lock_guard<std::mutex> lck(mutex_);
// there could be some failed files belong to different table.
// there could be some failed files belong to different collection.
// some files may has failed for several times, no need to build index for these files.
// thus we can avoid dead circle for build index operation
for (auto it_file = table_files.begin(); it_file != table_files.end();) {
auto it_failed_files = index_failed_files_.find((*it_file).table_id_);
auto it_failed_files = index_failed_files_.find((*it_file).collection_id_);
if (it_failed_files != index_failed_files_.end()) {
auto it_failed_file = it_failed_files->second.find((*it_file).file_id_);
if (it_failed_file != it_failed_files->second.end()) {


@ -25,23 +25,23 @@ namespace engine {
class IndexFailedChecker {
public:
Status
CleanFailedIndexFileOfTable(const std::string& table_id);
CleanFailedIndexFileOfTable(const std::string& collection_id);
Status
GetErrMsgForTable(const std::string& table_id, std::string& err_msg);
GetErrMsgForTable(const std::string& collection_id, std::string& err_msg);
Status
MarkFailedIndexFile(const meta::TableFileSchema& file, const std::string& err_msg);
MarkFailedIndexFile(const meta::SegmentSchema& file, const std::string& err_msg);
Status
MarkSucceedIndexFile(const meta::TableFileSchema& file);
MarkSucceedIndexFile(const meta::SegmentSchema& file);
Status
IgnoreFailedIndexFiles(meta::TableFilesSchema& table_files);
IgnoreFailedIndexFiles(meta::SegmentsSchema& table_files);
private:
std::mutex mutex_;
Table2FileErr index_failed_files_; // table id mapping to (file id mapping to failed times)
Table2FileErr index_failed_files_; // collection id mapping to (file id mapping to failed times)
};
} // namespace engine


@ -24,13 +24,13 @@ OngoingFileChecker::GetInstance() {
}
Status
OngoingFileChecker::MarkOngoingFile(const meta::TableFileSchema& table_file) {
OngoingFileChecker::MarkOngoingFile(const meta::SegmentSchema& table_file) {
std::lock_guard<std::mutex> lck(mutex_);
return MarkOngoingFileNoLock(table_file);
}
Status
OngoingFileChecker::MarkOngoingFiles(const meta::TableFilesSchema& table_files) {
OngoingFileChecker::MarkOngoingFiles(const meta::SegmentsSchema& table_files) {
std::lock_guard<std::mutex> lck(mutex_);
for (auto& table_file : table_files) {
@ -41,13 +41,13 @@ OngoingFileChecker::MarkOngoingFiles(const meta::TableFilesSchema& table_files)
}
Status
OngoingFileChecker::UnmarkOngoingFile(const meta::TableFileSchema& table_file) {
OngoingFileChecker::UnmarkOngoingFile(const meta::SegmentSchema& table_file) {
std::lock_guard<std::mutex> lck(mutex_);
return UnmarkOngoingFileNoLock(table_file);
}
Status
OngoingFileChecker::UnmarkOngoingFiles(const meta::TableFilesSchema& table_files) {
OngoingFileChecker::UnmarkOngoingFiles(const meta::SegmentsSchema& table_files) {
std::lock_guard<std::mutex> lck(mutex_);
for (auto& table_file : table_files) {
@ -58,10 +58,10 @@ OngoingFileChecker::UnmarkOngoingFiles(const meta::TableFilesSchema& table_files
}
bool
OngoingFileChecker::IsIgnored(const meta::TableFileSchema& schema) {
OngoingFileChecker::IsIgnored(const meta::SegmentSchema& schema) {
std::lock_guard<std::mutex> lck(mutex_);
auto iter = ongoing_files_.find(schema.table_id_);
auto iter = ongoing_files_.find(schema.collection_id_);
if (iter == ongoing_files_.end()) {
return false;
} else {
@ -75,16 +75,16 @@ OngoingFileChecker::IsIgnored(const meta::TableFileSchema& schema) {
}
Status
OngoingFileChecker::MarkOngoingFileNoLock(const meta::TableFileSchema& table_file) {
if (table_file.table_id_.empty() || table_file.file_id_.empty()) {
return Status(DB_ERROR, "Invalid table files");
OngoingFileChecker::MarkOngoingFileNoLock(const meta::SegmentSchema& table_file) {
if (table_file.collection_id_.empty() || table_file.file_id_.empty()) {
return Status(DB_ERROR, "Invalid collection files");
}
auto iter = ongoing_files_.find(table_file.table_id_);
auto iter = ongoing_files_.find(table_file.collection_id_);
if (iter == ongoing_files_.end()) {
File2RefCount files_refcount;
files_refcount.insert(std::make_pair(table_file.file_id_, 1));
ongoing_files_.insert(std::make_pair(table_file.table_id_, files_refcount));
ongoing_files_.insert(std::make_pair(table_file.collection_id_, files_refcount));
} else {
auto it_file = iter->second.find(table_file.file_id_);
if (it_file == iter->second.end()) {
@ -95,18 +95,18 @@ OngoingFileChecker::MarkOngoingFileNoLock(const meta::TableFileSchema& table_fil
}
ENGINE_LOG_DEBUG << "Mark ongoing file:" << table_file.file_id_
<< " refcount:" << ongoing_files_[table_file.table_id_][table_file.file_id_];
<< " refcount:" << ongoing_files_[table_file.collection_id_][table_file.file_id_];
return Status::OK();
}
Status
OngoingFileChecker::UnmarkOngoingFileNoLock(const meta::TableFileSchema& table_file) {
if (table_file.table_id_.empty() || table_file.file_id_.empty()) {
return Status(DB_ERROR, "Invalid table files");
OngoingFileChecker::UnmarkOngoingFileNoLock(const meta::SegmentSchema& table_file) {
if (table_file.collection_id_.empty() || table_file.file_id_.empty()) {
return Status(DB_ERROR, "Invalid collection files");
}
auto iter = ongoing_files_.find(table_file.table_id_);
auto iter = ongoing_files_.find(table_file.collection_id_);
if (iter != ongoing_files_.end()) {
auto it_file = iter->second.find(table_file.file_id_);
if (it_file != iter->second.end()) {
@ -117,7 +117,7 @@ OngoingFileChecker::UnmarkOngoingFileNoLock(const meta::TableFileSchema& table_f
if (it_file->second <= 0) {
iter->second.erase(table_file.file_id_);
if (iter->second.empty()) {
ongoing_files_.erase(table_file.table_id_);
ongoing_files_.erase(table_file.collection_id_);
}
}
}


@ -29,30 +29,30 @@ class OngoingFileChecker {
GetInstance();
Status
MarkOngoingFile(const meta::TableFileSchema& table_file);
MarkOngoingFile(const meta::SegmentSchema& table_file);
Status
MarkOngoingFiles(const meta::TableFilesSchema& table_files);
MarkOngoingFiles(const meta::SegmentsSchema& table_files);
Status
UnmarkOngoingFile(const meta::TableFileSchema& table_file);
UnmarkOngoingFile(const meta::SegmentSchema& table_file);
Status
UnmarkOngoingFiles(const meta::TableFilesSchema& table_files);
UnmarkOngoingFiles(const meta::SegmentsSchema& table_files);
bool
IsIgnored(const meta::TableFileSchema& schema);
IsIgnored(const meta::SegmentSchema& schema);
private:
Status
MarkOngoingFileNoLock(const meta::TableFileSchema& table_file);
MarkOngoingFileNoLock(const meta::SegmentSchema& table_file);
Status
UnmarkOngoingFileNoLock(const meta::TableFileSchema& table_file);
UnmarkOngoingFileNoLock(const meta::SegmentSchema& table_file);
private:
std::mutex mutex_;
Table2FileRef ongoing_files_; // table id mapping to (file id mapping to ongoing ref-count)
Table2FileRef ongoing_files_; // collection id mapping to (file id mapping to ongoing ref-count)
};
} // namespace engine
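The checker above keeps a per-collection, per-file ref-count so cleanup can skip files that a background task is still using. A hedged usage sketch, assuming the files were fetched from meta beforehand (illustrative, not verbatim Milvus code):

// Sketch only: bracket work on a batch of segment files with mark/unmark so
// IsIgnored() returns true for them while the task runs.
void ProcessSegmentFiles(meta::SegmentsSchema& files) {
    OngoingFileChecker::GetInstance().MarkOngoingFiles(files);    // bump ref-counts
    // ... merge / build index / apply deletes on the marked files ...
    OngoingFileChecker::GetInstance().UnmarkOngoingFiles(files);  // drop ref-counts
}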


@ -36,19 +36,19 @@ uint64_t index_file_counter = 0;
std::mutex index_file_counter_mutex;
static std::string
ConstructParentFolder(const std::string& db_path, const meta::TableFileSchema& table_file) {
std::string table_path = db_path + TABLES_FOLDER + table_file.table_id_;
ConstructParentFolder(const std::string& db_path, const meta::SegmentSchema& table_file) {
std::string table_path = db_path + TABLES_FOLDER + table_file.collection_id_;
std::string partition_path = table_path + "/" + table_file.segment_id_;
return partition_path;
}
static std::string
GetTableFileParentFolder(const DBMetaOptions& options, const meta::TableFileSchema& table_file) {
GetTableFileParentFolder(const DBMetaOptions& options, const meta::SegmentSchema& table_file) {
uint64_t path_count = options.slave_paths_.size() + 1;
std::string target_path = options.path_;
uint64_t index = 0;
if (meta::TableFileSchema::NEW_INDEX == table_file.file_type_) {
if (meta::SegmentSchema::NEW_INDEX == table_file.file_type_) {
// index file is large file and to be persisted permanently
// we need to distribute index files to each db_path averagely
// round robin according to a file counter
@ -79,9 +79,9 @@ GetMicroSecTimeStamp() {
}
Status
CreateTablePath(const DBMetaOptions& options, const std::string& table_id) {
CreateTablePath(const DBMetaOptions& options, const std::string& collection_id) {
std::string db_path = options.path_;
std::string table_path = db_path + TABLES_FOLDER + table_id;
std::string table_path = db_path + TABLES_FOLDER + collection_id;
auto status = server::CommonUtil::CreateDirectory(table_path);
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
@ -89,7 +89,7 @@ CreateTablePath(const DBMetaOptions& options, const std::string& table_id) {
}
for (auto& path : options.slave_paths_) {
table_path = path + TABLES_FOLDER + table_id;
table_path = path + TABLES_FOLDER + collection_id;
status = server::CommonUtil::CreateDirectory(table_path);
fiu_do_on("CreateTablePath.creat_slave_path", status = Status(DB_INVALID_PATH, ""));
if (!status.ok()) {
@ -102,18 +102,18 @@ CreateTablePath(const DBMetaOptions& options, const std::string& table_id) {
}
Status
DeleteTablePath(const DBMetaOptions& options, const std::string& table_id, bool force) {
DeleteTablePath(const DBMetaOptions& options, const std::string& collection_id, bool force) {
std::vector<std::string> paths = options.slave_paths_;
paths.push_back(options.path_);
for (auto& path : paths) {
std::string table_path = path + TABLES_FOLDER + table_id;
std::string table_path = path + TABLES_FOLDER + collection_id;
if (force) {
boost::filesystem::remove_all(table_path);
ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
ENGINE_LOG_DEBUG << "Remove collection folder: " << table_path;
} else if (boost::filesystem::exists(table_path) && boost::filesystem::is_empty(table_path)) {
boost::filesystem::remove_all(table_path);
ENGINE_LOG_DEBUG << "Remove table folder: " << table_path;
ENGINE_LOG_DEBUG << "Remove collection folder: " << table_path;
}
}
@ -122,7 +122,7 @@ DeleteTablePath(const DBMetaOptions& options, const std::string& table_id, bool
config.GetStorageConfigS3Enable(s3_enable);
if (s3_enable) {
std::string table_path = options.path_ + TABLES_FOLDER + table_id;
std::string table_path = options.path_ + TABLES_FOLDER + collection_id;
auto& storage_inst = milvus::storage::S3ClientWrapper::GetInstance();
Status stat = storage_inst.DeleteObjects(table_path);
@ -135,7 +135,7 @@ DeleteTablePath(const DBMetaOptions& options, const std::string& table_id, bool
}
Status
CreateTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) {
CreateTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file) {
std::string parent_path = GetTableFileParentFolder(options, table_file);
auto status = server::CommonUtil::CreateDirectory(parent_path);
@ -151,7 +151,7 @@ CreateTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_f
}
Status
GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) {
GetTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file) {
std::string parent_path = ConstructParentFolder(options.path_, table_file);
std::string file_path = parent_path + "/" + table_file.file_id_;
@ -179,23 +179,23 @@ GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file
}
}
std::string msg = "Table file doesn't exist: " + file_path;
std::string msg = "Collection file doesn't exist: " + file_path;
if (table_file.file_size_ > 0) { // no need to pop error for empty file
ENGINE_LOG_ERROR << msg << " in path: " << options.path_ << " for table: " << table_file.table_id_;
ENGINE_LOG_ERROR << msg << " in path: " << options.path_ << " for collection: " << table_file.collection_id_;
}
return Status(DB_ERROR, msg);
}
Status
DeleteTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) {
DeleteTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file) {
utils::GetTableFilePath(options, table_file);
boost::filesystem::remove(table_file.location_);
return Status::OK();
}
Status
DeleteSegment(const DBMetaOptions& options, meta::TableFileSchema& table_file) {
DeleteSegment(const DBMetaOptions& options, meta::SegmentSchema& table_file) {
utils::GetTableFilePath(options, table_file);
std::string segment_dir;
GetParentPath(table_file.location_, segment_dir);


@ -26,18 +26,18 @@ int64_t
GetMicroSecTimeStamp();
Status
CreateTablePath(const DBMetaOptions& options, const std::string& table_id);
CreateTablePath(const DBMetaOptions& options, const std::string& collection_id);
Status
DeleteTablePath(const DBMetaOptions& options, const std::string& table_id, bool force = true);
DeleteTablePath(const DBMetaOptions& options, const std::string& collection_id, bool force = true);
Status
CreateTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file);
CreateTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file);
Status
GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file);
GetTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file);
Status
DeleteTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file);
DeleteTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file);
Status
DeleteSegment(const DBMetaOptions& options, meta::TableFileSchema& table_file);
DeleteSegment(const DBMetaOptions& options, meta::SegmentSchema& table_file);
Status
GetParentPath(const std::string& path, std::string& parent_path);


@ -24,21 +24,21 @@ namespace engine {
class MemManager {
public:
virtual Status
InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
const float* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) = 0;
virtual Status
InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
const uint8_t* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) = 0;
virtual Status
DeleteVector(const std::string& table_id, IDNumber vector_id, uint64_t lsn) = 0;
DeleteVector(const std::string& collection_id, IDNumber vector_id, uint64_t lsn) = 0;
virtual Status
DeleteVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, uint64_t lsn) = 0;
DeleteVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, uint64_t lsn) = 0;
virtual Status
Flush(const std::string& table_id, bool apply_delete = true) = 0;
Flush(const std::string& collection_id, bool apply_delete = true) = 0;
virtual Status
Flush(std::set<std::string>& table_ids, bool apply_delete = true) = 0;
@ -47,7 +47,7 @@ class MemManager {
// Serialize(std::set<std::string>& table_ids) = 0;
virtual Status
EraseMemVector(const std::string& table_id) = 0;
EraseMemVector(const std::string& collection_id) = 0;
virtual size_t
GetCurrentMutableMem() = 0;


@ -21,18 +21,18 @@ namespace milvus {
namespace engine {
MemTablePtr
MemManagerImpl::GetMemByTable(const std::string& table_id) {
auto memIt = mem_id_map_.find(table_id);
MemManagerImpl::GetMemByTable(const std::string& collection_id) {
auto memIt = mem_id_map_.find(collection_id);
if (memIt != mem_id_map_.end()) {
return memIt->second;
}
mem_id_map_[table_id] = std::make_shared<MemTable>(table_id, meta_, options_);
return mem_id_map_[table_id];
mem_id_map_[collection_id] = std::make_shared<MemTable>(collection_id, meta_, options_);
return mem_id_map_[collection_id];
}
Status
MemManagerImpl::InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
MemManagerImpl::InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
const float* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) {
flushed_tables.clear();
if (GetCurrentMem() > options_.insert_buffer_size_) {
@ -54,11 +54,11 @@ MemManagerImpl::InsertVectors(const std::string& table_id, int64_t length, const
std::unique_lock<std::mutex> lock(mutex_);
return InsertVectorsNoLock(table_id, source, lsn);
return InsertVectorsNoLock(collection_id, source, lsn);
}
Status
MemManagerImpl::InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
MemManagerImpl::InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
const uint8_t* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) {
flushed_tables.clear();
if (GetCurrentMem() > options_.insert_buffer_size_) {
@ -80,12 +80,12 @@ MemManagerImpl::InsertVectors(const std::string& table_id, int64_t length, const
std::unique_lock<std::mutex> lock(mutex_);
return InsertVectorsNoLock(table_id, source, lsn);
return InsertVectorsNoLock(collection_id, source, lsn);
}
Status
MemManagerImpl::InsertVectorsNoLock(const std::string& table_id, const VectorSourcePtr& source, uint64_t lsn) {
MemTablePtr mem = GetMemByTable(table_id);
MemManagerImpl::InsertVectorsNoLock(const std::string& collection_id, const VectorSourcePtr& source, uint64_t lsn) {
MemTablePtr mem = GetMemByTable(collection_id);
mem->SetLSN(lsn);
auto status = mem->Add(source);
@ -93,18 +93,19 @@ MemManagerImpl::InsertVectorsNoLock(const std::string& table_id, const VectorSou
}
Status
MemManagerImpl::DeleteVector(const std::string& table_id, IDNumber vector_id, uint64_t lsn) {
MemManagerImpl::DeleteVector(const std::string& collection_id, IDNumber vector_id, uint64_t lsn) {
std::unique_lock<std::mutex> lock(mutex_);
MemTablePtr mem = GetMemByTable(table_id);
MemTablePtr mem = GetMemByTable(collection_id);
mem->SetLSN(lsn);
auto status = mem->Delete(vector_id);
return status;
}
Status
MemManagerImpl::DeleteVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, uint64_t lsn) {
MemManagerImpl::DeleteVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids,
uint64_t lsn) {
std::unique_lock<std::mutex> lock(mutex_);
MemTablePtr mem = GetMemByTable(table_id);
MemTablePtr mem = GetMemByTable(collection_id);
mem->SetLSN(lsn);
IDNumbers ids;
@ -128,8 +129,8 @@ MemManagerImpl::DeleteVectors(const std::string& table_id, int64_t length, const
}
Status
MemManagerImpl::Flush(const std::string& table_id, bool apply_delete) {
ToImmutable(table_id);
MemManagerImpl::Flush(const std::string& collection_id, bool apply_delete) {
ToImmutable(collection_id);
// TODO: There is actually only one memTable in the immutable list
MemList temp_immutable_list;
{
@ -140,13 +141,13 @@ MemManagerImpl::Flush(const std::string& table_id, bool apply_delete) {
std::unique_lock<std::mutex> lock(serialization_mtx_);
auto max_lsn = GetMaxLSN(temp_immutable_list);
for (auto& mem : temp_immutable_list) {
ENGINE_LOG_DEBUG << "Flushing table: " << mem->GetTableId();
ENGINE_LOG_DEBUG << "Flushing collection: " << mem->GetTableId();
auto status = mem->Serialize(max_lsn, apply_delete);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Flush table " << mem->GetTableId() << " failed";
ENGINE_LOG_ERROR << "Flush collection " << mem->GetTableId() << " failed";
return status;
}
ENGINE_LOG_DEBUG << "Flushed table: " << mem->GetTableId();
ENGINE_LOG_DEBUG << "Flushed collection: " << mem->GetTableId();
}
return Status::OK();
@ -166,14 +167,14 @@ MemManagerImpl::Flush(std::set<std::string>& table_ids, bool apply_delete) {
table_ids.clear();
auto max_lsn = GetMaxLSN(temp_immutable_list);
for (auto& mem : temp_immutable_list) {
ENGINE_LOG_DEBUG << "Flushing table: " << mem->GetTableId();
ENGINE_LOG_DEBUG << "Flushing collection: " << mem->GetTableId();
auto status = mem->Serialize(max_lsn, apply_delete);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Flush table " << mem->GetTableId() << " failed";
ENGINE_LOG_ERROR << "Flush collection " << mem->GetTableId() << " failed";
return status;
}
table_ids.insert(mem->GetTableId());
ENGINE_LOG_DEBUG << "Flushed table: " << mem->GetTableId();
ENGINE_LOG_DEBUG << "Flushed collection: " << mem->GetTableId();
}
meta_->SetGlobalLastLSN(max_lsn);
@ -182,15 +183,15 @@ MemManagerImpl::Flush(std::set<std::string>& table_ids, bool apply_delete) {
}
Status
MemManagerImpl::ToImmutable(const std::string& table_id) {
MemManagerImpl::ToImmutable(const std::string& collection_id) {
std::unique_lock<std::mutex> lock(mutex_);
auto memIt = mem_id_map_.find(table_id);
auto memIt = mem_id_map_.find(collection_id);
if (memIt != mem_id_map_.end()) {
if (!memIt->second->Empty()) {
immu_mem_list_.push_back(memIt->second);
mem_id_map_.erase(memIt);
}
// std::string err_msg = "Could not find table = " + table_id + " to flush";
// std::string err_msg = "Could not find collection = " + collection_id + " to flush";
// ENGINE_LOG_ERROR << err_msg;
// return Status(DB_NOT_FOUND, err_msg);
}
@ -204,7 +205,7 @@ MemManagerImpl::ToImmutable() {
MemIdMap temp_map;
for (auto& kv : mem_id_map_) {
if (kv.second->Empty()) {
// empty table without any deletes, no need to serialize
// empty collection without any deletes, no need to serialize
temp_map.insert(kv);
} else {
immu_mem_list_.push_back(kv.second);
@ -216,17 +217,17 @@ MemManagerImpl::ToImmutable() {
}
Status
MemManagerImpl::EraseMemVector(const std::string& table_id) {
MemManagerImpl::EraseMemVector(const std::string& collection_id) {
{ // erase MemVector from rapid-insert cache
std::unique_lock<std::mutex> lock(mutex_);
mem_id_map_.erase(table_id);
mem_id_map_.erase(collection_id);
}
{ // erase MemVector from serialize cache
std::unique_lock<std::mutex> lock(serialization_mtx_);
MemList temp_list;
for (auto& mem : immu_mem_list_) {
if (mem->GetTableId() != table_id) {
if (mem->GetTableId() != collection_id) {
temp_list.push_back(mem);
}
}
@ -265,9 +266,9 @@ MemManagerImpl::GetCurrentMem() {
uint64_t
MemManagerImpl::GetMaxLSN(const MemList& tables) {
uint64_t max_lsn = 0;
for (auto& table : tables) {
auto cur_lsn = table->GetLSN();
if (table->GetLSN() > max_lsn) {
for (auto& collection : tables) {
auto cur_lsn = collection->GetLSN();
if (collection->GetLSN() > max_lsn) {
max_lsn = cur_lsn;
}
}
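The Flush path in this file is a two-phase hand-off: ToImmutable() detaches the collection's mutable mem table into the immutable list under mutex_, then the detached list is serialized under serialization_mtx_ using the largest LSN of the batch. A condensed sketch of that pattern with simplified types and status checking elided (not the verbatim implementation):

// Sketch only: MemTablePtr, GetLSN() and Serialize() are the names visible in
// the diff above; the container and locking here are simplified.
uint64_t FlushDetached(std::list<MemTablePtr>& immu_mem_list, std::mutex& map_mutex,
                       std::mutex& serialization_mutex) {
    std::list<MemTablePtr> detached;
    {
        std::unique_lock<std::mutex> lock(map_mutex);  // phase 1: detach under the map mutex
        detached.swap(immu_mem_list);
    }
    std::unique_lock<std::mutex> lock(serialization_mutex);  // phase 2: serialize
    uint64_t max_lsn = 0;
    for (auto& mem : detached) {
        max_lsn = std::max(max_lsn, mem->GetLSN());
    }
    for (auto& mem : detached) {
        mem->Serialize(max_lsn, /*apply_delete=*/true);  // status checking elided
    }
    return max_lsn;
}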


@ -41,21 +41,21 @@ class MemManagerImpl : public MemManager, public server::CacheConfigHandler {
}
Status
InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
const float* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) override;
Status
InsertVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
InsertVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, int64_t dim,
const uint8_t* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) override;
Status
DeleteVector(const std::string& table_id, IDNumber vector_id, uint64_t lsn) override;
DeleteVector(const std::string& collection_id, IDNumber vector_id, uint64_t lsn) override;
Status
DeleteVectors(const std::string& table_id, int64_t length, const IDNumber* vector_ids, uint64_t lsn) override;
DeleteVectors(const std::string& collection_id, int64_t length, const IDNumber* vector_ids, uint64_t lsn) override;
Status
Flush(const std::string& table_id, bool apply_delete = true) override;
Flush(const std::string& collection_id, bool apply_delete = true) override;
Status
Flush(std::set<std::string>& table_ids, bool apply_delete = true) override;
@ -64,7 +64,7 @@ class MemManagerImpl : public MemManager, public server::CacheConfigHandler {
// Serialize(std::set<std::string>& table_ids) override;
Status
EraseMemVector(const std::string& table_id) override;
EraseMemVector(const std::string& collection_id) override;
size_t
GetCurrentMutableMem() override;
@ -81,16 +81,16 @@ class MemManagerImpl : public MemManager, public server::CacheConfigHandler {
private:
MemTablePtr
GetMemByTable(const std::string& table_id);
GetMemByTable(const std::string& collection_id);
Status
InsertVectorsNoLock(const std::string& table_id, const VectorSourcePtr& source, uint64_t lsn);
InsertVectorsNoLock(const std::string& collection_id, const VectorSourcePtr& source, uint64_t lsn);
Status
ToImmutable();
Status
ToImmutable(const std::string& table_id);
ToImmutable(const std::string& collection_id);
uint64_t
GetMaxLSN(const MemList& tables);


@ -26,8 +26,8 @@
namespace milvus {
namespace engine {
MemTable::MemTable(const std::string& table_id, const meta::MetaPtr& meta, const DBOptions& options)
: table_id_(table_id), meta_(meta), options_(options) {
MemTable::MemTable(const std::string& collection_id, const meta::MetaPtr& meta, const DBOptions& options)
: collection_id_(collection_id), meta_(meta), options_(options) {
SetIdentity("MemTable");
AddCacheInsertDataListener();
}
@ -42,7 +42,7 @@ MemTable::Add(const VectorSourcePtr& source) {
Status status;
if (mem_table_file_list_.empty() || current_mem_table_file->IsFull()) {
MemTableFilePtr new_mem_table_file = std::make_shared<MemTableFile>(table_id_, meta_, options_);
MemTableFilePtr new_mem_table_file = std::make_shared<MemTableFile>(collection_id_, meta_, options_);
status = new_mem_table_file->Add(source);
if (status.ok()) {
mem_table_file_list_.emplace_back(new_mem_table_file);
@ -62,7 +62,7 @@ MemTable::Add(const VectorSourcePtr& source) {
Status
MemTable::Delete(segment::doc_id_t doc_id) {
// Locate which table file the doc id lands in
// Locate which collection file the doc id lands in
for (auto& table_file : mem_table_file_list_) {
table_file->Delete(doc_id);
}
@ -74,7 +74,7 @@ MemTable::Delete(segment::doc_id_t doc_id) {
Status
MemTable::Delete(const std::vector<segment::doc_id_t>& doc_ids) {
// Locate which table file the doc id lands in
// Locate which collection file the doc id lands in
for (auto& table_file : mem_table_file_list_) {
table_file->Delete(doc_ids);
}
@ -122,7 +122,7 @@ MemTable::Serialize(uint64_t wal_lsn, bool apply_delete) {
}
// Update flush lsn
auto status = meta_->UpdateTableFlushLSN(table_id_, wal_lsn);
auto status = meta_->UpdateTableFlushLSN(collection_id_, wal_lsn);
if (!status.ok()) {
std::string err_msg = "Failed to write flush lsn to meta: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
@ -131,7 +131,7 @@ MemTable::Serialize(uint64_t wal_lsn, bool apply_delete) {
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end - start;
ENGINE_LOG_DEBUG << "Finished flushing for table " << table_id_ << " in " << diff.count() << " s";
ENGINE_LOG_DEBUG << "Finished flushing for collection " << collection_id_ << " in " << diff.count() << " s";
return Status::OK();
}
@ -143,7 +143,7 @@ MemTable::Empty() {
const std::string&
MemTable::GetTableId() const {
return table_id_;
return collection_id_;
}
size_t
@ -159,7 +159,7 @@ MemTable::GetCurrentMem() {
Status
MemTable::ApplyDeletes() {
// Applying deletes to other segments on disk and their corresponding cache:
// For each segment in table:
// For each segment in collection:
// Load its bloom filter
// For each id in delete list:
// If present, add the uid to segment's uid list
@ -173,16 +173,16 @@ MemTable::ApplyDeletes() {
// Serialize segment's deletedDoc TODO(zhiru): append directly to previous file for now, may have duplicates
// Serialize bloom filter
ENGINE_LOG_DEBUG << "Applying " << doc_ids_to_delete_.size() << " deletes in table: " << table_id_;
ENGINE_LOG_DEBUG << "Applying " << doc_ids_to_delete_.size() << " deletes in collection: " << collection_id_;
auto start_total = std::chrono::high_resolution_clock::now();
// auto start = std::chrono::high_resolution_clock::now();
std::vector<int> file_types{meta::TableFileSchema::FILE_TYPE::RAW, meta::TableFileSchema::FILE_TYPE::TO_INDEX,
meta::TableFileSchema::FILE_TYPE::BACKUP};
meta::TableFilesSchema table_files;
auto status = meta_->FilesByType(table_id_, file_types, table_files);
std::vector<int> file_types{meta::SegmentSchema::FILE_TYPE::RAW, meta::SegmentSchema::FILE_TYPE::TO_INDEX,
meta::SegmentSchema::FILE_TYPE::BACKUP};
meta::SegmentsSchema table_files;
auto status = meta_->FilesByType(collection_id_, file_types, table_files);
if (!status.ok()) {
std::string err_msg = "Failed to apply deletes: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
@ -209,7 +209,7 @@ MemTable::ApplyDeletes() {
}
}
meta::TableFilesSchema files_to_check;
meta::SegmentsSchema files_to_check;
for (auto& kv : ids_to_check_map) {
files_to_check.emplace_back(table_files[kv.first]);
}
@ -222,7 +222,7 @@ MemTable::ApplyDeletes() {
std::chrono::duration<double> diff0 = time0 - start_total;
ENGINE_LOG_DEBUG << "Found " << ids_to_check_map.size() << " segment to apply deletes in " << diff0.count() << " s";
meta::TableFilesSchema table_files_to_update;
meta::SegmentsSchema table_files_to_update;
for (auto& kv : ids_to_check_map) {
auto& table_file = table_files[kv.first];
@ -235,7 +235,7 @@ MemTable::ApplyDeletes() {
segment::SegmentReader segment_reader(segment_dir);
auto& segment_id = table_file.segment_id_;
meta::TableFilesSchema segment_files;
meta::SegmentsSchema segment_files;
status = meta_->GetTableFilesBySegmentId(segment_id, segment_files);
if (!status.ok()) {
break;
@ -351,10 +351,10 @@ MemTable::ApplyDeletes() {
ENGINE_LOG_DEBUG << "Updated bloom filter in segment: " << table_file.segment_id_ << " in " << diff5.count()
<< " s";
// Update table file row count
// Update collection file row count
for (auto& file : segment_files) {
if (file.file_type_ == meta::TableFileSchema::RAW || file.file_type_ == meta::TableFileSchema::TO_INDEX ||
file.file_type_ == meta::TableFileSchema::INDEX || file.file_type_ == meta::TableFileSchema::BACKUP) {
if (file.file_type_ == meta::SegmentSchema::RAW || file.file_type_ == meta::SegmentSchema::TO_INDEX ||
file.file_type_ == meta::SegmentSchema::INDEX || file.file_type_ == meta::SegmentSchema::BACKUP) {
file.row_count_ -= delete_count;
table_files_to_update.emplace_back(file);
}
@ -362,8 +362,8 @@ MemTable::ApplyDeletes() {
auto time7 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff6 = time7 - time6;
diff6 = time6 - time5;
ENGINE_LOG_DEBUG << "Update table file row count in vector of segment: " << table_file.segment_id_ << " in "
<< diff6.count() << " s";
ENGINE_LOG_DEBUG << "Update collection file row count in vector of segment: " << table_file.segment_id_
<< " in " << diff6.count() << " s";
}
auto time7 = std::chrono::high_resolution_clock::now();
@ -380,9 +380,9 @@ MemTable::ApplyDeletes() {
auto end_total = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff7 = end_total - time7;
ENGINE_LOG_DEBUG << "Update deletes to meta in table " << table_id_ << " in " << diff7.count() << " s";
ENGINE_LOG_DEBUG << "Update deletes to meta in collection " << collection_id_ << " in " << diff7.count() << " s";
std::chrono::duration<double> diff_total = end_total - start_total;
ENGINE_LOG_DEBUG << "Finished applying deletes in table " << table_id_ << " in " << diff_total.count() << " s";
ENGINE_LOG_DEBUG << "Finished deletes in collection " << collection_id_ << " in " << diff_total.count() << " s";
OngoingFileChecker::GetInstance().UnmarkOngoingFiles(files_to_check);
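The comments in the hunk above spell out the delete-application algorithm: for each segment, load its bloom filter and keep only the delete candidates the filter may contain, so only those segments are read and rewritten. A self-contained sketch of that pre-filtering step, with a hypothetical BloomFilter stand-in (the real per-segment bloom filter API is not shown in this diff):

#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical stand-in for the per-segment bloom filter loaded above.
struct BloomFilter {
    bool MayContain(int64_t /*id*/) const { return true; }  // placeholder
};

// Keep, per segment, only the delete ids its bloom filter may contain; segments
// with an empty candidate list never need to be loaded or updated.
std::unordered_map<std::string, std::vector<int64_t>>
FilterDeleteCandidates(const std::unordered_map<std::string, BloomFilter>& segment_filters,
                       const std::vector<int64_t>& delete_ids) {
    std::unordered_map<std::string, std::vector<int64_t>> to_check;
    for (const auto& seg : segment_filters) {
        for (int64_t id : delete_ids) {
            if (seg.second.MayContain(id)) {
                to_check[seg.first].push_back(id);
            }
        }
    }
    return to_check;
}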


@ -30,7 +30,7 @@ class MemTable : public server::CacheConfigHandler {
public:
using MemTableFileList = std::vector<MemTableFilePtr>;
MemTable(const std::string& table_id, const meta::MetaPtr& meta, const DBOptions& options);
MemTable(const std::string& collection_id, const meta::MetaPtr& meta, const DBOptions& options);
Status
Add(const VectorSourcePtr& source);
@ -74,7 +74,7 @@ class MemTable : public server::CacheConfigHandler {
ApplyDeletes();
private:
const std::string table_id_;
const std::string collection_id_;
MemTableFileList mem_table_file_list_;


@ -28,8 +28,8 @@
namespace milvus {
namespace engine {
MemTableFile::MemTableFile(const std::string& table_id, const meta::MetaPtr& meta, const DBOptions& options)
: table_id_(table_id), meta_(meta), options_(options) {
MemTableFile::MemTableFile(const std::string& collection_id, const meta::MetaPtr& meta, const DBOptions& options)
: collection_id_(collection_id), meta_(meta), options_(options) {
current_mem_ = 0;
auto status = CreateTableFile();
if (status.ok()) {
@ -47,8 +47,8 @@ MemTableFile::MemTableFile(const std::string& table_id, const meta::MetaPtr& met
Status
MemTableFile::CreateTableFile() {
meta::TableFileSchema table_file_schema;
table_file_schema.table_id_ = table_id_;
meta::SegmentSchema table_file_schema;
table_file_schema.collection_id_ = collection_id_;
auto status = meta_->CreateTableFile(table_file_schema);
if (status.ok()) {
table_file_schema_ = table_file_schema;
@ -64,9 +64,9 @@ MemTableFile::Add(const VectorSourcePtr& source) {
if (table_file_schema_.dimension_ <= 0) {
std::string err_msg =
"MemTableFile::Add: table_file_schema dimension = " + std::to_string(table_file_schema_.dimension_) +
", table_id = " + table_file_schema_.table_id_;
", collection_id = " + table_file_schema_.collection_id_;
ENGINE_LOG_ERROR << err_msg;
return Status(DB_ERROR, "Not able to create table file");
return Status(DB_ERROR, "Not able to create collection file");
}
size_t single_vector_mem_size = source->SingleVectorSize(table_file_schema_.dimension_);
@ -162,11 +162,11 @@ MemTableFile::Serialize(uint64_t wal_lsn) {
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to serialize segment: " << table_file_schema_.segment_id_;
/* Can't mark it as to_delete because data is stored in this mem table file. Any further flush
* will try to serialize the same mem table file and it won't be able to find the directory
* to write to or update the associated table file in meta.
/* Can't mark it as to_delete because data is stored in this mem collection file. Any further flush
* will try to serialize the same mem collection file and it won't be able to find the directory
* to write to or update the associated collection file in meta.
*
table_file_schema_.file_type_ = meta::TableFileSchema::TO_DELETE;
table_file_schema_.file_type_ = meta::SegmentSchema::TO_DELETE;
meta_->UpdateTableFile(table_file_schema_);
ENGINE_LOG_DEBUG << "Failed to serialize segment, mark file: " << table_file_schema_.file_id_
<< " to to_delete";
@ -186,19 +186,19 @@ MemTableFile::Serialize(uint64_t wal_lsn) {
// else set file type to RAW, no need to build index
if (table_file_schema_.engine_type_ != (int)EngineType::FAISS_IDMAP &&
table_file_schema_.engine_type_ != (int)EngineType::FAISS_BIN_IDMAP) {
table_file_schema_.file_type_ = (size >= table_file_schema_.index_file_size_) ? meta::TableFileSchema::TO_INDEX
: meta::TableFileSchema::RAW;
table_file_schema_.file_type_ =
(size >= table_file_schema_.index_file_size_) ? meta::SegmentSchema::TO_INDEX : meta::SegmentSchema::RAW;
} else {
table_file_schema_.file_type_ = meta::TableFileSchema::RAW;
table_file_schema_.file_type_ = meta::SegmentSchema::RAW;
}
// Set table file's flush_lsn so WAL can roll back and delete garbage files which can be obtained from
// Set collection file's flush_lsn so WAL can roll back and delete garbage files which can be obtained from
// GetTableFilesByFlushLSN() in meta.
table_file_schema_.flush_lsn_ = wal_lsn;
status = meta_->UpdateTableFile(table_file_schema_);
ENGINE_LOG_DEBUG << "New " << ((table_file_schema_.file_type_ == meta::TableFileSchema::RAW) ? "raw" : "to_index")
ENGINE_LOG_DEBUG << "New " << ((table_file_schema_.file_type_ == meta::SegmentSchema::RAW) ? "raw" : "to_index")
<< " file " << table_file_schema_.file_id_ << " of size " << size << " bytes, lsn = " << wal_lsn;
// TODO(zhiru): cache


@ -28,7 +28,7 @@ namespace engine {
class MemTableFile : public server::CacheConfigHandler {
public:
MemTableFile(const std::string& table_id, const meta::MetaPtr& meta, const DBOptions& options);
MemTableFile(const std::string& collection_id, const meta::MetaPtr& meta, const DBOptions& options);
~MemTableFile() = default;
@ -66,8 +66,8 @@ class MemTableFile : public server::CacheConfigHandler {
CreateTableFile();
private:
const std::string table_id_;
meta::TableFileSchema table_file_schema_;
const std::string collection_id_;
meta::SegmentSchema table_file_schema_;
meta::MetaPtr meta_;
DBOptions options_;
size_t current_mem_;


@ -28,7 +28,7 @@ VectorSource::VectorSource(VectorsData vectors) : vectors_(std::move(vectors)) {
Status
VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment::SegmentWriterPtr& segment_writer_ptr,
const meta::TableFileSchema& table_file_schema, const size_t& num_vectors_to_add,
const meta::SegmentSchema& table_file_schema, const size_t& num_vectors_to_add,
size_t& num_vectors_added) {
uint64_t n = vectors_.vector_count_;
server::CollectAddMetrics metrics(n, table_file_schema.dimension_);


@ -30,7 +30,7 @@ class VectorSource {
Status
Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment::SegmentWriterPtr& segment_writer_ptr,
const meta::TableFileSchema& table_file_schema, const size_t& num_vectors_to_add, size_t& num_vectors_added);
const meta::SegmentSchema& table_file_schema, const size_t& num_vectors_to_add, size_t& num_vectors_added);
size_t
GetNumVectorsAdded();


@ -35,7 +35,7 @@ class Meta {
class CleanUpFilter {
public:
virtual bool
IsIgnored(const TableFileSchema& schema) = 0;
IsIgnored(const SegmentSchema& schema) = 0;
};
*/
@ -43,92 +43,92 @@ class Meta {
virtual ~Meta() = default;
virtual Status
CreateTable(TableSchema& table_schema) = 0;
CreateTable(CollectionSchema& table_schema) = 0;
virtual Status
DescribeTable(TableSchema& table_schema) = 0;
DescribeTable(CollectionSchema& table_schema) = 0;
virtual Status
HasTable(const std::string& table_id, bool& has_or_not) = 0;
HasTable(const std::string& collection_id, bool& has_or_not) = 0;
virtual Status
AllTables(std::vector<TableSchema>& table_schema_array) = 0;
AllTables(std::vector<CollectionSchema>& table_schema_array) = 0;
virtual Status
UpdateTableFlag(const std::string& table_id, int64_t flag) = 0;
UpdateTableFlag(const std::string& collection_id, int64_t flag) = 0;
virtual Status
UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_lsn) = 0;
UpdateTableFlushLSN(const std::string& collection_id, uint64_t flush_lsn) = 0;
virtual Status
GetTableFlushLSN(const std::string& table_id, uint64_t& flush_lsn) = 0;
GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) = 0;
virtual Status
GetTableFilesByFlushLSN(uint64_t flush_lsn, TableFilesSchema& table_files) = 0;
GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table_files) = 0;
virtual Status
DropTable(const std::string& table_id) = 0;
DropTable(const std::string& collection_id) = 0;
virtual Status
DeleteTableFiles(const std::string& table_id) = 0;
DeleteTableFiles(const std::string& collection_id) = 0;
virtual Status
CreateTableFile(TableFileSchema& file_schema) = 0;
CreateTableFile(SegmentSchema& file_schema) = 0;
virtual Status
GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) = 0;
GetTableFiles(const std::string& collection_id, const std::vector<size_t>& ids, SegmentsSchema& table_files) = 0;
virtual Status
GetTableFilesBySegmentId(const std::string& segment_id, TableFilesSchema& table_files) = 0;
GetTableFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) = 0;
virtual Status
UpdateTableFile(TableFileSchema& file_schema) = 0;
UpdateTableFile(SegmentSchema& file_schema) = 0;
virtual Status
UpdateTableFiles(TableFilesSchema& files) = 0;
UpdateTableFiles(SegmentsSchema& files) = 0;
virtual Status
UpdateTableFilesRowCount(TableFilesSchema& files) = 0;
UpdateTableFilesRowCount(SegmentsSchema& files) = 0;
virtual Status
UpdateTableIndex(const std::string& table_id, const TableIndex& index) = 0;
UpdateTableIndex(const std::string& collection_id, const TableIndex& index) = 0;
virtual Status
UpdateTableFilesToIndex(const std::string& table_id) = 0;
UpdateTableFilesToIndex(const std::string& collection_id) = 0;
virtual Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) = 0;
DescribeTableIndex(const std::string& collection_id, TableIndex& index) = 0;
virtual Status
DropTableIndex(const std::string& table_id) = 0;
DropTableIndex(const std::string& collection_id) = 0;
virtual Status
CreatePartition(const std::string& table_name, const std::string& partition_name, const std::string& tag,
CreatePartition(const std::string& collection_name, const std::string& partition_name, const std::string& tag,
uint64_t lsn) = 0;
virtual Status
DropPartition(const std::string& partition_name) = 0;
virtual Status
ShowPartitions(const std::string& table_name, std::vector<meta::TableSchema>& partition_schema_array) = 0;
ShowPartitions(const std::string& collection_name, std::vector<meta::CollectionSchema>& partition_schema_array) = 0;
virtual Status
GetPartitionName(const std::string& table_name, const std::string& tag, std::string& partition_name) = 0;
GetPartitionName(const std::string& collection_name, const std::string& tag, std::string& partition_name) = 0;
virtual Status
FilesToSearch(const std::string& table_id, TableFilesSchema& files) = 0;
FilesToSearch(const std::string& collection_id, SegmentsSchema& files) = 0;
virtual Status
FilesToMerge(const std::string& table_id, TableFilesSchema& files) = 0;
FilesToMerge(const std::string& collection_id, SegmentsSchema& files) = 0;
virtual Status
FilesToIndex(TableFilesSchema&) = 0;
FilesToIndex(SegmentsSchema&) = 0;
virtual Status
FilesByType(const std::string& table_id, const std::vector<int>& file_types, TableFilesSchema& files) = 0;
FilesByType(const std::string& collection_id, const std::vector<int>& file_types, SegmentsSchema& files) = 0;
virtual Status
FilesByID(const std::vector<size_t>& ids, TableFilesSchema& files) = 0;
FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files) = 0;
virtual Status
Size(uint64_t& result) = 0;
@ -146,7 +146,7 @@ class Meta {
DropAll() = 0;
virtual Status
Count(const std::string& table_id, uint64_t& result) = 0;
Count(const std::string& collection_id, uint64_t& result) = 0;
virtual Status
SetGlobalLastLSN(uint64_t lsn) = 0;
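
For orientation, a hedged sketch (not part of the patch) of how a caller of this interface reads after the rename; only the type and parameter names change, the methods otherwise keep their signatures:

// meta is any concrete Meta implementation (see the MySQL/SQLite impls below).
meta::CollectionSchema schema;
schema.collection_id_ = "demo_collection";   // field renamed from table_id_
schema.dimension_ = 128;                     // illustrative value
auto status = meta->CreateTable(schema);     // now takes CollectionSchema&

bool exists = false;
meta->HasTable(schema.collection_id_, exists);

meta::SegmentsSchema files;                  // renamed from TableFilesSchema
meta->FilesToSearch(schema.collection_id_, files);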


@ -40,14 +40,14 @@ struct EnvironmentSchema {
uint64_t global_lsn_ = 0;
}; // EnvironmentSchema
struct TableSchema {
struct CollectionSchema {
typedef enum {
NORMAL,
TO_DELETE,
} TABLE_STATE;
size_t id_ = 0;
std::string table_id_;
std::string collection_id_;
int32_t state_ = (int)NORMAL;
uint16_t dimension_ = 0;
int64_t created_on_ = 0;
@ -60,9 +60,9 @@ struct TableSchema {
std::string partition_tag_;
std::string version_ = CURRENT_VERSION;
uint64_t flush_lsn_ = 0;
}; // TableSchema
}; // CollectionSchema
struct TableFileSchema {
struct SegmentSchema {
typedef enum {
NEW,
RAW,
@ -75,7 +75,7 @@ struct TableFileSchema {
} FILE_TYPE;
size_t id_ = 0;
std::string table_id_;
std::string collection_id_;
std::string segment_id_;
std::string file_id_;
int32_t file_type_ = NEW;
@ -92,10 +92,10 @@ struct TableFileSchema {
std::string index_params_; // not persist to meta
int32_t metric_type_ = DEFAULT_METRIC_TYPE; // not persist to meta
uint64_t flush_lsn_ = 0;
}; // TableFileSchema
}; // SegmentSchema
using TableFileSchemaPtr = std::shared_ptr<meta::TableFileSchema>;
using TableFilesSchema = std::vector<TableFileSchema>;
using SegmentSchemaPtr = std::shared_ptr<meta::SegmentSchema>;
using SegmentsSchema = std::vector<SegmentSchema>;
} // namespace meta
} // namespace engine
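
A short sketch of how the renamed types compose (illustrative values; assumes this header and <memory> are included): a SegmentSchema still points back to its owning collection, now through collection_id_, and the old typedef spellings map one-to-one onto the new ones:

auto file = std::make_shared<meta::SegmentSchema>();   // SegmentSchemaPtr, was TableFileSchemaPtr
file->collection_id_ = "demo_collection";              // was table_id_
file->segment_id_ = "seg_0";                           // illustrative
file->file_type_ = meta::SegmentSchema::RAW;

meta::SegmentsSchema files = {*file};                  // std::vector<SegmentSchema>, was TableFilesSchema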



@ -32,92 +32,94 @@ class MySQLMetaImpl : public Meta {
~MySQLMetaImpl();
Status
CreateTable(TableSchema& table_schema) override;
CreateTable(CollectionSchema& table_schema) override;
Status
DescribeTable(TableSchema& table_schema) override;
DescribeTable(CollectionSchema& table_schema) override;
Status
HasTable(const std::string& table_id, bool& has_or_not) override;
HasTable(const std::string& collection_id, bool& has_or_not) override;
Status
AllTables(std::vector<TableSchema>& table_schema_array) override;
AllTables(std::vector<CollectionSchema>& table_schema_array) override;
Status
DropTable(const std::string& table_id) override;
DropTable(const std::string& collection_id) override;
Status
DeleteTableFiles(const std::string& table_id) override;
DeleteTableFiles(const std::string& collection_id) override;
Status
CreateTableFile(TableFileSchema& file_schema) override;
CreateTableFile(SegmentSchema& file_schema) override;
Status
GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) override;
GetTableFiles(const std::string& collection_id, const std::vector<size_t>& ids,
SegmentsSchema& table_files) override;
Status
GetTableFilesBySegmentId(const std::string& segment_id, TableFilesSchema& table_files) override;
GetTableFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) override;
Status
UpdateTableIndex(const std::string& table_id, const TableIndex& index) override;
UpdateTableIndex(const std::string& collection_id, const TableIndex& index) override;
Status
UpdateTableFlag(const std::string& table_id, int64_t flag) override;
UpdateTableFlag(const std::string& collection_id, int64_t flag) override;
Status
UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_lsn) override;
UpdateTableFlushLSN(const std::string& collection_id, uint64_t flush_lsn) override;
Status
GetTableFlushLSN(const std::string& table_id, uint64_t& flush_lsn) override;
GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) override;
Status
GetTableFilesByFlushLSN(uint64_t flush_lsn, TableFilesSchema& table_files) override;
GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table_files) override;
Status
UpdateTableFile(TableFileSchema& file_schema) override;
UpdateTableFile(SegmentSchema& file_schema) override;
Status
UpdateTableFilesToIndex(const std::string& table_id) override;
UpdateTableFilesToIndex(const std::string& collection_id) override;
Status
UpdateTableFiles(TableFilesSchema& files) override;
UpdateTableFiles(SegmentsSchema& files) override;
Status
UpdateTableFilesRowCount(TableFilesSchema& files) override;
UpdateTableFilesRowCount(SegmentsSchema& files) override;
Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) override;
DescribeTableIndex(const std::string& collection_id, TableIndex& index) override;
Status
DropTableIndex(const std::string& table_id) override;
DropTableIndex(const std::string& collection_id) override;
Status
CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag,
CreatePartition(const std::string& collection_id, const std::string& partition_name, const std::string& tag,
uint64_t lsn) override;
Status
DropPartition(const std::string& partition_name) override;
Status
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) override;
ShowPartitions(const std::string& collection_id,
std::vector<meta::CollectionSchema>& partition_schema_array) override;
Status
GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override;
GetPartitionName(const std::string& collection_id, const std::string& tag, std::string& partition_name) override;
Status
FilesToSearch(const std::string& table_id, TableFilesSchema& files) override;
FilesToSearch(const std::string& collection_id, SegmentsSchema& files) override;
Status
FilesToMerge(const std::string& table_id, TableFilesSchema& files) override;
FilesToMerge(const std::string& collection_id, SegmentsSchema& files) override;
Status
FilesToIndex(TableFilesSchema&) override;
FilesToIndex(SegmentsSchema&) override;
Status
FilesByType(const std::string& table_id, const std::vector<int>& file_types, TableFilesSchema& files) override;
FilesByType(const std::string& collection_id, const std::vector<int>& file_types, SegmentsSchema& files) override;
Status
FilesByID(const std::vector<size_t>& ids, TableFilesSchema& table_files) override;
FilesByID(const std::vector<size_t>& ids, SegmentsSchema& table_files) override;
Status
Archive() override;
@ -135,7 +137,7 @@ class MySQLMetaImpl : public Meta {
DropAll() override;
Status
Count(const std::string& table_id, uint64_t& result) override;
Count(const std::string& collection_id, uint64_t& result) override;
Status
SetGlobalLastLSN(uint64_t lsn) override;
@ -147,7 +149,7 @@ class MySQLMetaImpl : public Meta {
Status
NextFileId(std::string& file_id);
Status
NextTableId(std::string& table_id);
NextTableId(std::string& collection_id);
Status
DiscardFiles(int64_t to_discard_size);



@ -31,92 +31,94 @@ class SqliteMetaImpl : public Meta {
~SqliteMetaImpl();
Status
CreateTable(TableSchema& table_schema) override;
CreateTable(CollectionSchema& table_schema) override;
Status
DescribeTable(TableSchema& table_schema) override;
DescribeTable(CollectionSchema& table_schema) override;
Status
HasTable(const std::string& table_id, bool& has_or_not) override;
HasTable(const std::string& collection_id, bool& has_or_not) override;
Status
AllTables(std::vector<TableSchema>& table_schema_array) override;
AllTables(std::vector<CollectionSchema>& table_schema_array) override;
Status
DropTable(const std::string& table_id) override;
DropTable(const std::string& collection_id) override;
Status
DeleteTableFiles(const std::string& table_id) override;
DeleteTableFiles(const std::string& collection_id) override;
Status
CreateTableFile(TableFileSchema& file_schema) override;
CreateTableFile(SegmentSchema& file_schema) override;
Status
GetTableFiles(const std::string& table_id, const std::vector<size_t>& ids, TableFilesSchema& table_files) override;
GetTableFiles(const std::string& collection_id, const std::vector<size_t>& ids,
SegmentsSchema& table_files) override;
Status
GetTableFilesBySegmentId(const std::string& segment_id, TableFilesSchema& table_files) override;
GetTableFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) override;
Status
UpdateTableIndex(const std::string& table_id, const TableIndex& index) override;
UpdateTableIndex(const std::string& collection_id, const TableIndex& index) override;
Status
UpdateTableFlag(const std::string& table_id, int64_t flag) override;
UpdateTableFlag(const std::string& collection_id, int64_t flag) override;
Status
UpdateTableFlushLSN(const std::string& table_id, uint64_t flush_lsn) override;
UpdateTableFlushLSN(const std::string& collection_id, uint64_t flush_lsn) override;
Status
GetTableFlushLSN(const std::string& table_id, uint64_t& flush_lsn) override;
GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) override;
Status
GetTableFilesByFlushLSN(uint64_t flush_lsn, TableFilesSchema& table_files) override;
GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table_files) override;
Status
UpdateTableFile(TableFileSchema& file_schema) override;
UpdateTableFile(SegmentSchema& file_schema) override;
Status
UpdateTableFilesToIndex(const std::string& table_id) override;
UpdateTableFilesToIndex(const std::string& collection_id) override;
Status
UpdateTableFiles(TableFilesSchema& files) override;
UpdateTableFiles(SegmentsSchema& files) override;
Status
UpdateTableFilesRowCount(TableFilesSchema& files) override;
UpdateTableFilesRowCount(SegmentsSchema& files) override;
Status
DescribeTableIndex(const std::string& table_id, TableIndex& index) override;
DescribeTableIndex(const std::string& collection_id, TableIndex& index) override;
Status
DropTableIndex(const std::string& table_id) override;
DropTableIndex(const std::string& collection_id) override;
Status
CreatePartition(const std::string& table_id, const std::string& partition_name, const std::string& tag,
CreatePartition(const std::string& collection_id, const std::string& partition_name, const std::string& tag,
uint64_t lsn) override;
Status
DropPartition(const std::string& partition_name) override;
Status
ShowPartitions(const std::string& table_id, std::vector<meta::TableSchema>& partition_schema_array) override;
ShowPartitions(const std::string& collection_id,
std::vector<meta::CollectionSchema>& partition_schema_array) override;
Status
GetPartitionName(const std::string& table_id, const std::string& tag, std::string& partition_name) override;
GetPartitionName(const std::string& collection_id, const std::string& tag, std::string& partition_name) override;
Status
FilesToSearch(const std::string& table_id, TableFilesSchema& files) override;
FilesToSearch(const std::string& collection_id, SegmentsSchema& files) override;
Status
FilesToMerge(const std::string& table_id, TableFilesSchema& files) override;
FilesToMerge(const std::string& collection_id, SegmentsSchema& files) override;
Status
FilesToIndex(TableFilesSchema&) override;
FilesToIndex(SegmentsSchema&) override;
Status
FilesByType(const std::string& table_id, const std::vector<int>& file_types, TableFilesSchema& files) override;
FilesByType(const std::string& collection_id, const std::vector<int>& file_types, SegmentsSchema& files) override;
Status
FilesByID(const std::vector<size_t>& ids, TableFilesSchema& files) override;
FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files) override;
Status
Size(uint64_t& result) override;
@ -134,7 +136,7 @@ class SqliteMetaImpl : public Meta {
DropAll() override;
Status
Count(const std::string& table_id, uint64_t& result) override;
Count(const std::string& collection_id, uint64_t& result) override;
Status
SetGlobalLastLSN(uint64_t lsn) override;
@ -146,7 +148,7 @@ class SqliteMetaImpl : public Meta {
Status
NextFileId(std::string& file_id);
Status
NextTableId(std::string& table_id);
NextTableId(std::string& collection_id);
Status
DiscardFiles(int64_t to_discard_size);


@ -185,7 +185,7 @@ MXLogBuffer::SurplusSpace() {
uint32_t
MXLogBuffer::RecordSize(const MXLogRecord& record) {
return SizeOfMXLogRecordHeader + (uint32_t)record.table_id.size() + (uint32_t)record.partition_tag.size() +
return SizeOfMXLogRecordHeader + (uint32_t)record.collection_id.size() + (uint32_t)record.partition_tag.size() +
record.length * (uint32_t)sizeof(IDNumber) + record.data_size;
}
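
As a hedged worked sketch of the function above (values are illustrative, not from the patch): a record now carries collection_id instead of table_id, and the size arithmetic is otherwise unchanged:

wal::MXLogRecord record;
record.type = wal::MXLogType::InsertVector;
record.collection_id = "demo_collection";    // was record.table_id
record.partition_tag = "p0";
record.length = 0;                           // no ids or payload in this illustration
record.ids = nullptr;
record.data_size = 0;

// same arithmetic as RecordSize() above, spelled out with the renamed field:
uint32_t needed = SizeOfMXLogRecordHeader + (uint32_t)record.collection_id.size() +
                  (uint32_t)record.partition_tag.size() +
                  record.length * (uint32_t)sizeof(IDNumber) + record.data_size;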
@ -218,7 +218,7 @@ MXLogBuffer::Append(MXLogRecord& record) {
MXLogRecordHeader head;
BuildLsn(mxlog_buffer_writer_.file_no, mxlog_buffer_writer_.buf_offset + (uint32_t)record_size, head.mxl_lsn);
head.mxl_type = (uint8_t)record.type;
head.table_id_size = (uint16_t)record.table_id.size();
head.table_id_size = (uint16_t)record.collection_id.size();
head.partition_tag_size = (uint16_t)record.partition_tag.size();
head.vector_num = record.length;
head.data_size = record.data_size;
@ -226,9 +226,9 @@ MXLogBuffer::Append(MXLogRecord& record) {
memcpy(current_write_buf + current_write_offset, &head, SizeOfMXLogRecordHeader);
current_write_offset += SizeOfMXLogRecordHeader;
if (!record.table_id.empty()) {
memcpy(current_write_buf + current_write_offset, record.table_id.data(), record.table_id.size());
current_write_offset += record.table_id.size();
if (!record.collection_id.empty()) {
memcpy(current_write_buf + current_write_offset, record.collection_id.data(), record.collection_id.size());
current_write_offset += record.collection_id.size();
}
if (!record.partition_tag.empty()) {
@ -307,10 +307,10 @@ MXLogBuffer::Next(const uint64_t last_applied_lsn, MXLogRecord& record) {
current_read_offset += SizeOfMXLogRecordHeader;
if (head->table_id_size != 0) {
record.table_id.assign(current_read_buf + current_read_offset, head->table_id_size);
record.collection_id.assign(current_read_buf + current_read_offset, head->table_id_size);
current_read_offset += head->table_id_size;
} else {
record.table_id = "";
record.collection_id = "";
}
if (head->partition_tag_size != 0) {


@ -22,7 +22,7 @@ namespace milvus {
namespace engine {
namespace wal {
using TableSchemaPtr = std::shared_ptr<milvus::engine::meta::TableSchema>;
using TableSchemaPtr = std::shared_ptr<milvus::engine::meta::CollectionSchema>;
using TableMetaPtr = std::shared_ptr<std::unordered_map<std::string, TableSchemaPtr> >;
#define UNIT_MB (1024 * 1024)
@ -33,7 +33,7 @@ enum class MXLogType { InsertBinary, InsertVector, Delete, Update, Flush, None }
struct MXLogRecord {
uint64_t lsn;
MXLogType type;
std::string table_id;
std::string collection_id;
std::string partition_tag;
uint32_t length;
const IDNumber* ids;

View File

@ -61,7 +61,7 @@ WalManager::Init(const meta::MetaPtr& meta) {
if (meta != nullptr) {
meta->GetGlobalLastLSN(recovery_start);
std::vector<meta::TableSchema> table_schema_array;
std::vector<meta::CollectionSchema> table_schema_array;
auto status = meta->AllTables(table_schema_array);
if (!status.ok()) {
return WAL_META_ERROR;
@ -89,7 +89,7 @@ WalManager::Init(const meta::MetaPtr& meta) {
for (auto& schema : table_schema_array) {
TableLsn tb_lsn = {schema.flush_lsn_, applied_lsn};
tables_[schema.table_id_] = tb_lsn;
tables_[schema.collection_id_] = tb_lsn;
}
}
}
@ -140,7 +140,7 @@ WalManager::GetNextRecovery(MXLogRecord& record) {
// background thread has not started.
// so, needn't lock here.
auto it = tables_.find(record.table_id);
auto it = tables_.find(record.collection_id);
if (it != tables_.end()) {
if (it->second.flush_lsn < record.lsn) {
break;
@ -162,11 +162,11 @@ WalManager::GetNextRecord(MXLogRecord& record) {
if (p_buffer_->GetReadLsn() >= flush_info_.lsn_) {
// can exec flush requirement
record.type = MXLogType::Flush;
record.table_id = flush_info_.table_id_;
record.collection_id = flush_info_.collection_id_;
record.lsn = flush_info_.lsn_;
flush_info_.Clear();
WAL_LOG_INFO << "record flush table " << record.table_id << " lsn " << record.lsn;
WAL_LOG_INFO << "record flush collection " << record.collection_id << " lsn " << record.lsn;
return true;
}
}
@ -187,7 +187,7 @@ WalManager::GetNextRecord(MXLogRecord& record) {
}
std::lock_guard<std::mutex> lck(mutex_);
auto it = tables_.find(record.table_id);
auto it = tables_.find(record.collection_id);
if (it != tables_.end()) {
if (it->second.flush_lsn < record.lsn) {
break;
@ -195,41 +195,42 @@ WalManager::GetNextRecord(MXLogRecord& record) {
}
}
WAL_LOG_INFO << "record type " << (int32_t)record.type << " table " << record.table_id << " lsn " << record.lsn;
WAL_LOG_INFO << "record type " << (int32_t)record.type << " collection " << record.collection_id << " lsn "
<< record.lsn;
return error_code;
}
uint64_t
WalManager::CreateTable(const std::string& table_id) {
WAL_LOG_INFO << "create table " << table_id << " " << last_applied_lsn_;
WalManager::CreateTable(const std::string& collection_id) {
WAL_LOG_INFO << "create collection " << collection_id << " " << last_applied_lsn_;
std::lock_guard<std::mutex> lck(mutex_);
uint64_t applied_lsn = last_applied_lsn_;
tables_[table_id] = {applied_lsn, applied_lsn};
tables_[collection_id] = {applied_lsn, applied_lsn};
return applied_lsn;
}
void
WalManager::DropTable(const std::string& table_id) {
WAL_LOG_INFO << "drop table " << table_id;
WalManager::DropTable(const std::string& collection_id) {
WAL_LOG_INFO << "drop collection " << collection_id;
std::lock_guard<std::mutex> lck(mutex_);
tables_.erase(table_id);
tables_.erase(collection_id);
}
void
WalManager::TableFlushed(const std::string& table_id, uint64_t lsn) {
WalManager::TableFlushed(const std::string& collection_id, uint64_t lsn) {
std::unique_lock<std::mutex> lck(mutex_);
auto it = tables_.find(table_id);
auto it = tables_.find(collection_id);
if (it != tables_.end()) {
it->second.flush_lsn = lsn;
}
lck.unlock();
WAL_LOG_INFO << table_id << " is flushed by lsn " << lsn;
WAL_LOG_INFO << collection_id << " is flushed by lsn " << lsn;
}
template <typename T>
bool
WalManager::Insert(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
WalManager::Insert(const std::string& collection_id, const std::string& partition_tag, const IDNumbers& vector_ids,
const std::vector<T>& vectors) {
MXLogType log_type;
if (std::is_same<T, float>::value) {
@ -247,11 +248,11 @@ WalManager::Insert(const std::string& table_id, const std::string& partition_tag
}
size_t dim = vectors.size() / vector_num;
size_t unit_size = dim * sizeof(T) + sizeof(IDNumber);
size_t head_size = SizeOfMXLogRecordHeader + table_id.length() + partition_tag.length();
size_t head_size = SizeOfMXLogRecordHeader + collection_id.length() + partition_tag.length();
MXLogRecord record;
record.type = log_type;
record.table_id = table_id;
record.collection_id = collection_id;
record.partition_tag = partition_tag;
uint64_t new_lsn = 0;
@ -283,19 +284,19 @@ WalManager::Insert(const std::string& table_id, const std::string& partition_tag
std::unique_lock<std::mutex> lck(mutex_);
last_applied_lsn_ = new_lsn;
auto it = tables_.find(table_id);
auto it = tables_.find(collection_id);
if (it != tables_.end()) {
it->second.wal_lsn = new_lsn;
}
lck.unlock();
WAL_LOG_INFO << table_id << " insert in part " << partition_tag << " with lsn " << new_lsn;
WAL_LOG_INFO << collection_id << " insert in part " << partition_tag << " with lsn " << new_lsn;
return p_meta_handler_->SetMXLogInternalMeta(new_lsn);
}
bool
WalManager::DeleteById(const std::string& table_id, const IDNumbers& vector_ids) {
WalManager::DeleteById(const std::string& collection_id, const IDNumbers& vector_ids) {
size_t vector_num = vector_ids.size();
if (vector_num == 0) {
WAL_LOG_ERROR << "The ids is empty.";
@ -303,11 +304,11 @@ WalManager::DeleteById(const std::string& table_id, const IDNumbers& vector_ids)
}
size_t unit_size = sizeof(IDNumber);
size_t head_size = SizeOfMXLogRecordHeader + table_id.length();
size_t head_size = SizeOfMXLogRecordHeader + collection_id.length();
MXLogRecord record;
record.type = MXLogType::Delete;
record.table_id = table_id;
record.collection_id = collection_id;
record.partition_tag = "";
uint64_t new_lsn = 0;
@ -335,26 +336,26 @@ WalManager::DeleteById(const std::string& table_id, const IDNumbers& vector_ids)
std::unique_lock<std::mutex> lck(mutex_);
last_applied_lsn_ = new_lsn;
auto it = tables_.find(table_id);
auto it = tables_.find(collection_id);
if (it != tables_.end()) {
it->second.wal_lsn = new_lsn;
}
lck.unlock();
WAL_LOG_INFO << table_id << " delete rows by id, lsn " << new_lsn;
WAL_LOG_INFO << collection_id << " delete rows by id, lsn " << new_lsn;
return p_meta_handler_->SetMXLogInternalMeta(new_lsn);
}
uint64_t
WalManager::Flush(const std::string& table_id) {
WalManager::Flush(const std::string& collection_id) {
std::lock_guard<std::mutex> lck(mutex_);
// At most one flush requirement is waiting at any time.
// Otherwise, flush_info_ should be modified to a list.
__glibcxx_assert(!flush_info_.IsValid());
uint64_t lsn = 0;
if (table_id.empty()) {
if (collection_id.empty()) {
// flush all tables
for (auto& it : tables_) {
if (it.second.wal_lsn > it.second.flush_lsn) {
@ -364,8 +365,8 @@ WalManager::Flush(const std::string& table_id) {
}
} else {
// flush one table
auto it = tables_.find(table_id);
// flush one collection
auto it = tables_.find(collection_id);
if (it != tables_.end()) {
if (it->second.wal_lsn > it->second.flush_lsn) {
lsn = it->second.wal_lsn;
@ -374,11 +375,11 @@ WalManager::Flush(const std::string& table_id) {
}
if (lsn != 0) {
flush_info_.table_id_ = table_id;
flush_info_.collection_id_ = collection_id;
flush_info_.lsn_ = lsn;
}
WAL_LOG_INFO << table_id << " want to be flush, lsn " << lsn;
WAL_LOG_INFO << collection_id << " want to be flush, lsn " << lsn;
return lsn;
}
@ -391,12 +392,12 @@ WalManager::RemoveOldFiles(uint64_t flushed_lsn) {
}
template bool
WalManager::Insert<float>(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
const std::vector<float>& vectors);
WalManager::Insert<float>(const std::string& collection_id, const std::string& partition_tag,
const IDNumbers& vector_ids, const std::vector<float>& vectors);
template bool
WalManager::Insert<uint8_t>(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
const std::vector<uint8_t>& vectors);
WalManager::Insert<uint8_t>(const std::string& collection_id, const std::string& partition_tag,
const IDNumbers& vector_ids, const std::vector<uint8_t>& vectors);
} // namespace wal
} // namespace engine


@ -57,57 +57,57 @@ class WalManager {
GetNextRecord(MXLogRecord& record);
/*
* Create table
* @param table_id: table id
* Create collection
* @param collection_id: collection id
* @retval lsn
*/
uint64_t
CreateTable(const std::string& table_id);
CreateTable(const std::string& collection_id);
/*
* Drop table
* @param table_id: table id
* Drop collection
* @param collection_id: collection id
* @retval none
*/
void
DropTable(const std::string& table_id);
DropTable(const std::string& collection_id);
/*
* Table is flushed
* @param table_id: table id
* Collection is flushed
* @param collection_id: collection id
* @param lsn: flushed lsn
*/
void
TableFlushed(const std::string& table_id, uint64_t lsn);
TableFlushed(const std::string& collection_id, uint64_t lsn);
/*
* Insert
* @param table_id: table id
* @param table_id: partition tag
* @param collection_id: collection id
* @param collection_id: partition tag
* @param vector_ids: vector ids
* @param vectors: vectors
*/
template <typename T>
bool
Insert(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
Insert(const std::string& collection_id, const std::string& partition_tag, const IDNumbers& vector_ids,
const std::vector<T>& vectors);
/*
* Insert
* @param table_id: table id
* @param collection_id: collection id
* @param vector_ids: vector ids
*/
bool
DeleteById(const std::string& table_id, const IDNumbers& vector_ids);
DeleteById(const std::string& collection_id, const IDNumbers& vector_ids);
/*
* Get flush lsn
* @param table_id: table id (empty means all tables)
* @param collection_id: collection id (empty means all tables)
* @retval if there is something not flushed, return lsn;
* else, return 0
*/
uint64_t
Flush(const std::string& table_id = "");
Flush(const std::string& collection_id = "");
void
RemoveOldFiles(uint64_t flushed_lsn);
@ -131,7 +131,7 @@ class WalManager {
// if multi-thread call Flush(), use list
struct FlushInfo {
std::string table_id_;
std::string collection_id_;
uint64_t lsn_ = 0;
bool
@ -147,12 +147,12 @@ class WalManager {
};
extern template bool
WalManager::Insert<float>(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
const std::vector<float>& vectors);
WalManager::Insert<float>(const std::string& collection_id, const std::string& partition_tag,
const IDNumbers& vector_ids, const std::vector<float>& vectors);
extern template bool
WalManager::Insert<uint8_t>(const std::string& table_id, const std::string& partition_tag, const IDNumbers& vector_ids,
const std::vector<uint8_t>& vectors);
WalManager::Insert<uint8_t>(const std::string& collection_id, const std::string& partition_tag,
const IDNumbers& vector_ids, const std::vector<uint8_t>& vectors);
} // namespace wal
} // namespace engine
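
Putting the renamed WalManager API together, an end-to-end sketch (wal_mgr, ids, and vectors are assumed to exist; not code from this patch):

uint64_t lsn = wal_mgr.CreateTable("demo_collection");        // register the collection with the WAL

wal_mgr.Insert<float>("demo_collection", "p0", ids, vectors); // ids: IDNumbers, vectors: std::vector<float>

uint64_t flush_lsn = wal_mgr.Flush("demo_collection");        // returns 0 if nothing needs flushing
if (flush_lsn != 0) {
    // after the actual flush is carried out elsewhere, report it back:
    wal_mgr.TableFlushed("demo_collection", flush_lsn);
}

wal_mgr.DropTable("demo_collection");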


@ -23,7 +23,7 @@ AdapterMgr::GetAdapter(const IndexType type) {
RegisterAdapter();
try {
return table_.at(type)();
return collection_.at(type)();
} catch (...) {
KNOWHERE_THROW_MSG("Can not find this type of confadapter");
}


@ -26,7 +26,7 @@ class AdapterMgr {
template <typename T>
struct register_t {
explicit register_t(const IndexType type) {
AdapterMgr::GetInstance().table_[type] = ([] { return std::make_shared<T>(); });
AdapterMgr::GetInstance().collection_[type] = ([] { return std::make_shared<T>(); });
}
};
@ -44,7 +44,7 @@ class AdapterMgr {
protected:
bool init_ = false;
std::unordered_map<IndexType, std::function<ConfAdapterPtr()>> table_;
std::unordered_map<IndexType, std::function<ConfAdapterPtr()>> collection_;
};
} // namespace knowhere


@ -186,11 +186,11 @@ class CollectDurationMetrics : CollectMetricsBase {
~CollectDurationMetrics() {
auto total_time = TimeFromBegine();
switch (index_type_) {
case engine::meta::TableFileSchema::RAW: {
case engine::meta::SegmentSchema::RAW: {
server::Metrics::GetInstance().SearchRawDataDurationSecondsHistogramObserve(total_time);
break;
}
case engine::meta::TableFileSchema::TO_INDEX: {
case engine::meta::SegmentSchema::TO_INDEX: {
server::Metrics::GetInstance().SearchRawDataDurationSecondsHistogramObserve(total_time);
break;
}
@ -214,11 +214,11 @@ class CollectSearchTaskMetrics : CollectMetricsBase {
~CollectSearchTaskMetrics() {
auto total_time = TimeFromBegine();
switch (index_type_) {
case engine::meta::TableFileSchema::RAW: {
case engine::meta::SegmentSchema::RAW: {
server::Metrics::GetInstance().SearchRawDataDurationSecondsHistogramObserve(total_time);
break;
}
case engine::meta::TableFileSchema::TO_INDEX: {
case engine::meta::SegmentSchema::TO_INDEX: {
server::Metrics::GetInstance().SearchRawDataDurationSecondsHistogramObserve(total_time);
break;
}


@ -440,7 +440,7 @@ class PrometheusMetrics : public MetricsBase {
prometheus::Histogram& all_build_index_duration_seconds_histogram_ =
all_build_index_duration_seconds_.Add({}, BucketBoundaries{2e6, 4e6, 6e6, 8e6, 1e7});
// record duration of merging mem table
// record duration of merging mem collection
prometheus::Family<prometheus::Histogram>& mem_table_merge_duration_seconds_ =
prometheus::BuildHistogram()
.Name("mem_table_merge_duration_microseconds")


@ -27,8 +27,8 @@
namespace milvus {
namespace scheduler {
using TableFileSchemaPtr = engine::meta::TableFileSchemaPtr;
using TableFileSchema = engine::meta::TableFileSchema;
using SegmentSchemaPtr = engine::meta::SegmentSchemaPtr;
using SegmentSchema = engine::meta::SegmentSchema;
using ExecutionEnginePtr = engine::ExecutionEnginePtr;
using EngineFactory = engine::EngineFactory;


@ -153,22 +153,22 @@ TaskTable::PickToLoad(uint64_t limit) {
std::vector<uint64_t> indexes;
bool cross = false;
uint64_t available_begin = table_.front() + 1;
for (uint64_t i = 0, loaded_count = 0, pick_count = 0; i < table_.size() && pick_count < limit; ++i) {
uint64_t available_begin = collection_.front() + 1;
for (uint64_t i = 0, loaded_count = 0, pick_count = 0; i < collection_.size() && pick_count < limit; ++i) {
auto index = available_begin + i;
if (not table_[index])
if (not collection_[index])
break;
if (index % table_.capacity() == table_.rear())
if (index % collection_.capacity() == collection_.rear())
break;
if (not cross && table_[index]->IsFinish()) {
table_.set_front(index);
} else if (table_[index]->state == TaskTableItemState::LOADED) {
if (not cross && collection_[index]->IsFinish()) {
collection_.set_front(index);
} else if (collection_[index]->state == TaskTableItemState::LOADED) {
cross = true;
++loaded_count;
if (loaded_count > 2)
return std::vector<uint64_t>();
} else if (table_[index]->state == TaskTableItemState::START) {
auto task = table_[index]->task;
} else if (collection_[index]->state == TaskTableItemState::START) {
auto task = collection_[index]->task;
// if task is a build index task, limit it
if (task->Type() == TaskType::BuildIndexTask && task->path().Current() == "cpu") {
@ -186,18 +186,19 @@ TaskTable::PickToLoad(uint64_t limit) {
return indexes;
#else
size_t count = 0;
for (uint64_t j = last_finish_ + 1; j < table_.size(); ++j) {
if (not table_[j]) {
SERVER_LOG_WARNING << "table[" << j << "] is nullptr";
for (uint64_t j = last_finish_ + 1; j < collection_.size(); ++j) {
if (not collection_[j]) {
SERVER_LOG_WARNING << "collection[" << j << "] is nullptr";
}
if (table_[j]->task->path().Current() == "cpu") {
if (table_[j]->task->Type() == TaskType::BuildIndexTask && BuildMgrInst::GetInstance()->numoftasks() < 1) {
if (collection_[j]->task->path().Current() == "cpu") {
if (collection_[j]->task->Type() == TaskType::BuildIndexTask &&
BuildMgrInst::GetInstance()->numoftasks() < 1) {
return std::vector<uint64_t>();
}
}
if (table_[j]->state == TaskTableItemState::LOADED) {
if (collection_[j]->state == TaskTableItemState::LOADED) {
++count;
if (count > 2)
return std::vector<uint64_t>();
@ -206,11 +207,11 @@ TaskTable::PickToLoad(uint64_t limit) {
std::vector<uint64_t> indexes;
bool cross = false;
for (uint64_t i = last_finish_ + 1, count = 0; i < table_.size() && count < limit; ++i) {
if (not cross && table_[i]->IsFinish()) {
for (uint64_t i = last_finish_ + 1, count = 0; i < collection_.size() && count < limit; ++i) {
if (not cross && collection_[i]->IsFinish()) {
last_finish_ = i;
} else if (table_[i]->state == TaskTableItemState::START) {
auto task = table_[i]->task;
} else if (collection_[i]->state == TaskTableItemState::START) {
auto task = collection_[i]->task;
if (task->Type() == TaskType::BuildIndexTask && task->path().Current() == "cpu") {
if (BuildMgrInst::GetInstance()->numoftasks() == 0) {
break;
@ -236,19 +237,19 @@ TaskTable::PickToExecute(uint64_t limit) {
// TimeRecorder rc("");
std::vector<uint64_t> indexes;
bool cross = false;
uint64_t available_begin = table_.front() + 1;
for (uint64_t i = 0, pick_count = 0; i < table_.size() && pick_count < limit; ++i) {
uint64_t available_begin = collection_.front() + 1;
for (uint64_t i = 0, pick_count = 0; i < collection_.size() && pick_count < limit; ++i) {
uint64_t index = available_begin + i;
if (not table_[index]) {
if (not collection_[index]) {
break;
}
if (index % table_.capacity() == table_.rear()) {
if (index % collection_.capacity() == collection_.rear()) {
break;
}
if (not cross && table_[index]->IsFinish()) {
table_.set_front(index);
} else if (table_[index]->state == TaskTableItemState::LOADED) {
if (not cross && collection_[index]->IsFinish()) {
collection_.set_front(index);
} else if (collection_[index]->state == TaskTableItemState::LOADED) {
cross = true;
indexes.push_back(index);
++pick_count;
@ -265,7 +266,7 @@ TaskTable::Put(TaskPtr task, TaskTableItemPtr from) {
item->task = std::move(task);
item->state = TaskTableItemState::START;
item->timestamp.start = get_current_timestamp();
table_.put(std::move(item));
collection_.put(std::move(item));
if (subscriber_) {
subscriber_();
}
@ -274,10 +275,10 @@ TaskTable::Put(TaskPtr task, TaskTableItemPtr from) {
size_t
TaskTable::TaskToExecute() {
size_t count = 0;
auto begin = table_.front() + 1;
for (size_t i = 0; i < table_.size(); ++i) {
auto begin = collection_.front() + 1;
for (size_t i = 0; i < collection_.size(); ++i) {
auto index = begin + i;
if (table_[index] && table_[index]->state == TaskTableItemState::LOADED) {
if (collection_[index] && collection_[index]->state == TaskTableItemState::LOADED) {
++count;
}
}


@ -97,7 +97,7 @@ struct TaskTableItem : public interface::dumpable {
class TaskTable : public interface::dumpable {
public:
TaskTable() : table_(1ULL << 16ULL) {
TaskTable() : collection_(1ULL << 16ULL) {
}
TaskTable(const TaskTable&) = delete;
@ -127,22 +127,22 @@ class TaskTable : public interface::dumpable {
public:
inline const TaskTableItemPtr& operator[](uint64_t index) {
return table_[index];
return collection_[index];
}
inline const TaskTableItemPtr&
at(uint64_t index) {
return table_[index];
return collection_[index];
}
inline size_t
capacity() {
return table_.capacity();
return collection_.capacity();
}
inline size_t
size() {
return table_.size();
return collection_.size();
}
public:
@ -156,7 +156,7 @@ class TaskTable : public interface::dumpable {
*/
inline bool
Load(uint64_t index) {
return table_[index]->Load();
return collection_[index]->Load();
}
/*
@ -166,7 +166,7 @@ class TaskTable : public interface::dumpable {
*/
inline bool
Loaded(uint64_t index) {
return table_[index]->Loaded();
return collection_[index]->Loaded();
}
/*
@ -176,7 +176,7 @@ class TaskTable : public interface::dumpable {
*/
inline bool
Execute(uint64_t index) {
return table_[index]->Execute();
return collection_[index]->Execute();
}
/*
@ -186,7 +186,7 @@ class TaskTable : public interface::dumpable {
*/
inline bool
Executed(uint64_t index) {
return table_[index]->Executed();
return collection_[index]->Executed();
}
/*
@ -197,7 +197,7 @@ class TaskTable : public interface::dumpable {
inline bool
Move(uint64_t index) {
return table_[index]->Move();
return collection_[index]->Move();
}
/*
@ -207,12 +207,12 @@ class TaskTable : public interface::dumpable {
*/
inline bool
Moved(uint64_t index) {
return table_[index]->Moved();
return collection_[index]->Moved();
}
private:
std::uint64_t id_ = 0;
CircleQueue<TaskTableItemPtr> table_;
CircleQueue<TaskTableItemPtr> collection_;
std::function<void(void)> subscriber_ = nullptr;
// cache last finish avoid Pick task from begin always
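
A brief hedged sketch of the renamed container in use (task_table and task are assumed; the helpers above now index into collection_ rather than table_, with unchanged behaviour):

scheduler::TaskTable task_table;
task_table.Put(task, nullptr);              // enqueue; 'from' passed as nullptr for this sketch

auto to_load = task_table.PickToLoad(1);    // indices of items whose data should be loaded next
for (auto index : to_load) {
    task_table.Load(index);                 // state transitions go through collection_[index]
    // ... resource loads the data ...
    task_table.Loaded(index);
}

auto to_execute = task_table.PickToExecute(1);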


@ -25,7 +25,7 @@ BuildIndexJob::BuildIndexJob(engine::meta::MetaPtr meta_ptr, engine::DBOptions o
}
bool
BuildIndexJob::AddToIndexFiles(const engine::meta::TableFileSchemaPtr& to_index_file) {
BuildIndexJob::AddToIndexFiles(const engine::meta::SegmentSchemaPtr& to_index_file) {
std::unique_lock<std::mutex> lock(mutex_);
if (to_index_file == nullptr || to_index_files_.find(to_index_file->id_) != to_index_files_.end()) {
return false;


@ -29,10 +29,10 @@
namespace milvus {
namespace scheduler {
using engine::meta::TableFileSchemaPtr;
using engine::meta::SegmentSchemaPtr;
using Id2ToIndexMap = std::unordered_map<size_t, TableFileSchemaPtr>;
using Id2ToTableFileMap = std::unordered_map<size_t, TableFileSchema>;
using Id2ToIndexMap = std::unordered_map<size_t, SegmentSchemaPtr>;
using Id2ToTableFileMap = std::unordered_map<size_t, SegmentSchema>;
class BuildIndexJob : public Job, public server::CacheConfigHandler {
public:
@ -42,7 +42,7 @@ class BuildIndexJob : public Job, public server::CacheConfigHandler {
public:
bool
AddToIndexFiles(const TableFileSchemaPtr& to_index_file);
AddToIndexFiles(const SegmentSchemaPtr& to_index_file);
void
WaitBuildIndexFinish();


@ -16,9 +16,9 @@
namespace milvus {
namespace scheduler {
DeleteJob::DeleteJob(std::string table_id, engine::meta::MetaPtr meta_ptr, uint64_t num_resource)
DeleteJob::DeleteJob(std::string collection_id, engine::meta::MetaPtr meta_ptr, uint64_t num_resource)
: Job(JobType::DELETE),
table_id_(std::move(table_id)),
collection_id_(std::move(collection_id)),
meta_ptr_(std::move(meta_ptr)),
num_resource_(num_resource) {
}
@ -27,7 +27,7 @@ void
DeleteJob::WaitAndDelete() {
std::unique_lock<std::mutex> lock(mutex_);
cv_.wait(lock, [&] { return done_resource == num_resource_; });
meta_ptr_->DeleteTableFiles(table_id_);
meta_ptr_->DeleteTableFiles(collection_id_);
}
void
@ -42,7 +42,7 @@ DeleteJob::ResourceDone() {
json
DeleteJob::Dump() const {
json ret{
{"table_id", table_id_},
{"collection_id", collection_id_},
{"number_of_resource", num_resource_},
{"number_of_done", done_resource},
};


@ -29,7 +29,7 @@ namespace scheduler {
class DeleteJob : public Job {
public:
DeleteJob(std::string table_id, engine::meta::MetaPtr meta_ptr, uint64_t num_resource);
DeleteJob(std::string collection_id, engine::meta::MetaPtr meta_ptr, uint64_t num_resource);
public:
void
@ -43,8 +43,8 @@ class DeleteJob : public Job {
public:
std::string
table_id() const {
return table_id_;
collection_id() const {
return collection_id_;
}
engine::meta::MetaPtr
@ -53,7 +53,7 @@ class DeleteJob : public Job {
}
private:
std::string table_id_;
std::string collection_id_;
engine::meta::MetaPtr meta_ptr_;
uint64_t num_resource_ = 0;
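
A hedged usage sketch of the renamed job (meta_ptr and the threading are assumed; constructor and accessors as declared above):

auto job = std::make_shared<scheduler::DeleteJob>("demo_collection", meta_ptr, /*num_resource=*/2);

// each participating resource reports back, normally from its own thread:
job->ResourceDone();
job->ResourceDone();

job->WaitAndDelete();       // returns once done_resource == num_resource_, then drops the files via meta

auto state = job->Dump();   // json now carries "collection_id" instead of "table_id"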


@ -22,7 +22,7 @@ SearchJob::SearchJob(const std::shared_ptr<server::Context>& context, uint64_t t
}
bool
SearchJob::AddIndexFile(const TableFileSchemaPtr& index_file) {
SearchJob::AddIndexFile(const SegmentSchemaPtr& index_file) {
std::unique_lock<std::mutex> lock(mutex_);
if (index_file == nullptr || index_files_.find(index_file->id_) != index_files_.end()) {
return false;


@ -31,9 +31,9 @@
namespace milvus {
namespace scheduler {
using engine::meta::TableFileSchemaPtr;
using engine::meta::SegmentSchemaPtr;
using Id2IndexMap = std::unordered_map<size_t, TableFileSchemaPtr>;
using Id2IndexMap = std::unordered_map<size_t, SegmentSchemaPtr>;
using ResultIds = engine::ResultIds;
using ResultDistances = engine::ResultDistances;
@ -45,7 +45,7 @@ class SearchJob : public Job {
public:
bool
AddIndexFile(const TableFileSchemaPtr& index_file);
AddIndexFile(const SegmentSchemaPtr& index_file);
void
WaitResult();


@ -31,13 +31,13 @@
namespace milvus {
namespace scheduler {
XBuildIndexTask::XBuildIndexTask(TableFileSchemaPtr file, TaskLabelPtr label)
XBuildIndexTask::XBuildIndexTask(SegmentSchemaPtr file, TaskLabelPtr label)
: Task(TaskType::BuildIndexTask, std::move(label)), file_(file) {
if (file_) {
EngineType engine_type;
if (file->file_type_ == TableFileSchema::FILE_TYPE::RAW ||
file->file_type_ == TableFileSchema::FILE_TYPE::TO_INDEX ||
file->file_type_ == TableFileSchema::FILE_TYPE::BACKUP) {
if (file->file_type_ == SegmentSchema::FILE_TYPE::RAW ||
file->file_type_ == SegmentSchema::FILE_TYPE::TO_INDEX ||
file->file_type_ == SegmentSchema::FILE_TYPE::BACKUP) {
engine_type = engine::utils::IsBinaryMetricType(file->metric_type_) ? EngineType::FAISS_BIN_IDMAP
: EngineType::FAISS_IDMAP;
} else {
@ -122,18 +122,18 @@ XBuildIndexTask::Execute() {
EngineType engine_type = (EngineType)file_->engine_type_;
std::shared_ptr<engine::ExecutionEngine> index;
// step 2: create table file
engine::meta::TableFileSchema table_file;
table_file.table_id_ = file_->table_id_;
// step 2: create collection file
engine::meta::SegmentSchema table_file;
table_file.collection_id_ = file_->collection_id_;
table_file.segment_id_ = file_->file_id_;
table_file.date_ = file_->date_;
table_file.file_type_ = engine::meta::TableFileSchema::NEW_INDEX;
table_file.file_type_ = engine::meta::SegmentSchema::NEW_INDEX;
engine::meta::MetaPtr meta_ptr = build_index_job->meta();
Status status = meta_ptr->CreateTableFile(table_file);
fiu_do_on("XBuildIndexTask.Execute.create_table_success", status = Status::OK());
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to create table file: " << status.ToString();
ENGINE_LOG_ERROR << "Failed to create collection file: " << status.ToString();
build_index_job->BuildIndexDone(to_index_id_);
build_index_job->GetStatus() = status;
to_index_engine_ = nullptr;
@ -152,7 +152,7 @@ XBuildIndexTask::Execute() {
std::string msg = "Build index exception: " + std::string(ex.what());
ENGINE_LOG_ERROR << msg;
table_file.file_type_ = engine::meta::TableFileSchema::TO_DELETE;
table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
status = meta_ptr->UpdateTableFile(table_file);
ENGINE_LOG_DEBUG << "Build index fail, mark file: " << table_file.file_id_ << " to to_delete";
@ -162,16 +162,16 @@ XBuildIndexTask::Execute() {
return;
}
// step 4: if table has been deleted, dont save index file
// step 4: if collection has been deleted, dont save index file
bool has_table = false;
meta_ptr->HasTable(file_->table_id_, has_table);
meta_ptr->HasTable(file_->collection_id_, has_table);
fiu_do_on("XBuildIndexTask.Execute.has_table", has_table = true);
if (!has_table) {
meta_ptr->DeleteTableFiles(file_->table_id_);
meta_ptr->DeleteTableFiles(file_->collection_id_);
build_index_job->BuildIndexDone(to_index_id_);
build_index_job->GetStatus() = Status(DB_ERROR, "Table has been deleted, discard index file.");
build_index_job->GetStatus() = Status(DB_ERROR, "Collection has been deleted, discard index file.");
to_index_engine_ = nullptr;
return;
}
@ -193,7 +193,7 @@ XBuildIndexTask::Execute() {
if (!status.ok()) {
// if failed to serialize index file to disk
// typical error: out of disk space, out of memory or permition denied
table_file.file_type_ = engine::meta::TableFileSchema::TO_DELETE;
table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
status = meta_ptr->UpdateTableFile(table_file);
ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << table_file.file_id_ << " to to_delete";
@ -207,14 +207,14 @@ XBuildIndexTask::Execute() {
}
// step 6: update meta
table_file.file_type_ = engine::meta::TableFileSchema::INDEX;
table_file.file_type_ = engine::meta::SegmentSchema::INDEX;
table_file.file_size_ = server::CommonUtil::GetFileSize(table_file.location_);
table_file.row_count_ = file_->row_count_; // index->Count();
auto origin_file = *file_;
origin_file.file_type_ = engine::meta::TableFileSchema::BACKUP;
origin_file.file_type_ = engine::meta::SegmentSchema::BACKUP;
engine::meta::TableFilesSchema update_files = {table_file, origin_file};
engine::meta::SegmentsSchema update_files = {table_file, origin_file};
if (status.ok()) { // makesure index file is sucessfully serialized to disk
status = meta_ptr->UpdateTableFiles(update_files);
@ -230,11 +230,11 @@ XBuildIndexTask::Execute() {
}
} else {
// failed to update meta, mark the new file as to_delete, don't delete old file
origin_file.file_type_ = engine::meta::TableFileSchema::TO_INDEX;
origin_file.file_type_ = engine::meta::SegmentSchema::TO_INDEX;
status = meta_ptr->UpdateTableFile(origin_file);
ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << origin_file.file_id_ << " to to_index";
table_file.file_type_ = engine::meta::TableFileSchema::TO_DELETE;
table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
status = meta_ptr->UpdateTableFile(table_file);
ENGINE_LOG_DEBUG << "Failed to up date file to index, mark file: " << table_file.file_id_
<< " to to_delete";


@ -20,7 +20,7 @@ namespace scheduler {
class XBuildIndexTask : public Task {
public:
explicit XBuildIndexTask(TableFileSchemaPtr file, TaskLabelPtr label);
explicit XBuildIndexTask(SegmentSchemaPtr file, TaskLabelPtr label);
void
Load(LoadType type, uint8_t device_id) override;
@ -29,8 +29,8 @@ class XBuildIndexTask : public Task {
Execute() override;
public:
TableFileSchemaPtr file_;
TableFileSchema table_file_;
SegmentSchemaPtr file_;
SegmentSchema table_file_;
size_t to_index_id_ = 0;
int to_index_type_ = 0;
ExecutionEnginePtr to_index_engine_ = nullptr;


@ -83,8 +83,8 @@ void
CollectFileMetrics(int file_type, size_t file_size) {
server::MetricsBase& inst = server::Metrics::GetInstance();
switch (file_type) {
case TableFileSchema::RAW:
case TableFileSchema::TO_INDEX: {
case SegmentSchema::RAW:
case SegmentSchema::TO_INDEX: {
inst.RawFileSizeHistogramObserve(file_size);
inst.RawFileSizeTotalIncrement(file_size);
inst.RawFileSizeGaugeSet(file_size);
@ -99,7 +99,7 @@ CollectFileMetrics(int file_type, size_t file_size) {
}
}
XSearchTask::XSearchTask(const std::shared_ptr<server::Context>& context, TableFileSchemaPtr file, TaskLabelPtr label)
XSearchTask::XSearchTask(const std::shared_ptr<server::Context>& context, SegmentSchemaPtr file, TaskLabelPtr label)
: Task(TaskType::SearchTask, std::move(label)), context_(context), file_(file) {
if (file_) {
// distance -- value 0 means two vectors equal, ascending reduce, L2/HAMMING/JACCARD/TONIMOTO ...
@ -110,9 +110,9 @@ XSearchTask::XSearchTask(const std::shared_ptr<server::Context>& context, TableF
}
EngineType engine_type;
if (file->file_type_ == TableFileSchema::FILE_TYPE::RAW ||
file->file_type_ == TableFileSchema::FILE_TYPE::TO_INDEX ||
file->file_type_ == TableFileSchema::FILE_TYPE::BACKUP) {
if (file->file_type_ == SegmentSchema::FILE_TYPE::RAW ||
file->file_type_ == SegmentSchema::FILE_TYPE::TO_INDEX ||
file->file_type_ == SegmentSchema::FILE_TYPE::BACKUP) {
engine_type = engine::utils::IsBinaryMetricType(file->metric_type_) ? EngineType::FAISS_BIN_IDMAP
: EngineType::FAISS_IDMAP;
} else {


@ -25,7 +25,7 @@ namespace scheduler {
// TODO(wxyu): rewrite
class XSearchTask : public Task {
public:
explicit XSearchTask(const std::shared_ptr<server::Context>& context, TableFileSchemaPtr file, TaskLabelPtr label);
explicit XSearchTask(const std::shared_ptr<server::Context>& context, SegmentSchemaPtr file, TaskLabelPtr label);
void
Load(LoadType type, uint8_t device_id) override;
@ -53,7 +53,7 @@ class XSearchTask : public Task {
public:
const std::shared_ptr<server::Context> context_;
TableFileSchemaPtr file_;
SegmentSchemaPtr file_;
size_t index_id_ = 0;
int index_type_ = 0;


@ -18,7 +18,7 @@
namespace milvus {
namespace scheduler {
TestTask::TestTask(const std::shared_ptr<server::Context>& context, TableFileSchemaPtr& file, TaskLabelPtr label)
TestTask::TestTask(const std::shared_ptr<server::Context>& context, SegmentSchemaPtr& file, TaskLabelPtr label)
: XSearchTask(context, file, std::move(label)) {
}


@ -20,7 +20,7 @@ namespace scheduler {
class TestTask : public XSearchTask {
public:
explicit TestTask(const std::shared_ptr<server::Context>& context, TableFileSchemaPtr& file, TaskLabelPtr label);
explicit TestTask(const std::shared_ptr<server::Context>& context, SegmentSchemaPtr& file, TaskLabelPtr label);
public:
void


@ -204,7 +204,7 @@ DBWrapper::StartService() {
db_->Start();
// preload table
// preload collection
std::string preload_tables;
s = config.GetDBConfigPreloadTable(preload_tables);
if (!s.ok()) {
@ -237,19 +237,19 @@ DBWrapper::PreloadTables(const std::string& preload_tables) {
// do nothing
} else if (preload_tables == "*") {
// load all tables
std::vector<engine::meta::TableSchema> table_schema_array;
std::vector<engine::meta::CollectionSchema> table_schema_array;
db_->AllTables(table_schema_array);
for (auto& schema : table_schema_array) {
auto status = db_->PreloadTable(schema.table_id_);
auto status = db_->PreloadTable(schema.collection_id_);
if (!status.ok()) {
return status;
}
}
} else {
std::vector<std::string> table_names;
StringHelpFunctions::SplitStringByDelimeter(preload_tables, ",", table_names);
for (auto& name : table_names) {
std::vector<std::string> collection_names;
StringHelpFunctions::SplitStringByDelimeter(preload_tables, ",", collection_names);
for (auto& name : collection_names) {
auto status = db_->PreloadTable(name);
if (!status.ok()) {
return status;


@ -43,62 +43,62 @@ namespace milvus {
namespace server {
Status
RequestHandler::CreateTable(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t dimension,
int64_t index_file_size, int64_t metric_type) {
RequestHandler::CreateTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
int64_t dimension, int64_t index_file_size, int64_t metric_type) {
BaseRequestPtr request_ptr =
CreateTableRequest::Create(context, table_name, dimension, index_file_size, metric_type);
CreateTableRequest::Create(context, collection_name, dimension, index_file_size, metric_type);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::HasTable(const std::shared_ptr<Context>& context, const std::string& table_name, bool& has_table) {
BaseRequestPtr request_ptr = HasTableRequest::Create(context, table_name, has_table);
RequestHandler::HasTable(const std::shared_ptr<Context>& context, const std::string& collection_name, bool& has_table) {
BaseRequestPtr request_ptr = HasTableRequest::Create(context, collection_name, has_table);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::DropTable(const std::shared_ptr<Context>& context, const std::string& table_name) {
BaseRequestPtr request_ptr = DropTableRequest::Create(context, table_name);
RequestHandler::DropTable(const std::shared_ptr<Context>& context, const std::string& collection_name) {
BaseRequestPtr request_ptr = DropTableRequest::Create(context, collection_name);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::CreateIndex(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t index_type,
const milvus::json& json_params) {
BaseRequestPtr request_ptr = CreateIndexRequest::Create(context, table_name, index_type, json_params);
RequestHandler::CreateIndex(const std::shared_ptr<Context>& context, const std::string& collection_name,
int64_t index_type, const milvus::json& json_params) {
BaseRequestPtr request_ptr = CreateIndexRequest::Create(context, collection_name, index_type, json_params);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::Insert(const std::shared_ptr<Context>& context, const std::string& table_name,
RequestHandler::Insert(const std::shared_ptr<Context>& context, const std::string& collection_name,
engine::VectorsData& vectors, const std::string& partition_tag) {
BaseRequestPtr request_ptr = InsertRequest::Create(context, table_name, vectors, partition_tag);
BaseRequestPtr request_ptr = InsertRequest::Create(context, collection_name, vectors, partition_tag);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::GetVectorByID(const std::shared_ptr<Context>& context, const std::string& table_name,
RequestHandler::GetVectorByID(const std::shared_ptr<Context>& context, const std::string& collection_name,
const std::vector<int64_t>& ids, engine::VectorsData& vectors) {
BaseRequestPtr request_ptr = GetVectorByIDRequest::Create(context, table_name, ids, vectors);
BaseRequestPtr request_ptr = GetVectorByIDRequest::Create(context, collection_name, ids, vectors);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::GetVectorIDs(const std::shared_ptr<Context>& context, const std::string& table_name,
RequestHandler::GetVectorIDs(const std::shared_ptr<Context>& context, const std::string& collection_name,
const std::string& segment_name, std::vector<int64_t>& vector_ids) {
BaseRequestPtr request_ptr = GetVectorIDsRequest::Create(context, table_name, segment_name, vector_ids);
BaseRequestPtr request_ptr = GetVectorIDsRequest::Create(context, collection_name, segment_name, vector_ids);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
@ -113,49 +113,50 @@ RequestHandler::ShowTables(const std::shared_ptr<Context>& context, std::vector<
}
Status
RequestHandler::ShowTableInfo(const std::shared_ptr<Context>& context, const std::string& table_name,
RequestHandler::ShowTableInfo(const std::shared_ptr<Context>& context, const std::string& collection_name,
TableInfo& table_info) {
BaseRequestPtr request_ptr = ShowTableInfoRequest::Create(context, table_name, table_info);
BaseRequestPtr request_ptr = ShowTableInfoRequest::Create(context, collection_name, table_info);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::Search(const std::shared_ptr<Context>& context, const std::string& table_name,
RequestHandler::Search(const std::shared_ptr<Context>& context, const std::string& collection_name,
const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params,
const std::vector<std::string>& partition_list, const std::vector<std::string>& file_id_list,
TopKQueryResult& result) {
BaseRequestPtr request_ptr =
SearchRequest::Create(context, table_name, vectors, topk, extra_params, partition_list, file_id_list, result);
BaseRequestPtr request_ptr = SearchRequest::Create(context, collection_name, vectors, topk, extra_params,
partition_list, file_id_list, result);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::SearchByID(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t vector_id,
int64_t topk, const milvus::json& extra_params,
RequestHandler::SearchByID(const std::shared_ptr<Context>& context, const std::string& collection_name,
int64_t vector_id, int64_t topk, const milvus::json& extra_params,
const std::vector<std::string>& partition_list, TopKQueryResult& result) {
BaseRequestPtr request_ptr =
SearchByIDRequest::Create(context, table_name, vector_id, topk, extra_params, partition_list, result);
SearchByIDRequest::Create(context, collection_name, vector_id, topk, extra_params, partition_list, result);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::DescribeTable(const std::shared_ptr<Context>& context, const std::string& table_name,
TableSchema& table_schema) {
BaseRequestPtr request_ptr = DescribeTableRequest::Create(context, table_name, table_schema);
RequestHandler::DescribeTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
CollectionSchema& table_schema) {
BaseRequestPtr request_ptr = DescribeTableRequest::Create(context, collection_name, table_schema);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::CountTable(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t& count) {
BaseRequestPtr request_ptr = CountTableRequest::Create(context, table_name, count);
RequestHandler::CountTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
int64_t& count) {
BaseRequestPtr request_ptr = CountTableRequest::Create(context, collection_name, count);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
@ -170,77 +171,77 @@ RequestHandler::Cmd(const std::shared_ptr<Context>& context, const std::string&
}
Status
RequestHandler::DeleteByID(const std::shared_ptr<Context>& context, const std::string& table_name,
RequestHandler::DeleteByID(const std::shared_ptr<Context>& context, const std::string& collection_name,
const std::vector<int64_t>& vector_ids) {
BaseRequestPtr request_ptr = DeleteByIDRequest::Create(context, table_name, vector_ids);
BaseRequestPtr request_ptr = DeleteByIDRequest::Create(context, collection_name, vector_ids);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::PreloadTable(const std::shared_ptr<Context>& context, const std::string& table_name) {
BaseRequestPtr request_ptr = PreloadTableRequest::Create(context, table_name);
RequestHandler::PreloadTable(const std::shared_ptr<Context>& context, const std::string& collection_name) {
BaseRequestPtr request_ptr = PreloadTableRequest::Create(context, collection_name);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::DescribeIndex(const std::shared_ptr<Context>& context, const std::string& table_name,
RequestHandler::DescribeIndex(const std::shared_ptr<Context>& context, const std::string& collection_name,
IndexParam& param) {
BaseRequestPtr request_ptr = DescribeIndexRequest::Create(context, table_name, param);
BaseRequestPtr request_ptr = DescribeIndexRequest::Create(context, collection_name, param);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::DropIndex(const std::shared_ptr<Context>& context, const std::string& table_name) {
BaseRequestPtr request_ptr = DropIndexRequest::Create(context, table_name);
RequestHandler::DropIndex(const std::shared_ptr<Context>& context, const std::string& collection_name) {
BaseRequestPtr request_ptr = DropIndexRequest::Create(context, collection_name);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::CreatePartition(const std::shared_ptr<Context>& context, const std::string& table_name,
RequestHandler::CreatePartition(const std::shared_ptr<Context>& context, const std::string& collection_name,
const std::string& tag) {
BaseRequestPtr request_ptr = CreatePartitionRequest::Create(context, table_name, tag);
BaseRequestPtr request_ptr = CreatePartitionRequest::Create(context, collection_name, tag);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::ShowPartitions(const std::shared_ptr<Context>& context, const std::string& table_name,
RequestHandler::ShowPartitions(const std::shared_ptr<Context>& context, const std::string& collection_name,
std::vector<PartitionParam>& partitions) {
BaseRequestPtr request_ptr = ShowPartitionsRequest::Create(context, table_name, partitions);
BaseRequestPtr request_ptr = ShowPartitionsRequest::Create(context, collection_name, partitions);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::DropPartition(const std::shared_ptr<Context>& context, const std::string& table_name,
RequestHandler::DropPartition(const std::shared_ptr<Context>& context, const std::string& collection_name,
const std::string& tag) {
BaseRequestPtr request_ptr = DropPartitionRequest::Create(context, table_name, tag);
BaseRequestPtr request_ptr = DropPartitionRequest::Create(context, collection_name, tag);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::Flush(const std::shared_ptr<Context>& context, const std::vector<std::string>& table_names) {
BaseRequestPtr request_ptr = FlushRequest::Create(context, table_names);
RequestHandler::Flush(const std::shared_ptr<Context>& context, const std::vector<std::string>& collection_names) {
BaseRequestPtr request_ptr = FlushRequest::Create(context, collection_names);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::Compact(const std::shared_ptr<Context>& context, const std::string& table_name) {
BaseRequestPtr request_ptr = CompactRequest::Create(context, table_name);
RequestHandler::Compact(const std::shared_ptr<Context>& context, const std::string& collection_name) {
BaseRequestPtr request_ptr = CompactRequest::Create(context, collection_name);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
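Every RequestHandler method above follows the same delegation pattern: build a request object through its static Create(), hand it to RequestScheduler::ExecRequest(), and return the status stored on the request. The standalone sketch below illustrates that pattern; the classes are simplified stand-ins, not the real Milvus Status/BaseRequest/RequestScheduler types, and the scheduler is reduced to a direct Execute() call.

#include <iostream>
#include <memory>
#include <string>

// Hypothetical stand-in for milvus::Status.
struct Status {
    int code;
    std::string message;
    Status() : code(0) {}
    Status(int c, std::string msg) : code(c), message(std::move(msg)) {}
    bool ok() const { return code == 0; }
};

// Hypothetical stand-in for BaseRequest: the scheduler runs OnExecute() and keeps the result.
class BaseRequest {
 public:
    virtual ~BaseRequest() = default;
    Status Execute() { status_ = OnExecute(); return status_; }   // RequestScheduler::ExecRequest() in the real code
    Status status() const { return status_; }

 protected:
    virtual Status OnExecute() = 0;

 private:
    Status status_;
};

// A toy "CompactRequest": validate the collection name, then pretend to compact.
class CompactRequest : public BaseRequest {
 public:
    static std::shared_ptr<BaseRequest> Create(const std::string& collection_name) {
        return std::shared_ptr<BaseRequest>(new CompactRequest(collection_name));
    }

 protected:
    explicit CompactRequest(const std::string& collection_name) : collection_name_(collection_name) {}
    Status OnExecute() override {
        if (collection_name_.empty()) {
            return Status(1, "Invalid collection name");
        }
        // Real code would call DBWrapper::DB()->Compact(collection_name_) here.
        return Status();
    }

 private:
    const std::string collection_name_;
};

// The handler layer only wires things together, exactly like RequestHandler above.
Status HandleCompact(const std::string& collection_name) {
    auto request_ptr = CompactRequest::Create(collection_name);
    request_ptr->Execute();
    return request_ptr->status();
}

int main() {
    std::cout << (HandleCompact("demo_collection").ok() ? "ok" : "failed") << std::endl;
    return 0;
}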

View File

@ -27,84 +27,87 @@ class RequestHandler {
RequestHandler() = default;
Status
CreateTable(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t dimension,
CreateTable(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t dimension,
int64_t index_file_size, int64_t metric_type);
Status
HasTable(const std::shared_ptr<Context>& context, const std::string& table_name, bool& has_table);
HasTable(const std::shared_ptr<Context>& context, const std::string& collection_name, bool& has_table);
Status
DropTable(const std::shared_ptr<Context>& context, const std::string& table_name);
DropTable(const std::shared_ptr<Context>& context, const std::string& collection_name);
Status
CreateIndex(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t index_type,
CreateIndex(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t index_type,
const milvus::json& json_params);
Status
Insert(const std::shared_ptr<Context>& context, const std::string& table_name, engine::VectorsData& vectors,
Insert(const std::shared_ptr<Context>& context, const std::string& collection_name, engine::VectorsData& vectors,
const std::string& partition_tag);
Status
GetVectorByID(const std::shared_ptr<Context>& context, const std::string& table_name,
GetVectorByID(const std::shared_ptr<Context>& context, const std::string& collection_name,
const std::vector<int64_t>& ids, engine::VectorsData& vectors);
Status
GetVectorIDs(const std::shared_ptr<Context>& context, const std::string& table_name,
GetVectorIDs(const std::shared_ptr<Context>& context, const std::string& collection_name,
const std::string& segment_name, std::vector<int64_t>& vector_ids);
Status
ShowTables(const std::shared_ptr<Context>& context, std::vector<std::string>& tables);
Status
ShowTableInfo(const std::shared_ptr<Context>& context, const std::string& table_name, TableInfo& table_info);
ShowTableInfo(const std::shared_ptr<Context>& context, const std::string& collection_name, TableInfo& table_info);
Status
Search(const std::shared_ptr<Context>& context, const std::string& table_name, const engine::VectorsData& vectors,
int64_t topk, const milvus::json& extra_params, const std::vector<std::string>& partition_list,
const std::vector<std::string>& file_id_list, TopKQueryResult& result);
Search(const std::shared_ptr<Context>& context, const std::string& collection_name,
const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params,
const std::vector<std::string>& partition_list, const std::vector<std::string>& file_id_list,
TopKQueryResult& result);
Status
SearchByID(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t vector_id, int64_t topk,
const milvus::json& extra_params, const std::vector<std::string>& partition_list,
SearchByID(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t vector_id,
int64_t topk, const milvus::json& extra_params, const std::vector<std::string>& partition_list,
TopKQueryResult& result);
Status
DescribeTable(const std::shared_ptr<Context>& context, const std::string& table_name, TableSchema& table_schema);
DescribeTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
CollectionSchema& table_schema);
Status
CountTable(const std::shared_ptr<Context>& context, const std::string& table_name, int64_t& count);
CountTable(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t& count);
Status
Cmd(const std::shared_ptr<Context>& context, const std::string& cmd, std::string& reply);
Status
DeleteByID(const std::shared_ptr<Context>& context, const std::string& table_name,
DeleteByID(const std::shared_ptr<Context>& context, const std::string& collection_name,
const std::vector<int64_t>& vector_ids);
Status
PreloadTable(const std::shared_ptr<Context>& context, const std::string& table_name);
PreloadTable(const std::shared_ptr<Context>& context, const std::string& collection_name);
Status
DescribeIndex(const std::shared_ptr<Context>& context, const std::string& table_name, IndexParam& param);
DescribeIndex(const std::shared_ptr<Context>& context, const std::string& collection_name, IndexParam& param);
Status
DropIndex(const std::shared_ptr<Context>& context, const std::string& table_name);
DropIndex(const std::shared_ptr<Context>& context, const std::string& collection_name);
Status
CreatePartition(const std::shared_ptr<Context>& context, const std::string& table_name, const std::string& tag);
CreatePartition(const std::shared_ptr<Context>& context, const std::string& collection_name,
const std::string& tag);
Status
ShowPartitions(const std::shared_ptr<Context>& context, const std::string& table_name,
ShowPartitions(const std::shared_ptr<Context>& context, const std::string& collection_name,
std::vector<PartitionParam>& partitions);
Status
DropPartition(const std::shared_ptr<Context>& context, const std::string& table_name, const std::string& tag);
DropPartition(const std::shared_ptr<Context>& context, const std::string& collection_name, const std::string& tag);
Status
Flush(const std::shared_ptr<Context>& context, const std::vector<std::string>& table_names);
Flush(const std::shared_ptr<Context>& context, const std::vector<std::string>& collection_names);
Status
Compact(const std::shared_ptr<Context>& context, const std::string& table_name);
Compact(const std::shared_ptr<Context>& context, const std::string& collection_name);
};
} // namespace server

View File

@ -38,7 +38,7 @@ RequestGroup(BaseRequest::RequestType type) {
{BaseRequest::kGetVectorByID, INFO_REQUEST_GROUP},
{BaseRequest::kGetVectorIDs, INFO_REQUEST_GROUP},
// table operations
// collection operations
{BaseRequest::kShowTables, INFO_REQUEST_GROUP},
{BaseRequest::kCreateTable, DDL_DML_REQUEST_GROUP},
{BaseRequest::kHasTable, INFO_REQUEST_GROUP},
@ -130,10 +130,10 @@ BaseRequest::set_status(const Status& status) {
}
std::string
BaseRequest::TableNotExistMsg(const std::string& table_name) {
return "Table " + table_name +
" does not exist. Use milvus.has_table to verify whether the table exists. "
"You also can check whether the table name exists.";
BaseRequest::TableNotExistMsg(const std::string& collection_name) {
return "Collection " + collection_name +
" does not exist. Use milvus.has_table to verify whether the collection exists. "
"You also can check whether the collection name exists.";
}
Status

View File

@ -31,20 +31,21 @@
namespace milvus {
namespace server {
struct TableSchema {
std::string table_name_;
struct CollectionSchema {
std::string collection_name_;
int64_t dimension_;
int64_t index_file_size_;
int64_t metric_type_;
TableSchema() {
CollectionSchema() {
dimension_ = 0;
index_file_size_ = 0;
metric_type_ = 0;
}
TableSchema(const std::string& table_name, int64_t dimension, int64_t index_file_size, int64_t metric_type) {
table_name_ = table_name;
CollectionSchema(const std::string& collection_name, int64_t dimension, int64_t index_file_size,
int64_t metric_type) {
collection_name_ = collection_name;
dimension_ = dimension;
index_file_size_ = index_file_size;
metric_type_ = metric_type;
@ -68,7 +69,7 @@ struct TopKQueryResult {
};
struct IndexParam {
std::string table_name_;
std::string collection_name_;
int64_t index_type_;
std::string extra_params_;
@ -76,20 +77,20 @@ struct IndexParam {
index_type_ = 0;
}
IndexParam(const std::string& table_name, int64_t index_type) {
table_name_ = table_name;
IndexParam(const std::string& collection_name, int64_t index_type) {
collection_name_ = collection_name;
index_type_ = index_type;
}
};
struct PartitionParam {
std::string table_name_;
std::string collection_name_;
std::string tag_;
PartitionParam() = default;
PartitionParam(const std::string& table_name, const std::string& tag) {
table_name_ = table_name;
PartitionParam(const std::string& collection_name, const std::string& tag) {
collection_name_ = collection_name;
tag_ = tag;
}
};
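For reference, a minimal standalone sketch constructing the renamed request-layer structs. The struct bodies below are simplified local mirrors of the declarations above (IndexParam omitted) so the snippet compiles on its own; the values passed are illustrative only.

#include <cstdint>
#include <iostream>
#include <string>

// Simplified local mirror of the CollectionSchema declared above.
struct CollectionSchema {
    std::string collection_name_;
    int64_t dimension_;
    int64_t index_file_size_;
    int64_t metric_type_;
    CollectionSchema(const std::string& collection_name, int64_t dimension, int64_t index_file_size,
                     int64_t metric_type)
        : collection_name_(collection_name),
          dimension_(dimension),
          index_file_size_(index_file_size),
          metric_type_(metric_type) {}
};

// Simplified local mirror of the PartitionParam declared above.
struct PartitionParam {
    std::string collection_name_;
    std::string tag_;
    PartitionParam(const std::string& collection_name, const std::string& tag)
        : collection_name_(collection_name), tag_(tag) {}
};

int main() {
    // Illustrative values: a 128-dimensional collection, 1024 MB index files, metric type 1.
    CollectionSchema schema("demo_collection", 128, 1024, 1);
    PartitionParam partition("demo_collection", "2020-04");
    std::cout << schema.collection_name_ << " / " << partition.tag_ << std::endl;
    return 0;
}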
@ -126,7 +127,7 @@ class BaseRequest {
kGetVectorByID,
kGetVectorIDs,
// table operations
// collection operations
kShowTables = 300,
kCreateTable,
kHasTable,
@ -208,7 +209,7 @@ class BaseRequest {
OnPostExecute();
std::string
TableNotExistMsg(const std::string& table_name);
TableNotExistMsg(const std::string& collection_name);
protected:
const std::shared_ptr<milvus::server::Context> context_;

View File

@ -26,47 +26,48 @@
namespace milvus {
namespace server {
CompactRequest::CompactRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name)
: BaseRequest(context, BaseRequest::kCompact), table_name_(table_name) {
CompactRequest::CompactRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name)
: BaseRequest(context, BaseRequest::kCompact), collection_name_(collection_name) {
}
BaseRequestPtr
CompactRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name) {
return std::shared_ptr<BaseRequest>(new CompactRequest(context, table_name));
CompactRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name) {
return std::shared_ptr<BaseRequest>(new CompactRequest(context, collection_name));
}
Status
CompactRequest::OnExecute() {
try {
std::string hdr = "CompactRequest(table=" + table_name_ + ")";
std::string hdr = "CompactRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
rc.RecordSection("check validation");
// step 2: check table existence
status = DBWrapper::DB()->Compact(table_name_);
// step 2: check collection existence
status = DBWrapper::DB()->Compact(collection_name_);
if (!status.ok()) {
return status;
}
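The same pre-check appears in almost every request touched by this change: validate the collection name, describe the collection, and reject the entry if it is actually a partition (owner_table_ set). A self-contained sketch of that flow, using hypothetical stand-in types rather than the real DBWrapper and meta classes:

#include <iostream>
#include <string>
#include <unordered_map>

// Hypothetical stand-in for milvus::Status.
struct Status {
    int code;
    std::string message;
    Status() : code(0) {}
    Status(int c, std::string msg) : code(c), message(std::move(msg)) {}
    bool ok() const { return code == 0; }
};

// Hypothetical stand-in for the meta schema: a non-empty owner_table_ marks a partition entry.
struct CollectionMeta {
    std::string collection_id_;
    std::string owner_table_;
};

// Toy catalog playing the role of DBWrapper::DB()->DescribeTable().
Status Describe(const std::unordered_map<std::string, CollectionMeta>& catalog, CollectionMeta& meta) {
    auto it = catalog.find(meta.collection_id_);
    if (it == catalog.end()) {
        return Status(404, "not found");
    }
    meta = it->second;
    return Status();
}

// The shared pre-check used by Compact, CountTable, CreateIndex, etc. (simplified):
// 1) validate the name, 2) make sure the collection exists, 3) only accept root collections.
Status CheckRootCollection(const std::unordered_map<std::string, CollectionMeta>& catalog,
                           const std::string& collection_name) {
    if (collection_name.empty()) {
        return Status(400, "Invalid collection name");   // ValidateCollectionName() in the real code
    }
    CollectionMeta meta{collection_name, ""};
    Status status = Describe(catalog, meta);
    if (!status.ok()) {
        return Status(404, "Collection " + collection_name + " does not exist");
    }
    if (!meta.owner_table_.empty()) {
        return Status(400, "Collection " + collection_name + " does not exist");  // partitions are rejected
    }
    return Status();
}

int main() {
    std::unordered_map<std::string, CollectionMeta> catalog{
        {"demo", {"demo", ""}},            // root collection
        {"demo_p0", {"demo_p0", "demo"}},  // partition owned by "demo"
    };
    std::cout << CheckRootCollection(catalog, "demo").ok() << " "         // 1
              << CheckRootCollection(catalog, "demo_p0").ok() << " "      // 0
              << CheckRootCollection(catalog, "missing").ok() << std::endl;  // 0
    return 0;
}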

View File

@ -28,16 +28,16 @@ namespace server {
class CompactRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name);
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
protected:
CompactRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name);
CompactRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
};
} // namespace server

View File

@ -23,41 +23,41 @@ namespace milvus {
namespace server {
CountTableRequest::CountTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, int64_t& row_count)
: BaseRequest(context, BaseRequest::kCountTable), table_name_(table_name), row_count_(row_count) {
const std::string& collection_name, int64_t& row_count)
: BaseRequest(context, BaseRequest::kCountTable), collection_name_(collection_name), row_count_(row_count) {
}
BaseRequestPtr
CountTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
CountTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t& row_count) {
return std::shared_ptr<BaseRequest>(new CountTableRequest(context, table_name, row_count));
return std::shared_ptr<BaseRequest>(new CountTableRequest(context, collection_name, row_count));
}
Status
CountTableRequest::OnExecute() {
try {
std::string hdr = "CountTableRequest(table=" + table_name_ + ")";
std::string hdr = "CountTableRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
@ -65,13 +65,13 @@ CountTableRequest::OnExecute() {
// step 2: get row count
uint64_t row_count = 0;
status = DBWrapper::DB()->GetTableRowCount(table_name_, row_count);
status = DBWrapper::DB()->GetTableRowCount(collection_name_, row_count);
fiu_do_on("CountTableRequest.OnExecute.db_not_found", status = Status(DB_NOT_FOUND, ""));
fiu_do_on("CountTableRequest.OnExecute.status_error", status = Status(SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("CountTableRequest.OnExecute.throw_std_exception", throw std::exception());
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}

View File

@ -22,17 +22,18 @@ namespace server {
class CountTableRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name, int64_t& row_count);
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t& row_count);
protected:
CountTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
CountTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t& row_count);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
int64_t& row_count_;
};

View File

@ -25,47 +25,47 @@ namespace milvus {
namespace server {
CreateIndexRequest::CreateIndexRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, int64_t index_type,
const std::string& collection_name, int64_t index_type,
const milvus::json& json_params)
: BaseRequest(context, BaseRequest::kCreateIndex),
table_name_(table_name),
collection_name_(collection_name),
index_type_(index_type),
json_params_(json_params) {
}
BaseRequestPtr
CreateIndexRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
CreateIndexRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t index_type, const milvus::json& json_params) {
return std::shared_ptr<BaseRequest>(new CreateIndexRequest(context, table_name, index_type, json_params));
return std::shared_ptr<BaseRequest>(new CreateIndexRequest(context, collection_name, index_type, json_params));
}
Status
CreateIndexRequest::OnExecute() {
try {
std::string hdr = "CreateIndexRequest(table=" + table_name_ + ")";
std::string hdr = "CreateIndexRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
fiu_do_on("CreateIndexRequest.OnExecute.not_has_table", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("CreateIndexRequest.OnExecute.throw_std.exception", throw std::exception());
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
@ -80,8 +80,8 @@ CreateIndexRequest::OnExecute() {
}
// step 2: binary and float vector support different index/metric type, need to adapt here
engine::meta::TableSchema table_info;
table_info.table_id_ = table_name_;
engine::meta::CollectionSchema table_info;
table_info.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_info);
int32_t adapter_index_type = index_type_;
@ -91,7 +91,7 @@ CreateIndexRequest::OnExecute() {
} else if (adapter_index_type == static_cast<int32_t>(engine::EngineType::FAISS_IVFFLAT)) {
adapter_index_type = static_cast<int32_t>(engine::EngineType::FAISS_BIN_IVFFLAT);
} else {
return Status(SERVER_INVALID_INDEX_TYPE, "Invalid index type for table metric type");
return Status(SERVER_INVALID_INDEX_TYPE, "Invalid index type for collection metric type");
}
}
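The hunk above adapts the requested index type when the collection was created with a binary metric: a float index type is swapped for its binary counterpart, and unsupported combinations are rejected. A hedged sketch of just the visible branch follows; the earlier branch is elided in this diff, and the enum is a simplified local copy, not the real engine::EngineType.

#include <iostream>

// Hypothetical, trimmed-down copy of the engine types involved in the visible branch.
enum class EngineType { FAISS_IDMAP, FAISS_BIN_IDMAP, FAISS_IVFFLAT, FAISS_BIN_IVFFLAT };

// Returns true and sets `adapted` when the requested type has a binary counterpart;
// only the IVFFLAT case is shown in the hunk above, so this sketch covers just that case.
bool AdaptForBinaryMetric(EngineType requested, EngineType& adapted) {
    if (requested == EngineType::FAISS_IVFFLAT) {
        adapted = EngineType::FAISS_BIN_IVFFLAT;
        return true;
    }
    // The real code has at least one more branch that is not visible in this diff.
    return false;  // "Invalid index type for collection metric type"
}

int main() {
    EngineType adapted;
    std::cout << AdaptForBinaryMetric(EngineType::FAISS_IVFFLAT, adapted) << std::endl;  // 1
    std::cout << AdaptForBinaryMetric(EngineType::FAISS_IDMAP, adapted) << std::endl;    // 0 in this sketch only
    return 0;
}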
@ -115,7 +115,7 @@ CreateIndexRequest::OnExecute() {
engine::TableIndex index;
index.engine_type_ = adapter_index_type;
index.extra_params_ = json_params_;
status = DBWrapper::DB()->CreateIndex(table_name_, index);
status = DBWrapper::DB()->CreateIndex(collection_name_, index);
fiu_do_on("CreateIndexRequest.OnExecute.create_index_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {

View File

@ -21,18 +21,18 @@ namespace server {
class CreateIndexRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name, int64_t index_type,
const milvus::json& json_params);
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t index_type, const milvus::json& json_params);
protected:
CreateIndexRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
CreateIndexRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t index_type, const milvus::json& json_params);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
const int64_t index_type_;
milvus::json json_params_;
};

View File

@ -23,24 +23,24 @@ namespace milvus {
namespace server {
CreatePartitionRequest::CreatePartitionRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, const std::string& tag)
: BaseRequest(context, BaseRequest::kCreatePartition), table_name_(table_name), tag_(tag) {
const std::string& collection_name, const std::string& tag)
: BaseRequest(context, BaseRequest::kCreatePartition), collection_name_(collection_name), tag_(tag) {
}
BaseRequestPtr
CreatePartitionRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
const std::string& tag) {
return std::shared_ptr<BaseRequest>(new CreatePartitionRequest(context, table_name, tag));
CreatePartitionRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, const std::string& tag) {
return std::shared_ptr<BaseRequest>(new CreatePartitionRequest(context, collection_name, tag));
}
Status
CreatePartitionRequest::OnExecute() {
std::string hdr = "CreatePartitionRequest(table=" + table_name_ + ", partition_tag=" + tag_ + ")";
std::string hdr = "CreatePartitionRequest(collection=" + collection_name_ + ", partition_tag=" + tag_ + ")";
TimeRecorderAuto rc(hdr);
try {
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
fiu_do_on("CreatePartitionRequest.OnExecute.invalid_table_name",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
@ -58,28 +58,28 @@ CreatePartitionRequest::OnExecute() {
return status;
}
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
fiu_do_on("CreatePartitionRequest.OnExecute.invalid_partition_tags",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
rc.RecordSection("check validation");
// step 2: create partition
status = DBWrapper::DB()->CreatePartition(table_name_, "", tag_);
status = DBWrapper::DB()->CreatePartition(collection_name_, "", tag_);
fiu_do_on("CreatePartitionRequest.OnExecute.db_already_exist", status = Status(milvus::DB_ALREADY_EXIST, ""));
fiu_do_on("CreatePartitionRequest.OnExecute.create_partition_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));

View File

@ -25,14 +25,14 @@ class CreatePartitionRequest : public BaseRequest {
const std::string& tag);
protected:
CreatePartitionRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
CreatePartitionRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const std::string& tag);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
const std::string tag_;
};

View File

@ -25,30 +25,31 @@ namespace milvus {
namespace server {
CreateTableRequest::CreateTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, int64_t dimension, int64_t index_file_size,
const std::string& collection_name, int64_t dimension, int64_t index_file_size,
int64_t metric_type)
: BaseRequest(context, BaseRequest::kCreateTable),
table_name_(table_name),
collection_name_(collection_name),
dimension_(dimension),
index_file_size_(index_file_size),
metric_type_(metric_type) {
}
BaseRequestPtr
CreateTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
CreateTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t dimension, int64_t index_file_size, int64_t metric_type) {
return std::shared_ptr<BaseRequest>(
new CreateTableRequest(context, table_name, dimension, index_file_size, metric_type));
new CreateTableRequest(context, collection_name, dimension, index_file_size, metric_type));
}
Status
CreateTableRequest::OnExecute() {
std::string hdr = "CreateTableRequest(table=" + table_name_ + ", dimension=" + std::to_string(dimension_) + ")";
std::string hdr =
"CreateTableRequest(collection=" + collection_name_ + ", dimension=" + std::to_string(dimension_) + ")";
TimeRecorderAuto rc(hdr);
try {
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
@ -72,9 +73,9 @@ CreateTableRequest::OnExecute() {
rc.RecordSection("check validation");
// step 2: construct table schema
engine::meta::TableSchema table_info;
table_info.table_id_ = table_name_;
// step 2: construct collection schema
engine::meta::CollectionSchema table_info;
table_info.collection_id_ = collection_name_;
table_info.dimension_ = static_cast<uint16_t>(dimension_);
table_info.index_file_size_ = index_file_size_;
table_info.metric_type_ = metric_type_;
@ -88,14 +89,14 @@ CreateTableRequest::OnExecute() {
}
}
// step 3: create table
// step 3: create collection
status = DBWrapper::DB()->CreateTable(table_info);
fiu_do_on("CreateTableRequest.OnExecute.db_already_exist", status = Status(milvus::DB_ALREADY_EXIST, ""));
fiu_do_on("CreateTableRequest.OnExecute.create_table_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("CreateTableRequest.OnExecute.throw_std_exception", throw std::exception());
if (!status.ok()) {
// table could exist
// collection could exist
if (status.code() == DB_ALREADY_EXIST) {
return Status(SERVER_INVALID_TABLE_NAME, status.message());
}

View File

@ -22,18 +22,18 @@ namespace server {
class CreateTableRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name, int64_t dimension,
int64_t index_file_size, int64_t metric_type);
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t dimension, int64_t index_file_size, int64_t metric_type);
protected:
CreateTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
CreateTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t dimension, int64_t index_file_size, int64_t metric_type);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
int64_t dimension_;
int64_t index_file_size_;
int64_t metric_type_;

View File

@ -30,14 +30,14 @@ namespace milvus {
namespace server {
DeleteByIDRequest::DeleteByIDRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, const std::vector<int64_t>& vector_ids)
: BaseRequest(context, BaseRequest::kDeleteByID), table_name_(table_name), vector_ids_(vector_ids) {
const std::string& collection_name, const std::vector<int64_t>& vector_ids)
: BaseRequest(context, BaseRequest::kDeleteByID), collection_name_(collection_name), vector_ids_(vector_ids) {
}
BaseRequestPtr
DeleteByIDRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
DeleteByIDRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const std::vector<int64_t>& vector_ids) {
return std::shared_ptr<BaseRequest>(new DeleteByIDRequest(context, table_name, vector_ids));
return std::shared_ptr<BaseRequest>(new DeleteByIDRequest(context, collection_name, vector_ids));
}
Status
@ -46,28 +46,28 @@ DeleteByIDRequest::OnExecute() {
TimeRecorderAuto rc("DeleteByIDRequest");
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// step 2: check table existence
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// step 2: check collection existence
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// Check table's index type supports delete
// Check collection's index type supports delete
if (table_schema.engine_type_ != (int32_t)engine::EngineType::FAISS_IDMAP &&
table_schema.engine_type_ != (int32_t)engine::EngineType::FAISS_BIN_IDMAP &&
table_schema.engine_type_ != (int32_t)engine::EngineType::HNSW &&
@ -84,7 +84,7 @@ DeleteByIDRequest::OnExecute() {
rc.RecordSection("check validation");
status = DBWrapper::DB()->DeleteVectors(table_name_, vector_ids_);
status = DBWrapper::DB()->DeleteVectors(collection_name_, vector_ids_);
if (!status.ok()) {
return status;
}
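DeleteByID is only allowed for engine types that can handle deletes; the condition above lists FAISS_IDMAP, FAISS_BIN_IDMAP and HNSW before the diff truncates, so the full set is not visible here. A standalone sketch of such a capability check, with an illustrative and intentionally partial set (enum values are made up for the sketch):

#include <cstdint>
#include <iostream>
#include <unordered_set>

// Hypothetical mirror of the engine-type codes referenced above; numeric values are illustrative.
enum class EngineType : int32_t { FAISS_IDMAP = 1, FAISS_IVFFLAT = 2, FAISS_BIN_IDMAP = 6, HNSW = 8 };

// Membership test standing in for the chained != comparisons in DeleteByIDRequest::OnExecute().
bool SupportsDelete(EngineType type) {
    static const std::unordered_set<int32_t> kDeletable{
        static_cast<int32_t>(EngineType::FAISS_IDMAP),
        static_cast<int32_t>(EngineType::FAISS_BIN_IDMAP),
        static_cast<int32_t>(EngineType::HNSW),
    };
    return kDeletable.count(static_cast<int32_t>(type)) > 0;
}

int main() {
    std::cout << SupportsDelete(EngineType::FAISS_IDMAP) << " "
              << SupportsDelete(EngineType::FAISS_IVFFLAT) << std::endl;  // 1 0 (in this partial sketch)
    return 0;
}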

View File

@ -29,18 +29,18 @@ namespace server {
class DeleteByIDRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const std::vector<int64_t>& vector_ids);
protected:
DeleteByIDRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
DeleteByIDRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const std::vector<int64_t>& vector_ids);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
const std::vector<int64_t>& vector_ids_;
};

View File

@ -22,48 +22,48 @@ namespace milvus {
namespace server {
DescribeIndexRequest::DescribeIndexRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, IndexParam& index_param)
: BaseRequest(context, BaseRequest::kDescribeIndex), table_name_(table_name), index_param_(index_param) {
const std::string& collection_name, IndexParam& index_param)
: BaseRequest(context, BaseRequest::kDescribeIndex), collection_name_(collection_name), index_param_(index_param) {
}
BaseRequestPtr
DescribeIndexRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
IndexParam& index_param) {
return std::shared_ptr<BaseRequest>(new DescribeIndexRequest(context, table_name, index_param));
DescribeIndexRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, IndexParam& index_param) {
return std::shared_ptr<BaseRequest>(new DescribeIndexRequest(context, collection_name, index_param));
}
Status
DescribeIndexRequest::OnExecute() {
try {
fiu_do_on("DescribeIndexRequest.OnExecute.throw_std_exception", throw std::exception());
std::string hdr = "DescribeIndexRequest(table=" + table_name_ + ")";
std::string hdr = "DescribeIndexRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// step 2: check table existence
// step 2: check collection existence
engine::TableIndex index;
status = DBWrapper::DB()->DescribeIndex(table_name_, index);
status = DBWrapper::DB()->DescribeIndex(collection_name_, index);
if (!status.ok()) {
return status;
}
@ -76,7 +76,7 @@ DescribeIndexRequest::OnExecute() {
index.engine_type_ = (int32_t)engine::EngineType::FAISS_IVFFLAT;
}
index_param_.table_name_ = table_name_;
index_param_.collection_name_ = collection_name_;
index_param_.index_type_ = index.engine_type_;
index_param_.extra_params_ = index.extra_params_.dump();
} catch (std::exception& ex) {

View File

@ -22,18 +22,18 @@ namespace server {
class DescribeIndexRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
IndexParam& index_param);
protected:
DescribeIndexRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
DescribeIndexRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
IndexParam& index_param);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
IndexParam& index_param_;
};

View File

@ -22,49 +22,49 @@ namespace milvus {
namespace server {
DescribeTableRequest::DescribeTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, TableSchema& schema)
: BaseRequest(context, BaseRequest::kDescribeTable), table_name_(table_name), schema_(schema) {
const std::string& collection_name, CollectionSchema& schema)
: BaseRequest(context, BaseRequest::kDescribeTable), collection_name_(collection_name), schema_(schema) {
}
BaseRequestPtr
DescribeTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
TableSchema& schema) {
return std::shared_ptr<BaseRequest>(new DescribeTableRequest(context, table_name, schema));
DescribeTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, CollectionSchema& schema) {
return std::shared_ptr<BaseRequest>(new DescribeTableRequest(context, collection_name, schema));
}
Status
DescribeTableRequest::OnExecute() {
std::string hdr = "DescribeTableRequest(table=" + table_name_ + ")";
std::string hdr = "DescribeTableRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
try {
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// step 2: get table info
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// step 2: get collection info
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
fiu_do_on("DescribeTableRequest.OnExecute.describe_table_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("DescribeTableRequest.OnExecute.throw_std_exception", throw std::exception());
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
schema_.table_name_ = table_schema.table_id_;
schema_.collection_name_ = table_schema.collection_id_;
schema_.dimension_ = static_cast<int64_t>(table_schema.dimension_);
schema_.index_file_size_ = table_schema.index_file_size_;
schema_.metric_type_ = table_schema.metric_type_;

View File

@ -22,18 +22,19 @@ namespace server {
class DescribeTableRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name, TableSchema& schema);
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
CollectionSchema& schema);
protected:
DescribeTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
TableSchema& schema);
DescribeTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
CollectionSchema& schema);
Status
OnExecute() override;
private:
const std::string table_name_;
TableSchema& schema_;
const std::string collection_name_;
CollectionSchema& schema_;
};
} // namespace server

View File

@ -22,49 +22,49 @@ namespace milvus {
namespace server {
DropIndexRequest::DropIndexRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name)
: BaseRequest(context, BaseRequest::kDropIndex), table_name_(table_name) {
const std::string& collection_name)
: BaseRequest(context, BaseRequest::kDropIndex), collection_name_(collection_name) {
}
BaseRequestPtr
DropIndexRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name) {
return std::shared_ptr<BaseRequest>(new DropIndexRequest(context, table_name));
DropIndexRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name) {
return std::shared_ptr<BaseRequest>(new DropIndexRequest(context, collection_name));
}
Status
DropIndexRequest::OnExecute() {
try {
fiu_do_on("DropIndexRequest.OnExecute.throw_std_exception", throw std::exception());
std::string hdr = "DropIndexRequest(table=" + table_name_ + ")";
std::string hdr = "DropIndexRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
fiu_do_on("DropIndexRequest.OnExecute.table_not_exist", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
rc.RecordSection("check validation");
// step 2: drop index
status = DBWrapper::DB()->DropIndex(table_name_);
status = DBWrapper::DB()->DropIndex(collection_name_);
fiu_do_on("DropIndexRequest.OnExecute.drop_index_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
return status;

View File

@ -22,16 +22,16 @@ namespace server {
class DropIndexRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name);
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
protected:
DropIndexRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name);
DropIndexRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
};
} // namespace server

View File

@ -23,26 +23,26 @@ namespace milvus {
namespace server {
DropPartitionRequest::DropPartitionRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, const std::string& tag)
: BaseRequest(context, BaseRequest::kDropPartition), table_name_(table_name), tag_(tag) {
const std::string& collection_name, const std::string& tag)
: BaseRequest(context, BaseRequest::kDropPartition), collection_name_(collection_name), tag_(tag) {
}
BaseRequestPtr
DropPartitionRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
const std::string& tag) {
return std::shared_ptr<BaseRequest>(new DropPartitionRequest(context, table_name, tag));
DropPartitionRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, const std::string& tag) {
return std::shared_ptr<BaseRequest>(new DropPartitionRequest(context, collection_name, tag));
}
Status
DropPartitionRequest::OnExecute() {
std::string hdr = "DropPartitionRequest(table=" + table_name_ + ", partition_tag=" + tag_ + ")";
std::string hdr = "DropPartitionRequest(collection=" + collection_name_ + ", partition_tag=" + tag_ + ")";
TimeRecorderAuto rc(hdr);
std::string table_name = table_name_;
std::string collection_name = collection_name_;
std::string partition_tag = tag_;
// step 1: check table name
auto status = ValidationUtil::ValidateTableName(table_name);
// step 1: check collection name
auto status = ValidationUtil::ValidateCollectionName(collection_name);
fiu_do_on("DropPartitionRequest.OnExecute.invalid_table_name",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
@ -61,27 +61,27 @@ DropPartitionRequest::OnExecute() {
return status;
}
// step 3: check table
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// step 3: check collection
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
rc.RecordSection("check validation");
// step 4: drop partition
return DBWrapper::DB()->DropPartitionByTag(table_name, partition_tag);
return DBWrapper::DB()->DropPartitionByTag(collection_name, partition_tag);
}
} // namespace server

View File

@ -21,18 +21,18 @@ namespace server {
class DropPartitionRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const std::string& tag);
protected:
DropPartitionRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
DropPartitionRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const std::string& tag);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
const std::string tag_;
};

View File

@ -23,31 +23,31 @@ namespace milvus {
namespace server {
DropTableRequest::DropTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name)
: BaseRequest(context, BaseRequest::kDropTable), table_name_(table_name) {
const std::string& collection_name)
: BaseRequest(context, BaseRequest::kDropTable), collection_name_(collection_name) {
}
BaseRequestPtr
DropTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name) {
return std::shared_ptr<BaseRequest>(new DropTableRequest(context, table_name));
DropTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name) {
return std::shared_ptr<BaseRequest>(new DropTableRequest(context, collection_name));
}
Status
DropTableRequest::OnExecute() {
try {
std::string hdr = "DropTableRequest(table=" + table_name_ + ")";
std::string hdr = "DropTableRequest(collection=" + collection_name_ + ")";
TimeRecorder rc(hdr);
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// step 2: check table existence
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// step 2: check collection existence
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
fiu_do_on("DropTableRequest.OnExecute.db_not_found", status = Status(milvus::DB_NOT_FOUND, ""));
fiu_do_on("DropTableRequest.OnExecute.describe_table_fail",
@ -55,20 +55,20 @@ DropTableRequest::OnExecute() {
fiu_do_on("DropTableRequest.OnExecute.throw_std_exception", throw std::exception());
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
rc.RecordSection("check validation");
// step 3: Drop table
status = DBWrapper::DB()->DropTable(table_name_);
// step 3: Drop collection
status = DBWrapper::DB()->DropTable(collection_name_);
fiu_do_on("DropTableRequest.OnExecute.drop_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
return status;

View File

@ -22,16 +22,16 @@ namespace server {
class DropTableRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name);
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
protected:
DropTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name);
DropTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
Status
OnExecute() override;
private:
std::string table_name_;
std::string collection_name_;
};
} // namespace server

View File

@ -27,20 +27,20 @@ namespace milvus {
namespace server {
FlushRequest::FlushRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::vector<std::string>& table_names)
: BaseRequest(context, BaseRequest::kFlush), table_names_(table_names) {
const std::vector<std::string>& collection_names)
: BaseRequest(context, BaseRequest::kFlush), collection_names_(collection_names) {
}
BaseRequestPtr
FlushRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::vector<std::string>& table_names) {
return std::shared_ptr<BaseRequest>(new FlushRequest(context, table_names));
const std::vector<std::string>& collection_names) {
return std::shared_ptr<BaseRequest>(new FlushRequest(context, collection_names));
}
Status
FlushRequest::OnExecute() {
std::string hdr = "FlushRequest flush tables: ";
for (auto& name : table_names_) {
for (auto& name : collection_names_) {
hdr += name;
hdr += ", ";
}
@ -49,10 +49,10 @@ FlushRequest::OnExecute() {
Status status = Status::OK();
SERVER_LOG_DEBUG << hdr;
for (auto& name : table_names_) {
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = name;
for (auto& name : collection_names_) {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = name;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {

View File

@ -29,16 +29,17 @@ namespace server {
class FlushRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::vector<std::string>& table_names);
Create(const std::shared_ptr<milvus::server::Context>& context, const std::vector<std::string>& collection_names);
protected:
FlushRequest(const std::shared_ptr<milvus::server::Context>& context, const std::vector<std::string>& table_names);
FlushRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::vector<std::string>& collection_names);
Status
OnExecute() override;
private:
std::vector<std::string> table_names_;
std::vector<std::string> collection_names_;
};
} // namespace server

View File

@ -28,25 +28,29 @@ namespace milvus {
namespace server {
GetVectorByIDRequest::GetVectorByIDRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, const std::vector<int64_t>& ids,
const std::string& collection_name, const std::vector<int64_t>& ids,
engine::VectorsData& vectors)
: BaseRequest(context, BaseRequest::kGetVectorByID), table_name_(table_name), ids_(ids), vectors_(vectors) {
: BaseRequest(context, BaseRequest::kGetVectorByID),
collection_name_(collection_name),
ids_(ids),
vectors_(vectors) {
}
BaseRequestPtr
GetVectorByIDRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
const std::vector<int64_t>& ids, engine::VectorsData& vectors) {
return std::shared_ptr<BaseRequest>(new GetVectorByIDRequest(context, table_name, ids, vectors));
GetVectorByIDRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, const std::vector<int64_t>& ids,
engine::VectorsData& vectors) {
return std::shared_ptr<BaseRequest>(new GetVectorByIDRequest(context, collection_name, ids, vectors));
}
Status
GetVectorByIDRequest::OnExecute() {
try {
std::string hdr = "GetVectorByIDRequest(table=" + table_name_ + ")";
std::string hdr = "GetVectorByIDRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
@ -55,24 +59,24 @@ GetVectorByIDRequest::OnExecute() {
return Status(SERVER_INVALID_ARGUMENT, "No vector id specified");
}
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// step 2: get vector data, now only support get one id
return DBWrapper::DB()->GetVectorByID(table_name_, ids_[0], vectors_);
return DBWrapper::DB()->GetVectorByID(collection_name_, ids_[0], vectors_);
} catch (std::exception& ex) {
return Status(SERVER_UNEXPECTED_ERROR, ex.what());
}
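
This DescribeTable-then-check-owner_table_ sequence is the recurring guard in this change: every handler resolves a CollectionSchema by collection_id_ and rejects names that actually belong to a partition. A minimal standalone sketch of that guard, using a stand-in struct instead of engine::meta::CollectionSchema (only the two field names come from the diff, everything else is illustrative):

#include <string>

// Illustrative stand-in for engine::meta::CollectionSchema.
struct CollectionSchemaSketch {
    std::string collection_id_;  // collection name used as the lookup key
    std::string owner_table_;    // empty for a root collection, parent name for a partition
};

// A request only proceeds when the resolved schema describes a root collection;
// partition collections (non-empty owner_table_) are treated as "does not exist".
inline bool IsRootCollection(const CollectionSchemaSketch& schema) {
    return schema.owner_table_.empty();
}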

View File

@ -29,18 +29,18 @@ namespace server {
class GetVectorByIDRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const std::vector<int64_t>& ids, engine::VectorsData& vectors);
protected:
GetVectorByIDRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
GetVectorByIDRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const std::vector<int64_t>& ids, engine::VectorsData& vectors);
Status
OnExecute() override;
private:
std::string table_name_;
std::string collection_name_;
std::vector<int64_t> ids_;
engine::VectorsData& vectors_;
};

View File

@ -28,51 +28,51 @@ namespace milvus {
namespace server {
GetVectorIDsRequest::GetVectorIDsRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, const std::string& segment_name,
const std::string& collection_name, const std::string& segment_name,
std::vector<int64_t>& vector_ids)
: BaseRequest(context, BaseRequest::kGetVectorIDs),
table_name_(table_name),
collection_name_(collection_name),
segment_name_(segment_name),
vector_ids_(vector_ids) {
}
BaseRequestPtr
GetVectorIDsRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
GetVectorIDsRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const std::string& segment_name, std::vector<int64_t>& vector_ids) {
return std::shared_ptr<BaseRequest>(new GetVectorIDsRequest(context, table_name, segment_name, vector_ids));
return std::shared_ptr<BaseRequest>(new GetVectorIDsRequest(context, collection_name, segment_name, vector_ids));
}
Status
GetVectorIDsRequest::OnExecute() {
try {
std::string hdr = "GetVectorIDsRequest(table=" + table_name_ + " segment=" + segment_name_ + ")";
std::string hdr = "GetVectorIDsRequest(collection=" + collection_name_ + " segment=" + segment_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// step 2: get vector data, now only support get one id
vector_ids_.clear();
return DBWrapper::DB()->GetVectorIDs(table_name_, segment_name_, vector_ids_);
return DBWrapper::DB()->GetVectorIDs(collection_name_, segment_name_, vector_ids_);
} catch (std::exception& ex) {
return Status(SERVER_UNEXPECTED_ERROR, ex.what());
}

View File

@ -29,18 +29,18 @@ namespace server {
class GetVectorIDsRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const std::string& segment_name, std::vector<int64_t>& vector_ids);
protected:
GetVectorIDsRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
GetVectorIDsRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const std::string& segment_name, std::vector<int64_t>& vector_ids);
Status
OnExecute() override;
private:
std::string table_name_;
std::string collection_name_;
std::string segment_name_;
std::vector<int64_t>& vector_ids_;
};

View File

@ -21,37 +21,37 @@
namespace milvus {
namespace server {
HasTableRequest::HasTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
bool& has_table)
: BaseRequest(context, BaseRequest::kHasTable), table_name_(table_name), has_table_(has_table) {
HasTableRequest::HasTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, bool& has_table)
: BaseRequest(context, BaseRequest::kHasTable), collection_name_(collection_name), has_table_(has_table) {
}
BaseRequestPtr
HasTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
HasTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
bool& has_table) {
return std::shared_ptr<BaseRequest>(new HasTableRequest(context, table_name, has_table));
return std::shared_ptr<BaseRequest>(new HasTableRequest(context, collection_name, has_table));
}
Status
HasTableRequest::OnExecute() {
try {
std::string hdr = "HasTableRequest(table=" + table_name_ + ")";
std::string hdr = "HasTableRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// step 2: check table existence
status = DBWrapper::DB()->HasNativeTable(table_name_, has_table_);
status = DBWrapper::DB()->HasNativeTable(collection_name_, has_table_);
fiu_do_on("HasTableRequest.OnExecute.throw_std_exception", throw std::exception());
// only process root table, ignore partition table
// only process root collection, ignore partition collection
if (has_table_) {
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!table_schema.owner_table_.empty()) {
has_table_ = false;
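
HasTableRequest applies the same rule to its boolean result: a name that resolves to a partition is reported as absent. A self-contained sketch of that semantic using a toy in-memory catalog (the map type and helper are assumptions for illustration only):

#include <map>
#include <string>

// Toy catalog: collection name -> owner collection ("" marks a root collection).
using CatalogSketch = std::map<std::string, std::string>;

// Mirrors the behaviour above: has_table_ stays true only for root collections,
// so a partition name yields false even though the catalog knows about it.
inline bool HasRootCollection(const CatalogSketch& catalog, const std::string& name) {
    auto it = catalog.find(name);
    return it != catalog.end() && it->second.empty();
}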

View File

@ -22,17 +22,18 @@ namespace server {
class HasTableRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name, bool& has_table);
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
bool& has_table);
protected:
HasTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
HasTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
bool& has_table);
Status
OnExecute() override;
private:
std::string table_name_;
std::string collection_name_;
bool& has_table_;
};

View File

@ -21,6 +21,7 @@
#include <memory>
#include <string>
#include <vector>
#ifdef MILVUS_ENABLE_PROFILING
#include <gperftools/profiler.h>
#endif
@ -28,18 +29,19 @@
namespace milvus {
namespace server {
InsertRequest::InsertRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
engine::VectorsData& vectors, const std::string& partition_tag)
InsertRequest::InsertRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, engine::VectorsData& vectors,
const std::string& partition_tag)
: BaseRequest(context, BaseRequest::kInsert),
table_name_(table_name),
collection_name_(collection_name),
vectors_data_(vectors),
partition_tag_(partition_tag) {
}
BaseRequestPtr
InsertRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
InsertRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
engine::VectorsData& vectors, const std::string& partition_tag) {
return std::shared_ptr<BaseRequest>(new InsertRequest(context, table_name, vectors, partition_tag));
return std::shared_ptr<BaseRequest>(new InsertRequest(context, collection_name, vectors, partition_tag));
}
Status
@ -47,12 +49,12 @@ InsertRequest::OnExecute() {
try {
int64_t vector_count = vectors_data_.vector_count_;
fiu_do_on("InsertRequest.OnExecute.throw_std_exception", throw std::exception());
std::string hdr = "InsertRequest(table=" + table_name_ + ", n=" + std::to_string(vector_count) +
std::string hdr = "InsertRequest(collection=" + collection_name_ + ", n=" + std::to_string(vector_count) +
", partition_tag=" + partition_tag_ + ")";
TimeRecorder rc(hdr);
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
@ -69,26 +71,26 @@ InsertRequest::OnExecute() {
}
}
// step 2: check table existence
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// step 2: check collection existence
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
fiu_do_on("InsertRequest.OnExecute.db_not_found", status = Status(milvus::DB_NOT_FOUND, ""));
fiu_do_on("InsertRequest.OnExecute.describe_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// step 3: check table flag
// step 3: check collection flag
// all user provide id, or all internal id
bool user_provide_ids = !vectors_data_.id_array_.empty();
fiu_do_on("InsertRequest.OnExecute.illegal_vector_id", user_provide_ids = false;
@ -96,7 +98,7 @@ InsertRequest::OnExecute() {
// user already provided id before, all insert action require user id
if ((table_schema.flag_ & engine::meta::FLAG_MASK_HAS_USERID) != 0 && !user_provide_ids) {
return Status(SERVER_ILLEGAL_VECTOR_ID,
"Table vector IDs are user-defined. Please provide IDs for all vectors of this table.");
"Entities IDs are user-defined. Please provide IDs for all entities of the collection.");
}
fiu_do_on("InsertRequest.OnExecute.illegal_vector_id2", user_provide_ids = true;
@ -105,7 +107,7 @@ InsertRequest::OnExecute() {
if ((table_schema.flag_ & engine::meta::FLAG_MASK_NO_USERID) != 0 && user_provide_ids) {
return Status(
SERVER_ILLEGAL_VECTOR_ID,
"Table vector IDs are auto-generated. All vectors of this table must use auto-generated IDs.");
"Entities IDs are auto-generated. All vectors of this collection must use auto-generated IDs.");
}
rc.RecordSection("check validation");
@ -117,34 +119,34 @@ InsertRequest::OnExecute() {
// step 4: some metric type doesn't support float vectors
if (!vectors_data_.float_data_.empty()) { // insert float vectors
if (engine::utils::IsBinaryMetricType(table_schema.metric_type_)) {
return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Table metric type doesn't support float vectors.");
return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Collection metric type doesn't support float vectors.");
}
// check prepared float data
if (vectors_data_.float_data_.size() % vector_count != 0) {
return Status(SERVER_INVALID_ROWRECORD_ARRAY,
"The vector dimension must be equal to the table dimension.");
"The vector dimension must be equal to the collection dimension.");
}
fiu_do_on("InsertRequest.OnExecute.invalid_dim", table_schema.dimension_ = -1);
if (vectors_data_.float_data_.size() / vector_count != table_schema.dimension_) {
return Status(SERVER_INVALID_VECTOR_DIMENSION,
"The vector dimension must be equal to the table dimension.");
"The vector dimension must be equal to the collection dimension.");
}
} else if (!vectors_data_.binary_data_.empty()) { // insert binary vectors
if (!engine::utils::IsBinaryMetricType(table_schema.metric_type_)) {
return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Table metric type doesn't support binary vectors.");
return Status(SERVER_INVALID_ROWRECORD_ARRAY, "Collection metric type doesn't support binary vectors.");
}
// check prepared binary data
if (vectors_data_.binary_data_.size() % vector_count != 0) {
return Status(SERVER_INVALID_ROWRECORD_ARRAY,
"The vector dimension must be equal to the table dimension.");
"The vector dimension must be equal to the collection dimension.");
}
if (vectors_data_.binary_data_.size() * 8 / vector_count != table_schema.dimension_) {
return Status(SERVER_INVALID_VECTOR_DIMENSION,
"The vector dimension must be equal to the table dimension.");
"The vector dimension must be equal to the collection dimension.");
}
}
@ -152,7 +154,7 @@ InsertRequest::OnExecute() {
auto vec_count = static_cast<uint64_t>(vector_count);
rc.RecordSection("prepare vectors data");
status = DBWrapper::DB()->InsertVectors(table_name_, partition_tag_, vectors_data_);
status = DBWrapper::DB()->InsertVectors(collection_name_, partition_tag_, vectors_data_);
fiu_do_on("InsertRequest.OnExecute.insert_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
return status;
@ -166,10 +168,10 @@ InsertRequest::OnExecute() {
return Status(SERVER_ILLEGAL_VECTOR_ID, msg);
}
// step 6: update table flag
// step 6: update collection flag
user_provide_ids ? table_schema.flag_ |= engine::meta::FLAG_MASK_HAS_USERID
: table_schema.flag_ |= engine::meta::FLAG_MASK_NO_USERID;
status = DBWrapper::DB()->UpdateTableFlag(table_name_, table_schema.flag_);
status = DBWrapper::DB()->UpdateTableFlag(collection_name_, table_schema.flag_);
#ifdef MILVUS_ENABLE_PROFILING
ProfilerStop();
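
The float/binary branches above enforce the same dimension rule in two flavours: float vectors must satisfy size / count == dimension, binary vectors size * 8 / count == dimension (each byte packs 8 binary components). A standalone sketch of those checks (the helper names are assumptions, the arithmetic follows the diff):

#include <cstdint>
#include <vector>

// True when a batch of float vectors matches the collection dimension.
inline bool FloatDimensionOk(const std::vector<float>& data, uint64_t count, int64_t dim) {
    return count != 0 && data.size() % count == 0 &&
           static_cast<int64_t>(data.size() / count) == dim;
}

// True when a batch of binary vectors matches the collection dimension;
// every byte carries 8 components, hence the * 8.
inline bool BinaryDimensionOk(const std::vector<uint8_t>& data, uint64_t count, int64_t dim) {
    return count != 0 && data.size() % count == 0 &&
           static_cast<int64_t>(data.size() * 8 / count) == dim;
}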

View File

@ -23,18 +23,18 @@ namespace server {
class InsertRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
engine::VectorsData& vectors, const std::string& partition_tag);
protected:
InsertRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
InsertRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
engine::VectorsData& vectors, const std::string& partition_tag);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
engine::VectorsData& vectors_data_;
const std::string partition_tag_;
};

View File

@ -22,45 +22,46 @@ namespace milvus {
namespace server {
PreloadTableRequest::PreloadTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name)
: BaseRequest(context, BaseRequest::kPreloadTable), table_name_(table_name) {
const std::string& collection_name)
: BaseRequest(context, BaseRequest::kPreloadTable), collection_name_(collection_name) {
}
BaseRequestPtr
PreloadTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name) {
return std::shared_ptr<BaseRequest>(new PreloadTableRequest(context, table_name));
PreloadTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name) {
return std::shared_ptr<BaseRequest>(new PreloadTableRequest(context, collection_name));
}
Status
PreloadTableRequest::OnExecute() {
try {
std::string hdr = "PreloadTableRequest(table=" + table_name_ + ")";
std::string hdr = "PreloadTableRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// step 2: check table existence
status = DBWrapper::DB()->PreloadTable(table_name_);
// step 2: check collection existence
status = DBWrapper::DB()->PreloadTable(collection_name_);
fiu_do_on("PreloadTableRequest.OnExecute.preload_table_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("PreloadTableRequest.OnExecute.throw_std_exception", throw std::exception());

View File

@ -22,16 +22,16 @@ namespace server {
class PreloadTableRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name);
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
protected:
PreloadTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name);
PreloadTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
};
} // namespace server

View File

@ -34,11 +34,11 @@ namespace milvus {
namespace server {
SearchByIDRequest::SearchByIDRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, int64_t vector_id, int64_t topk,
const std::string& collection_name, int64_t vector_id, int64_t topk,
const milvus::json& extra_params, const std::vector<std::string>& partition_list,
TopKQueryResult& result)
: BaseRequest(context, BaseRequest::kSearchByID),
table_name_(table_name),
collection_name_(collection_name),
vector_id_(vector_id),
topk_(topk),
extra_params_(extra_params),
@ -47,11 +47,11 @@ SearchByIDRequest::SearchByIDRequest(const std::shared_ptr<milvus::server::Conte
}
BaseRequestPtr
SearchByIDRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
SearchByIDRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t vector_id, int64_t topk, const milvus::json& extra_params,
const std::vector<std::string>& partition_list, TopKQueryResult& result) {
return std::shared_ptr<BaseRequest>(
new SearchByIDRequest(context, table_name, vector_id, topk, extra_params, partition_list, result));
new SearchByIDRequest(context, collection_name, vector_id, topk, extra_params, partition_list, result));
}
Status
@ -59,15 +59,15 @@ SearchByIDRequest::OnExecute() {
try {
auto pre_query_ctx = context_->Child("Pre query");
std::string hdr = "SearchByIDRequest(table=" + table_name_ + ", id=" + std::to_string(vector_id_) +
std::string hdr = "SearchByIDRequest(collection=" + collection_name_ + ", id=" + std::to_string(vector_id_) +
", k=" + std::to_string(topk_) + ", extra_params=" + extra_params_.dump() + ")";
TimeRecorder rc(hdr);
// step 1: check empty id
// step 2: check table name
auto status = ValidationUtil::ValidateTableName(table_name_);
// step 2: check collection name
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
@ -78,20 +78,20 @@ SearchByIDRequest::OnExecute() {
return status;
}
// step 4: check table existence
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// step 4: check collection existence
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
@ -117,7 +117,7 @@ SearchByIDRequest::OnExecute() {
}
#endif
// step 7: check table's index type supports search by id
// step 7: check collection's index type supports search by id
if (table_schema.engine_type_ != (int32_t)engine::EngineType::FAISS_IDMAP &&
table_schema.engine_type_ != (int32_t)engine::EngineType::FAISS_BIN_IDMAP &&
table_schema.engine_type_ != (int32_t)engine::EngineType::FAISS_IVFFLAT &&
@ -142,7 +142,7 @@ SearchByIDRequest::OnExecute() {
pre_query_ctx->GetTraceContext()->GetSpan()->Finish();
status = DBWrapper::DB()->QueryByID(context_, table_name_, partition_list_, (size_t)topk_, extra_params_,
status = DBWrapper::DB()->QueryByID(context_, collection_name_, partition_list_, (size_t)topk_, extra_params_,
vector_id_, result_ids, result_distances);
#ifdef MILVUS_ENABLE_PROFILING
@ -155,7 +155,7 @@ SearchByIDRequest::OnExecute() {
}
if (result_ids.empty()) {
return Status::OK(); // empty table
return Status::OK(); // empty collection
}
auto post_query_ctx = context_->Child("Constructing result");

View File

@ -29,12 +29,12 @@ namespace server {
class SearchByIDRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name, int64_t vector_id,
int64_t topk, const milvus::json& extra_params, const std::vector<std::string>& partition_list,
TopKQueryResult& result);
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t vector_id, int64_t topk, const milvus::json& extra_params,
const std::vector<std::string>& partition_list, TopKQueryResult& result);
protected:
SearchByIDRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
SearchByIDRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t vector_id, int64_t topk, const milvus::json& extra_params,
const std::vector<std::string>& partition_list, TopKQueryResult& result);
@ -42,7 +42,7 @@ class SearchByIDRequest : public BaseRequest {
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
const int64_t vector_id_;
int64_t topk_;
milvus::json extra_params_;

View File

@ -106,7 +106,7 @@ SearchCombineRequest::Combine(const SearchRequestPtr& request) {
// reset some parameters in necessary
if (request_list_.empty()) {
// validate first request input
auto status = ValidationUtil::ValidateTableName(request->TableName());
auto status = ValidationUtil::ValidateCollectionName(request->CollectionName());
if (!status.ok()) {
return status;
}
@ -117,7 +117,7 @@ SearchCombineRequest::Combine(const SearchRequestPtr& request) {
}
// assign base parameters
table_name_ = request->TableName();
collection_name_ = request->CollectionName();
min_topk_ = request->TopK() - MAX_TOPK_GAP / 2;
if (min_topk_ < 0) {
min_topk_ = 0;
@ -138,7 +138,7 @@ SearchCombineRequest::Combine(const SearchRequestPtr& request) {
bool
SearchCombineRequest::CanCombine(const SearchRequestPtr& request) {
if (table_name_ != request->TableName()) {
if (collection_name_ != request->CollectionName()) {
return false;
}
@ -170,7 +170,7 @@ SearchCombineRequest::CanCombine(const SearchRequestPtr& request) {
bool
SearchCombineRequest::CanCombine(const SearchRequestPtr& left, const SearchRequestPtr& right) {
if (left->TableName() != right->TableName()) {
if (left->CollectionName() != right->CollectionName()) {
return false;
}
@ -226,18 +226,19 @@ SearchCombineRequest::OnExecute() {
size_t combined_request = request_list_.size();
SERVER_LOG_DEBUG << "SearchCombineRequest execute, request count=" << combined_request
<< ", extra_params=" << extra_params_.dump();
std::string hdr = "SearchCombineRequest(table=" + table_name_ + ")";
std::string hdr = "SearchCombineRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check table existence
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
auto status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
status = Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
status = Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
FreeRequests(status);
return status;
} else {
@ -246,7 +247,7 @@ SearchCombineRequest::OnExecute() {
}
} else {
if (!table_schema.owner_table_.empty()) {
status = Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
status = Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
FreeRequests(status);
return status;
}
@ -352,7 +353,7 @@ SearchCombineRequest::OnExecute() {
context_list.CreateChild(request_list_, "Combine Query");
if (file_id_list_.empty()) {
status = DBWrapper::DB()->Query(nullptr, table_name_, partition_list, (size_t)search_topk_,
status = DBWrapper::DB()->Query(nullptr, collection_name_, partition_list, (size_t)search_topk_,
extra_params_, vectors_data_, result_ids, result_distances);
} else {
status = DBWrapper::DB()->QueryByFileID(nullptr, file_id_list, (size_t)search_topk_, extra_params_,

View File

@ -44,7 +44,7 @@ class SearchCombineRequest : public BaseRequest {
FreeRequests(const Status& status);
private:
std::string table_name_;
std::string collection_name_;
engine::VectorsData vectors_data_;
int64_t min_topk_ = 0;
int64_t search_topk_ = 0;

View File

@ -27,12 +27,12 @@
namespace milvus {
namespace server {
SearchRequest::SearchRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params,
const std::vector<std::string>& partition_list,
SearchRequest::SearchRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, const engine::VectorsData& vectors, int64_t topk,
const milvus::json& extra_params, const std::vector<std::string>& partition_list,
const std::vector<std::string>& file_id_list, TopKQueryResult& result)
: BaseRequest(context, BaseRequest::kSearch),
table_name_(table_name),
collection_name_(collection_name),
vectors_data_(vectors),
topk_(topk),
extra_params_(extra_params),
@ -42,22 +42,22 @@ SearchRequest::SearchRequest(const std::shared_ptr<milvus::server::Context>& con
}
BaseRequestPtr
SearchRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
SearchRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params,
const std::vector<std::string>& partition_list, const std::vector<std::string>& file_id_list,
TopKQueryResult& result) {
return std::shared_ptr<BaseRequest>(
new SearchRequest(context, table_name, vectors, topk, extra_params, partition_list, file_id_list, result));
new SearchRequest(context, collection_name, vectors, topk, extra_params, partition_list, file_id_list, result));
}
Status
SearchRequest::OnPreExecute() {
std::string hdr = "SearchRequest pre-execute(table=" + table_name_ + ")";
std::string hdr = "SearchRequest pre-execute(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
milvus::server::ContextChild tracer_pre(context_, "Pre Query");
// step 1: check table name
auto status = ValidationUtil::ValidateTableName(table_name_);
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
@ -83,35 +83,36 @@ SearchRequest::OnExecute() {
try {
uint64_t vector_count = vectors_data_.vector_count_;
fiu_do_on("SearchRequest.OnExecute.throw_std_exception", throw std::exception());
std::string hdr = "SearchRequest execute(table=" + table_name_ + ", nq=" + std::to_string(vector_count) +
", k=" + std::to_string(topk_) + ")";
std::string hdr = "SearchRequest execute(collection=" + collection_name_ +
", nq=" + std::to_string(vector_count) + ", k=" + std::to_string(topk_) + ")";
TimeRecorderAuto rc(hdr);
// step 4: check table existence
// only process root table, ignore partition table
table_schema_.table_id_ = table_name_;
auto status = DBWrapper::DB()->DescribeTable(table_schema_);
collection_schema_.collection_id_ = collection_name_;
auto status = DBWrapper::DB()->DescribeTable(collection_schema_);
fiu_do_on("SearchRequest.OnExecute.describe_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema_.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
if (!collection_schema_.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// step 5: check search parameters
status = ValidationUtil::ValidateSearchParams(extra_params_, table_schema_, topk_);
status = ValidationUtil::ValidateSearchParams(extra_params_, collection_schema_, topk_);
if (!status.ok()) {
return status;
}
// step 6: check vector data according to metric type
status = ValidationUtil::ValidateVectorData(vectors_data_, table_schema_);
status = ValidationUtil::ValidateVectorData(vectors_data_, collection_schema_);
if (!status.ok()) {
return status;
}
@ -128,7 +129,7 @@ SearchRequest::OnExecute() {
engine::ResultDistances result_distances;
if (file_id_list_.empty()) {
status = DBWrapper::DB()->Query(context_, table_name_, partition_list_, (size_t)topk_, extra_params_,
status = DBWrapper::DB()->Query(context_, collection_name_, partition_list_, (size_t)topk_, extra_params_,
vectors_data_, result_ids, result_distances);
} else {
status = DBWrapper::DB()->QueryByFileID(context_, file_id_list_, (size_t)topk_, extra_params_,
@ -146,7 +147,7 @@ SearchRequest::OnExecute() {
}
fiu_do_on("SearchRequest.OnExecute.empty_result_ids", result_ids.clear());
if (result_ids.empty()) {
return Status::OK(); // empty table
return Status::OK(); // empty collection
}
// step 8: construct result array
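
SearchRequest::OnExecute ends with a simple dispatch: an explicit file id list routes the search to QueryByFileID, otherwise the collection (filtered by partition tags) is queried, and an empty id list afterwards means an empty collection rather than an error. A standalone model of that tail (the two callables stand in for DBWrapper::DB()->Query / QueryByFileID):

#include <cstdint>
#include <functional>
#include <string>
#include <vector>

// Models the query dispatch and empty-result handling above; both callables are
// illustrative stand-ins and simply fill result_ids on success.
inline bool RunQuerySketch(const std::vector<std::string>& file_id_list,
                           const std::function<bool(std::vector<int64_t>&)>& query_collection,
                           const std::function<bool(std::vector<int64_t>&)>& query_by_file_id,
                           std::vector<int64_t>& result_ids) {
    bool ok = file_id_list.empty() ? query_collection(result_ids) : query_by_file_id(result_ids);
    if (!ok) {
        return false;
    }
    // result_ids may legitimately be empty: the collection simply has no entities yet.
    return true;
}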

View File

@ -23,14 +23,14 @@ namespace server {
class SearchRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params,
const std::vector<std::string>& partition_list, const std::vector<std::string>& file_id_list,
TopKQueryResult& result);
const std::string&
TableName() const {
return table_name_;
CollectionName() const {
return collection_name_;
}
const engine::VectorsData&
@ -63,13 +63,13 @@ class SearchRequest : public BaseRequest {
return result_;
}
const milvus::engine::meta::TableSchema&
const milvus::engine::meta::CollectionSchema&
TableSchema() const {
return table_schema_;
return collection_schema_;
}
protected:
SearchRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
SearchRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
const engine::VectorsData& vectors, int64_t topk, const milvus::json& extra_params,
const std::vector<std::string>& partition_list, const std::vector<std::string>& file_id_list,
TopKQueryResult& result);
@ -81,7 +81,7 @@ class SearchRequest : public BaseRequest {
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
const engine::VectorsData& vectors_data_;
int64_t topk_;
milvus::json extra_params_;
@ -91,7 +91,7 @@ class SearchRequest : public BaseRequest {
TopKQueryResult& result_;
// for validation
milvus::engine::meta::TableSchema table_schema_;
milvus::engine::meta::CollectionSchema collection_schema_;
};
using SearchRequestPtr = std::shared_ptr<SearchRequest>;

View File

@ -23,49 +23,52 @@ namespace milvus {
namespace server {
ShowPartitionsRequest::ShowPartitionsRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, std::vector<PartitionParam>& partition_list)
: BaseRequest(context, BaseRequest::kShowPartitions), table_name_(table_name), partition_list_(partition_list) {
const std::string& collection_name,
std::vector<PartitionParam>& partition_list)
: BaseRequest(context, BaseRequest::kShowPartitions),
collection_name_(collection_name),
partition_list_(partition_list) {
}
BaseRequestPtr
ShowPartitionsRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
std::vector<PartitionParam>& partition_list) {
return std::shared_ptr<BaseRequest>(new ShowPartitionsRequest(context, table_name, partition_list));
ShowPartitionsRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, std::vector<PartitionParam>& partition_list) {
return std::shared_ptr<BaseRequest>(new ShowPartitionsRequest(context, collection_name, partition_list));
}
Status
ShowPartitionsRequest::OnExecute() {
std::string hdr = "ShowPartitionsRequest(table=" + table_name_ + ")";
std::string hdr = "ShowPartitionsRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check table name
auto status = ValidationUtil::ValidateTableName(table_name_);
// step 1: check collection name
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
fiu_do_on("ShowPartitionsRequest.OnExecute.invalid_table_name",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
return status;
}
// step 2: check table existence
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// step 2: check collection existence
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// step 3: get partitions
std::vector<engine::meta::TableSchema> schema_array;
status = DBWrapper::DB()->ShowPartitions(table_name_, schema_array);
std::vector<engine::meta::CollectionSchema> schema_array;
status = DBWrapper::DB()->ShowPartitions(collection_name_, schema_array);
fiu_do_on("ShowPartitionsRequest.OnExecute.show_partition_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
@ -73,7 +76,7 @@ ShowPartitionsRequest::OnExecute() {
}
partition_list_.clear();
partition_list_.emplace_back(table_name_, milvus::engine::DEFAULT_PARTITON_TAG);
partition_list_.emplace_back(collection_name_, milvus::engine::DEFAULT_PARTITON_TAG);
for (auto& schema : schema_array) {
partition_list_.emplace_back(schema.owner_table_, schema.partition_tag_);
}
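
The partition list returned above always starts with the collection itself under the default tag, followed by one entry per partition schema (owner collection plus tag). A standalone sketch of that assembly; the pair type stands in for PartitionParam, and the default tag is passed in because DEFAULT_PARTITON_TAG's value is not shown in this hunk:

#include <string>
#include <utility>
#include <vector>

// (collection or owner collection, partition tag) — stand-in for PartitionParam.
using PartitionParamSketch = std::pair<std::string, std::string>;

// Mirrors the construction above: the root collection with the default tag comes first,
// then one entry per partition schema.
inline std::vector<PartitionParamSketch> BuildPartitionListSketch(
    const std::string& collection_name, const std::string& default_tag,
    const std::vector<PartitionParamSketch>& partition_schemas) {
    std::vector<PartitionParamSketch> partition_list;
    partition_list.emplace_back(collection_name, default_tag);
    for (const auto& schema : partition_schemas) {
        partition_list.emplace_back(schema.first, schema.second);
    }
    return partition_list;
}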

View File

@ -23,18 +23,18 @@ namespace server {
class ShowPartitionsRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
std::vector<PartitionParam>& partition_list);
protected:
ShowPartitionsRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
ShowPartitionsRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
std::vector<PartitionParam>& partition_list);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
std::vector<PartitionParam>& partition_list_;
};

View File

@ -44,47 +44,47 @@ ConstructPartitionStat(const engine::PartitionStat& partition_stat, PartitionSta
}
ShowTableInfoRequest::ShowTableInfoRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& table_name, TableInfo& table_info)
: BaseRequest(context, BaseRequest::kShowTableInfo), table_name_(table_name), table_info_(table_info) {
const std::string& collection_name, TableInfo& table_info)
: BaseRequest(context, BaseRequest::kShowTableInfo), collection_name_(collection_name), table_info_(table_info) {
}
BaseRequestPtr
ShowTableInfoRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
TableInfo& table_info) {
return std::shared_ptr<BaseRequest>(new ShowTableInfoRequest(context, table_name, table_info));
ShowTableInfoRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, TableInfo& table_info) {
return std::shared_ptr<BaseRequest>(new ShowTableInfoRequest(context, collection_name, table_info));
}
Status
ShowTableInfoRequest::OnExecute() {
std::string hdr = "ShowTableInfoRequest(table=" + table_name_ + ")";
std::string hdr = "ShowTableInfoRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check table name
auto status = ValidationUtil::ValidateTableName(table_name_);
// step 1: check collection name
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
return status;
}
// step 2: check table existence
// only process root table, ignore partition table
engine::meta::TableSchema table_schema;
table_schema.table_id_ = table_name_;
// step 2: check collection existence
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(table_name_));
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
} else {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(table_name_));
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// step 3: get partitions
engine::TableInfo table_info;
status = DBWrapper::DB()->GetTableInfo(table_name_, table_info);
status = DBWrapper::DB()->GetTableInfo(collection_name_, table_info);
if (!status.ok()) {
return status;
}

View File

@ -29,18 +29,18 @@ namespace server {
class ShowTableInfoRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
TableInfo& table_info);
protected:
ShowTableInfoRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& table_name,
ShowTableInfoRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
TableInfo& table_info);
Status
OnExecute() override;
private:
const std::string table_name_;
const std::string collection_name_;
TableInfo& table_info_;
};

View File

@ -37,7 +37,7 @@ Status
ShowTablesRequest::OnExecute() {
TimeRecorderAuto rc("ShowTablesRequest");
std::vector<engine::meta::TableSchema> schema_array;
std::vector<engine::meta::CollectionSchema> schema_array;
auto status = DBWrapper::DB()->AllTables(schema_array);
fiu_do_on("ShowTablesRequest.OnExecute.show_tables_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
@ -45,7 +45,7 @@ ShowTablesRequest::OnExecute() {
}
for (auto& schema : schema_array) {
table_name_list_.push_back(schema.table_id_);
table_name_list_.push_back(schema.collection_id_);
}
return Status::OK();
}

View File

@ -481,9 +481,9 @@ GrpcRequestHandler::DescribeTable(::grpc::ServerContext* context, const ::milvus
::milvus::grpc::TableSchema* response) {
CHECK_NULLPTR_RETURN(request);
TableSchema table_schema;
CollectionSchema table_schema;
Status status = request_handler_.DescribeTable(context_map_[context], request->table_name(), table_schema);
response->set_table_name(table_schema.table_name_);
response->set_table_name(table_schema.collection_name_);
response->set_dimension(table_schema.dimension_);
response->set_index_file_size(table_schema.index_file_size_);
response->set_metric_type(table_schema.metric_type_);
@ -511,8 +511,8 @@ GrpcRequestHandler::ShowTables(::grpc::ServerContext* context, const ::milvus::g
std::vector<std::string> tables;
Status status = request_handler_.ShowTables(context_map_[context], tables);
for (auto& table : tables) {
response->add_table_names(table);
for (auto& collection : tables) {
response->add_table_names(collection);
}
SET_RESPONSE(response->mutable_status(), status, context);
@ -581,7 +581,7 @@ GrpcRequestHandler::DescribeIndex(::grpc::ServerContext* context, const ::milvus
IndexParam param;
Status status = request_handler_.DescribeIndex(context_map_[context], request->table_name(), param);
response->set_table_name(param.table_name_);
response->set_table_name(param.collection_name_);
response->set_index_type(param.index_type_);
::milvus::grpc::KeyValuePair* kv = response->add_extra_params();
kv->set_key(EXTRA_PARAM_KEY);
@ -645,11 +645,11 @@ GrpcRequestHandler::Flush(::grpc::ServerContext* context, const ::milvus::grpc::
::milvus::grpc::Status* response) {
CHECK_NULLPTR_RETURN(request);
std::vector<std::string> table_names;
std::vector<std::string> collection_names;
for (int32_t i = 0; i < request->table_name_array().size(); i++) {
table_names.push_back(request->table_name_array(i));
collection_names.push_back(request->table_name_array(i));
}
Status status = request_handler_.Flush(context_map_[context], table_names);
Status status = request_handler_.Flush(context_map_[context], collection_names);
SET_RESPONSE(response, status, context);
return ::grpc::Status::OK;
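
On the wire nothing changes in this commit: the handler still reads the repeated table_name_array field from FlushParam, and only the server-side variable is renamed to collection_names. A hedged client-side sketch of calling Flush through the generated stub (the header path, channel target and collection name are assumptions; the message and field names follow the handler code above):

#include <grpcpp/grpcpp.h>
#include "milvus.grpc.pb.h"  // assumed name of the generated proto header

int main() {
    auto channel = grpc::CreateChannel("localhost:19530", grpc::InsecureChannelCredentials());
    auto stub = ::milvus::grpc::MilvusService::NewStub(channel);

    ::milvus::grpc::FlushParam request;
    request.add_table_name_array("example_collection");  // proto field name is unchanged

    ::milvus::grpc::Status response;
    grpc::ClientContext context;
    ::grpc::Status rpc_status = stub->Flush(&context, request, &response);
    return rpc_status.ok() ? 0 : 1;
}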

View File

@ -80,36 +80,36 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service,
random_id() const;
// *
// @brief This method is used to create table
// @brief This method is used to create collection
//
// @param TableSchema, use to provide table information to be created.
// @param TableSchema, use to provide collection information to be created.
//
// @return Status
::grpc::Status
CreateTable(::grpc::ServerContext* context, const ::milvus::grpc::TableSchema* request,
::milvus::grpc::Status* response) override;
// *
// @brief This method is used to test table existence.
// @brief This method is used to test collection existence.
//
// @param TableName, table name is going to be tested.
// @param CollectionName, collection name is going to be tested.
//
// @return BoolReply
::grpc::Status
HasTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::BoolReply* response) override;
// *
// @brief This method is used to get table schema.
// @brief This method is used to get collection schema.
//
// @param TableName, target table name.
// @param CollectionName, target collection name.
//
// @return TableSchema
::grpc::Status
DescribeTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::TableSchema* response) override;
// *
// @brief This method is used to get table schema.
// @brief This method is used to get collection schema.
//
// @param TableName, target table name.
// @param CollectionName, target collection name.
//
// @return TableRowCount
::grpc::Status
@ -120,14 +120,14 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service,
//
// @param Command, dummy parameter.
//
// @return TableNameList
// @return CollectionNameList
::grpc::Status
ShowTables(::grpc::ServerContext* context, const ::milvus::grpc::Command* request,
::milvus::grpc::TableNameList* response) override;
// *
// @brief This method is used to get table detail information.
// @brief This method is used to get collection detail information.
//
// @param TableName, target table name.
// @param CollectionName, target collection name.
//
// @return TableInfo
::grpc::Status
@ -135,16 +135,16 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service,
::milvus::grpc::TableInfo* response);
// *
// @brief This method is used to delete table.
// @brief This method is used to delete collection.
//
// @param TableName, table name is going to be deleted.
// @param CollectionName, collection name is going to be deleted.
//
// @return TableNameList
// @return CollectionNameList
::grpc::Status
DropTable(::grpc::ServerContext* context, const ::milvus::grpc::TableName* request,
::milvus::grpc::Status* response) override;
// *
// @brief This method is used to build index by table in sync mode.
// @brief This method is used to build index by collection in sync mode.
//
// @param IndexParam, index paramters.
//
@ -155,7 +155,7 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service,
// *
// @brief This method is used to describe index
//
// @param TableName, target table name.
// @param CollectionName, target collection name.
//
// @return IndexParam
::grpc::Status
@ -164,7 +164,7 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service,
// *
// @brief This method is used to drop index
//
// @param TableName, target table name.
// @param CollectionName, target collection name.
//
// @return Status
::grpc::Status
@ -182,7 +182,7 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service,
// *
// @brief This method is used to show partition information
//
// @param TableName, target table name.
// @param CollectionName, target collection name.
//
// @return PartitionList
::grpc::Status
@ -198,7 +198,7 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service,
DropPartition(::grpc::ServerContext* context, const ::milvus::grpc::PartitionParam* request,
::milvus::grpc::Status* response) override;
// *
// @brief This method is used to add vector array to table.
// @brief This method is used to add vector array to collection.
//
// @param InsertParam, insert parameters.
//
@ -218,14 +218,14 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service,
// *
// @brief This method is used to get vector ids from a segment
//
// @param GetVectorIDsParam, target table and segment
// @param GetVectorIDsParam, target collection and segment
//
// @return VectorIds
::grpc::Status
GetVectorIDs(::grpc::ServerContext* context, const ::milvus::grpc::GetVectorIDsParam* request,
::milvus::grpc::VectorIds* response);
// *
// @brief This method is used to query vector in table.
// @brief This method is used to query vector in collection.
//
// @param SearchParam, search parameters.
//
@ -275,9 +275,9 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service,
::milvus::grpc::Status* response);
// *
// @brief This method is used to preload table
// @brief This method is used to preload collection
//
// @param TableName, target table name.
// @param CollectionName, target collection name.
//
// @return Status
::grpc::Status
@ -294,9 +294,9 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service,
Flush(::grpc::ServerContext* context, const ::milvus::grpc::FlushParam* request, ::milvus::grpc::Status* response);
// *
// @brief This method is used to compact table
// @brief This method is used to compact collection
//
// @param TableName, target table name.
// @param CollectionName, target collection name.
//
// @return Status
::grpc::Status

Some files were not shown because too many files have changed in this diff.