Rename from table to collection (#1857)

* Change HasTable to HasCollection

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Change HasTable to HasCollection

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Fix compile

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* Change table to collection

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Change Table to Collection

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Change Table to Collection

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Change Table to Collection

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Change Table to Collection

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Change Table to Collection

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Fix compiling error

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Fix compiling error

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Fix compiling error

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Fix lint

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>

* Fix Unit test

Signed-off-by: jinhai <hai.jin@zilliz.com>

* Fix lint

Signed-off-by: JinHai-CN <hai.jin@zilliz.com>
Jin Hai 2020-04-03 23:12:41 +08:00 committed by GitHub
parent dc707bb8a3
commit 35276ffcb0
83 changed files with 1724 additions and 1708 deletions

View File

@@ -134,7 +134,7 @@ Config::ValidateConfig() {
CONFIG_CHECK(GetDBConfigBackendUrl(db_backend_url));
std::string db_preload_table;
CONFIG_CHECK(GetDBConfigPreloadTable(db_preload_table));
CONFIG_CHECK(GetDBConfigPreloadCollection(db_preload_table));
int64_t db_archive_disk_threshold;
CONFIG_CHECK(GetDBConfigArchiveDiskThreshold(db_archive_disk_threshold));
@@ -261,7 +261,7 @@ Config::ResetDefaultConfig() {
/* db config */
CONFIG_CHECK(SetDBConfigBackendUrl(CONFIG_DB_BACKEND_URL_DEFAULT));
CONFIG_CHECK(SetDBConfigPreloadTable(CONFIG_DB_PRELOAD_TABLE_DEFAULT));
CONFIG_CHECK(SetDBConfigPreloadCollection(CONFIG_DB_PRELOAD_TABLE_DEFAULT));
CONFIG_CHECK(SetDBConfigArchiveDiskThreshold(CONFIG_DB_ARCHIVE_DISK_THRESHOLD_DEFAULT));
CONFIG_CHECK(SetDBConfigArchiveDaysThreshold(CONFIG_DB_ARCHIVE_DAYS_THRESHOLD_DEFAULT));
CONFIG_CHECK(SetDBConfigAutoFlushInterval(CONFIG_DB_AUTO_FLUSH_INTERVAL_DEFAULT));
@@ -354,7 +354,7 @@ Config::SetConfigCli(const std::string& parent_key, const std::string& child_key
if (child_key == CONFIG_DB_BACKEND_URL) {
status = SetDBConfigBackendUrl(value);
} else if (child_key == CONFIG_DB_PRELOAD_TABLE) {
status = SetDBConfigPreloadTable(value);
status = SetDBConfigPreloadCollection(value);
} else if (child_key == CONFIG_DB_AUTO_FLUSH_INTERVAL) {
status = SetDBConfigAutoFlushInterval(value);
} else {
@@ -776,7 +776,7 @@ Config::CheckDBConfigBackendUrl(const std::string& value) {
}
Status
Config::CheckDBConfigPreloadTable(const std::string& value) {
Config::CheckDBConfigPreloadCollection(const std::string& value) {
fiu_return_on("check_config_preload_table_fail", Status(SERVER_INVALID_ARGUMENT, ""));
if (value.empty() || value == "*") {
@@ -793,7 +793,7 @@ Config::CheckDBConfigPreloadTable(const std::string& value) {
return Status(SERVER_INVALID_ARGUMENT, "Invalid collection name: " + collection);
}
bool exist = false;
auto status = DBWrapper::DB()->HasNativeTable(collection, exist);
auto status = DBWrapper::DB()->HasNativeCollection(collection, exist);
if (!(status.ok() && exist)) {
return Status(SERVER_TABLE_NOT_EXIST, "Collection " + collection + " not exist");
}
@@ -1502,7 +1502,7 @@ Config::GetDBConfigArchiveDaysThreshold(int64_t& value) {
}
Status
Config::GetDBConfigPreloadTable(std::string& value) {
Config::GetDBConfigPreloadCollection(std::string& value) {
value = GetConfigStr(CONFIG_DB, CONFIG_DB_PRELOAD_TABLE);
return Status::OK();
}
@@ -1853,8 +1853,8 @@ Config::SetDBConfigBackendUrl(const std::string& value) {
}
Status
Config::SetDBConfigPreloadTable(const std::string& value) {
CONFIG_CHECK(CheckDBConfigPreloadTable(value));
Config::SetDBConfigPreloadCollection(const std::string& value) {
CONFIG_CHECK(CheckDBConfigPreloadCollection(value));
std::string cor_value = value == "*" ? "\'*\'" : value;
return SetConfigValueInMem(CONFIG_DB, CONFIG_DB_PRELOAD_TABLE, cor_value);
}
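
A minimal usage sketch (not part of the patch) of the renamed accessor: after this change, callers read the preload setting through GetDBConfigPreloadCollection. Config::GetInstance() is assumed to be the usual singleton entry point; the namespaces follow the code above.

    #include <iostream>
    #include <string>

    milvus::Status
    LogPreloadCollections() {
        // Assumed singleton accessor; the getter name comes from the diff above.
        auto& config = milvus::server::Config::GetInstance();
        std::string preload;
        auto status = config.GetDBConfigPreloadCollection(preload);
        if (status.ok()) {
            // Per CheckDBConfigPreloadCollection above, "*" means preload every collection.
            std::cout << "db_config.preload_collection = " << preload << std::endl;
        }
        return status;
    }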

View File

@@ -214,7 +214,7 @@ class Config {
Status
CheckDBConfigBackendUrl(const std::string& value);
Status
CheckDBConfigPreloadTable(const std::string& value);
CheckDBConfigPreloadCollection(const std::string& value);
Status
CheckDBConfigArchiveDiskThreshold(const std::string& value);
Status
@@ -329,7 +329,7 @@ class Config {
Status
GetDBConfigArchiveDaysThreshold(int64_t& value);
Status
GetDBConfigPreloadTable(std::string& value);
GetDBConfigPreloadCollection(std::string& value);
Status
GetDBConfigAutoFlushInterval(int64_t& value);
@@ -428,7 +428,7 @@ class Config {
Status
SetDBConfigBackendUrl(const std::string& value);
Status
SetDBConfigPreloadTable(const std::string& value);
SetDBConfigPreloadCollection(const std::string& value);
Status
SetDBConfigArchiveDiskThreshold(const std::string& value);
Status

View File

@@ -44,34 +44,34 @@ class DB {
Stop() = 0;
virtual Status
CreateTable(meta::CollectionSchema& table_schema_) = 0;
CreateCollection(meta::CollectionSchema& table_schema_) = 0;
virtual Status
DropTable(const std::string& collection_id) = 0;
DropCollection(const std::string& collection_id) = 0;
virtual Status
DescribeTable(meta::CollectionSchema& table_schema_) = 0;
DescribeCollection(meta::CollectionSchema& table_schema_) = 0;
virtual Status
HasTable(const std::string& collection_id, bool& has_or_not_) = 0;
HasCollection(const std::string& collection_id, bool& has_or_not_) = 0;
virtual Status
HasNativeTable(const std::string& collection_id, bool& has_or_not_) = 0;
HasNativeCollection(const std::string& collection_id, bool& has_or_not_) = 0;
virtual Status
AllTables(std::vector<meta::CollectionSchema>& table_schema_array) = 0;
AllCollections(std::vector<meta::CollectionSchema>& table_schema_array) = 0;
virtual Status
GetTableInfo(const std::string& collection_id, TableInfo& table_info) = 0;
GetCollectionInfo(const std::string& collection_id, CollectionInfo& collection_info) = 0;
virtual Status
GetTableRowCount(const std::string& collection_id, uint64_t& row_count) = 0;
GetCollectionRowCount(const std::string& collection_id, uint64_t& row_count) = 0;
virtual Status
PreloadTable(const std::string& collection_id) = 0;
PreloadCollection(const std::string& collection_id) = 0;
virtual Status
UpdateTableFlag(const std::string& collection_id, int64_t flag) = 0;
UpdateCollectionFlag(const std::string& collection_id, int64_t flag) = 0;
virtual Status
CreatePartition(const std::string& collection_id, const std::string& partition_name,
@@ -132,10 +132,10 @@ class DB {
Size(uint64_t& result) = 0;
virtual Status
CreateIndex(const std::string& collection_id, const TableIndex& index) = 0;
CreateIndex(const std::string& collection_id, const CollectionIndex& index) = 0;
virtual Status
DescribeIndex(const std::string& collection_id, TableIndex& index) = 0;
DescribeIndex(const std::string& collection_id, CollectionIndex& index) = 0;
virtual Status
DropIndex(const std::string& collection_id) = 0;
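
A hedged sketch of driving the renamed pure-virtual interface. DBPtr is assumed to be the usual smart-pointer alias for DB; collection_id_ appears in the diff, while dimension_ is an assumed CollectionSchema field name.

    milvus::Status
    EnsureCollection(const milvus::engine::DBPtr& db, const std::string& name) {
        bool exists = false;
        auto status = db->HasCollection(name, exists);  // renamed from HasTable
        if (!status.ok() || exists) {
            return status;
        }
        milvus::engine::meta::CollectionSchema schema;
        schema.collection_id_ = name;  // renamed field, per MetaTypes.h below
        schema.dimension_ = 128;       // assumed field name for the vector dimension
        return db->CreateCollection(schema);
    }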

View File

@@ -178,67 +178,67 @@ DBImpl::DropAll() {
}
Status
DBImpl::CreateTable(meta::CollectionSchema& table_schema) {
DBImpl::CreateCollection(meta::CollectionSchema& collection_schema) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
meta::CollectionSchema temp_schema = table_schema;
meta::CollectionSchema temp_schema = collection_schema;
temp_schema.index_file_size_ *= ONE_MB; // store as MB
if (options_.wal_enable_) {
temp_schema.flush_lsn_ = wal_mgr_->CreateTable(table_schema.collection_id_);
temp_schema.flush_lsn_ = wal_mgr_->CreateCollection(collection_schema.collection_id_);
}
return meta_ptr_->CreateTable(temp_schema);
return meta_ptr_->CreateCollection(temp_schema);
}
Status
DBImpl::DropTable(const std::string& collection_id) {
DBImpl::DropCollection(const std::string& collection_id) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
if (options_.wal_enable_) {
wal_mgr_->DropTable(collection_id);
wal_mgr_->DropCollection(collection_id);
}
return DropTableRecursively(collection_id);
return DropCollectionRecursively(collection_id);
}
Status
DBImpl::DescribeTable(meta::CollectionSchema& table_schema) {
DBImpl::DescribeCollection(meta::CollectionSchema& collection_schema) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
auto stat = meta_ptr_->DescribeTable(table_schema);
table_schema.index_file_size_ /= ONE_MB; // return as MB
auto stat = meta_ptr_->DescribeCollection(collection_schema);
collection_schema.index_file_size_ /= ONE_MB; // return as MB
return stat;
}
Status
DBImpl::HasTable(const std::string& collection_id, bool& has_or_not) {
DBImpl::HasCollection(const std::string& collection_id, bool& has_or_not) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
return meta_ptr_->HasTable(collection_id, has_or_not);
return meta_ptr_->HasCollection(collection_id, has_or_not);
}
Status
DBImpl::HasNativeTable(const std::string& collection_id, bool& has_or_not_) {
DBImpl::HasNativeCollection(const std::string& collection_id, bool& has_or_not_) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
engine::meta::CollectionSchema collection_schema;
collection_schema.collection_id_ = collection_id;
auto status = DescribeCollection(collection_schema);
if (!status.ok()) {
has_or_not_ = false;
return status;
} else {
if (!table_schema.owner_table_.empty()) {
if (!collection_schema.owner_collection_.empty()) {
has_or_not_ = false;
return Status(DB_NOT_FOUND, "");
}
@@ -249,19 +249,19 @@ DBImpl::HasNativeTable(const std::string& collection_id, bool& has_or_not_) {
}
Status
DBImpl::AllTables(std::vector<meta::CollectionSchema>& table_schema_array) {
DBImpl::AllCollections(std::vector<meta::CollectionSchema>& collection_schema_array) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
std::vector<meta::CollectionSchema> all_tables;
auto status = meta_ptr_->AllTables(all_tables);
std::vector<meta::CollectionSchema> all_collections;
auto status = meta_ptr_->AllCollections(all_collections);
// only return real tables, dont return partition tables
table_schema_array.clear();
for (auto& schema : all_tables) {
if (schema.owner_table_.empty()) {
table_schema_array.push_back(schema);
// only return real collections, dont return partition collections
collection_schema_array.clear();
for (auto& schema : all_collections) {
if (schema.owner_collection_.empty()) {
collection_schema_array.push_back(schema);
}
}
@@ -269,7 +269,7 @@ DBImpl::AllTables(std::vector<meta::CollectionSchema>& table_schema_array) {
}
Status
DBImpl::GetTableInfo(const std::string& collection_id, TableInfo& table_info) {
DBImpl::GetCollectionInfo(const std::string& collection_id, CollectionInfo& collection_info) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
@@ -301,8 +301,8 @@ DBImpl::GetTableInfo(const std::string& collection_id, TableInfo& table_info) {
};
for (auto& name_tag : name2tag) {
meta::SegmentsSchema table_files;
status = meta_ptr_->FilesByType(name_tag.first, file_types, table_files);
meta::SegmentsSchema collection_files;
status = meta_ptr_->FilesByType(name_tag.first, file_types, collection_files);
if (!status.ok()) {
std::string err_msg = "Failed to get collection info: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
@@ -310,7 +310,7 @@ DBImpl::GetTableInfo(const std::string& collection_id, TableInfo& table_info) {
}
std::vector<SegmentStat> segments_stat;
for (auto& file : table_files) {
for (auto& file : collection_files) {
SegmentStat seg_stat;
seg_stat.name_ = file.segment_id_;
seg_stat.row_count_ = (int64_t)file.row_count_;
@@ -327,14 +327,14 @@ DBImpl::GetTableInfo(const std::string& collection_id, TableInfo& table_info) {
}
partition_stat.segments_stat_.swap(segments_stat);
table_info.partitions_stat_.emplace_back(partition_stat);
collection_info.partitions_stat_.emplace_back(partition_stat);
}
return Status::OK();
}
Status
DBImpl::PreloadTable(const std::string& collection_id) {
DBImpl::PreloadCollection(const std::string& collection_id) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
@@ -346,7 +346,7 @@ DBImpl::PreloadTable(const std::string& collection_id) {
return status;
}
// step 2: get files from partition tables
// step 2: get files from partition collections
std::vector<meta::CollectionSchema> partition_array;
status = meta_ptr_->ShowPartitions(collection_id, partition_array);
for (auto& schema : partition_array) {
@@ -376,16 +376,16 @@ DBImpl::PreloadTable(const std::string& collection_id) {
auto json = milvus::json::parse(file.index_params_);
ExecutionEnginePtr engine =
EngineFactory::Build(file.dimension_, file.location_, engine_type, (MetricType)file.metric_type_, json);
fiu_do_on("DBImpl.PreloadTable.null_engine", engine = nullptr);
fiu_do_on("DBImpl.PreloadCollection.null_engine", engine = nullptr);
if (engine == nullptr) {
ENGINE_LOG_ERROR << "Invalid engine type";
return Status(DB_ERROR, "Invalid engine type");
}
fiu_do_on("DBImpl.PreloadTable.exceed_cache", size = available_size + 1);
fiu_do_on("DBImpl.PreloadCollection.exceed_cache", size = available_size + 1);
try {
fiu_do_on("DBImpl.PreloadTable.engine_throw_exception", throw std::exception());
fiu_do_on("DBImpl.PreloadCollection.engine_throw_exception", throw std::exception());
std::string msg = "Pre-loaded file: " + file.file_id_ + " size: " + std::to_string(file.file_size_);
TimeRecorderAuto rc_1(msg);
engine->Load(true);
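
Because the fault-injection point strings above are renamed along with the methods, any test that arms them must switch to the new names as well (hence the "Fix Unit test" commit). A hedged sketch using the libfiu control API:

    #include <fiu-control.h>  // libfiu: fiu_init / fiu_enable / fiu_disable

    void
    SimulatePreloadNullEngine() {
        fiu_init(0);  // initialize the fault-injection framework once per process
        // The old "DBImpl.PreloadTable.null_engine" string no longer matches anything.
        fiu_enable("DBImpl.PreloadCollection.null_engine", 1, nullptr, 0);
        // ... call PreloadCollection() and expect Status(DB_ERROR, "Invalid engine type") ...
        fiu_disable("DBImpl.PreloadCollection.null_engine");
    }
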
@@ -406,21 +406,21 @@ DBImpl::PreloadTable(const std::string& collection_id) {
}
Status
DBImpl::UpdateTableFlag(const std::string& collection_id, int64_t flag) {
DBImpl::UpdateCollectionFlag(const std::string& collection_id, int64_t flag) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
return meta_ptr_->UpdateTableFlag(collection_id, flag);
return meta_ptr_->UpdateCollectionFlag(collection_id, flag);
}
Status
DBImpl::GetTableRowCount(const std::string& collection_id, uint64_t& row_count) {
DBImpl::GetCollectionRowCount(const std::string& collection_id, uint64_t& row_count) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
return GetTableRowCountRecursively(collection_id, row_count);
return GetCollectionRowCountRecursively(collection_id, row_count);
}
Status
@@ -431,7 +431,7 @@ DBImpl::CreatePartition(const std::string& collection_id, const std::string& par
}
uint64_t lsn = 0;
meta_ptr_->GetTableFlushLSN(collection_id, lsn);
meta_ptr_->GetCollectionFlushLSN(collection_id, lsn);
return meta_ptr_->CreatePartition(collection_id, partition_name, partition_tag, lsn);
}
@@ -501,8 +501,8 @@ DBImpl::InsertVectors(const std::string& collection_id, const std::string& parti
Status status;
if (options_.wal_enable_) {
std::string target_table_name;
status = GetPartitionByTag(collection_id, partition_tag, target_table_name);
std::string target_collection_name;
status = GetPartitionByTag(collection_id, partition_tag, target_collection_name);
if (!status.ok()) {
return status;
}
@@ -578,12 +578,12 @@ DBImpl::Flush(const std::string& collection_id) {
}
Status status;
bool has_table;
status = HasTable(collection_id, has_table);
bool has_collection;
status = HasCollection(collection_id, has_collection);
if (!status.ok()) {
return status;
}
if (!has_table) {
if (!has_collection) {
ENGINE_LOG_ERROR << "Collection to flush does not exist: " << collection_id;
return Status(DB_NOT_FOUND, "Collection to flush does not exist");
}
@@ -619,7 +619,7 @@ DBImpl::Flush() {
return SHUTDOWN_ERROR;
}
ENGINE_LOG_DEBUG << "Begin flush all tables";
ENGINE_LOG_DEBUG << "Begin flush all collections";
Status status;
if (options_.wal_enable_) {
@@ -636,7 +636,7 @@ DBImpl::Flush() {
status = ExecWalRecord(record);
}
ENGINE_LOG_DEBUG << "End flush all tables";
ENGINE_LOG_DEBUG << "End flush all collections";
return status;
}
@@ -647,9 +647,9 @@ DBImpl::Compact(const std::string& collection_id) {
return SHUTDOWN_ERROR;
}
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
engine::meta::CollectionSchema collection_schema;
collection_schema.collection_id_ = collection_id;
auto status = DescribeCollection(collection_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
ENGINE_LOG_ERROR << "Collection to compact does not exist: " << collection_id;
@@ -658,7 +658,7 @@ DBImpl::Compact(const std::string& collection_id) {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!collection_schema.owner_collection_.empty()) {
ENGINE_LOG_ERROR << "Collection to compact does not exist: " << collection_id;
return Status(DB_NOT_FOUND, "Collection to compact does not exist");
}
@@ -722,7 +722,7 @@ DBImpl::Compact(const std::string& collection_id) {
}
ENGINE_LOG_DEBUG << "Updating meta after compaction...";
status = meta_ptr_->UpdateTableFiles(files_to_update);
status = meta_ptr_->UpdateCollectionFiles(files_to_update);
OngoingFileChecker::GetInstance().UnmarkOngoingFile(file);
if (!status.ok()) {
compact_status = status;
@@ -749,7 +749,7 @@ DBImpl::CompactFile(const std::string& collection_id, const meta::SegmentSchema&
compacted_file.collection_id_ = collection_id;
// compacted_file.date_ = date;
compacted_file.file_type_ = meta::SegmentSchema::NEW_MERGE; // TODO: use NEW_MERGE for now
Status status = meta_ptr_->CreateTableFile(compacted_file);
Status status = meta_ptr_->CreateCollectionFile(compacted_file);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to create collection file: " << status.message();
@@ -774,7 +774,7 @@ DBImpl::CompactFile(const std::string& collection_id, const meta::SegmentSchema&
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to serialize compacted segment: " << status.message();
compacted_file.file_type_ = meta::SegmentSchema::TO_DELETE;
auto mark_status = meta_ptr_->UpdateTableFile(compacted_file);
auto mark_status = meta_ptr_->UpdateCollectionFile(compacted_file);
if (mark_status.ok()) {
ENGINE_LOG_DEBUG << "Mark file: " << compacted_file.file_id_ << " to to_delete";
}
@@ -804,7 +804,7 @@ DBImpl::CompactFile(const std::string& collection_id, const meta::SegmentSchema&
// Set all files in segment to TO_DELETE
auto& segment_id = file.segment_id_;
meta::SegmentsSchema segment_files;
status = meta_ptr_->GetTableFilesBySegmentId(segment_id, segment_files);
status = meta_ptr_->GetCollectionFilesBySegmentId(segment_id, segment_files);
if (!status.ok()) {
return status;
}
@@ -830,9 +830,9 @@ DBImpl::GetVectorByID(const std::string& collection_id, const IDNumber& vector_i
return SHUTDOWN_ERROR;
}
bool has_table;
auto status = HasTable(collection_id, has_table);
if (!has_table) {
bool has_collection;
auto status = HasCollection(collection_id, has_collection);
if (!has_collection) {
ENGINE_LOG_ERROR << "Collection " << collection_id << " does not exist: ";
return Status(DB_NOT_FOUND, "Collection does not exist");
}
@@ -844,7 +844,7 @@ DBImpl::GetVectorByID(const std::string& collection_id, const IDNumber& vector_i
std::vector<int> file_types{meta::SegmentSchema::FILE_TYPE::RAW, meta::SegmentSchema::FILE_TYPE::TO_INDEX,
meta::SegmentSchema::FILE_TYPE::BACKUP};
meta::SegmentsSchema table_files;
meta::SegmentsSchema collection_files;
status = meta_ptr_->FilesByType(collection_id, file_types, files_to_query);
if (!status.ok()) {
std::string err_msg = "Failed to get files for GetVectorByID: " + status.message();
@@ -889,9 +889,9 @@ DBImpl::GetVectorIDs(const std::string& collection_id, const std::string& segmen
}
// step 1: check collection existence
bool has_table;
auto status = HasTable(collection_id, has_table);
if (!has_table) {
bool has_collection;
auto status = HasCollection(collection_id, has_collection);
if (!has_collection) {
ENGINE_LOG_ERROR << "Collection " << collection_id << " does not exist: ";
return Status(DB_NOT_FOUND, "Collection does not exist");
}
@@ -900,30 +900,30 @@ DBImpl::GetVectorIDs(const std::string& collection_id, const std::string& segmen
}
// step 2: find segment
meta::SegmentsSchema table_files;
status = meta_ptr_->GetTableFilesBySegmentId(segment_id, table_files);
meta::SegmentsSchema collection_files;
status = meta_ptr_->GetCollectionFilesBySegmentId(segment_id, collection_files);
if (!status.ok()) {
return status;
}
if (table_files.empty()) {
if (collection_files.empty()) {
return Status(DB_NOT_FOUND, "Segment does not exist");
}
// check the segment is belong to this collection
if (table_files[0].collection_id_ != collection_id) {
if (collection_files[0].collection_id_ != collection_id) {
// the segment could be in a partition under this collection
meta::CollectionSchema table_schema;
table_schema.collection_id_ = table_files[0].collection_id_;
status = DescribeTable(table_schema);
if (table_schema.owner_table_ != collection_id) {
meta::CollectionSchema collection_schema;
collection_schema.collection_id_ = collection_files[0].collection_id_;
status = DescribeCollection(collection_schema);
if (collection_schema.owner_collection_ != collection_id) {
return Status(DB_NOT_FOUND, "Segment does not belong to this collection");
}
}
// step 3: load segment ids and delete offset
std::string segment_dir;
engine::utils::GetParentPath(table_files[0].location_, segment_dir);
engine::utils::GetParentPath(collection_files[0].location_, segment_dir);
segment::SegmentReader segment_reader(segment_dir);
std::vector<segment::doc_id_t> uids;
@@ -1020,21 +1020,21 @@ DBImpl::GetVectorByIdHelper(const std::string& collection_id, IDNumber vector_id
}
Status
DBImpl::CreateIndex(const std::string& collection_id, const TableIndex& index) {
DBImpl::CreateIndex(const std::string& collection_id, const CollectionIndex& index) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
// serialize memory data
// std::set<std::string> sync_table_ids;
// auto status = SyncMemData(sync_table_ids);
// std::set<std::string> sync_collection_ids;
// auto status = SyncMemData(sync_collection_ids);
auto status = Flush();
{
std::unique_lock<std::mutex> lock(build_index_mutex_);
// step 1: check index difference
TableIndex old_index;
CollectionIndex old_index;
status = DescribeIndex(collection_id, old_index);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to get collection index info for collection: " << collection_id;
@@ -1042,10 +1042,10 @@ DBImpl::CreateIndex(const std::string& collection_id, const TableIndex& index) {
}
// step 2: update index info
TableIndex new_index = index;
new_index.metric_type_ = old_index.metric_type_; // dont change metric type, it was defined by CreateTable
CollectionIndex new_index = index;
new_index.metric_type_ = old_index.metric_type_; // dont change metric type, it was defined by CreateCollection
if (!utils::IsSameIndex(old_index, new_index)) {
status = UpdateTableIndexRecursively(collection_id, new_index);
status = UpdateCollectionIndexRecursively(collection_id, new_index);
if (!status.ok()) {
return status;
}
@@ -1057,19 +1057,19 @@ DBImpl::CreateIndex(const std::string& collection_id, const TableIndex& index) {
WaitMergeFileFinish();
// step 4: wait and build index
status = index_failed_checker_.CleanFailedIndexFileOfTable(collection_id);
status = WaitTableIndexRecursively(collection_id, index);
status = index_failed_checker_.CleanFailedIndexFileOfCollection(collection_id);
status = WaitCollectionIndexRecursively(collection_id, index);
return status;
}
Status
DBImpl::DescribeIndex(const std::string& collection_id, TableIndex& index) {
DBImpl::DescribeIndex(const std::string& collection_id, CollectionIndex& index) {
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
return meta_ptr_->DescribeTableIndex(collection_id, index);
return meta_ptr_->DescribeCollectionIndex(collection_id, index);
}
Status
@@ -1079,7 +1079,7 @@ DBImpl::DropIndex(const std::string& collection_id) {
}
ENGINE_LOG_DEBUG << "Drop index for collection: " << collection_id;
return DropTableIndexRecursively(collection_id);
return DropCollectionIndexRecursively(collection_id);
}
Status
@@ -1343,21 +1343,21 @@ DBImpl::StartMergeTask() {
{
std::lock_guard<std::mutex> lck(merge_result_mutex_);
if (merge_thread_results_.empty()) {
// collect merge files for all tables(if merge_table_ids_ is empty) for two reasons:
// 1. other tables may still has un-merged files
// collect merge files for all collections(if merge_collection_ids_ is empty) for two reasons:
// 1. other collections may still has un-merged files
// 2. server may be closed unexpected, these un-merge files need to be merged when server restart
if (merge_table_ids_.empty()) {
std::vector<meta::CollectionSchema> table_schema_array;
meta_ptr_->AllTables(table_schema_array);
for (auto& schema : table_schema_array) {
merge_table_ids_.insert(schema.collection_id_);
if (merge_collection_ids_.empty()) {
std::vector<meta::CollectionSchema> collection_schema_array;
meta_ptr_->AllCollections(collection_schema_array);
for (auto& schema : collection_schema_array) {
merge_collection_ids_.insert(schema.collection_id_);
}
}
// start merge file thread
merge_thread_results_.push_back(
merge_thread_pool_.enqueue(&DBImpl::BackgroundMerge, this, merge_table_ids_));
merge_table_ids_.clear();
merge_thread_pool_.enqueue(&DBImpl::BackgroundMerge, this, merge_collection_ids_));
merge_collection_ids_.clear();
}
}
@@ -1371,10 +1371,10 @@ DBImpl::MergeFiles(const std::string& collection_id, const meta::SegmentsSchema&
ENGINE_LOG_DEBUG << "Merge files for collection: " << collection_id;
// step 1: create collection file
meta::SegmentSchema table_file;
table_file.collection_id_ = collection_id;
table_file.file_type_ = meta::SegmentSchema::NEW_MERGE;
Status status = meta_ptr_->CreateTableFile(table_file);
meta::SegmentSchema collection_file;
collection_file.collection_id_ = collection_id;
collection_file.file_type_ = meta::SegmentSchema::NEW_MERGE;
Status status = meta_ptr_->CreateCollectionFile(collection_file);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to create collection: " << status.ToString();
@@ -1384,20 +1384,20 @@ DBImpl::MergeFiles(const std::string& collection_id, const meta::SegmentsSchema&
// step 2: merge files
/*
ExecutionEnginePtr index =
EngineFactory::Build(table_file.dimension_, table_file.location_, (EngineType)table_file.engine_type_,
(MetricType)table_file.metric_type_, table_file.nlist_);
EngineFactory::Build(collection_file.dimension_, collection_file.location_,
(EngineType)collection_file.engine_type_, (MetricType)collection_file.metric_type_, collection_file.nlist_);
*/
meta::SegmentsSchema updated;
std::string new_segment_dir;
utils::GetParentPath(table_file.location_, new_segment_dir);
utils::GetParentPath(collection_file.location_, new_segment_dir);
auto segment_writer_ptr = std::make_shared<segment::SegmentWriter>(new_segment_dir);
for (auto& file : files) {
server::CollectMergeFilesMetrics metrics;
std::string segment_dir_to_merge;
utils::GetParentPath(file.location_, segment_dir_to_merge);
segment_writer_ptr->Merge(segment_dir_to_merge, table_file.file_id_);
segment_writer_ptr->Merge(segment_dir_to_merge, collection_file.file_id_);
auto file_schema = file;
file_schema.file_type_ = meta::SegmentSchema::TO_DELETE;
updated.push_back(file_schema);
@@ -1423,9 +1423,10 @@ DBImpl::MergeFiles(const std::string& collection_id, const meta::SegmentsSchema&
// if failed to serialize merge file to disk
// typical error: out of disk space, out of memory or permission denied
table_file.file_type_ = meta::SegmentSchema::TO_DELETE;
status = meta_ptr_->UpdateTableFile(table_file);
ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << table_file.file_id_ << " to to_delete";
collection_file.file_type_ = meta::SegmentSchema::TO_DELETE;
status = meta_ptr_->UpdateCollectionFile(collection_file);
ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << collection_file.file_id_
<< " to to_delete";
return status;
}
@@ -1433,19 +1434,19 @@ DBImpl::MergeFiles(const std::string& collection_id, const meta::SegmentsSchema&
// step 4: update collection files state
// if index type isn't IDMAP, set file type to TO_INDEX if file size exceed index_file_size
// else set file type to RAW, no need to build index
if (!utils::IsRawIndexType(table_file.engine_type_)) {
table_file.file_type_ = (segment_writer_ptr->Size() >= table_file.index_file_size_)
? meta::SegmentSchema::TO_INDEX
: meta::SegmentSchema::RAW;
if (!utils::IsRawIndexType(collection_file.engine_type_)) {
collection_file.file_type_ = (segment_writer_ptr->Size() >= collection_file.index_file_size_)
? meta::SegmentSchema::TO_INDEX
: meta::SegmentSchema::RAW;
} else {
table_file.file_type_ = meta::SegmentSchema::RAW;
collection_file.file_type_ = meta::SegmentSchema::RAW;
}
table_file.file_size_ = segment_writer_ptr->Size();
table_file.row_count_ = segment_writer_ptr->VectorCount();
updated.push_back(table_file);
status = meta_ptr_->UpdateTableFiles(updated);
ENGINE_LOG_DEBUG << "New merged segment " << table_file.segment_id_ << " of size " << segment_writer_ptr->Size()
<< " bytes";
collection_file.file_size_ = segment_writer_ptr->Size();
collection_file.row_count_ = segment_writer_ptr->VectorCount();
updated.push_back(collection_file);
status = meta_ptr_->UpdateCollectionFiles(updated);
ENGINE_LOG_DEBUG << "New merged segment " << collection_file.segment_id_ << " of size "
<< segment_writer_ptr->Size() << " bytes";
if (options_.insert_cache_immediately_) {
segment_writer_ptr->Cache();
@@ -1482,11 +1483,11 @@ DBImpl::BackgroundMergeFiles(const std::string& collection_id) {
}
void
DBImpl::BackgroundMerge(std::set<std::string> table_ids) {
DBImpl::BackgroundMerge(std::set<std::string> collection_ids) {
// ENGINE_LOG_TRACE << " Background merge thread start";
Status status;
for (auto& collection_id : table_ids) {
for (auto& collection_id : collection_ids) {
status = BackgroundMergeFiles(collection_id);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Merge files for collection " << collection_id << " failed: " << status.ToString();
@@ -1678,18 +1679,18 @@ DBImpl::GetPartitionsByTags(const std::string& collection_id, const std::vector<
}
Status
DBImpl::DropTableRecursively(const std::string& collection_id) {
DBImpl::DropCollectionRecursively(const std::string& collection_id) {
// dates partly delete files of the collection but currently we don't support
ENGINE_LOG_DEBUG << "Prepare to delete collection " << collection_id;
Status status;
if (options_.wal_enable_) {
wal_mgr_->DropTable(collection_id);
wal_mgr_->DropCollection(collection_id);
}
status = mem_mgr_->EraseMemVector(collection_id); // not allow insert
status = meta_ptr_->DropTable(collection_id); // soft delete collection
index_failed_checker_.CleanFailedIndexFileOfTable(collection_id);
status = mem_mgr_->EraseMemVector(collection_id); // not allow insert
status = meta_ptr_->DropCollection(collection_id); // soft delete collection
index_failed_checker_.CleanFailedIndexFileOfCollection(collection_id);
// scheduler will determine when to delete collection files
auto nres = scheduler::ResMgrInst::GetInstance()->GetNumOfComputeResource();
@@ -1700,8 +1701,8 @@ DBImpl::DropTableRecursively(const std::string& collection_id) {
std::vector<meta::CollectionSchema> partition_array;
status = meta_ptr_->ShowPartitions(collection_id, partition_array);
for (auto& schema : partition_array) {
status = DropTableRecursively(schema.collection_id_);
fiu_do_on("DBImpl.DropTableRecursively.failed", status = Status(DB_ERROR, ""));
status = DropCollectionRecursively(schema.collection_id_);
fiu_do_on("DBImpl.DropCollectionRecursively.failed", status = Status(DB_ERROR, ""));
if (!status.ok()) {
return status;
}
@@ -1711,11 +1712,11 @@ DBImpl::DropTableRecursively(const std::string& collection_id) {
}
Status
DBImpl::UpdateTableIndexRecursively(const std::string& collection_id, const TableIndex& index) {
DBImpl::UpdateCollectionIndexRecursively(const std::string& collection_id, const CollectionIndex& index) {
DropIndex(collection_id);
auto status = meta_ptr_->UpdateTableIndex(collection_id, index);
fiu_do_on("DBImpl.UpdateTableIndexRecursively.fail_update_table_index",
auto status = meta_ptr_->UpdateCollectionIndex(collection_id, index);
fiu_do_on("DBImpl.UpdateCollectionIndexRecursively.fail_update_collection_index",
status = Status(DB_META_TRANSACTION_FAILED, ""));
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to update collection index info for collection: " << collection_id;
@@ -1725,7 +1726,7 @@ DBImpl::UpdateTableIndexRecursively(const std::string& collection_id, const Tabl
std::vector<meta::CollectionSchema> partition_array;
status = meta_ptr_->ShowPartitions(collection_id, partition_array);
for (auto& schema : partition_array) {
status = UpdateTableIndexRecursively(schema.collection_id_, index);
status = UpdateCollectionIndexRecursively(schema.collection_id_, index);
if (!status.ok()) {
return status;
}
@@ -1735,7 +1736,7 @@ DBImpl::UpdateTableIndexRecursively(const std::string& collection_id, const Tabl
}
Status
DBImpl::WaitTableIndexRecursively(const std::string& collection_id, const TableIndex& index) {
DBImpl::WaitCollectionIndexRecursively(const std::string& collection_id, const CollectionIndex& index) {
// for IDMAP type, only wait all NEW file converted to RAW file
// for other type, wait NEW/RAW/NEW_MERGE/NEW_INDEX/TO_INDEX files converted to INDEX files
std::vector<int> file_types;
@@ -1753,29 +1754,29 @@ DBImpl::WaitTableIndexRecursively(const std::string& collection_id, const TableI
}
// get files to build index
meta::SegmentsSchema table_files;
auto status = GetFilesToBuildIndex(collection_id, file_types, table_files);
meta::SegmentsSchema collection_files;
auto status = GetFilesToBuildIndex(collection_id, file_types, collection_files);
int times = 1;
while (!table_files.empty()) {
while (!collection_files.empty()) {
ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
if (!utils::IsRawIndexType(index.engine_type_)) {
status = meta_ptr_->UpdateTableFilesToIndex(collection_id);
status = meta_ptr_->UpdateCollectionFilesToIndex(collection_id);
}
std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
GetFilesToBuildIndex(collection_id, file_types, table_files);
GetFilesToBuildIndex(collection_id, file_types, collection_files);
++times;
index_failed_checker_.IgnoreFailedIndexFiles(table_files);
index_failed_checker_.IgnoreFailedIndexFiles(collection_files);
}
// build index for partition
std::vector<meta::CollectionSchema> partition_array;
status = meta_ptr_->ShowPartitions(collection_id, partition_array);
for (auto& schema : partition_array) {
status = WaitTableIndexRecursively(schema.collection_id_, index);
fiu_do_on("DBImpl.WaitTableIndexRecursively.fail_build_table_Index_for_partition",
status = WaitCollectionIndexRecursively(schema.collection_id_, index);
fiu_do_on("DBImpl.WaitCollectionIndexRecursively.fail_build_collection_Index_for_partition",
status = Status(DB_ERROR, ""));
if (!status.ok()) {
return status;
@@ -1784,8 +1785,8 @@ DBImpl::WaitTableIndexRecursively(const std::string& collection_id, const TableI
// failed to build index for some files, return error
std::string err_msg;
index_failed_checker_.GetErrMsgForTable(collection_id, err_msg);
fiu_do_on("DBImpl.WaitTableIndexRecursively.not_empty_err_msg", err_msg.append("fiu"));
index_failed_checker_.GetErrMsgForCollection(collection_id, err_msg);
fiu_do_on("DBImpl.WaitCollectionIndexRecursively.not_empty_err_msg", err_msg.append("fiu"));
if (!err_msg.empty()) {
return Status(DB_ERROR, err_msg);
}
@@ -1794,10 +1795,10 @@ DBImpl::WaitTableIndexRecursively(const std::string& collection_id, const TableI
}
Status
DBImpl::DropTableIndexRecursively(const std::string& collection_id) {
DBImpl::DropCollectionIndexRecursively(const std::string& collection_id) {
ENGINE_LOG_DEBUG << "Drop index for collection: " << collection_id;
index_failed_checker_.CleanFailedIndexFileOfTable(collection_id);
auto status = meta_ptr_->DropTableIndex(collection_id);
index_failed_checker_.CleanFailedIndexFileOfCollection(collection_id);
auto status = meta_ptr_->DropCollectionIndex(collection_id);
if (!status.ok()) {
return status;
}
@@ -1806,8 +1807,8 @@ DBImpl::DropTableIndexRecursively(const std::string& collection_id) {
std::vector<meta::CollectionSchema> partition_array;
status = meta_ptr_->ShowPartitions(collection_id, partition_array);
for (auto& schema : partition_array) {
status = DropTableIndexRecursively(schema.collection_id_);
fiu_do_on("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition",
status = DropCollectionIndexRecursively(schema.collection_id_);
fiu_do_on("DBImpl.DropCollectionIndexRecursively.fail_drop_collection_Index_for_partition",
status = Status(DB_ERROR, ""));
if (!status.ok()) {
return status;
@@ -1818,7 +1819,7 @@ DBImpl::DropTableIndexRecursively(const std::string& collection_id) {
}
Status
DBImpl::GetTableRowCountRecursively(const std::string& collection_id, uint64_t& row_count) {
DBImpl::GetCollectionRowCountRecursively(const std::string& collection_id, uint64_t& row_count) {
row_count = 0;
auto status = meta_ptr_->Count(collection_id, row_count);
if (!status.ok()) {
@@ -1830,8 +1831,8 @@ DBImpl::GetTableRowCountRecursively(const std::string& collection_id, uint64_t&
status = meta_ptr_->ShowPartitions(collection_id, partition_array);
for (auto& schema : partition_array) {
uint64_t partition_row_count = 0;
status = GetTableRowCountRecursively(schema.collection_id_, partition_row_count);
fiu_do_on("DBImpl.GetTableRowCountRecursively.fail_get_table_rowcount_for_partition",
status = GetCollectionRowCountRecursively(schema.collection_id_, partition_row_count);
fiu_do_on("DBImpl.GetCollectionRowCountRecursively.fail_get_collection_rowcount_for_partition",
status = Status(DB_ERROR, ""));
if (!status.ok()) {
return status;
@@ -1847,17 +1848,17 @@ Status
DBImpl::ExecWalRecord(const wal::MXLogRecord& record) {
fiu_return_on("DBImpl.ExexWalRecord.return", Status(););
auto tables_flushed = [&](const std::set<std::string>& table_ids) -> uint64_t {
if (table_ids.empty()) {
auto collections_flushed = [&](const std::set<std::string>& collection_ids) -> uint64_t {
if (collection_ids.empty()) {
return 0;
}
uint64_t max_lsn = 0;
if (options_.wal_enable_) {
for (auto& collection : table_ids) {
for (auto& collection : collection_ids) {
uint64_t lsn = 0;
meta_ptr_->GetTableFlushLSN(collection, lsn);
wal_mgr_->TableFlushed(collection, lsn);
meta_ptr_->GetCollectionFlushLSN(collection, lsn);
wal_mgr_->CollectionFlushed(collection, lsn);
if (lsn > max_lsn) {
max_lsn = lsn;
}
@@ -1865,8 +1866,8 @@ DBImpl::ExecWalRecord(const wal::MXLogRecord& record) {
}
std::lock_guard<std::mutex> lck(merge_result_mutex_);
for (auto& collection : table_ids) {
merge_table_ids_.insert(collection);
for (auto& collection : collection_ids) {
merge_collection_ids_.insert(collection);
}
return max_lsn;
};
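
For context, a hedged sketch of the WAL bookkeeping sequence the collections_flushed lambda above relies on; WalManager is the assumed type behind wal_mgr_, and MetaPtr follows the alias used elsewhere in the diff.

    void
    WalRenameSketch(milvus::engine::wal::WalManager& wal,
                    const milvus::engine::meta::MetaPtr& meta, const std::string& collection_id) {
        // CreateCollection() returns the LSN baseline stored into CollectionSchema::flush_lsn_
        // (see DBImpl::CreateCollection above).
        uint64_t create_lsn = wal.CreateCollection(collection_id);
        (void)create_lsn;
        // After a flush, the meta LSN is read back and acknowledged to the WAL,
        // exactly as in the lambda above.
        uint64_t lsn = 0;
        meta->GetCollectionFlushLSN(collection_id, lsn);
        wal.CollectionFlushed(collection_id, lsn);
    }
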
@@ -1875,18 +1876,18 @@ DBImpl::ExecWalRecord(const wal::MXLogRecord& record) {
switch (record.type) {
case wal::MXLogType::InsertBinary: {
std::string target_table_name;
status = GetPartitionByTag(record.collection_id, record.partition_tag, target_table_name);
std::string target_collection_name;
status = GetPartitionByTag(record.collection_id, record.partition_tag, target_collection_name);
if (!status.ok()) {
return status;
}
std::set<std::string> flushed_tables;
status = mem_mgr_->InsertVectors(target_table_name, record.length, record.ids,
std::set<std::string> flushed_collections;
status = mem_mgr_->InsertVectors(target_collection_name, record.length, record.ids,
(record.data_size / record.length / sizeof(uint8_t)),
(const u_int8_t*)record.data, record.lsn, flushed_tables);
(const u_int8_t*)record.data, record.lsn, flushed_collections);
// even though !status.ok, run
tables_flushed(flushed_tables);
collections_flushed(flushed_collections);
// metrics
milvus::server::CollectInsertMetrics metrics(record.length, status);
@@ -1894,18 +1895,18 @@ DBImpl::ExecWalRecord(const wal::MXLogRecord& record) {
}
case wal::MXLogType::InsertVector: {
std::string target_table_name;
status = GetPartitionByTag(record.collection_id, record.partition_tag, target_table_name);
std::string target_collection_name;
status = GetPartitionByTag(record.collection_id, record.partition_tag, target_collection_name);
if (!status.ok()) {
return status;
}
std::set<std::string> flushed_tables;
status = mem_mgr_->InsertVectors(target_table_name, record.length, record.ids,
std::set<std::string> flushed_collections;
status = mem_mgr_->InsertVectors(target_collection_name, record.length, record.ids,
(record.data_size / record.length / sizeof(float)),
(const float*)record.data, record.lsn, flushed_tables);
(const float*)record.data, record.lsn, flushed_collections);
// even though !status.ok, run
tables_flushed(flushed_tables);
collections_flushed(flushed_collections);
// metrics
milvus::server::CollectInsertMetrics metrics(record.length, status);
@@ -1919,21 +1920,21 @@ DBImpl::ExecWalRecord(const wal::MXLogRecord& record) {
return status;
}
std::vector<std::string> table_ids{record.collection_id};
std::vector<std::string> collection_ids{record.collection_id};
for (auto& partition : partition_array) {
auto& partition_table_id = partition.collection_id_;
table_ids.emplace_back(partition_table_id);
auto& partition_collection_id = partition.collection_id_;
collection_ids.emplace_back(partition_collection_id);
}
if (record.length == 1) {
for (auto& collection_id : table_ids) {
for (auto& collection_id : collection_ids) {
status = mem_mgr_->DeleteVector(collection_id, *record.ids, record.lsn);
if (!status.ok()) {
return status;
}
}
} else {
for (auto& collection_id : table_ids) {
for (auto& collection_id : collection_ids) {
status = mem_mgr_->DeleteVectors(collection_id, record.length, record.ids, record.lsn);
if (!status.ok()) {
return status;
@@ -1952,33 +1953,33 @@ DBImpl::ExecWalRecord(const wal::MXLogRecord& record) {
return status;
}
std::vector<std::string> table_ids{record.collection_id};
std::vector<std::string> collection_ids{record.collection_id};
for (auto& partition : partition_array) {
auto& partition_table_id = partition.collection_id_;
table_ids.emplace_back(partition_table_id);
auto& partition_collection_id = partition.collection_id_;
collection_ids.emplace_back(partition_collection_id);
}
std::set<std::string> flushed_tables;
for (auto& collection_id : table_ids) {
std::set<std::string> flushed_collections;
for (auto& collection_id : collection_ids) {
const std::lock_guard<std::mutex> lock(flush_merge_compact_mutex_);
status = mem_mgr_->Flush(collection_id);
if (!status.ok()) {
break;
}
flushed_tables.insert(collection_id);
flushed_collections.insert(collection_id);
}
tables_flushed(flushed_tables);
collections_flushed(flushed_collections);
} else {
// flush all tables
std::set<std::string> table_ids;
// flush all collections
std::set<std::string> collection_ids;
{
const std::lock_guard<std::mutex> lock(flush_merge_compact_mutex_);
status = mem_mgr_->Flush(table_ids);
status = mem_mgr_->Flush(collection_ids);
}
uint64_t lsn = tables_flushed(table_ids);
uint64_t lsn = collections_flushed(collection_ids);
if (options_.wal_enable_) {
wal_mgr_->RemoveOldFiles(lsn);
}
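
A hedged caller-side sketch of the two flush entry points shown in this file; DBPtr is the assumed smart-pointer alias for DB.

    milvus::Status
    FlushOneThenAll(const milvus::engine::DBPtr& db, const std::string& collection_id) {
        // Flush(collection_id) fails with DB_NOT_FOUND when HasCollection() reports false.
        auto status = db->Flush(collection_id);
        if (!status.ok()) {
            return status;
        }
        return db->Flush();  // flushes every collection via a WAL flush record
    }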

View File

@@ -55,34 +55,34 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi
DropAll() override;
Status
CreateTable(meta::CollectionSchema& table_schema) override;
CreateCollection(meta::CollectionSchema& collection_schema) override;
Status
DropTable(const std::string& collection_id) override;
DropCollection(const std::string& collection_id) override;
Status
DescribeTable(meta::CollectionSchema& table_schema) override;
DescribeCollection(meta::CollectionSchema& collection_schema) override;
Status
HasTable(const std::string& collection_id, bool& has_or_not) override;
HasCollection(const std::string& collection_id, bool& has_or_not) override;
Status
HasNativeTable(const std::string& collection_id, bool& has_or_not_) override;
HasNativeCollection(const std::string& collection_id, bool& has_or_not_) override;
Status
AllTables(std::vector<meta::CollectionSchema>& table_schema_array) override;
AllCollections(std::vector<meta::CollectionSchema>& collection_schema_array) override;
Status
GetTableInfo(const std::string& collection_id, TableInfo& table_info) override;
GetCollectionInfo(const std::string& collection_id, CollectionInfo& collection_info) override;
Status
PreloadTable(const std::string& collection_id) override;
PreloadCollection(const std::string& collection_id) override;
Status
UpdateTableFlag(const std::string& collection_id, int64_t flag) override;
UpdateCollectionFlag(const std::string& collection_id, int64_t flag) override;
Status
GetTableRowCount(const std::string& collection_id, uint64_t& row_count) override;
GetCollectionRowCount(const std::string& collection_id, uint64_t& row_count) override;
Status
CreatePartition(const std::string& collection_id, const std::string& partition_name,
@@ -123,13 +123,13 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi
GetVectorIDs(const std::string& collection_id, const std::string& segment_id, IDNumbers& vector_ids) override;
// Status
// Merge(const std::set<std::string>& table_ids) override;
// Merge(const std::set<std::string>& collection_ids) override;
Status
CreateIndex(const std::string& collection_id, const TableIndex& index) override;
CreateIndex(const std::string& collection_id, const CollectionIndex& index) override;
Status
DescribeIndex(const std::string& collection_id, TableIndex& index) override;
DescribeIndex(const std::string& collection_id, CollectionIndex& index) override;
Status
DropIndex(const std::string& collection_id) override;
@@ -191,7 +191,7 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi
BackgroundMergeFiles(const std::string& collection_id);
void
BackgroundMerge(std::set<std::string> table_ids);
BackgroundMerge(std::set<std::string> collection_ids);
void
StartBuildIndexTask(bool force = false);
@@ -205,7 +205,7 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi
/*
Status
SyncMemData(std::set<std::string>& sync_table_ids);
SyncMemData(std::set<std::string>& sync_collection_ids);
*/
Status
@@ -223,19 +223,19 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi
std::set<std::string>& partition_name_array);
Status
DropTableRecursively(const std::string& collection_id);
DropCollectionRecursively(const std::string& collection_id);
Status
UpdateTableIndexRecursively(const std::string& collection_id, const TableIndex& index);
UpdateCollectionIndexRecursively(const std::string& collection_id, const CollectionIndex& index);
Status
WaitTableIndexRecursively(const std::string& collection_id, const TableIndex& index);
WaitCollectionIndexRecursively(const std::string& collection_id, const CollectionIndex& index);
Status
DropTableIndexRecursively(const std::string& collection_id);
DropCollectionIndexRecursively(const std::string& collection_id);
Status
GetTableRowCountRecursively(const std::string& collection_id, uint64_t& row_count);
GetCollectionRowCountRecursively(const std::string& collection_id, uint64_t& row_count);
Status
ExecWalRecord(const wal::MXLogRecord& record);
@@ -303,7 +303,7 @@ class DBImpl : public DB, public server::CacheConfigHandler, public server::Engi
ThreadPool merge_thread_pool_;
std::mutex merge_result_mutex_;
std::list<std::future<void>> merge_thread_results_;
std::set<std::string> merge_table_ids_;
std::set<std::string> merge_collection_ids_;
ThreadPool index_thread_pool_;
std::mutex index_result_mutex_;

View File

@@ -20,7 +20,7 @@ namespace engine {
constexpr uint64_t INDEX_FAILED_RETRY_TIME = 1;
Status
IndexFailedChecker::CleanFailedIndexFileOfTable(const std::string& collection_id) {
IndexFailedChecker::CleanFailedIndexFileOfCollection(const std::string& collection_id) {
std::lock_guard<std::mutex> lck(mutex_);
index_failed_files_.erase(collection_id); // rebuild failed index files for this collection
@@ -28,7 +28,7 @@ IndexFailedChecker::CleanFailedIndexFileOfTable(const std::string& collection_id
}
Status
IndexFailedChecker::GetErrMsgForTable(const std::string& collection_id, std::string& err_msg) {
IndexFailedChecker::GetErrMsgForCollection(const std::string& collection_id, std::string& err_msg) {
std::lock_guard<std::mutex> lck(mutex_);
auto iter = index_failed_files_.find(collection_id);
if (iter != index_failed_files_.end()) {

View File

@@ -25,10 +25,10 @@ namespace engine {
class IndexFailedChecker {
public:
Status
CleanFailedIndexFileOfTable(const std::string& collection_id);
CleanFailedIndexFileOfCollection(const std::string& collection_id);
Status
GetErrMsgForTable(const std::string& collection_id, std::string& err_msg);
GetErrMsgForCollection(const std::string& collection_id, std::string& err_msg);
Status
MarkFailedIndexFile(const meta::SegmentSchema& file, const std::string& err_msg);
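
A hedged sketch mirroring how DBImpl::CreateIndex drives this checker after the rename: clear stale failures up front, then surface the per-collection error message once building settles. The build-wait step is elided.

    milvus::Status
    BuildWithFailureTracking(milvus::engine::IndexFailedChecker& checker,
                             const std::string& collection_id) {
        checker.CleanFailedIndexFileOfCollection(collection_id);
        // ... wait for index files to convert, marking failures as they happen ...
        std::string err_msg;
        checker.GetErrMsgForCollection(collection_id, err_msg);
        return err_msg.empty() ? milvus::Status::OK()
                               : milvus::Status(milvus::DB_ERROR, err_msg);
    }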

View File

@@ -34,7 +34,7 @@ typedef std::vector<IDNumber> IDNumbers;
typedef std::vector<faiss::Index::idx_t> ResultIds;
typedef std::vector<faiss::Index::distance_t> ResultDistances;
struct TableIndex {
struct CollectionIndex {
int32_t engine_type_ = (int)EngineType::FAISS_IDMAP;
int32_t metric_type_ = (int)MetricType::L2;
milvus::json extra_params_ = {{"nlist", 16384}};
@@ -64,7 +64,7 @@ struct PartitionStat {
std::vector<SegmentStat> segments_stat_;
};
struct TableInfo {
struct CollectionInfo {
std::vector<PartitionStat> partitions_stat_;
};

View File

@@ -79,7 +79,7 @@ GetMicroSecTimeStamp() {
}
Status
CreateTablePath(const DBMetaOptions& options, const std::string& collection_id) {
CreateCollectionPath(const DBMetaOptions& options, const std::string& collection_id) {
std::string db_path = options.path_;
std::string table_path = db_path + TABLES_FOLDER + collection_id;
auto status = server::CommonUtil::CreateDirectory(table_path);
@@ -91,7 +91,7 @@ CreateTablePath(const DBMetaOptions& options, const std::string& collection_id)
for (auto& path : options.slave_paths_) {
table_path = path + TABLES_FOLDER + collection_id;
status = server::CommonUtil::CreateDirectory(table_path);
fiu_do_on("CreateTablePath.creat_slave_path", status = Status(DB_INVALID_PATH, ""));
fiu_do_on("CreateCollectionPath.creat_slave_path", status = Status(DB_INVALID_PATH, ""));
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
return status;
@@ -135,11 +135,11 @@ DeleteTablePath(const DBMetaOptions& options, const std::string& collection_id,
}
Status
CreateTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file) {
CreateCollectionFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file) {
std::string parent_path = GetTableFileParentFolder(options, table_file);
auto status = server::CommonUtil::CreateDirectory(parent_path);
fiu_do_on("CreateTableFilePath.fail_create", status = Status(DB_INVALID_PATH, ""));
fiu_do_on("CreateCollectionFilePath.fail_create", status = Status(DB_INVALID_PATH, ""));
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
return status;
@@ -211,7 +211,7 @@ GetParentPath(const std::string& path, std::string& parent_path) {
}
bool
IsSameIndex(const TableIndex& index1, const TableIndex& index2) {
IsSameIndex(const CollectionIndex& index1, const CollectionIndex& index2) {
return index1.engine_type_ == index2.engine_type_ && index1.extra_params_ == index2.extra_params_ &&
index1.metric_type_ == index2.metric_type_;
}
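
A hedged sketch of the comparison this helper enables, mirroring the rebuild gate in DBImpl::CreateIndex above: the metric type is pinned to the existing index before comparing, since it is fixed at CreateCollection time.

    bool
    NeedsIndexRebuild(const milvus::engine::CollectionIndex& current,
                      milvus::engine::CollectionIndex requested) {
        requested.metric_type_ = current.metric_type_;  // metric cannot change after CreateCollection
        return !milvus::engine::utils::IsSameIndex(current, requested);
    }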

View File

@@ -26,12 +26,12 @@ int64_t
GetMicroSecTimeStamp();
Status
CreateTablePath(const DBMetaOptions& options, const std::string& collection_id);
CreateCollectionPath(const DBMetaOptions& options, const std::string& collection_id);
Status
DeleteTablePath(const DBMetaOptions& options, const std::string& collection_id, bool force = true);
Status
CreateTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file);
CreateCollectionFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file);
Status
GetTableFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file);
Status
@@ -43,7 +43,7 @@ Status
GetParentPath(const std::string& path, std::string& parent_path);
bool
IsSameIndex(const TableIndex& index1, const TableIndex& index2);
IsSameIndex(const CollectionIndex& index1, const CollectionIndex& index2);
bool
IsRawIndexType(int32_t type);

View File

@@ -236,7 +236,7 @@ MemTable::ApplyDeletes() {
auto& segment_id = table_file.segment_id_;
meta::SegmentsSchema segment_files;
status = meta_->GetTableFilesBySegmentId(segment_id, segment_files);
status = meta_->GetCollectionFilesBySegmentId(segment_id, segment_files);
if (!status.ok()) {
break;
}
@@ -368,7 +368,7 @@ MemTable::ApplyDeletes() {
auto time7 = std::chrono::high_resolution_clock::now();
status = meta_->UpdateTableFilesRowCount(table_files_to_update);
status = meta_->UpdateCollectionFilesRowCount(table_files_to_update);
if (!status.ok()) {
std::string err_msg = "Failed to apply deletes: " + status.ToString();

View File

@@ -31,7 +31,7 @@ namespace engine {
MemTableFile::MemTableFile(const std::string& collection_id, const meta::MetaPtr& meta, const DBOptions& options)
: collection_id_(collection_id), meta_(meta), options_(options) {
current_mem_ = 0;
auto status = CreateTableFile();
auto status = CreateCollectionFile();
if (status.ok()) {
/*execution_engine_ = EngineFactory::Build(
table_file_schema_.dimension_, table_file_schema_.location_, (EngineType)table_file_schema_.engine_type_,
@@ -46,14 +46,14 @@ MemTableFile::MemTableFile(const std::string& collection_id, const meta::MetaPtr
}
Status
MemTableFile::CreateTableFile() {
MemTableFile::CreateCollectionFile() {
meta::SegmentSchema table_file_schema;
table_file_schema.collection_id_ = collection_id_;
auto status = meta_->CreateTableFile(table_file_schema);
auto status = meta_->CreateCollectionFile(table_file_schema);
if (status.ok()) {
table_file_schema_ = table_file_schema;
} else {
std::string err_msg = "MemTableFile::CreateTableFile failed: " + status.ToString();
std::string err_msg = "MemTableFile::CreateCollectionFile failed: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
}
return status;
@@ -167,7 +167,7 @@ MemTableFile::Serialize(uint64_t wal_lsn) {
* to write to or update the associated collection file in meta.
*
table_file_schema_.file_type_ = meta::SegmentSchema::TO_DELETE;
meta_->UpdateTableFile(table_file_schema_);
meta_->UpdateCollectionFile(table_file_schema_);
ENGINE_LOG_DEBUG << "Failed to serialize segment, mark file: " << table_file_schema_.file_id_
<< " to to_delete";
*/
@@ -196,7 +196,7 @@ MemTableFile::Serialize(uint64_t wal_lsn) {
// GetTableFilesByFlushLSN() in meta.
table_file_schema_.flush_lsn_ = wal_lsn;
status = meta_->UpdateTableFile(table_file_schema_);
status = meta_->UpdateCollectionFile(table_file_schema_);
ENGINE_LOG_DEBUG << "New " << ((table_file_schema_.file_type_ == meta::SegmentSchema::RAW) ? "raw" : "to_index")
<< " file " << table_file_schema_.file_id_ << " of size " << size << " bytes, lsn = " << wal_lsn;

View File

@@ -63,7 +63,7 @@ class MemTableFile : public server::CacheConfigHandler {
private:
Status
CreateTableFile();
CreateCollectionFile();
private:
const std::string collection_id_;

View File

@@ -43,64 +43,64 @@ class Meta {
virtual ~Meta() = default;
virtual Status
CreateTable(CollectionSchema& table_schema) = 0;
CreateCollection(CollectionSchema& table_schema) = 0;
virtual Status
DescribeTable(CollectionSchema& table_schema) = 0;
DescribeCollection(CollectionSchema& table_schema) = 0;
virtual Status
HasTable(const std::string& collection_id, bool& has_or_not) = 0;
HasCollection(const std::string& collection_id, bool& has_or_not) = 0;
virtual Status
AllTables(std::vector<CollectionSchema>& table_schema_array) = 0;
AllCollections(std::vector<CollectionSchema>& table_schema_array) = 0;
virtual Status
UpdateTableFlag(const std::string& collection_id, int64_t flag) = 0;
UpdateCollectionFlag(const std::string& collection_id, int64_t flag) = 0;
virtual Status
UpdateTableFlushLSN(const std::string& collection_id, uint64_t flush_lsn) = 0;
virtual Status
GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) = 0;
GetCollectionFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) = 0;
virtual Status
GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table_files) = 0;
virtual Status
DropTable(const std::string& collection_id) = 0;
DropCollection(const std::string& collection_id) = 0;
virtual Status
DeleteTableFiles(const std::string& collection_id) = 0;
virtual Status
CreateTableFile(SegmentSchema& file_schema) = 0;
CreateCollectionFile(SegmentSchema& file_schema) = 0;
virtual Status
GetTableFiles(const std::string& collection_id, const std::vector<size_t>& ids, SegmentsSchema& table_files) = 0;
virtual Status
GetTableFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) = 0;
GetCollectionFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) = 0;
virtual Status
UpdateTableFile(SegmentSchema& file_schema) = 0;
UpdateCollectionFile(SegmentSchema& file_schema) = 0;
virtual Status
UpdateTableFiles(SegmentsSchema& files) = 0;
UpdateCollectionFiles(SegmentsSchema& files) = 0;
virtual Status
UpdateTableFilesRowCount(SegmentsSchema& files) = 0;
UpdateCollectionFilesRowCount(SegmentsSchema& files) = 0;
virtual Status
UpdateTableIndex(const std::string& collection_id, const TableIndex& index) = 0;
UpdateCollectionIndex(const std::string& collection_id, const CollectionIndex& index) = 0;
virtual Status
UpdateTableFilesToIndex(const std::string& collection_id) = 0;
UpdateCollectionFilesToIndex(const std::string& collection_id) = 0;
virtual Status
DescribeTableIndex(const std::string& collection_id, TableIndex& index) = 0;
DescribeCollectionIndex(const std::string& collection_id, CollectionIndex& index) = 0;
virtual Status
DropTableIndex(const std::string& collection_id) = 0;
DropCollectionIndex(const std::string& collection_id) = 0;
virtual Status
CreatePartition(const std::string& collection_name, const std::string& partition_name, const std::string& tag,


@ -56,7 +56,7 @@ struct CollectionSchema {
int32_t engine_type_ = DEFAULT_ENGINE_TYPE;
std::string index_params_ = "{}";
int32_t metric_type_ = DEFAULT_METRIC_TYPE;
std::string owner_table_;
std::string owner_collection_;
std::string partition_tag_;
std::string version_ = CURRENT_VERSION;
uint64_t flush_lsn_ = 0;
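// Editor's sketch, not part of this change: owner_collection_ is what
// separates a partition row from a top-level collection row. Partitions carry
// their parent's id here (see CreatePartition below); top-level collections
// leave it empty, which is exactly what the AllCollections filter checks.
inline bool IsPartition(const CollectionSchema& schema) {
    return !schema.owner_collection_.empty();
}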


@ -376,15 +376,15 @@ MySQLMetaImpl::Initialize() {
}
Status
MySQLMetaImpl::CreateTable(CollectionSchema& table_schema) {
MySQLMetaImpl::CreateCollection(CollectionSchema& table_schema) {
try {
server::MetricCollector metric;
{
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.CreateTable.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.CreateTable.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.CreateCollection.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.CreateCollection.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
}
@ -397,13 +397,14 @@ MySQLMetaImpl::CreateTable(CollectionSchema& table_schema) {
createTableQuery << "SELECT state FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
<< table_schema.collection_id_ << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateCollection: " << createTableQuery.str();
mysqlpp::StoreQueryResult res = createTableQuery.store();
if (res.num_rows() == 1) {
int state = res[0]["state"];
fiu_do_on("MySQLMetaImpl.CreateTableTable.schema_TO_DELETE", state = CollectionSchema::TO_DELETE);
fiu_do_on("MySQLMetaImpl.CreateCollectionTable.schema_TO_DELETE",
state = CollectionSchema::TO_DELETE);
if (CollectionSchema::TO_DELETE == state) {
return Status(DB_ERROR,
"Collection already exists and it is in delete state, please wait a second");
@ -426,7 +427,7 @@ MySQLMetaImpl::CreateTable(CollectionSchema& table_schema) {
std::string engine_type = std::to_string(table_schema.engine_type_);
std::string& index_params = table_schema.index_params_;
std::string metric_type = std::to_string(table_schema.metric_type_);
std::string& owner_table = table_schema.owner_table_;
std::string& owner_table = table_schema.owner_collection_;
std::string& partition_tag = table_schema.partition_tag_;
std::string& version = table_schema.version_;
std::string flush_lsn = std::to_string(table_schema.flush_lsn_);
@ -438,7 +439,7 @@ MySQLMetaImpl::CreateTable(CollectionSchema& table_schema) {
<< mysqlpp::quote << partition_tag << ", " << mysqlpp::quote << version << ", "
<< flush_lsn << ");";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTable: " << createTableQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateCollection: " << createTableQuery.str();
if (mysqlpp::SimpleResult res = createTableQuery.execute()) {
table_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()?
@ -450,14 +451,14 @@ MySQLMetaImpl::CreateTable(CollectionSchema& table_schema) {
} // Scoped Connection
ENGINE_LOG_DEBUG << "Successfully create collection: " << table_schema.collection_id_;
return utils::CreateTablePath(options_, table_schema.collection_id_);
return utils::CreateCollectionPath(options_, table_schema.collection_id_);
} catch (std::exception& e) {
return HandleException("GENERAL ERROR WHEN CREATING TABLE", e.what());
}
}
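// Editor's sketch, not part of this change: mysqlpp::quote, threaded through
// every statement above, escapes the value and wraps it in single quotes, so a
// collection id containing quote characters cannot break the generated SQL.
// The function and variable names here are illustrative only.
std::string BuildStateQuery(mysqlpp::Connection& conn, const std::string& collection_id) {
    mysqlpp::Query query = conn.query();
    query << "SELECT state FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
          << collection_id << ";";
    return query.str();
}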
Status
MySQLMetaImpl::DescribeTable(CollectionSchema& table_schema) {
MySQLMetaImpl::DescribeCollection(CollectionSchema& table_schema) {
try {
server::MetricCollector metric;
mysqlpp::StoreQueryResult res;
@ -465,8 +466,8 @@ MySQLMetaImpl::DescribeTable(CollectionSchema& table_schema) {
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.DescribeTable.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.DescribeTable.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.DescribeCollection.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.DescribeCollection.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
}
@ -478,7 +479,7 @@ MySQLMetaImpl::DescribeTable(CollectionSchema& table_schema) {
<< " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << table_schema.collection_id_
<< " AND state <> " << std::to_string(CollectionSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTable: " << describeTableQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeCollection: " << describeTableQuery.str();
res = describeTableQuery.store();
} // Scoped Connection
@ -494,7 +495,7 @@ MySQLMetaImpl::DescribeTable(CollectionSchema& table_schema) {
table_schema.engine_type_ = resRow["engine_type"];
resRow["index_params"].to_string(table_schema.index_params_);
table_schema.metric_type_ = resRow["metric_type"];
resRow["owner_table"].to_string(table_schema.owner_table_);
resRow["owner_table"].to_string(table_schema.owner_collection_);
resRow["partition_tag"].to_string(table_schema.partition_tag_);
resRow["version"].to_string(table_schema.version_);
table_schema.flush_lsn_ = resRow["flush_lsn"];
@ -509,7 +510,7 @@ MySQLMetaImpl::DescribeTable(CollectionSchema& table_schema) {
}
Status
MySQLMetaImpl::HasTable(const std::string& collection_id, bool& has_or_not) {
MySQLMetaImpl::HasCollection(const std::string& collection_id, bool& has_or_not) {
try {
server::MetricCollector metric;
mysqlpp::StoreQueryResult res;
@ -517,23 +518,24 @@ MySQLMetaImpl::HasTable(const std::string& collection_id, bool& has_or_not) {
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.HasTable.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.HasTable.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.HasCollection.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.HasCollection.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
}
mysqlpp::Query hasTableQuery = connectionPtr->query();
mysqlpp::Query hasCollectionQuery = connectionPtr->query();
// since collection_id is a unique column, we just need to check whether it exists or not
hasTableQuery << "SELECT EXISTS"
<< " (SELECT 1 FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
<< collection_id << " AND state <> " << std::to_string(CollectionSchema::TO_DELETE) << ")"
<< " AS " << mysqlpp::quote << "check"
<< ";";
hasCollectionQuery << "SELECT EXISTS"
<< " (SELECT 1 FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
<< collection_id << " AND state <> " << std::to_string(CollectionSchema::TO_DELETE)
<< ")"
<< " AS " << mysqlpp::quote << "check"
<< ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::HasTable: " << hasTableQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::HasCollection: " << hasCollectionQuery.str();
res = hasTableQuery.store();
res = hasCollectionQuery.store();
} // Scoped Connection
int check = res[0]["check"];
@ -546,7 +548,7 @@ MySQLMetaImpl::HasTable(const std::string& collection_id, bool& has_or_not) {
}
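// Editor's sketch, not part of this change: how the EXISTS result above is
// consumed. The query returns exactly one row with one aliased column, so the
// whole existence test reduces to reading "check". Function name is invented.
bool CollectionExists(const mysqlpp::StoreQueryResult& res) {
    int check = res[0]["check"];  // 1 when a live (non-TO_DELETE) row matched
    return check == 1;
}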
Status
MySQLMetaImpl::AllTables(std::vector<CollectionSchema>& table_schema_array) {
MySQLMetaImpl::AllCollections(std::vector<CollectionSchema>& table_schema_array) {
try {
server::MetricCollector metric;
mysqlpp::StoreQueryResult res;
@ -566,7 +568,7 @@ MySQLMetaImpl::AllTables(std::vector<CollectionSchema>& table_schema_array) {
<< " FROM " << META_TABLES << " WHERE state <> "
<< std::to_string(CollectionSchema::TO_DELETE) << " AND owner_table = \"\";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allTablesQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllCollections: " << allTablesQuery.str();
res = allTablesQuery.store();
} // Scoped Connection
@ -580,7 +582,7 @@ MySQLMetaImpl::AllTables(std::vector<CollectionSchema>& table_schema_array) {
table_schema.engine_type_ = resRow["engine_type"];
resRow["index_params"].to_string(table_schema.index_params_);
table_schema.metric_type_ = resRow["metric_type"];
resRow["owner_table"].to_string(table_schema.owner_table_);
resRow["owner_table"].to_string(table_schema.owner_collection_);
resRow["partition_tag"].to_string(table_schema.partition_tag_);
resRow["version"].to_string(table_schema.version_);
table_schema.flush_lsn_ = resRow["flush_lsn"];
@ -595,15 +597,15 @@ MySQLMetaImpl::AllTables(std::vector<CollectionSchema>& table_schema_array) {
}
Status
MySQLMetaImpl::DropTable(const std::string& collection_id) {
MySQLMetaImpl::DropCollection(const std::string& collection_id) {
try {
server::MetricCollector metric;
{
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.DropTable.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.DropTable.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.DropCollection.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.DropCollection.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
@ -624,7 +626,7 @@ MySQLMetaImpl::DropTable(const std::string& collection_id) {
} // Scoped Connection
bool is_writable_mode{mode_ == DBOptions::MODE::CLUSTER_WRITABLE};
fiu_do_on("MySQLMetaImpl.DropTable.CLUSTER_WRITABLE_MODE", is_writable_mode = true);
fiu_do_on("MySQLMetaImpl.DropCollection.CLUSTER_WRITABLE_MODE", is_writable_mode = true);
if (is_writable_mode) {
DeleteTableFiles(collection_id);
}
@ -677,13 +679,13 @@ MySQLMetaImpl::DeleteTableFiles(const std::string& collection_id) {
}
Status
MySQLMetaImpl::CreateTableFile(SegmentSchema& file_schema) {
MySQLMetaImpl::CreateCollectionFile(SegmentSchema& file_schema) {
if (file_schema.date_ == EmptyDate) {
file_schema.date_ = utils::GetDate();
}
CollectionSchema table_schema;
table_schema.collection_id_ = file_schema.collection_id_;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -722,8 +724,8 @@ MySQLMetaImpl::CreateTableFile(SegmentSchema& file_schema) {
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.CreateTableFiles.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.CreateTableFiles.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.CreateCollectionFiles.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.CreateCollectionFiles.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
}
@ -736,7 +738,7 @@ MySQLMetaImpl::CreateTableFile(SegmentSchema& file_schema) {
<< row_count << ", " << updated_time << ", " << created_on << ", " << date << ", "
<< flush_lsn << ");";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateTableFile: " << createTableFileQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::CreateCollectionFile: " << createTableFileQuery.str();
if (mysqlpp::SimpleResult res = createTableFileQuery.execute()) {
file_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()?
@ -748,7 +750,7 @@ MySQLMetaImpl::CreateTableFile(SegmentSchema& file_schema) {
} // Scoped Connection
ENGINE_LOG_DEBUG << "Successfully create collection file, file id = " << file_schema.file_id_;
return utils::CreateTableFilePath(options_, file_schema);
return utils::CreateCollectionFilePath(options_, file_schema);
} catch (std::exception& e) {
return HandleException("GENERAL ERROR WHEN CREATING TABLE FILE", e.what());
}
@ -794,7 +796,7 @@ MySQLMetaImpl::GetTableFiles(const std::string& collection_id, const std::vector
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
DescribeTable(table_schema);
DescribeCollection(table_schema);
Status ret;
for (auto& resRow : res) {
@ -826,8 +828,8 @@ MySQLMetaImpl::GetTableFiles(const std::string& collection_id, const std::vector
}
Status
MySQLMetaImpl::GetTableFilesBySegmentId(const std::string& segment_id,
milvus::engine::meta::SegmentsSchema& table_files) {
MySQLMetaImpl::GetCollectionFilesBySegmentId(const std::string& segment_id,
milvus::engine::meta::SegmentsSchema& table_files) {
try {
mysqlpp::StoreQueryResult res;
{
@ -843,7 +845,7 @@ MySQLMetaImpl::GetTableFilesBySegmentId(const std::string& segment_id,
<< " FROM " << META_TABLEFILES << " WHERE segment_id = " << mysqlpp::quote << segment_id
<< " AND file_type <> " << std::to_string(SegmentSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetTableFilesBySegmentId: " << getTableFileQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetCollectionFilesBySegmentId: " << getTableFileQuery.str();
res = getTableFileQuery.store();
} // Scoped Connection
@ -851,7 +853,7 @@ MySQLMetaImpl::GetTableFilesBySegmentId(const std::string& segment_id,
if (!res.empty()) {
CollectionSchema table_schema;
res[0]["table_id"].to_string(table_schema.collection_id_);
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -886,7 +888,7 @@ MySQLMetaImpl::GetTableFilesBySegmentId(const std::string& segment_id,
}
Status
MySQLMetaImpl::UpdateTableIndex(const std::string& collection_id, const TableIndex& index) {
MySQLMetaImpl::UpdateCollectionIndex(const std::string& collection_id, const CollectionIndex& index) {
try {
server::MetricCollector metric;
@ -894,21 +896,21 @@ MySQLMetaImpl::UpdateTableIndex(const std::string& collection_id, const TableInd
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.UpdateTableIndex.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.UpdateTableIndex.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.UpdateCollectionIndex.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.UpdateCollectionIndex.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
}
mysqlpp::Query updateTableIndexParamQuery = connectionPtr->query();
updateTableIndexParamQuery << "SELECT id, state, dimension, created_on"
<< " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
<< collection_id << " AND state <> "
<< std::to_string(CollectionSchema::TO_DELETE) << ";";
mysqlpp::Query updateCollectionIndexParamQuery = connectionPtr->query();
updateCollectionIndexParamQuery << "SELECT id, state, dimension, created_on"
<< " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
<< collection_id << " AND state <> "
<< std::to_string(CollectionSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateCollectionIndex: " << updateCollectionIndexParamQuery.str();
mysqlpp::StoreQueryResult res = updateTableIndexParamQuery.store();
mysqlpp::StoreQueryResult res = updateCollectionIndexParamQuery.store();
if (res.num_rows() == 1) {
const mysqlpp::Row& resRow = res[0];
@ -918,18 +920,18 @@ MySQLMetaImpl::UpdateTableIndex(const std::string& collection_id, const TableInd
uint16_t dimension = resRow["dimension"];
int64_t created_on = resRow["created_on"];
updateTableIndexParamQuery << "UPDATE " << META_TABLES << " SET id = " << id << " ,state = " << state
<< " ,dimension = " << dimension << " ,created_on = " << created_on
<< " ,engine_type = " << index.engine_type_
<< " ,index_params = " << mysqlpp::quote << index.extra_params_.dump()
<< " ,metric_type = " << index.metric_type_
<< " WHERE table_id = " << mysqlpp::quote << collection_id << ";";
updateCollectionIndexParamQuery
<< "UPDATE " << META_TABLES << " SET id = " << id << " ,state = " << state
<< " ,dimension = " << dimension << " ,created_on = " << created_on
<< " ,engine_type = " << index.engine_type_ << " ,index_params = " << mysqlpp::quote
<< index.extra_params_.dump() << " ,metric_type = " << index.metric_type_
<< " WHERE table_id = " << mysqlpp::quote << collection_id << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableIndex: " << updateTableIndexParamQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateCollectionIndex: " << updateCollectionIndexParamQuery.str();
if (!updateTableIndexParamQuery.exec()) {
if (!updateCollectionIndexParamQuery.exec()) {
return HandleException("QUERY ERROR WHEN UPDATING TABLE INDEX PARAM",
updateTableIndexParamQuery.error());
updateCollectionIndexParamQuery.error());
}
} else {
return Status(DB_NOT_FOUND, "Collection " + collection_id + " not found");
@ -945,7 +947,7 @@ MySQLMetaImpl::UpdateTableIndex(const std::string& collection_id, const TableInd
}
Status
MySQLMetaImpl::UpdateTableFlag(const std::string& collection_id, int64_t flag) {
MySQLMetaImpl::UpdateCollectionFlag(const std::string& collection_id, int64_t flag) {
try {
server::MetricCollector metric;
@ -953,8 +955,8 @@ MySQLMetaImpl::UpdateTableFlag(const std::string& collection_id, int64_t flag) {
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.UpdateTableFlag.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.UpdateTableFlag.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.UpdateCollectionFlag.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.UpdateCollectionFlag.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
}
@ -963,7 +965,7 @@ MySQLMetaImpl::UpdateTableFlag(const std::string& collection_id, int64_t flag) {
updateTableFlagQuery << "UPDATE " << META_TABLES << " SET flag = " << flag
<< " WHERE table_id = " << mysqlpp::quote << collection_id << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFlag: " << updateTableFlagQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateCollectionFlag: " << updateTableFlagQuery.str();
if (!updateTableFlagQuery.exec()) {
return HandleException("QUERY ERROR WHEN UPDATING TABLE FLAG", updateTableFlagQuery.error());
@ -1010,7 +1012,7 @@ MySQLMetaImpl::UpdateTableFlushLSN(const std::string& collection_id, uint64_t fl
}
Status
MySQLMetaImpl::GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) {
MySQLMetaImpl::GetCollectionFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) {
try {
server::MetricCollector metric;
@ -1025,7 +1027,7 @@ MySQLMetaImpl::GetTableFlushLSN(const std::string& collection_id, uint64_t& flus
statement << "SELECT flush_lsn FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
<< collection_id << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetTableFlushLSN: " << statement.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::GetCollectionFlushLSN: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -1083,7 +1085,7 @@ MySQLMetaImpl::GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table
if (groupItr == groups.end()) {
CollectionSchema table_schema;
table_schema.collection_id_ = table_file.collection_id_;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -1113,7 +1115,7 @@ MySQLMetaImpl::GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table
// ZR: this function assumes all fields in file_schema have value
Status
MySQLMetaImpl::UpdateTableFile(SegmentSchema& file_schema) {
MySQLMetaImpl::UpdateCollectionFile(SegmentSchema& file_schema) {
file_schema.updated_time_ = utils::GetMicroSecTimeStamp();
try {
@ -1122,8 +1124,8 @@ MySQLMetaImpl::UpdateTableFile(SegmentSchema& file_schema) {
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.UpdateTableFile.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.UpdateTableFile.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.UpdateCollectionFile.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.UpdateCollectionFile.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
}
@ -1135,7 +1137,7 @@ MySQLMetaImpl::UpdateTableFile(SegmentSchema& file_schema) {
updateTableFileQuery << "SELECT state FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
<< file_schema.collection_id_ << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateCollectionFile: " << updateTableFileQuery.str();
mysqlpp::StoreQueryResult res = updateTableFileQuery.store();
@ -1166,7 +1168,7 @@ MySQLMetaImpl::UpdateTableFile(SegmentSchema& file_schema) {
<< " ,updated_time = " << updated_time << " ,created_on = " << created_on
<< " ,date = " << date << " WHERE id = " << id << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFile: " << updateTableFileQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateCollectionFile: " << updateTableFileQuery.str();
if (!updateTableFileQuery.exec()) {
ENGINE_LOG_DEBUG << "collection_id= " << file_schema.collection_id_
@ -1184,13 +1186,13 @@ MySQLMetaImpl::UpdateTableFile(SegmentSchema& file_schema) {
}
Status
MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& collection_id) {
MySQLMetaImpl::UpdateCollectionFilesToIndex(const std::string& collection_id) {
try {
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.UpdateTableFilesToIndex.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.UpdateTableFilesToIndex.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.UpdateCollectionFilesToIndex.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.UpdateCollectionFilesToIndex.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
}
@ -1203,7 +1205,7 @@ MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& collection_id) {
<< " AND row_count >= " << std::to_string(meta::BUILD_INDEX_THRESHOLD)
<< " AND file_type = " << std::to_string(SegmentSchema::RAW) << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFilesToIndex: " << updateTableFilesToIndexQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateCollectionFilesToIndex: " << updateTableFilesToIndexQuery.str();
if (!updateTableFilesToIndexQuery.exec()) {
return HandleException("QUERY ERROR WHEN UPDATING TABLE FILE TO INDEX",
@ -1219,24 +1221,24 @@ MySQLMetaImpl::UpdateTableFilesToIndex(const std::string& collection_id) {
}
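// Editor's sketch, not part of this change: the predicate encoded in the
// UPDATE above -- only RAW files that have grown past the build threshold are
// promoted to TO_INDEX. Assumes SegmentSchema keeps its row count in
// row_count_; the helper name is hypothetical.
bool ShouldBuildIndex(const meta::SegmentSchema& file) {
    return file.file_type_ == meta::SegmentSchema::RAW && file.row_count_ >= meta::BUILD_INDEX_THRESHOLD;
}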
Status
MySQLMetaImpl::UpdateTableFiles(SegmentsSchema& files) {
MySQLMetaImpl::UpdateCollectionFiles(SegmentsSchema& files) {
try {
server::MetricCollector metric;
{
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.UpdateTableFiles.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.UpdateTableFiles.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.UpdateCollectionFiles.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.UpdateCollectionFiles.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
}
mysqlpp::Query updateTableFilesQuery = connectionPtr->query();
std::map<std::string, bool> has_tables;
std::map<std::string, bool> has_collections;
for (auto& file_schema : files) {
if (has_tables.find(file_schema.collection_id_) != has_tables.end()) {
if (has_collections.find(file_schema.collection_id_) != has_collections.end()) {
continue;
}
@ -1247,16 +1249,16 @@ MySQLMetaImpl::UpdateTableFiles(SegmentsSchema& files) {
<< " AS " << mysqlpp::quote << "check"
<< ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFiles: " << updateTableFilesQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateCollectionFiles: " << updateTableFilesQuery.str();
mysqlpp::StoreQueryResult res = updateTableFilesQuery.store();
int check = res[0]["check"];
has_tables[file_schema.collection_id_] = (check == 1);
has_collections[file_schema.collection_id_] = (check == 1);
}
for (auto& file_schema : files) {
if (!has_tables[file_schema.collection_id_]) {
if (!has_collections[file_schema.collection_id_]) {
file_schema.file_type_ = SegmentSchema::TO_DELETE;
}
file_schema.updated_time_ = utils::GetMicroSecTimeStamp();
@ -1279,7 +1281,7 @@ MySQLMetaImpl::UpdateTableFiles(SegmentsSchema& files) {
<< " ,updated_time = " << updated_time << " ,created_on = " << created_on
<< " ,date = " << date << " WHERE id = " << id << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFiles: " << updateTableFilesQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateCollectionFiles: " << updateTableFilesQuery.str();
if (!updateTableFilesQuery.exec()) {
return HandleException("QUERY ERROR WHEN UPDATING TABLE FILES", updateTableFilesQuery.error());
@ -1296,7 +1298,7 @@ MySQLMetaImpl::UpdateTableFiles(SegmentsSchema& files) {
}
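// Editor's sketch, not part of this change: the shape of the second pass in
// UpdateCollectionFiles above -- any file whose collection no longer exists is
// retired before the batch is written back. Names are illustrative.
void MarkOrphanFiles(SegmentsSchema& files, std::map<std::string, bool>& has_collections) {
    for (auto& file_schema : files) {
        if (!has_collections[file_schema.collection_id_]) {
            file_schema.file_type_ = SegmentSchema::TO_DELETE;  // owning collection is gone
        }
    }
}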
Status
MySQLMetaImpl::UpdateTableFilesRowCount(SegmentsSchema& files) {
MySQLMetaImpl::UpdateCollectionFilesRowCount(SegmentsSchema& files) {
try {
server::MetricCollector metric;
{
@ -1317,7 +1319,7 @@ MySQLMetaImpl::UpdateTableFilesRowCount(SegmentsSchema& files) {
<< " , updated_time = " << updated_time << " WHERE file_id = " << file.file_id_
<< ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateTableFilesRowCount: " << updateTableFilesQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::UpdateCollectionFilesRowCount: " << updateTableFilesQuery.str();
if (!updateTableFilesQuery.exec()) {
return HandleException("QUERY ERROR WHEN UPDATING TABLE FILES", updateTableFilesQuery.error());
@ -1336,7 +1338,7 @@ MySQLMetaImpl::UpdateTableFilesRowCount(SegmentsSchema& files) {
}
Status
MySQLMetaImpl::DescribeTableIndex(const std::string& collection_id, TableIndex& index) {
MySQLMetaImpl::DescribeCollectionIndex(const std::string& collection_id, CollectionIndex& index) {
try {
server::MetricCollector metric;
@ -1344,21 +1346,21 @@ MySQLMetaImpl::DescribeTableIndex(const std::string& collection_id, TableIndex&
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.DescribeTableIndex.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.DescribeTableIndex.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.DescribeCollectionIndex.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.DescribeCollectionIndex.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
}
mysqlpp::Query describeTableIndexQuery = connectionPtr->query();
describeTableIndexQuery << "SELECT engine_type, index_params, index_file_size, metric_type"
<< " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
<< collection_id << " AND state <> " << std::to_string(CollectionSchema::TO_DELETE)
<< ";";
mysqlpp::Query describeCollectionIndexQuery = connectionPtr->query();
describeCollectionIndexQuery << "SELECT engine_type, index_params, index_file_size, metric_type"
<< " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
<< collection_id << " AND state <> "
<< std::to_string(CollectionSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeTableIndex: " << describeTableIndexQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::DescribeCollectionIndex: " << describeCollectionIndexQuery.str();
mysqlpp::StoreQueryResult res = describeTableIndexQuery.store();
mysqlpp::StoreQueryResult res = describeCollectionIndexQuery.store();
if (res.num_rows() == 1) {
const mysqlpp::Row& resRow = res[0];
@ -1380,7 +1382,7 @@ MySQLMetaImpl::DescribeTableIndex(const std::string& collection_id, TableIndex&
}
Status
MySQLMetaImpl::DropTableIndex(const std::string& collection_id) {
MySQLMetaImpl::DropCollectionIndex(const std::string& collection_id) {
try {
server::MetricCollector metric;
@ -1388,54 +1390,54 @@ MySQLMetaImpl::DropTableIndex(const std::string& collection_id) {
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
fiu_do_on("MySQLMetaImpl.DropTableIndex.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.DropTableIndex.throw_exception", throw std::exception(););
fiu_do_on("MySQLMetaImpl.DropCollectionIndex.null_connection", is_null_connection = true);
fiu_do_on("MySQLMetaImpl.DropCollectionIndex.throw_exception", throw std::exception(););
if (is_null_connection) {
return Status(DB_ERROR, "Failed to connect to meta server(mysql)");
}
mysqlpp::Query dropTableIndexQuery = connectionPtr->query();
mysqlpp::Query dropCollectionIndexQuery = connectionPtr->query();
// soft delete index files
dropTableIndexQuery << "UPDATE " << META_TABLEFILES
<< " SET file_type = " << std::to_string(SegmentSchema::TO_DELETE)
<< " ,updated_time = " << utils::GetMicroSecTimeStamp()
<< " WHERE table_id = " << mysqlpp::quote << collection_id
<< " AND file_type = " << std::to_string(SegmentSchema::INDEX) << ";";
dropCollectionIndexQuery << "UPDATE " << META_TABLEFILES
<< " SET file_type = " << std::to_string(SegmentSchema::TO_DELETE)
<< " ,updated_time = " << utils::GetMicroSecTimeStamp()
<< " WHERE table_id = " << mysqlpp::quote << collection_id
<< " AND file_type = " << std::to_string(SegmentSchema::INDEX) << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropCollectionIndex: " << dropCollectionIndexQuery.str();
if (!dropTableIndexQuery.exec()) {
return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error());
if (!dropCollectionIndexQuery.exec()) {
return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropCollectionIndexQuery.error());
}
// set all backup files to raw
dropTableIndexQuery << "UPDATE " << META_TABLEFILES
<< " SET file_type = " << std::to_string(SegmentSchema::RAW)
<< " ,updated_time = " << utils::GetMicroSecTimeStamp()
<< " WHERE table_id = " << mysqlpp::quote << collection_id
<< " AND file_type = " << std::to_string(SegmentSchema::BACKUP) << ";";
dropCollectionIndexQuery << "UPDATE " << META_TABLEFILES
<< " SET file_type = " << std::to_string(SegmentSchema::RAW)
<< " ,updated_time = " << utils::GetMicroSecTimeStamp()
<< " WHERE table_id = " << mysqlpp::quote << collection_id
<< " AND file_type = " << std::to_string(SegmentSchema::BACKUP) << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropCollectionIndex: " << dropCollectionIndexQuery.str();
if (!dropTableIndexQuery.exec()) {
return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error());
if (!dropCollectionIndexQuery.exec()) {
return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropCollectionIndexQuery.error());
}
// set collection index type to raw
dropTableIndexQuery << "UPDATE " << META_TABLES << " SET engine_type = "
<< " (CASE"
<< " WHEN metric_type in (" << (int32_t)MetricType::HAMMING << " ,"
<< (int32_t)MetricType::JACCARD << " ," << (int32_t)MetricType::TANIMOTO << ")"
<< " THEN " << (int32_t)EngineType::FAISS_BIN_IDMAP << " ELSE "
<< (int32_t)EngineType::FAISS_IDMAP << " END)"
<< " , index_params = '{}'"
<< " WHERE table_id = " << mysqlpp::quote << collection_id << ";";
dropCollectionIndexQuery << "UPDATE " << META_TABLES << " SET engine_type = "
<< " (CASE"
<< " WHEN metric_type in (" << (int32_t)MetricType::HAMMING << " ,"
<< (int32_t)MetricType::JACCARD << " ," << (int32_t)MetricType::TANIMOTO << ")"
<< " THEN " << (int32_t)EngineType::FAISS_BIN_IDMAP << " ELSE "
<< (int32_t)EngineType::FAISS_IDMAP << " END)"
<< " , index_params = '{}'"
<< " WHERE table_id = " << mysqlpp::quote << collection_id << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropTableIndex: " << dropTableIndexQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::DropCollectionIndex: " << dropCollectionIndexQuery.str();
if (!dropTableIndexQuery.exec()) {
return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropTableIndexQuery.error());
if (!dropCollectionIndexQuery.exec()) {
return HandleException("QUERY ERROR WHEN DROPPING TABLE INDEX", dropCollectionIndexQuery.error());
}
} // Scoped Connection
@ -1454,13 +1456,13 @@ MySQLMetaImpl::CreatePartition(const std::string& collection_id, const std::stri
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
// not allow create partition under partition
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(DB_ERROR, "Nested partition is not allowed");
}
@ -1486,11 +1488,11 @@ MySQLMetaImpl::CreatePartition(const std::string& collection_id, const std::stri
table_schema.id_ = -1;
table_schema.flag_ = 0;
table_schema.created_on_ = utils::GetMicroSecTimeStamp();
table_schema.owner_table_ = collection_id;
table_schema.owner_collection_ = collection_id;
table_schema.partition_tag_ = valid_tag;
table_schema.flush_lsn_ = lsn;
status = CreateTable(table_schema);
status = CreateCollection(table_schema);
fiu_do_on("MySQLMetaImpl.CreatePartition.aleady_exist", status = Status(DB_ALREADY_EXIST, ""));
if (status.code() == DB_ALREADY_EXIST) {
return Status(DB_ALREADY_EXIST, "Partition already exists");
@ -1501,7 +1503,7 @@ MySQLMetaImpl::CreatePartition(const std::string& collection_id, const std::stri
Status
MySQLMetaImpl::DropPartition(const std::string& partition_name) {
return DropTable(partition_name);
return DropCollection(partition_name);
}
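// Editor's note, not part of this change: DropPartition can delegate straight
// to DropCollection because a partition is persisted as an ordinary collection
// row whose owner_collection_ names its parent; no separate delete path is
// needed for partitions.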
Status
@ -1526,7 +1528,7 @@ MySQLMetaImpl::ShowPartitions(const std::string& collection_id,
<< " WHERE owner_table = " << mysqlpp::quote << collection_id << " AND state <> "
<< std::to_string(CollectionSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allPartitionsQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllCollections: " << allPartitionsQuery.str();
res = allPartitionsQuery.store();
} // Scoped Connection
@ -1543,7 +1545,7 @@ MySQLMetaImpl::ShowPartitions(const std::string& collection_id,
partition_schema.engine_type_ = resRow["engine_type"];
resRow["index_params"].to_string(partition_schema.index_params_);
partition_schema.metric_type_ = resRow["metric_type"];
partition_schema.owner_table_ = collection_id;
partition_schema.owner_collection_ = collection_id;
resRow["partition_tag"].to_string(partition_schema.partition_tag_);
resRow["version"].to_string(partition_schema.version_);
@ -1582,7 +1584,7 @@ MySQLMetaImpl::GetPartitionName(const std::string& collection_id, const std::str
<< collection_id << " AND partition_tag = " << mysqlpp::quote << valid_tag
<< " AND state <> " << std::to_string(CollectionSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllTables: " << allPartitionsQuery.str();
ENGINE_LOG_DEBUG << "MySQLMetaImpl::AllCollections: " << allPartitionsQuery.str();
res = allPartitionsQuery.store();
} // Scoped Connection
@ -1635,7 +1637,7 @@ MySQLMetaImpl::FilesToSearch(const std::string& collection_id, SegmentsSchema& f
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -1684,7 +1686,7 @@ MySQLMetaImpl::FilesToMerge(const std::string& collection_id, SegmentsSchema& fi
// check collection existence
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -1800,7 +1802,7 @@ MySQLMetaImpl::FilesToIndex(SegmentsSchema& files) {
if (groupItr == groups.end()) {
CollectionSchema table_schema;
table_schema.collection_id_ = table_file.collection_id_;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -1873,7 +1875,7 @@ MySQLMetaImpl::FilesByType(const std::string& collection_id, const std::vector<i
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -2034,7 +2036,7 @@ MySQLMetaImpl::FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files)
if (tables.find(table_file.collection_id_) == tables.end()) {
CollectionSchema table_schema;
table_schema.collection_id_ = table_file.collection_id_;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -2451,7 +2453,7 @@ MySQLMetaImpl::Count(const std::string& collection_id, uint64_t& result) {
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;


@ -32,65 +32,65 @@ class MySQLMetaImpl : public Meta {
~MySQLMetaImpl();
Status
CreateTable(CollectionSchema& table_schema) override;
CreateCollection(CollectionSchema& table_schema) override;
Status
DescribeTable(CollectionSchema& table_schema) override;
DescribeCollection(CollectionSchema& table_schema) override;
Status
HasTable(const std::string& collection_id, bool& has_or_not) override;
HasCollection(const std::string& collection_id, bool& has_or_not) override;
Status
AllTables(std::vector<CollectionSchema>& table_schema_array) override;
AllCollections(std::vector<CollectionSchema>& table_schema_array) override;
Status
DropTable(const std::string& collection_id) override;
DropCollection(const std::string& collection_id) override;
Status
DeleteTableFiles(const std::string& collection_id) override;
Status
CreateTableFile(SegmentSchema& file_schema) override;
CreateCollectionFile(SegmentSchema& file_schema) override;
Status
GetTableFiles(const std::string& collection_id, const std::vector<size_t>& ids,
SegmentsSchema& table_files) override;
Status
GetTableFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) override;
GetCollectionFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) override;
Status
UpdateTableIndex(const std::string& collection_id, const TableIndex& index) override;
UpdateCollectionIndex(const std::string& collection_id, const CollectionIndex& index) override;
Status
UpdateTableFlag(const std::string& collection_id, int64_t flag) override;
UpdateCollectionFlag(const std::string& collection_id, int64_t flag) override;
Status
UpdateTableFlushLSN(const std::string& collection_id, uint64_t flush_lsn) override;
Status
GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) override;
GetCollectionFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) override;
Status
GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table_files) override;
Status
UpdateTableFile(SegmentSchema& file_schema) override;
UpdateCollectionFile(SegmentSchema& file_schema) override;
Status
UpdateTableFilesToIndex(const std::string& collection_id) override;
UpdateCollectionFilesToIndex(const std::string& collection_id) override;
Status
UpdateTableFiles(SegmentsSchema& files) override;
UpdateCollectionFiles(SegmentsSchema& files) override;
Status
UpdateTableFilesRowCount(SegmentsSchema& files) override;
UpdateCollectionFilesRowCount(SegmentsSchema& files) override;
Status
DescribeTableIndex(const std::string& collection_id, TableIndex& index) override;
DescribeCollectionIndex(const std::string& collection_id, CollectionIndex& index) override;
Status
DropTableIndex(const std::string& collection_id) override;
DropCollectionIndex(const std::string& collection_id) override;
Status
CreatePartition(const std::string& collection_id, const std::string& partition_name, const std::string& tag,


@ -71,7 +71,7 @@ StoragePrototype(const std::string& path) {
make_column("engine_type", &CollectionSchema::engine_type_),
make_column("index_params", &CollectionSchema::index_params_),
make_column("metric_type", &CollectionSchema::metric_type_),
make_column("owner_table", &CollectionSchema::owner_table_, default_value("")),
make_column("owner_table", &CollectionSchema::owner_collection_, default_value("")),
make_column("partition_tag", &CollectionSchema::partition_tag_, default_value("")),
make_column("version", &CollectionSchema::version_, default_value(CURRENT_VERSION)),
make_column("flush_lsn", &CollectionSchema::flush_lsn_)),
@ -164,7 +164,7 @@ SqliteMetaImpl::Initialize() {
}
Status
SqliteMetaImpl::CreateTable(CollectionSchema& table_schema) {
SqliteMetaImpl::CreateCollection(CollectionSchema& table_schema) {
try {
server::MetricCollector metric;
@ -174,7 +174,7 @@ SqliteMetaImpl::CreateTable(CollectionSchema& table_schema) {
if (table_schema.collection_id_ == "") {
NextTableId(table_schema.collection_id_);
} else {
fiu_do_on("SqliteMetaImpl.CreateTable.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.CreateCollection.throw_exception", throw std::exception());
auto collection = ConnectorPtr->select(columns(&CollectionSchema::state_),
where(c(&CollectionSchema::collection_id_) == table_schema.collection_id_));
if (collection.size() == 1) {
@ -191,7 +191,7 @@ SqliteMetaImpl::CreateTable(CollectionSchema& table_schema) {
table_schema.created_on_ = utils::GetMicroSecTimeStamp();
try {
fiu_do_on("SqliteMetaImpl.CreateTable.insert_throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.CreateCollection.insert_throw_exception", throw std::exception());
auto id = ConnectorPtr->insert(table_schema);
table_schema.id_ = id;
} catch (std::exception& e) {
@ -200,24 +200,24 @@ SqliteMetaImpl::CreateTable(CollectionSchema& table_schema) {
ENGINE_LOG_DEBUG << "Successfully create collection: " << table_schema.collection_id_;
return utils::CreateTablePath(options_, table_schema.collection_id_);
return utils::CreateCollectionPath(options_, table_schema.collection_id_);
} catch (std::exception& e) {
return HandleException("Encounter exception when create collection", e.what());
}
}
Status
SqliteMetaImpl::DescribeTable(CollectionSchema& table_schema) {
SqliteMetaImpl::DescribeCollection(CollectionSchema& table_schema) {
try {
server::MetricCollector metric;
// multi-threaded calls to sqlite update may throw exceptions ('bad logic', etc.), so we add a lock here
std::lock_guard<std::mutex> meta_lock(meta_mutex_);
fiu_do_on("SqliteMetaImpl.DescribeTable.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.DescribeCollection.throw_exception", throw std::exception());
auto groups = ConnectorPtr->select(
columns(&CollectionSchema::id_, &CollectionSchema::state_, &CollectionSchema::dimension_, &CollectionSchema::created_on_,
&CollectionSchema::flag_, &CollectionSchema::index_file_size_, &CollectionSchema::engine_type_,
&CollectionSchema::index_params_, &CollectionSchema::metric_type_, &CollectionSchema::owner_table_,
&CollectionSchema::index_params_, &CollectionSchema::metric_type_, &CollectionSchema::owner_collection_,
&CollectionSchema::partition_tag_, &CollectionSchema::version_, &CollectionSchema::flush_lsn_),
where(c(&CollectionSchema::collection_id_) == table_schema.collection_id_ and
c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE));
@ -232,7 +232,7 @@ SqliteMetaImpl::DescribeTable(CollectionSchema& table_schema) {
table_schema.engine_type_ = std::get<6>(groups[0]);
table_schema.index_params_ = std::get<7>(groups[0]);
table_schema.metric_type_ = std::get<8>(groups[0]);
table_schema.owner_table_ = std::get<9>(groups[0]);
table_schema.owner_collection_ = std::get<9>(groups[0]);
table_schema.partition_tag_ = std::get<10>(groups[0]);
table_schema.version_ = std::get<11>(groups[0]);
table_schema.flush_lsn_ = std::get<12>(groups[0]);
@ -247,11 +247,11 @@ SqliteMetaImpl::DescribeTable(CollectionSchema& table_schema) {
}
Status
SqliteMetaImpl::HasTable(const std::string& collection_id, bool& has_or_not) {
SqliteMetaImpl::HasCollection(const std::string& collection_id, bool& has_or_not) {
has_or_not = false;
try {
fiu_do_on("SqliteMetaImpl.HasTable.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.HasCollection.throw_exception", throw std::exception());
server::MetricCollector metric;
auto tables = ConnectorPtr->select(
columns(&CollectionSchema::id_),
@ -269,16 +269,16 @@ SqliteMetaImpl::HasTable(const std::string& collection_id, bool& has_or_not) {
}
Status
SqliteMetaImpl::AllTables(std::vector<CollectionSchema>& table_schema_array) {
SqliteMetaImpl::AllCollections(std::vector<CollectionSchema>& table_schema_array) {
try {
fiu_do_on("SqliteMetaImpl.AllTables.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.AllCollections.throw_exception", throw std::exception());
server::MetricCollector metric;
auto selected = ConnectorPtr->select(
columns(&CollectionSchema::id_, &CollectionSchema::collection_id_, &CollectionSchema::dimension_, &CollectionSchema::created_on_,
&CollectionSchema::flag_, &CollectionSchema::index_file_size_, &CollectionSchema::engine_type_,
&CollectionSchema::index_params_, &CollectionSchema::metric_type_, &CollectionSchema::owner_table_,
&CollectionSchema::index_params_, &CollectionSchema::metric_type_, &CollectionSchema::owner_collection_,
&CollectionSchema::partition_tag_, &CollectionSchema::version_, &CollectionSchema::flush_lsn_),
where(c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE and c(&CollectionSchema::owner_table_) == ""));
where(c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE and c(&CollectionSchema::owner_collection_) == ""));
for (auto& collection : selected) {
CollectionSchema schema;
schema.id_ = std::get<0>(collection);
@ -290,7 +290,7 @@ SqliteMetaImpl::AllTables(std::vector<CollectionSchema>& table_schema_array) {
schema.engine_type_ = std::get<6>(collection);
schema.index_params_ = std::get<7>(collection);
schema.metric_type_ = std::get<8>(collection);
schema.owner_table_ = std::get<9>(collection);
schema.owner_collection_ = std::get<9>(collection);
schema.partition_tag_ = std::get<10>(collection);
schema.version_ = std::get<11>(collection);
schema.flush_lsn_ = std::get<12>(collection);
@ -305,9 +305,9 @@ SqliteMetaImpl::AllTables(std::vector<CollectionSchema>& table_schema_array) {
}
Status
SqliteMetaImpl::DropTable(const std::string& collection_id) {
SqliteMetaImpl::DropCollection(const std::string& collection_id) {
try {
fiu_do_on("SqliteMetaImpl.DropTable.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.DropCollection.throw_exception", throw std::exception());
server::MetricCollector metric;
@ -352,19 +352,19 @@ SqliteMetaImpl::DeleteTableFiles(const std::string& collection_id) {
}
Status
SqliteMetaImpl::CreateTableFile(SegmentSchema& file_schema) {
SqliteMetaImpl::CreateCollectionFile(SegmentSchema& file_schema) {
if (file_schema.date_ == EmptyDate) {
file_schema.date_ = utils::GetDate();
}
CollectionSchema table_schema;
table_schema.collection_id_ = file_schema.collection_id_;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
try {
fiu_do_on("SqliteMetaImpl.CreateTableFile.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.CreateCollectionFile.throw_exception", throw std::exception());
server::MetricCollector metric;
NextFileId(file_schema.file_id_);
@ -388,7 +388,7 @@ SqliteMetaImpl::CreateTableFile(SegmentSchema& file_schema) {
file_schema.id_ = id;
ENGINE_LOG_DEBUG << "Successfully create collection file, file id = " << file_schema.file_id_;
return utils::CreateTableFilePath(options_, file_schema);
return utils::CreateCollectionFilePath(options_, file_schema);
} catch (std::exception& e) {
return HandleException("Encounter exception when create collection file", e.what());
}
@ -411,7 +411,7 @@ SqliteMetaImpl::GetTableFiles(const std::string& collection_id, const std::vecto
c(&SegmentSchema::file_type_) != (int)SegmentSchema::TO_DELETE));
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -447,7 +447,7 @@ SqliteMetaImpl::GetTableFiles(const std::string& collection_id, const std::vecto
}
Status
SqliteMetaImpl::GetTableFilesBySegmentId(const std::string& segment_id,
SqliteMetaImpl::GetCollectionFilesBySegmentId(const std::string& segment_id,
milvus::engine::meta::SegmentsSchema& table_files) {
try {
table_files.clear();
@ -462,7 +462,7 @@ SqliteMetaImpl::GetTableFilesBySegmentId(const std::string& segment_id,
if (!files.empty()) {
CollectionSchema table_schema;
table_schema.collection_id_ = std::get<1>(files[0]);
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -497,10 +497,10 @@ SqliteMetaImpl::GetTableFilesBySegmentId(const std::string& segment_id,
}
Status
SqliteMetaImpl::UpdateTableFlag(const std::string& collection_id, int64_t flag) {
SqliteMetaImpl::UpdateCollectionFlag(const std::string& collection_id, int64_t flag) {
try {
server::MetricCollector metric;
fiu_do_on("SqliteMetaImpl.UpdateTableFlag.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.UpdateCollectionFlag.throw_exception", throw std::exception());
// update the flag of this collection
ConnectorPtr->update_all(set(c(&CollectionSchema::flag_) = flag), where(c(&CollectionSchema::collection_id_) == collection_id));
@ -530,7 +530,7 @@ SqliteMetaImpl::UpdateTableFlushLSN(const std::string& collection_id, uint64_t f
}
Status
SqliteMetaImpl::GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) {
SqliteMetaImpl::GetCollectionFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) {
try {
server::MetricCollector metric;
@ -588,7 +588,7 @@ SqliteMetaImpl::GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& tabl
if (groupItr == groups.end()) {
CollectionSchema table_schema;
table_schema.collection_id_ = table_file.collection_id_;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -611,11 +611,11 @@ SqliteMetaImpl::GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& tabl
}
Status
SqliteMetaImpl::UpdateTableFile(SegmentSchema& file_schema) {
SqliteMetaImpl::UpdateCollectionFile(SegmentSchema& file_schema) {
file_schema.updated_time_ = utils::GetMicroSecTimeStamp();
try {
server::MetricCollector metric;
fiu_do_on("SqliteMetaImpl.UpdateTableFile.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.UpdateCollectionFile.throw_exception", throw std::exception());
// multi-threaded calls to sqlite update may throw exceptions ('bad logic', etc.), so we add a lock here
std::lock_guard<std::mutex> meta_lock(meta_mutex_);
@ -641,32 +641,32 @@ SqliteMetaImpl::UpdateTableFile(SegmentSchema& file_schema) {
}
Status
SqliteMetaImpl::UpdateTableFiles(SegmentsSchema& files) {
SqliteMetaImpl::UpdateCollectionFiles(SegmentsSchema& files) {
try {
server::MetricCollector metric;
fiu_do_on("SqliteMetaImpl.UpdateTableFiles.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.UpdateCollectionFiles.throw_exception", throw std::exception());
// multi-threaded calls to sqlite update may throw exceptions ('bad logic', etc.), so we add a lock here
std::lock_guard<std::mutex> meta_lock(meta_mutex_);
std::map<std::string, bool> has_tables;
std::map<std::string, bool> has_collections;
for (auto& file : files) {
if (has_tables.find(file.collection_id_) != has_tables.end()) {
if (has_collections.find(file.collection_id_) != has_collections.end()) {
continue;
}
auto tables = ConnectorPtr->select(columns(&CollectionSchema::id_),
where(c(&CollectionSchema::collection_id_) == file.collection_id_ and
c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE));
if (tables.size() >= 1) {
has_tables[file.collection_id_] = true;
has_collections[file.collection_id_] = true;
} else {
has_tables[file.collection_id_] = false;
has_collections[file.collection_id_] = false;
}
}
auto commited = ConnectorPtr->transaction([&]() mutable {
for (auto& file : files) {
if (!has_tables[file.collection_id_]) {
if (!has_collections[file.collection_id_]) {
file.file_type_ = SegmentSchema::TO_DELETE;
}
@ -675,10 +675,10 @@ SqliteMetaImpl::UpdateTableFiles(SegmentsSchema& files) {
}
return true;
});
fiu_do_on("SqliteMetaImpl.UpdateTableFiles.fail_commited", commited = false);
fiu_do_on("SqliteMetaImpl.UpdateCollectionFiles.fail_commited", commited = false);
if (!commited) {
return HandleException("UpdateTableFiles error: sqlite transaction failed");
return HandleException("UpdateCollectionFiles error: sqlite transaction failed");
}
ENGINE_LOG_DEBUG << "Update " << files.size() << " collection files";
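// Editor's sketch, not part of this change: the sqlite_orm transaction idiom
// used above, reduced to its core -- stage every row update inside one
// transaction and commit or roll back as a unit. Assumes ConnectorPtr->update()
// per sqlite_orm; the lambda's return value decides the outcome.
auto committed = ConnectorPtr->transaction([&]() mutable {
    for (auto& file_schema : files) {
        file_schema.updated_time_ = utils::GetMicroSecTimeStamp();
        ConnectorPtr->update(file_schema);  // staged inside the transaction
    }
    return true;  // returning false (or throwing) rolls the transaction back
});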
@ -689,7 +689,7 @@ SqliteMetaImpl::UpdateTableFiles(SegmentsSchema& files) {
}
Status
SqliteMetaImpl::UpdateTableFilesRowCount(SegmentsSchema& files) {
SqliteMetaImpl::UpdateCollectionFilesRowCount(SegmentsSchema& files) {
try {
server::MetricCollector metric;
@ -709,10 +709,10 @@ SqliteMetaImpl::UpdateTableFilesRowCount(SegmentsSchema& files) {
}
Status
SqliteMetaImpl::UpdateTableIndex(const std::string& collection_id, const TableIndex& index) {
SqliteMetaImpl::UpdateCollectionIndex(const std::string& collection_id, const CollectionIndex& index) {
try {
server::MetricCollector metric;
fiu_do_on("SqliteMetaImpl.UpdateTableIndex.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.UpdateCollectionIndex.throw_exception", throw std::exception());
// multi-threaded calls to sqlite update may throw exceptions ('bad logic', etc.), so we add a lock here
std::lock_guard<std::mutex> meta_lock(meta_mutex_);
@ -720,7 +720,7 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& collection_id, const TableIn
auto tables = ConnectorPtr->select(
columns(&CollectionSchema::id_, &CollectionSchema::state_, &CollectionSchema::dimension_, &CollectionSchema::created_on_,
&CollectionSchema::flag_, &CollectionSchema::index_file_size_, &CollectionSchema::owner_table_,
&CollectionSchema::flag_, &CollectionSchema::index_file_size_, &CollectionSchema::owner_collection_,
&CollectionSchema::partition_tag_, &CollectionSchema::version_, &CollectionSchema::flush_lsn_),
where(c(&CollectionSchema::collection_id_) == collection_id and c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE));
@ -733,7 +733,7 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& collection_id, const TableIn
table_schema.created_on_ = std::get<3>(tables[0]);
table_schema.flag_ = std::get<4>(tables[0]);
table_schema.index_file_size_ = std::get<5>(tables[0]);
table_schema.owner_table_ = std::get<6>(tables[0]);
table_schema.owner_collection_ = std::get<6>(tables[0]);
table_schema.partition_tag_ = std::get<7>(tables[0]);
table_schema.version_ = std::get<8>(tables[0]);
table_schema.flush_lsn_ = std::get<9>(tables[0]);
@ -762,10 +762,10 @@ SqliteMetaImpl::UpdateTableIndex(const std::string& collection_id, const TableIn
}
Status
SqliteMetaImpl::UpdateTableFilesToIndex(const std::string& collection_id) {
SqliteMetaImpl::UpdateCollectionFilesToIndex(const std::string& collection_id) {
try {
server::MetricCollector metric;
fiu_do_on("SqliteMetaImpl.UpdateTableFilesToIndex.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.UpdateCollectionFilesToIndex.throw_exception", throw std::exception());
// multiple threads calling sqlite update may get exceptions ('bad logic', etc.), so we add a lock here
std::lock_guard<std::mutex> meta_lock(meta_mutex_);
@ -784,10 +784,10 @@ SqliteMetaImpl::UpdateTableFilesToIndex(const std::string& collection_id) {
}
Status
SqliteMetaImpl::DescribeTableIndex(const std::string& collection_id, TableIndex& index) {
SqliteMetaImpl::DescribeCollectionIndex(const std::string& collection_id, CollectionIndex& index) {
try {
server::MetricCollector metric;
fiu_do_on("SqliteMetaImpl.DescribeTableIndex.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.DescribeCollectionIndex.throw_exception", throw std::exception());
auto groups = ConnectorPtr->select(
columns(&CollectionSchema::engine_type_, &CollectionSchema::index_params_, &CollectionSchema::metric_type_),
@ -808,10 +808,10 @@ SqliteMetaImpl::DescribeTableIndex(const std::string& collection_id, TableIndex&
}
Status
SqliteMetaImpl::DropTableIndex(const std::string& collection_id) {
SqliteMetaImpl::DropCollectionIndex(const std::string& collection_id) {
try {
server::MetricCollector metric;
fiu_do_on("SqliteMetaImpl.DropTableIndex.throw_exception", throw std::exception());
fiu_do_on("SqliteMetaImpl.DropCollectionIndex.throw_exception", throw std::exception());
// multiple threads calling sqlite update may get exceptions ('bad logic', etc.), so we add a lock here
std::lock_guard<std::mutex> meta_lock(meta_mutex_);
@ -858,13 +858,13 @@ SqliteMetaImpl::CreatePartition(const std::string& collection_id, const std::str
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
// creating a partition under a partition is not allowed
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(DB_ERROR, "Nested partition is not allowed");
}
@ -890,11 +890,11 @@ SqliteMetaImpl::CreatePartition(const std::string& collection_id, const std::str
table_schema.id_ = -1;
table_schema.flag_ = 0;
table_schema.created_on_ = utils::GetMicroSecTimeStamp();
table_schema.owner_table_ = collection_id;
table_schema.owner_collection_ = collection_id;
table_schema.partition_tag_ = valid_tag;
table_schema.flush_lsn_ = lsn;
status = CreateTable(table_schema);
status = CreateCollection(table_schema);
if (status.code() == DB_ALREADY_EXIST) {
return Status(DB_ALREADY_EXIST, "Partition already exists");
}
@ -904,7 +904,7 @@ SqliteMetaImpl::CreatePartition(const std::string& collection_id, const std::str
Status
SqliteMetaImpl::DropPartition(const std::string& partition_name) {
return DropTable(partition_name);
return DropCollection(partition_name);
}
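
As the partition hunks above show, a partition is stored as an ordinary collection whose owner_collection_ names its parent; that is why DropPartition simply forwards to DropCollection and why nesting is rejected by inspecting that field. A reduced sketch of the guard, with CollectionSchema cut down to a stand-in rather than the full struct:

    #include <string>

    struct CollectionSchema {
        std::string owner_collection_;  // empty for root collections
    };

    // Only root collections may own partitions; a non-empty owner marks a partition.
    bool CanCreatePartitionUnder(const CollectionSchema& parent) {
        return parent.owner_collection_.empty();
    }
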
Status
@ -918,7 +918,7 @@ SqliteMetaImpl::ShowPartitions(const std::string& collection_id, std::vector<met
&CollectionSchema::flag_, &CollectionSchema::index_file_size_, &CollectionSchema::engine_type_,
&CollectionSchema::index_params_, &CollectionSchema::metric_type_, &CollectionSchema::partition_tag_,
&CollectionSchema::version_, &CollectionSchema::collection_id_),
where(c(&CollectionSchema::owner_table_) == collection_id and
where(c(&CollectionSchema::owner_collection_) == collection_id and
c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE));
for (size_t i = 0; i < partitions.size(); i++) {
@ -932,7 +932,7 @@ SqliteMetaImpl::ShowPartitions(const std::string& collection_id, std::vector<met
partition_schema.engine_type_ = std::get<6>(partitions[i]);
partition_schema.index_params_ = std::get<7>(partitions[i]);
partition_schema.metric_type_ = std::get<8>(partitions[i]);
partition_schema.owner_table_ = collection_id;
partition_schema.owner_collection_ = collection_id;
partition_schema.partition_tag_ = std::get<9>(partitions[i]);
partition_schema.version_ = std::get<10>(partitions[i]);
partition_schema.collection_id_ = std::get<11>(partitions[i]);
@ -958,7 +958,7 @@ SqliteMetaImpl::GetPartitionName(const std::string& collection_id, const std::st
auto name = ConnectorPtr->select(
columns(&CollectionSchema::collection_id_),
where(c(&CollectionSchema::owner_table_) == collection_id and c(&CollectionSchema::partition_tag_) == valid_tag and
where(c(&CollectionSchema::owner_collection_) == collection_id and c(&CollectionSchema::partition_tag_) == valid_tag and
c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE));
if (name.size() > 0) {
partition_name = std::get<0>(name[0]);
@ -993,7 +993,7 @@ SqliteMetaImpl::FilesToSearch(const std::string& collection_id, SegmentsSchema&
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -1052,7 +1052,7 @@ SqliteMetaImpl::FilesToMerge(const std::string& collection_id, SegmentsSchema& f
// check collection existence
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -1146,7 +1146,7 @@ SqliteMetaImpl::FilesToIndex(SegmentsSchema& files) {
if (groupItr == groups.end()) {
CollectionSchema table_schema;
table_schema.collection_id_ = table_file.collection_id_;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
fiu_do_on("SqliteMetaImpl_FilesToIndex_TableNotFound",
status = Status(DB_NOT_FOUND, "collection not found"));
if (!status.ok()) {
@ -1180,7 +1180,7 @@ SqliteMetaImpl::FilesByType(const std::string& collection_id, const std::vector<
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -1320,7 +1320,7 @@ SqliteMetaImpl::FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files)
if (tables.find(table_file.collection_id_) == tables.end()) {
CollectionSchema table_schema;
table_schema.collection_id_ = table_file.collection_id_;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;
}
@ -1639,7 +1639,7 @@ SqliteMetaImpl::Count(const std::string& collection_id, uint64_t& result) {
CollectionSchema table_schema;
table_schema.collection_id_ = collection_id;
auto status = DescribeTable(table_schema);
auto status = DescribeCollection(table_schema);
if (!status.ok()) {
return status;

View File

@ -31,65 +31,65 @@ class SqliteMetaImpl : public Meta {
~SqliteMetaImpl();
Status
CreateTable(CollectionSchema& table_schema) override;
CreateCollection(CollectionSchema& table_schema) override;
Status
DescribeTable(CollectionSchema& table_schema) override;
DescribeCollection(CollectionSchema& table_schema) override;
Status
HasTable(const std::string& collection_id, bool& has_or_not) override;
HasCollection(const std::string& collection_id, bool& has_or_not) override;
Status
AllTables(std::vector<CollectionSchema>& table_schema_array) override;
AllCollections(std::vector<CollectionSchema>& table_schema_array) override;
Status
DropTable(const std::string& collection_id) override;
DropCollection(const std::string& collection_id) override;
Status
DeleteTableFiles(const std::string& collection_id) override;
Status
CreateTableFile(SegmentSchema& file_schema) override;
CreateCollectionFile(SegmentSchema& file_schema) override;
Status
GetTableFiles(const std::string& collection_id, const std::vector<size_t>& ids,
SegmentsSchema& table_files) override;
Status
GetTableFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) override;
GetCollectionFilesBySegmentId(const std::string& segment_id, SegmentsSchema& table_files) override;
Status
UpdateTableIndex(const std::string& collection_id, const TableIndex& index) override;
UpdateCollectionIndex(const std::string& collection_id, const CollectionIndex& index) override;
Status
UpdateTableFlag(const std::string& collection_id, int64_t flag) override;
UpdateCollectionFlag(const std::string& collection_id, int64_t flag) override;
Status
UpdateTableFlushLSN(const std::string& collection_id, uint64_t flush_lsn) override;
Status
GetTableFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) override;
GetCollectionFlushLSN(const std::string& collection_id, uint64_t& flush_lsn) override;
Status
GetTableFilesByFlushLSN(uint64_t flush_lsn, SegmentsSchema& table_files) override;
Status
UpdateTableFile(SegmentSchema& file_schema) override;
UpdateCollectionFile(SegmentSchema& file_schema) override;
Status
UpdateTableFilesToIndex(const std::string& collection_id) override;
UpdateCollectionFilesToIndex(const std::string& collection_id) override;
Status
UpdateTableFiles(SegmentsSchema& files) override;
UpdateCollectionFiles(SegmentsSchema& files) override;
Status
UpdateTableFilesRowCount(SegmentsSchema& files) override;
UpdateCollectionFilesRowCount(SegmentsSchema& files) override;
Status
DescribeTableIndex(const std::string& collection_id, TableIndex& index) override;
DescribeCollectionIndex(const std::string& collection_id, CollectionIndex& index) override;
Status
DropTableIndex(const std::string& collection_id) override;
DropCollectionIndex(const std::string& collection_id) override;
Status
CreatePartition(const std::string& collection_id, const std::string& partition_name, const std::string& tag,

View File

@ -62,7 +62,7 @@ WalManager::Init(const meta::MetaPtr& meta) {
meta->GetGlobalLastLSN(recovery_start);
std::vector<meta::CollectionSchema> table_schema_array;
auto status = meta->AllTables(table_schema_array);
auto status = meta->AllCollections(table_schema_array);
if (!status.ok()) {
return WAL_META_ERROR;
}
@ -201,7 +201,7 @@ WalManager::GetNextRecord(MXLogRecord& record) {
}
uint64_t
WalManager::CreateTable(const std::string& collection_id) {
WalManager::CreateCollection(const std::string& collection_id) {
WAL_LOG_INFO << "create collection " << collection_id << " " << last_applied_lsn_;
std::lock_guard<std::mutex> lck(mutex_);
uint64_t applied_lsn = last_applied_lsn_;
@ -210,14 +210,14 @@ WalManager::CreateTable(const std::string& collection_id) {
}
void
WalManager::DropTable(const std::string& collection_id) {
WalManager::DropCollection(const std::string& collection_id) {
WAL_LOG_INFO << "drop collection " << collection_id;
std::lock_guard<std::mutex> lck(mutex_);
tables_.erase(collection_id);
}
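
Read together, the three renamed WAL entry points above amount to per-collection LSN bookkeeping behind a mutex. A minimal sketch under that reading (WalBook and its members are hypothetical; the real manager also persists log records and recovers from them):

    #include <cstdint>
    #include <map>
    #include <mutex>
    #include <string>

    class WalBook {
     public:
        uint64_t CreateCollection(const std::string& id) {
            std::lock_guard<std::mutex> lck(mutex_);
            collections_[id] = last_applied_lsn_;  // start tracking at the current LSN
            return last_applied_lsn_;
        }
        void DropCollection(const std::string& id) {
            std::lock_guard<std::mutex> lck(mutex_);
            collections_.erase(id);  // stop tracking a dropped collection
        }
        void CollectionFlushed(const std::string& id, uint64_t lsn) {
            std::lock_guard<std::mutex> lck(mutex_);
            auto it = collections_.find(id);
            if (it != collections_.end()) {
                it->second = lsn;  // remember the flush point for recovery
            }
        }
     private:
        std::mutex mutex_;
        uint64_t last_applied_lsn_ = 0;
        std::map<std::string, uint64_t> collections_;
    };
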
void
WalManager::TableFlushed(const std::string& collection_id, uint64_t lsn) {
WalManager::CollectionFlushed(const std::string& collection_id, uint64_t lsn) {
std::unique_lock<std::mutex> lck(mutex_);
auto it = tables_.find(collection_id);
if (it != tables_.end()) {

View File

@ -62,7 +62,7 @@ class WalManager {
* @retval lsn
*/
uint64_t
CreateTable(const std::string& collection_id);
CreateCollection(const std::string& collection_id);
/*
* Drop collection
@ -70,7 +70,7 @@ class WalManager {
* @retval none
*/
void
DropTable(const std::string& collection_id);
DropCollection(const std::string& collection_id);
/*
* Collection is flushed
@ -78,7 +78,7 @@ class WalManager {
* @param lsn: flushed lsn
*/
void
TableFlushed(const std::string& collection_id, uint64_t lsn);
CollectionFlushed(const std::string& collection_id, uint64_t lsn);
/*
* Insert

View File

@ -153,22 +153,22 @@ TaskTable::PickToLoad(uint64_t limit) {
std::vector<uint64_t> indexes;
bool cross = false;
uint64_t available_begin = collection_.front() + 1;
for (uint64_t i = 0, loaded_count = 0, pick_count = 0; i < collection_.size() && pick_count < limit; ++i) {
uint64_t available_begin = table_.front() + 1;
for (uint64_t i = 0, loaded_count = 0, pick_count = 0; i < table_.size() && pick_count < limit; ++i) {
auto index = available_begin + i;
if (not collection_[index])
if (not table_[index])
break;
if (index % collection_.capacity() == collection_.rear())
if (index % table_.capacity() == table_.rear())
break;
if (not cross && collection_[index]->IsFinish()) {
collection_.set_front(index);
} else if (collection_[index]->state == TaskTableItemState::LOADED) {
if (not cross && table_[index]->IsFinish()) {
table_.set_front(index);
} else if (table_[index]->state == TaskTableItemState::LOADED) {
cross = true;
++loaded_count;
if (loaded_count > 2)
return std::vector<uint64_t>();
} else if (collection_[index]->state == TaskTableItemState::START) {
auto task = collection_[index]->task;
} else if (table_[index]->state == TaskTableItemState::START) {
auto task = table_[index]->task;
// if task is a build index task, limit it
if (task->Type() == TaskType::BuildIndexTask && task->path().Current() == "cpu") {
@ -186,19 +186,18 @@ TaskTable::PickToLoad(uint64_t limit) {
return indexes;
#else
size_t count = 0;
for (uint64_t j = last_finish_ + 1; j < collection_.size(); ++j) {
if (not collection_[j]) {
for (uint64_t j = last_finish_ + 1; j < table_.size(); ++j) {
if (not table_[j]) {
SERVER_LOG_WARNING << "collection[" << j << "] is nullptr";
}
if (collection_[j]->task->path().Current() == "cpu") {
if (collection_[j]->task->Type() == TaskType::BuildIndexTask &&
BuildMgrInst::GetInstance()->numoftasks() < 1) {
if (table_[j]->task->path().Current() == "cpu") {
if (table_[j]->task->Type() == TaskType::BuildIndexTask && BuildMgrInst::GetInstance()->numoftasks() < 1) {
return std::vector<uint64_t>();
}
}
if (collection_[j]->state == TaskTableItemState::LOADED) {
if (table_[j]->state == TaskTableItemState::LOADED) {
++count;
if (count > 2)
return std::vector<uint64_t>();
@ -207,11 +206,11 @@ TaskTable::PickToLoad(uint64_t limit) {
std::vector<uint64_t> indexes;
bool cross = false;
for (uint64_t i = last_finish_ + 1, count = 0; i < collection_.size() && count < limit; ++i) {
if (not cross && collection_[i]->IsFinish()) {
for (uint64_t i = last_finish_ + 1, count = 0; i < table_.size() && count < limit; ++i) {
if (not cross && table_[i]->IsFinish()) {
last_finish_ = i;
} else if (collection_[i]->state == TaskTableItemState::START) {
auto task = collection_[i]->task;
} else if (table_[i]->state == TaskTableItemState::START) {
auto task = table_[i]->task;
if (task->Type() == TaskType::BuildIndexTask && task->path().Current() == "cpu") {
if (BuildMgrInst::GetInstance()->numoftasks() == 0) {
break;
@ -237,19 +236,19 @@ TaskTable::PickToExecute(uint64_t limit) {
// TimeRecorder rc("");
std::vector<uint64_t> indexes;
bool cross = false;
uint64_t available_begin = collection_.front() + 1;
for (uint64_t i = 0, pick_count = 0; i < collection_.size() && pick_count < limit; ++i) {
uint64_t available_begin = table_.front() + 1;
for (uint64_t i = 0, pick_count = 0; i < table_.size() && pick_count < limit; ++i) {
uint64_t index = available_begin + i;
if (not collection_[index]) {
if (not table_[index]) {
break;
}
if (index % collection_.capacity() == collection_.rear()) {
if (index % table_.capacity() == table_.rear()) {
break;
}
if (not cross && collection_[index]->IsFinish()) {
collection_.set_front(index);
} else if (collection_[index]->state == TaskTableItemState::LOADED) {
if (not cross && table_[index]->IsFinish()) {
table_.set_front(index);
} else if (table_[index]->state == TaskTableItemState::LOADED) {
cross = true;
indexes.push_back(index);
++pick_count;
@ -266,7 +265,7 @@ TaskTable::Put(TaskPtr task, TaskTableItemPtr from) {
item->task = std::move(task);
item->state = TaskTableItemState::START;
item->timestamp.start = get_current_timestamp();
collection_.put(std::move(item));
table_.put(std::move(item));
if (subscriber_) {
subscriber_();
}
@ -275,10 +274,10 @@ TaskTable::Put(TaskPtr task, TaskTableItemPtr from) {
size_t
TaskTable::TaskToExecute() {
size_t count = 0;
auto begin = collection_.front() + 1;
for (size_t i = 0; i < collection_.size(); ++i) {
auto begin = table_.front() + 1;
for (size_t i = 0; i < table_.size(); ++i) {
auto index = begin + i;
if (collection_[index] && collection_[index]->state == TaskTableItemState::LOADED) {
if (table_[index] && table_[index]->state == TaskTableItemState::LOADED) {
++count;
}
}
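
The scans above all follow the same circular-buffer walk: start one past front(), stop when the probe index wraps onto rear(). A toy version of that traversal, assuming a plain vector as backing store (MiniCircle is illustrative only, not the real CircleQueue):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct MiniCircle {
        std::vector<int> slots;  // 0 means an empty slot
        uint64_t front = 0;
        uint64_t rear = 5;
        explicit MiniCircle(size_t cap) : slots(cap) {}
        size_t capacity() const { return slots.size(); }
    };

    // Count occupied slots between front and rear, wrapping modulo capacity.
    size_t CountLive(const MiniCircle& q) {
        size_t count = 0;
        for (uint64_t i = 0; i < q.capacity(); ++i) {
            uint64_t index = (q.front + 1 + i) % q.capacity();
            if (index == q.rear % q.capacity()) {
                break;  // wrapped onto the tail, nothing further is valid
            }
            if (q.slots[index] != 0) {
                ++count;
            }
        }
        return count;
    }
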

View File

@ -97,7 +97,7 @@ struct TaskTableItem : public interface::dumpable {
class TaskTable : public interface::dumpable {
public:
TaskTable() : collection_(1ULL << 16ULL) {
TaskTable() : table_(1ULL << 16ULL) {
}
TaskTable(const TaskTable&) = delete;
@ -127,22 +127,22 @@ class TaskTable : public interface::dumpable {
public:
inline const TaskTableItemPtr& operator[](uint64_t index) {
return collection_[index];
return table_[index];
}
inline const TaskTableItemPtr&
at(uint64_t index) {
return collection_[index];
return table_[index];
}
inline size_t
capacity() {
return collection_.capacity();
return table_.capacity();
}
inline size_t
size() {
return collection_.size();
return table_.size();
}
public:
@ -156,7 +156,7 @@ class TaskTable : public interface::dumpable {
*/
inline bool
Load(uint64_t index) {
return collection_[index]->Load();
return table_[index]->Load();
}
/*
@ -166,7 +166,7 @@ class TaskTable : public interface::dumpable {
*/
inline bool
Loaded(uint64_t index) {
return collection_[index]->Loaded();
return table_[index]->Loaded();
}
/*
@ -176,7 +176,7 @@ class TaskTable : public interface::dumpable {
*/
inline bool
Execute(uint64_t index) {
return collection_[index]->Execute();
return table_[index]->Execute();
}
/*
@ -186,7 +186,7 @@ class TaskTable : public interface::dumpable {
*/
inline bool
Executed(uint64_t index) {
return collection_[index]->Executed();
return table_[index]->Executed();
}
/*
@ -197,7 +197,7 @@ class TaskTable : public interface::dumpable {
inline bool
Move(uint64_t index) {
return collection_[index]->Move();
return table_[index]->Move();
}
/*
@ -207,12 +207,12 @@ class TaskTable : public interface::dumpable {
*/
inline bool
Moved(uint64_t index) {
return collection_[index]->Moved();
return table_[index]->Moved();
}
private:
std::uint64_t id_ = 0;
CircleQueue<TaskTableItemPtr> collection_;
CircleQueue<TaskTableItemPtr> table_;
std::function<void(void)> subscriber_ = nullptr;
// cache the last finished index to avoid always picking tasks from the beginning

View File

@ -130,7 +130,7 @@ XBuildIndexTask::Execute() {
table_file.file_type_ = engine::meta::SegmentSchema::NEW_INDEX;
engine::meta::MetaPtr meta_ptr = build_index_job->meta();
Status status = meta_ptr->CreateTableFile(table_file);
Status status = meta_ptr->CreateCollectionFile(table_file);
fiu_do_on("XBuildIndexTask.Execute.create_table_success", status = Status::OK());
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to create collection file: " << status.ToString();
@ -153,7 +153,7 @@ XBuildIndexTask::Execute() {
ENGINE_LOG_ERROR << msg;
table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
status = meta_ptr->UpdateTableFile(table_file);
status = meta_ptr->UpdateCollectionFile(table_file);
ENGINE_LOG_DEBUG << "Build index fail, mark file: " << table_file.file_id_ << " to to_delete";
build_index_job->BuildIndexDone(to_index_id_);
@ -163,11 +163,11 @@ XBuildIndexTask::Execute() {
}
// step 4: if collection has been deleted, don't save index file
bool has_table = false;
meta_ptr->HasTable(file_->collection_id_, has_table);
fiu_do_on("XBuildIndexTask.Execute.has_table", has_table = true);
bool has_collection = false;
meta_ptr->HasCollection(file_->collection_id_, has_collection);
fiu_do_on("XBuildIndexTask.Execute.has_collection", has_collection = true);
if (!has_table) {
if (!has_collection) {
meta_ptr->DeleteTableFiles(file_->collection_id_);
build_index_job->BuildIndexDone(to_index_id_);
@ -194,7 +194,7 @@ XBuildIndexTask::Execute() {
// if failed to serialize index file to disk
// typical errors: out of disk space, out of memory, or permission denied
table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
status = meta_ptr->UpdateTableFile(table_file);
status = meta_ptr->UpdateCollectionFile(table_file);
ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << table_file.file_id_ << " to to_delete";
ENGINE_LOG_ERROR << "Failed to persist index file: " << table_file.location_
@ -217,7 +217,7 @@ XBuildIndexTask::Execute() {
engine::meta::SegmentsSchema update_files = {table_file, origin_file};
if (status.ok()) { // make sure index file is successfully serialized to disk
status = meta_ptr->UpdateTableFiles(update_files);
status = meta_ptr->UpdateCollectionFiles(update_files);
}
fiu_do_on("XBuildIndexTask.Execute.update_table_file_fail", status = Status(SERVER_UNEXPECTED_ERROR, ""));
@ -231,11 +231,11 @@ XBuildIndexTask::Execute() {
} else {
// failed to update meta, mark the new file as to_delete, don't delete old file
origin_file.file_type_ = engine::meta::SegmentSchema::TO_INDEX;
status = meta_ptr->UpdateTableFile(origin_file);
status = meta_ptr->UpdateCollectionFile(origin_file);
ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << origin_file.file_id_ << " to to_index";
table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
status = meta_ptr->UpdateTableFile(table_file);
status = meta_ptr->UpdateCollectionFile(table_file);
ENGINE_LOG_DEBUG << "Failed to up date file to index, mark file: " << table_file.file_id_
<< " to to_delete";
}
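
The has_collection check above guards against a race: the collection may be dropped while its index is still building, in which case the new file must be discarded rather than persisted. A stripped-down sketch of that decision, with MetaStub approximating the meta layer's HasCollection call:

    #include <set>
    #include <string>

    struct MetaStub {
        std::set<std::string> live;
        void HasCollection(const std::string& id, bool& has) const {
            has = live.count(id) > 0;
        }
    };

    // Returns false when the collection vanished mid-build; the caller then
    // deletes the freshly created index file instead of saving it.
    bool ShouldPersistIndex(const MetaStub& meta, const std::string& collection_id) {
        bool has_collection = false;
        meta.HasCollection(collection_id, has_collection);
        return has_collection;
    }
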

View File

@ -206,13 +206,13 @@ DBWrapper::StartService() {
// preload collection
std::string preload_tables;
s = config.GetDBConfigPreloadTable(preload_tables);
s = config.GetDBConfigPreloadCollection(preload_tables);
if (!s.ok()) {
std::cerr << s.ToString() << std::endl;
return s;
}
s = PreloadTables(preload_tables);
s = PreloadCollections(preload_tables);
if (!s.ok()) {
std::cerr << "ERROR! Failed to preload tables: " << preload_tables << std::endl;
std::cerr << s.ToString() << std::endl;
@ -232,16 +232,16 @@ DBWrapper::StopService() {
}
Status
DBWrapper::PreloadTables(const std::string& preload_tables) {
DBWrapper::PreloadCollections(const std::string& preload_tables) {
if (preload_tables.empty()) {
// do nothing
} else if (preload_tables == "*") {
// load all tables
std::vector<engine::meta::CollectionSchema> table_schema_array;
db_->AllTables(table_schema_array);
db_->AllCollections(table_schema_array);
for (auto& schema : table_schema_array) {
auto status = db_->PreloadTable(schema.collection_id_);
auto status = db_->PreloadCollection(schema.collection_id_);
if (!status.ok()) {
return status;
}
@ -250,7 +250,7 @@ DBWrapper::PreloadTables(const std::string& preload_tables) {
std::vector<std::string> collection_names;
StringHelpFunctions::SplitStringByDelimeter(preload_tables, ",", collection_names);
for (auto& name : collection_names) {
auto status = db_->PreloadTable(name);
auto status = db_->PreloadCollection(name);
if (!status.ok()) {
return status;
}
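
The preload setting handled above has three shapes: empty (load nothing), "*" (load every collection), or a comma-separated list of collection names. A self-contained sketch of the list case, approximating StringHelpFunctions::SplitStringByDelimeter with a stringstream:

    #include <sstream>
    #include <string>
    #include <vector>

    // Empty and "*" are handled by the caller, as in PreloadCollections above;
    // anything else is split on commas into individual collection names.
    std::vector<std::string> ParsePreload(const std::string& value) {
        std::vector<std::string> names;
        if (value.empty() || value == "*") {
            return names;
        }
        std::stringstream ss(value);
        std::string name;
        while (std::getline(ss, name, ',')) {
            if (!name.empty()) {
                names.push_back(name);
            }
        }
        return names;
    }
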

View File

@ -48,7 +48,7 @@ class DBWrapper {
private:
Status
PreloadTables(const std::string& preload_tables);
PreloadCollections(const std::string& preload_tables);
private:
engine::DBPtr db_;

View File

@ -17,52 +17,53 @@
#include "server/delivery/request/BaseRequest.h"
#include "server/delivery/request/CmdRequest.h"
#include "server/delivery/request/CompactRequest.h"
#include "server/delivery/request/CountTableRequest.h"
#include "server/delivery/request/CountCollectionRequest.h"
#include "server/delivery/request/CreateCollectionRequest.h"
#include "server/delivery/request/CreateIndexRequest.h"
#include "server/delivery/request/CreatePartitionRequest.h"
#include "server/delivery/request/CreateTableRequest.h"
#include "server/delivery/request/DeleteByIDRequest.h"
#include "server/delivery/request/DescribeCollectionRequest.h"
#include "server/delivery/request/DescribeIndexRequest.h"
#include "server/delivery/request/DescribeTableRequest.h"
#include "server/delivery/request/DropCollectionRequest.h"
#include "server/delivery/request/DropIndexRequest.h"
#include "server/delivery/request/DropPartitionRequest.h"
#include "server/delivery/request/DropTableRequest.h"
#include "server/delivery/request/FlushRequest.h"
#include "server/delivery/request/GetVectorByIDRequest.h"
#include "server/delivery/request/GetVectorIDsRequest.h"
#include "server/delivery/request/HasTableRequest.h"
#include "server/delivery/request/HasCollectionRequest.h"
#include "server/delivery/request/InsertRequest.h"
#include "server/delivery/request/PreloadTableRequest.h"
#include "server/delivery/request/PreloadCollectionRequest.h"
#include "server/delivery/request/SearchByIDRequest.h"
#include "server/delivery/request/SearchRequest.h"
#include "server/delivery/request/ShowCollectionInfoRequest.h"
#include "server/delivery/request/ShowCollectionsRequest.h"
#include "server/delivery/request/ShowPartitionsRequest.h"
#include "server/delivery/request/ShowTableInfoRequest.h"
#include "server/delivery/request/ShowTablesRequest.h"
namespace milvus {
namespace server {
Status
RequestHandler::CreateTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
int64_t dimension, int64_t index_file_size, int64_t metric_type) {
RequestHandler::CreateCollection(const std::shared_ptr<Context>& context, const std::string& collection_name,
int64_t dimension, int64_t index_file_size, int64_t metric_type) {
BaseRequestPtr request_ptr =
CreateTableRequest::Create(context, collection_name, dimension, index_file_size, metric_type);
CreateCollectionRequest::Create(context, collection_name, dimension, index_file_size, metric_type);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::HasTable(const std::shared_ptr<Context>& context, const std::string& collection_name, bool& has_table) {
BaseRequestPtr request_ptr = HasTableRequest::Create(context, collection_name, has_table);
RequestHandler::HasCollection(const std::shared_ptr<Context>& context, const std::string& collection_name,
bool& has_collection) {
BaseRequestPtr request_ptr = HasCollectionRequest::Create(context, collection_name, has_collection);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::DropTable(const std::shared_ptr<Context>& context, const std::string& collection_name) {
BaseRequestPtr request_ptr = DropTableRequest::Create(context, collection_name);
RequestHandler::DropCollection(const std::shared_ptr<Context>& context, const std::string& collection_name) {
BaseRequestPtr request_ptr = DropCollectionRequest::Create(context, collection_name);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
@ -105,17 +106,17 @@ RequestHandler::GetVectorIDs(const std::shared_ptr<Context>& context, const std:
}
Status
RequestHandler::ShowTables(const std::shared_ptr<Context>& context, std::vector<std::string>& tables) {
BaseRequestPtr request_ptr = ShowTablesRequest::Create(context, tables);
RequestHandler::ShowCollections(const std::shared_ptr<Context>& context, std::vector<std::string>& collections) {
BaseRequestPtr request_ptr = ShowCollectionsRequest::Create(context, collections);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::ShowTableInfo(const std::shared_ptr<Context>& context, const std::string& collection_name,
TableInfo& table_info) {
BaseRequestPtr request_ptr = ShowTableInfoRequest::Create(context, collection_name, table_info);
RequestHandler::ShowCollectionInfo(const std::shared_ptr<Context>& context, const std::string& collection_name,
CollectionInfo& collection_info) {
BaseRequestPtr request_ptr = ShowCollectionInfoRequest::Create(context, collection_name, collection_info);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
@ -145,18 +146,18 @@ RequestHandler::SearchByID(const std::shared_ptr<Context>& context, const std::s
}
Status
RequestHandler::DescribeTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
CollectionSchema& table_schema) {
BaseRequestPtr request_ptr = DescribeTableRequest::Create(context, collection_name, table_schema);
RequestHandler::DescribeCollection(const std::shared_ptr<Context>& context, const std::string& collection_name,
CollectionSchema& collection_schema) {
BaseRequestPtr request_ptr = DescribeCollectionRequest::Create(context, collection_name, collection_schema);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
}
Status
RequestHandler::CountTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
int64_t& count) {
BaseRequestPtr request_ptr = CountTableRequest::Create(context, collection_name, count);
RequestHandler::CountCollection(const std::shared_ptr<Context>& context, const std::string& collection_name,
int64_t& count) {
BaseRequestPtr request_ptr = CountCollectionRequest::Create(context, collection_name, count);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
@ -180,8 +181,8 @@ RequestHandler::DeleteByID(const std::shared_ptr<Context>& context, const std::s
}
Status
RequestHandler::PreloadTable(const std::shared_ptr<Context>& context, const std::string& collection_name) {
BaseRequestPtr request_ptr = PreloadTableRequest::Create(context, collection_name);
RequestHandler::PreloadCollection(const std::shared_ptr<Context>& context, const std::string& collection_name) {
BaseRequestPtr request_ptr = PreloadCollectionRequest::Create(context, collection_name);
RequestScheduler::ExecRequest(request_ptr);
return request_ptr->status();
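
Every handler above follows one shape: wrap the arguments in a request object, hand it to the scheduler, and return the status stored on the request. A compressed sketch of that flow with stand-in Status and BaseRequest types (the real RequestScheduler dispatches by request group rather than executing inline):

    #include <memory>
    #include <string>

    struct Status { bool ok = true; };

    struct BaseRequest {
        virtual ~BaseRequest() = default;
        virtual Status OnExecute() = 0;
        Status status_;
    };
    using BaseRequestPtr = std::shared_ptr<BaseRequest>;

    // Stand-in for RequestScheduler::ExecRequest: run the request, keep its status.
    void ExecRequest(const BaseRequestPtr& request_ptr) {
        if (request_ptr) {
            request_ptr->status_ = request_ptr->OnExecute();
        }
    }

    struct HasCollectionRequest : BaseRequest {
        std::string collection_name;
        bool* has_collection = nullptr;
        Status OnExecute() override {
            *has_collection = !collection_name.empty();  // stub for the meta lookup
            return Status{};
        }
    };

    Status HasCollection(const std::string& collection_name, bool& has_collection) {
        auto request_ptr = std::make_shared<HasCollectionRequest>();
        request_ptr->collection_name = collection_name;
        request_ptr->has_collection = &has_collection;
        ExecRequest(request_ptr);
        return request_ptr->status_;
    }
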

View File

@ -27,14 +27,14 @@ class RequestHandler {
RequestHandler() = default;
Status
CreateTable(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t dimension,
int64_t index_file_size, int64_t metric_type);
CreateCollection(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t dimension,
int64_t index_file_size, int64_t metric_type);
Status
HasTable(const std::shared_ptr<Context>& context, const std::string& collection_name, bool& has_table);
HasCollection(const std::shared_ptr<Context>& context, const std::string& collection_name, bool& has_collection);
Status
DropTable(const std::shared_ptr<Context>& context, const std::string& collection_name);
DropCollection(const std::shared_ptr<Context>& context, const std::string& collection_name);
Status
CreateIndex(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t index_type,
@ -53,10 +53,11 @@ class RequestHandler {
const std::string& segment_name, std::vector<int64_t>& vector_ids);
Status
ShowTables(const std::shared_ptr<Context>& context, std::vector<std::string>& tables);
ShowCollections(const std::shared_ptr<Context>& context, std::vector<std::string>& collections);
Status
ShowTableInfo(const std::shared_ptr<Context>& context, const std::string& collection_name, TableInfo& table_info);
ShowCollectionInfo(const std::shared_ptr<Context>& context, const std::string& collection_name,
CollectionInfo& collection_info);
Status
Search(const std::shared_ptr<Context>& context, const std::string& collection_name,
@ -70,11 +71,11 @@ class RequestHandler {
TopKQueryResult& result);
Status
DescribeTable(const std::shared_ptr<Context>& context, const std::string& collection_name,
CollectionSchema& table_schema);
DescribeCollection(const std::shared_ptr<Context>& context, const std::string& collection_name,
CollectionSchema& collection_schema);
Status
CountTable(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t& count);
CountCollection(const std::shared_ptr<Context>& context, const std::string& collection_name, int64_t& count);
Status
Cmd(const std::shared_ptr<Context>& context, const std::string& cmd, std::string& reply);
@ -84,7 +85,7 @@ class RequestHandler {
const std::vector<int64_t>& vector_ids);
Status
PreloadTable(const std::shared_ptr<Context>& context, const std::string& collection_name);
PreloadCollection(const std::shared_ptr<Context>& context, const std::string& collection_name);
Status
DescribeIndex(const std::shared_ptr<Context>& context, const std::string& collection_name, IndexParam& param);

View File

@ -39,14 +39,14 @@ RequestGroup(BaseRequest::RequestType type) {
{BaseRequest::kGetVectorIDs, INFO_REQUEST_GROUP},
// collection operations
{BaseRequest::kShowTables, INFO_REQUEST_GROUP},
{BaseRequest::kCreateTable, DDL_DML_REQUEST_GROUP},
{BaseRequest::kHasTable, INFO_REQUEST_GROUP},
{BaseRequest::kDescribeTable, INFO_REQUEST_GROUP},
{BaseRequest::kCountTable, INFO_REQUEST_GROUP},
{BaseRequest::kShowTableInfo, INFO_REQUEST_GROUP},
{BaseRequest::kDropTable, DDL_DML_REQUEST_GROUP},
{BaseRequest::kPreloadTable, DQL_REQUEST_GROUP},
{BaseRequest::kShowCollections, INFO_REQUEST_GROUP},
{BaseRequest::kCreateCollection, DDL_DML_REQUEST_GROUP},
{BaseRequest::kHasCollection, INFO_REQUEST_GROUP},
{BaseRequest::kDescribeCollection, INFO_REQUEST_GROUP},
{BaseRequest::kCountCollection, INFO_REQUEST_GROUP},
{BaseRequest::kShowCollectionInfo, INFO_REQUEST_GROUP},
{BaseRequest::kDropCollection, DDL_DML_REQUEST_GROUP},
{BaseRequest::kPreloadCollection, DQL_REQUEST_GROUP},
// partition operations
{BaseRequest::kCreatePartition, DDL_DML_REQUEST_GROUP},
@ -132,7 +132,7 @@ BaseRequest::set_status(const Status& status) {
std::string
BaseRequest::TableNotExistMsg(const std::string& collection_name) {
return "Collection " + collection_name +
" does not exist. Use milvus.has_table to verify whether the collection exists. "
" does not exist. Use milvus.has_collection to verify whether the collection exists. "
"You also can check whether the collection name exists.";
}
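
The regrouped map above sorts the renamed requests into scheduling groups: read-only lookups stay in the INFO group, mutations in DDL/DML, and preload in DQL. A sketch of the same dispatch with illustrative enum values and group names (not the real constants):

    #include <map>
    #include <string>

    enum class RequestType {
        kShowCollections, kCreateCollection, kHasCollection,
        kDropCollection, kPreloadCollection
    };

    std::string RequestGroup(RequestType type) {
        static const std::map<RequestType, std::string> groups = {
            {RequestType::kShowCollections, "INFO"},      // read-only
            {RequestType::kHasCollection, "INFO"},        // read-only
            {RequestType::kCreateCollection, "DDL_DML"},  // mutation
            {RequestType::kDropCollection, "DDL_DML"},    // mutation
            {RequestType::kPreloadCollection, "DQL"},     // load path
        };
        auto it = groups.find(type);
        return it == groups.end() ? "UNKNOWN" : it->second;
    }
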

View File

@ -108,7 +108,7 @@ struct PartitionStat {
std::vector<SegmentStat> segments_stat_;
};
struct TableInfo {
struct CollectionInfo {
int64_t total_row_num_ = 0;
std::vector<PartitionStat> partitions_stat_;
};
@ -128,14 +128,14 @@ class BaseRequest {
kGetVectorIDs,
// collection operations
kShowTables = 300,
kCreateTable,
kHasTable,
kDescribeTable,
kCountTable,
kShowTableInfo,
kDropTable,
kPreloadTable,
kShowCollections = 300,
kCreateCollection,
kHasCollection,
kDescribeCollection,
kCountCollection,
kShowCollectionInfo,
kDropCollection,
kPreloadCollection,
// partition operations
kCreatePartition = 400,

View File

@ -51,7 +51,7 @@ CompactRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -59,7 +59,7 @@ CompactRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
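
This owner_collection_ test recurs in nearly every request below (count, describe, drop, flush, delete by ID): operations accept only root collections, and a schema whose owner field is set is a partition, reported to the caller as nonexistent. Reduced to its core with a stand-in struct:

    #include <string>

    struct CollectionSchema {
        std::string owner_collection_;
    };

    // Root collections have no owner; partitions carry their parent's id and
    // are rejected by the per-request guard shown above.
    bool IsRootCollection(const CollectionSchema& schema) {
        return schema.owner_collection_.empty();
    }
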

View File

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include "server/delivery/request/CountTableRequest.h"
#include "server/delivery/request/CountCollectionRequest.h"
#include "BaseRequest.h"
#include "server/DBWrapper.h"
#include "utils/Log.h"
@ -22,21 +22,21 @@
namespace milvus {
namespace server {
CountTableRequest::CountTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, int64_t& row_count)
: BaseRequest(context, BaseRequest::kCountTable), collection_name_(collection_name), row_count_(row_count) {
CountCollectionRequest::CountCollectionRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, int64_t& row_count)
: BaseRequest(context, BaseRequest::kCountCollection), collection_name_(collection_name), row_count_(row_count) {
}
BaseRequestPtr
CountTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t& row_count) {
return std::shared_ptr<BaseRequest>(new CountTableRequest(context, collection_name, row_count));
CountCollectionRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, int64_t& row_count) {
return std::shared_ptr<BaseRequest>(new CountCollectionRequest(context, collection_name, row_count));
}
Status
CountTableRequest::OnExecute() {
CountCollectionRequest::OnExecute() {
try {
std::string hdr = "CountTableRequest(collection=" + collection_name_ + ")";
std::string hdr = "CountCollectionRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
@ -48,7 +48,7 @@ CountTableRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -56,7 +56,7 @@ CountTableRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
@ -65,10 +65,10 @@ CountTableRequest::OnExecute() {
// step 2: get row count
uint64_t row_count = 0;
status = DBWrapper::DB()->GetTableRowCount(collection_name_, row_count);
fiu_do_on("CountTableRequest.OnExecute.db_not_found", status = Status(DB_NOT_FOUND, ""));
fiu_do_on("CountTableRequest.OnExecute.status_error", status = Status(SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("CountTableRequest.OnExecute.throw_std_exception", throw std::exception());
status = DBWrapper::DB()->GetCollectionRowCount(collection_name_, row_count);
fiu_do_on("CountCollectionRequest.OnExecute.db_not_found", status = Status(DB_NOT_FOUND, ""));
fiu_do_on("CountCollectionRequest.OnExecute.status_error", status = Status(SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("CountCollectionRequest.OnExecute.throw_std_exception", throw std::exception());
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));

View File

@ -19,15 +19,15 @@
namespace milvus {
namespace server {
class CountTableRequest : public BaseRequest {
class CountCollectionRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t& row_count);
protected:
CountTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t& row_count);
CountCollectionRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t& row_count);
Status
OnExecute() override;

View File

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include "server/delivery/request/CreateTableRequest.h"
#include "server/delivery/request/CreateCollectionRequest.h"
#include "db/Utils.h"
#include "server/DBWrapper.h"
#include "server/delivery/request/BaseRequest.h"
@ -24,10 +24,10 @@
namespace milvus {
namespace server {
CreateTableRequest::CreateTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, int64_t dimension, int64_t index_file_size,
int64_t metric_type)
: BaseRequest(context, BaseRequest::kCreateTable),
CreateCollectionRequest::CreateCollectionRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, int64_t dimension,
int64_t index_file_size, int64_t metric_type)
: BaseRequest(context, BaseRequest::kCreateCollection),
collection_name_(collection_name),
dimension_(dimension),
index_file_size_(index_file_size),
@ -35,16 +35,17 @@ CreateTableRequest::CreateTableRequest(const std::shared_ptr<milvus::server::Con
}
BaseRequestPtr
CreateTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t dimension, int64_t index_file_size, int64_t metric_type) {
CreateCollectionRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, int64_t dimension, int64_t index_file_size,
int64_t metric_type) {
return std::shared_ptr<BaseRequest>(
new CreateTableRequest(context, collection_name, dimension, index_file_size, metric_type));
new CreateCollectionRequest(context, collection_name, dimension, index_file_size, metric_type));
}
Status
CreateTableRequest::OnExecute() {
CreateCollectionRequest::OnExecute() {
std::string hdr =
"CreateTableRequest(collection=" + collection_name_ + ", dimension=" + std::to_string(dimension_) + ")";
"CreateCollectionRequest(collection=" + collection_name_ + ", dimension=" + std::to_string(dimension_) + ")";
TimeRecorderAuto rc(hdr);
try {
@ -59,14 +60,14 @@ CreateTableRequest::OnExecute() {
return status;
}
status = ValidationUtil::ValidateTableIndexFileSize(index_file_size_);
fiu_do_on("CreateTableRequest.OnExecute.invalid_index_file_size",
status = ValidationUtil::ValidateCollectionIndexFileSize(index_file_size_);
fiu_do_on("CreateCollectionRequest.OnExecute.invalid_index_file_size",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
return status;
}
status = ValidationUtil::ValidateTableIndexMetricType(metric_type_);
status = ValidationUtil::ValidateCollectionIndexMetricType(metric_type_);
if (!status.ok()) {
return status;
}
@ -74,27 +75,27 @@ CreateTableRequest::OnExecute() {
rc.RecordSection("check validation");
// step 2: construct collection schema
engine::meta::CollectionSchema table_info;
table_info.collection_id_ = collection_name_;
table_info.dimension_ = static_cast<uint16_t>(dimension_);
table_info.index_file_size_ = index_file_size_;
table_info.metric_type_ = metric_type_;
engine::meta::CollectionSchema collection_info;
collection_info.collection_id_ = collection_name_;
collection_info.dimension_ = static_cast<uint16_t>(dimension_);
collection_info.index_file_size_ = index_file_size_;
collection_info.metric_type_ = metric_type_;
// some metric types only support binary vectors, so adapt the index type
if (engine::utils::IsBinaryMetricType(metric_type_)) {
if (table_info.engine_type_ == static_cast<int32_t>(engine::EngineType::FAISS_IDMAP)) {
table_info.engine_type_ = static_cast<int32_t>(engine::EngineType::FAISS_BIN_IDMAP);
} else if (table_info.engine_type_ == static_cast<int32_t>(engine::EngineType::FAISS_IVFFLAT)) {
table_info.engine_type_ = static_cast<int32_t>(engine::EngineType::FAISS_BIN_IVFFLAT);
if (collection_info.engine_type_ == static_cast<int32_t>(engine::EngineType::FAISS_IDMAP)) {
collection_info.engine_type_ = static_cast<int32_t>(engine::EngineType::FAISS_BIN_IDMAP);
} else if (collection_info.engine_type_ == static_cast<int32_t>(engine::EngineType::FAISS_IVFFLAT)) {
collection_info.engine_type_ = static_cast<int32_t>(engine::EngineType::FAISS_BIN_IVFFLAT);
}
}
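
The branch above adapts the default engine when the metric only supports binary vectors. A sketch of that mapping with an illustrative enum (the real values live in engine::EngineType, and IsBinaryMetricType supplies the flag):

    // Binary metrics force the binary engine variants; other engines keep their type.
    enum class EngineType { FAISS_IDMAP, FAISS_BIN_IDMAP, FAISS_IVFFLAT, FAISS_BIN_IVFFLAT };

    EngineType AdaptForBinary(bool is_binary_metric, EngineType engine) {
        if (!is_binary_metric) {
            return engine;
        }
        if (engine == EngineType::FAISS_IDMAP) {
            return EngineType::FAISS_BIN_IDMAP;
        }
        if (engine == EngineType::FAISS_IVFFLAT) {
            return EngineType::FAISS_BIN_IVFFLAT;
        }
        return engine;
    }
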
// step 3: create collection
status = DBWrapper::DB()->CreateTable(table_info);
fiu_do_on("CreateTableRequest.OnExecute.db_already_exist", status = Status(milvus::DB_ALREADY_EXIST, ""));
fiu_do_on("CreateTableRequest.OnExecute.create_table_fail",
status = DBWrapper::DB()->CreateCollection(collection_info);
fiu_do_on("CreateCollectionRequest.OnExecute.db_already_exist", status = Status(milvus::DB_ALREADY_EXIST, ""));
fiu_do_on("CreateCollectionRequest.OnExecute.create_table_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("CreateTableRequest.OnExecute.throw_std_exception", throw std::exception());
fiu_do_on("CreateCollectionRequest.OnExecute.throw_std_exception", throw std::exception());
if (!status.ok()) {
// collection could exist
if (status.code() == DB_ALREADY_EXIST) {

View File

@ -19,15 +19,15 @@
namespace milvus {
namespace server {
class CreateTableRequest : public BaseRequest {
class CreateCollectionRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t dimension, int64_t index_file_size, int64_t metric_type);
protected:
CreateTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t dimension, int64_t index_file_size, int64_t metric_type);
CreateCollectionRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
int64_t dimension, int64_t index_file_size, int64_t metric_type);
Status
OnExecute() override;

View File

@ -54,8 +54,9 @@ CreateIndexRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
fiu_do_on("CreateIndexRequest.OnExecute.not_has_table", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
status = DBWrapper::DB()->DescribeCollection(table_schema);
fiu_do_on("CreateIndexRequest.OnExecute.not_has_collection",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("CreateIndexRequest.OnExecute.throw_std.exception", throw std::exception());
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
@ -64,12 +65,12 @@ CreateIndexRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
status = ValidationUtil::ValidateTableIndexType(index_type_);
status = ValidationUtil::ValidateCollectionIndexType(index_type_);
if (!status.ok()) {
return status;
}
@ -80,12 +81,12 @@ CreateIndexRequest::OnExecute() {
}
// step 2: binary and float vectors support different index/metric types, need to adapt here
engine::meta::CollectionSchema table_info;
table_info.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_info);
engine::meta::CollectionSchema collection_info;
collection_info.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeCollection(collection_info);
int32_t adapter_index_type = index_type_;
if (engine::utils::IsBinaryMetricType(table_info.metric_type_)) { // binary vector not allow
if (engine::utils::IsBinaryMetricType(collection_info.metric_type_)) { // binary vectors need the binary engine variants
if (adapter_index_type == static_cast<int32_t>(engine::EngineType::FAISS_IDMAP)) {
adapter_index_type = static_cast<int32_t>(engine::EngineType::FAISS_BIN_IDMAP);
} else if (adapter_index_type == static_cast<int32_t>(engine::EngineType::FAISS_IVFFLAT)) {
@ -101,10 +102,10 @@ CreateIndexRequest::OnExecute() {
server::Config& config = server::Config::GetInstance();
s = config.GetGpuResourceConfigEnable(enable_gpu);
fiu_do_on("CreateIndexRequest.OnExecute.ip_meteric",
table_info.metric_type_ = static_cast<int>(engine::MetricType::IP));
collection_info.metric_type_ = static_cast<int>(engine::MetricType::IP));
if (s.ok() && adapter_index_type == (int)engine::EngineType::FAISS_PQ &&
table_info.metric_type_ == (int)engine::MetricType::IP) {
collection_info.metric_type_ == (int)engine::MetricType::IP) {
return Status(SERVER_UNEXPECTED_ERROR, "PQ not support IP in GPU version!");
}
#endif
@ -112,7 +113,7 @@ CreateIndexRequest::OnExecute() {
rc.RecordSection("check validation");
// step 3: create index
engine::TableIndex index;
engine::CollectionIndex index;
index.engine_type_ = adapter_index_type;
index.extra_params_ = json_params_;
status = DBWrapper::DB()->CreateIndex(collection_name_, index);

View File

@ -61,7 +61,7 @@ CreatePartitionRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
fiu_do_on("CreatePartitionRequest.OnExecute.invalid_partition_tags",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
@ -71,7 +71,7 @@ CreatePartitionRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}

View File

@ -54,7 +54,7 @@ DeleteByIDRequest::OnExecute() {
// step 2: check collection existence
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -62,7 +62,7 @@ DeleteByIDRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}

View File

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include "server/delivery/request/DescribeTableRequest.h"
#include "server/delivery/request/DescribeCollectionRequest.h"
#include "server/DBWrapper.h"
#include "utils/Log.h"
#include "utils/TimeRecorder.h"
@ -21,20 +21,20 @@
namespace milvus {
namespace server {
DescribeTableRequest::DescribeTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, CollectionSchema& schema)
: BaseRequest(context, BaseRequest::kDescribeTable), collection_name_(collection_name), schema_(schema) {
DescribeCollectionRequest::DescribeCollectionRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, CollectionSchema& schema)
: BaseRequest(context, BaseRequest::kDescribeCollection), collection_name_(collection_name), schema_(schema) {
}
BaseRequestPtr
DescribeTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, CollectionSchema& schema) {
return std::shared_ptr<BaseRequest>(new DescribeTableRequest(context, collection_name, schema));
DescribeCollectionRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, CollectionSchema& schema) {
return std::shared_ptr<BaseRequest>(new DescribeCollectionRequest(context, collection_name, schema));
}
Status
DescribeTableRequest::OnExecute() {
std::string hdr = "DescribeTableRequest(collection=" + collection_name_ + ")";
DescribeCollectionRequest::OnExecute() {
std::string hdr = "DescribeCollectionRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
try {
@ -48,10 +48,10 @@ DescribeTableRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
fiu_do_on("DescribeTableRequest.OnExecute.describe_table_fail",
status = DBWrapper::DB()->DescribeCollection(table_schema);
fiu_do_on("DescribeCollectionRequest.OnExecute.describe_table_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("DescribeTableRequest.OnExecute.throw_std_exception", throw std::exception());
fiu_do_on("DescribeCollectionRequest.OnExecute.throw_std_exception", throw std::exception());
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -59,7 +59,7 @@ DescribeTableRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}

View File

@ -19,15 +19,15 @@
namespace milvus {
namespace server {
class DescribeTableRequest : public BaseRequest {
class DescribeCollectionRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
CollectionSchema& schema);
protected:
DescribeTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
CollectionSchema& schema);
DescribeCollectionRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, CollectionSchema& schema);
Status
OnExecute() override;

View File

@ -48,7 +48,7 @@ DescribeIndexRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -56,13 +56,13 @@ DescribeIndexRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// step 2: check collection existence
engine::TableIndex index;
engine::CollectionIndex index;
status = DBWrapper::DB()->DescribeIndex(collection_name_, index);
if (!status.ok()) {
return status;

View File

@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include "server/delivery/request/DropTableRequest.h"
#include "server/delivery/request/DropCollectionRequest.h"
#include "server/DBWrapper.h"
#include "utils/Log.h"
#include "utils/TimeRecorder.h"
@ -22,20 +22,21 @@
namespace milvus {
namespace server {
DropTableRequest::DropTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name)
: BaseRequest(context, BaseRequest::kDropTable), collection_name_(collection_name) {
DropCollectionRequest::DropCollectionRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name)
: BaseRequest(context, BaseRequest::kDropCollection), collection_name_(collection_name) {
}
BaseRequestPtr
DropTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name) {
return std::shared_ptr<BaseRequest>(new DropTableRequest(context, collection_name));
DropCollectionRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name) {
return std::shared_ptr<BaseRequest>(new DropCollectionRequest(context, collection_name));
}
Status
DropTableRequest::OnExecute() {
DropCollectionRequest::OnExecute() {
try {
std::string hdr = "DropTableRequest(collection=" + collection_name_ + ")";
std::string hdr = "DropCollectionRequest(collection=" + collection_name_ + ")";
TimeRecorder rc(hdr);
// step 1: check arguments
@ -48,11 +49,11 @@ DropTableRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
fiu_do_on("DropTableRequest.OnExecute.db_not_found", status = Status(milvus::DB_NOT_FOUND, ""));
fiu_do_on("DropTableRequest.OnExecute.describe_table_fail",
status = DBWrapper::DB()->DescribeCollection(table_schema);
fiu_do_on("DropCollectionRequest.OnExecute.db_not_found", status = Status(milvus::DB_NOT_FOUND, ""));
fiu_do_on("DropCollectionRequest.OnExecute.describe_table_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("DropTableRequest.OnExecute.throw_std_exception", throw std::exception());
fiu_do_on("DropCollectionRequest.OnExecute.throw_std_exception", throw std::exception());
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -60,7 +61,7 @@ DropTableRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
@ -68,8 +69,9 @@ DropTableRequest::OnExecute() {
rc.RecordSection("check validation");
// step 3: Drop collection
status = DBWrapper::DB()->DropTable(collection_name_);
fiu_do_on("DropTableRequest.OnExecute.drop_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
status = DBWrapper::DB()->DropCollection(collection_name_);
fiu_do_on("DropCollectionRequest.OnExecute.drop_table_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
return status;
}
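
The fiu_do_on lines above are libfiu failure points: the second argument runs only while the named point is enabled, which is how the unit tests later in this diff force the DB_NOT_FOUND and SERVER_UNEXPECTED_ERROR paths without a broken database. A self-contained sketch of the mechanism, assuming only the standard libfiu API; the point name example.read_fail is invented:

// Build with -DFIU_ENABLE and link with -lfiu.
#include <fiu.h>
#include <fiu-control.h>
#include <cassert>

int ReadValue() {
    int value = 42;
    fiu_do_on("example.read_fail", value = -1);  // runs only while the point is enabled
    return value;
}

int main() {
    fiu_init(0);                                     // same init call as the unit tests below
    assert(ReadValue() == 42);                       // point disabled: normal path
    fiu_enable("example.read_fail", 1, nullptr, 0);  // failnum 1, no failinfo, no flags
    assert(ReadValue() == -1);                       // point enabled: injected failure path
    fiu_disable("example.read_fail");
    return 0;
}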


@ -19,13 +19,13 @@
namespace milvus {
namespace server {
class DropTableRequest : public BaseRequest {
class DropCollectionRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
protected:
DropTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
DropCollectionRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
Status
OnExecute() override;


@ -47,7 +47,7 @@ DropIndexRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
fiu_do_on("DropIndexRequest.OnExecute.table_not_exist", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
@ -56,7 +56,7 @@ DropIndexRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}


@ -65,7 +65,7 @@ DropPartitionRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -73,7 +73,7 @@ DropPartitionRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}


@ -53,7 +53,7 @@ FlushRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = name;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(name));
@ -61,7 +61,7 @@ FlushRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(name));
}
}


@ -62,7 +62,7 @@ GetVectorByIDRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -70,7 +70,7 @@ GetVectorByIDRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}


@ -57,7 +57,7 @@ GetVectorIDsRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -65,7 +65,7 @@ GetVectorIDsRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}


@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include "server/delivery/request/HasTableRequest.h"
#include "server/delivery/request/HasCollectionRequest.h"
#include "server/DBWrapper.h"
#include "utils/Log.h"
#include "utils/TimeRecorder.h"
@ -21,21 +21,23 @@
namespace milvus {
namespace server {
HasTableRequest::HasTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, bool& has_table)
: BaseRequest(context, BaseRequest::kHasTable), collection_name_(collection_name), has_table_(has_table) {
HasCollectionRequest::HasCollectionRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, bool& has_collection)
: BaseRequest(context, BaseRequest::kHasCollection),
collection_name_(collection_name),
has_collection_(has_collection) {
}
BaseRequestPtr
HasTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
bool& has_table) {
return std::shared_ptr<BaseRequest>(new HasTableRequest(context, collection_name, has_table));
HasCollectionRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, bool& has_collection) {
return std::shared_ptr<BaseRequest>(new HasCollectionRequest(context, collection_name, has_collection));
}
Status
HasTableRequest::OnExecute() {
HasCollectionRequest::OnExecute() {
try {
std::string hdr = "HasTableRequest(collection=" + collection_name_ + ")";
std::string hdr = "HasCollectionRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
@ -45,16 +47,16 @@ HasTableRequest::OnExecute() {
}
// step 2: check collection existence
status = DBWrapper::DB()->HasNativeTable(collection_name_, has_table_);
fiu_do_on("HasTableRequest.OnExecute.throw_std_exception", throw std::exception());
status = DBWrapper::DB()->HasNativeCollection(collection_name_, has_collection_);
fiu_do_on("HasCollectionRequest.OnExecute.throw_std_exception", throw std::exception());
// only process root collection, ignore partition collection
if (has_table_) {
if (has_collection_) {
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
if (!table_schema.owner_table_.empty()) {
has_table_ = false;
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!table_schema.owner_collection_.empty()) {
has_collection_ = false;
}
}
} catch (std::exception& ex) {


@ -19,22 +19,22 @@
namespace milvus {
namespace server {
class HasTableRequest : public BaseRequest {
class HasCollectionRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
bool& has_table);
bool& has_collection);
protected:
HasTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
bool& has_table);
HasCollectionRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
bool& has_collection);
Status
OnExecute() override;
private:
std::string collection_name_;
bool& has_table_;
bool& has_collection_;
};
} // namespace server
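
Note the design choice in this header: has_collection_ is a reference member, so the caller's flag is written in place when the request runs, which means the flag must outlive the request object. A minimal usage sketch under that assumption; the scheduler that drives OnExecute() is outside this diff, so that step is elided:

// Sketch only: Create() is taken from the header above.
bool has_collection = false;  // must outlive the request: it is held by reference
BaseRequestPtr request = HasCollectionRequest::Create(context, "demo_collection", has_collection);
// ... submit `request` to the request scheduler and wait for it to finish ...
// afterwards has_collection is true only for an existing root collection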


@ -75,7 +75,7 @@ InsertRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
fiu_do_on("InsertRequest.OnExecute.db_not_found", status = Status(milvus::DB_NOT_FOUND, ""));
fiu_do_on("InsertRequest.OnExecute.describe_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
@ -85,7 +85,7 @@ InsertRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
@ -171,7 +171,7 @@ InsertRequest::OnExecute() {
// step 6: update collection flag
user_provide_ids ? table_schema.flag_ |= engine::meta::FLAG_MASK_HAS_USERID
: table_schema.flag_ |= engine::meta::FLAG_MASK_NO_USERID;
status = DBWrapper::DB()->UpdateTableFlag(collection_name_, table_schema.flag_);
status = DBWrapper::DB()->UpdateCollectionFlag(collection_name_, table_schema.flag_);
#ifdef MILVUS_ENABLE_PROFILING
ProfilerStop();


@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include "server/delivery/request/PreloadTableRequest.h"
#include "server/delivery/request/PreloadCollectionRequest.h"
#include "server/DBWrapper.h"
#include "utils/Log.h"
#include "utils/TimeRecorder.h"
@ -21,21 +21,21 @@
namespace milvus {
namespace server {
PreloadTableRequest::PreloadTableRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name)
: BaseRequest(context, BaseRequest::kPreloadTable), collection_name_(collection_name) {
PreloadCollectionRequest::PreloadCollectionRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name)
: BaseRequest(context, BaseRequest::kPreloadCollection), collection_name_(collection_name) {
}
BaseRequestPtr
PreloadTableRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name) {
return std::shared_ptr<BaseRequest>(new PreloadTableRequest(context, collection_name));
PreloadCollectionRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name) {
return std::shared_ptr<BaseRequest>(new PreloadCollectionRequest(context, collection_name));
}
Status
PreloadTableRequest::OnExecute() {
PreloadCollectionRequest::OnExecute() {
try {
std::string hdr = "PreloadTableRequest(collection=" + collection_name_ + ")";
std::string hdr = "PreloadCollectionRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check arguments
@ -47,7 +47,7 @@ PreloadTableRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -55,16 +55,16 @@ PreloadTableRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// step 2: preload collection
status = DBWrapper::DB()->PreloadTable(collection_name_);
fiu_do_on("PreloadTableRequest.OnExecute.preload_table_fail",
status = DBWrapper::DB()->PreloadCollection(collection_name_);
fiu_do_on("PreloadCollectionRequest.OnExecute.preload_table_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
fiu_do_on("PreloadTableRequest.OnExecute.throw_std_exception", throw std::exception());
fiu_do_on("PreloadCollectionRequest.OnExecute.throw_std_exception", throw std::exception());
if (!status.ok()) {
return status;
}
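
PreloadCollection is a warm-up call: it pulls the collection's files into the CPU cache so the first search avoids the load cost. A sketch of how the effect can be observed, following the PRELOADTABLE_TEST later in this diff; the collection name is an example:

// Sketch, mirroring PRELOADTABLE_TEST below; error handling elided.
int64_t before = milvus::cache::CpuCacheMgr::GetInstance()->CacheUsage();
auto status = DBWrapper::DB()->PreloadCollection("demo_collection");
if (status.ok()) {
    int64_t after = milvus::cache::CpuCacheMgr::GetInstance()->CacheUsage();
    // a successful preload leaves after > before: the files now sit in cache
}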


@ -19,13 +19,14 @@
namespace milvus {
namespace server {
class PreloadTableRequest : public BaseRequest {
class PreloadCollectionRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
protected:
PreloadTableRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name);
PreloadCollectionRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name);
Status
OnExecute() override;


@ -82,7 +82,7 @@ SearchByIDRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -90,7 +90,7 @@ SearchByIDRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}


@ -243,7 +243,7 @@ SearchCombineRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
auto status = DBWrapper::DB()->DescribeTable(table_schema);
auto status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
@ -255,7 +255,7 @@ SearchCombineRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
status = Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
FreeRequests(status);
return status;


@ -90,7 +90,7 @@ SearchRequest::OnExecute() {
// step 4: check collection existence
// only process root collection, ignore partition collection
collection_schema_.collection_id_ = collection_name_;
auto status = DBWrapper::DB()->DescribeTable(collection_schema_);
auto status = DBWrapper::DB()->DescribeCollection(collection_schema_);
fiu_do_on("SearchRequest.OnExecute.describe_table_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
@ -100,7 +100,7 @@ SearchRequest::OnExecute() {
return status;
}
} else {
if (!collection_schema_.owner_table_.empty()) {
if (!collection_schema_.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}


@ -15,7 +15,7 @@
// specific language governing permissions and limitations
// under the License.
#include "server/delivery/request/ShowTableInfoRequest.h"
#include "server/delivery/request/ShowCollectionInfoRequest.h"
#include "server/DBWrapper.h"
#include "utils/Log.h"
#include "utils/TimeRecorder.h"
@ -43,20 +43,23 @@ ConstructPartitionStat(const engine::PartitionStat& partition_stat, PartitionSta
req_partition_stat.total_row_num_ = row_count;
}
ShowTableInfoRequest::ShowTableInfoRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, TableInfo& table_info)
: BaseRequest(context, BaseRequest::kShowTableInfo), collection_name_(collection_name), table_info_(table_info) {
ShowCollectionInfoRequest::ShowCollectionInfoRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name,
CollectionInfo& collection_info)
: BaseRequest(context, BaseRequest::kShowCollectionInfo),
collection_name_(collection_name),
collection_info_(collection_info) {
}
BaseRequestPtr
ShowTableInfoRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, TableInfo& table_info) {
return std::shared_ptr<BaseRequest>(new ShowTableInfoRequest(context, collection_name, table_info));
ShowCollectionInfoRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, CollectionInfo& collection_info) {
return std::shared_ptr<BaseRequest>(new ShowCollectionInfoRequest(context, collection_name, collection_info));
}
Status
ShowTableInfoRequest::OnExecute() {
std::string hdr = "ShowTableInfoRequest(collection=" + collection_name_ + ")";
ShowCollectionInfoRequest::OnExecute() {
std::string hdr = "ShowCollectionInfoRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
// step 1: check collection name
@ -69,7 +72,7 @@ ShowTableInfoRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -77,29 +80,29 @@ ShowTableInfoRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
// step 3: get partitions
engine::TableInfo table_info;
status = DBWrapper::DB()->GetTableInfo(collection_name_, table_info);
engine::CollectionInfo collection_info;
status = DBWrapper::DB()->GetCollectionInfo(collection_name_, collection_info);
if (!status.ok()) {
return status;
}
// step 4: construct partitions info
int64_t total_row_count = 0;
table_info_.partitions_stat_.reserve(table_info.partitions_stat_.size());
for (auto& partition : table_info.partitions_stat_) {
collection_info_.partitions_stat_.reserve(collection_info.partitions_stat_.size());
for (auto& partition : collection_info.partitions_stat_) {
PartitionStat partition_stat;
ConstructPartitionStat(partition, partition_stat);
total_row_count += partition_stat.total_row_num_;
table_info_.partitions_stat_.emplace_back(partition_stat);
collection_info_.partitions_stat_.emplace_back(partition_stat);
}
table_info_.total_row_num_ = total_row_count;
collection_info_.total_row_num_ = total_row_count;
return Status::OK();
}
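
GetCollectionInfo hands back per-partition statistics, which OnExecute above flattens into PartitionStat entries plus a grand total. A sketch of the same aggregation taken one level deeper, to segments; field names are from this diff and from SHOW_TABLE_INFO_TEST below:

engine::CollectionInfo collection_info;
auto status = DBWrapper::DB()->GetCollectionInfo(collection_name, collection_info);
int64_t total_rows = 0;
if (status.ok()) {
    for (auto& partition : collection_info.partitions_stat_) {
        for (auto& segment : partition.segments_stat_) {
            total_rows += segment.row_count_;  // segments roll up to the partition count
        }
    }
}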


@ -26,22 +26,22 @@
namespace milvus {
namespace server {
class ShowTableInfoRequest : public BaseRequest {
class ShowCollectionInfoRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
TableInfo& table_info);
CollectionInfo& collection_info);
protected:
ShowTableInfoRequest(const std::shared_ptr<milvus::server::Context>& context, const std::string& collection_name,
TableInfo& table_info);
ShowCollectionInfoRequest(const std::shared_ptr<milvus::server::Context>& context,
const std::string& collection_name, CollectionInfo& collection_info);
Status
OnExecute() override;
private:
const std::string collection_name_;
TableInfo& table_info_;
CollectionInfo& collection_info_;
};
} // namespace server


@ -9,7 +9,7 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include "server/delivery/request/ShowTablesRequest.h"
#include "server/delivery/request/ShowCollectionsRequest.h"
#include "server/DBWrapper.h"
#include "utils/Log.h"
#include "utils/TimeRecorder.h"
@ -22,24 +22,25 @@
namespace milvus {
namespace server {
ShowTablesRequest::ShowTablesRequest(const std::shared_ptr<milvus::server::Context>& context,
std::vector<std::string>& table_name_list)
: BaseRequest(context, BaseRequest::kShowTables), table_name_list_(table_name_list) {
ShowCollectionsRequest::ShowCollectionsRequest(const std::shared_ptr<milvus::server::Context>& context,
std::vector<std::string>& table_name_list)
: BaseRequest(context, BaseRequest::kShowCollections), table_name_list_(table_name_list) {
}
BaseRequestPtr
ShowTablesRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
std::vector<std::string>& table_name_list) {
return std::shared_ptr<BaseRequest>(new ShowTablesRequest(context, table_name_list));
ShowCollectionsRequest::Create(const std::shared_ptr<milvus::server::Context>& context,
std::vector<std::string>& table_name_list) {
return std::shared_ptr<BaseRequest>(new ShowCollectionsRequest(context, table_name_list));
}
Status
ShowTablesRequest::OnExecute() {
TimeRecorderAuto rc("ShowTablesRequest");
ShowCollectionsRequest::OnExecute() {
TimeRecorderAuto rc("ShowCollectionsRequest");
std::vector<engine::meta::CollectionSchema> schema_array;
auto status = DBWrapper::DB()->AllTables(schema_array);
fiu_do_on("ShowTablesRequest.OnExecute.show_tables_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
auto status = DBWrapper::DB()->AllCollections(schema_array);
fiu_do_on("ShowCollectionsRequest.OnExecute.show_tables_fail",
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
return status;
}


@ -20,14 +20,14 @@
namespace milvus {
namespace server {
class ShowTablesRequest : public BaseRequest {
class ShowCollectionsRequest : public BaseRequest {
public:
static BaseRequestPtr
Create(const std::shared_ptr<milvus::server::Context>& context, std::vector<std::string>& table_name_list);
protected:
ShowTablesRequest(const std::shared_ptr<milvus::server::Context>& context,
std::vector<std::string>& table_name_list);
ShowCollectionsRequest(const std::shared_ptr<milvus::server::Context>& context,
std::vector<std::string>& table_name_list);
Status
OnExecute() override;


@ -53,7 +53,7 @@ ShowPartitionsRequest::OnExecute() {
// only process root collection, ignore partition collection
engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = collection_name_;
status = DBWrapper::DB()->DescribeTable(table_schema);
status = DBWrapper::DB()->DescribeCollection(table_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
return Status(SERVER_TABLE_NOT_EXIST, TableNotExistMsg(collection_name_));
@ -61,7 +61,7 @@ ShowPartitionsRequest::OnExecute() {
return status;
}
} else {
if (!table_schema.owner_table_.empty()) {
if (!table_schema.owner_collection_.empty()) {
return Status(SERVER_INVALID_TABLE_NAME, TableNotExistMsg(collection_name_));
}
}
@ -78,7 +78,7 @@ ShowPartitionsRequest::OnExecute() {
partition_list_.clear();
partition_list_.emplace_back(collection_name_, milvus::engine::DEFAULT_PARTITON_TAG);
for (auto& schema : schema_array) {
partition_list_.emplace_back(schema.owner_table_, schema.partition_tag_);
partition_list_.emplace_back(schema.owner_collection_, schema.partition_tag_);
}
return Status::OK();


@ -142,14 +142,14 @@ ConstructPartitionStat(const PartitionStat& partition_stat, ::milvus::grpc::Part
}
void
ConstructTableInfo(const TableInfo& table_info, ::milvus::grpc::TableInfo* response) {
ConstructTableInfo(const CollectionInfo& collection_info, ::milvus::grpc::TableInfo* response) {
if (!response) {
return;
}
response->set_total_row_count(table_info.total_row_num_);
response->set_total_row_count(collection_info.total_row_num_);
for (auto& partition_stat : table_info.partitions_stat_) {
for (auto& partition_stat : collection_info.partitions_stat_) {
::milvus::grpc::PartitionStat* grpc_partiton_stat = response->mutable_partitions_stat()->Add();
ConstructPartitionStat(partition_stat, grpc_partiton_stat);
}
@ -242,8 +242,9 @@ GrpcRequestHandler::CreateTable(::grpc::ServerContext* context, const ::milvus::
::milvus::grpc::Status* response) {
CHECK_NULLPTR_RETURN(request);
Status status = request_handler_.CreateTable(context_map_[context], request->table_name(), request->dimension(),
request->index_file_size(), request->metric_type());
Status status =
request_handler_.CreateCollection(context_map_[context], request->table_name(), request->dimension(),
request->index_file_size(), request->metric_type());
SET_RESPONSE(response, status, context);
return ::grpc::Status::OK;
@ -254,10 +255,10 @@ GrpcRequestHandler::HasTable(::grpc::ServerContext* context, const ::milvus::grp
::milvus::grpc::BoolReply* response) {
CHECK_NULLPTR_RETURN(request);
bool has_table = false;
bool has_collection = false;
Status status = request_handler_.HasTable(context_map_[context], request->table_name(), has_table);
response->set_bool_reply(has_table);
Status status = request_handler_.HasCollection(context_map_[context], request->table_name(), has_collection);
response->set_bool_reply(has_collection);
SET_RESPONSE(response->mutable_status(), status, context);
return ::grpc::Status::OK;
@ -268,7 +269,7 @@ GrpcRequestHandler::DropTable(::grpc::ServerContext* context, const ::milvus::gr
::milvus::grpc::Status* response) {
CHECK_NULLPTR_RETURN(request);
Status status = request_handler_.DropTable(context_map_[context], request->table_name());
Status status = request_handler_.DropCollection(context_map_[context], request->table_name());
SET_RESPONSE(response, status, context);
return ::grpc::Status::OK;
@ -482,7 +483,7 @@ GrpcRequestHandler::DescribeTable(::grpc::ServerContext* context, const ::milvus
CHECK_NULLPTR_RETURN(request);
CollectionSchema table_schema;
Status status = request_handler_.DescribeTable(context_map_[context], request->table_name(), table_schema);
Status status = request_handler_.DescribeCollection(context_map_[context], request->table_name(), table_schema);
response->set_table_name(table_schema.collection_name_);
response->set_dimension(table_schema.dimension_);
response->set_index_file_size(table_schema.index_file_size_);
@ -498,7 +499,7 @@ GrpcRequestHandler::CountTable(::grpc::ServerContext* context, const ::milvus::g
CHECK_NULLPTR_RETURN(request);
int64_t row_count = 0;
Status status = request_handler_.CountTable(context_map_[context], request->table_name(), row_count);
Status status = request_handler_.CountCollection(context_map_[context], request->table_name(), row_count);
response->set_table_row_count(row_count);
SET_RESPONSE(response->mutable_status(), status, context);
return ::grpc::Status::OK;
@ -510,7 +511,7 @@ GrpcRequestHandler::ShowTables(::grpc::ServerContext* context, const ::milvus::g
CHECK_NULLPTR_RETURN(request);
std::vector<std::string> tables;
Status status = request_handler_.ShowTables(context_map_[context], tables);
Status status = request_handler_.ShowCollections(context_map_[context], tables);
for (auto& collection : tables) {
response->add_table_names(collection);
}
@ -524,9 +525,9 @@ GrpcRequestHandler::ShowTableInfo(::grpc::ServerContext* context, const ::milvus
::milvus::grpc::TableInfo* response) {
CHECK_NULLPTR_RETURN(request);
TableInfo table_info;
Status status = request_handler_.ShowTableInfo(context_map_[context], request->table_name(), table_info);
ConstructTableInfo(table_info, response);
CollectionInfo collection_info;
Status status = request_handler_.ShowCollectionInfo(context_map_[context], request->table_name(), collection_info);
ConstructTableInfo(collection_info, response);
SET_RESPONSE(response->mutable_status(), status, context);
return ::grpc::Status::OK;
@ -568,7 +569,7 @@ GrpcRequestHandler::PreloadTable(::grpc::ServerContext* context, const ::milvus:
::milvus::grpc::Status* response) {
CHECK_NULLPTR_RETURN(request);
Status status = request_handler_.PreloadTable(context_map_[context], request->table_name());
Status status = request_handler_.PreloadCollection(context_map_[context], request->table_name());
SET_RESPONSE(response, status, context);
return ::grpc::Status::OK;
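
The hunks above all follow one compatibility rule: the gRPC surface keeps its Table-era RPC and message names, so existing clients stay wire-compatible, while everything behind request_handler_ switches to the Collection vocabulary. The mapping, read directly off the code above:

// old RPC kept on the wire  ->  renamed internal handler call
// CreateTable               ->  request_handler_.CreateCollection
// HasTable                  ->  request_handler_.HasCollection
// DropTable                 ->  request_handler_.DropCollection
// DescribeTable             ->  request_handler_.DescribeCollection
// CountTable                ->  request_handler_.CountCollection
// ShowTables                ->  request_handler_.ShowCollections
// ShowTableInfo             ->  request_handler_.ShowCollectionInfo
// PreloadTable              ->  request_handler_.PreloadCollection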


@ -164,7 +164,7 @@ WebRequestHandler::ParsePartitionStat(const milvus::server::PartitionStat& par_s
Status
WebRequestHandler::IsBinaryTable(const std::string& collection_name, bool& bin) {
CollectionSchema schema;
auto status = request_handler_.DescribeTable(context_ptr_, collection_name, schema);
auto status = request_handler_.DescribeCollection(context_ptr_, collection_name, schema);
if (status.ok()) {
auto metric = engine::MetricType(schema.metric_type_);
bin = engine::MetricType::HAMMING == metric || engine::MetricType::JACCARD == metric ||
@ -210,13 +210,13 @@ WebRequestHandler::CopyRecordsFromJson(const nlohmann::json& json, engine::Vecto
Status
WebRequestHandler::GetTableMetaInfo(const std::string& collection_name, nlohmann::json& json_out) {
CollectionSchema schema;
auto status = request_handler_.DescribeTable(context_ptr_, collection_name, schema);
auto status = request_handler_.DescribeCollection(context_ptr_, collection_name, schema);
if (!status.ok()) {
return status;
}
int64_t count;
status = request_handler_.CountTable(context_ptr_, collection_name, count);
status = request_handler_.CountCollection(context_ptr_, collection_name, count);
if (!status.ok()) {
return status;
}
@ -240,8 +240,8 @@ WebRequestHandler::GetTableMetaInfo(const std::string& collection_name, nlohmann
Status
WebRequestHandler::GetTableStat(const std::string& collection_name, nlohmann::json& json_out) {
struct TableInfo collection_info;
auto status = request_handler_.ShowTableInfo(context_ptr_, collection_name, collection_info);
struct CollectionInfo collection_info;
auto status = request_handler_.ShowCollectionInfo(context_ptr_, collection_name, collection_info);
if (status.ok()) {
json_out["count"] = collection_info.total_row_num_;
@ -336,7 +336,7 @@ WebRequestHandler::PreLoadTable(const nlohmann::json& json, std::string& result_
}
auto collection_name = json["collection_name"];
auto status = request_handler_.PreloadTable(context_ptr_, collection_name.get<std::string>());
auto status = request_handler_.PreloadCollection(context_ptr_, collection_name.get<std::string>());
if (status.ok()) {
nlohmann::json result;
AddStatusToJson(result, status.code(), status.message());
@ -922,10 +922,10 @@ WebRequestHandler::CreateTable(const TableRequestDto::ObjectWrapper& collection_
RETURN_STATUS_DTO(ILLEGAL_METRIC_TYPE, "metric_type is illegal")
}
auto status =
request_handler_.CreateTable(context_ptr_, collection_schema->collection_name->std_str(),
collection_schema->dimension, collection_schema->index_file_size,
static_cast<int64_t>(MetricNameMap.at(collection_schema->metric_type->std_str())));
auto status = request_handler_.CreateCollection(
context_ptr_, collection_schema->collection_name->std_str(), collection_schema->dimension,
collection_schema->index_file_size,
static_cast<int64_t>(MetricNameMap.at(collection_schema->metric_type->std_str())));
ASSIGN_RETURN_STATUS_DTO(status)
}
@ -955,7 +955,7 @@ WebRequestHandler::ShowTables(const OQueryParams& query_params, OString& result)
}
std::vector<std::string> collections;
status = request_handler_.ShowTables(context_ptr_, collections);
status = request_handler_.ShowCollections(context_ptr_, collections);
if (!status.ok()) {
ASSIGN_RETURN_STATUS_DTO(status)
}
@ -1018,7 +1018,7 @@ WebRequestHandler::GetTable(const OString& collection_name, const OQueryParams&
StatusDto::ObjectWrapper
WebRequestHandler::DropTable(const OString& collection_name) {
auto status = request_handler_.DropTable(context_ptr_, collection_name->std_str());
auto status = request_handler_.DropCollection(context_ptr_, collection_name->std_str());
ASSIGN_RETURN_STATUS_DTO(status)
}
@ -1202,8 +1202,8 @@ WebRequestHandler::ShowSegments(const OString& collection_name, const OQueryPara
tag = query_params.get("partition_tag")->std_str();
}
TableInfo info;
status = request_handler_.ShowTableInfo(context_ptr_, collection_name->std_str(), info);
CollectionInfo info;
status = request_handler_.ShowCollectionInfo(context_ptr_, collection_name->std_str(), info);
if (!status.ok()) {
ASSIGN_RETURN_STATUS_DTO(status)
}


@ -159,7 +159,7 @@ ValidationUtil::ValidateTableDimension(int64_t dimension, int64_t metric_type) {
}
Status
ValidationUtil::ValidateTableIndexType(int32_t index_type) {
ValidationUtil::ValidateCollectionIndexType(int32_t index_type) {
int engine_type = static_cast<int>(engine::EngineType(index_type));
if (engine_type <= 0 || engine_type > static_cast<int>(engine::EngineType::MAX_VALUE)) {
std::string msg = "Invalid index type: " + std::to_string(index_type) + ". " +
@ -359,7 +359,7 @@ ValidationUtil::ValidateVectorData(const engine::VectorsData& vectors,
}
Status
ValidationUtil::ValidateTableIndexFileSize(int64_t index_file_size) {
ValidationUtil::ValidateCollectionIndexFileSize(int64_t index_file_size) {
if (index_file_size <= 0 || index_file_size > INDEX_FILE_SIZE_LIMIT) {
std::string msg = "Invalid index file size: " + std::to_string(index_file_size) + ". " +
"The index file size must be within the range of 1 ~ " +
@ -372,7 +372,7 @@ ValidationUtil::ValidateTableIndexFileSize(int64_t index_file_size) {
}
Status
ValidationUtil::ValidateTableIndexMetricType(int32_t metric_type) {
ValidationUtil::ValidateCollectionIndexMetricType(int32_t metric_type) {
if (metric_type <= 0 || metric_type > static_cast<int32_t>(engine::MetricType::MAX_VALUE)) {
std::string msg = "Invalid index metric type: " + std::to_string(metric_type) + ". " +
"Make sure the metric type is in MetricType list.";


@ -36,7 +36,7 @@ class ValidationUtil {
ValidateTableDimension(int64_t dimension, int64_t metric_type);
static Status
ValidateTableIndexType(int32_t index_type);
ValidateCollectionIndexType(int32_t index_type);
static Status
ValidateIndexParams(const milvus::json& index_params, const engine::meta::CollectionSchema& table_schema,
@ -50,10 +50,10 @@ class ValidationUtil {
ValidateVectorData(const engine::VectorsData& vectors, const engine::meta::CollectionSchema& table_schema);
static Status
ValidateTableIndexFileSize(int64_t index_file_size);
ValidateCollectionIndexFileSize(int64_t index_file_size);
static Status
ValidateTableIndexMetricType(int32_t metric_type);
ValidateCollectionIndexMetricType(int32_t metric_type);
static Status
ValidateSearchTopk(int64_t top_k);
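
The renamed validators are plain static checks with no database dependency, so a request can run them before any metadata lookup. A sketch chaining the three index-related checks, with example values; the enum members appear in the tests later in this diff:

// Sketch: fail fast on bad index settings before touching the database.
int32_t index_type = static_cast<int32_t>(engine::EngineType::FAISS_IVFFLAT);
int64_t index_file_size = 1024;  // must lie within 1 ~ INDEX_FILE_SIZE_LIMIT
int32_t metric_type = static_cast<int32_t>(engine::MetricType::L2);

Status status = ValidationUtil::ValidateCollectionIndexType(index_type);
if (status.ok()) {
    status = ValidationUtil::ValidateCollectionIndexFileSize(index_file_size);
}
if (status.ok()) {
    status = ValidationUtil::ValidateCollectionIndexMetricType(metric_type);
}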


@ -39,10 +39,10 @@ static constexpr int64_t DAY_SECONDS = 24 * 60 * 60;
milvus::engine::meta::CollectionSchema
BuildTableSchema() {
milvus::engine::meta::CollectionSchema table_info;
table_info.dimension_ = TABLE_DIM;
table_info.collection_id_ = TABLE_NAME;
return table_info;
milvus::engine::meta::CollectionSchema collection_info;
collection_info.dimension_ = TABLE_DIM;
collection_info.collection_id_ = TABLE_NAME;
return collection_info;
}
void
@ -163,14 +163,14 @@ TEST_F(DBTest, CONFIG_TEST) {
}
TEST_F(DBTest, DB_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
uint64_t qb = 5;
milvus::engine::VectorsData qxb;
@ -242,7 +242,7 @@ TEST_F(DBTest, DB_TEST) {
search.join();
uint64_t count;
stat = db_->GetTableRowCount(TABLE_NAME, count);
stat = db_->GetCollectionRowCount(TABLE_NAME, count);
ASSERT_TRUE(stat.ok());
ASSERT_GT(count, 0);
@ -267,14 +267,14 @@ TEST_F(DBTest, SEARCH_TEST) {
milvus::server::Config& config = milvus::server::Config::GetInstance();
milvus::Status s = config.LoadConfigFile(config_path);
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
// prepare raw data
size_t nb = VECTOR_COUNT;
@ -311,7 +311,7 @@ TEST_F(DBTest, SEARCH_TEST) {
ASSERT_TRUE(stat.ok());
milvus::json json_params = {{"nprobe", 10}};
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP;
index.extra_params_ = {{"nlist", 16384}};
// db_->CreateIndex(TABLE_NAME, index); // wait until build index finish
@ -437,14 +437,14 @@ TEST_F(DBTest, SEARCH_TEST) {
TEST_F(DBTest, PRELOADTABLE_TEST) {
fiu_init(0);
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int loop = 5;
for (auto i = 0; i < loop; ++i) {
@ -456,54 +456,54 @@ TEST_F(DBTest, PRELOADTABLE_TEST) {
ASSERT_EQ(xb.id_array_.size(), nb);
}
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP;
db_->CreateIndex(TABLE_NAME, index); // wait until build index finish
int64_t prev_cache_usage = milvus::cache::CpuCacheMgr::GetInstance()->CacheUsage();
stat = db_->PreloadTable(TABLE_NAME);
stat = db_->PreloadCollection(TABLE_NAME);
ASSERT_TRUE(stat.ok());
int64_t cur_cache_usage = milvus::cache::CpuCacheMgr::GetInstance()->CacheUsage();
ASSERT_TRUE(prev_cache_usage < cur_cache_usage);
FIU_ENABLE_FIU("SqliteMetaImpl.FilesToSearch.throw_exception");
stat = db_->PreloadTable(TABLE_NAME);
stat = db_->PreloadCollection(TABLE_NAME);
ASSERT_FALSE(stat.ok());
fiu_disable("SqliteMetaImpl.FilesToSearch.throw_exception");
// create a partition
stat = db_->CreatePartition(TABLE_NAME, "part0", "0");
ASSERT_TRUE(stat.ok());
stat = db_->PreloadTable(TABLE_NAME);
stat = db_->PreloadCollection(TABLE_NAME);
ASSERT_TRUE(stat.ok());
FIU_ENABLE_FIU("DBImpl.PreloadTable.null_engine");
stat = db_->PreloadTable(TABLE_NAME);
FIU_ENABLE_FIU("DBImpl.PreloadCollection.null_engine");
stat = db_->PreloadCollection(TABLE_NAME);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.PreloadTable.null_engine");
fiu_disable("DBImpl.PreloadCollection.null_engine");
FIU_ENABLE_FIU("DBImpl.PreloadTable.exceed_cache");
stat = db_->PreloadTable(TABLE_NAME);
FIU_ENABLE_FIU("DBImpl.PreloadCollection.exceed_cache");
stat = db_->PreloadCollection(TABLE_NAME);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.PreloadTable.exceed_cache");
fiu_disable("DBImpl.PreloadCollection.exceed_cache");
FIU_ENABLE_FIU("DBImpl.PreloadTable.engine_throw_exception");
stat = db_->PreloadTable(TABLE_NAME);
FIU_ENABLE_FIU("DBImpl.PreloadCollection.engine_throw_exception");
stat = db_->PreloadCollection(TABLE_NAME);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.PreloadTable.engine_throw_exception");
fiu_disable("DBImpl.PreloadCollection.engine_throw_exception");
}
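
Every FIU_ENABLE_FIU above must be paired with a fiu_disable by hand, and an early exit between the two would leak the enabled point into later tests. A hypothetical RAII guard, not in the codebase and built only on libfiu's fiu_enable and fiu_disable, would make the pairing automatic:

#include <fiu-control.h>
#include <string>
#include <utility>

// Hypothetical helper, not part of this commit: enables a failure point
// for one scope and always disables it on exit, even on early return.
class ScopedFiu {
 public:
    explicit ScopedFiu(std::string point) : point_(std::move(point)) {
        fiu_enable(point_.c_str(), 1, nullptr, 0);
    }
    ~ScopedFiu() {
        fiu_disable(point_.c_str());
    }
 private:
    std::string point_;
};

// usage sketch inside a test body:
// {
//     ScopedFiu guard("DBImpl.PreloadCollection.null_engine");
//     ASSERT_FALSE(db_->PreloadCollection(TABLE_NAME).ok());
// }  // the point is disabled here even if the assertion returns early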
TEST_F(DBTest, SHUTDOWN_TEST) {
db_->Stop();
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
ASSERT_FALSE(stat.ok());
stat = db_->DescribeTable(table_info);
stat = db_->DescribeCollection(collection_info);
ASSERT_FALSE(stat.ok());
stat = db_->UpdateTableFlag(TABLE_NAME, 0);
stat = db_->UpdateCollectionFlag(TABLE_NAME, 0);
ASSERT_FALSE(stat.ok());
stat = db_->CreatePartition(TABLE_NAME, "part0", "0");
@ -519,47 +519,47 @@ TEST_F(DBTest, SHUTDOWN_TEST) {
stat = db_->ShowPartitions(TABLE_NAME, partition_schema_array);
ASSERT_FALSE(stat.ok());
std::vector<milvus::engine::meta::CollectionSchema> table_infos;
stat = db_->AllTables(table_infos);
std::vector<milvus::engine::meta::CollectionSchema> collection_infos;
stat = db_->AllCollections(collection_infos);
ASSERT_EQ(stat.code(), milvus::DB_ERROR);
bool has_table = false;
stat = db_->HasTable(table_info.collection_id_, has_table);
bool has_collection = false;
stat = db_->HasCollection(collection_info.collection_id_, has_collection);
ASSERT_FALSE(stat.ok());
milvus::engine::VectorsData xb;
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_FALSE(stat.ok());
stat = db_->Flush();
ASSERT_FALSE(stat.ok());
stat = db_->DeleteVector(table_info.collection_id_, 0);
stat = db_->DeleteVector(collection_info.collection_id_, 0);
ASSERT_FALSE(stat.ok());
milvus::engine::IDNumbers ids_to_delete{0};
stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, ids_to_delete);
ASSERT_FALSE(stat.ok());
stat = db_->Compact(table_info.collection_id_);
stat = db_->Compact(collection_info.collection_id_);
ASSERT_FALSE(stat.ok());
milvus::engine::VectorsData vector;
stat = db_->GetVectorByID(table_info.collection_id_, 0, vector);
stat = db_->GetVectorByID(collection_info.collection_id_, 0, vector);
ASSERT_FALSE(stat.ok());
stat = db_->PreloadTable(table_info.collection_id_);
stat = db_->PreloadCollection(collection_info.collection_id_);
ASSERT_FALSE(stat.ok());
uint64_t row_count = 0;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_FALSE(stat.ok());
milvus::engine::TableIndex index;
stat = db_->CreateIndex(table_info.collection_id_, index);
milvus::engine::CollectionIndex index;
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_FALSE(stat.ok());
stat = db_->DescribeIndex(table_info.collection_id_, index);
stat = db_->DescribeIndex(collection_info.collection_id_, index);
ASSERT_FALSE(stat.ok());
stat = db_->DropIndex(TABLE_NAME);
@ -570,7 +570,7 @@ TEST_F(DBTest, SHUTDOWN_TEST) {
milvus::engine::ResultDistances result_distances;
milvus::json json_params = {{"nprobe", 1}};
stat = db_->Query(dummy_context_,
table_info.collection_id_, tags, 1, json_params, xb, result_ids, result_distances);
collection_info.collection_id_, tags, 1, json_params, xb, result_ids, result_distances);
ASSERT_FALSE(stat.ok());
std::vector<std::string> file_ids;
stat = db_->QueryByFileID(dummy_context_,
@ -583,7 +583,7 @@ TEST_F(DBTest, SHUTDOWN_TEST) {
ASSERT_FALSE(stat.ok());
stat = db_->Query(dummy_context_,
table_info.collection_id_,
collection_info.collection_id_,
tags,
1,
json_params,
@ -592,19 +592,19 @@ TEST_F(DBTest, SHUTDOWN_TEST) {
result_distances);
ASSERT_FALSE(stat.ok());
stat = db_->DropTable(table_info.collection_id_);
stat = db_->DropCollection(collection_info.collection_id_);
ASSERT_FALSE(stat.ok());
}
TEST_F(DBTest, BACK_TIMER_THREAD_1) {
fiu_init(0);
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
milvus::Status stat;
// test background timer thread
{
FIU_ENABLE_FIU("DBImpl.StartMetricTask.InvalidTotalCache");
FIU_ENABLE_FIU("SqliteMetaImpl.FilesToMerge.throw_exception");
stat = db_->CreateTable(table_info);
stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
// insert some vector to create some tablefiles
@ -633,9 +633,9 @@ TEST_F(DBTest, BACK_TIMER_THREAD_1) {
TEST_F(DBTest, BACK_TIMER_THREAD_2) {
fiu_init(0);
milvus::Status stat;
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
stat = db_->CreateTable(table_info);
stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
// insert some vector to create some tablefiles
@ -648,18 +648,18 @@ TEST_F(DBTest, BACK_TIMER_THREAD_2) {
ASSERT_EQ(xb.id_array_.size(), nb);
}
FIU_ENABLE_FIU("SqliteMetaImpl.CreateTableFile.throw_exception");
FIU_ENABLE_FIU("SqliteMetaImpl.CreateCollectionFile.throw_exception");
std::this_thread::sleep_for(std::chrono::seconds(2));
db_->Stop();
fiu_disable("SqliteMetaImpl.CreateTableFile.throw_exception");
fiu_disable("SqliteMetaImpl.CreateCollectionFile.throw_exception");
}
TEST_F(DBTest, BACK_TIMER_THREAD_3) {
fiu_init(0);
milvus::Status stat;
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
stat = db_->CreateTable(table_info);
stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
// insert some vector to create some tablefiles
@ -682,9 +682,9 @@ TEST_F(DBTest, BACK_TIMER_THREAD_3) {
TEST_F(DBTest, BACK_TIMER_THREAD_4) {
fiu_init(0);
milvus::Status stat;
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
stat = db_->CreateTable(table_info);
stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
// insert some vector to create some tablefiles
@ -705,8 +705,8 @@ TEST_F(DBTest, BACK_TIMER_THREAD_4) {
}
TEST_F(DBTest, INDEX_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
uint64_t nb = VECTOR_COUNT;
milvus::engine::VectorsData xb;
@ -715,48 +715,48 @@ TEST_F(DBTest, INDEX_TEST) {
db_->InsertVectors(TABLE_NAME, "", xb);
ASSERT_EQ(xb.id_array_.size(), nb);
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
index.metric_type_ = (int)milvus::engine::MetricType::IP;
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
fiu_init(0);
FIU_ENABLE_FIU("SqliteMetaImpl.DescribeTableIndex.throw_exception");
stat = db_->CreateIndex(table_info.collection_id_, index);
FIU_ENABLE_FIU("SqliteMetaImpl.DescribeCollectionIndex.throw_exception");
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_FALSE(stat.ok());
fiu_disable("SqliteMetaImpl.DescribeTableIndex.throw_exception");
fiu_disable("SqliteMetaImpl.DescribeCollectionIndex.throw_exception");
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_PQ;
FIU_ENABLE_FIU("DBImpl.UpdateTableIndexRecursively.fail_update_table_index");
stat = db_->CreateIndex(table_info.collection_id_, index);
FIU_ENABLE_FIU("DBImpl.UpdateCollectionIndexRecursively.fail_update_collection_index");
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.UpdateTableIndexRecursively.fail_update_table_index");
fiu_disable("DBImpl.UpdateCollectionIndexRecursively.fail_update_collection_index");
#ifdef MILVUS_GPU_VERSION
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8H;
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
#endif
milvus::engine::TableIndex index_out;
stat = db_->DescribeIndex(table_info.collection_id_, index_out);
milvus::engine::CollectionIndex index_out;
stat = db_->DescribeIndex(collection_info.collection_id_, index_out);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(index.engine_type_, index_out.engine_type_);
ASSERT_EQ(index.extra_params_, index_out.extra_params_);
ASSERT_EQ(table_info.metric_type_, index_out.metric_type_);
ASSERT_EQ(collection_info.metric_type_, index_out.metric_type_);
stat = db_->DropIndex(table_info.collection_id_);
stat = db_->DropIndex(collection_info.collection_id_);
ASSERT_TRUE(stat.ok());
}
TEST_F(DBTest, PARTITION_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
// create partition and insert data
@ -810,44 +810,44 @@ TEST_F(DBTest, PARTITION_TEST) {
std::string special_part = "special";
stat = db_->CreatePartition(collection_name, special_part, special_part);
ASSERT_TRUE(stat.ok());
bool has_table = false;
stat = db_->HasNativeTable(special_part, has_table);
ASSERT_FALSE(has_table);
stat = db_->HasTable(special_part, has_table);
ASSERT_TRUE(has_table);
bool has_collection = false;
stat = db_->HasNativeCollection(special_part, has_collection);
ASSERT_FALSE(has_collection);
stat = db_->HasCollection(special_part, has_collection);
ASSERT_TRUE(has_collection);
{ // build index
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
index.metric_type_ = (int)milvus::engine::MetricType::L2;
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
fiu_init(0);
FIU_ENABLE_FIU("DBImpl.WaitTableIndexRecursively.fail_build_table_Index_for_partition");
stat = db_->CreateIndex(table_info.collection_id_, index);
FIU_ENABLE_FIU("DBImpl.WaitCollectionIndexRecursively.fail_build_collection_Index_for_partition");
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.WaitTableIndexRecursively.fail_build_table_Index_for_partition");
fiu_disable("DBImpl.WaitCollectionIndexRecursively.fail_build_collection_Index_for_partition");
FIU_ENABLE_FIU("DBImpl.WaitTableIndexRecursively.not_empty_err_msg");
stat = db_->CreateIndex(table_info.collection_id_, index);
FIU_ENABLE_FIU("DBImpl.WaitCollectionIndexRecursively.not_empty_err_msg");
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.WaitTableIndexRecursively.not_empty_err_msg");
fiu_disable("DBImpl.WaitCollectionIndexRecursively.not_empty_err_msg");
uint64_t row_count = 0;
stat = db_->GetTableRowCount(TABLE_NAME, row_count);
stat = db_->GetCollectionRowCount(TABLE_NAME, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, INSERT_BATCH * PARTITION_COUNT);
FIU_ENABLE_FIU("SqliteMetaImpl.Count.throw_exception");
stat = db_->GetTableRowCount(TABLE_NAME, row_count);
stat = db_->GetCollectionRowCount(TABLE_NAME, row_count);
ASSERT_FALSE(stat.ok());
fiu_disable("SqliteMetaImpl.Count.throw_exception");
FIU_ENABLE_FIU("DBImpl.GetTableRowCountRecursively.fail_get_table_rowcount_for_partition");
stat = db_->GetTableRowCount(TABLE_NAME, row_count);
FIU_ENABLE_FIU("DBImpl.GetCollectionRowCountRecursively.fail_get_collection_rowcount_for_partition");
stat = db_->GetCollectionRowCount(TABLE_NAME, row_count);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.GetTableRowCountRecursively.fail_get_table_rowcount_for_partition");
fiu_disable("DBImpl.GetCollectionRowCountRecursively.fail_get_collection_rowcount_for_partition");
}
{ // search
@ -890,29 +890,29 @@ TEST_F(DBTest, PARTITION_TEST) {
stat = db_->DropPartitionByTag(collection_name, "1");
ASSERT_TRUE(stat.ok());
FIU_ENABLE_FIU("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition");
stat = db_->DropIndex(table_info.collection_id_);
FIU_ENABLE_FIU("DBImpl.DropCollectionIndexRecursively.fail_drop_collection_Index_for_partition");
stat = db_->DropIndex(collection_info.collection_id_);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition");
fiu_disable("DBImpl.DropCollectionIndexRecursively.fail_drop_collection_Index_for_partition");
FIU_ENABLE_FIU("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition");
stat = db_->DropIndex(table_info.collection_id_);
FIU_ENABLE_FIU("DBImpl.DropCollectionIndexRecursively.fail_drop_collection_Index_for_partition");
stat = db_->DropIndex(collection_info.collection_id_);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.DropTableIndexRecursively.fail_drop_table_Index_for_partition");
fiu_disable("DBImpl.DropCollectionIndexRecursively.fail_drop_collection_Index_for_partition");
stat = db_->DropIndex(collection_name);
ASSERT_TRUE(stat.ok());
stat = db_->DropTable(collection_name);
stat = db_->DropCollection(collection_name);
ASSERT_TRUE(stat.ok());
}
TEST_F(DBTest2, ARHIVE_DISK_CHECK) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
std::vector<milvus::engine::meta::CollectionSchema> table_schema_array;
stat = db_->AllTables(table_schema_array);
stat = db_->AllCollections(table_schema_array);
ASSERT_TRUE(stat.ok());
bool bfound = false;
for (auto& schema : table_schema_array) {
@ -923,11 +923,11 @@ TEST_F(DBTest2, ARHIVE_DISK_CHECK) {
}
ASSERT_TRUE(bfound);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
uint64_t size;
db_->Size(size);
@ -950,17 +950,17 @@ TEST_F(DBTest2, ARHIVE_DISK_CHECK) {
}
TEST_F(DBTest2, DELETE_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
bool has_table = false;
db_->HasTable(TABLE_NAME, has_table);
ASSERT_TRUE(has_table);
bool has_collection = false;
db_->HasCollection(TABLE_NAME, has_collection);
ASSERT_TRUE(has_collection);
uint64_t size;
db_->Size(size);
@ -971,7 +971,7 @@ TEST_F(DBTest2, DELETE_TEST) {
milvus::engine::IDNumbers vector_ids;
stat = db_->InsertVectors(TABLE_NAME, "", xb);
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
stat = db_->CreateIndex(TABLE_NAME, index);
// create partition, drop collection will drop partition recursively
@ -980,24 +980,24 @@ TEST_F(DBTest2, DELETE_TEST) {
// fail drop collection
fiu_init(0);
FIU_ENABLE_FIU("DBImpl.DropTableRecursively.failed");
stat = db_->DropTable(TABLE_NAME);
FIU_ENABLE_FIU("DBImpl.DropCollectionRecursively.failed");
stat = db_->DropCollection(TABLE_NAME);
ASSERT_FALSE(stat.ok());
fiu_disable("DBImpl.DropTableRecursively.failed");
fiu_disable("DBImpl.DropCollectionRecursively.failed");
stat = db_->DropTable(TABLE_NAME);
stat = db_->DropCollection(TABLE_NAME);
std::this_thread::sleep_for(std::chrono::seconds(2));
ASSERT_TRUE(stat.ok());
db_->HasTable(TABLE_NAME, has_table);
ASSERT_FALSE(has_table);
db_->HasCollection(TABLE_NAME, has_collection);
ASSERT_FALSE(has_collection);
}
TEST_F(DBTest2, SHOW_TABLE_INFO_TEST) {
std::string collection_name = TABLE_NAME;
milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema();
auto stat = db_->CreateTable(table_schema);
auto stat = db_->CreateCollection(table_schema);
uint64_t nb = VECTOR_COUNT;
milvus::engine::VectorsData xb;
@ -1025,11 +1025,11 @@ TEST_F(DBTest2, SHOW_TABLE_INFO_TEST) {
ASSERT_TRUE(stat.ok());
{
milvus::engine::TableInfo table_info;
stat = db_->GetTableInfo(collection_name, table_info);
milvus::engine::CollectionInfo collection_info;
stat = db_->GetCollectionInfo(collection_name, collection_info);
ASSERT_TRUE(stat.ok());
int64_t row_count = 0;
for (auto& part : table_info.partitions_stat_) {
for (auto& part : collection_info.partitions_stat_) {
row_count = 0;
for (auto& stat : part.segments_stat_) {
row_count += stat.row_count_;
@ -1046,8 +1046,8 @@ TEST_F(DBTest2, SHOW_TABLE_INFO_TEST) {
}
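// A small sketch of walking the statistics returned by GetCollectionInfo() above:
// row counts live on segments, and segments are grouped under partitions. Field
// names follow the structures exercised in SHOW_TABLE_INFO_TEST; the helper itself
// is illustrative, not part of this commit's API.
static int64_t
TotalRowCount(const milvus::engine::CollectionInfo& collection_info) {
    int64_t total = 0;
    for (const auto& part : collection_info.partitions_stat_) {  // one entry per partition
        for (const auto& seg : part.segments_stat_) {            // one entry per segment
            total += seg.row_count_;
        }
    }
    return total;
}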
TEST_F(DBTestWAL, DB_INSERT_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
uint64_t qb = 100;
@ -1056,34 +1056,34 @@ TEST_F(DBTestWAL, DB_INSERT_TEST) {
std::string partition_name = "part_name";
std::string partition_tag = "part_tag";
stat = db_->CreatePartition(table_info.collection_id_, partition_name, partition_tag);
stat = db_->CreatePartition(collection_info.collection_id_, partition_name, partition_tag);
ASSERT_TRUE(stat.ok());
stat = db_->InsertVectors(table_info.collection_id_, partition_tag, qxb);
stat = db_->InsertVectors(collection_info.collection_id_, partition_tag, qxb);
ASSERT_TRUE(stat.ok());
stat = db_->InsertVectors(table_info.collection_id_, "", qxb);
stat = db_->InsertVectors(collection_info.collection_id_, "", qxb);
ASSERT_TRUE(stat.ok());
stat = db_->InsertVectors(table_info.collection_id_, "not exist", qxb);
stat = db_->InsertVectors(collection_info.collection_id_, "not exist", qxb);
ASSERT_FALSE(stat.ok());
db_->Flush(table_info.collection_id_);
db_->Flush(collection_info.collection_id_);
stat = db_->DropTable(table_info.collection_id_);
stat = db_->DropCollection(collection_info.collection_id_);
ASSERT_TRUE(stat.ok());
}
TEST_F(DBTestWAL, DB_STOP_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
uint64_t qb = 100;
for (int i = 0; i < 5; i++) {
milvus::engine::VectorsData qxb;
BuildVectors(qb, i, qxb);
stat = db_->InsertVectors(table_info.collection_id_, "", qxb);
stat = db_->InsertVectors(collection_info.collection_id_, "", qxb);
ASSERT_TRUE(stat.ok());
}
@ -1098,17 +1098,17 @@ TEST_F(DBTestWAL, DB_STOP_TEST) {
milvus::engine::VectorsData qxb;
BuildVectors(qb, 0, qxb);
stat = db_->Query(dummy_context_,
table_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances);
collection_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size() / topk, qb);
stat = db_->DropTable(table_info.collection_id_);
stat = db_->DropCollection(collection_info.collection_id_);
ASSERT_TRUE(stat.ok());
}
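// Query results come back flattened, which is what the assertion
// result_ids.size() / topk == qb above relies on: with nq query vectors, entry
// i * topk + k is the k-th neighbour of query i. A small accessor sketch (assuming
// <utility> is available; the types follow milvus::engine::ResultIds and
// ResultDistances as used in these tests):
static std::pair<int64_t, float>
NeighbourAt(const milvus::engine::ResultIds& ids,
            const milvus::engine::ResultDistances& distances,
            size_t query_index, size_t topk, size_t k) {
    auto offset = query_index * topk + k;  // row-major: one topk-sized row per query
    return {ids[offset], distances[offset]};
}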
TEST_F(DBTestWALRecovery, RECOVERY_WITH_NO_ERROR) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
uint64_t qb = 100;
@ -1116,7 +1116,7 @@ TEST_F(DBTestWALRecovery, RECOVERY_WITH_NO_ERROR) {
for (int i = 0; i < 5; i++) {
milvus::engine::VectorsData qxb;
BuildVectors(qb, i, qxb);
stat = db_->InsertVectors(table_info.collection_id_, "", qxb);
stat = db_->InsertVectors(collection_info.collection_id_, "", qxb);
ASSERT_TRUE(stat.ok());
}
@ -1128,7 +1128,7 @@ TEST_F(DBTestWALRecovery, RECOVERY_WITH_NO_ERROR) {
milvus::engine::VectorsData qxb;
BuildVectors(qb, 0, qxb);
stat = db_->Query(dummy_context_,
table_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances);
collection_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_NE(result_ids.size() / topk, qb);
@ -1142,7 +1142,7 @@ TEST_F(DBTestWALRecovery, RECOVERY_WITH_NO_ERROR) {
result_ids.clear();
result_distances.clear();
stat = db_->Query(dummy_context_,
table_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances);
collection_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size(), 0);
@ -1150,21 +1150,21 @@ TEST_F(DBTestWALRecovery, RECOVERY_WITH_NO_ERROR) {
result_ids.clear();
result_distances.clear();
stat = db_->Query(dummy_context_,
table_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances);
collection_info.collection_id_, {}, topk, json_params, qxb, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids.size() / topk, qb);
}
TEST_F(DBTestWALRecovery_Error, RECOVERY_WITH_INVALID_LOG_FILE) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
uint64_t qb = 100;
milvus::engine::VectorsData qxb;
BuildVectors(qb, 0, qxb);
stat = db_->InsertVectors(table_info.collection_id_, "", qxb);
stat = db_->InsertVectors(collection_info.collection_id_, "", qxb);
ASSERT_TRUE(stat.ok());
fiu_init(0);
@ -1190,8 +1190,8 @@ TEST_F(DBTest2, GET_VECTOR_NON_EXISTING_TABLE) {
}
TEST_F(DBTest2, GET_VECTOR_BY_ID_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
uint64_t qb = 1000;
@ -1200,13 +1200,13 @@ TEST_F(DBTest2, GET_VECTOR_BY_ID_TEST) {
std::string partition_name = "part_name";
std::string partition_tag = "part_tag";
stat = db_->CreatePartition(table_info.collection_id_, partition_name, partition_tag);
stat = db_->CreatePartition(collection_info.collection_id_, partition_name, partition_tag);
ASSERT_TRUE(stat.ok());
stat = db_->InsertVectors(table_info.collection_id_, partition_tag, qxb);
stat = db_->InsertVectors(collection_info.collection_id_, partition_tag, qxb);
ASSERT_TRUE(stat.ok());
db_->Flush(table_info.collection_id_);
db_->Flush(collection_info.collection_id_);
milvus::engine::VectorsData vector_data;
stat = db_->GetVectorByID(TABLE_NAME, qxb.id_array_[0], vector_data);
@ -1221,7 +1221,7 @@ TEST_F(DBTest2, GET_VECTOR_BY_ID_TEST) {
TEST_F(DBTest2, GET_VECTOR_IDS_TEST) {
milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema();
auto stat = db_->CreateTable(table_schema);
auto stat = db_->CreateCollection(table_schema);
ASSERT_TRUE(stat.ok());
uint64_t BATCH_COUNT = 1000;
@ -1242,13 +1242,13 @@ TEST_F(DBTest2, GET_VECTOR_IDS_TEST) {
db_->Flush();
milvus::engine::TableInfo table_info;
stat = db_->GetTableInfo(TABLE_NAME, table_info);
milvus::engine::CollectionInfo collection_info;
stat = db_->GetCollectionInfo(TABLE_NAME, collection_info);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info.partitions_stat_.size(), 2UL);
ASSERT_EQ(collection_info.partitions_stat_.size(), 2UL);
std::string default_segment = table_info.partitions_stat_[0].segments_stat_[0].name_;
std::string partition_segment = table_info.partitions_stat_[1].segments_stat_[0].name_;
std::string default_segment = collection_info.partitions_stat_[0].segments_stat_[0].name_;
std::string partition_segment = collection_info.partitions_stat_[1].segments_stat_[0].name_;
milvus::engine::IDNumbers vector_ids;
stat = db_->GetVectorIDs(TABLE_NAME, default_segment, vector_ids);
@ -1280,7 +1280,7 @@ TEST_F(DBTest2, INSERT_DUPLICATE_ID) {
db_ = milvus::engine::DBFactory::Build(options);
milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema();
auto stat = db_->CreateTable(table_schema);
auto stat = db_->CreateCollection(table_schema);
ASSERT_TRUE(stat.ok());
uint64_t size = 20;
@ -1300,9 +1300,9 @@ TEST_F(DBTest2, INSERT_DUPLICATE_ID) {
/*
TEST_F(DBTest2, SEARCH_WITH_DIFFERENT_INDEX) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
// table_info.index_file_size_ = 1 * milvus::engine::M;
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
// collection_info.index_file_size_ = 1 * milvus::engine::M;
auto stat = db_->CreateCollection(collection_info);
int loop = 10;
uint64_t nb = 100000;
@ -1326,13 +1326,13 @@ TEST_F(DBTest2, SEARCH_WITH_DIFFERENT_INDEX) {
ids_to_search.emplace_back(index);
}
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
// index.metric_type_ = (int)milvus::engine::MetricType::IP;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
stat = db_->PreloadTable(table_info.collection_id_);
stat = db_->PreloadCollection(collection_info.collection_id_);
ASSERT_TRUE(stat.ok());
int topk = 10, nprobe = 10;
@ -1344,20 +1344,20 @@ TEST_F(DBTest2, SEARCH_WITH_DIFFERENT_INDEX) {
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, id, result_ids,
stat = db_->QueryByID(dummy_context_, collection_info.collection_id_, tags, topk, json_params, id, result_ids,
result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids[0], id);
ASSERT_LT(result_distances[0], 1e-4);
}
db_->DropIndex(table_info.collection_id_);
db_->DropIndex(collection_info.collection_id_);
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
stat = db_->PreloadTable(table_info.collection_id_);
stat = db_->PreloadCollection(collection_info.collection_id_);
ASSERT_TRUE(stat.ok());
for (auto id : ids_to_search) {
@ -1366,7 +1366,7 @@ result_distances);
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, id, result_ids,
stat = db_->QueryByID(dummy_context_, collection_info.collection_id_, tags, topk, json_params, id, result_ids,
result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids[0], id);


@ -31,11 +31,11 @@ static constexpr int64_t INSERT_LOOP = 1000;
milvus::engine::meta::CollectionSchema
BuildTableSchema() {
milvus::engine::meta::CollectionSchema table_info;
table_info.dimension_ = TABLE_DIM;
table_info.collection_id_ = TABLE_NAME;
table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP;
return table_info;
milvus::engine::meta::CollectionSchema collection_info;
collection_info.dimension_ = TABLE_DIM;
collection_info.collection_id_ = TABLE_NAME;
collection_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP;
return collection_info;
}
void
@ -55,14 +55,14 @@ BuildVectors(uint64_t n, uint64_t batch_index, milvus::engine::VectorsData& vect
} // namespace
TEST_F(MySqlDBTest, DB_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
uint64_t qb = 5;
milvus::engine::VectorsData qxb;
@ -134,20 +134,20 @@ TEST_F(MySqlDBTest, DB_TEST) {
search.join();
uint64_t count;
stat = db_->GetTableRowCount(TABLE_NAME, count);
stat = db_->GetCollectionRowCount(TABLE_NAME, count);
ASSERT_TRUE(stat.ok());
ASSERT_GT(count, 0);
}
TEST_F(MySqlDBTest, SEARCH_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
// prepare raw data
size_t nb = VECTOR_COUNT;
@ -196,11 +196,11 @@ TEST_F(MySqlDBTest, SEARCH_TEST) {
}
TEST_F(MySqlDBTest, ARHIVE_DISK_CHECK) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
std::vector<milvus::engine::meta::CollectionSchema> table_schema_array;
stat = db_->AllTables(table_schema_array);
stat = db_->AllCollections(table_schema_array);
ASSERT_TRUE(stat.ok());
bool bfound = false;
for (auto& schema : table_schema_array) {
@ -213,20 +213,20 @@ TEST_F(MySqlDBTest, ARHIVE_DISK_CHECK) {
fiu_init(0);
FIU_ENABLE_FIU("MySQLMetaImpl.AllTable.null_connection");
stat = db_->AllTables(table_schema_array);
stat = db_->AllCollections(table_schema_array);
ASSERT_FALSE(stat.ok());
FIU_ENABLE_FIU("MySQLMetaImpl.AllTable.throw_exception");
stat = db_->AllTables(table_schema_array);
stat = db_->AllCollections(table_schema_array);
ASSERT_FALSE(stat.ok());
fiu_disable("MySQLMetaImpl.AllTable.null_connection");
fiu_disable("MySQLMetaImpl.AllTable.throw_exception");
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
milvus::engine::IDNumbers vector_ids;
milvus::engine::IDNumbers target_ids;
@ -263,18 +263,18 @@ TEST_F(MySqlDBTest, ARHIVE_DISK_CHECK) {
}
TEST_F(MySqlDBTest, DELETE_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
// std::cout << stat.ToString() << std::endl;
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = TABLE_NAME;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
bool has_table = false;
db_->HasTable(TABLE_NAME, has_table);
ASSERT_TRUE(has_table);
bool has_collection = false;
db_->HasCollection(TABLE_NAME, has_collection);
ASSERT_TRUE(has_collection);
milvus::engine::IDNumbers vector_ids;
@ -294,19 +294,19 @@ TEST_F(MySqlDBTest, DELETE_TEST) {
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
stat = db_->DropTable(TABLE_NAME);
stat = db_->DropCollection(TABLE_NAME);
//// std::cout << "5 sec start" << std::endl;
// std::this_thread::sleep_for(std::chrono::seconds(5));
//// std::cout << "5 sec finish" << std::endl;
ASSERT_TRUE(stat.ok());
//
db_->HasTable(TABLE_NAME, has_table);
ASSERT_FALSE(has_table);
db_->HasCollection(TABLE_NAME, has_collection);
ASSERT_FALSE(has_collection);
}
TEST_F(MySqlDBTest, PARTITION_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
// create partition and insert data
@ -359,14 +359,14 @@ TEST_F(MySqlDBTest, PARTITION_TEST) {
}
{ // build index
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
index.metric_type_ = (int)milvus::engine::MetricType::L2;
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
uint64_t row_count = 0;
stat = db_->GetTableRowCount(TABLE_NAME, row_count);
stat = db_->GetCollectionRowCount(TABLE_NAME, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, INSERT_BATCH * PARTITION_COUNT);
}
@ -410,18 +410,18 @@ TEST_F(MySqlDBTest, PARTITION_TEST) {
stat = db_->CreatePartition(collection_name, "", "6");
ASSERT_TRUE(stat.ok());
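// (When CreatePartition() is given an empty partition name, the partition appears
// to become addressable as "<collection>_<tag>"; this is inferred from the
// DropPartition(collection_name + "_5") style calls below, not stated elsewhere.)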
// ensure DescribeTable failed
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTable.throw_exception");
// ensure DescribeCollection failed
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeCollection.throw_exception");
stat = db_->CreatePartition(collection_name, "", "7");
ASSERT_FALSE(stat.ok());
fiu_disable("MySQLMetaImpl.DescribeTable.throw_exception");
fiu_disable("MySQLMetaImpl.DescribeCollection.throw_exception");
// Drop partition will fail, since it first drops the partition meta collection.
FIU_ENABLE_FIU("MySQLMetaImpl.DropTable.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.DropCollection.null_connection");
stat = db_->DropPartition(collection_name + "_5");
// TODO(sjh): add assert expr, since DropPartition always returns Status::OK() for now.
//ASSERT_TRUE(stat.ok());
fiu_disable("MySQLMetaImpl.DropTable.null_connection");
fiu_disable("MySQLMetaImpl.DropCollection.null_connection");
std::vector<milvus::engine::meta::CollectionSchema> partition_schema_array;
stat = db_->ShowPartitions(collection_name, partition_schema_array);
@ -436,9 +436,9 @@ TEST_F(MySqlDBTest, PARTITION_TEST) {
stat = db_->ShowPartitions(collection_name, partition_schema_array);
ASSERT_FALSE(stat.ok());
FIU_ENABLE_FIU("MySQLMetaImpl.DropTable.throw_exception");
FIU_ENABLE_FIU("MySQLMetaImpl.DropCollection.throw_exception");
stat = db_->DropPartition(collection_name + "_4");
fiu_disable("MySQLMetaImpl.DropTable.throw_exception");
fiu_disable("MySQLMetaImpl.DropCollection.throw_exception");
stat = db_->DropPartition(collection_name + "_0");
ASSERT_TRUE(stat.ok());
@ -469,15 +469,15 @@ TEST_F(MySqlDBTest, PARTITION_TEST) {
}
{
FIU_ENABLE_FIU("MySQLMetaImpl.DropTableIndex.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.DropCollectionIndex.null_connection");
stat = db_->DropIndex(collection_name);
ASSERT_FALSE(stat.ok());
fiu_disable("MySQLMetaImpl.DropTableIndex.null_connection");
fiu_disable("MySQLMetaImpl.DropCollectionIndex.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.DropTableIndex.throw_exception");
FIU_ENABLE_FIU("MySQLMetaImpl.DropCollectionIndex.throw_exception");
stat = db_->DropIndex(collection_name);
ASSERT_FALSE(stat.ok());
fiu_disable("MySQLMetaImpl.DropTableIndex.throw_exception");
fiu_disable("MySQLMetaImpl.DropCollectionIndex.throw_exception");
stat = db_->DropIndex(collection_name);
ASSERT_TRUE(stat.ok());


@ -43,12 +43,12 @@ GetTableName() {
milvus::engine::meta::CollectionSchema
BuildTableSchema() {
milvus::engine::meta::CollectionSchema table_info;
table_info.dimension_ = TABLE_DIM;
table_info.collection_id_ = GetTableName();
table_info.metric_type_ = (int32_t)milvus::engine::MetricType::L2;
table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP;
return table_info;
milvus::engine::meta::CollectionSchema collection_info;
collection_info.dimension_ = TABLE_DIM;
collection_info.collection_id_ = GetTableName();
collection_info.metric_type_ = (int32_t)milvus::engine::MetricType::L2;
collection_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP;
return collection_info;
}
void
@ -64,14 +64,14 @@ BuildVectors(uint64_t n, milvus::engine::VectorsData& vectors) {
} // namespace
TEST_F(DeleteTest, delete_in_mem) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 100000;
milvus::engine::VectorsData xb;
@ -81,7 +81,7 @@ TEST_F(DeleteTest, delete_in_mem) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -105,7 +105,7 @@ TEST_F(DeleteTest, delete_in_mem) {
ids_to_delete.emplace_back(kv.first);
}
stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, ids_to_delete);
ASSERT_TRUE(stat.ok());
// std::this_thread::sleep_for(std::chrono::seconds(3)); // ensure raw data write to disk
@ -113,7 +113,7 @@ TEST_F(DeleteTest, delete_in_mem) {
ASSERT_TRUE(stat.ok());
uint64_t row_count;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, nb - search_vectors.size());
@ -124,7 +124,7 @@ TEST_F(DeleteTest, delete_in_mem) {
std::vector<std::string> tags;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(dummy_context_, table_info.collection_id_, tags, topk,
stat = db_->Query(dummy_context_, collection_info.collection_id_, tags, topk,
{{"nprobe", nprobe}}, search, result_ids, result_distances);
ASSERT_NE(result_ids[0], pair.first);
// ASSERT_LT(result_distances[0], 1e-4);
@ -133,14 +133,14 @@ TEST_F(DeleteTest, delete_in_mem) {
}
TEST_F(DeleteTest, delete_on_disk) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 100000;
milvus::engine::VectorsData xb;
@ -150,7 +150,7 @@ TEST_F(DeleteTest, delete_on_disk) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -174,7 +174,7 @@ TEST_F(DeleteTest, delete_on_disk) {
ASSERT_TRUE(stat.ok());
for (auto& kv : search_vectors) {
stat = db_->DeleteVector(table_info.collection_id_, kv.first);
stat = db_->DeleteVector(collection_info.collection_id_, kv.first);
ASSERT_TRUE(stat.ok());
}
@ -182,7 +182,7 @@ TEST_F(DeleteTest, delete_on_disk) {
ASSERT_TRUE(stat.ok());
uint64_t row_count;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, nb - search_vectors.size());
@ -194,7 +194,7 @@ TEST_F(DeleteTest, delete_on_disk) {
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(dummy_context_,
table_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances);
collection_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances);
ASSERT_NE(result_ids[0], pair.first);
// ASSERT_LT(result_distances[0], 1e-4);
ASSERT_GT(result_distances[0], 1);
@ -202,14 +202,14 @@ TEST_F(DeleteTest, delete_on_disk) {
}
TEST_F(DeleteTest, delete_multiple_times) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 100000;
milvus::engine::VectorsData xb;
@ -219,7 +219,7 @@ TEST_F(DeleteTest, delete_multiple_times) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -245,7 +245,7 @@ TEST_F(DeleteTest, delete_multiple_times) {
int topk = 10, nprobe = 10;
for (auto& pair : search_vectors) {
std::vector<int64_t> to_delete{pair.first};
stat = db_->DeleteVectors(table_info.collection_id_, to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, to_delete);
ASSERT_TRUE(stat.ok());
stat = db_->Flush();
@ -257,7 +257,7 @@ TEST_F(DeleteTest, delete_multiple_times) {
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(dummy_context_,
table_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances);
collection_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances);
ASSERT_NE(result_ids[0], pair.first);
// ASSERT_LT(result_distances[0], 1e-4);
ASSERT_GT(result_distances[0], 1);
@ -265,15 +265,15 @@ TEST_F(DeleteTest, delete_multiple_times) {
}
TEST_F(DeleteTest, delete_before_create_index) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
table_info.engine_type_ = (int32_t)milvus::engine::EngineType::FAISS_IVFFLAT;
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
collection_info.engine_type_ = (int32_t)milvus::engine::EngineType::FAISS_IVFFLAT;
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 10000;
milvus::engine::VectorsData xb;
@ -283,7 +283,7 @@ TEST_F(DeleteTest, delete_before_create_index) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
stat = db_->Flush();
@ -309,19 +309,19 @@ TEST_F(DeleteTest, delete_before_create_index) {
for (auto& kv : search_vectors) {
ids_to_delete.emplace_back(kv.first);
}
stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, ids_to_delete);
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
index.extra_params_ = {{"nlist", 100}};
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
uint64_t row_count;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, nb - ids_to_delete.size());
@ -333,7 +333,7 @@ TEST_F(DeleteTest, delete_before_create_index) {
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(dummy_context_,
table_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances);
collection_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances);
ASSERT_NE(result_ids[0], pair.first);
// ASSERT_LT(result_distances[0], 1e-4);
ASSERT_GT(result_distances[0], 1);
@ -341,15 +341,15 @@ TEST_F(DeleteTest, delete_before_create_index) {
}
TEST_F(DeleteTest, delete_with_index) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
table_info.engine_type_ = (int32_t)milvus::engine::EngineType::FAISS_IVFFLAT;
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
collection_info.engine_type_ = (int32_t)milvus::engine::EngineType::FAISS_IVFFLAT;
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 10000;
milvus::engine::VectorsData xb;
@ -359,7 +359,7 @@ TEST_F(DeleteTest, delete_with_index) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -378,10 +378,10 @@ TEST_F(DeleteTest, delete_with_index) {
search_vectors.insert(std::make_pair(xb.id_array_[index], search));
}
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
index.extra_params_ = {{"nlist", 100}};
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
// std::this_thread::sleep_for(std::chrono::seconds(3)); // ensure raw data write to disk
@ -392,13 +392,13 @@ TEST_F(DeleteTest, delete_with_index) {
for (auto& kv : search_vectors) {
ids_to_delete.emplace_back(kv.first);
}
stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, ids_to_delete);
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
uint64_t row_count;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, nb - ids_to_delete.size());
@ -410,7 +410,7 @@ TEST_F(DeleteTest, delete_with_index) {
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(dummy_context_,
table_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances);
collection_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances);
ASSERT_NE(result_ids[0], pair.first);
// ASSERT_LT(result_distances[0], 1e-4);
ASSERT_GT(result_distances[0], 1);
@ -418,14 +418,14 @@ TEST_F(DeleteTest, delete_with_index) {
}
TEST_F(DeleteTest, delete_multiple_times_with_index) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 100000;
milvus::engine::VectorsData xb;
@ -435,7 +435,7 @@ TEST_F(DeleteTest, delete_multiple_times_with_index) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -458,17 +458,17 @@ TEST_F(DeleteTest, delete_multiple_times_with_index) {
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
index.extra_params_ = {{"nlist", 1}};
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
int topk = 10, nprobe = 10;
int deleted = 0;
for (auto& pair : search_vectors) {
std::vector<int64_t> to_delete{pair.first};
stat = db_->DeleteVectors(table_info.collection_id_, to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, to_delete);
ASSERT_TRUE(stat.ok());
stat = db_->Flush();
@ -477,7 +477,7 @@ TEST_F(DeleteTest, delete_multiple_times_with_index) {
++deleted;
uint64_t row_count;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, nb - deleted);
@ -487,7 +487,7 @@ TEST_F(DeleteTest, delete_multiple_times_with_index) {
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(dummy_context_,
table_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances);
collection_info.collection_id_, tags, topk, {{"nprobe", nprobe}}, search, result_ids, result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_NE(result_ids[0], pair.first);
// ASSERT_LT(result_distances[0], 1e-4);
@ -496,34 +496,34 @@ TEST_F(DeleteTest, delete_multiple_times_with_index) {
}
TEST_F(DeleteTest, delete_single_vector) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 1;
milvus::engine::VectorsData xb;
BuildVectors(nb, xb);
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
// std::this_thread::sleep_for(std::chrono::seconds(3)); // ensure raw data write to disk
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
stat = db_->DeleteVectors(table_info.collection_id_, xb.id_array_);
stat = db_->DeleteVectors(collection_info.collection_id_, xb.id_array_);
ASSERT_TRUE(stat.ok());
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
uint64_t row_count;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, 0);
@ -534,7 +534,7 @@ TEST_F(DeleteTest, delete_single_vector) {
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(dummy_context_,
table_info.collection_id_, tags, topk, json_params, xb, result_ids, result_distances);
collection_info.collection_id_, tags, topk, json_params, xb, result_ids, result_distances);
ASSERT_TRUE(result_ids.empty());
ASSERT_TRUE(result_distances.empty());
// ASSERT_EQ(result_ids[0], -1);
@ -543,48 +543,48 @@ TEST_F(DeleteTest, delete_single_vector) {
}
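// Note the shape of a miss here: after deleting the collection's only vector,
// Query() returns empty id/distance arrays, whereas per-id probes via QueryByID
// elsewhere in this file report a miss with the -1 / max-float sentinel pair
// (see the commented-out assertions above).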
TEST_F(DeleteTest, delete_add_create_index) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 3000;
milvus::engine::VectorsData xb;
BuildVectors(nb, xb);
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
// stat = db_->Flush();
// ASSERT_TRUE(stat.ok());
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
index.extra_params_ = {{"nlist", 100}};
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
std::vector<milvus::engine::IDNumber> ids_to_delete;
ids_to_delete.emplace_back(xb.id_array_.front());
stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, ids_to_delete);
ASSERT_TRUE(stat.ok());
milvus::engine::VectorsData xb2 = xb;
xb2.id_array_.clear(); // same vector, different id
stat = db_->InsertVectors(table_info.collection_id_, "", xb2);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb2);
ASSERT_TRUE(stat.ok());
// stat = db_->Flush();
// ASSERT_TRUE(stat.ok());
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
uint64_t row_count;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, nb * 2 - 1);
@ -599,64 +599,64 @@ TEST_F(DeleteTest, delete_add_create_index) {
qb.vector_count_ = 1;
qb.id_array_.clear();
stat = db_->Query(dummy_context_,
table_info.collection_id_, tags, topk, json_params, qb, result_ids, result_distances);
collection_info.collection_id_, tags, topk, json_params, qb, result_ids, result_distances);
ASSERT_EQ(result_ids[0], xb2.id_array_.front());
ASSERT_LT(result_distances[0], 1e-4);
result_ids.clear();
result_distances.clear();
stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, ids_to_delete.front(),
result_ids, result_distances);
stat = db_->QueryByID(dummy_context_, collection_info.collection_id_, tags, topk,
json_params, ids_to_delete.front(), result_ids, result_distances);
ASSERT_EQ(result_ids[0], -1);
ASSERT_EQ(result_distances[0], std::numeric_limits<float>::max());
}
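// QueryByID reports a per-id miss (deleted or never-inserted id) through sentinel
// values rather than an error status: id -1 paired with the maximum float distance,
// as asserted above. A tiny predicate capturing that convention (a sketch, assuming
// <limits> is already included, as the assertions suggest):
static bool
IsQueryMiss(const milvus::engine::ResultIds& ids,
            const milvus::engine::ResultDistances& distances) {
    return !ids.empty() && ids[0] == -1 &&
           distances[0] == std::numeric_limits<float>::max();
}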
TEST_F(DeleteTest, delete_add_auto_flush) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 3000;
milvus::engine::VectorsData xb;
BuildVectors(nb, xb);
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::this_thread::sleep_for(std::chrono::seconds(2));
// stat = db_->Flush();
// ASSERT_TRUE(stat.ok());
// milvus::engine::TableIndex index;
// milvus::engine::CollectionIndex index;
// index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
// stat = db_->CreateIndex(table_info.collection_id_, index);
// stat = db_->CreateIndex(collection_info.collection_id_, index);
// ASSERT_TRUE(stat.ok());
std::vector<milvus::engine::IDNumber> ids_to_delete;
ids_to_delete.emplace_back(xb.id_array_.front());
stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, ids_to_delete);
ASSERT_TRUE(stat.ok());
milvus::engine::VectorsData xb2 = xb;
xb2.id_array_.clear(); // same vector, different id
stat = db_->InsertVectors(table_info.collection_id_, "", xb2);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb2);
ASSERT_TRUE(stat.ok());
std::this_thread::sleep_for(std::chrono::seconds(2));
// stat = db_->Flush();
// ASSERT_TRUE(stat.ok());
// stat = db_->CreateIndex(table_info.collection_id_, index);
// stat = db_->CreateIndex(collection_info.collection_id_, index);
// ASSERT_TRUE(stat.ok());
uint64_t row_count;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, nb * 2 - 1);
@ -671,7 +671,7 @@ TEST_F(DeleteTest, delete_add_auto_flush) {
qb.vector_count_ = 1;
qb.id_array_.clear();
stat = db_->Query(dummy_context_,
table_info.collection_id_, tags, topk, json_params, qb, result_ids, result_distances);
collection_info.collection_id_, tags, topk, json_params, qb, result_ids, result_distances);
ASSERT_EQ(result_ids[0], xb2.id_array_.front());
ASSERT_LT(result_distances[0], 1e-4);
@ -679,27 +679,27 @@ TEST_F(DeleteTest, delete_add_auto_flush) {
result_ids.clear();
result_distances.clear();
stat = db_->QueryByID(dummy_context_,
table_info.collection_id_, tags, topk, {{"nprobe", nprobe}},
collection_info.collection_id_, tags, topk, {{"nprobe", nprobe}},
ids_to_delete.front(), result_ids, result_distances);
ASSERT_EQ(result_ids[0], -1);
ASSERT_EQ(result_distances[0], std::numeric_limits<float>::max());
}
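// delete_add_auto_flush exercises the background auto-flush path: instead of the
// explicit Flush()/CreateIndex() calls (left commented out above), the test sleeps
// past the periodic flush interval so inserts and deletes reach disk on their own.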
TEST_F(CompactTest, compact_basic) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 100;
milvus::engine::VectorsData xb;
BuildVectors(nb, xb);
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
stat = db_->Flush();
@ -708,18 +708,18 @@ TEST_F(CompactTest, compact_basic) {
std::vector<milvus::engine::IDNumber> ids_to_delete;
ids_to_delete.emplace_back(xb.id_array_.front());
ids_to_delete.emplace_back(xb.id_array_.back());
stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, ids_to_delete);
ASSERT_TRUE(stat.ok());
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
uint64_t row_count;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, nb - 2);
stat = db_->Compact(table_info.collection_id_);
stat = db_->Compact(collection_info.collection_id_);
ASSERT_TRUE(stat.ok());
const int topk = 1, nprobe = 1;
@ -731,7 +731,7 @@ TEST_F(CompactTest, compact_basic) {
milvus::engine::VectorsData qb = xb;
for (auto& id : ids_to_delete) {
stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, id, result_ids,
stat = db_->QueryByID(dummy_context_, collection_info.collection_id_, tags, topk, json_params, id, result_ids,
result_distances);
ASSERT_EQ(result_ids[0], -1);
ASSERT_EQ(result_distances[0], std::numeric_limits<float>::max());
@ -739,16 +739,16 @@ TEST_F(CompactTest, compact_basic) {
}
TEST_F(CompactTest, compact_with_index) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
table_info.index_file_size_ = milvus::engine::ONE_KB;
table_info.engine_type_ = (int32_t)milvus::engine::EngineType::FAISS_IVFSQ8;
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
collection_info.index_file_size_ = milvus::engine::ONE_KB;
collection_info.engine_type_ = (int32_t)milvus::engine::EngineType::FAISS_IVFSQ8;
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 3000;
milvus::engine::VectorsData xb;
@ -759,7 +759,7 @@ TEST_F(CompactTest, compact_with_index) {
xb.id_array_.emplace_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -778,9 +778,9 @@ TEST_F(CompactTest, compact_with_index) {
search_vectors.insert(std::make_pair(xb.id_array_[index], search));
}
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
stat = db_->Flush();
@ -790,25 +790,25 @@ TEST_F(CompactTest, compact_with_index) {
for (auto& kv : search_vectors) {
ids_to_delete.emplace_back(kv.first);
}
stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, ids_to_delete);
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
uint64_t row_count;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, nb - ids_to_delete.size());
stat = db_->Compact(table_info.collection_id_);
stat = db_->Compact(collection_info.collection_id_);
ASSERT_TRUE(stat.ok());
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, nb - ids_to_delete.size());
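// As the repeated assertion suggests, Compact() reclaims the space held by
// soft-deleted vectors without changing the visible row count;
// GetCollectionRowCount excludes deleted entries both before and after compaction.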
milvus::engine::TableIndex table_index;
stat = db_->DescribeIndex(table_info.collection_id_, table_index);
milvus::engine::CollectionIndex table_index;
stat = db_->DescribeIndex(collection_info.collection_id_, table_index);
ASSERT_TRUE(stat.ok());
ASSERT_FLOAT_EQ(table_index.engine_type_, index.engine_type_);
@ -821,7 +821,7 @@ TEST_F(CompactTest, compact_with_index) {
std::vector<std::string> tags;
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->Query(dummy_context_, table_info.collection_id_, tags, topk, json_params, search, result_ids,
stat = db_->Query(dummy_context_, collection_info.collection_id_, tags, topk, json_params, search, result_ids,
result_distances);
ASSERT_NE(result_ids[0], pair.first);
// ASSERT_LT(result_distances[0], 1e-4);


@ -45,11 +45,11 @@ GetTableName() {
milvus::engine::meta::CollectionSchema
BuildTableSchema() {
milvus::engine::meta::CollectionSchema table_info;
table_info.dimension_ = TABLE_DIM;
table_info.collection_id_ = GetTableName();
table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP;
return table_info;
milvus::engine::meta::CollectionSchema collection_info;
collection_info.dimension_ = TABLE_DIM;
collection_info.collection_id_ = GetTableName();
collection_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IDMAP;
return collection_info;
}
void
@ -66,12 +66,12 @@ BuildVectors(uint64_t n, milvus::engine::VectorsData& vectors) {
TEST_F(MemManagerTest, VECTOR_SOURCE_TEST) {
milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema();
auto status = impl_->CreateTable(table_schema);
auto status = impl_->CreateCollection(table_schema);
ASSERT_TRUE(status.ok());
milvus::engine::meta::SegmentSchema table_file_schema;
table_file_schema.collection_id_ = GetTableName();
status = impl_->CreateTableFile(table_file_schema);
status = impl_->CreateCollectionFile(table_file_schema);
ASSERT_TRUE(status.ok());
int64_t n = 100;
@ -114,7 +114,7 @@ TEST_F(MemManagerTest, MEM_TABLE_FILE_TEST) {
fiu_init(0);
milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema();
auto status = impl_->CreateTable(table_schema);
auto status = impl_->CreateCollection(table_schema);
ASSERT_TRUE(status.ok());
milvus::engine::MemTableFile mem_table_file(GetTableName(), impl_, options);
@ -151,9 +151,9 @@ TEST_F(MemManagerTest, MEM_TABLE_FILE_TEST) {
{
// test failure to create a collection file
FIU_ENABLE_FIU("SqliteMetaImpl.CreateTableFile.throw_exception");
FIU_ENABLE_FIU("SqliteMetaImpl.CreateCollectionFile.throw_exception");
milvus::engine::MemTableFile mem_table_file_1(GetTableName(), impl_, options);
fiu_disable("SqliteMetaImpl.CreateTableFile.throw_exception");
fiu_disable("SqliteMetaImpl.CreateCollectionFile.throw_exception");
status = mem_table_file_1.Add(source);
ASSERT_FALSE(status.ok());
@ -165,7 +165,7 @@ TEST_F(MemManagerTest, MEM_TABLE_FILE_TEST) {
milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema();
table_schema.collection_id_ = "faiss_pq";
table_schema.engine_type_ = (int)milvus::engine::EngineType::FAISS_PQ;
auto status = impl_->CreateTable(table_schema);
auto status = impl_->CreateCollection(table_schema);
ASSERT_TRUE(status.ok());
milvus::engine::MemTableFile mem_table_file_1("faiss_pq", impl_, options);
@ -177,7 +177,7 @@ TEST_F(MemManagerTest, MEM_TABLE_TEST) {
auto options = GetOptions();
milvus::engine::meta::CollectionSchema table_schema = BuildTableSchema();
auto status = impl_->CreateTable(table_schema);
auto status = impl_->CreateCollection(table_schema);
ASSERT_TRUE(status.ok());
int64_t n_100 = 100;
@ -238,21 +238,21 @@ TEST_F(MemManagerTest, MEM_TABLE_TEST) {
status = mem_table.Add(source_10);
ASSERT_TRUE(status.ok());
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateTableFile.throw_exception");
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateCollectionFile.throw_exception");
status = mem_table.Serialize(0);
ASSERT_FALSE(status.ok());
fiu_disable("SqliteMetaImpl.UpdateTableFile.throw_exception");
fiu_disable("SqliteMetaImpl.UpdateCollectionFile.throw_exception");
}
TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = GetTableName();
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = GetTableName();
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 100000;
milvus::engine::VectorsData xb;
@ -302,14 +302,14 @@ TEST_F(MemManagerTest2, SERIAL_INSERT_SEARCH_TEST) {
}
TEST_F(MemManagerTest2, INSERT_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = GetTableName();
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = GetTableName();
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
auto start_time = METRICS_NOW_TIME;
@ -328,19 +328,19 @@ TEST_F(MemManagerTest2, INSERT_TEST) {
}
TEST_F(MemManagerTest2, INSERT_BINARY_TEST) {
milvus::engine::meta::CollectionSchema table_info;
table_info.dimension_ = TABLE_DIM;
table_info.collection_id_ = GetTableName();
table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_BIN_IDMAP;
table_info.metric_type_ = (int32_t)milvus::engine::MetricType::JACCARD;
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info;
collection_info.dimension_ = TABLE_DIM;
collection_info.collection_id_ = GetTableName();
collection_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_BIN_IDMAP;
collection_info.metric_type_ = (int32_t)milvus::engine::MetricType::JACCARD;
auto stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = GetTableName();
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = GetTableName();
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int insert_loop = 10;
for (int k = 0; k < insert_loop; ++k) {
@ -363,14 +363,14 @@ TEST_F(MemManagerTest2, INSERT_BINARY_TEST) {
}
}
// TEST_F(MemManagerTest2, CONCURRENT_INSERT_SEARCH_TEST) {
// milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
// auto stat = db_->CreateTable(table_info);
// milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
// auto stat = db_->CreateCollection(collection_info);
//
// milvus::engine::meta::CollectionSchema table_info_get;
// table_info_get.collection_id_ = GetTableName();
// stat = db_->DescribeTable(table_info_get);
// milvus::engine::meta::CollectionSchema collection_info_get;
// collection_info_get.collection_id_ = GetTableName();
// stat = db_->DescribeCollection(collection_info_get);
// ASSERT_TRUE(stat.ok());
// ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
// ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
//
// int64_t nb = 40960;
// milvus::engine::VectorsData xb;
@ -439,14 +439,14 @@ TEST_F(MemManagerTest2, INSERT_BINARY_TEST) {
//}
TEST_F(MemManagerTest2, VECTOR_IDS_TEST) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = GetTableName();
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = GetTableName();
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 100000;
milvus::engine::VectorsData xb;
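
Taken together, the MemManagerTest2 hunks apply one mechanical rewrite: the local table_info* variables become collection_info*, and the DB facade moves from CreateTable/DescribeTable to CreateCollection/DescribeCollection with otherwise unchanged signatures. A hedged sketch of the create-and-verify prologue every test here repeats, assuming the fixture's db_, BuildTableSchema() and TABLE_DIM:

    // Post-rename prologue: create a collection, read its schema back, check it.
    milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
    auto stat = db_->CreateCollection(collection_info);           // was CreateTable

    milvus::engine::meta::CollectionSchema collection_info_get;
    collection_info_get.collection_id_ = collection_info.collection_id_;
    stat = db_->DescribeCollection(collection_info_get);          // was DescribeTable
    ASSERT_TRUE(stat.ok());
    ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);         // schema round-trips
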

View File

@ -29,32 +29,32 @@ TEST_F(MetaTest, TABLE_TEST) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
auto status = impl_->CreateTable(collection);
auto status = impl_->CreateCollection(collection);
ASSERT_TRUE(status.ok());
auto gid = collection.id_;
collection.id_ = -1;
status = impl_->DescribeTable(collection);
status = impl_->DescribeCollection(collection);
ASSERT_TRUE(status.ok());
ASSERT_EQ(collection.id_, gid);
ASSERT_EQ(collection.collection_id_, collection_id);
collection.collection_id_ = "not_found";
status = impl_->DescribeTable(collection);
status = impl_->DescribeCollection(collection);
ASSERT_TRUE(!status.ok());
collection.collection_id_ = collection_id;
status = impl_->CreateTable(collection);
status = impl_->CreateCollection(collection);
ASSERT_EQ(status.code(), milvus::DB_ALREADY_EXIST);
status = impl_->DropTable(collection.collection_id_);
status = impl_->DropCollection(collection.collection_id_);
ASSERT_TRUE(status.ok());
status = impl_->CreateTable(collection);
status = impl_->CreateCollection(collection);
ASSERT_EQ(status.code(), milvus::DB_ERROR);
collection.collection_id_ = "";
status = impl_->CreateTable(collection);
status = impl_->CreateCollection(collection);
ASSERT_TRUE(status.ok());
}
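
The assertions above pin down the CreateCollection status contract the rename must preserve: the first create succeeds, re-creating the same id returns DB_ALREADY_EXIST, re-creating an id that was just dropped returns DB_ERROR (the drop is, presumably, a soft delete that still holds the id), and an empty collection_id_ is accepted with an id generated for it. Restated compactly with a hypothetical id:

    milvus::engine::meta::CollectionSchema c;
    c.collection_id_ = "demo_collection";          // hypothetical id
    auto s = impl_->CreateCollection(c);           // ok on first create
    s = impl_->CreateCollection(c);                // DB_ALREADY_EXIST
    s = impl_->DropCollection(c.collection_id_);   // ok
    s = impl_->CreateCollection(c);                // DB_ERROR: id still held by the dropped row
    c.collection_id_ = "";
    s = impl_->CreateCollection(c);                // ok: an id is generated
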
@ -86,61 +86,61 @@ TEST_F(MetaTest, FALID_TEST) {
boost::filesystem::remove_all(options_1.meta_.path_);
}
{
FIU_ENABLE_FIU("SqliteMetaImpl.CreateTable.throw_exception");
status = impl_->CreateTable(collection);
FIU_ENABLE_FIU("SqliteMetaImpl.CreateCollection.throw_exception");
status = impl_->CreateCollection(collection);
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED);
fiu_disable("SqliteMetaImpl.CreateTable.throw_exception");
fiu_disable("SqliteMetaImpl.CreateCollection.throw_exception");
FIU_ENABLE_FIU("SqliteMetaImpl.CreateTable.insert_throw_exception");
FIU_ENABLE_FIU("SqliteMetaImpl.CreateCollection.insert_throw_exception");
collection.collection_id_ = "";
status = impl_->CreateTable(collection);
status = impl_->CreateCollection(collection);
ASSERT_FALSE(status.ok());
fiu_disable("SqliteMetaImpl.CreateTable.insert_throw_exception");
fiu_disable("SqliteMetaImpl.CreateCollection.insert_throw_exception");
// successfully create collection
collection.collection_id_ = collection_id;
status = impl_->CreateTable(collection);
status = impl_->CreateCollection(collection);
ASSERT_TRUE(status.ok());
}
{
FIU_ENABLE_FIU("SqliteMetaImpl.DescribeTable.throw_exception");
status = impl_->DescribeTable(collection);
FIU_ENABLE_FIU("SqliteMetaImpl.DescribeCollection.throw_exception");
status = impl_->DescribeCollection(collection);
ASSERT_FALSE(status.ok());
fiu_disable("SqliteMetaImpl.DescribeTable.throw_exception");
fiu_disable("SqliteMetaImpl.DescribeCollection.throw_exception");
}
{
FIU_ENABLE_FIU("SqliteMetaImpl.HasTable.throw_exception");
FIU_ENABLE_FIU("SqliteMetaImpl.HasCollection.throw_exception");
bool has = false;
status = impl_->HasTable(collection.collection_id_, has);
status = impl_->HasCollection(collection.collection_id_, has);
ASSERT_FALSE(status.ok());
ASSERT_FALSE(has);
fiu_disable("SqliteMetaImpl.HasTable.throw_exception");
fiu_disable("SqliteMetaImpl.HasCollection.throw_exception");
}
{
FIU_ENABLE_FIU("SqliteMetaImpl.AllTables.throw_exception");
FIU_ENABLE_FIU("SqliteMetaImpl.AllCollections.throw_exception");
std::vector<milvus::engine::meta::CollectionSchema> table_schema_array;
status = impl_->AllTables(table_schema_array);
status = impl_->AllCollections(table_schema_array);
ASSERT_FALSE(status.ok());
fiu_disable("SqliteMetaImpl.AllTables.throw_exception");
fiu_disable("SqliteMetaImpl.AllCollections.throw_exception");
}
{
FIU_ENABLE_FIU("SqliteMetaImpl.DropTable.throw_exception");
status = impl_->DropTable(collection.collection_id_);
FIU_ENABLE_FIU("SqliteMetaImpl.DropCollection.throw_exception");
status = impl_->DropCollection(collection.collection_id_);
ASSERT_FALSE(status.ok());
fiu_disable("SqliteMetaImpl.DropTable.throw_exception");
fiu_disable("SqliteMetaImpl.DropCollection.throw_exception");
}
{
milvus::engine::meta::SegmentSchema schema;
schema.collection_id_ = "notexist";
status = impl_->CreateTableFile(schema);
status = impl_->CreateCollectionFile(schema);
ASSERT_FALSE(status.ok());
FIU_ENABLE_FIU("SqliteMetaImpl.CreateTableFile.throw_exception");
FIU_ENABLE_FIU("SqliteMetaImpl.CreateCollectionFile.throw_exception");
schema.collection_id_ = collection_id;
status = impl_->CreateTableFile(schema);
status = impl_->CreateCollectionFile(schema);
ASSERT_FALSE(status.ok());
fiu_disable("SqliteMetaImpl.CreateTableFile.throw_exception");
fiu_disable("SqliteMetaImpl.CreateCollectionFile.throw_exception");
}
{
FIU_ENABLE_FIU("SqliteMetaImpl.DeleteTableFiles.throw_exception");
@ -161,22 +161,22 @@ TEST_F(MetaTest, FALID_TEST) {
fiu_disable("SqliteMetaImpl.GetTableFiles.throw_exception");
}
{
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateTableFlag.throw_exception");
status = impl_->UpdateTableFlag(collection_id, 0);
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateCollectionFlag.throw_exception");
status = impl_->UpdateCollectionFlag(collection_id, 0);
ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED);
fiu_disable("SqliteMetaImpl.UpdateTableFlag.throw_exception");
fiu_disable("SqliteMetaImpl.UpdateCollectionFlag.throw_exception");
}
{
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateTableFile.throw_exception");
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateCollectionFile.throw_exception");
milvus::engine::meta::SegmentSchema schema;
schema.collection_id_ = collection_id;
status = impl_->UpdateTableFile(schema);
status = impl_->UpdateCollectionFile(schema);
ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED);
fiu_disable("SqliteMetaImpl.UpdateTableFile.throw_exception");
fiu_disable("SqliteMetaImpl.UpdateCollectionFile.throw_exception");
schema = {};
schema.collection_id_ = "notexist";
status = impl_->UpdateTableFile(schema);
status = impl_->UpdateCollectionFile(schema);
ASSERT_TRUE(status.ok());
}
{
@ -184,45 +184,45 @@ TEST_F(MetaTest, FALID_TEST) {
milvus::engine::meta::SegmentSchema schema;
schema.collection_id_ = "notexits";
schemas.emplace_back(schema);
status = impl_->UpdateTableFiles(schemas);
status = impl_->UpdateCollectionFiles(schemas);
ASSERT_TRUE(status.ok());
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateTableFiles.throw_exception");
status = impl_->UpdateTableFiles(schemas);
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateCollectionFiles.throw_exception");
status = impl_->UpdateCollectionFiles(schemas);
ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED);
fiu_disable("SqliteMetaImpl.UpdateTableFiles.throw_exception");
fiu_disable("SqliteMetaImpl.UpdateCollectionFiles.throw_exception");
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateTableFiles.fail_commited");
status = impl_->UpdateTableFiles(schemas);
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateCollectionFiles.fail_commited");
status = impl_->UpdateCollectionFiles(schemas);
ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED);
fiu_disable("SqliteMetaImpl.UpdateTableFiles.fail_commited");
fiu_disable("SqliteMetaImpl.UpdateCollectionFiles.fail_commited");
}
{
milvus::engine::TableIndex index;
status = impl_->UpdateTableIndex("notexist", index);
milvus::engine::CollectionIndex index;
status = impl_->UpdateCollectionIndex("notexist", index);
ASSERT_EQ(status.code(), milvus::DB_NOT_FOUND);
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateTableIndex.throw_exception");
status = impl_->UpdateTableIndex("notexist", index);
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateCollectionIndex.throw_exception");
status = impl_->UpdateCollectionIndex("notexist", index);
ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED);
fiu_disable("SqliteMetaImpl.UpdateTableIndex.throw_exception");
fiu_disable("SqliteMetaImpl.UpdateCollectionIndex.throw_exception");
FIU_ENABLE_FIU("SqliteMetaImpl.DescribeTableIndex.throw_exception");
status = impl_->DescribeTableIndex(collection_id, index);
FIU_ENABLE_FIU("SqliteMetaImpl.DescribeCollectionIndex.throw_exception");
status = impl_->DescribeCollectionIndex(collection_id, index);
ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED);
fiu_disable("SqliteMetaImpl.DescribeTableIndex.throw_exception");
fiu_disable("SqliteMetaImpl.DescribeCollectionIndex.throw_exception");
}
{
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateTableFilesToIndex.throw_exception");
status = impl_->UpdateTableFilesToIndex(collection_id);
FIU_ENABLE_FIU("SqliteMetaImpl.UpdateCollectionFilesToIndex.throw_exception");
status = impl_->UpdateCollectionFilesToIndex(collection_id);
ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED);
fiu_disable("SqliteMetaImpl.UpdateTableFilesToIndex.throw_exception");
fiu_disable("SqliteMetaImpl.UpdateCollectionFilesToIndex.throw_exception");
}
{
FIU_ENABLE_FIU("SqliteMetaImpl.DropTableIndex.throw_exception");
status = impl_->DropTableIndex(collection_id);
FIU_ENABLE_FIU("SqliteMetaImpl.DropCollectionIndex.throw_exception");
status = impl_->DropCollectionIndex(collection_id);
ASSERT_EQ(status.code(), milvus::DB_META_TRANSACTION_FAILED);
fiu_disable("SqliteMetaImpl.DropTableIndex.throw_exception");
fiu_disable("SqliteMetaImpl.DropCollectionIndex.throw_exception");
}
{
std::string partition = "part0";
@ -272,10 +272,10 @@ TEST_F(MetaTest, FALID_TEST) {
{
milvus::engine::meta::SegmentSchema file;
file.collection_id_ = collection_id;
status = impl_->CreateTableFile(file);
status = impl_->CreateCollectionFile(file);
ASSERT_TRUE(status.ok());
file.file_type_ = milvus::engine::meta::SegmentSchema::TO_INDEX;
impl_->UpdateTableFile(file);
impl_->UpdateCollectionFile(file);
milvus::engine::meta::SegmentsSchema files;
FIU_ENABLE_FIU("SqliteMetaImpl_FilesToIndex_TableNotFound");
@ -359,11 +359,11 @@ TEST_F(MetaTest, TABLE_FILE_TEST) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
collection.dimension_ = 256;
auto status = impl_->CreateTable(collection);
auto status = impl_->CreateCollection(collection);
milvus::engine::meta::SegmentSchema table_file;
table_file.collection_id_ = collection.collection_id_;
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
ASSERT_TRUE(status.ok());
ASSERT_EQ(table_file.file_type_, milvus::engine::meta::SegmentSchema::NEW);
@ -377,7 +377,7 @@ TEST_F(MetaTest, TABLE_FILE_TEST) {
auto new_file_type = milvus::engine::meta::SegmentSchema::INDEX;
table_file.file_type_ = new_file_type;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
ASSERT_TRUE(status.ok());
ASSERT_EQ(table_file.file_type_, new_file_type);
}
@ -388,13 +388,13 @@ TEST_F(MetaTest, TABLE_FILE_ROW_COUNT_TEST) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
collection.dimension_ = 256;
auto status = impl_->CreateTable(collection);
auto status = impl_->CreateCollection(collection);
milvus::engine::meta::SegmentSchema table_file;
table_file.row_count_ = 100;
table_file.collection_id_ = collection.collection_id_;
table_file.file_type_ = 1;
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
uint64_t cnt = 0;
status = impl_->Count(collection_id, cnt);
@ -402,7 +402,7 @@ TEST_F(MetaTest, TABLE_FILE_ROW_COUNT_TEST) {
table_file.row_count_ = 99999;
milvus::engine::meta::SegmentsSchema table_files = {table_file};
status = impl_->UpdateTableFilesRowCount(table_files);
status = impl_->UpdateCollectionFilesRowCount(table_files);
ASSERT_TRUE(status.ok());
cnt = 0;
@ -438,7 +438,7 @@ TEST_F(MetaTest, ARCHIVE_TEST_DAYS) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
auto status = impl.CreateTable(collection);
auto status = impl.CreateCollection(collection);
milvus::engine::meta::SegmentsSchema files;
milvus::engine::meta::SegmentSchema table_file;
@ -449,11 +449,11 @@ TEST_F(MetaTest, ARCHIVE_TEST_DAYS) {
std::vector<int> days;
std::vector<size_t> ids;
for (auto i = 0; i < cnt; ++i) {
status = impl.CreateTableFile(table_file);
status = impl.CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW;
int day = rand_r(&seed) % (days_num * 2);
table_file.created_on_ = ts - day * milvus::engine::meta::DAY * milvus::engine::meta::US_PS - 10000;
status = impl.UpdateTableFile(table_file);
status = impl.UpdateCollectionFile(table_file);
files.push_back(table_file);
days.push_back(day);
ids.push_back(table_file.id_);
@ -494,7 +494,7 @@ TEST_F(MetaTest, ARCHIVE_TEST_DISK) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
auto status = impl.CreateTable(collection);
auto status = impl.CreateCollection(collection);
milvus::engine::meta::SegmentsSchema files;
milvus::engine::meta::SegmentSchema table_file;
@ -504,10 +504,10 @@ TEST_F(MetaTest, ARCHIVE_TEST_DISK) {
auto each_size = 2UL;
std::vector<size_t> ids;
for (auto i = 0; i < cnt; ++i) {
status = impl.CreateTableFile(table_file);
status = impl.CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW;
table_file.file_size_ = each_size * milvus::engine::G;
status = impl.UpdateTableFile(table_file);
status = impl.UpdateCollectionFile(table_file);
files.push_back(table_file);
ids.push_back(table_file.id_);
}
@ -545,7 +545,7 @@ TEST_F(MetaTest, TABLE_FILES_TEST) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
auto status = impl_->CreateTable(collection);
auto status = impl_->CreateCollection(collection);
uint64_t new_merge_files_cnt = 1;
uint64_t new_index_files_cnt = 2;
@ -559,49 +559,49 @@ TEST_F(MetaTest, TABLE_FILES_TEST) {
table_file.collection_id_ = collection.collection_id_;
for (auto i = 0; i < new_merge_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW_MERGE;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < new_index_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW_INDEX;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < backup_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::BACKUP;
table_file.row_count_ = 1;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < new_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < raw_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::RAW;
table_file.row_count_ = 1;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < to_index_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::TO_INDEX;
table_file.row_count_ = 1;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < index_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::INDEX;
table_file.row_count_ = 1;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
uint64_t total_row_count = 0;
@ -659,15 +659,15 @@ TEST_F(MetaTest, TABLE_FILES_TEST) {
status = impl_->DeleteTableFiles(collection_id);
ASSERT_TRUE(status.ok());
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
status = impl_->CleanUpShadowFiles();
ASSERT_TRUE(status.ok());
table_file.collection_id_ = collection.collection_id_;
table_file.file_type_ = milvus::engine::meta::SegmentSchema::TO_DELETE;
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
std::vector<int> files_to_delete;
milvus::engine::meta::SegmentsSchema files_schema;
@ -682,7 +682,7 @@ TEST_F(MetaTest, TABLE_FILES_TEST) {
status = impl_->CleanUpFilesWithTTL(1UL);
ASSERT_TRUE(status.ok());
status = impl_->DropTable(collection_id);
status = impl_->DropCollection(collection_id);
ASSERT_TRUE(status.ok());
}
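
TABLE_FILES_TEST changes only call names; the logic it locks down is the SegmentSchema::file_type_ lifecycle: CreateCollectionFile always creates a record as NEW, and every other state (NEW_MERGE, NEW_INDEX, BACKUP, RAW, TO_INDEX, INDEX, TO_DELETE) is reached by retagging the record through UpdateCollectionFile. The create-then-retag idiom the loops above repeat, sketched once:

    // One iteration of the idiom (RAW shown; the other states work the same).
    status = impl_->CreateCollectionFile(table_file);                  // record starts as NEW
    table_file.file_type_ = milvus::engine::meta::SegmentSchema::RAW;  // target state
    table_file.row_count_ = 1;       // only BACKUP/RAW/TO_INDEX/INDEX rows are tallied
    status = impl_->UpdateCollectionFile(table_file);                  // persist the retag
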
@ -691,37 +691,37 @@ TEST_F(MetaTest, INDEX_TEST) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
auto status = impl_->CreateTable(collection);
auto status = impl_->CreateCollection(collection);
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.metric_type_ = 2;
index.extra_params_ = {{"nlist", 1234}};
index.engine_type_ = 3;
status = impl_->UpdateTableIndex(collection_id, index);
status = impl_->UpdateCollectionIndex(collection_id, index);
ASSERT_TRUE(status.ok());
int64_t flag = 65536;
status = impl_->UpdateTableFlag(collection_id, flag);
status = impl_->UpdateCollectionFlag(collection_id, flag);
ASSERT_TRUE(status.ok());
milvus::engine::meta::CollectionSchema table_info;
table_info.collection_id_ = collection_id;
status = impl_->DescribeTable(table_info);
ASSERT_EQ(table_info.flag_, flag);
milvus::engine::meta::CollectionSchema collection_info;
collection_info.collection_id_ = collection_id;
status = impl_->DescribeCollection(collection_info);
ASSERT_EQ(collection_info.flag_, flag);
milvus::engine::TableIndex index_out;
status = impl_->DescribeTableIndex(collection_id, index_out);
milvus::engine::CollectionIndex index_out;
status = impl_->DescribeCollectionIndex(collection_id, index_out);
ASSERT_EQ(index_out.metric_type_, index.metric_type_);
ASSERT_EQ(index_out.extra_params_, index.extra_params_);
ASSERT_EQ(index_out.engine_type_, index.engine_type_);
status = impl_->DropTableIndex(collection_id);
status = impl_->DropCollectionIndex(collection_id);
ASSERT_TRUE(status.ok());
status = impl_->DescribeTableIndex(collection_id, index_out);
status = impl_->DescribeCollectionIndex(collection_id, index_out);
ASSERT_EQ(index_out.metric_type_, index.metric_type_);
ASSERT_NE(index_out.engine_type_, index.engine_type_);
status = impl_->UpdateTableFilesToIndex(collection_id);
status = impl_->UpdateCollectionFilesToIndex(collection_id);
ASSERT_TRUE(status.ok());
}
@ -731,13 +731,13 @@ TEST_F(MetaTest, LSN_TEST) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
auto status = impl_->CreateTable(collection);
auto status = impl_->CreateCollection(collection);
status = impl_->UpdateTableFlushLSN(collection_id, lsn);
ASSERT_TRUE(status.ok());
uint64_t temp_lsb = 0;
status = impl_->GetTableFlushLSN(collection_id, temp_lsb);
status = impl_->GetCollectionFlushLSN(collection_id, temp_lsb);
ASSERT_EQ(temp_lsb, lsn);
status = impl_->SetGlobalLastLSN(lsn);
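
One detail worth noting across the failure blocks: fault-injection points are plain strings, so they have to be renamed in lockstep with the methods they guard; a stale name would silently stop injecting rather than fail to compile. Every block uses the same enable/exercise/disable bracket (FIU_ENABLE_FIU presumably expands to fiu_enable(name, 1, NULL, 0) from libfiu). An illustrative RAII guard, not part of this tree, that makes the bracket exception-safe:

    #include <fiu-control.h>  // libfiu control API: fiu_enable / fiu_disable

    // Hypothetical helper: scoped fault injection for tests.
    struct FiuScope {
        const char* point_;
        explicit FiuScope(const char* point) : point_(point) {
            fiu_enable(point_, 1, nullptr, 0);   // arm the named failure point
        }
        ~FiuScope() {
            fiu_disable(point_);                 // always disarm, even on early return
        }
    };

    // Usage, mirroring a block above:
    //   FiuScope fault("SqliteMetaImpl.CreateCollection.throw_exception");
    //   status = impl_->CreateCollection(collection);
    //   ASSERT_FALSE(status.ok());
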

View File

@ -34,79 +34,79 @@ TEST_F(MySqlMetaTest, TABLE_TEST) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
auto status = impl_->CreateTable(collection);
auto status = impl_->CreateCollection(collection);
ASSERT_TRUE(status.ok());
auto gid = collection.id_;
collection.id_ = -1;
status = impl_->DescribeTable(collection);
status = impl_->DescribeCollection(collection);
ASSERT_TRUE(status.ok());
ASSERT_EQ(collection.id_, gid);
ASSERT_EQ(collection.collection_id_, collection_id);
collection.collection_id_ = "not_found";
status = impl_->DescribeTable(collection);
status = impl_->DescribeCollection(collection);
ASSERT_TRUE(!status.ok());
collection.collection_id_ = collection_id;
status = impl_->CreateTable(collection);
status = impl_->CreateCollection(collection);
ASSERT_EQ(status.code(), milvus::DB_ALREADY_EXIST);
collection.collection_id_ = "";
status = impl_->CreateTable(collection);
status = impl_->CreateCollection(collection);
// ASSERT_TRUE(status.ok());
collection.collection_id_ = collection_id;
FIU_ENABLE_FIU("MySQLMetaImpl.CreateTable.null_connection");
auto stat = impl_->CreateTable(collection);
FIU_ENABLE_FIU("MySQLMetaImpl.CreateCollection.null_connection");
auto stat = impl_->CreateCollection(collection);
ASSERT_FALSE(stat.ok());
ASSERT_EQ(stat.message(), FAILED_CONNECT_SQL_SERVER);
fiu_disable("MySQLMetaImpl.CreateTable.null_connection");
fiu_disable("MySQLMetaImpl.CreateCollection.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.CreateTable.throw_exception");
stat = impl_->CreateTable(collection);
FIU_ENABLE_FIU("MySQLMetaImpl.CreateCollection.throw_exception");
stat = impl_->CreateCollection(collection);
ASSERT_FALSE(stat.ok());
fiu_disable("MySQLMetaImpl.CreateTable.throw_exception");
fiu_disable("MySQLMetaImpl.CreateCollection.throw_exception");
//ensure collection exists
stat = impl_->CreateTable(collection);
FIU_ENABLE_FIU("MySQLMetaImpl.CreateTableTable.schema_TO_DELETE");
stat = impl_->CreateTable(collection);
stat = impl_->CreateCollection(collection);
FIU_ENABLE_FIU("MySQLMetaImpl.CreateCollectionTable.schema_TO_DELETE");
stat = impl_->CreateCollection(collection);
ASSERT_FALSE(stat.ok());
ASSERT_EQ(stat.message(), TABLE_ALREADY_EXISTS);
fiu_disable("MySQLMetaImpl.CreateTableTable.schema_TO_DELETE");
fiu_disable("MySQLMetaImpl.CreateCollectionTable.schema_TO_DELETE");
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTable.null_connection");
stat = impl_->DescribeTable(collection);
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeCollection.null_connection");
stat = impl_->DescribeCollection(collection);
ASSERT_FALSE(stat.ok());
fiu_disable("MySQLMetaImpl.DescribeTable.null_connection");
fiu_disable("MySQLMetaImpl.DescribeCollection.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTable.throw_exception");
stat = impl_->DescribeTable(collection);
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeCollection.throw_exception");
stat = impl_->DescribeCollection(collection);
ASSERT_FALSE(stat.ok());
fiu_disable("MySQLMetaImpl.DescribeTable.throw_exception");
fiu_disable("MySQLMetaImpl.DescribeCollection.throw_exception");
bool has_table = false;
stat = impl_->HasTable(collection_id, has_table);
bool has_collection = false;
stat = impl_->HasCollection(collection_id, has_collection);
ASSERT_TRUE(stat.ok());
ASSERT_TRUE(has_table);
ASSERT_TRUE(has_collection);
has_table = false;
FIU_ENABLE_FIU("MySQLMetaImpl.HasTable.null_connection");
stat = impl_->HasTable(collection_id, has_table);
has_collection = false;
FIU_ENABLE_FIU("MySQLMetaImpl.HasCollection.null_connection");
stat = impl_->HasCollection(collection_id, has_collection);
ASSERT_FALSE(stat.ok());
ASSERT_FALSE(has_table);
fiu_disable("MySQLMetaImpl.HasTable.null_connection");
ASSERT_FALSE(has_collection);
fiu_disable("MySQLMetaImpl.HasCollection.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.HasTable.throw_exception");
stat = impl_->HasTable(collection_id, has_table);
FIU_ENABLE_FIU("MySQLMetaImpl.HasCollection.throw_exception");
stat = impl_->HasCollection(collection_id, has_collection);
ASSERT_FALSE(stat.ok());
ASSERT_FALSE(has_table);
fiu_disable("MySQLMetaImpl.HasTable.throw_exception");
ASSERT_FALSE(has_collection);
fiu_disable("MySQLMetaImpl.HasCollection.throw_exception");
FIU_ENABLE_FIU("MySQLMetaImpl.DropTable.CLUSTER_WRITABLE_MODE");
stat = impl_->DropTable(collection_id);
fiu_disable("MySQLMetaImpl.DropTable.CLUSTER_WRITABLE_MODE");
FIU_ENABLE_FIU("MySQLMetaImpl.DropCollection.CLUSTER_WRITABLE_MODE");
stat = impl_->DropCollection(collection_id);
fiu_disable("MySQLMetaImpl.DropCollection.CLUSTER_WRITABLE_MODE");
FIU_ENABLE_FIU("MySQLMetaImpl.DropAll.null_connection");
status = impl_->DropAll();
@ -134,29 +134,29 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
collection.dimension_ = 256;
status = impl_->CreateTable(collection);
status = impl_->CreateCollection(collection);
//CreateTableFile
//CreateCollectionFile
milvus::engine::meta::SegmentSchema table_file;
table_file.collection_id_ = collection.collection_id_;
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
ASSERT_TRUE(status.ok());
ASSERT_EQ(table_file.file_type_, milvus::engine::meta::SegmentSchema::NEW);
FIU_ENABLE_FIU("MySQLMetaImpl.CreateTableFiles.null_connection");
status = impl_->CreateTableFile(table_file);
FIU_ENABLE_FIU("MySQLMetaImpl.CreateCollectionFiles.null_connection");
status = impl_->CreateCollectionFile(table_file);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.CreateTableFiles.null_connection");
fiu_disable("MySQLMetaImpl.CreateCollectionFiles.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.CreateTableFiles.throw_exception");
status = impl_->CreateTableFile(table_file);
FIU_ENABLE_FIU("MySQLMetaImpl.CreateCollectionFiles.throw_exception");
status = impl_->CreateCollectionFile(table_file);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.CreateTableFiles.throw_exception");
fiu_disable("MySQLMetaImpl.CreateCollectionFiles.throw_exception");
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTable.throw_exception");
status = impl_->CreateTableFile(table_file);
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeCollection.throw_exception");
status = impl_->CreateCollectionFile(table_file);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.DescribeTable.throw_exception");
fiu_disable("MySQLMetaImpl.DescribeCollection.throw_exception");
//Count
uint64_t cnt = 0;
@ -164,10 +164,10 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) {
// ASSERT_TRUE(status.ok());
// ASSERT_EQ(cnt, 0UL);
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTable.throw_exception");
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeCollection.throw_exception");
status = impl_->Count(collection_id, cnt);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.DescribeTable.throw_exception");
fiu_disable("MySQLMetaImpl.DescribeCollection.throw_exception");
FIU_ENABLE_FIU("MySQLMetaImpl.Count.null_connection");
status = impl_->Count(collection_id, cnt);
@ -183,24 +183,24 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) {
auto new_file_type = milvus::engine::meta::SegmentSchema::INDEX;
table_file.file_type_ = new_file_type;
//UpdateTableFile
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFile.null_connection");
status = impl_->UpdateTableFile(table_file);
//UpdateCollectionFile
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateCollectionFile.null_connection");
status = impl_->UpdateCollectionFile(table_file);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.UpdateTableFile.null_connection");
fiu_disable("MySQLMetaImpl.UpdateCollectionFile.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFile.throw_exception");
status = impl_->UpdateTableFile(table_file);
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateCollectionFile.throw_exception");
status = impl_->UpdateCollectionFile(table_file);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.UpdateTableFile.throw_exception");
fiu_disable("MySQLMetaImpl.UpdateCollectionFile.throw_exception");
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
ASSERT_TRUE(status.ok());
ASSERT_EQ(table_file.file_type_, new_file_type);
auto no_table_file = table_file;
no_table_file.collection_id_ = "notexist";
status = impl_->UpdateTableFile(no_table_file);
status = impl_->UpdateCollectionFile(no_table_file);
ASSERT_TRUE(status.ok());
FIU_ENABLE_FIU("MySQLMetaImpl.CleanUpShadowFiles.null_connection");
@ -217,17 +217,17 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) {
ASSERT_TRUE(status.ok());
milvus::engine::meta::SegmentsSchema files_schema;
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFiles.null_connection");
status = impl_->UpdateTableFiles(files_schema);
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateCollectionFiles.null_connection");
status = impl_->UpdateCollectionFiles(files_schema);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.UpdateTableFiles.null_connection");
fiu_disable("MySQLMetaImpl.UpdateCollectionFiles.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFiles.throw_exception");
status = impl_->UpdateTableFiles(files_schema);
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateCollectionFiles.throw_exception");
status = impl_->UpdateCollectionFiles(files_schema);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.UpdateTableFiles.throw_exception");
fiu_disable("MySQLMetaImpl.UpdateCollectionFiles.throw_exception");
status = impl_->UpdateTableFiles(files_schema);
status = impl_->UpdateCollectionFiles(files_schema);
ASSERT_TRUE(status.ok());
std::vector<size_t> ids = {table_file.id_};
@ -251,14 +251,14 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) {
table_file.collection_id_ = collection.collection_id_;
table_file.file_type_ = milvus::engine::meta::SegmentSchema::RAW;
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
ids = {table_file.id_};
status = impl_->FilesByID(ids, files);
ASSERT_EQ(files.size(), 1UL);
table_file.collection_id_ = collection.collection_id_;
table_file.file_type_ = milvus::engine::meta::SegmentSchema::TO_DELETE;
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
std::vector<int> files_to_delete;
files_to_delete.push_back(milvus::engine::meta::SegmentSchema::TO_DELETE);
@ -272,9 +272,9 @@ TEST_F(MySqlMetaTest, TABLE_FILE_TEST) {
status = impl_->CleanUpFilesWithTTL(1UL);
ASSERT_TRUE(status.ok());
status = impl_->DropTable(table_file.collection_id_);
status = impl_->DropCollection(table_file.collection_id_);
ASSERT_TRUE(status.ok());
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
ASSERT_TRUE(status.ok());
}
@ -284,13 +284,13 @@ TEST_F(MySqlMetaTest, TABLE_FILE_ROW_COUNT_TEST) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
collection.dimension_ = 256;
auto status = impl_->CreateTable(collection);
auto status = impl_->CreateCollection(collection);
milvus::engine::meta::SegmentSchema table_file;
table_file.row_count_ = 100;
table_file.collection_id_ = collection.collection_id_;
table_file.file_type_ = 1;
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
uint64_t cnt = 0;
status = impl_->Count(collection_id, cnt);
@ -298,7 +298,7 @@ TEST_F(MySqlMetaTest, TABLE_FILE_ROW_COUNT_TEST) {
table_file.row_count_ = 99999;
milvus::engine::meta::SegmentsSchema table_files = {table_file};
status = impl_->UpdateTableFilesRowCount(table_files);
status = impl_->UpdateCollectionFilesRowCount(table_files);
ASSERT_TRUE(status.ok());
cnt = 0;
@ -337,7 +337,7 @@ TEST_F(MySqlMetaTest, ARCHIVE_TEST_DAYS) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
auto status = impl.CreateTable(collection);
auto status = impl.CreateCollection(collection);
milvus::engine::meta::SegmentsSchema files;
milvus::engine::meta::SegmentSchema table_file;
@ -348,11 +348,11 @@ TEST_F(MySqlMetaTest, ARCHIVE_TEST_DAYS) {
std::vector<int> days;
std::vector<size_t> ids;
for (auto i = 0; i < cnt; ++i) {
status = impl.CreateTableFile(table_file);
status = impl.CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW;
int day = rand_r(&seed) % (days_num * 2);
table_file.created_on_ = ts - day * milvus::engine::meta::DAY * milvus::engine::meta::US_PS - 10000;
status = impl.UpdateTableFile(table_file);
status = impl.UpdateCollectionFile(table_file);
files.push_back(table_file);
days.push_back(day);
ids.push_back(table_file.id_);
@ -402,18 +402,18 @@ TEST_F(MySqlMetaTest, ARCHIVE_TEST_DAYS) {
ASSERT_TRUE(table_files.empty());
fiu_disable("MySQLMetaImpl.FilesByType.throw_exception");
status = impl.UpdateTableFilesToIndex(collection_id);
status = impl.UpdateCollectionFilesToIndex(collection_id);
ASSERT_TRUE(status.ok());
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFilesToIndex.null_connection");
status = impl.UpdateTableFilesToIndex(collection_id);
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateCollectionFilesToIndex.null_connection");
status = impl.UpdateCollectionFilesToIndex(collection_id);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.UpdateTableFilesToIndex.null_connection");
fiu_disable("MySQLMetaImpl.UpdateCollectionFilesToIndex.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFilesToIndex.throw_exception");
status = impl.UpdateTableFilesToIndex(collection_id);
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateCollectionFilesToIndex.throw_exception");
status = impl.UpdateCollectionFilesToIndex(collection_id);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.UpdateTableFilesToIndex.throw_exception");
fiu_disable("MySQLMetaImpl.UpdateCollectionFilesToIndex.throw_exception");
status = impl.DropAll();
ASSERT_TRUE(status.ok());
@ -430,11 +430,11 @@ TEST_F(MySqlMetaTest, ARCHIVE_TEST_DISK) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
auto status = impl.CreateTable(collection);
auto status = impl.CreateCollection(collection);
milvus::engine::meta::CollectionSchema table_schema;
table_schema.collection_id_ = "";
status = impl.CreateTable(table_schema);
status = impl.CreateCollection(table_schema);
milvus::engine::meta::SegmentsSchema files;
milvus::engine::meta::SegmentSchema table_file;
@ -444,10 +444,10 @@ TEST_F(MySqlMetaTest, ARCHIVE_TEST_DISK) {
auto each_size = 2UL;
std::vector<size_t> ids;
for (auto i = 0; i < cnt; ++i) {
status = impl.CreateTableFile(table_file);
status = impl.CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW;
table_file.file_size_ = each_size * milvus::engine::G;
status = impl.UpdateTableFile(table_file);
status = impl.UpdateCollectionFile(table_file);
files.push_back(table_file);
ids.push_back(table_file.id_);
}
@ -532,7 +532,7 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
auto status = impl_->CreateTable(collection);
auto status = impl_->CreateCollection(collection);
uint64_t new_merge_files_cnt = 1;
uint64_t new_index_files_cnt = 2;
@ -546,49 +546,49 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) {
table_file.collection_id_ = collection.collection_id_;
for (auto i = 0; i < new_merge_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW_MERGE;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < new_index_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW_INDEX;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < backup_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::BACKUP;
table_file.row_count_ = 1;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < new_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::NEW;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < raw_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::RAW;
table_file.row_count_ = 1;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < to_index_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::TO_INDEX;
table_file.row_count_ = 1;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
for (auto i = 0; i < index_files_cnt; ++i) {
status = impl_->CreateTableFile(table_file);
status = impl_->CreateCollectionFile(table_file);
table_file.file_type_ = milvus::engine::meta::SegmentSchema::INDEX;
table_file.row_count_ = 1;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
}
uint64_t total_row_count = 0;
@ -619,7 +619,7 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) {
table_file.file_type_ = milvus::engine::meta::SegmentSchema::RAW;
table_file.file_size_ = milvus::engine::ONE_GB + 1;
status = impl_->UpdateTableFile(table_file);
status = impl_->UpdateCollectionFile(table_file);
ASSERT_TRUE(status.ok());
#if 0
{
@ -632,10 +632,10 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) {
status = impl_->FilesToIndex(files);
ASSERT_EQ(files.size(), to_index_files_cnt);
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTable.throw_exception");
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeCollection.throw_exception");
status = impl_->FilesToIndex(files);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.DescribeTable.throw_exception");
fiu_disable("MySQLMetaImpl.DescribeCollection.throw_exception");
FIU_ENABLE_FIU("MySQLMetaImpl.FilesToIndex.null_connection");
status = impl_->FilesToIndex(files);
@ -700,7 +700,7 @@ TEST_F(MySqlMetaTest, TABLE_FILES_TEST) {
status = impl_->DeleteTableFiles(collection_id);
ASSERT_TRUE(status.ok());
status = impl_->DropTable(collection_id);
status = impl_->DropCollection(collection_id);
ASSERT_TRUE(status.ok());
status = impl_->CleanUpFilesWithTTL(0UL);
@ -743,73 +743,73 @@ TEST_F(MySqlMetaTest, INDEX_TEST) {
milvus::engine::meta::CollectionSchema collection;
collection.collection_id_ = collection_id;
auto status = impl_->CreateTable(collection);
auto status = impl_->CreateCollection(collection);
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.metric_type_ = 2;
index.extra_params_ = {{"nlist", 1234}};
index.engine_type_ = 3;
status = impl_->UpdateTableIndex(collection_id, index);
status = impl_->UpdateCollectionIndex(collection_id, index);
ASSERT_TRUE(status.ok());
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableIndex.null_connection");
status = impl_->UpdateTableIndex(collection_id, index);
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateCollectionIndex.null_connection");
status = impl_->UpdateCollectionIndex(collection_id, index);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.UpdateTableIndex.null_connection");
fiu_disable("MySQLMetaImpl.UpdateCollectionIndex.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableIndex.throw_exception");
status = impl_->UpdateTableIndex(collection_id, index);
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateCollectionIndex.throw_exception");
status = impl_->UpdateCollectionIndex(collection_id, index);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.UpdateTableIndex.throw_exception");
fiu_disable("MySQLMetaImpl.UpdateCollectionIndex.throw_exception");
status = impl_->UpdateTableIndex("notexist", index);
status = impl_->UpdateCollectionIndex("notexist", index);
ASSERT_EQ(status.code(), milvus::DB_NOT_FOUND);
int64_t flag = 65536;
status = impl_->UpdateTableFlag(collection_id, flag);
status = impl_->UpdateCollectionFlag(collection_id, flag);
ASSERT_TRUE(status.ok());
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFlag.null_connection");
status = impl_->UpdateTableFlag(collection_id, flag);
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateCollectionFlag.null_connection");
status = impl_->UpdateCollectionFlag(collection_id, flag);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.UpdateTableFlag.null_connection");
fiu_disable("MySQLMetaImpl.UpdateCollectionFlag.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateTableFlag.throw_exception");
status = impl_->UpdateTableFlag(collection_id, flag);
FIU_ENABLE_FIU("MySQLMetaImpl.UpdateCollectionFlag.throw_exception");
status = impl_->UpdateCollectionFlag(collection_id, flag);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.UpdateTableFlag.throw_exception");
fiu_disable("MySQLMetaImpl.UpdateCollectionFlag.throw_exception");
milvus::engine::meta::CollectionSchema table_info;
table_info.collection_id_ = collection_id;
status = impl_->DescribeTable(table_info);
ASSERT_EQ(table_info.flag_, flag);
milvus::engine::meta::CollectionSchema collection_info;
collection_info.collection_id_ = collection_id;
status = impl_->DescribeCollection(collection_info);
ASSERT_EQ(collection_info.flag_, flag);
milvus::engine::TableIndex index_out;
status = impl_->DescribeTableIndex(collection_id, index_out);
milvus::engine::CollectionIndex index_out;
status = impl_->DescribeCollectionIndex(collection_id, index_out);
ASSERT_EQ(index_out.metric_type_, index.metric_type_);
ASSERT_EQ(index_out.extra_params_, index.extra_params_);
ASSERT_EQ(index_out.engine_type_, index.engine_type_);
status = impl_->DropTableIndex(collection_id);
status = impl_->DropCollectionIndex(collection_id);
ASSERT_TRUE(status.ok());
status = impl_->DescribeTableIndex(collection_id, index_out);
status = impl_->DescribeCollectionIndex(collection_id, index_out);
ASSERT_EQ(index_out.metric_type_, index.metric_type_);
ASSERT_NE(index_out.engine_type_, index.engine_type_);
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTableIndex.null_connection");
status = impl_->DescribeTableIndex(collection_id, index_out);
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeCollectionIndex.null_connection");
status = impl_->DescribeCollectionIndex(collection_id, index_out);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.DescribeTableIndex.null_connection");
fiu_disable("MySQLMetaImpl.DescribeCollectionIndex.null_connection");
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeTableIndex.throw_exception");
status = impl_->DescribeTableIndex(collection_id, index_out);
FIU_ENABLE_FIU("MySQLMetaImpl.DescribeCollectionIndex.throw_exception");
status = impl_->DescribeCollectionIndex(collection_id, index_out);
ASSERT_FALSE(status.ok());
fiu_disable("MySQLMetaImpl.DescribeTableIndex.throw_exception");
fiu_disable("MySQLMetaImpl.DescribeCollectionIndex.throw_exception");
status = impl_->DescribeTableIndex("notexist", index_out);
status = impl_->DescribeCollectionIndex("notexist", index_out);
ASSERT_EQ(status.code(), milvus::DB_NOT_FOUND);
status = impl_->UpdateTableFilesToIndex(collection_id);
status = impl_->UpdateCollectionFilesToIndex(collection_id);
ASSERT_TRUE(status.ok());
}
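
Both meta backends now run the same renamed index round trip: TableIndex becomes CollectionIndex, and Update/Describe/DropCollectionIndex replace the Table forms. The assertions also encode a subtle contract: after DropCollectionIndex, DescribeCollectionIndex still reports the stored metric_type_ but engine_type_ no longer matches what was set (presumably reset to a default), hence ASSERT_EQ on the metric and ASSERT_NE on the engine. Condensed:

    milvus::engine::CollectionIndex index;
    index.metric_type_ = 2;
    index.extra_params_ = {{"nlist", 1234}};
    index.engine_type_ = 3;
    status = impl_->UpdateCollectionIndex(collection_id, index);     // store

    milvus::engine::CollectionIndex index_out;
    status = impl_->DescribeCollectionIndex(collection_id, index_out);
    // index_out matches index field for field

    status = impl_->DropCollectionIndex(collection_id);
    status = impl_->DescribeCollectionIndex(collection_id, index_out);
    // metric_type_ unchanged; engine_type_ differs from the value stored above
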

View File

@ -95,16 +95,16 @@ TEST(DBMiscTest, UTILS_TEST) {
fiu_init(0);
milvus::Status status;
FIU_ENABLE_FIU("CommonUtil.CreateDirectory.create_parent_fail");
status = milvus::engine::utils::CreateTablePath(options, TABLE_NAME);
status = milvus::engine::utils::CreateCollectionPath(options, TABLE_NAME);
ASSERT_FALSE(status.ok());
fiu_disable("CommonUtil.CreateDirectory.create_parent_fail");
FIU_ENABLE_FIU("CreateTablePath.creat_slave_path");
status = milvus::engine::utils::CreateTablePath(options, TABLE_NAME);
FIU_ENABLE_FIU("CreateCollectionPath.creat_slave_path");
status = milvus::engine::utils::CreateCollectionPath(options, TABLE_NAME);
ASSERT_FALSE(status.ok());
fiu_disable("CreateTablePath.creat_slave_path");
fiu_disable("CreateCollectionPath.creat_slave_path");
status = milvus::engine::utils::CreateTablePath(options, TABLE_NAME);
status = milvus::engine::utils::CreateCollectionPath(options, TABLE_NAME);
ASSERT_TRUE(status.ok());
ASSERT_TRUE(boost::filesystem::exists(options.path_));
for (auto& path : options.slave_paths_) {
@ -112,11 +112,11 @@ TEST(DBMiscTest, UTILS_TEST) {
}
// options.slave_paths.push_back("/");
// status = engine::utils::CreateTablePath(options, TABLE_NAME);
// status = engine::utils::CreateCollectionPath(options, TABLE_NAME);
// ASSERT_FALSE(status.ok());
//
// options.path = "/";
// status = engine::utils::CreateTablePath(options, TABLE_NAME);
// status = engine::utils::CreateCollectionPath(options, TABLE_NAME);
// ASSERT_FALSE(status.ok());
milvus::engine::meta::SegmentSchema file;
@ -134,13 +134,13 @@ TEST(DBMiscTest, UTILS_TEST) {
status = milvus::engine::utils::DeleteTableFilePath(options, file);
ASSERT_TRUE(status.ok());
status = milvus::engine::utils::CreateTableFilePath(options, file);
status = milvus::engine::utils::CreateCollectionFilePath(options, file);
ASSERT_TRUE(status.ok());
FIU_ENABLE_FIU("CreateTableFilePath.fail_create");
status = milvus::engine::utils::CreateTableFilePath(options, file);
FIU_ENABLE_FIU("CreateCollectionFilePath.fail_create");
status = milvus::engine::utils::CreateCollectionFilePath(options, file);
ASSERT_FALSE(status.ok());
fiu_disable("CreateTableFilePath.fail_create");
fiu_disable("CreateCollectionFilePath.fail_create");
status = milvus::engine::utils::GetTableFilePath(options, file);
ASSERT_FALSE(file.location_.empty());
@ -190,7 +190,7 @@ TEST(DBMiscTest, CHECKER_TEST) {
checker.MarkFailedIndexFile(schema, "5001 fail");
std::string err_msg;
checker.GetErrMsgForTable("aaa", err_msg);
checker.GetErrMsgForCollection("aaa", err_msg);
ASSERT_EQ(err_msg, "5000 fail");
schema.collection_id_ = "bbb";
@ -202,11 +202,11 @@ TEST(DBMiscTest, CHECKER_TEST) {
checker.IgnoreFailedIndexFiles(table_files);
ASSERT_TRUE(table_files.empty());
checker.GetErrMsgForTable("bbb", err_msg);
checker.GetErrMsgForCollection("bbb", err_msg);
ASSERT_EQ(err_msg, "5001 fail");
checker.MarkSucceedIndexFile(schema);
checker.GetErrMsgForTable("bbb", err_msg);
checker.GetErrMsgForCollection("bbb", err_msg);
ASSERT_EQ(err_msg, "5001 fail");
}
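
CHECKER_TEST shows the narrower half of the rename: the per-file marks (MarkFailedIndexFile, MarkSucceedIndexFile, IgnoreFailedIndexFiles) keep their names, and only the per-collection lookup becomes GetErrMsgForCollection. A minimal sketch of the lookup side; the checker's concrete type sits outside this hunk and is assumed to be the DB layer's index-failure checker:

    // Failure messages are recorded per file but looked up per collection id.
    std::string err_msg;
    checker.MarkFailedIndexFile(schema, "5001 fail");   // unchanged, file-level name
    checker.GetErrMsgForCollection("bbb", err_msg);     // renamed from GetErrMsgForTable
    // err_msg == "5001 fail" for the collection the failed file belongs to
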

View File

@ -43,12 +43,12 @@ GetTableName() {
milvus::engine::meta::CollectionSchema
BuildTableSchema() {
milvus::engine::meta::CollectionSchema table_info;
table_info.dimension_ = TABLE_DIM;
table_info.collection_id_ = GetTableName();
table_info.metric_type_ = (int32_t)milvus::engine::MetricType::L2;
table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
return table_info;
milvus::engine::meta::CollectionSchema collection_info;
collection_info.dimension_ = TABLE_DIM;
collection_info.collection_id_ = GetTableName();
collection_info.metric_type_ = (int32_t)milvus::engine::MetricType::L2;
collection_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFFLAT;
return collection_info;
}
void
@ -64,14 +64,14 @@ BuildVectors(uint64_t n, milvus::engine::VectorsData& vectors) {
} // namespace
TEST_F(SearchByIdTest, basic) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 100000;
milvus::engine::VectorsData xb;
@ -81,7 +81,7 @@ TEST_F(SearchByIdTest, basic) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -108,7 +108,7 @@ TEST_F(SearchByIdTest, basic) {
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, i, result_ids,
stat = db_->QueryByID(dummy_context_, collection_info.collection_id_, tags, topk, json_params, i, result_ids,
result_distances);
ASSERT_EQ(result_ids[0], i);
ASSERT_LT(result_distances[0], 1e-4);
@ -116,14 +116,14 @@ TEST_F(SearchByIdTest, basic) {
}
TEST_F(SearchByIdTest, with_index) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 10000;
milvus::engine::VectorsData xb;
@ -133,7 +133,7 @@ TEST_F(SearchByIdTest, with_index) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -151,10 +151,10 @@ TEST_F(SearchByIdTest, with_index) {
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
index.extra_params_ = {{"nlist", 10}};
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
const int topk = 10, nprobe = 10;
@ -166,7 +166,7 @@ TEST_F(SearchByIdTest, with_index) {
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, i, result_ids,
stat = db_->QueryByID(dummy_context_, collection_info.collection_id_, tags, topk, json_params, i, result_ids,
result_distances);
ASSERT_EQ(result_ids[0], i);
ASSERT_LT(result_distances[0], 1e-3);
@ -174,14 +174,14 @@ TEST_F(SearchByIdTest, with_index) {
}
TEST_F(SearchByIdTest, with_delete) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 100000;
milvus::engine::VectorsData xb;
@ -191,7 +191,7 @@ TEST_F(SearchByIdTest, with_delete) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -213,7 +213,7 @@ TEST_F(SearchByIdTest, with_delete) {
for (auto& id : ids_to_search) {
ids_to_delete.emplace_back(id);
}
stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, ids_to_delete);
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
@ -227,7 +227,7 @@ TEST_F(SearchByIdTest, with_delete) {
milvus::engine::ResultIds result_ids;
milvus::engine::ResultDistances result_distances;
stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, i, result_ids,
stat = db_->QueryByID(dummy_context_, collection_info.collection_id_, tags, topk, json_params, i, result_ids,
result_distances);
ASSERT_EQ(result_ids[0], -1);
ASSERT_EQ(result_distances[0], std::numeric_limits<float>::max());
@ -235,14 +235,14 @@ TEST_F(SearchByIdTest, with_delete) {
}
TEST_F(GetVectorByIdTest, basic) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 100000;
milvus::engine::VectorsData xb;
@ -252,7 +252,7 @@ TEST_F(GetVectorByIdTest, basic) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -280,10 +280,10 @@ TEST_F(GetVectorByIdTest, basic) {
milvus::engine::ResultDistances result_distances;
milvus::engine::VectorsData vector;
stat = db_->GetVectorByID(table_info.collection_id_, id, vector);
stat = db_->GetVectorByID(collection_info.collection_id_, id, vector);
ASSERT_TRUE(stat.ok());
stat = db_->Query(dummy_context_, table_info.collection_id_, tags, topk, json_params, vector, result_ids,
stat = db_->Query(dummy_context_, collection_info.collection_id_, tags, topk, json_params, vector, result_ids,
result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids[0], id);
@ -292,14 +292,14 @@ TEST_F(GetVectorByIdTest, basic) {
}
TEST_F(GetVectorByIdTest, with_index) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 10000;
milvus::engine::VectorsData xb;
@ -309,7 +309,7 @@ TEST_F(GetVectorByIdTest, with_index) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -327,10 +327,10 @@ TEST_F(GetVectorByIdTest, with_index) {
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
milvus::engine::TableIndex index;
milvus::engine::CollectionIndex index;
index.extra_params_ = {{"nlist", 10}};
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8;
stat = db_->CreateIndex(table_info.collection_id_, index);
stat = db_->CreateIndex(collection_info.collection_id_, index);
ASSERT_TRUE(stat.ok());
const int topk = 10, nprobe = 10;
@ -343,10 +343,10 @@ TEST_F(GetVectorByIdTest, with_index) {
milvus::engine::ResultDistances result_distances;
milvus::engine::VectorsData vector;
stat = db_->GetVectorByID(table_info.collection_id_, id, vector);
stat = db_->GetVectorByID(collection_info.collection_id_, id, vector);
ASSERT_TRUE(stat.ok());
stat = db_->Query(dummy_context_, table_info.collection_id_, tags, topk, json_params, vector, result_ids,
stat = db_->Query(dummy_context_, collection_info.collection_id_, tags, topk, json_params, vector, result_ids,
result_distances);
ASSERT_EQ(result_ids[0], id);
ASSERT_LT(result_distances[0], 1e-3);
@ -354,14 +354,14 @@ TEST_F(GetVectorByIdTest, with_index) {
}
TEST_F(GetVectorByIdTest, with_delete) {
milvus::engine::meta::CollectionSchema table_info = BuildTableSchema();
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info = BuildTableSchema();
auto stat = db_->CreateCollection(collection_info);
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int64_t nb = 100000;
milvus::engine::VectorsData xb;
@ -371,7 +371,7 @@ TEST_F(GetVectorByIdTest, with_delete) {
xb.id_array_.push_back(i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", xb);
stat = db_->InsertVectors(collection_info.collection_id_, "", xb);
ASSERT_TRUE(stat.ok());
std::random_device rd;
@ -393,7 +393,7 @@ TEST_F(GetVectorByIdTest, with_delete) {
for (auto& id : ids_to_search) {
ids_to_delete.emplace_back(id);
}
stat = db_->DeleteVectors(table_info.collection_id_, ids_to_delete);
stat = db_->DeleteVectors(collection_info.collection_id_, ids_to_delete);
stat = db_->Flush();
ASSERT_TRUE(stat.ok());
@ -405,7 +405,7 @@ TEST_F(GetVectorByIdTest, with_delete) {
milvus::engine::ResultDistances result_distances;
milvus::engine::VectorsData vector;
stat = db_->GetVectorByID(table_info.collection_id_, id, vector);
stat = db_->GetVectorByID(collection_info.collection_id_, id, vector);
ASSERT_TRUE(stat.ok());
ASSERT_TRUE(vector.float_data_.empty());
ASSERT_EQ(vector.vector_count_, 0);
@ -413,19 +413,19 @@ TEST_F(GetVectorByIdTest, with_delete) {
}
TEST_F(SearchByIdTest, BINARY) {
milvus::engine::meta::CollectionSchema table_info;
table_info.dimension_ = TABLE_DIM;
table_info.collection_id_ = GetTableName();
table_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_BIN_IDMAP;
table_info.metric_type_ = (int32_t)milvus::engine::MetricType::JACCARD;
auto stat = db_->CreateTable(table_info);
milvus::engine::meta::CollectionSchema collection_info;
collection_info.dimension_ = TABLE_DIM;
collection_info.collection_id_ = GetTableName();
collection_info.engine_type_ = (int)milvus::engine::EngineType::FAISS_BIN_IDMAP;
collection_info.metric_type_ = (int32_t)milvus::engine::MetricType::JACCARD;
auto stat = db_->CreateCollection(collection_info);
ASSERT_TRUE(stat.ok());
milvus::engine::meta::CollectionSchema table_info_get;
table_info_get.collection_id_ = table_info.collection_id_;
stat = db_->DescribeTable(table_info_get);
milvus::engine::meta::CollectionSchema collection_info_get;
collection_info_get.collection_id_ = collection_info.collection_id_;
stat = db_->DescribeCollection(collection_info_get);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(table_info_get.dimension_, TABLE_DIM);
ASSERT_EQ(collection_info_get.dimension_, TABLE_DIM);
int insert_loop = 10;
int64_t nb = 1000;
@ -448,7 +448,7 @@ TEST_F(SearchByIdTest, BINARY) {
vectors.id_array_.emplace_back(k * nb + i);
}
stat = db_->InsertVectors(table_info.collection_id_, "", vectors);
stat = db_->InsertVectors(collection_info.collection_id_, "", vectors);
ASSERT_TRUE(stat.ok());
}
@ -468,7 +468,7 @@ TEST_F(SearchByIdTest, BINARY) {
ASSERT_TRUE(stat.ok());
uint64_t row_count;
stat = db_->GetTableRowCount(table_info.collection_id_, row_count);
stat = db_->GetCollectionRowCount(collection_info.collection_id_, row_count);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(row_count, nb * insert_loop);
@ -482,11 +482,11 @@ TEST_F(SearchByIdTest, BINARY) {
milvus::engine::ResultDistances result_distances;
milvus::engine::VectorsData vector;
stat = db_->GetVectorByID(table_info.collection_id_, id, vector);
stat = db_->GetVectorByID(collection_info.collection_id_, id, vector);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(vector.vector_count_, 1);
stat = db_->Query(dummy_context_, table_info.collection_id_, tags, topk, json_params, vector, result_ids,
stat = db_->Query(dummy_context_, collection_info.collection_id_, tags, topk, json_params, vector, result_ids,
result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids[0], id);
@ -496,7 +496,7 @@ TEST_F(SearchByIdTest, BINARY) {
result_ids.clear();
result_distances.clear();
stat = db_->QueryByID(dummy_context_, table_info.collection_id_, tags, topk, json_params, id, result_ids,
stat = db_->QueryByID(dummy_context_, collection_info.collection_id_, tags, topk, json_params, id, result_ids,
result_distances);
ASSERT_TRUE(stat.ok());
ASSERT_EQ(result_ids[0], id);
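For readers skimming the rename, a minimal sketch of the DB-layer calls these tests now exercise; the identifiers come from the diff, while the collection name, dimension, and the live `db` handle are assumptions for illustration.

// Sketch only: assumes an initialized DB handle `db` exposing the renamed API above.
milvus::engine::meta::CollectionSchema schema;
schema.collection_id_ = "example_collection";  // hypothetical name
schema.dimension_ = 128;                       // assumed dimension
auto status = db->CreateCollection(schema);    // formerly CreateTable

milvus::engine::meta::CollectionSchema fetched;
fetched.collection_id_ = schema.collection_id_;
status = db->DescribeCollection(fetched);      // formerly DescribeTable

milvus::engine::VectorsData vectors;
// ... populate vectors.float_data_ and vectors.id_array_ ...
status = db->InsertVectors(schema.collection_id_, "", vectors);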


@ -53,13 +53,13 @@ class TestWalMeta : public SqliteMetaImpl {
}
Status
CreateTable(CollectionSchema& table_schema) override {
CreateCollection(CollectionSchema& table_schema) override {
tables_.push_back(table_schema);
return Status::OK();
}
Status
AllTables(std::vector<CollectionSchema>& table_schema_array) override {
AllCollections(std::vector<CollectionSchema>& table_schema_array) override {
table_schema_array = tables_;
return Status::OK();
}
@ -87,7 +87,7 @@ class TestWalMetaError : public SqliteMetaImpl {
}
Status
AllTables(std::vector<CollectionSchema>& table_schema_array) override {
AllCollections(std::vector<CollectionSchema>& table_schema_array) override {
return Status(DB_ERROR, "error");
}
};
@ -422,17 +422,17 @@ TEST(WalTest, MANAGER_INIT_TEST) {
milvus::engine::meta::CollectionSchema table_schema_1;
table_schema_1.collection_id_ = "table1";
table_schema_1.flush_lsn_ = (uint64_t)1 << 32 | 60;
meta->CreateTable(table_schema_1);
meta->CreateCollection(table_schema_1);
milvus::engine::meta::CollectionSchema table_schema_2;
table_schema_2.collection_id_ = "table2";
table_schema_2.flush_lsn_ = (uint64_t)1 << 32 | 20;
meta->CreateTable(table_schema_2);
meta->CreateCollection(table_schema_2);
milvus::engine::meta::CollectionSchema table_schema_3;
table_schema_3.collection_id_ = "table3";
table_schema_3.flush_lsn_ = (uint64_t)2 << 32 | 40;
meta->CreateTable(table_schema_3);
meta->CreateCollection(table_schema_3);
milvus::engine::wal::MXLogConfiguration wal_config;
wal_config.mxlog_path = WAL_GTEST_PATH;
@ -468,7 +468,7 @@ TEST(WalTest, MANAGER_APPEND_FAILED) {
milvus::engine::meta::CollectionSchema schema;
schema.collection_id_ = "table1";
schema.flush_lsn_ = 0;
meta->CreateTable(schema);
meta->CreateCollection(schema);
milvus::engine::wal::MXLogConfiguration wal_config;
wal_config.mxlog_path = WAL_GTEST_PATH;
@ -511,11 +511,11 @@ TEST(WalTest, MANAGER_RECOVERY_TEST) {
milvus::engine::meta::CollectionSchema schema;
schema.collection_id_ = "collection";
schema.flush_lsn_ = 0;
meta->CreateTable(schema);
meta->CreateCollection(schema);
std::vector<int64_t> ids(1024, 0);
std::vector<float> data_float(1024 * 512, 0);
manager->CreateTable(schema.collection_id_);
manager->CreateCollection(schema.collection_id_);
ASSERT_TRUE(manager->Insert(schema.collection_id_, "", ids, data_float));
// recovery
@ -578,12 +578,12 @@ TEST(WalTest, MANAGER_TEST) {
// table1 create and insert
std::string table_id_1 = "table1";
manager->CreateTable(table_id_1);
manager->CreateCollection(table_id_1);
ASSERT_TRUE(manager->Insert(table_id_1, "", ids, data_float));
// table2 create and insert
std::string table_id_2 = "table2";
manager->CreateTable(table_id_2);
manager->CreateCollection(table_id_2);
ASSERT_TRUE(manager->Insert(table_id_2, "", ids, data_byte));
// table1 delete
@ -591,7 +591,7 @@ TEST(WalTest, MANAGER_TEST) {
// table3 create and insert
std::string table_id_3 = "table3";
manager->CreateTable(table_id_3);
manager->CreateCollection(table_id_3);
ASSERT_TRUE(manager->Insert(table_id_3, "", ids, data_float));
// flush table1
@ -606,7 +606,7 @@ TEST(WalTest, MANAGER_TEST) {
if (record.type == milvus::engine::wal::MXLogType::Flush) {
ASSERT_EQ(record.collection_id, table_id_1);
ASSERT_EQ(new_lsn, flush_lsn);
manager->TableFlushed(table_id_1, new_lsn);
manager->CollectionFlushed(table_id_1, new_lsn);
break;
} else {
@ -627,12 +627,12 @@ TEST(WalTest, MANAGER_TEST) {
ASSERT_EQ(manager->GetNextRecord(record), milvus::WAL_SUCCESS);
ASSERT_EQ(record.type, milvus::engine::wal::MXLogType::Flush);
ASSERT_EQ(record.collection_id, table_id_2);
manager->TableFlushed(table_id_2, flush_lsn);
manager->CollectionFlushed(table_id_2, flush_lsn);
ASSERT_EQ(manager->Flush(table_id_2), 0);
flush_lsn = manager->Flush();
ASSERT_NE(flush_lsn, 0);
manager->DropTable(table_id_3);
manager->DropCollection(table_id_3);
ASSERT_EQ(manager->GetNextRecord(record), milvus::WAL_SUCCESS);
ASSERT_EQ(record.type, milvus::engine::wal::MXLogType::Flush);
@ -665,8 +665,8 @@ TEST(WalTest, MANAGER_SAME_NAME_TABLE) {
std::vector<uint8_t> data_byte(1024 * 512, 0);
// create 2 tables
manager->CreateTable(table_id_1);
manager->CreateTable(table_id_2);
manager->CreateCollection(table_id_1);
manager->CreateCollection(table_id_2);
// command
ASSERT_TRUE(manager->Insert(table_id_1, "", ids, data_byte));
@ -675,8 +675,8 @@ TEST(WalTest, MANAGER_SAME_NAME_TABLE) {
ASSERT_TRUE(manager->DeleteById(table_id_2, ids));
// re-create collection
manager->DropTable(table_id_1);
manager->CreateTable(table_id_1);
manager->DropCollection(table_id_1);
manager->CreateCollection(table_id_1);
milvus::engine::wal::MXLogRecord record;
while (1) {
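As a compact reference, a hedged sketch of the renamed WAL-manager lifecycle these tests drive; construction and configuration of `manager` are elided because they depend on the fixture's MXLogConfiguration.

// Sketch: `manager` is an initialized WAL manager as built in the tests above.
manager->CreateCollection("table1");        // formerly CreateTable
std::vector<int64_t> ids(1024, 0);
std::vector<float> data(1024 * 512, 0);
manager->Insert("table1", "", ids, data);
auto lsn = manager->Flush("table1");        // per the assertions above, 0 means nothing pending
manager->CollectionFlushed("table1", lsn);  // formerly TableFlushed
manager->DropCollection("table1");          // formerly DropTable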


@ -71,11 +71,11 @@ TEST_F(MetricTest, METRIC_TEST) {
milvus::engine::meta::CollectionSchema group_info;
group_info.dimension_ = group_dim;
group_info.collection_id_ = group_name;
auto stat = db_->CreateTable(group_info);
auto stat = db_->CreateCollection(group_info);
milvus::engine::meta::CollectionSchema group_info_get;
group_info_get.collection_id_ = group_name;
stat = db_->DescribeTable(group_info_get);
stat = db_->DescribeCollection(group_info_get);
int nb = 50;
milvus::engine::VectorsData xb;


@ -109,8 +109,8 @@ TEST(TaskTest, TEST_TASK) {
build_index_task.Execute();
fiu_disable("XBuildIndexTask.Execute.build_index_fail");
// always enable 'has_table'
fiu_enable("XBuildIndexTask.Execute.has_table", 1, NULL, 0);
// always enable 'has_collection'
fiu_enable("XBuildIndexTask.Execute.has_collection", 1, NULL, 0);
build_index_task.to_index_engine_ =
EngineFactory::Build(file->dimension_, file->location_, (EngineType)file->engine_type_,
(MetricType)file->metric_type_, json);
@ -138,7 +138,7 @@ TEST(TaskTest, TEST_TASK) {
fiu_disable("XBuildIndexTask.Execute.update_table_file_fail");
fiu_disable("XBuildIndexTask.Execute.throw_std_exception");
fiu_disable("XBuildIndexTask.Execute.has_table");
fiu_disable("XBuildIndexTask.Execute.has_collection");
fiu_disable("XBuildIndexTask.Execute.create_table_success");
build_index_task.Execute();
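The libfiu fault points move with the rename as well; a small sketch of the enable/exercise/disable pattern with the new 'has_collection' tag (the task object and its inputs are assumed from the fixture).

fiu_init(0);
// Fault point renamed from "XBuildIndexTask.Execute.has_table":
fiu_enable("XBuildIndexTask.Execute.has_collection", 1, NULL, 0);
build_index_task.Execute();  // runs with the 'has_collection' branch forced on
fiu_disable("XBuildIndexTask.Execute.has_collection");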


@ -192,7 +192,7 @@ CurrentTmDate(int64_t offset_day = 0) {
} // namespace
TEST_F(RpcHandlerTest, HAS_TABLE_TEST) {
TEST_F(RpcHandlerTest, HAS_COLLECTION_TEST) {
::grpc::ServerContext context;
handler->SetContext(&context, dummy_context);
handler->RegisterRequestHandler(milvus::server::RequestHandler());
@ -207,10 +207,10 @@ TEST_F(RpcHandlerTest, HAS_TABLE_TEST) {
fiu_init(0);
fiu_enable("HasTableRequest.OnExecute.throw_std_exception", 1, NULL, 0);
fiu_enable("HasCollectionRequest.OnExecute.throw_std_exception", 1, NULL, 0);
handler->HasTable(&context, &request, &reply);
ASSERT_NE(reply.status().error_code(), ::milvus::grpc::ErrorCode::SUCCESS);
fiu_disable("HasTableRequest.OnExecute.throw_std_exception");
fiu_disable("HasCollectionRequest.OnExecute.throw_std_exception");
}
TEST_F(RpcHandlerTest, INDEX_TEST) {
@ -238,10 +238,10 @@ TEST_F(RpcHandlerTest, INDEX_TEST) {
// ASSERT_EQ(error_code, ::milvus::grpc::ErrorCode::SUCCESS);
fiu_init(0);
fiu_enable("CreateIndexRequest.OnExecute.not_has_table", 1, NULL, 0);
fiu_enable("CreateIndexRequest.OnExecute.not_has_collection", 1, NULL, 0);
grpc_status = handler->CreateIndex(&context, &request, &response);
ASSERT_TRUE(grpc_status.ok());
fiu_disable("CreateIndexRequest.OnExecute.not_has_table");
fiu_disable("CreateIndexRequest.OnExecute.not_has_collection");
fiu_enable("CreateIndexRequest.OnExecute.throw_std.exception", 1, NULL, 0);
grpc_status = handler->CreateIndex(&context, &request, &response);
@ -673,13 +673,13 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
ASSERT_EQ(status.error_code(), ::grpc::Status::OK.error_code());
fiu_init(0);
fiu_enable("DescribeTableRequest.OnExecute.describe_table_fail", 1, NULL, 0);
fiu_enable("DescribeCollectionRequest.OnExecute.describe_table_fail", 1, NULL, 0);
handler->DescribeTable(&context, &collection_name, &table_schema);
fiu_disable("DescribeTableRequest.OnExecute.describe_table_fail");
fiu_disable("DescribeCollectionRequest.OnExecute.describe_table_fail");
fiu_enable("DescribeTableRequest.OnExecute.throw_std_exception", 1, NULL, 0);
fiu_enable("DescribeCollectionRequest.OnExecute.throw_std_exception", 1, NULL, 0);
handler->DescribeTable(&context, &collection_name, &table_schema);
fiu_disable("DescribeTableRequest.OnExecute.throw_std_exception");
fiu_disable("DescribeCollectionRequest.OnExecute.throw_std_exception");
::milvus::grpc::InsertParam request;
std::vector<std::vector<float>> record_array;
@ -723,14 +723,14 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
ASSERT_EQ(status.error_code(), ::grpc::Status::OK.error_code());
// show collection info
::milvus::grpc::TableInfo table_info;
status = handler->ShowTableInfo(&context, &collection_name, &table_info);
::milvus::grpc::TableInfo collection_info;
status = handler->ShowTableInfo(&context, &collection_name, &collection_info);
ASSERT_EQ(status.error_code(), ::grpc::Status::OK.error_code());
fiu_init(0);
fiu_enable("ShowTablesRequest.OnExecute.show_tables_fail", 1, NULL, 0);
fiu_enable("ShowCollectionsRequest.OnExecute.show_tables_fail", 1, NULL, 0);
handler->ShowTables(&context, &cmd, &table_name_list);
fiu_disable("ShowTablesRequest.OnExecute.show_tables_fail");
fiu_disable("ShowCollectionsRequest.OnExecute.show_tables_fail");
// Count Collection
::milvus::grpc::TableRowCount count;
@ -741,17 +741,17 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
ASSERT_EQ(status.error_code(), ::grpc::Status::OK.error_code());
// ASSERT_EQ(count.table_row_count(), vector_ids.vector_id_array_size());
fiu_init(0);
fiu_enable("CountTableRequest.OnExecute.db_not_found", 1, NULL, 0);
fiu_enable("CountCollectionRequest.OnExecute.db_not_found", 1, NULL, 0);
status = handler->CountTable(&context, &collection_name, &count);
fiu_disable("CountTableRequest.OnExecute.db_not_found");
fiu_disable("CountCollectionRequest.OnExecute.db_not_found");
fiu_enable("CountTableRequest.OnExecute.status_error", 1, NULL, 0);
fiu_enable("CountCollectionRequest.OnExecute.status_error", 1, NULL, 0);
status = handler->CountTable(&context, &collection_name, &count);
fiu_disable("CountTableRequest.OnExecute.status_error");
fiu_disable("CountCollectionRequest.OnExecute.status_error");
fiu_enable("CountTableRequest.OnExecute.throw_std_exception", 1, NULL, 0);
fiu_enable("CountCollectionRequest.OnExecute.throw_std_exception", 1, NULL, 0);
status = handler->CountTable(&context, &collection_name, &count);
fiu_disable("CountTableRequest.OnExecute.throw_std_exception");
fiu_disable("CountCollectionRequest.OnExecute.throw_std_exception");
// Preload Collection
collection_name.Clear();
@ -760,38 +760,38 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
status = handler->PreloadTable(&context, &collection_name, &response);
ASSERT_EQ(status.error_code(), ::grpc::Status::OK.error_code());
fiu_enable("PreloadTableRequest.OnExecute.preload_table_fail", 1, NULL, 0);
fiu_enable("PreloadCollectionRequest.OnExecute.preload_table_fail", 1, NULL, 0);
handler->PreloadTable(&context, &collection_name, &response);
fiu_disable("PreloadTableRequest.OnExecute.preload_table_fail");
fiu_disable("PreloadCollectionRequest.OnExecute.preload_table_fail");
fiu_enable("PreloadTableRequest.OnExecute.throw_std_exception", 1, NULL, 0);
fiu_enable("PreloadCollectionRequest.OnExecute.throw_std_exception", 1, NULL, 0);
handler->PreloadTable(&context, &collection_name, &response);
fiu_disable("PreloadTableRequest.OnExecute.throw_std_exception");
fiu_disable("PreloadCollectionRequest.OnExecute.throw_std_exception");
fiu_init(0);
fiu_enable("CreateTableRequest.OnExecute.invalid_index_file_size", 1, NULL, 0);
fiu_enable("CreateCollectionRequest.OnExecute.invalid_index_file_size", 1, NULL, 0);
tableschema.set_table_name(tablename);
handler->CreateTable(&context, &tableschema, &response);
ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
fiu_disable("CreateTableRequest.OnExecute.invalid_index_file_size");
fiu_disable("CreateCollectionRequest.OnExecute.invalid_index_file_size");
fiu_enable("CreateTableRequest.OnExecute.db_already_exist", 1, NULL, 0);
fiu_enable("CreateCollectionRequest.OnExecute.db_already_exist", 1, NULL, 0);
tableschema.set_table_name(tablename);
handler->CreateTable(&context, &tableschema, &response);
ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
fiu_disable("CreateTableRequest.OnExecute.db_already_exist");
fiu_disable("CreateCollectionRequest.OnExecute.db_already_exist");
fiu_enable("CreateTableRequest.OnExecute.create_table_fail", 1, NULL, 0);
fiu_enable("CreateCollectionRequest.OnExecute.create_table_fail", 1, NULL, 0);
tableschema.set_table_name(tablename);
handler->CreateTable(&context, &tableschema, &response);
ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
fiu_disable("CreateTableRequest.OnExecute.create_table_fail");
fiu_disable("CreateCollectionRequest.OnExecute.create_table_fail");
fiu_enable("CreateTableRequest.OnExecute.throw_std_exception", 1, NULL, 0);
fiu_enable("CreateCollectionRequest.OnExecute.throw_std_exception", 1, NULL, 0);
tableschema.set_table_name(tablename);
handler->CreateTable(&context, &tableschema, &response);
ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
fiu_disable("CreateTableRequest.OnExecute.throw_std_exception");
fiu_disable("CreateCollectionRequest.OnExecute.throw_std_exception");
// Drop collection
collection_name.set_table_name("");
@ -799,20 +799,20 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
::grpc::Status grpc_status = handler->DropTable(&context, &collection_name, &response);
collection_name.set_table_name(tablename);
fiu_enable("DropTableRequest.OnExecute.db_not_found", 1, NULL, 0);
fiu_enable("DropCollectionRequest.OnExecute.db_not_found", 1, NULL, 0);
handler->DropTable(&context, &collection_name, &response);
ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
fiu_disable("DropTableRequest.OnExecute.db_not_found");
fiu_disable("DropCollectionRequest.OnExecute.db_not_found");
fiu_enable("DropTableRequest.OnExecute.describe_table_fail", 1, NULL, 0);
fiu_enable("DropCollectionRequest.OnExecute.describe_table_fail", 1, NULL, 0);
handler->DropTable(&context, &collection_name, &response);
ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
fiu_disable("DropTableRequest.OnExecute.describe_table_fail");
fiu_disable("DropCollectionRequest.OnExecute.describe_table_fail");
fiu_enable("DropTableRequest.OnExecute.throw_std_exception", 1, NULL, 0);
fiu_enable("DropCollectionRequest.OnExecute.throw_std_exception", 1, NULL, 0);
handler->DropTable(&context, &collection_name, &response);
ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
fiu_disable("DropTableRequest.OnExecute.throw_std_exception");
fiu_disable("DropCollectionRequest.OnExecute.throw_std_exception");
grpc_status = handler->DropTable(&context, &collection_name, &response);
ASSERT_EQ(grpc_status.error_code(), ::grpc::Status::OK.error_code());
@ -825,10 +825,10 @@ TEST_F(RpcHandlerTest, TABLES_TEST) {
handler->CreateTable(&context, &tableschema, &response);
ASSERT_EQ(response.error_code(), ::grpc::Status::OK.error_code());
fiu_enable("DropTableRequest.OnExecute.drop_table_fail", 1, NULL, 0);
fiu_enable("DropCollectionRequest.OnExecute.drop_table_fail", 1, NULL, 0);
handler->DropTable(&context, &collection_name, &response);
ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
fiu_disable("DropTableRequest.OnExecute.drop_table_fail");
fiu_disable("DropCollectionRequest.OnExecute.drop_table_fail");
handler->DropTable(&context, &collection_name, &response);
}
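Every negative case in this file repeats the same enable/call/assert/disable dance around a renamed fault point; here is a hypothetical helper (not part of the PR) that condenses the pattern, assuming the fixture's context, handler, and protobuf messages.

// Hypothetical helper; `handler`, `context`, `collection_name`, `response` are fixture members.
auto with_fault = [&](const char* point, auto&& body) {
    fiu_enable(point, 1, NULL, 0);
    body();
    fiu_disable(point);
};
with_fault("DropCollectionRequest.OnExecute.drop_table_fail", [&] {
    handler->DropTable(&context, &collection_name, &response);
    ASSERT_NE(response.error_code(), ::grpc::Status::OK.error_code());
});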


@ -410,30 +410,30 @@ TEST(ValidationUtilTest, VALIDATE_DIMENSION_TEST) {
}
TEST(ValidationUtilTest, VALIDATE_INDEX_TEST) {
ASSERT_EQ(milvus::server::ValidationUtil::ValidateTableIndexType((int)milvus::engine::EngineType::INVALID).code(),
milvus::SERVER_INVALID_INDEX_TYPE);
ASSERT_EQ(milvus::server::ValidationUtil::ValidateCollectionIndexType(
(int)milvus::engine::EngineType::INVALID).code(), milvus::SERVER_INVALID_INDEX_TYPE);
for (int i = 1; i <= (int)milvus::engine::EngineType::MAX_VALUE; i++) {
#ifndef MILVUS_GPU_VERSION
if (i == (int)milvus::engine::EngineType::FAISS_IVFSQ8H) {
ASSERT_NE(milvus::server::ValidationUtil::ValidateTableIndexType(i).code(), milvus::SERVER_SUCCESS);
ASSERT_NE(milvus::server::ValidationUtil::ValidateCollectionIndexType(i).code(), milvus::SERVER_SUCCESS);
continue;
}
#endif
ASSERT_EQ(milvus::server::ValidationUtil::ValidateTableIndexType(i).code(), milvus::SERVER_SUCCESS);
ASSERT_EQ(milvus::server::ValidationUtil::ValidateCollectionIndexType(i).code(), milvus::SERVER_SUCCESS);
}
ASSERT_EQ(
milvus::server::ValidationUtil::ValidateTableIndexType((int)milvus::engine::EngineType::MAX_VALUE + 1).code(),
milvus::SERVER_INVALID_INDEX_TYPE);
milvus::server::ValidationUtil::ValidateCollectionIndexType(
(int)milvus::engine::EngineType::MAX_VALUE + 1).code(), milvus::SERVER_INVALID_INDEX_TYPE);
ASSERT_EQ(milvus::server::ValidationUtil::ValidateTableIndexFileSize(0).code(),
ASSERT_EQ(milvus::server::ValidationUtil::ValidateCollectionIndexFileSize(0).code(),
milvus::SERVER_INVALID_INDEX_FILE_SIZE);
ASSERT_EQ(milvus::server::ValidationUtil::ValidateTableIndexFileSize(100).code(), milvus::SERVER_SUCCESS);
ASSERT_EQ(milvus::server::ValidationUtil::ValidateCollectionIndexFileSize(100).code(), milvus::SERVER_SUCCESS);
ASSERT_EQ(milvus::server::ValidationUtil::ValidateTableIndexMetricType(0).code(),
ASSERT_EQ(milvus::server::ValidationUtil::ValidateCollectionIndexMetricType(0).code(),
milvus::SERVER_INVALID_INDEX_METRIC_TYPE);
ASSERT_EQ(milvus::server::ValidationUtil::ValidateTableIndexMetricType(1).code(), milvus::SERVER_SUCCESS);
ASSERT_EQ(milvus::server::ValidationUtil::ValidateTableIndexMetricType(2).code(), milvus::SERVER_SUCCESS);
ASSERT_EQ(milvus::server::ValidationUtil::ValidateCollectionIndexMetricType(1).code(), milvus::SERVER_SUCCESS);
ASSERT_EQ(milvus::server::ValidationUtil::ValidateCollectionIndexMetricType(2).code(), milvus::SERVER_SUCCESS);
}
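Summarizing the renamed validators exercised above, a short sketch; the accepted and rejected values mirror the assertions in this test, not new behavior.

// Values follow the assertions above: 0/INVALID rejected, in-range engine types accepted.
auto s1 = milvus::server::ValidationUtil::ValidateCollectionIndexType(1);
// Index file size must be positive: 0 rejected, 100 accepted.
auto s2 = milvus::server::ValidationUtil::ValidateCollectionIndexFileSize(100);
// Metric types 1 and 2 accepted, 0 rejected.
auto s3 = milvus::server::ValidationUtil::ValidateCollectionIndexMetricType(1);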
TEST(ValidationUtilTest, VALIDATE_INDEX_PARAMS_TEST) {


@ -294,7 +294,7 @@ TEST_F(WebHandlerTest, TABLE) {
ASSERT_EQ(StatusCode::TABLE_NOT_EXISTS, status_dto->code->getValue());
}
TEST_F(WebHandlerTest, HAS_TABLE_TEST) {
TEST_F(WebHandlerTest, HAS_COLLECTION_TEST) {
handler->RegisterRequestHandler(milvus::server::RequestHandler());
auto collection_name = milvus::server::web::OString(TABLE_NAME) + RandomName().c_str();


@ -80,8 +80,8 @@ ClientTest::CreateCollection(const std::string& collection_name, int64_t dim, mi
std::cout << "CreateCollection function call status: " << stat.message() << std::endl;
milvus_sdk::Utils::PrintCollectionParam(collection_param);
bool has_table = conn_->HasCollection(collection_param.collection_name);
if (has_table) {
bool has_collection = conn_->HasCollection(collection_param.collection_name);
if (has_collection) {
std::cout << "Collection is created" << std::endl;
}
}
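For SDK callers, a hedged sketch of the post-rename existence check mirroring the snippet above; the connection setup and a populated collection_param are assumed from the surrounding example code.

// Assumes an established SDK connection `conn` and a collection created as above.
bool has_collection = conn->HasCollection(collection_param.collection_name);
if (has_collection) {
    std::cout << "Collection is created" << std::endl;
} else {
    std::cout << "Collection not found" << std::endl;
}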


@ -198,7 +198,7 @@ ClientProxy::HasCollection(const std::string& collection_name) {
Status status = Status::OK();
::milvus::grpc::TableName grpc_collection_name;
grpc_collection_name.set_table_name(collection_name);
bool result = client_ptr_->HasTable(grpc_collection_name, status);
bool result = client_ptr_->HasCollection(grpc_collection_name, status);
return result;
}
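End to end, the SDK call path after the rename is a straight delegation; a compact sketch using the types from the surrounding code (the collection name is hypothetical, and this runs in the milvus namespace as above).

// ClientProxy::HasCollection(name)
//   -> GrpcClient::HasCollection(grpc_collection_name, status)
//     -> stub_->HasCollection(&context, collection_name, &response)
::milvus::grpc::TableName grpc_collection_name;
grpc_collection_name.set_table_name("example_collection");  // proto field name unchanged by this PR
Status status = Status::OK();
bool exists = client_ptr_->HasCollection(grpc_collection_name, status);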


@ -54,13 +54,13 @@ GrpcClient::CreateTable(const ::milvus::grpc::TableSchema& table_schema) {
}
bool
GrpcClient::HasTable(const ::milvus::grpc::TableName& collection_name, Status& status) {
GrpcClient::HasCollection(const ::milvus::grpc::TableName& collection_name, Status& status) {
ClientContext context;
::milvus::grpc::BoolReply response;
::grpc::Status grpc_status = stub_->HasTable(&context, collection_name, &response);
::grpc::Status grpc_status = stub_->HasCollection(&context, collection_name, &response);
if (!grpc_status.ok()) {
std::cerr << "HasTable gRPC failed!" << std::endl;
std::cerr << "HasCollection gRPC failed!" << std::endl;
status = Status(StatusCode::RPCFailed, grpc_status.error_message());
}
if (response.status().error_code() != grpc::SUCCESS) {
@ -242,10 +242,10 @@ GrpcClient::ShowTables(milvus::grpc::TableNameList& table_name_list) {
}
Status
GrpcClient::ShowTableInfo(grpc::TableName& collection_name, grpc::TableInfo& table_info) {
GrpcClient::ShowTableInfo(grpc::TableName& collection_name, grpc::TableInfo& collection_info) {
ClientContext context;
::milvus::grpc::Command command;
::grpc::Status grpc_status = stub_->ShowTableInfo(&context, collection_name, &table_info);
::grpc::Status grpc_status = stub_->ShowTableInfo(&context, collection_name, &collection_info);
if (!grpc_status.ok()) {
std::cerr << "ShowTableInfo gRPC failed!" << std::endl;
@ -253,9 +253,9 @@ GrpcClient::ShowTableInfo(grpc::TableName& collection_name, grpc::TableInfo& table_info) {
return Status(StatusCode::RPCFailed, grpc_status.error_message());
}
if (table_info.status().error_code() != grpc::SUCCESS) {
std::cerr << table_info.status().reason() << std::endl;
return Status(StatusCode::ServerFailed, table_info.status().reason());
if (collection_info.status().error_code() != grpc::SUCCESS) {
std::cerr << collection_info.status().reason() << std::endl;
return Status(StatusCode::ServerFailed, collection_info.status().reason());
}
return Status::OK();
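The renamed client methods share one error-handling shape; a hypothetical refactor (not in the PR) that names the pattern, built only from the checks visible above and assumed to live in the same milvus namespace.

// Hypothetical helper mirroring the checks in HasCollection and ShowTableInfo above.
template <typename Response>
Status
CheckGrpcResult(const ::grpc::Status& grpc_status, const Response& response, const char* rpc_name) {
    if (!grpc_status.ok()) {
        std::cerr << rpc_name << " gRPC failed!" << std::endl;
        return Status(StatusCode::RPCFailed, grpc_status.error_message());
    }
    if (response.status().error_code() != grpc::SUCCESS) {
        std::cerr << response.status().reason() << std::endl;
        return Status(StatusCode::ServerFailed, response.status().reason());
    }
    return Status::OK();
}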


@ -39,7 +39,7 @@ class GrpcClient {
CreateTable(const grpc::TableSchema& table_schema);
bool
HasTable(const grpc::TableName& table_name, Status& status);
HasCollection(const grpc::TableName& table_name, Status& status);
Status
DropTable(const grpc::TableName& table_name);
@ -69,7 +69,7 @@ class GrpcClient {
ShowTables(milvus::grpc::TableNameList& table_name_list);
Status
ShowTableInfo(grpc::TableName& table_name, grpc::TableInfo& table_info);
ShowTableInfo(grpc::TableName& table_name, grpc::TableInfo& collection_info);
Status
Cmd(const std::string& cmd, std::string& result);