mirror of https://github.com/milvus-io/milvus.git
refactor code and add error log (#976)
* refactor code and add error log
* remove the space
* refactor
* refactor
parent 5d2d34791d
commit c787b84daa
@@ -79,7 +79,7 @@ bool
 YamlConfigMgr::SetSequence(const YAML::Node& node, const std::string& child_name, ConfigNode& config) {
     if (node[child_name].IsDefined()) {
         size_t cnt = node[child_name].size();
-        for (size_t i = 0; i < cnt; i++) {
+        for (size_t i = 0; i < cnt; ++i) {
             config.AddSequenceItem(child_name, node[child_name][i].as<std::string>());
         }
         return true;
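Note: the recurring `i++` to `++i` change throughout this commit follows the common C++ style rule that pre-increment never has to materialize a copy of the old value. For plain integers the compiler emits identical code either way; the habit pays off for iterators and other class types. A minimal sketch of why post-increment can be costlier — the `Counter` type below is hypothetical, not from this commit:

    #include <iostream>

    // Hypothetical counter type, only to make the hidden copy visible.
    struct Counter {
        int value = 0;
        Counter& operator++() {    // pre-increment: modify and return *this
            ++value;
            return *this;
        }
        Counter operator++(int) {  // post-increment: must copy the old state
            Counter old = *this;
            ++value;
            return old;            // extra temporary that ++c avoids
        }
    };

    int main() {
        Counter c;
        ++c;  // no temporary created
        c++;  // creates and discards a temporary Counter
        std::cout << c.value << '\n';  // prints 2
        return 0;
    }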
@@ -600,7 +600,7 @@ DBImpl::WaitBuildIndexFinish() {
 void
 DBImpl::StartMetricTask() {
     static uint64_t metric_clock_tick = 0;
-    metric_clock_tick++;
+    ++metric_clock_tick;
     if (metric_clock_tick % METRIC_ACTION_INTERVAL != 0) {
         return;
     }
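The tick counters in StartMetricTask, StartCompactionTask, and StartBuildIndexTask below all gate background work to every Nth invocation with a static counter and a modulo test. A self-contained sketch of the same gating pattern — the interval value and function name are illustrative, not from the Milvus source:

    #include <cstdint>
    #include <iostream>

    constexpr uint64_t ACTION_INTERVAL = 5;  // illustrative value

    void MaybeRunTask() {
        static uint64_t clock_tick = 0;  // persists across calls
        ++clock_tick;
        if (clock_tick % ACTION_INTERVAL != 0) {
            return;                      // skip all but every 5th call
        }
        std::cout << "running task at tick " << clock_tick << '\n';
    }

    int main() {
        for (int i = 0; i < 12; ++i) {
            MaybeRunTask();              // prints at ticks 5 and 10
        }
        return 0;
    }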
@@ -654,7 +654,7 @@ DBImpl::SyncMemData(std::set<std::string>& sync_table_ids) {
 void
 DBImpl::StartCompactionTask() {
     static uint64_t compact_clock_tick = 0;
-    compact_clock_tick++;
+    ++compact_clock_tick;
    if (compact_clock_tick % COMPACT_ACTION_INTERVAL != 0) {
        return;
    }
@@ -844,7 +844,7 @@ DBImpl::BackgroundCompaction(std::set<std::string> table_ids) {
 void
 DBImpl::StartBuildIndexTask(bool force) {
     static uint64_t index_clock_tick = 0;
-    index_clock_tick++;
+    ++index_clock_tick;
     if (!force && (index_clock_tick % INDEX_ACTION_INTERVAL != 0)) {
         return;
     }
@@ -924,7 +924,7 @@ DBImpl::GetFilesToBuildIndex(const std::string& table_id, const std::vector<int>
                 (*it).row_count_ < meta::BUILD_INDEX_THRESHOLD) {
                 it = files.erase(it);
             } else {
-                it++;
+                ++it;
             }
         }
 
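The loop above is the standard erase-or-advance idiom: `erase` returns the next valid iterator, and the iterator is only incremented on the branch that keeps the element (pre-increment, per this commit). A self-contained sketch of the same idiom — the threshold is illustrative, standing in for BUILD_INDEX_THRESHOLD:

    #include <iostream>
    #include <vector>

    int main() {
        std::vector<int> row_counts = {10, 5000, 3, 8000, 42};
        const int kThreshold = 1000;  // illustrative value

        for (auto it = row_counts.begin(); it != row_counts.end();) {
            if (*it < kThreshold) {
                it = row_counts.erase(it);  // erase invalidates it; use the returned iterator
            } else {
                ++it;                       // advance only when nothing was erased
            }
        }

        for (int v : row_counts) {
            std::cout << v << ' ';          // prints: 5000 8000
        }
        std::cout << '\n';
        return 0;
    }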
@@ -1054,7 +1054,7 @@ DBImpl::BuildTableIndexRecursively(const std::string& table_id, const TableIndex
 
         std::this_thread::sleep_for(std::chrono::milliseconds(std::min(10 * 1000, times * 100)));
         GetFilesToBuildIndex(table_id, file_types, table_files);
-        times++;
+        ++times;
 
         index_failed_checker_.IgnoreFailedIndexFiles(table_files);
     }
@@ -57,7 +57,7 @@ GetTableFileParentFolder(const DBMetaOptions& options, const meta::TableFileSche
         // round robin according to a file counter
         std::lock_guard<std::mutex> lock(index_file_counter_mutex);
         index = index_file_counter % path_count;
-        index_file_counter++;
+        ++index_file_counter;
     } else {
         // for other type files, they could be merged or deleted
         // so we round robin according to their file id
@@ -216,7 +216,7 @@ MySQLMetaImpl::ValidateMetaSchema() {
 
     try {
         mysqlpp::StoreQueryResult res = query_statement.store();
-        for (size_t i = 0; i < res.num_rows(); i++) {
+        for (size_t i = 0; i < res.num_rows(); ++i) {
             const mysqlpp::Row& row = res[i];
             std::string name, type;
             row["Field"].to_string(name);
@@ -1445,7 +1445,7 @@ MySQLMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFiles
                 auto dateItr = files.find(table_file.date_);
                 if (dateItr == files.end()) {
                     files[table_file.date_] = TableFilesSchema();
-                    to_merge_files++;
+                    ++to_merge_files;
                 }
 
                 files[table_file.date_].push_back(table_file);
@@ -1589,28 +1589,28 @@ MySQLMetaImpl::FilesByType(const std::string& table_id, const std::vector<int>&
             int32_t file_type = resRow["file_type"];
             switch (file_type) {
                 case (int)TableFileSchema::RAW:
-                    raw_count++;
+                    ++raw_count;
                     break;
                 case (int)TableFileSchema::NEW:
-                    new_count++;
+                    ++new_count;
                     break;
                 case (int)TableFileSchema::NEW_MERGE:
-                    new_merge_count++;
+                    ++new_merge_count;
                     break;
                 case (int)TableFileSchema::NEW_INDEX:
-                    new_index_count++;
+                    ++new_index_count;
                     break;
                 case (int)TableFileSchema::TO_INDEX:
-                    to_index_count++;
+                    ++to_index_count;
                     break;
                 case (int)TableFileSchema::INDEX:
-                    index_count++;
+                    ++index_count;
                     break;
                 case (int)TableFileSchema::BACKUP:
-                    backup_count++;
+                    ++backup_count;
                     break;
                 default:
-                    break;
+                    return Status(DB_ERROR, "Unknown file type.");
             }
         }
 
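This hunk carries the commit's "add error log" intent: the old `default: break;` silently ignored unrecognized file types, while the new code surfaces them as an error status the caller can log. A reduced sketch of the before/after behavior — the `Status` struct and enum below are stand-ins for illustration, not the Milvus classes:

    #include <iostream>
    #include <string>

    // Stand-ins for the Milvus Status/DB_ERROR pair, for illustration only.
    struct Status {
        int code;
        std::string message;
    };
    constexpr int DB_SUCCESS = 0;
    constexpr int DB_ERROR = 1;

    enum FileType { RAW = 0, INDEX = 2 };

    Status CountFile(int file_type, int& raw_count, int& index_count) {
        switch (file_type) {
            case RAW:
                ++raw_count;
                break;
            case INDEX:
                ++index_count;
                break;
            default:
                // Before this commit the default branch was `break;`,
                // so bad input vanished silently.
                return Status{DB_ERROR, "Unknown file type."};
        }
        return Status{DB_SUCCESS, ""};
    }

    int main() {
        int raw = 0, index = 0;
        Status s = CountFile(99, raw, index);  // 99 is not a known file type
        if (s.code != DB_SUCCESS) {
            std::cerr << s.message << '\n';    // now reported instead of ignored
        }
        return 0;
    }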
@@ -1639,7 +1639,7 @@ MySQLMetaImpl::FilesByType(const std::string& table_id, const std::vector<int>&
                 msg = msg + " backup files:" + std::to_string(backup_count);
                 break;
             default:
-                break;
+                return Status(DB_ERROR, "Unknown file type!");
         }
     }
     ENGINE_LOG_DEBUG << msg;
@@ -1838,7 +1838,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds, CleanUpFilter* filter) {
                     idsToDelete.emplace_back(std::to_string(table_file.id_));
                     table_ids.insert(table_file.table_id_);
 
-                    clean_files++;
+                    ++clean_files;
                 }
             }
 
@@ -1896,7 +1896,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds, CleanUpFilter* filter) {
                 resRow["table_id"].to_string(table_id);
 
                 utils::DeleteTablePath(options_, table_id, false);  // only delete empty folder
-                remove_tables++;
+                ++remove_tables;
                 idsToDeleteSS << "id = " << std::to_string(id) << " OR ";
             }
             std::string idsToDeleteStr = idsToDeleteSS.str();
@@ -810,7 +810,7 @@ SqliteMetaImpl::ShowPartitions(const std::string& table_id, std::vector<meta::Ta
         auto partitions = ConnectorPtr->select(columns(&TableSchema::table_id_),
                                                where(c(&TableSchema::owner_table_) == table_id
                                                      and c(&TableSchema::state_) != (int)TableSchema::TO_DELETE));
-        for (size_t i = 0; i < partitions.size(); i++) {
+        for (size_t i = 0; i < partitions.size(); ++i) {
             std::string partition_name = std::get<0>(partitions[i]);
             meta::TableSchema partition_schema;
             partition_schema.table_id_ = partition_name;
@@ -1023,7 +1023,7 @@ SqliteMetaImpl::FilesToMerge(const std::string& table_id, DatePartionedTableFile
             }
 
             files[table_file.date_].push_back(table_file);
-            to_merge_files++;
+            ++to_merge_files;
         }
 
        if (to_merge_files > 0) {
@@ -1136,21 +1136,29 @@ SqliteMetaImpl::FilesByType(const std::string& table_id,
             file_schema.created_on_ = std::get<7>(file);
 
             switch (file_schema.file_type_) {
-                case (int)TableFileSchema::RAW:raw_count++;
+                case (int)TableFileSchema::RAW:
+                    ++raw_count;
                     break;
-                case (int)TableFileSchema::NEW:new_count++;
+                case (int)TableFileSchema::NEW:
+                    ++new_count;
                     break;
-                case (int)TableFileSchema::NEW_MERGE:new_merge_count++;
+                case (int)TableFileSchema::NEW_MERGE:
+                    ++new_merge_count;
                     break;
-                case (int)TableFileSchema::NEW_INDEX:new_index_count++;
+                case (int)TableFileSchema::NEW_INDEX:
+                    ++new_index_count;
                     break;
-                case (int)TableFileSchema::TO_INDEX:to_index_count++;
+                case (int)TableFileSchema::TO_INDEX:
+                    ++to_index_count;
                     break;
-                case (int)TableFileSchema::INDEX:index_count++;
+                case (int)TableFileSchema::INDEX:
+                    ++index_count;
                     break;
-                case (int)TableFileSchema::BACKUP:backup_count++;
+                case (int)TableFileSchema::BACKUP:
+                    ++backup_count;
                     break;
-                default:break;
+                default:
+                    return Status(DB_ERROR, "Unknown file type.");
             }
 
             table_files.emplace_back(file_schema);
@@ -1180,7 +1188,8 @@ SqliteMetaImpl::FilesByType(const std::string& table_id,
             case (int)TableFileSchema::BACKUP:
                 msg = msg + " backup files:" + std::to_string(backup_count);
                 break;
-            default:break;
+            default:
+                return Status(DB_ERROR, "Unknown file type!");
         }
     }
     ENGINE_LOG_DEBUG << msg;
@@ -1353,7 +1362,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds, CleanUpFilter* filter) {
                     ENGINE_LOG_DEBUG << "Remove file id:" << table_file.file_id_ << " location:" << table_file.location_;
                     table_ids.insert(table_file.table_id_);
 
-                    clean_files++;
+                    ++clean_files;
                 }
             }
             return true;
@@ -1411,7 +1420,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds, CleanUpFilter* filter) {
                                                 where(c(&TableFileSchema::table_id_) == table_id));
                 if (selected.size() == 0) {
                     utils::DeleteTablePath(options_, table_id);
-                    remove_tables++;
+                    ++remove_tables;
                 }
             }
 
@@ -59,7 +59,7 @@ FaissFlatPass::Run(const TaskPtr& task) {
     } else {
         auto best_device_id = count_ % gpus.size();
         SERVER_LOG_DEBUG << "FaissFlatPass: nq > gpu_search_threshold, specify gpu" << best_device_id << " to search!";
-        count_++;
+        ++count_;
         res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, gpus[best_device_id]);
     }
     auto label = std::make_shared<SpecResLabel>(res_ptr);
@@ -61,7 +61,7 @@ FaissIVFPQPass::Run(const TaskPtr& task) {
     } else {
         auto best_device_id = count_ % gpus.size();
         SERVER_LOG_DEBUG << "FaissIVFPQPass: nq > gpu_search_threshold, specify gpu" << best_device_id << " to search!";
-        count_++;
+        ++count_;
         res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, gpus[best_device_id]);
     }
     auto label = std::make_shared<SpecResLabel>(res_ptr);
@@ -60,7 +60,7 @@ FaissIVFSQ8HPass::Run(const TaskPtr& task) {
         auto best_device_id = count_ % gpus.size();
         SERVER_LOG_DEBUG << "FaissIVFSQ8HPass: nq > gpu_search_threshold, specify gpu" << best_device_id
                          << " to search!";
-        count_++;
+        ++count_;
         res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, gpus[best_device_id]);
     }
     auto label = std::make_shared<SpecResLabel>(res_ptr);
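The three scheduler passes above and GetTableFileParentFolder all pick a target with the same `counter % pool_size` round-robin; this commit only switches the counter to pre-increment. A minimal sketch of that selection pattern — the class name and GPU ids are illustrative, not the Milvus API:

    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Illustrative round-robin selector, mirroring count_ % gpus.size().
    class RoundRobinPass {
     public:
        explicit RoundRobinPass(std::vector<int> gpus) : gpus_(std::move(gpus)) {}

        int PickDevice() {
            int best_device_id = static_cast<int>(count_ % gpus_.size());
            ++count_;  // same pre-increment style as the commit
            return gpus_[best_device_id];
        }

     private:
        std::vector<int> gpus_;
        uint64_t count_ = 0;
    };

    int main() {
        RoundRobinPass pass({0, 1, 2});  // three hypothetical GPU ids
        for (int i = 0; i < 5; ++i) {
            std::cout << "dispatch to gpu " << pass.PickDevice() << '\n';
        }
        // Output cycles: 0 1 2 0 1
        return 0;
    }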