Refactor LOG macro and improve log format (#1927)

* update log format

Signed-off-by: wxyu <xy.wang@zilliz.com>

* add new log macro

Signed-off-by: wxyu <xy.wang@zilliz.com>

* use new log macro instead

Signed-off-by: Xiangyu Wang <xy.wang@zilliz.com>

* add SetThreadName function

Signed-off-by: wxyu <xy.wang@zilliz.com>

* clang-format

Signed-off-by: wxyu <xy.wang@zilliz.com>

* set thread name

Signed-off-by: Xiangyu Wang <xy.wang@zilliz.com>

* add changelog

Signed-off-by: wxyu <xy.wang@zilliz.com>

* add git ignore .swp file

Signed-off-by: wxyu <xy.wang@zilliz.com>

* update log level in LogUtil.cpp

Signed-off-by: wxyu <xy.wang@zilliz.com>
Wang XiangYu 2020-04-15 14:44:00 +08:00 committed by GitHub
parent 407cedd74b
commit 890fe08e7c
79 changed files with 895 additions and 737 deletions
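
The core of this refactor is a rename of the per-module logging macros: the old SERVER_LOG_*, ENGINE_LOG_* and WAL_LOG_* names become LOG_SERVER_*_, LOG_ENGINE_*_ and LOG_WAL_*_, as the hunks below show at every call site. A minimal sketch of what such macros can look like on top of easylogging++ follows; the module tag strings and exact macro bodies here are illustrative assumptions, not the definitions added by this commit.

// Sketch only: assumed shape of the renamed logging macros.
#include <easylogging++.h>

#define LOG_SERVER_DEBUG_ LOG(DEBUG) << "[SERVER] "   // was SERVER_LOG_DEBUG
#define LOG_SERVER_ERROR_ LOG(ERROR) << "[SERVER] "   // was SERVER_LOG_ERROR
#define LOG_ENGINE_DEBUG_ LOG(DEBUG) << "[ENGINE] "   // was ENGINE_LOG_DEBUG
#define LOG_ENGINE_ERROR_ LOG(ERROR) << "[ENGINE] "   // was ENGINE_LOG_ERROR
#define LOG_WAL_ERROR_    LOG(ERROR) << "[WAL] "      // was WAL_LOG_ERROR

// Call sites keep the stream style, for example:
//   LOG_ENGINE_DEBUG_ << "Begin flush collection: " << collection_id;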

.gitignore

@ -32,3 +32,4 @@ cov_html/
# temp
shards/all_in_one_with_mysql/metadata/
shards/mishards/.env
*.swp


@ -2,6 +2,17 @@
Please mark all change in change log and use the issue from GitHub
# Milvus 0.9.0 (TBD)
## Bug
## Feature
## Improvement
- \#221 Refactor LOG macro
## Task
# Milvus 0.8.0 (TBD)
## Bug


@ -1,5 +1,5 @@
* GLOBAL:
FORMAT = "%datetime | %level | %logger | %msg"
FORMAT = "[%datetime][%level]%msg"
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-global.log"
ENABLED = true
TO_FILE = true
@ -24,4 +24,4 @@
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-error.log"
* FATAL:
ENABLED = true
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-fatal.log"
FILENAME = "@MILVUS_DB_PATH@/logs/milvus-%datetime{%y-%M-%d-%H:%m}-fatal.log"
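
The GLOBAL format line above is the log-format change: the pipe-separated "%datetime | %level | %logger | %msg" pattern becomes the compact "[%datetime][%level]%msg". In Milvus this format is read from the generated log config file at startup; the snippet below is only an illustration of applying the same format through the easylogging++ API, not code from this commit.

// Illustration only: the equivalent easylogging++ call for the new format.
#include <easylogging++.h>

INITIALIZE_EASYLOGGINGPP

int main() {
    el::Configurations conf;
    conf.setToDefault();
    // Old: "%datetime | %level | %logger | %msg"
    // New: "[%datetime][%level]%msg" -> e.g. "[2020-04-15 14:44:00,123][DEBUG] ..."
    conf.set(el::Level::Global, el::ConfigurationType::Format, "[%datetime][%level]%msg");
    el::Loggers::reconfigureAllLoggers(conf);

    LOG(INFO) << "log format reconfigured";
    return 0;
}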


@ -76,7 +76,7 @@ bool
Cache<ItemObj>::reserve(const int64_t item_size) {
std::lock_guard<std::mutex> lock(mutex_);
if (item_size > capacity_) {
SERVER_LOG_ERROR << header_ << " item size " << (item_size >> 20) << "MB too big to insert into cache capacity"
LOG_SERVER_ERROR_ << header_ << " item size " << (item_size >> 20) << "MB too big to insert into cache capacity"
<< (capacity_ >> 20) << "MB";
return false;
}
@ -92,7 +92,7 @@ Cache<ItemObj>::clear() {
std::lock_guard<std::mutex> lock(mutex_);
lru_.clear();
usage_ = 0;
SERVER_LOG_DEBUG << header_ << " Clear cache !";
LOG_SERVER_DEBUG_ << header_ << " Clear cache !";
}
@ -102,9 +102,9 @@ Cache<ItemObj>::print() {
std::lock_guard<std::mutex> lock(mutex_);
size_t cache_count = lru_.size();
// for (auto it = lru_.begin(); it != lru_.end(); ++it) {
// SERVER_LOG_DEBUG << it->first;
// LOG_SERVER_DEBUG_ << it->first;
// }
SERVER_LOG_DEBUG << header_ << " [item count]: " << cache_count << ", [usage] " << (usage_ >> 20)
LOG_SERVER_DEBUG_ << header_ << " [item count]: " << cache_count << ", [usage] " << (usage_ >> 20)
<< "MB, [capacity] " << (capacity_ >> 20) << "MB";
}
@ -128,15 +128,15 @@ Cache<ItemObj>::insert_internal(const std::string& key, const ItemObj& item) {
// if usage exceed capacity, free some items
if (usage_ > capacity_) {
SERVER_LOG_DEBUG << header_ << " Current usage " << (usage_ >> 20) << "MB is too high for capacity "
LOG_SERVER_DEBUG_ << header_ << " Current usage " << (usage_ >> 20) << "MB is too high for capacity "
<< (capacity_ >> 20) << "MB, start free memory";
free_memory_internal(capacity_);
}
// insert new item
lru_.put(key, item);
SERVER_LOG_DEBUG << header_ << " Insert " << key << " size: " << (item_size >> 20) << "MB into cache";
SERVER_LOG_DEBUG << header_ << " Count: " << lru_.size() << ", Usage: " << (usage_ >> 20) << "MB, Capacity: "
LOG_SERVER_DEBUG_ << header_ << " Insert " << key << " size: " << (item_size >> 20) << "MB into cache";
LOG_SERVER_DEBUG_ << header_ << " Count: " << lru_.size() << ", Usage: " << (usage_ >> 20) << "MB, Capacity: "
<< (capacity_ >> 20) << "MB";
}
@ -153,8 +153,8 @@ Cache<ItemObj>::erase_internal(const std::string& key) {
lru_.erase(key);
usage_ -= item_size;
SERVER_LOG_DEBUG << header_ << " Erase " << key << " size: " << (item_size >> 20) << "MB from cache";
SERVER_LOG_DEBUG << header_ << " Count: " << lru_.size() << ", Usage: " << (usage_ >> 20) << "MB, Capacity: "
LOG_SERVER_DEBUG_ << header_ << " Erase " << key << " size: " << (item_size >> 20) << "MB from cache";
LOG_SERVER_DEBUG_ << header_ << " Count: " << lru_.size() << ", Usage: " << (usage_ >> 20) << "MB, Capacity: "
<< (capacity_ >> 20) << "MB";
}
@ -180,7 +180,7 @@ Cache<ItemObj>::free_memory_internal(const int64_t target_size) {
++it;
}
SERVER_LOG_DEBUG << header_ << " To be released memory size: " << (released_size >> 20) << "MB";
LOG_SERVER_DEBUG_ << header_ << " To be released memory size: " << (released_size >> 20) << "MB";
for (auto& key : key_array) {
erase_internal(key);


@ -24,7 +24,7 @@ template <typename ItemObj>
uint64_t
CacheMgr<ItemObj>::ItemCount() const {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return 0;
}
return (uint64_t)(cache_->size());
@ -34,7 +34,7 @@ template <typename ItemObj>
bool
CacheMgr<ItemObj>::ItemExists(const std::string& key) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return false;
}
return cache_->exists(key);
@ -44,7 +44,7 @@ template <typename ItemObj>
ItemObj
CacheMgr<ItemObj>::GetItem(const std::string& key) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return nullptr;
}
server::Metrics::GetInstance().CacheAccessTotalIncrement();
@ -55,7 +55,7 @@ template <typename ItemObj>
void
CacheMgr<ItemObj>::InsertItem(const std::string& key, const ItemObj& data) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return;
}
cache_->insert(key, data);
@ -66,7 +66,7 @@ template <typename ItemObj>
void
CacheMgr<ItemObj>::EraseItem(const std::string& key) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return;
}
cache_->erase(key);
@ -77,7 +77,7 @@ template <typename ItemObj>
bool
CacheMgr<ItemObj>::Reserve(const int64_t size) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return false;
}
return cache_->reserve(size);
@ -87,7 +87,7 @@ template <typename ItemObj>
void
CacheMgr<ItemObj>::PrintInfo() {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return;
}
cache_->print();
@ -97,7 +97,7 @@ template <typename ItemObj>
void
CacheMgr<ItemObj>::ClearCache() {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return;
}
cache_->clear();
@ -107,7 +107,7 @@ template <typename ItemObj>
int64_t
CacheMgr<ItemObj>::CacheUsage() const {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return 0;
}
return cache_->usage();
@ -117,7 +117,7 @@ template <typename ItemObj>
int64_t
CacheMgr<ItemObj>::CacheCapacity() const {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return 0;
}
return cache_->capacity();
@ -127,7 +127,7 @@ template <typename ItemObj>
void
CacheMgr<ItemObj>::SetCapacity(int64_t capacity) {
if (cache_ == nullptr) {
SERVER_LOG_ERROR << "Cache doesn't exist";
LOG_SERVER_ERROR_ << "Cache doesn't exist";
return;
}
cache_->set_capacity(capacity);


@ -44,14 +44,14 @@ DefaultDeletedDocsFormat::read(const storage::FSHandlerPtr& fs_ptr, segment::Del
int del_fd = open(del_file_path.c_str(), O_RDONLY, 00664);
if (del_fd == -1) {
std::string err_msg = "Failed to open file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t num_bytes;
if (::read(del_fd, &num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to read from file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
@ -61,7 +61,7 @@ DefaultDeletedDocsFormat::read(const storage::FSHandlerPtr& fs_ptr, segment::Del
if (::read(del_fd, deleted_docs_list.data(), num_bytes) == -1) {
std::string err_msg = "Failed to read from file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
@ -69,7 +69,7 @@ DefaultDeletedDocsFormat::read(const storage::FSHandlerPtr& fs_ptr, segment::Del
if (::close(del_fd) == -1) {
std::string err_msg = "Failed to close file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
}
@ -92,7 +92,7 @@ DefaultDeletedDocsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segme
int del_fd = open(temp_path.c_str(), O_RDWR | O_CREAT, 00664);
if (del_fd == -1) {
std::string err_msg = "Failed to open file: " + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
@ -100,7 +100,7 @@ DefaultDeletedDocsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segme
if (exists) {
if (::read(del_fd, &old_num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to read from file: " + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
} else {
@ -114,12 +114,12 @@ DefaultDeletedDocsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segme
int off = lseek(del_fd, 0, SEEK_SET);
if (off == -1) {
std::string err_msg = "Failed to seek file: " + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::write(del_fd, &new_num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to write to file" + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
@ -127,18 +127,18 @@ DefaultDeletedDocsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segme
off = lseek(del_fd, 0, SEEK_END);
if (off == -1) {
std::string err_msg = "Failed to seek file: " + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::write(del_fd, deleted_docs_list.data(), sizeof(segment::offset_t) * deleted_docs->GetSize()) == -1) {
std::string err_msg = "Failed to write to file" + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::close(del_fd) == -1) {
std::string err_msg = "Failed to close file: " + temp_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
@ -156,14 +156,14 @@ DefaultDeletedDocsFormat::readSize(const storage::FSHandlerPtr& fs_ptr, size_t&
int del_fd = open(del_file_path.c_str(), O_RDONLY, 00664);
if (del_fd == -1) {
std::string err_msg = "Failed to open file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t num_bytes;
if (::read(del_fd, &num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to read from file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
@ -171,7 +171,7 @@ DefaultDeletedDocsFormat::readSize(const storage::FSHandlerPtr& fs_ptr, size_t&
if (::close(del_fd) == -1) {
std::string err_msg = "Failed to close file: " + del_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
}


@ -40,7 +40,7 @@ DefaultIdBloomFilterFormat::read(const storage::FSHandlerPtr& fs_ptr, segment::I
if (bloom_filter == nullptr) {
std::string err_msg =
"Failed to read bloom filter from file: " + bloom_filter_file_path + ". " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_UNEXPECTED_ERROR, err_msg);
}
id_bloom_filter_ptr = std::make_shared<segment::IdBloomFilter>(bloom_filter);
@ -56,7 +56,7 @@ DefaultIdBloomFilterFormat::write(const storage::FSHandlerPtr& fs_ptr,
if (scaling_bloom_flush(id_bloom_filter_ptr->GetBloomFilter()) == -1) {
std::string err_msg =
"Failed to write bloom filter to file: " + bloom_filter_file_path + ". " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_UNEXPECTED_ERROR, err_msg);
}
}
@ -71,7 +71,7 @@ DefaultIdBloomFilterFormat::create(const storage::FSHandlerPtr& fs_ptr,
if (bloom_filter == nullptr) {
std::string err_msg =
"Failed to read bloom filter from file: " + bloom_filter_file_path + ". " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_UNEXPECTED_ERROR, err_msg);
}
id_bloom_filter_ptr = std::make_shared<segment::IdBloomFilter>(bloom_filter);


@ -37,13 +37,13 @@ DefaultVectorIndexFormat::read_internal(const storage::FSHandlerPtr& fs_ptr, con
recorder.RecordSection("Start");
if (!fs_ptr->reader_ptr_->open(path)) {
ENGINE_LOG_ERROR << "Fail to open vector index: " << path;
LOG_ENGINE_ERROR_ << "Fail to open vector index: " << path;
return nullptr;
}
int64_t length = fs_ptr->reader_ptr_->length();
if (length <= 0) {
ENGINE_LOG_ERROR << "Invalid vector index length: " << path;
LOG_ENGINE_ERROR_ << "Invalid vector index length: " << path;
return nullptr;
}
@ -55,7 +55,7 @@ DefaultVectorIndexFormat::read_internal(const storage::FSHandlerPtr& fs_ptr, con
rp += sizeof(current_type);
fs_ptr->reader_ptr_->seekg(rp);
ENGINE_LOG_DEBUG << "Start to read_index(" << path << ") length: " << length << " bytes";
LOG_ENGINE_DEBUG_ << "Start to read_index(" << path << ") length: " << length << " bytes";
while (rp < length) {
size_t meta_length;
fs_ptr->reader_ptr_->read(&meta_length, sizeof(meta_length));
@ -85,7 +85,7 @@ DefaultVectorIndexFormat::read_internal(const storage::FSHandlerPtr& fs_ptr, con
double span = recorder.RecordSection("End");
double rate = length * 1000000.0 / span / 1024 / 1024;
ENGINE_LOG_DEBUG << "read_index(" << path << ") rate " << rate << "MB/s";
LOG_ENGINE_DEBUG_ << "read_index(" << path << ") rate " << rate << "MB/s";
knowhere::VecIndexFactory& vec_index_factory = knowhere::VecIndexFactory::GetInstance();
auto index =
@ -94,7 +94,7 @@ DefaultVectorIndexFormat::read_internal(const storage::FSHandlerPtr& fs_ptr, con
index->Load(load_data_list);
index->SetIndexSize(length);
} else {
ENGINE_LOG_ERROR << "Fail to create vector index: " << path;
LOG_ENGINE_ERROR_ << "Fail to create vector index: " << path;
}
return index;
@ -108,7 +108,7 @@ DefaultVectorIndexFormat::read(const storage::FSHandlerPtr& fs_ptr, const std::s
std::string dir_path = fs_ptr->operation_ptr_->GetDirectory();
if (!boost::filesystem::is_directory(dir_path)) {
std::string err_msg = "Directory: " + dir_path + "does not exist";
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_INVALID_ARGUMENT, err_msg);
}
@ -132,7 +132,7 @@ DefaultVectorIndexFormat::write(const storage::FSHandlerPtr& fs_ptr, const std::
recorder.RecordSection("Start");
if (!fs_ptr->writer_ptr_->open(location)) {
ENGINE_LOG_ERROR << "Fail to open vector index: " << location;
LOG_ENGINE_ERROR_ << "Fail to open vector index: " << location;
return;
}
@ -153,7 +153,7 @@ DefaultVectorIndexFormat::write(const storage::FSHandlerPtr& fs_ptr, const std::
double span = recorder.RecordSection("End");
double rate = fs_ptr->writer_ptr_->length() * 1000000.0 / span / 1024 / 1024;
ENGINE_LOG_DEBUG << "write_index(" << location << ") rate " << rate << "MB/s";
LOG_ENGINE_DEBUG_ << "write_index(" << location << ") rate " << rate << "MB/s";
}
} // namespace codec


@ -36,14 +36,14 @@ DefaultVectorsFormat::read_vectors_internal(const std::string& file_path, off_t
int rv_fd = open(file_path.c_str(), O_RDONLY, 00664);
if (rv_fd == -1) {
std::string err_msg = "Failed to open file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t num_bytes;
if (::read(rv_fd, &num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to read from file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
@ -53,20 +53,20 @@ DefaultVectorsFormat::read_vectors_internal(const std::string& file_path, off_t
int off = lseek(rv_fd, offset, SEEK_SET);
if (off == -1) {
std::string err_msg = "Failed to seek file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
raw_vectors.resize(num / sizeof(uint8_t));
if (::read(rv_fd, raw_vectors.data(), num) == -1) {
std::string err_msg = "Failed to read from file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::close(rv_fd) == -1) {
std::string err_msg = "Failed to close file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
}
@ -76,27 +76,27 @@ DefaultVectorsFormat::read_uids_internal(const std::string& file_path, std::vect
int uid_fd = open(file_path.c_str(), O_RDONLY, 00664);
if (uid_fd == -1) {
std::string err_msg = "Failed to open file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t num_bytes;
if (::read(uid_fd, &num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to read from file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
uids.resize(num_bytes / sizeof(segment::doc_id_t));
if (::read(uid_fd, uids.data(), num_bytes) == -1) {
std::string err_msg = "Failed to read from file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::close(uid_fd) == -1) {
std::string err_msg = "Failed to close file: " + file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
}
@ -108,7 +108,7 @@ DefaultVectorsFormat::read(const storage::FSHandlerPtr& fs_ptr, segment::Vectors
std::string dir_path = fs_ptr->operation_ptr_->GetDirectory();
if (!boost::filesystem::is_directory(dir_path)) {
std::string err_msg = "Directory: " + dir_path + "does not exist";
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_INVALID_ARGUMENT, err_msg);
}
@ -147,24 +147,24 @@ DefaultVectorsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segment::
int rv_fd = open(rv_file_path.c_str(), O_WRONLY | O_TRUNC | O_CREAT, 00664);
if (rv_fd == -1) {
std::string err_msg = "Failed to open file: " + rv_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t rv_num_bytes = vectors->GetData().size() * sizeof(uint8_t);
if (::write(rv_fd, &rv_num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to write to file: " + rv_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::write(rv_fd, vectors->GetData().data(), rv_num_bytes) == -1) {
std::string err_msg = "Failed to write to file: " + rv_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::close(rv_fd) == -1) {
std::string err_msg = "Failed to close file: " + rv_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
@ -173,23 +173,23 @@ DefaultVectorsFormat::write(const storage::FSHandlerPtr& fs_ptr, const segment::
int uid_fd = open(uid_file_path.c_str(), O_WRONLY | O_TRUNC | O_CREAT, 00664);
if (uid_fd == -1) {
std::string err_msg = "Failed to open file: " + uid_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FILE, err_msg);
}
size_t uid_num_bytes = vectors->GetUids().size() * sizeof(segment::doc_id_t);
if (::write(uid_fd, &uid_num_bytes, sizeof(size_t)) == -1) {
std::string err_msg = "Failed to write to file" + rv_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::write(uid_fd, vectors->GetUids().data(), uid_num_bytes) == -1) {
std::string err_msg = "Failed to write to file" + uid_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
if (::close(uid_fd) == -1) {
std::string err_msg = "Failed to close file: " + uid_file_path + ", error: " + std::strerror(errno);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_WRITE_ERROR, err_msg);
}
@ -203,7 +203,7 @@ DefaultVectorsFormat::read_uids(const storage::FSHandlerPtr& fs_ptr, std::vector
std::string dir_path = fs_ptr->operation_ptr_->GetDirectory();
if (!boost::filesystem::is_directory(dir_path)) {
std::string err_msg = "Directory: " + dir_path + "does not exist";
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_INVALID_ARGUMENT, err_msg);
}
@ -228,7 +228,7 @@ DefaultVectorsFormat::read_vectors(const storage::FSHandlerPtr& fs_ptr, off_t of
std::string dir_path = fs_ptr->operation_ptr_->GetDirectory();
if (!boost::filesystem::is_directory(dir_path)) {
std::string err_msg = "Directory: " + dir_path + "does not exist";
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_INVALID_ARGUMENT, err_msg);
}


@ -668,7 +668,7 @@ Config::CheckConfigVersion(const std::string& value) {
if (exist_error) {
std::string msg = "Invalid config version: " + value +
". Expected config version: " + milvus_config_version_map.at(MILVUS_VERSION);
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_ARGUMENT, msg);
}
}


@ -187,18 +187,18 @@ ConfigNode::ClearSequences() {
void
ConfigNode::PrintAll(const std::string& prefix) const {
for (auto& elem : config_) {
SERVER_LOG_INFO << prefix << elem.first + ": " << elem.second;
LOG_SERVER_INFO_ << prefix << elem.first + ": " << elem.second;
}
for (auto& elem : sequences_) {
SERVER_LOG_INFO << prefix << elem.first << ": ";
LOG_SERVER_INFO_ << prefix << elem.first << ": ";
for (auto& str : elem.second) {
SERVER_LOG_INFO << prefix << " - " << str;
LOG_SERVER_INFO_ << prefix << " - " << str;
}
}
for (auto& elem : children_) {
SERVER_LOG_INFO << prefix << elem.first << ": ";
LOG_SERVER_INFO_ << prefix << elem.first << ": ";
elem.second.PrintAll(prefix + " ");
}
}


@ -30,7 +30,7 @@ YamlConfigMgr::LoadConfigFile(const std::string& filename) {
void
YamlConfigMgr::Print() const {
SERVER_LOG_INFO << "System config content:";
LOG_SERVER_INFO_ << "System config content:";
config_.PrintAll();
}


@ -96,7 +96,7 @@ DBImpl::Start() {
return Status::OK();
}
// ENGINE_LOG_TRACE << "DB service start";
// LOG_ENGINE_TRACE_ << "DB service start";
initialized_.store(true, std::memory_order_release);
// wal
@ -184,7 +184,7 @@ DBImpl::Stop() {
swn_metric_.Notify();
bg_metric_thread_.join();
// ENGINE_LOG_TRACE << "DB service stop";
// LOG_ENGINE_TRACE_ << "DB service stop";
return Status::OK();
}
@ -321,7 +321,7 @@ DBImpl::GetCollectionInfo(const std::string& collection_id, CollectionInfo& coll
status = meta_ptr_->FilesByType(name_tag.first, file_types, collection_files);
if (!status.ok()) {
std::string err_msg = "Failed to get collection info: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
@ -375,8 +375,8 @@ DBImpl::PreloadCollection(const std::string& collection_id) {
int64_t available_size = cache_total - cache_usage;
// step 3: load file one by one
ENGINE_LOG_DEBUG << "Begin pre-load collection:" + collection_id + ", totally " << files_array.size()
<< " files need to be pre-loaded";
LOG_ENGINE_DEBUG_ << "Begin pre-load collection:" + collection_id + ", totally " << files_array.size()
<< " files need to be pre-loaded";
TimeRecorderAuto rc("Pre-load collection:" + collection_id);
for (auto& file : files_array) {
EngineType engine_type;
@ -394,7 +394,7 @@ DBImpl::PreloadCollection(const std::string& collection_id) {
EngineFactory::Build(file.dimension_, file.location_, engine_type, (MetricType)file.metric_type_, json);
fiu_do_on("DBImpl.PreloadCollection.null_engine", engine = nullptr);
if (engine == nullptr) {
ENGINE_LOG_ERROR << "Invalid engine type";
LOG_ENGINE_ERROR_ << "Invalid engine type";
return Status(DB_ERROR, "Invalid engine type");
}
@ -408,12 +408,12 @@ DBImpl::PreloadCollection(const std::string& collection_id) {
size += engine->Size();
if (size > available_size) {
ENGINE_LOG_DEBUG << "Pre-load cancelled since cache is almost full";
LOG_ENGINE_DEBUG_ << "Pre-load cancelled since cache is almost full";
return Status(SERVER_CACHE_FULL, "Cache is full");
}
} catch (std::exception& ex) {
std::string msg = "Pre-load collection encounter exception: " + std::string(ex.what());
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_ERROR, msg);
}
}
@ -460,7 +460,7 @@ DBImpl::DropPartition(const std::string& partition_name) {
mem_mgr_->EraseMemVector(partition_name); // not allow insert
auto status = meta_ptr_->DropPartition(partition_name); // soft delete collection
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
LOG_ENGINE_ERROR_ << status.message();
return status;
}
@ -482,7 +482,7 @@ DBImpl::DropPartitionByTag(const std::string& collection_id, const std::string&
std::string partition_name;
auto status = meta_ptr_->GetPartitionName(collection_id, partition_tag, partition_name);
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
LOG_ENGINE_ERROR_ << status.message();
return status;
}
@ -500,7 +500,7 @@ DBImpl::ShowPartitions(const std::string& collection_id, std::vector<meta::Colle
Status
DBImpl::InsertVectors(const std::string& collection_id, const std::string& partition_tag, VectorsData& vectors) {
// ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
// LOG_ENGINE_DEBUG_ << "Insert " << n << " vectors to cache";
if (!initialized_.load(std::memory_order_acquire)) {
return SHUTDOWN_ERROR;
}
@ -511,7 +511,7 @@ DBImpl::InsertVectors(const std::string& collection_id, const std::string& parti
SafeIDGenerator& id_generator = SafeIDGenerator::GetInstance();
Status status = id_generator.GetNextIDNumbers(vectors.vector_count_, vectors.id_array_);
if (!status.ok()) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] Get next id number fail: %s", "insert", 0, status.message().c_str());
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] Get next id number fail: %s", "insert", 0, status.message().c_str());
return status;
}
}
@ -521,7 +521,7 @@ DBImpl::InsertVectors(const std::string& collection_id, const std::string& parti
std::string target_collection_name;
status = GetPartitionByTag(collection_id, partition_tag, target_collection_name);
if (!status.ok()) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] Get partition fail: %s", "insert", 0, status.message().c_str());
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] Get partition fail: %s", "insert", 0, status.message().c_str());
return status;
}
@ -600,14 +600,14 @@ DBImpl::Flush(const std::string& collection_id) {
return status;
}
if (!has_collection) {
ENGINE_LOG_ERROR << "Collection to flush does not exist: " << collection_id;
LOG_ENGINE_ERROR_ << "Collection to flush does not exist: " << collection_id;
return Status(DB_NOT_FOUND, "Collection to flush does not exist");
}
ENGINE_LOG_DEBUG << "Begin flush collection: " << collection_id;
LOG_ENGINE_DEBUG_ << "Begin flush collection: " << collection_id;
if (options_.wal_enable_) {
ENGINE_LOG_DEBUG << "WAL flush";
LOG_ENGINE_DEBUG_ << "WAL flush";
auto lsn = wal_mgr_->Flush(collection_id);
if (lsn != 0) {
swn_wal_.Notify();
@ -615,11 +615,11 @@ DBImpl::Flush(const std::string& collection_id) {
}
} else {
ENGINE_LOG_DEBUG << "MemTable flush";
LOG_ENGINE_DEBUG_ << "MemTable flush";
InternalFlush(collection_id);
}
ENGINE_LOG_DEBUG << "End flush collection: " << collection_id;
LOG_ENGINE_DEBUG_ << "End flush collection: " << collection_id;
return status;
}
@ -630,22 +630,22 @@ DBImpl::Flush() {
return SHUTDOWN_ERROR;
}
ENGINE_LOG_DEBUG << "Begin flush all collections";
LOG_ENGINE_DEBUG_ << "Begin flush all collections";
Status status;
if (options_.wal_enable_) {
ENGINE_LOG_DEBUG << "WAL flush";
LOG_ENGINE_DEBUG_ << "WAL flush";
auto lsn = wal_mgr_->Flush();
if (lsn != 0) {
swn_wal_.Notify();
flush_req_swn_.Wait();
}
} else {
ENGINE_LOG_DEBUG << "MemTable flush";
LOG_ENGINE_DEBUG_ << "MemTable flush";
InternalFlush();
}
ENGINE_LOG_DEBUG << "End flush all collections";
LOG_ENGINE_DEBUG_ << "End flush all collections";
return status;
}
@ -661,26 +661,26 @@ DBImpl::Compact(const std::string& collection_id) {
auto status = DescribeCollection(collection_schema);
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
ENGINE_LOG_ERROR << "Collection to compact does not exist: " << collection_id;
LOG_ENGINE_ERROR_ << "Collection to compact does not exist: " << collection_id;
return Status(DB_NOT_FOUND, "Collection to compact does not exist");
} else {
return status;
}
} else {
if (!collection_schema.owner_collection_.empty()) {
ENGINE_LOG_ERROR << "Collection to compact does not exist: " << collection_id;
LOG_ENGINE_ERROR_ << "Collection to compact does not exist: " << collection_id;
return Status(DB_NOT_FOUND, "Collection to compact does not exist");
}
}
ENGINE_LOG_DEBUG << "Before compacting, wait for build index thread to finish...";
LOG_ENGINE_DEBUG_ << "Before compacting, wait for build index thread to finish...";
// WaitBuildIndexFinish();
const std::lock_guard<std::mutex> index_lock(build_index_mutex_);
const std::lock_guard<std::mutex> merge_lock(flush_merge_compact_mutex_);
ENGINE_LOG_DEBUG << "Compacting collection: " << collection_id;
LOG_ENGINE_DEBUG_ << "Compacting collection: " << collection_id;
// Get files to compact from meta.
std::vector<int> file_types{meta::SegmentSchema::FILE_TYPE::RAW, meta::SegmentSchema::FILE_TYPE::TO_INDEX,
@ -689,11 +689,11 @@ DBImpl::Compact(const std::string& collection_id) {
status = meta_ptr_->FilesByType(collection_id, file_types, files_to_compact);
if (!status.ok()) {
std::string err_msg = "Failed to get files to compact: " + status.message();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
ENGINE_LOG_DEBUG << "Found " << files_to_compact.size() << " segment to compact";
LOG_ENGINE_DEBUG_ << "Found " << files_to_compact.size() << " segment to compact";
OngoingFileChecker::GetInstance().MarkOngoingFiles(files_to_compact);
@ -719,18 +719,18 @@ DBImpl::Compact(const std::string& collection_id) {
compact_status = CompactFile(collection_id, file, files_to_update);
if (!compact_status.ok()) {
ENGINE_LOG_ERROR << "Compact failed for segment " << file.segment_id_ << ": "
<< compact_status.message();
LOG_ENGINE_ERROR_ << "Compact failed for segment " << file.segment_id_ << ": "
<< compact_status.message();
OngoingFileChecker::GetInstance().UnmarkOngoingFile(file);
continue; // skip this file and try compact next one
}
} else {
OngoingFileChecker::GetInstance().UnmarkOngoingFile(file);
ENGINE_LOG_DEBUG << "Segment " << file.segment_id_ << " has no deleted data. No need to compact";
LOG_ENGINE_DEBUG_ << "Segment " << file.segment_id_ << " has no deleted data. No need to compact";
continue; // skip this file and try compact next one
}
ENGINE_LOG_DEBUG << "Updating meta after compaction...";
LOG_ENGINE_DEBUG_ << "Updating meta after compaction...";
status = meta_ptr_->UpdateCollectionFiles(files_to_update);
OngoingFileChecker::GetInstance().UnmarkOngoingFile(file);
if (!status.ok()) {
@ -742,7 +742,7 @@ DBImpl::Compact(const std::string& collection_id) {
OngoingFileChecker::GetInstance().UnmarkOngoingFiles(files_to_compact);
if (compact_status.ok()) {
ENGINE_LOG_DEBUG << "Finished compacting collection: " << collection_id;
LOG_ENGINE_DEBUG_ << "Finished compacting collection: " << collection_id;
}
return compact_status;
@ -751,7 +751,7 @@ DBImpl::Compact(const std::string& collection_id) {
Status
DBImpl::CompactFile(const std::string& collection_id, const meta::SegmentSchema& file,
meta::SegmentsSchema& files_to_update) {
ENGINE_LOG_DEBUG << "Compacting segment " << file.segment_id_ << " for collection: " << collection_id;
LOG_ENGINE_DEBUG_ << "Compacting segment " << file.segment_id_ << " for collection: " << collection_id;
// Create new collection file
meta::SegmentSchema compacted_file;
@ -761,7 +761,7 @@ DBImpl::CompactFile(const std::string& collection_id, const meta::SegmentSchema&
Status status = meta_ptr_->CreateCollectionFile(compacted_file);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to create collection file: " << status.message();
LOG_ENGINE_ERROR_ << "Failed to create collection file: " << status.message();
return status;
}
@ -774,18 +774,18 @@ DBImpl::CompactFile(const std::string& collection_id, const meta::SegmentSchema&
std::string segment_dir_to_merge;
utils::GetParentPath(file.location_, segment_dir_to_merge);
ENGINE_LOG_DEBUG << "Compacting begin...";
LOG_ENGINE_DEBUG_ << "Compacting begin...";
segment_writer_ptr->Merge(segment_dir_to_merge, compacted_file.file_id_);
// Serialize
ENGINE_LOG_DEBUG << "Serializing compacted segment...";
LOG_ENGINE_DEBUG_ << "Serializing compacted segment...";
status = segment_writer_ptr->Serialize();
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to serialize compacted segment: " << status.message();
LOG_ENGINE_ERROR_ << "Failed to serialize compacted segment: " << status.message();
compacted_file.file_type_ = meta::SegmentSchema::TO_DELETE;
auto mark_status = meta_ptr_->UpdateCollectionFile(compacted_file);
if (mark_status.ok()) {
ENGINE_LOG_DEBUG << "Mark file: " << compacted_file.file_id_ << " to to_delete";
LOG_ENGINE_DEBUG_ << "Mark file: " << compacted_file.file_id_ << " to to_delete";
}
return status;
}
@ -804,7 +804,7 @@ DBImpl::CompactFile(const std::string& collection_id, const meta::SegmentSchema&
compacted_file.row_count_ = segment_writer_ptr->VectorCount();
if (compacted_file.row_count_ == 0) {
ENGINE_LOG_DEBUG << "Compacted segment is empty. Mark it as TO_DELETE";
LOG_ENGINE_DEBUG_ << "Compacted segment is empty. Mark it as TO_DELETE";
compacted_file.file_type_ = meta::SegmentSchema::TO_DELETE;
}
@ -822,9 +822,9 @@ DBImpl::CompactFile(const std::string& collection_id, const meta::SegmentSchema&
files_to_update.emplace_back(f);
}
ENGINE_LOG_DEBUG << "Compacted segment " << compacted_file.segment_id_ << " from "
<< std::to_string(file.file_size_) << " bytes to " << std::to_string(compacted_file.file_size_)
<< " bytes";
LOG_ENGINE_DEBUG_ << "Compacted segment " << compacted_file.segment_id_ << " from "
<< std::to_string(file.file_size_) << " bytes to " << std::to_string(compacted_file.file_size_)
<< " bytes";
if (options_.insert_cache_immediately_) {
segment_writer_ptr->Cache();
@ -842,7 +842,7 @@ DBImpl::GetVectorByID(const std::string& collection_id, const IDNumber& vector_i
bool has_collection;
auto status = HasCollection(collection_id, has_collection);
if (!has_collection) {
ENGINE_LOG_ERROR << "Collection " << collection_id << " does not exist: ";
LOG_ENGINE_ERROR_ << "Collection " << collection_id << " does not exist: ";
return Status(DB_NOT_FOUND, "Collection does not exist");
}
if (!status.ok()) {
@ -857,7 +857,7 @@ DBImpl::GetVectorByID(const std::string& collection_id, const IDNumber& vector_i
status = meta_ptr_->FilesByType(collection_id, file_types, files_to_query);
if (!status.ok()) {
std::string err_msg = "Failed to get files for GetVectorByID: " + status.message();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return status;
}
@ -868,7 +868,7 @@ DBImpl::GetVectorByID(const std::string& collection_id, const IDNumber& vector_i
status = meta_ptr_->FilesByType(schema.collection_id_, file_types, files);
if (!status.ok()) {
std::string err_msg = "Failed to get files for GetVectorByID: " + status.message();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return status;
}
files_to_query.insert(files_to_query.end(), std::make_move_iterator(files.begin()),
@ -876,7 +876,7 @@ DBImpl::GetVectorByID(const std::string& collection_id, const IDNumber& vector_i
}
if (files_to_query.empty()) {
ENGINE_LOG_DEBUG << "No files to get vector by id from";
LOG_ENGINE_DEBUG_ << "No files to get vector by id from";
return Status::OK();
}
@ -901,7 +901,7 @@ DBImpl::GetVectorIDs(const std::string& collection_id, const std::string& segmen
bool has_collection;
auto status = HasCollection(collection_id, has_collection);
if (!has_collection) {
ENGINE_LOG_ERROR << "Collection " << collection_id << " does not exist: ";
LOG_ENGINE_ERROR_ << "Collection " << collection_id << " does not exist: ";
return Status(DB_NOT_FOUND, "Collection does not exist");
}
if (!status.ok()) {
@ -965,7 +965,7 @@ DBImpl::GetVectorIDs(const std::string& collection_id, const std::string& segmen
Status
DBImpl::GetVectorByIdHelper(const std::string& collection_id, IDNumber vector_id, VectorsData& vector,
const meta::SegmentsSchema& files) {
ENGINE_LOG_DEBUG << "Getting vector by id in " << files.size() << " files";
LOG_ENGINE_DEBUG_ << "Getting vector by id in " << files.size() << " files";
for (auto& file : files) {
// Load bloom filter
@ -1046,7 +1046,7 @@ DBImpl::CreateIndex(const std::string& collection_id, const CollectionIndex& ind
CollectionIndex old_index;
status = DescribeIndex(collection_id, old_index);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to get collection index info for collection: " << collection_id;
LOG_ENGINE_ERROR_ << "Failed to get collection index info for collection: " << collection_id;
return status;
}
@ -1087,7 +1087,7 @@ DBImpl::DropIndex(const std::string& collection_id) {
return SHUTDOWN_ERROR;
}
ENGINE_LOG_DEBUG << "Drop index for collection: " << collection_id;
LOG_ENGINE_DEBUG_ << "Drop index for collection: " << collection_id;
return DropCollectionIndexRecursively(collection_id);
}
@ -1218,7 +1218,7 @@ DBImpl::QueryAsync(const std::shared_ptr<server::Context>& context, const meta::
if (files.size() > milvus::scheduler::TASK_TABLE_MAX_COUNT) {
std::string msg =
"Search files count exceed scheduler limit: " + std::to_string(milvus::scheduler::TASK_TABLE_MAX_COUNT);
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_ERROR, msg);
}
@ -1227,7 +1227,7 @@ DBImpl::QueryAsync(const std::shared_ptr<server::Context>& context, const meta::
// step 1: construct search job
auto status = OngoingFileChecker::GetInstance().MarkOngoingFiles(files);
ENGINE_LOG_DEBUG << LogOut("Engine query begin, index file count: %ld", files.size());
LOG_ENGINE_DEBUG_ << LogOut("Engine query begin, index file count: %ld", files.size());
scheduler::SearchJobPtr job = std::make_shared<scheduler::SearchJob>(tracer.Context(), k, extra_params, vectors);
for (auto& file : files) {
scheduler::SegmentSchemaPtr file_ptr = std::make_shared<meta::SegmentSchema>(file);
@ -1259,7 +1259,7 @@ DBImpl::BackgroundIndexThread() {
WaitMergeFileFinish();
WaitBuildIndexFinish();
ENGINE_LOG_DEBUG << "DB background thread exit";
LOG_ENGINE_DEBUG_ << "DB background thread exit";
break;
}
@ -1272,22 +1272,22 @@ DBImpl::BackgroundIndexThread() {
void
DBImpl::WaitMergeFileFinish() {
// ENGINE_LOG_DEBUG << "Begin WaitMergeFileFinish";
// LOG_ENGINE_DEBUG_ << "Begin WaitMergeFileFinish";
std::lock_guard<std::mutex> lck(merge_result_mutex_);
for (auto& iter : merge_thread_results_) {
iter.wait();
}
// ENGINE_LOG_DEBUG << "End WaitMergeFileFinish";
// LOG_ENGINE_DEBUG_ << "End WaitMergeFileFinish";
}
void
DBImpl::WaitBuildIndexFinish() {
// ENGINE_LOG_DEBUG << "Begin WaitBuildIndexFinish";
// LOG_ENGINE_DEBUG_ << "Begin WaitBuildIndexFinish";
std::lock_guard<std::mutex> lck(index_result_mutex_);
for (auto& iter : index_thread_results_) {
iter.wait();
}
// ENGINE_LOG_DEBUG << "End WaitBuildIndexFinish";
// LOG_ENGINE_DEBUG_ << "End WaitBuildIndexFinish";
}
void
@ -1322,7 +1322,7 @@ DBImpl::StartMetricTask() {
void
DBImpl::StartMergeTask() {
// ENGINE_LOG_DEBUG << "Begin StartMergeTask";
// LOG_ENGINE_DEBUG_ << "Begin StartMergeTask";
// merge task has been finished?
{
std::lock_guard<std::mutex> lck(merge_result_mutex_);
@ -1356,14 +1356,14 @@ DBImpl::StartMergeTask() {
}
}
// ENGINE_LOG_DEBUG << "End StartMergeTask";
// LOG_ENGINE_DEBUG_ << "End StartMergeTask";
}
Status
DBImpl::MergeFiles(const std::string& collection_id, const meta::SegmentsSchema& files) {
// const std::lock_guard<std::mutex> lock(flush_merge_compact_mutex_);
ENGINE_LOG_DEBUG << "Merge files for collection: " << collection_id;
LOG_ENGINE_DEBUG_ << "Merge files for collection: " << collection_id;
// step 1: create collection file
meta::SegmentSchema collection_file;
@ -1372,7 +1372,7 @@ DBImpl::MergeFiles(const std::string& collection_id, const meta::SegmentsSchema&
Status status = meta_ptr_->CreateCollectionFile(collection_file);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to create collection: " << status.ToString();
LOG_ENGINE_ERROR_ << "Failed to create collection: " << status.ToString();
return status;
}
@ -1409,19 +1409,19 @@ DBImpl::MergeFiles(const std::string& collection_id, const meta::SegmentsSchema&
fiu_do_on("DBImpl.MergeFiles.Serialize_ErrorStatus", status = Status(DB_ERROR, ""));
} catch (std::exception& ex) {
std::string msg = "Serialize merged index encounter exception: " + std::string(ex.what());
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
status = Status(DB_ERROR, msg);
}
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to persist merged segment: " << new_segment_dir << ". Error: " << status.message();
LOG_ENGINE_ERROR_ << "Failed to persist merged segment: " << new_segment_dir << ". Error: " << status.message();
// if failed to serialize merge file to disk
// typical error: out of disk space, out of memory or permission denied
collection_file.file_type_ = meta::SegmentSchema::TO_DELETE;
status = meta_ptr_->UpdateCollectionFile(collection_file);
ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << collection_file.file_id_
<< " to to_delete";
LOG_ENGINE_DEBUG_ << "Failed to update file to index, mark file: " << collection_file.file_id_
<< " to to_delete";
return status;
}
@ -1440,8 +1440,8 @@ DBImpl::MergeFiles(const std::string& collection_id, const meta::SegmentsSchema&
collection_file.row_count_ = segment_writer_ptr->VectorCount();
updated.push_back(collection_file);
status = meta_ptr_->UpdateCollectionFiles(updated);
ENGINE_LOG_DEBUG << "New merged segment " << collection_file.segment_id_ << " of size "
<< segment_writer_ptr->Size() << " bytes";
LOG_ENGINE_DEBUG_ << "New merged segment " << collection_file.segment_id_ << " of size "
<< segment_writer_ptr->Size() << " bytes";
if (options_.insert_cache_immediately_) {
segment_writer_ptr->Cache();
@ -1457,12 +1457,12 @@ DBImpl::BackgroundMergeFiles(const std::string& collection_id) {
meta::SegmentsSchema raw_files;
auto status = meta_ptr_->FilesToMerge(collection_id, raw_files);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to get merge files for collection: " << collection_id;
LOG_ENGINE_ERROR_ << "Failed to get merge files for collection: " << collection_id;
return status;
}
if (raw_files.size() < options_.merge_trigger_number_) {
ENGINE_LOG_TRACE << "Files number not greater equal than merge trigger number, skip merge action";
LOG_ENGINE_TRACE_ << "Files number not greater equal than merge trigger number, skip merge action";
return Status::OK();
}
@ -1471,7 +1471,7 @@ DBImpl::BackgroundMergeFiles(const std::string& collection_id) {
status = OngoingFileChecker::GetInstance().UnmarkOngoingFiles(raw_files);
if (!initialized_.load(std::memory_order_acquire)) {
ENGINE_LOG_DEBUG << "Server will shutdown, skip merge action for collection: " << collection_id;
LOG_ENGINE_DEBUG_ << "Server will shutdown, skip merge action for collection: " << collection_id;
}
return Status::OK();
@ -1479,17 +1479,17 @@ DBImpl::BackgroundMergeFiles(const std::string& collection_id) {
void
DBImpl::BackgroundMerge(std::set<std::string> collection_ids) {
// ENGINE_LOG_TRACE << " Background merge thread start";
// LOG_ENGINE_TRACE_ << " Background merge thread start";
Status status;
for (auto& collection_id : collection_ids) {
status = BackgroundMergeFiles(collection_id);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Merge files for collection " << collection_id << " failed: " << status.ToString();
LOG_ENGINE_ERROR_ << "Merge files for collection " << collection_id << " failed: " << status.ToString();
}
if (!initialized_.load(std::memory_order_acquire)) {
ENGINE_LOG_DEBUG << "Server will shutdown, skip merge action";
LOG_ENGINE_DEBUG_ << "Server will shutdown, skip merge action";
break;
}
}
@ -1505,7 +1505,7 @@ DBImpl::BackgroundMerge(std::set<std::string> collection_ids) {
meta_ptr_->CleanUpFilesWithTTL(ttl);
}
// ENGINE_LOG_TRACE << " Background merge thread exit";
// LOG_ENGINE_TRACE_ << " Background merge thread exit";
}
void
@ -1538,7 +1538,7 @@ DBImpl::BackgroundBuildIndex() {
Status status = index_failed_checker_.IgnoreFailedIndexFiles(to_index_files);
if (!to_index_files.empty()) {
ENGINE_LOG_DEBUG << "Background build index thread begin";
LOG_ENGINE_DEBUG_ << "Background build index thread begin";
status = OngoingFileChecker::GetInstance().MarkOngoingFiles(to_index_files);
// step 2: put build index task to scheduler
@ -1558,18 +1558,18 @@ DBImpl::BackgroundBuildIndex() {
job->WaitBuildIndexFinish();
if (!job->GetStatus().ok()) {
Status status = job->GetStatus();
ENGINE_LOG_ERROR << "Building index job " << job->id() << " failed: " << status.ToString();
LOG_ENGINE_ERROR_ << "Building index job " << job->id() << " failed: " << status.ToString();
index_failed_checker_.MarkFailedIndexFile(file_schema, status.message());
} else {
ENGINE_LOG_DEBUG << "Building index job " << job->id() << " succeed.";
LOG_ENGINE_DEBUG_ << "Building index job " << job->id() << " succeed.";
index_failed_checker_.MarkSucceedIndexFile(file_schema);
}
status = OngoingFileChecker::GetInstance().UnmarkOngoingFile(file_schema);
}
ENGINE_LOG_DEBUG << "Background build index thread finished";
LOG_ENGINE_DEBUG_ << "Background build index thread finished";
}
}
@ -1594,7 +1594,7 @@ DBImpl::GetFilesToBuildIndex(const std::string& collection_id, const std::vector
Status
DBImpl::GetFilesToSearch(const std::string& collection_id, meta::SegmentsSchema& files) {
ENGINE_LOG_DEBUG << "Collect files from collection: " << collection_id;
LOG_ENGINE_DEBUG_ << "Collect files from collection: " << collection_id;
meta::SegmentsSchema search_files;
auto status = meta_ptr_->FilesToSearch(collection_id, search_files);
@ -1629,7 +1629,7 @@ DBImpl::GetPartitionByTag(const std::string& collection_id, const std::string& p
status = meta_ptr_->GetPartitionName(collection_id, partition_tag, partition_name);
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
LOG_ENGINE_ERROR_ << status.message();
}
}
@ -1670,7 +1670,7 @@ DBImpl::GetPartitionsByTags(const std::string& collection_id, const std::vector<
Status
DBImpl::DropCollectionRecursively(const std::string& collection_id) {
// dates partly delete files of the collection but currently we don't support
ENGINE_LOG_DEBUG << "Prepare to delete collection " << collection_id;
LOG_ENGINE_DEBUG_ << "Prepare to delete collection " << collection_id;
Status status;
if (options_.wal_enable_) {
@ -1708,7 +1708,7 @@ DBImpl::UpdateCollectionIndexRecursively(const std::string& collection_id, const
fiu_do_on("DBImpl.UpdateCollectionIndexRecursively.fail_update_collection_index",
status = Status(DB_META_TRANSACTION_FAILED, ""));
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to update collection index info for collection: " << collection_id;
LOG_ENGINE_ERROR_ << "Failed to update collection index info for collection: " << collection_id;
return status;
}
@ -1748,7 +1748,7 @@ DBImpl::WaitCollectionIndexRecursively(const std::string& collection_id, const C
int times = 1;
while (!collection_files.empty()) {
ENGINE_LOG_DEBUG << "Non index files detected! Will build index " << times;
LOG_ENGINE_DEBUG_ << "Non index files detected! Will build index " << times;
if (!utils::IsRawIndexType(index.engine_type_)) {
status = meta_ptr_->UpdateCollectionFilesToIndex(collection_id);
}
@ -1785,7 +1785,7 @@ DBImpl::WaitCollectionIndexRecursively(const std::string& collection_id, const C
Status
DBImpl::DropCollectionIndexRecursively(const std::string& collection_id) {
ENGINE_LOG_DEBUG << "Drop index for collection: " << collection_id;
LOG_ENGINE_DEBUG_ << "Drop index for collection: " << collection_id;
index_failed_checker_.CleanFailedIndexFileOfCollection(collection_id);
auto status = meta_ptr_->DropCollectionIndex(collection_id);
if (!status.ok()) {
@ -1868,7 +1868,7 @@ DBImpl::ExecWalRecord(const wal::MXLogRecord& record) {
std::string target_collection_name;
status = GetPartitionByTag(record.collection_id, record.partition_tag, target_collection_name);
if (!status.ok()) {
WAL_LOG_ERROR << LogOut("[%s][%ld] ", "insert", 0) << "Get partition fail: " << status.message();
LOG_WAL_ERROR_ << LogOut("[%s][%ld] ", "insert", 0) << "Get partition fail: " << status.message();
return status;
}
@ -1888,7 +1888,7 @@ DBImpl::ExecWalRecord(const wal::MXLogRecord& record) {
std::string target_collection_name;
status = GetPartitionByTag(record.collection_id, record.partition_tag, target_collection_name);
if (!status.ok()) {
WAL_LOG_ERROR << LogOut("[%s][%ld] ", "insert", 0) << "Get partition fail: " << status.message();
LOG_WAL_ERROR_ << LogOut("[%s][%ld] ", "insert", 0) << "Get partition fail: " << status.message();
return status;
}
@ -1994,6 +1994,7 @@ DBImpl::InternalFlush(const std::string& collection_id) {
void
DBImpl::BackgroundWalThread() {
SetThreadName("wal_thread");
server::SystemInfo::GetInstance().Init();
std::chrono::system_clock::time_point next_auto_flush_time;
@ -2015,7 +2016,7 @@ DBImpl::BackgroundWalThread() {
wal::MXLogRecord record;
auto error_code = wal_mgr_->GetNextRecord(record);
if (error_code != WAL_SUCCESS) {
ENGINE_LOG_ERROR << "WAL background GetNextRecord error";
LOG_ENGINE_ERROR_ << "WAL background GetNextRecord error";
break;
}
@ -2037,7 +2038,7 @@ DBImpl::BackgroundWalThread() {
flush_req_swn_.Notify();
WaitMergeFileFinish();
WaitBuildIndexFinish();
ENGINE_LOG_DEBUG << "WAL background thread exit";
LOG_ENGINE_DEBUG_ << "WAL background thread exit";
break;
}
@ -2052,10 +2053,11 @@ DBImpl::BackgroundWalThread() {
void
DBImpl::BackgroundFlushThread() {
SetThreadName("flush_thread");
server::SystemInfo::GetInstance().Init();
while (true) {
if (!initialized_.load(std::memory_order_acquire)) {
ENGINE_LOG_DEBUG << "DB background flush thread exit";
LOG_ENGINE_DEBUG_ << "DB background flush thread exit";
break;
}
@ -2073,7 +2075,7 @@ DBImpl::BackgroundMetricThread() {
server::SystemInfo::GetInstance().Init();
while (true) {
if (!initialized_.load(std::memory_order_acquire)) {
ENGINE_LOG_DEBUG << "DB background metric thread exit";
LOG_ENGINE_DEBUG_ << "DB background metric thread exit";
break;
}
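
This commit also adds a SetThreadName helper and calls it at the top of the DB background threads shown above (for example "wal_thread" and "flush_thread"), which makes the threads identifiable in debuggers and tools such as top -H. A plausible sketch of the helper, assuming a Linux/POSIX build with pthread_setname_np, follows; the actual implementation added by this commit may differ.

// Hypothetical sketch of the SetThreadName helper (POSIX assumption).
#include <pthread.h>
#include <string>

void
SetThreadName(const std::string& name) {
    // Linux limits a thread name to 15 characters plus the terminating null,
    // so truncate longer names before applying them to the calling thread.
    pthread_setname_np(pthread_self(), name.substr(0, 15).c_str());
}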


@ -100,7 +100,7 @@ Status
SafeIDGenerator::NextIDNumbers(size_t n, IDNumbers& ids) {
if (n <= 0 || n > MAX_IDS_PER_MICRO) {
std::string msg = "Invalid ID number: " + std::to_string(n);
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(SERVER_UNEXPECTED_ERROR, msg);
}


@ -94,8 +94,8 @@ OngoingFileChecker::MarkOngoingFileNoLock(const meta::SegmentSchema& table_file)
}
}
ENGINE_LOG_DEBUG << "Mark ongoing file:" << table_file.file_id_
<< " refcount:" << ongoing_files_[table_file.collection_id_][table_file.file_id_];
LOG_ENGINE_DEBUG_ << "Mark ongoing file:" << table_file.file_id_
<< " refcount:" << ongoing_files_[table_file.collection_id_][table_file.file_id_];
return Status::OK();
}
@ -112,7 +112,7 @@ OngoingFileChecker::UnmarkOngoingFileNoLock(const meta::SegmentSchema& table_fil
if (it_file != iter->second.end()) {
it_file->second--;
ENGINE_LOG_DEBUG << "Unmark ongoing file:" << table_file.file_id_ << " refcount:" << it_file->second;
LOG_ENGINE_DEBUG_ << "Unmark ongoing file:" << table_file.file_id_ << " refcount:" << it_file->second;
if (it_file->second <= 0) {
iter->second.erase(table_file.file_id_);


@ -54,11 +54,11 @@ ArchiveConf::ParseCritirias(const std::string& criterias) {
std::vector<std::string> kv;
boost::algorithm::split(kv, token, boost::is_any_of(":"));
if (kv.size() != 2) {
ENGINE_LOG_WARNING << "Invalid ArchiveConf Criterias: " << token << " Ignore!";
LOG_ENGINE_WARNING_ << "Invalid ArchiveConf Criterias: " << token << " Ignore!";
continue;
}
if (kv[0] != "disk" && kv[0] != "days") {
ENGINE_LOG_WARNING << "Invalid ArchiveConf Criterias: " << token << " Ignore!";
LOG_ENGINE_WARNING_ << "Invalid ArchiveConf Criterias: " << token << " Ignore!";
continue;
}
try {
@ -68,11 +68,11 @@ ArchiveConf::ParseCritirias(const std::string& criterias) {
criterias_[kv[0]] = value;
} catch (std::out_of_range&) {
std::string msg = "Out of range: '" + kv[1] + "'";
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw InvalidArgumentException(msg);
} catch (...) {
std::string msg = "Invalid argument: '" + kv[1] + "'";
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw InvalidArgumentException(msg);
}
}


@ -84,7 +84,7 @@ CreateCollectionPath(const DBMetaOptions& options, const std::string& collection
std::string table_path = db_path + TABLES_FOLDER + collection_id;
auto status = server::CommonUtil::CreateDirectory(table_path);
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
LOG_ENGINE_ERROR_ << status.message();
return status;
}
@ -93,7 +93,7 @@ CreateCollectionPath(const DBMetaOptions& options, const std::string& collection
status = server::CommonUtil::CreateDirectory(table_path);
fiu_do_on("CreateCollectionPath.creat_slave_path", status = Status(DB_INVALID_PATH, ""));
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
LOG_ENGINE_ERROR_ << status.message();
return status;
}
}
@ -110,10 +110,10 @@ DeleteCollectionPath(const DBMetaOptions& options, const std::string& collection
std::string table_path = path + TABLES_FOLDER + collection_id;
if (force) {
boost::filesystem::remove_all(table_path);
ENGINE_LOG_DEBUG << "Remove collection folder: " << table_path;
LOG_ENGINE_DEBUG_ << "Remove collection folder: " << table_path;
} else if (boost::filesystem::exists(table_path) && boost::filesystem::is_empty(table_path)) {
boost::filesystem::remove_all(table_path);
ENGINE_LOG_DEBUG << "Remove collection folder: " << table_path;
LOG_ENGINE_DEBUG_ << "Remove collection folder: " << table_path;
}
}
@ -141,7 +141,7 @@ CreateCollectionFilePath(const DBMetaOptions& options, meta::SegmentSchema& tabl
auto status = server::CommonUtil::CreateDirectory(parent_path);
fiu_do_on("CreateCollectionFilePath.fail_create", status = Status(DB_INVALID_PATH, ""));
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
LOG_ENGINE_ERROR_ << status.message();
return status;
}
@ -181,7 +181,7 @@ GetCollectionFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_f
std::string msg = "Collection file doesn't exist: " + file_path;
if (table_file.file_size_ > 0) { // no need to pop error for empty file
ENGINE_LOG_ERROR << msg << " in path: " << options.path_ << " for collection: " << table_file.collection_id_;
LOG_ENGINE_ERROR_ << msg << " in path: " << options.path_ << " for collection: " << table_file.collection_id_;
}
return Status(DB_ERROR, msg);


@ -22,11 +22,11 @@ ExecutionEnginePtr
EngineFactory::Build(uint16_t dimension, const std::string& location, EngineType index_type, MetricType metric_type,
const milvus::json& index_params) {
if (index_type == EngineType::INVALID) {
ENGINE_LOG_ERROR << "Unsupported engine type";
LOG_ENGINE_ERROR_ << "Unsupported engine type";
return nullptr;
}
ENGINE_LOG_DEBUG << "EngineFactory index type: " << (int)index_type;
LOG_ENGINE_DEBUG_ << "EngineFactory index type: " << (int)index_type;
ExecutionEnginePtr execution_engine_ptr =
std::make_shared<ExecutionEngineImpl>(dimension, location, index_type, metric_type, index_params);

@ -131,7 +131,7 @@ ExecutionEngineImpl::ExecutionEngineImpl(uint16_t dimension, const std::string&
conf[knowhere::meta::DEVICEID] = gpu_num_;
conf[knowhere::meta::DIM] = dimension;
MappingMetricType(metric_type, conf);
ENGINE_LOG_DEBUG << "Index params: " << conf.dump();
LOG_ENGINE_DEBUG_ << "Index params: " << conf.dump();
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type());
if (!adapter->CheckTrain(conf, index_->index_mode())) {
throw Exception(DB_ERROR, "Illegal index params");
@ -223,13 +223,13 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
break;
}
default: {
ENGINE_LOG_ERROR << "Unsupported index type " << (int)type;
LOG_ENGINE_ERROR_ << "Unsupported index type " << (int)type;
return nullptr;
}
}
if (index == nullptr) {
std::string err_msg = "Invalid index type " + std::to_string((int)type) + " mod " + std::to_string((int)mode);
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(DB_ERROR, err_msg);
}
return index;
@ -240,7 +240,7 @@ ExecutionEngineImpl::HybridLoad() const {
#ifdef MILVUS_GPU_VERSION
auto hybrid_index = std::dynamic_pointer_cast<knowhere::IVFSQHybrid>(index_);
if (hybrid_index == nullptr) {
ENGINE_LOG_WARNING << "HybridLoad only support with IVFSQHybrid";
LOG_ENGINE_WARNING_ << "HybridLoad only support with IVFSQHybrid";
return;
}
@ -250,7 +250,7 @@ ExecutionEngineImpl::HybridLoad() const {
std::vector<int64_t> gpus;
Status s = config.GetGpuResourceConfigSearchResources(gpus);
if (!s.ok()) {
ENGINE_LOG_ERROR << s.message();
LOG_ENGINE_ERROR_ << s.message();
return;
}
@ -289,9 +289,9 @@ ExecutionEngineImpl::HybridLoad() const {
milvus::json quantizer_conf{{knowhere::meta::DEVICEID, best_device_id}, {"mode", 1}};
auto quantizer = hybrid_index->LoadQuantizer(quantizer_conf);
ENGINE_LOG_DEBUG << "Quantizer params: " << quantizer_conf.dump();
LOG_ENGINE_DEBUG_ << "Quantizer params: " << quantizer_conf.dump();
if (quantizer == nullptr) {
ENGINE_LOG_ERROR << "quantizer is nullptr";
LOG_ENGINE_ERROR_ << "quantizer is nullptr";
}
hybrid_index->SetQuantizer(quantizer);
auto cache_quantizer = std::make_shared<CachedQuantizer>(quantizer);
@ -328,7 +328,7 @@ ExecutionEngineImpl::AddWithIds(int64_t n, const uint8_t* xdata, const int64_t*
size_t
ExecutionEngineImpl::Count() const {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, return count 0";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, return count 0";
return 0;
}
return index_->Count();
@ -337,7 +337,7 @@ ExecutionEngineImpl::Count() const {
size_t
ExecutionEngineImpl::Dimension() const {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, return dimension " << dim_;
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, return dimension " << dim_;
return dim_;
}
return index_->Dim();
@ -346,7 +346,7 @@ ExecutionEngineImpl::Dimension() const {
size_t
ExecutionEngineImpl::Size() const {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, return size 0";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, return size 0";
return 0;
}
return index_->Size();
@ -363,7 +363,7 @@ ExecutionEngineImpl::Serialize() {
// here we reset index size by file size,
// since some index type(such as SQ8) data size become smaller after serialized
index_->SetIndexSize(server::CommonUtil::GetFileSize(location_));
ENGINE_LOG_DEBUG << "Finish serialize index file: " << location_ << " size: " << index_->Size();
LOG_ENGINE_DEBUG_ << "Finish serialize index file: " << location_ << " size: " << index_->Size();
if (index_->Size() == 0) {
std::string msg = "Failed to serialize file: " + location_ + " reason: out of disk space or memory";
@ -394,7 +394,7 @@ ExecutionEngineImpl::Load(bool to_cache) {
milvus::json conf{{knowhere::meta::DEVICEID, gpu_num_}, {knowhere::meta::DIM, dim_}};
MappingMetricType(metric_type_, conf);
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type());
ENGINE_LOG_DEBUG << "Index params: " << conf.dump();
LOG_ENGINE_DEBUG_ << "Index params: " << conf.dump();
if (!adapter->CheckTrain(conf, index_->index_mode())) {
throw Exception(DB_ERROR, "Illegal index params");
}
@ -402,7 +402,7 @@ ExecutionEngineImpl::Load(bool to_cache) {
auto status = segment_reader_ptr->Load();
if (!status.ok()) {
std::string msg = "Failed to load segment from " + location_;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_ERROR, msg);
}
@ -413,7 +413,7 @@ ExecutionEngineImpl::Load(bool to_cache) {
auto vectors_uids = vectors->GetUids();
index_->SetUids(vectors_uids);
ENGINE_LOG_DEBUG << "set uids " << index_->GetUids().size() << " for index " << location_;
LOG_ENGINE_DEBUG_ << "set uids " << index_->GetUids().size() << " for index " << location_;
auto vectors_data = vectors->GetData();
@ -438,7 +438,7 @@ ExecutionEngineImpl::Load(bool to_cache) {
bin_bf_index->SetBlacklist(concurrent_bitset_ptr);
}
ENGINE_LOG_DEBUG << "Finished loading raw data from segment " << segment_dir;
LOG_ENGINE_DEBUG_ << "Finished loading raw data from segment " << segment_dir;
} else {
try {
segment::SegmentPtr segment_ptr;
@ -448,14 +448,14 @@ ExecutionEngineImpl::Load(bool to_cache) {
if (index_ == nullptr) {
std::string msg = "Failed to load index from " + location_;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_ERROR, msg);
} else {
segment::DeletedDocsPtr deleted_docs_ptr;
auto status = segment_reader_ptr->LoadDeletedDocs(deleted_docs_ptr);
if (!status.ok()) {
std::string msg = "Failed to load deleted docs from " + location_;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_ERROR, msg);
}
auto& deleted_docs = deleted_docs_ptr->GetDeletedDocs();
@ -473,12 +473,12 @@ ExecutionEngineImpl::Load(bool to_cache) {
std::vector<segment::doc_id_t> uids;
segment_reader_ptr->LoadUids(uids);
index_->SetUids(uids);
ENGINE_LOG_DEBUG << "set uids " << index_->GetUids().size() << " for index " << location_;
LOG_ENGINE_DEBUG_ << "set uids " << index_->GetUids().size() << " for index " << location_;
ENGINE_LOG_DEBUG << "Finished loading index file from segment " << segment_dir;
LOG_ENGINE_DEBUG_ << "Finished loading index file from segment " << segment_dir;
}
} catch (std::exception& e) {
ENGINE_LOG_ERROR << e.what();
LOG_ENGINE_ERROR_ << e.what();
return Status(DB_ERROR, e.what());
}
}
@ -552,7 +552,7 @@ ExecutionEngineImpl::CopyToGpu(uint64_t device_id, bool hybrid) {
index_ = index;
} else {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to copy to gpu";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, failed to copy to gpu";
return Status(DB_ERROR, "index is null");
}
@ -561,14 +561,14 @@ ExecutionEngineImpl::CopyToGpu(uint64_t device_id, bool hybrid) {
* Add lock here to avoid multiple INDEX are copied to one GPU card at same time.
* And reserve space to avoid GPU out of memory issue.
*/
ENGINE_LOG_DEBUG << "CPU to GPU" << device_id << " start";
LOG_ENGINE_DEBUG_ << "CPU to GPU" << device_id << " start";
auto gpu_cache_mgr = cache::GpuCacheMgr::GetInstance(device_id);
// gpu_cache_mgr->Reserve(index_->Size());
index_ = knowhere::cloner::CopyCpuToGpu(index_, device_id, knowhere::Config());
// gpu_cache_mgr->InsertItem(location_, std::static_pointer_cast<cache::DataObj>(index_));
ENGINE_LOG_DEBUG << "CPU to GPU" << device_id << " finished";
LOG_ENGINE_DEBUG_ << "CPU to GPU" << device_id << " finished";
} catch (std::exception& e) {
ENGINE_LOG_ERROR << e.what();
LOG_ENGINE_ERROR_ << e.what();
return Status(DB_ERROR, e.what());
}
}
@ -599,15 +599,15 @@ ExecutionEngineImpl::CopyToCpu() {
index_ = index;
} else {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to copy to cpu";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, failed to copy to cpu";
return Status(DB_ERROR, "index is null");
}
try {
index_ = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config());
ENGINE_LOG_DEBUG << "GPU to CPU";
LOG_ENGINE_DEBUG_ << "GPU to CPU";
} catch (std::exception& e) {
ENGINE_LOG_ERROR << e.what();
LOG_ENGINE_ERROR_ << e.what();
return Status(DB_ERROR, e.what());
}
}
@ -617,19 +617,19 @@ ExecutionEngineImpl::CopyToCpu() {
}
return Status::OK();
#else
ENGINE_LOG_ERROR << "Calling ExecutionEngineImpl::CopyToCpu when using CPU version";
LOG_ENGINE_ERROR_ << "Calling ExecutionEngineImpl::CopyToCpu when using CPU version";
return Status(DB_ERROR, "Calling ExecutionEngineImpl::CopyToCpu when using CPU version");
#endif
}
ExecutionEnginePtr
ExecutionEngineImpl::BuildIndex(const std::string& location, EngineType engine_type) {
ENGINE_LOG_DEBUG << "Build index file: " << location << " from: " << location_;
LOG_ENGINE_DEBUG_ << "Build index file: " << location << " from: " << location_;
auto from_index = std::dynamic_pointer_cast<knowhere::IDMAP>(index_);
auto bin_from_index = std::dynamic_pointer_cast<knowhere::BinaryIDMAP>(index_);
if (from_index == nullptr && bin_from_index == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: from_index is null, failed to build index";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: from_index is null, failed to build index";
return nullptr;
}
@ -643,12 +643,12 @@ ExecutionEngineImpl::BuildIndex(const std::string& location, EngineType engine_t
conf[knowhere::meta::ROWS] = Count();
conf[knowhere::meta::DEVICEID] = gpu_num_;
MappingMetricType(metric_type_, conf);
ENGINE_LOG_DEBUG << "Index params: " << conf.dump();
LOG_ENGINE_DEBUG_ << "Index params: " << conf.dump();
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(to_index->index_type());
if (!adapter->CheckTrain(conf, to_index->index_mode())) {
throw Exception(DB_ERROR, "Illegal index params");
}
ENGINE_LOG_DEBUG << "Index config: " << conf.dump();
LOG_ENGINE_DEBUG_ << "Index config: " << conf.dump();
std::vector<segment::doc_id_t> uids;
faiss::ConcurrentBitsetPtr blacklist;
@ -675,13 +675,13 @@ ExecutionEngineImpl::BuildIndex(const std::string& location, EngineType engine_t
#endif
to_index->SetUids(uids);
ENGINE_LOG_DEBUG << "Set " << to_index->GetUids().size() << "uids for " << location;
LOG_ENGINE_DEBUG_ << "Set " << to_index->GetUids().size() << "uids for " << location;
if (blacklist != nullptr) {
to_index->SetBlacklist(blacklist);
ENGINE_LOG_DEBUG << "Set blacklist for index " << location;
LOG_ENGINE_DEBUG_ << "Set blacklist for index " << location;
}
ENGINE_LOG_DEBUG << "Finish build index: " << location;
LOG_ENGINE_DEBUG_ << "Finish build index: " << location;
return std::make_shared<ExecutionEngineImpl>(to_index, location, engine_type, metric_type_, index_params_);
}
@ -766,7 +766,7 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, const milvu
TimeRecorder rc(LogOut("[%s][%ld] ExecutionEngineImpl::Search float", "search", 0));
if (index_ == nullptr) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
return Status(DB_ERROR, "index is null");
}
@ -774,7 +774,7 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, const milvu
conf[knowhere::meta::TOPK] = k;
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type());
if (!adapter->CheckSearch(conf, index_->index_type(), index_->index_mode())) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] Illegal search params", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] Illegal search params", "search", 0);
throw Exception(DB_ERROR, "Illegal search params");
}
@ -787,8 +787,8 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, const milvu
auto result = index_->Query(dataset, conf);
rc.RecordSection("query done");
ENGINE_LOG_DEBUG << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
MapAndCopyResult(result, index_->GetUids(), n, k, distances, labels);
rc.RecordSection("map uids " + std::to_string(n * k));
@ -805,7 +805,7 @@ ExecutionEngineImpl::Search(int64_t n, const uint8_t* data, int64_t k, const mil
TimeRecorder rc(LogOut("[%s][%ld] ExecutionEngineImpl::Search uint8", "search", 0));
if (index_ == nullptr) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
return Status(DB_ERROR, "index is null");
}
@ -813,7 +813,7 @@ ExecutionEngineImpl::Search(int64_t n, const uint8_t* data, int64_t k, const mil
conf[knowhere::meta::TOPK] = k;
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type());
if (!adapter->CheckSearch(conf, index_->index_type(), index_->index_mode())) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] Illegal search params", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] Illegal search params", "search", 0);
throw Exception(DB_ERROR, "Illegal search params");
}
@ -826,8 +826,8 @@ ExecutionEngineImpl::Search(int64_t n, const uint8_t* data, int64_t k, const mil
auto result = index_->Query(dataset, conf);
rc.RecordSection("query done");
ENGINE_LOG_DEBUG << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
MapAndCopyResult(result, index_->GetUids(), n, k, distances, labels);
rc.RecordSection("map uids " + std::to_string(n * k));
@ -844,7 +844,7 @@ ExecutionEngineImpl::Search(int64_t n, const std::vector<int64_t>& ids, int64_t
TimeRecorder rc(LogOut("[%s][%ld] ExecutionEngineImpl::Search vector of ids", "search", 0));
if (index_ == nullptr) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] ExecutionEngineImpl: index is null, failed to search", "search", 0);
return Status(DB_ERROR, "index is null");
}
@ -852,7 +852,7 @@ ExecutionEngineImpl::Search(int64_t n, const std::vector<int64_t>& ids, int64_t
conf[knowhere::meta::TOPK] = k;
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(index_->index_type());
if (!adapter->CheckSearch(conf, index_->index_type(), index_->index_mode())) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] Illegal search params", "search", 0);
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] Illegal search params", "search", 0);
throw Exception(DB_ERROR, "Illegal search params");
}
@ -906,8 +906,8 @@ ExecutionEngineImpl::Search(int64_t n, const std::vector<int64_t>& ids, int64_t
auto result = index_->QueryById(dataset, conf);
rc.RecordSection("query by id done");
ENGINE_LOG_DEBUG << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld] get %ld uids from index %s", "search", 0, index_->GetUids().size(),
location_.c_str());
MapAndCopyResult(result, uids, offsets.size(), k, distances, labels);
rc.RecordSection("map uids " + std::to_string(offsets.size() * k));
}
@ -922,7 +922,7 @@ ExecutionEngineImpl::Search(int64_t n, const std::vector<int64_t>& ids, int64_t
Status
ExecutionEngineImpl::GetVectorByID(const int64_t& id, float* vector, bool hybrid) {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to search";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, failed to search";
return Status(DB_ERROR, "index is null");
}
@ -947,11 +947,11 @@ ExecutionEngineImpl::GetVectorByID(const int64_t& id, float* vector, bool hybrid
Status
ExecutionEngineImpl::GetVectorByID(const int64_t& id, uint8_t* vector, bool hybrid) {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to search";
LOG_ENGINE_ERROR_ << "ExecutionEngineImpl: index is null, failed to search";
return Status(DB_ERROR, "index is null");
}
ENGINE_LOG_DEBUG << "Get binary vector by id: " << id;
LOG_ENGINE_DEBUG_ << "Get binary vector by id: " << id;
if (hybrid) {
HybridLoad();
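Several search and insert hunks above stream the result of LogOut("[%s][%ld] ...", "search", 0) into the new macros, i.e. a printf-style helper formats a request tag and id that gets prepended to the message. A minimal, self-contained sketch of such a helper follows; the real signature and buffer handling in the codebase may differ.

    // Hedged sketch of a printf-style formatter like the LogOut(...) calls above.
    #include <cstdarg>
    #include <cstdio>
    #include <string>

    std::string
    LogOut(const char* pattern, ...) {
        char buffer[256] = {'\0'};  // assumed fixed-size buffer
        va_list args;
        va_start(args, pattern);
        vsnprintf(buffer, sizeof(buffer), pattern, args);
        va_end(args);
        return std::string(buffer);
    }

    // Usage mirroring the hunks above:
    //   LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] Illegal search params", "search", 0);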

@ -36,7 +36,7 @@ MemManagerImpl::InsertVectors(const std::string& collection_id, int64_t length,
const float* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) {
flushed_tables.clear();
if (GetCurrentMem() > options_.insert_buffer_size_) {
ENGINE_LOG_DEBUG << "Insert buffer size exceeds limit. Performing force flush";
LOG_ENGINE_DEBUG_ << "Insert buffer size exceeds limit. Performing force flush";
// TODO(zhiru): Don't apply delete here in order to avoid possible concurrency issues with Merge
auto status = Flush(flushed_tables, false);
if (!status.ok()) {
@ -62,12 +62,12 @@ MemManagerImpl::InsertVectors(const std::string& collection_id, int64_t length,
const uint8_t* vectors, uint64_t lsn, std::set<std::string>& flushed_tables) {
flushed_tables.clear();
if (GetCurrentMem() > options_.insert_buffer_size_) {
ENGINE_LOG_DEBUG << LogOut("[%s][%ld] ", "insert", 0)
<< "Insert buffer size exceeds limit. Performing force flush";
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld] ", "insert", 0)
<< "Insert buffer size exceeds limit. Performing force flush";
// TODO(zhiru): Don't apply delete here in order to avoid possible concurrency issues with Merge
auto status = Flush(flushed_tables, false);
if (!status.ok()) {
ENGINE_LOG_DEBUG << LogOut("[%s][%ld] ", "insert", 0) << "Flush fail: " << status.message();
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld] ", "insert", 0) << "Flush fail: " << status.message();
return status;
}
}
@ -143,13 +143,13 @@ MemManagerImpl::Flush(const std::string& collection_id, bool apply_delete) {
std::unique_lock<std::mutex> lock(serialization_mtx_);
auto max_lsn = GetMaxLSN(temp_immutable_list);
for (auto& mem : temp_immutable_list) {
ENGINE_LOG_DEBUG << "Flushing collection: " << mem->GetTableId();
LOG_ENGINE_DEBUG_ << "Flushing collection: " << mem->GetTableId();
auto status = mem->Serialize(max_lsn, apply_delete);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Flush collection " << mem->GetTableId() << " failed";
LOG_ENGINE_ERROR_ << "Flush collection " << mem->GetTableId() << " failed";
return status;
}
ENGINE_LOG_DEBUG << "Flushed collection: " << mem->GetTableId();
LOG_ENGINE_DEBUG_ << "Flushed collection: " << mem->GetTableId();
}
return Status::OK();
@ -169,14 +169,14 @@ MemManagerImpl::Flush(std::set<std::string>& table_ids, bool apply_delete) {
table_ids.clear();
auto max_lsn = GetMaxLSN(temp_immutable_list);
for (auto& mem : temp_immutable_list) {
ENGINE_LOG_DEBUG << "Flushing collection: " << mem->GetTableId();
LOG_ENGINE_DEBUG_ << "Flushing collection: " << mem->GetTableId();
auto status = mem->Serialize(max_lsn, apply_delete);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Flush collection " << mem->GetTableId() << " failed";
LOG_ENGINE_ERROR_ << "Flush collection " << mem->GetTableId() << " failed";
return status;
}
table_ids.insert(mem->GetTableId());
ENGINE_LOG_DEBUG << "Flushed collection: " << mem->GetTableId();
LOG_ENGINE_DEBUG_ << "Flushed collection: " << mem->GetTableId();
}
meta_->SetGlobalLastLSN(max_lsn);
@ -194,7 +194,7 @@ MemManagerImpl::ToImmutable(const std::string& collection_id) {
mem_id_map_.erase(memIt);
}
// std::string err_msg = "Could not find collection = " + collection_id + " to flush";
// ENGINE_LOG_ERROR << err_msg;
// LOG_ENGINE_ERROR_ << err_msg;
// return Status(DB_NOT_FOUND, err_msg);
}

@ -53,7 +53,7 @@ MemTable::Add(const VectorSourcePtr& source) {
if (!status.ok()) {
std::string err_msg = "Insert failed: " + status.ToString();
ENGINE_LOG_ERROR << LogOut("[%s][%ld] ", "insert", 0) << err_msg;
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] ", "insert", 0) << err_msg;
return Status(DB_ERROR, err_msg);
}
}
@ -113,7 +113,7 @@ MemTable::Serialize(uint64_t wal_lsn, bool apply_delete) {
return status;
}
ENGINE_LOG_DEBUG << "Flushed segment " << (*mem_table_file)->GetSegmentId();
LOG_ENGINE_DEBUG_ << "Flushed segment " << (*mem_table_file)->GetSegmentId();
{
std::lock_guard<std::mutex> lock(mutex_);
@ -125,13 +125,13 @@ MemTable::Serialize(uint64_t wal_lsn, bool apply_delete) {
auto status = meta_->UpdateCollectionFlushLSN(collection_id_, wal_lsn);
if (!status.ok()) {
std::string err_msg = "Failed to write flush lsn to meta: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end - start;
ENGINE_LOG_DEBUG << "Finished flushing for collection " << collection_id_ << " in " << diff.count() << " s";
LOG_ENGINE_DEBUG_ << "Finished flushing for collection " << collection_id_ << " in " << diff.count() << " s";
return Status::OK();
}
@ -173,7 +173,7 @@ MemTable::ApplyDeletes() {
// Serialize segment's deletedDoc TODO(zhiru): append directly to previous file for now, may have duplicates
// Serialize bloom filter
ENGINE_LOG_DEBUG << "Applying " << doc_ids_to_delete_.size() << " deletes in collection: " << collection_id_;
LOG_ENGINE_DEBUG_ << "Applying " << doc_ids_to_delete_.size() << " deletes in collection: " << collection_id_;
auto start_total = std::chrono::high_resolution_clock::now();
@ -185,7 +185,7 @@ MemTable::ApplyDeletes() {
auto status = meta_->FilesByType(collection_id_, file_types, table_files);
if (!status.ok()) {
std::string err_msg = "Failed to apply deletes: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
@ -220,13 +220,14 @@ MemTable::ApplyDeletes() {
auto time0 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff0 = time0 - start_total;
ENGINE_LOG_DEBUG << "Found " << ids_to_check_map.size() << " segment to apply deletes in " << diff0.count() << " s";
LOG_ENGINE_DEBUG_ << "Found " << ids_to_check_map.size() << " segment to apply deletes in " << diff0.count()
<< " s";
meta::SegmentsSchema table_files_to_update;
for (auto& kv : ids_to_check_map) {
auto& table_file = table_files[kv.first];
ENGINE_LOG_DEBUG << "Applying deletes in segment: " << table_file.segment_id_;
LOG_ENGINE_DEBUG_ << "Applying deletes in segment: " << table_file.segment_id_;
auto time1 = std::chrono::high_resolution_clock::now();
@ -273,13 +274,13 @@ MemTable::ApplyDeletes() {
auto time2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff1 = time2 - time1;
ENGINE_LOG_DEBUG << "Loading uids and deleted docs took " << diff1.count() << " s";
LOG_ENGINE_DEBUG_ << "Loading uids and deleted docs took " << diff1.count() << " s";
std::sort(ids_to_check.begin(), ids_to_check.end());
auto time3 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff2 = time3 - time2;
ENGINE_LOG_DEBUG << "Sorting " << ids_to_check.size() << " ids took " << diff2.count() << " s";
LOG_ENGINE_DEBUG_ << "Sorting " << ids_to_check.size() << " ids took " << diff2.count() << " s";
size_t delete_count = 0;
auto find_diff = std::chrono::duration<double>::zero();
@ -315,9 +316,9 @@ MemTable::ApplyDeletes() {
}
}
ENGINE_LOG_DEBUG << "Finding " << ids_to_check.size() << " uids in " << uids.size() << " uids took "
<< find_diff.count() << " s in total";
ENGINE_LOG_DEBUG << "Setting deleted docs and bloom filter took " << set_diff.count() << " s in total";
LOG_ENGINE_DEBUG_ << "Finding " << ids_to_check.size() << " uids in " << uids.size() << " uids took "
<< find_diff.count() << " s in total";
LOG_ENGINE_DEBUG_ << "Setting deleted docs and bloom filter took " << set_diff.count() << " s in total";
auto time4 = std::chrono::high_resolution_clock::now();
@ -336,9 +337,9 @@ MemTable::ApplyDeletes() {
auto time5 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff4 = time5 - time4;
ENGINE_LOG_DEBUG << "Appended " << deleted_docs->GetSize()
<< " offsets to deleted docs in segment: " << table_file.segment_id_ << " in " << diff4.count()
<< " s";
LOG_ENGINE_DEBUG_ << "Appended " << deleted_docs->GetSize()
<< " offsets to deleted docs in segment: " << table_file.segment_id_ << " in "
<< diff4.count() << " s";
// start = std::chrono::high_resolution_clock::now();
@ -348,8 +349,8 @@ MemTable::ApplyDeletes() {
}
auto time6 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff5 = time6 - time5;
ENGINE_LOG_DEBUG << "Updated bloom filter in segment: " << table_file.segment_id_ << " in " << diff5.count()
<< " s";
LOG_ENGINE_DEBUG_ << "Updated bloom filter in segment: " << table_file.segment_id_ << " in " << diff5.count()
<< " s";
// Update collection file row count
for (auto& file : segment_files) {
@ -362,8 +363,8 @@ MemTable::ApplyDeletes() {
auto time7 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff6 = time7 - time6;
diff6 = time6 - time5;
ENGINE_LOG_DEBUG << "Update collection file row count in vector of segment: " << table_file.segment_id_
<< " in " << diff6.count() << " s";
LOG_ENGINE_DEBUG_ << "Update collection file row count in vector of segment: " << table_file.segment_id_
<< " in " << diff6.count() << " s";
}
auto time7 = std::chrono::high_resolution_clock::now();
@ -372,7 +373,7 @@ MemTable::ApplyDeletes() {
if (!status.ok()) {
std::string err_msg = "Failed to apply deletes: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
@ -380,9 +381,9 @@ MemTable::ApplyDeletes() {
auto end_total = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff7 = end_total - time7;
ENGINE_LOG_DEBUG << "Update deletes to meta in collection " << collection_id_ << " in " << diff7.count() << " s";
LOG_ENGINE_DEBUG_ << "Update deletes to meta in collection " << collection_id_ << " in " << diff7.count() << " s";
std::chrono::duration<double> diff_total = end_total - start_total;
ENGINE_LOG_DEBUG << "Finished deletes in collection " << collection_id_ << " in " << diff_total.count() << " s";
LOG_ENGINE_DEBUG_ << "Finished deletes in collection " << collection_id_ << " in " << diff_total.count() << " s";
OngoingFileChecker::GetInstance().UnmarkOngoingFiles(files_to_check);
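The MemTable::ApplyDeletes hunks above pair each phase with a wall-clock measurement: the phase is bracketed by std::chrono::high_resolution_clock::now() calls and the elapsed seconds are streamed into LOG_ENGINE_DEBUG_. Stripped of the surrounding member code, the pattern is simply the sketch below; std::cout stands in for the project's log macro to keep it self-contained.

    #include <chrono>
    #include <iostream>

    void
    TimedPhase() {
        auto start = std::chrono::high_resolution_clock::now();
        // ... phase work: load uids, sort ids, update bloom filter, etc. ...
        auto end = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double> diff = end - start;
        std::cout << "Phase took " << diff.count() << " s" << std::endl;  // the real code uses LOG_ENGINE_DEBUG_
    }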

@ -54,7 +54,7 @@ MemTableFile::CreateCollectionFile() {
table_file_schema_ = table_file_schema;
} else {
std::string err_msg = "MemTableFile::CreateCollectionFile failed: " + status.ToString();
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
}
return status;
}
@ -65,7 +65,7 @@ MemTableFile::Add(const VectorSourcePtr& source) {
std::string err_msg =
"MemTableFile::Add: table_file_schema dimension = " + std::to_string(table_file_schema_.dimension_) +
", collection_id = " + table_file_schema_.collection_id_;
ENGINE_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << err_msg;
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld]", "insert", 0) << err_msg;
return Status(DB_ERROR, "Not able to create collection file");
}
@ -160,7 +160,7 @@ MemTableFile::Serialize(uint64_t wal_lsn) {
auto status = segment_writer_ptr_->Serialize();
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to serialize segment: " << table_file_schema_.segment_id_;
LOG_ENGINE_ERROR_ << "Failed to serialize segment: " << table_file_schema_.segment_id_;
/* Can't mark it as to_delete because data is stored in this mem collection file. Any further flush
* will try to serialize the same mem collection file and it won't be able to find the directory
@ -168,7 +168,7 @@ MemTableFile::Serialize(uint64_t wal_lsn) {
*
table_file_schema_.file_type_ = meta::SegmentSchema::TO_DELETE;
meta_->UpdateCollectionFile(table_file_schema_);
ENGINE_LOG_DEBUG << "Failed to serialize segment, mark file: " << table_file_schema_.file_id_
LOG_ENGINE_DEBUG_ << "Failed to serialize segment, mark file: " << table_file_schema_.file_id_
<< " to to_delete";
*/
return status;
@ -194,8 +194,8 @@ MemTableFile::Serialize(uint64_t wal_lsn) {
status = meta_->UpdateCollectionFile(table_file_schema_);
ENGINE_LOG_DEBUG << "New " << ((table_file_schema_.file_type_ == meta::SegmentSchema::RAW) ? "raw" : "to_index")
<< " file " << table_file_schema_.file_id_ << " of size " << size << " bytes, lsn = " << wal_lsn;
LOG_ENGINE_DEBUG_ << "New " << ((table_file_schema_.file_type_ == meta::SegmentSchema::RAW) ? "raw" : "to_index")
<< " file " << table_file_schema_.file_id_ << " of size " << size << " bytes, lsn = " << wal_lsn;
// TODO(zhiru): cache
/*

@ -40,7 +40,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment:
SafeIDGenerator& id_generator = SafeIDGenerator::GetInstance();
Status status = id_generator.GetNextIDNumbers(num_vectors_added, vector_ids_to_add);
if (!status.ok()) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << "Generate ids fail: " << status.message();
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld]", "insert", 0) << "Generate ids fail: " << status.message();
return status;
}
} else {
@ -62,7 +62,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment:
vectors.resize(size);
memcpy(vectors.data(), vectors_.float_data_.data() + current_num_vectors_added * table_file_schema.dimension_,
size);
ENGINE_LOG_DEBUG << LogOut("[%s][%ld]", "insert", 0) << "Insert into segment";
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld]", "insert", 0) << "Insert into segment";
status = segment_writer_ptr->AddVectors(table_file_schema.file_id_, vectors, vector_ids_to_add);
} else if (!vectors_.binary_data_.empty()) {
@ -79,7 +79,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment:
vectors.data(),
vectors_.binary_data_.data() + current_num_vectors_added * SingleVectorSize(table_file_schema.dimension_),
size);
ENGINE_LOG_DEBUG << LogOut("[%s][%ld]", "insert", 0) << "Insert into segment";
LOG_ENGINE_DEBUG_ << LogOut("[%s][%ld]", "insert", 0) << "Insert into segment";
status = segment_writer_ptr->AddVectors(table_file_schema.file_id_, vectors, vector_ids_to_add);
}
@ -90,7 +90,7 @@ VectorSource::Add(/*const ExecutionEnginePtr& execution_engine,*/ const segment:
vector_ids_.insert(vector_ids_.end(), std::make_move_iterator(vector_ids_to_add.begin()),
std::make_move_iterator(vector_ids_to_add.end()));
} else {
ENGINE_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << "VectorSource::Add failed: " + status.ToString();
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld]", "insert", 0) << "VectorSource::Add failed: " + status.ToString();
}
return status;

@ -50,18 +50,18 @@ MetaFactory::Build(const DBMetaOptions& metaOptions, const int& mode) {
utils::MetaUriInfo uri_info;
auto status = utils::ParseMetaUri(uri, uri_info);
if (!status.ok()) {
ENGINE_LOG_ERROR << "Wrong URI format: URI = " << uri;
LOG_ENGINE_ERROR_ << "Wrong URI format: URI = " << uri;
throw InvalidArgumentException("Wrong URI format ");
}
if (strcasecmp(uri_info.dialect_.c_str(), "mysql") == 0) {
ENGINE_LOG_INFO << "Using MySQL";
LOG_ENGINE_INFO_ << "Using MySQL";
return std::make_shared<meta::MySQLMetaImpl>(metaOptions, mode);
} else if (strcasecmp(uri_info.dialect_.c_str(), "sqlite") == 0) {
ENGINE_LOG_INFO << "Using SQLite";
LOG_ENGINE_INFO_ << "Using SQLite";
return std::make_shared<meta::SqliteMetaImpl>(metaOptions);
} else {
ENGINE_LOG_ERROR << "Invalid dialect in URI: dialect = " << uri_info.dialect_;
LOG_ENGINE_ERROR_ << "Invalid dialect in URI: dialect = " << uri_info.dialect_;
throw InvalidArgumentException("URI dialect is not mysql / sqlite");
}
}
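The MetaFactory hunk above picks the meta backend by comparing the parsed URI dialect case-insensitively and logs the choice through the new macros. A standalone sketch of that dispatch, with URI parsing elided and hypothetical names, could look like this:

    // strcasecmp is POSIX (<strings.h>); the enum and function names are illustrative.
    #include <strings.h>
    #include <stdexcept>
    #include <string>

    enum class MetaBackend { MySQL, SQLite };

    MetaBackend
    SelectBackend(const std::string& dialect) {
        if (strcasecmp(dialect.c_str(), "mysql") == 0) {
            return MetaBackend::MySQL;
        } else if (strcasecmp(dialect.c_str(), "sqlite") == 0) {
            return MetaBackend::SQLite;
        }
        throw std::invalid_argument("URI dialect is not mysql / sqlite");
    }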

@ -36,8 +36,8 @@ void
MySQLConnectionPool::release(const mysqlpp::Connection* pc) {
mysqlpp::ConnectionPool::release(pc);
if (conns_in_use_ <= 0) {
ENGINE_LOG_WARNING << "MySQLConnetionPool::release: conns_in_use_ is less than zero. conns_in_use_ = "
<< conns_in_use_;
LOG_ENGINE_WARNING_ << "MySQLConnetionPool::release: conns_in_use_ is less than zero. conns_in_use_ = "
<< conns_in_use_;
} else {
--conns_in_use_;
}
@ -70,8 +70,8 @@ MySQLConnectionPool::create() {
user_.empty() ? 0 : user_.c_str(), password_.empty() ? 0 : password_.c_str(), port_);
return conn;
} catch (const mysqlpp::ConnectionFailed& er) {
ENGINE_LOG_ERROR << "Failed to connect to database server"
<< ": " << er.what();
LOG_ENGINE_ERROR_ << "Failed to connect to database server"
<< ": " << er.what();
return nullptr;
}
}

@ -49,12 +49,12 @@ namespace {
Status
HandleException(const std::string& desc, const char* what = nullptr) {
if (what == nullptr) {
ENGINE_LOG_ERROR << desc;
LOG_ENGINE_ERROR_ << desc;
return Status(DB_META_TRANSACTION_FAILED, desc);
}
std::string msg = desc + ":" + what;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_META_TRANSACTION_FAILED, msg);
}
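HandleException above is why every catch block in the MySQL meta hunks below collapses into a single line: the description (plus the optional what() text) is logged once through LOG_ENGINE_ERROR_ and wrapped into a DB_META_TRANSACTION_FAILED status. A simplified, self-contained sketch of that pattern follows; Status and HandleException here are stand-ins for the project's own types shown above, not the real definitions.

    #include <exception>
    #include <iostream>
    #include <string>

    struct Status {
        bool ok;
        std::string msg;
    };

    Status
    HandleException(const std::string& desc, const char* what = nullptr) {
        std::string msg = (what == nullptr) ? desc : desc + ":" + what;
        std::cerr << msg << std::endl;  // the real code streams into LOG_ENGINE_ERROR_
        return Status{false, msg};
    }

    Status
    CreateCollectionMeta() {
        try {
            // ... execute a MySQL statement ...
            return Status{true, ""};
        } catch (const std::exception& e) {
            return HandleException("Failed to create collection", e.what());
        }
    }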
@ -236,7 +236,7 @@ MySQLMetaImpl::ValidateMetaSchema() {
exist_fields.push_back(MetaField(name, type, ""));
}
} catch (std::exception& e) {
ENGINE_LOG_DEBUG << "Meta collection '" << schema.name() << "' not exist and will be created";
LOG_ENGINE_DEBUG_ << "Meta collection '" << schema.name() << "' not exist and will be created";
}
if (exist_fields.empty()) {
@ -270,7 +270,7 @@ MySQLMetaImpl::Initialize() {
fiu_do_on("MySQLMetaImpl.Initialize.fail_create_directory", ret = false);
if (!ret) {
std::string msg = "Failed to create db directory " + options_.path_;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(DB_META_TRANSACTION_FAILED, msg);
}
}
@ -282,13 +282,13 @@ MySQLMetaImpl::Initialize() {
auto status = utils::ParseMetaUri(uri, uri_info);
if (!status.ok()) {
std::string msg = "Wrong URI format: " + uri;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(DB_INVALID_META_URI, msg);
}
if (strcasecmp(uri_info.dialect_.c_str(), "mysql") != 0) {
std::string msg = "URI's dialect is not MySQL";
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(DB_INVALID_META_URI, msg);
}
@ -302,7 +302,7 @@ MySQLMetaImpl::Initialize() {
mysql_connection_pool_ = std::make_shared<MySQLConnectionPool>(
uri_info.db_name_, uri_info.username_, uri_info.password_, uri_info.host_, port, max_pool_size);
ENGINE_LOG_DEBUG << "MySQL connection pool: maximum pool size = " << std::to_string(max_pool_size);
LOG_ENGINE_DEBUG_ << "MySQL connection pool: maximum pool size = " << std::to_string(max_pool_size);
// step 4: validate to avoid open old version schema
ValidateMetaSchema();
@ -317,7 +317,7 @@ MySQLMetaImpl::Initialize() {
if (connectionPtr == nullptr) {
std::string msg = "Failed to connect MySQL meta server: " + uri;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(DB_INVALID_META_URI, msg);
}
@ -326,7 +326,7 @@ MySQLMetaImpl::Initialize() {
if (!is_thread_aware) {
std::string msg =
"Failed to initialize MySQL meta backend: MySQL client component wasn't built with thread awareness";
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(DB_INVALID_META_URI, msg);
}
@ -335,13 +335,13 @@ MySQLMetaImpl::Initialize() {
InitializeQuery << "CREATE TABLE IF NOT EXISTS " << TABLES_SCHEMA.name() << " (" << TABLES_SCHEMA.ToString() + ");";
ENGINE_LOG_DEBUG << "Initialize: " << InitializeQuery.str();
LOG_ENGINE_DEBUG_ << "Initialize: " << InitializeQuery.str();
bool initialize_query_exec = InitializeQuery.exec();
fiu_do_on("MySQLMetaImpl.Initialize.fail_create_collection_scheme", initialize_query_exec = false);
if (!initialize_query_exec) {
std::string msg = "Failed to create meta collection 'Tables' in MySQL";
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(DB_META_TRANSACTION_FAILED, msg);
}
@ -349,13 +349,13 @@ MySQLMetaImpl::Initialize() {
InitializeQuery << "CREATE TABLE IF NOT EXISTS " << TABLEFILES_SCHEMA.name() << " ("
<< TABLEFILES_SCHEMA.ToString() + ");";
ENGINE_LOG_DEBUG << "Initialize: " << InitializeQuery.str();
LOG_ENGINE_DEBUG_ << "Initialize: " << InitializeQuery.str();
initialize_query_exec = InitializeQuery.exec();
fiu_do_on("MySQLMetaImpl.Initialize.fail_create_collection_files", initialize_query_exec = false);
if (!initialize_query_exec) {
std::string msg = "Failed to create meta collection 'TableFiles' in MySQL";
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(DB_META_TRANSACTION_FAILED, msg);
}
@ -363,12 +363,12 @@ MySQLMetaImpl::Initialize() {
InitializeQuery << "CREATE TABLE IF NOT EXISTS " << ENVIRONMENT_SCHEMA.name() << " ("
<< ENVIRONMENT_SCHEMA.ToString() + ");";
ENGINE_LOG_DEBUG << "Initialize: " << InitializeQuery.str();
LOG_ENGINE_DEBUG_ << "Initialize: " << InitializeQuery.str();
initialize_query_exec = InitializeQuery.exec();
if (!initialize_query_exec) {
std::string msg = "Failed to create meta table 'Environment' in MySQL";
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(DB_META_TRANSACTION_FAILED, msg);
}
@ -397,7 +397,7 @@ MySQLMetaImpl::CreateCollection(CollectionSchema& collection_schema) {
statement << "SELECT state FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
<< collection_schema.collection_id_ << ";";
ENGINE_LOG_DEBUG << "CreateCollection: " << statement.str();
LOG_ENGINE_DEBUG_ << "CreateCollection: " << statement.str();
mysqlpp::StoreQueryResult res = statement.store();
@ -437,7 +437,7 @@ MySQLMetaImpl::CreateCollection(CollectionSchema& collection_schema) {
<< metric_type << ", " << mysqlpp::quote << owner_collection << ", " << mysqlpp::quote
<< partition_tag << ", " << mysqlpp::quote << version << ", " << flush_lsn << ");";
ENGINE_LOG_DEBUG << "CreateCollection: " << statement.str();
LOG_ENGINE_DEBUG_ << "CreateCollection: " << statement.str();
if (mysqlpp::SimpleResult res = statement.execute()) {
collection_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()?
@ -448,7 +448,7 @@ MySQLMetaImpl::CreateCollection(CollectionSchema& collection_schema) {
}
} // Scoped Connection
ENGINE_LOG_DEBUG << "Successfully create collection: " << collection_schema.collection_id_;
LOG_ENGINE_DEBUG_ << "Successfully create collection: " << collection_schema.collection_id_;
return utils::CreateCollectionPath(options_, collection_schema.collection_id_);
} catch (std::exception& e) {
return HandleException("Failed to create collection", e.what());
@ -477,7 +477,7 @@ MySQLMetaImpl::DescribeCollection(CollectionSchema& collection_schema) {
<< collection_schema.collection_id_ << " AND state <> "
<< std::to_string(CollectionSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "DescribeCollection: " << statement.str();
LOG_ENGINE_DEBUG_ << "DescribeCollection: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -531,7 +531,7 @@ MySQLMetaImpl::HasCollection(const std::string& collection_id, bool& has_or_not)
<< " AS " << mysqlpp::quote << "check"
<< ";";
ENGINE_LOG_DEBUG << "HasCollection: " << HasCollectionQuery.str();
LOG_ENGINE_DEBUG_ << "HasCollection: " << HasCollectionQuery.str();
res = HasCollectionQuery.store();
} // Scoped Connection
@ -566,7 +566,7 @@ MySQLMetaImpl::AllCollections(std::vector<CollectionSchema>& collection_schema_a
<< " FROM " << META_TABLES << " WHERE state <> " << std::to_string(CollectionSchema::TO_DELETE)
<< " AND owner_table = \"\";";
ENGINE_LOG_DEBUG << "AllCollections: " << statement.str();
LOG_ENGINE_DEBUG_ << "AllCollections: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -615,7 +615,7 @@ MySQLMetaImpl::DropCollection(const std::string& collection_id) {
statement << "UPDATE " << META_TABLES << " SET state = " << std::to_string(CollectionSchema::TO_DELETE)
<< " WHERE table_id = " << mysqlpp::quote << collection_id << ";";
ENGINE_LOG_DEBUG << "DeleteCollection: " << statement.str();
LOG_ENGINE_DEBUG_ << "DeleteCollection: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to drop collection", statement.error());
@ -628,7 +628,7 @@ MySQLMetaImpl::DropCollection(const std::string& collection_id) {
DeleteCollectionFiles(collection_id);
}
ENGINE_LOG_DEBUG << "Successfully delete collection: " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully delete collection: " << collection_id;
} catch (std::exception& e) {
return HandleException("Failed to drop collection", e.what());
}
@ -659,14 +659,14 @@ MySQLMetaImpl::DeleteCollectionFiles(const std::string& collection_id) {
<< " WHERE table_id = " << mysqlpp::quote << collection_id << " AND file_type <> "
<< std::to_string(SegmentSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "DeleteCollectionFiles: " << statement.str();
LOG_ENGINE_DEBUG_ << "DeleteCollectionFiles: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to delete colletion files", statement.error());
}
} // Scoped Connection
ENGINE_LOG_DEBUG << "Successfully delete collection files from " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully delete collection files from " << collection_id;
} catch (std::exception& e) {
return HandleException("Failed to delete colletion files", e.what());
}
@ -733,7 +733,7 @@ MySQLMetaImpl::CreateCollectionFile(SegmentSchema& file_schema) {
<< mysqlpp::quote << file_id << ", " << file_type << ", " << file_size << ", " << row_count
<< ", " << updated_time << ", " << created_on << ", " << date << ", " << flush_lsn << ");";
ENGINE_LOG_DEBUG << "CreateCollectionFile: " << statement.str();
LOG_ENGINE_DEBUG_ << "CreateCollectionFile: " << statement.str();
if (mysqlpp::SimpleResult res = statement.execute()) {
file_schema.id_ = res.insert_id(); // Might need to use SELECT LAST_INSERT_ID()?
@ -744,7 +744,7 @@ MySQLMetaImpl::CreateCollectionFile(SegmentSchema& file_schema) {
}
} // Scoped Connection
ENGINE_LOG_DEBUG << "Successfully create collection file, file id = " << file_schema.file_id_;
LOG_ENGINE_DEBUG_ << "Successfully create collection file, file id = " << file_schema.file_id_;
return utils::CreateCollectionFilePath(options_, file_schema);
} catch (std::exception& e) {
return HandleException("Failed to create collection file", e.what());
@ -784,7 +784,7 @@ MySQLMetaImpl::GetCollectionFiles(const std::string& collection_id, const std::v
<< idStr << ")"
<< " AND file_type <> " << std::to_string(SegmentSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "GetCollectionFiles: " << statement.str();
LOG_ENGINE_DEBUG_ << "GetCollectionFiles: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -815,7 +815,7 @@ MySQLMetaImpl::GetCollectionFiles(const std::string& collection_id, const std::v
collection_files.emplace_back(file_schema);
}
ENGINE_LOG_DEBUG << "Get collection files by id";
LOG_ENGINE_DEBUG_ << "Get collection files by id";
return ret;
} catch (std::exception& e) {
return HandleException("Failed to get collection files", e.what());
@ -840,7 +840,7 @@ MySQLMetaImpl::GetCollectionFilesBySegmentId(const std::string& segment_id,
<< " FROM " << META_TABLEFILES << " WHERE segment_id = " << mysqlpp::quote << segment_id
<< " AND file_type <> " << std::to_string(SegmentSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "GetCollectionFilesBySegmentId: " << statement.str();
LOG_ENGINE_DEBUG_ << "GetCollectionFilesBySegmentId: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -875,7 +875,7 @@ MySQLMetaImpl::GetCollectionFilesBySegmentId(const std::string& segment_id,
}
}
ENGINE_LOG_DEBUG << "Get collection files by segment id";
LOG_ENGINE_DEBUG_ << "Get collection files by segment id";
return Status::OK();
} catch (std::exception& e) {
return HandleException("Failed to get collection files by segment id", e.what());
@ -903,7 +903,7 @@ MySQLMetaImpl::UpdateCollectionIndex(const std::string& collection_id, const Col
<< collection_id << " AND state <> "
<< std::to_string(CollectionSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "UpdateCollectionIndex: " << updateCollectionIndexParamQuery.str();
LOG_ENGINE_DEBUG_ << "UpdateCollectionIndex: " << updateCollectionIndexParamQuery.str();
mysqlpp::StoreQueryResult res = updateCollectionIndexParamQuery.store();
@ -922,7 +922,7 @@ MySQLMetaImpl::UpdateCollectionIndex(const std::string& collection_id, const Col
<< index.extra_params_.dump() << " ,metric_type = " << index.metric_type_
<< " WHERE table_id = " << mysqlpp::quote << collection_id << ";";
ENGINE_LOG_DEBUG << "UpdateCollectionIndex: " << updateCollectionIndexParamQuery.str();
LOG_ENGINE_DEBUG_ << "UpdateCollectionIndex: " << updateCollectionIndexParamQuery.str();
if (!updateCollectionIndexParamQuery.exec()) {
return HandleException("Failed to update collection index",
@ -933,7 +933,7 @@ MySQLMetaImpl::UpdateCollectionIndex(const std::string& collection_id, const Col
}
} // Scoped Connection
ENGINE_LOG_DEBUG << "Successfully update collection index for " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully update collection index for " << collection_id;
} catch (std::exception& e) {
return HandleException("Failed to update collection index", e.what());
}
@ -960,14 +960,14 @@ MySQLMetaImpl::UpdateCollectionFlag(const std::string& collection_id, int64_t fl
statement << "UPDATE " << META_TABLES << " SET flag = " << flag << " WHERE table_id = " << mysqlpp::quote
<< collection_id << ";";
ENGINE_LOG_DEBUG << "UpdateCollectionFlag: " << statement.str();
LOG_ENGINE_DEBUG_ << "UpdateCollectionFlag: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to update collection flag", statement.error());
}
} // Scoped Connection
ENGINE_LOG_DEBUG << "Successfully update collection flag for " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully update collection flag for " << collection_id;
} catch (std::exception& e) {
return HandleException("Failed to update collection flag", e.what());
}
@ -991,14 +991,14 @@ MySQLMetaImpl::UpdateCollectionFlushLSN(const std::string& collection_id, uint64
statement << "UPDATE " << META_TABLES << " SET flush_lsn = " << flush_lsn
<< " WHERE table_id = " << mysqlpp::quote << collection_id << ";";
ENGINE_LOG_DEBUG << "UpdateCollectionFlushLSN: " << statement.str();
LOG_ENGINE_DEBUG_ << "UpdateCollectionFlushLSN: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to update collection lsn", statement.error());
}
} // Scoped Connection
ENGINE_LOG_DEBUG << "Successfully update collection flush_lsn for " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully update collection flush_lsn for " << collection_id;
} catch (std::exception& e) {
return HandleException("Failed to update collection lsn", e.what());
}
@ -1022,7 +1022,7 @@ MySQLMetaImpl::GetCollectionFlushLSN(const std::string& collection_id, uint64_t&
statement << "SELECT flush_lsn FROM " << META_TABLES << " WHERE collection_id = " << mysqlpp::quote
<< collection_id << ";";
ENGINE_LOG_DEBUG << "GetCollectionFlushLSN: " << statement.str();
LOG_ENGINE_DEBUG_ << "GetCollectionFlushLSN: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -1061,7 +1061,7 @@ MySQLMetaImpl::UpdateCollectionFile(SegmentSchema& file_schema) {
statement << "SELECT state FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote
<< file_schema.collection_id_ << ";";
ENGINE_LOG_DEBUG << "UpdateCollectionFile: " << statement.str();
LOG_ENGINE_DEBUG_ << "UpdateCollectionFile: " << statement.str();
mysqlpp::StoreQueryResult res = statement.store();
@ -1091,16 +1091,16 @@ MySQLMetaImpl::UpdateCollectionFile(SegmentSchema& file_schema) {
<< " ,updated_time = " << updated_time << " ,created_on = " << created_on << " ,date = " << date
<< " WHERE id = " << id << ";";
ENGINE_LOG_DEBUG << "UpdateCollectionFile: " << statement.str();
LOG_ENGINE_DEBUG_ << "UpdateCollectionFile: " << statement.str();
if (!statement.exec()) {
ENGINE_LOG_DEBUG << "collection_id= " << file_schema.collection_id_
<< " file_id=" << file_schema.file_id_;
LOG_ENGINE_DEBUG_ << "collection_id= " << file_schema.collection_id_
<< " file_id=" << file_schema.file_id_;
return HandleException("Failed to update collection file", statement.error());
}
} // Scoped Connection
ENGINE_LOG_DEBUG << "Update single collection file, file id: " << file_schema.file_id_;
LOG_ENGINE_DEBUG_ << "Update single collection file, file id: " << file_schema.file_id_;
} catch (std::exception& e) {
return HandleException("Failed to update collection file", e.what());
}
@ -1127,13 +1127,13 @@ MySQLMetaImpl::UpdateCollectionFilesToIndex(const std::string& collection_id) {
<< " AND row_count >= " << std::to_string(meta::BUILD_INDEX_THRESHOLD)
<< " AND file_type = " << std::to_string(SegmentSchema::RAW) << ";";
ENGINE_LOG_DEBUG << "UpdateCollectionFilesToIndex: " << statement.str();
LOG_ENGINE_DEBUG_ << "UpdateCollectionFilesToIndex: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to update collection files to index", statement.error());
}
ENGINE_LOG_DEBUG << "Update files to to_index for " << collection_id;
LOG_ENGINE_DEBUG_ << "Update files to to_index for " << collection_id;
} catch (std::exception& e) {
return HandleException("Failed to update collection files to index", e.what());
}
@ -1170,7 +1170,7 @@ MySQLMetaImpl::UpdateCollectionFiles(SegmentsSchema& files) {
<< " AS " << mysqlpp::quote << "check"
<< ";";
ENGINE_LOG_DEBUG << "UpdateCollectionFiles: " << statement.str();
LOG_ENGINE_DEBUG_ << "UpdateCollectionFiles: " << statement.str();
mysqlpp::StoreQueryResult res = statement.store();
@ -1201,7 +1201,7 @@ MySQLMetaImpl::UpdateCollectionFiles(SegmentsSchema& files) {
<< " ,row_count = " << row_count << " ,updated_time = " << updated_time
<< " ,created_on = " << created_on << " ,date = " << date << " WHERE id = " << id << ";";
ENGINE_LOG_DEBUG << "UpdateCollectionFiles: " << statement.str();
LOG_ENGINE_DEBUG_ << "UpdateCollectionFiles: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to update collection files", statement.error());
@ -1209,7 +1209,7 @@ MySQLMetaImpl::UpdateCollectionFiles(SegmentsSchema& files) {
}
} // Scoped Connection
ENGINE_LOG_DEBUG << "Update " << files.size() << " collection files";
LOG_ENGINE_DEBUG_ << "Update " << files.size() << " collection files";
} catch (std::exception& e) {
return HandleException("Failed to update collection files", e.what());
}
@ -1238,17 +1238,17 @@ MySQLMetaImpl::UpdateCollectionFilesRowCount(SegmentsSchema& files) {
statement << "UPDATE " << META_TABLEFILES << " SET row_count = " << row_count
<< " , updated_time = " << updated_time << " WHERE file_id = " << file.file_id_ << ";";
ENGINE_LOG_DEBUG << "UpdateCollectionFilesRowCount: " << statement.str();
LOG_ENGINE_DEBUG_ << "UpdateCollectionFilesRowCount: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to update collection files row count", statement.error());
}
ENGINE_LOG_DEBUG << "Update file " << file.file_id_ << " row count to " << file.row_count_;
LOG_ENGINE_DEBUG_ << "Update file " << file.file_id_ << " row count to " << file.row_count_;
}
} // Scoped Connection
ENGINE_LOG_DEBUG << "Update " << files.size() << " collection files";
LOG_ENGINE_DEBUG_ << "Update " << files.size() << " collection files";
} catch (std::exception& e) {
return HandleException("Failed to update collection files row count", e.what());
}
@ -1276,7 +1276,7 @@ MySQLMetaImpl::DescribeCollectionIndex(const std::string& collection_id, Collect
<< " FROM " << META_TABLES << " WHERE table_id = " << mysqlpp::quote << collection_id
<< " AND state <> " << std::to_string(CollectionSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "DescribeCollectionIndex: " << statement.str();
LOG_ENGINE_DEBUG_ << "DescribeCollectionIndex: " << statement.str();
mysqlpp::StoreQueryResult res = statement.store();
@ -1321,7 +1321,7 @@ MySQLMetaImpl::DropCollectionIndex(const std::string& collection_id) {
<< " ,updated_time = " << utils::GetMicroSecTimeStamp() << " WHERE table_id = " << mysqlpp::quote
<< collection_id << " AND file_type = " << std::to_string(SegmentSchema::INDEX) << ";";
ENGINE_LOG_DEBUG << "DropCollectionIndex: " << statement.str();
LOG_ENGINE_DEBUG_ << "DropCollectionIndex: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to drop collection index", statement.error());
@ -1332,7 +1332,7 @@ MySQLMetaImpl::DropCollectionIndex(const std::string& collection_id) {
<< " ,updated_time = " << utils::GetMicroSecTimeStamp() << " WHERE table_id = " << mysqlpp::quote
<< collection_id << " AND file_type = " << std::to_string(SegmentSchema::BACKUP) << ";";
ENGINE_LOG_DEBUG << "DropCollectionIndex: " << statement.str();
LOG_ENGINE_DEBUG_ << "DropCollectionIndex: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to drop collection index", statement.error());
@ -1348,14 +1348,14 @@ MySQLMetaImpl::DropCollectionIndex(const std::string& collection_id) {
<< " , index_params = '{}'"
<< " WHERE table_id = " << mysqlpp::quote << collection_id << ";";
ENGINE_LOG_DEBUG << "DropCollectionIndex: " << statement.str();
LOG_ENGINE_DEBUG_ << "DropCollectionIndex: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to drop collection index", statement.error());
}
} // Scoped Connection
ENGINE_LOG_DEBUG << "Successfully drop collection index for " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully drop collection index for " << collection_id;
} catch (std::exception& e) {
return HandleException("Failed to drop collection index", e.what());
}
@ -1442,7 +1442,7 @@ MySQLMetaImpl::ShowPartitions(const std::string& collection_id,
<< " WHERE owner_table = " << mysqlpp::quote << collection_id << " AND state <> "
<< std::to_string(CollectionSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "AllCollections: " << statement.str();
LOG_ENGINE_DEBUG_ << "AllCollections: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -1498,7 +1498,7 @@ MySQLMetaImpl::GetPartitionName(const std::string& collection_id, const std::str
<< collection_id << " AND partition_tag = " << mysqlpp::quote << valid_tag << " AND state <> "
<< std::to_string(CollectionSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "AllCollections: " << statement.str();
LOG_ENGINE_DEBUG_ << "AllCollections: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -1543,7 +1543,7 @@ MySQLMetaImpl::FilesToSearch(const std::string& collection_id, SegmentsSchema& f
<< " OR file_type = " << std::to_string(SegmentSchema::TO_INDEX)
<< " OR file_type = " << std::to_string(SegmentSchema::INDEX) << ");";
ENGINE_LOG_DEBUG << "FilesToSearch: " << statement.str();
LOG_ENGINE_DEBUG_ << "FilesToSearch: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -1581,7 +1581,7 @@ MySQLMetaImpl::FilesToSearch(const std::string& collection_id, SegmentsSchema& f
}
if (res.size() > 0) {
ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-search files";
LOG_ENGINE_DEBUG_ << "Collect " << res.size() << " to-search files";
}
return ret;
} catch (std::exception& e) {
@ -1621,7 +1621,7 @@ MySQLMetaImpl::FilesToMerge(const std::string& collection_id, SegmentsSchema& fi
<< " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << collection_id
<< " AND file_type = " << std::to_string(SegmentSchema::RAW) << " ORDER BY row_count DESC;";
ENGINE_LOG_DEBUG << "FilesToMerge: " << statement.str();
LOG_ENGINE_DEBUG_ << "FilesToMerge: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -1659,7 +1659,7 @@ MySQLMetaImpl::FilesToMerge(const std::string& collection_id, SegmentsSchema& fi
}
if (to_merge_files > 0) {
ENGINE_LOG_TRACE << "Collect " << to_merge_files << " to-merge files";
LOG_ENGINE_TRACE_ << "Collect " << to_merge_files << " to-merge files";
}
return ret;
} catch (std::exception& e) {
@ -1690,7 +1690,7 @@ MySQLMetaImpl::FilesToIndex(SegmentsSchema& files) {
<< " FROM " << META_TABLEFILES << " WHERE file_type = " << std::to_string(SegmentSchema::TO_INDEX)
<< ";";
ENGINE_LOG_DEBUG << "FilesToIndex: " << statement.str();
LOG_ENGINE_DEBUG_ << "FilesToIndex: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -1734,7 +1734,7 @@ MySQLMetaImpl::FilesToIndex(SegmentsSchema& files) {
}
if (res.size() > 0) {
ENGINE_LOG_DEBUG << "Collect " << res.size() << " to-index files";
LOG_ENGINE_DEBUG_ << "Collect " << res.size() << " to-index files";
}
return ret;
} catch (std::exception& e) {
@ -1780,7 +1780,7 @@ MySQLMetaImpl::FilesByType(const std::string& collection_id, const std::vector<i
<< " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << collection_id
<< " AND file_type in (" << types << ");";
ENGINE_LOG_DEBUG << "FilesByType: " << hasNonIndexFilesQuery.str();
LOG_ENGINE_DEBUG_ << "FilesByType: " << hasNonIndexFilesQuery.str();
res = hasNonIndexFilesQuery.store();
} // Scoped Connection
@ -1876,7 +1876,7 @@ MySQLMetaImpl::FilesByType(const std::string& collection_id, const std::vector<i
break;
}
}
ENGINE_LOG_DEBUG << msg;
LOG_ENGINE_DEBUG_ << msg;
}
} catch (std::exception& e) {
return HandleException("Failed to get files by type", e.what());
@ -1925,7 +1925,7 @@ MySQLMetaImpl::FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files)
<< " OR file_type = " << std::to_string(SegmentSchema::TO_INDEX)
<< " OR file_type = " << std::to_string(SegmentSchema::INDEX) << ");";
ENGINE_LOG_DEBUG << "FilesToSearch: " << statement.str();
LOG_ENGINE_DEBUG_ << "FilesToSearch: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -1971,9 +1971,9 @@ MySQLMetaImpl::FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files)
}
if (files.empty()) {
ENGINE_LOG_ERROR << "No file to search in file id list";
LOG_ENGINE_ERROR_ << "No file to search in file id list";
} else {
ENGINE_LOG_DEBUG << "Collect " << files.size() << " files by id";
LOG_ENGINE_DEBUG_ << "Collect " << files.size() << " files by id";
}
return ret;
@ -2013,13 +2013,13 @@ MySQLMetaImpl::Archive() {
<< std::to_string(now - usecs) << " AND file_type <> "
<< std::to_string(SegmentSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "Archive: " << statement.str();
LOG_ENGINE_DEBUG_ << "Archive: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to archive", statement.error());
}
ENGINE_LOG_DEBUG << "Archive old files";
LOG_ENGINE_DEBUG_ << "Archive old files";
} catch (std::exception& e) {
return HandleException("Failed to archive", e.what());
}
@ -2031,7 +2031,7 @@ MySQLMetaImpl::Archive() {
auto to_delete = (sum - limit * G);
DiscardFiles(to_delete);
ENGINE_LOG_DEBUG << "Archive files to free disk";
LOG_ENGINE_DEBUG_ << "Archive files to free disk";
}
}
@ -2059,7 +2059,7 @@ MySQLMetaImpl::Size(uint64_t& result) {
<< " FROM " << META_TABLEFILES << " WHERE file_type <> "
<< std::to_string(SegmentSchema::TO_DELETE) << ";";
ENGINE_LOG_DEBUG << "Size: " << statement.str();
LOG_ENGINE_DEBUG_ << "Size: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -2094,17 +2094,17 @@ MySQLMetaImpl::CleanUpShadowFiles() {
<< " WHERE table_schema = " << mysqlpp::quote << mysql_connection_pool_->getDB()
<< " AND table_name = " << mysqlpp::quote << META_TABLEFILES << ";";
ENGINE_LOG_DEBUG << "CleanUp: " << statement.str();
LOG_ENGINE_DEBUG_ << "CleanUp: " << statement.str();
mysqlpp::StoreQueryResult res = statement.store();
if (!res.empty()) {
ENGINE_LOG_DEBUG << "Remove collection file type as NEW";
LOG_ENGINE_DEBUG_ << "Remove collection file type as NEW";
statement << "DELETE FROM " << META_TABLEFILES << " WHERE file_type IN ("
<< std::to_string(SegmentSchema::NEW) << "," << std::to_string(SegmentSchema::NEW_MERGE) << ","
<< std::to_string(SegmentSchema::NEW_INDEX) << ");";
ENGINE_LOG_DEBUG << "CleanUp: " << statement.str();
LOG_ENGINE_DEBUG_ << "CleanUp: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to clean shadow files", statement.error());
@ -2112,7 +2112,7 @@ MySQLMetaImpl::CleanUpShadowFiles() {
}
if (res.size() > 0) {
ENGINE_LOG_DEBUG << "Clean " << res.size() << " files";
LOG_ENGINE_DEBUG_ << "Clean " << res.size() << " files";
}
} catch (std::exception& e) {
return HandleException("Failed to clean shadow files", e.what());
@ -2148,7 +2148,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
<< std::to_string(SegmentSchema::TO_DELETE) << "," << std::to_string(SegmentSchema::BACKUP) << ")"
<< " AND updated_time < " << std::to_string(now - seconds * US_PS) << ";";
ENGINE_LOG_DEBUG << "CleanUpFilesWithTTL: " << statement.str();
LOG_ENGINE_DEBUG_ << "CleanUpFilesWithTTL: " << statement.str();
mysqlpp::StoreQueryResult res = statement.store();
@ -2167,8 +2167,8 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
// check if the file can be deleted
if (OngoingFileChecker::GetInstance().IsIgnored(collection_file)) {
ENGINE_LOG_DEBUG << "File:" << collection_file.file_id_
<< " currently is in use, not able to delete now";
LOG_ENGINE_DEBUG_ << "File:" << collection_file.file_id_
<< " currently is in use, not able to delete now";
continue; // ignore this file, don't delete it
}
@ -2180,8 +2180,8 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
if (collection_file.file_type_ == (int)SegmentSchema::TO_DELETE) {
// delete file from disk storage
utils::DeleteCollectionFilePath(options_, collection_file);
ENGINE_LOG_DEBUG << "Remove file id:" << collection_file.id_
<< " location:" << collection_file.location_;
LOG_ENGINE_DEBUG_ << "Remove file id:" << collection_file.id_
<< " location:" << collection_file.location_;
delete_ids.emplace_back(std::to_string(collection_file.id_));
collection_ids.insert(collection_file.collection_id_);
@ -2202,7 +2202,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); // remove the last " OR "
statement << "DELETE FROM " << META_TABLEFILES << " WHERE " << idsToDeleteStr << ";";
ENGINE_LOG_DEBUG << "CleanUpFilesWithTTL: " << statement.str();
LOG_ENGINE_DEBUG_ << "CleanUpFilesWithTTL: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to clean up with ttl", statement.error());
@ -2210,7 +2210,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
}
if (clean_files > 0) {
ENGINE_LOG_DEBUG << "Clean " << clean_files << " files expired in " << seconds << " seconds";
LOG_ENGINE_DEBUG_ << "Clean " << clean_files << " files expired in " << seconds << " seconds";
}
} // Scoped Connection
} catch (std::exception& e) {
@ -2238,7 +2238,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
<< " FROM " << META_TABLES << " WHERE state = " << std::to_string(CollectionSchema::TO_DELETE)
<< ";";
ENGINE_LOG_DEBUG << "CleanUpFilesWithTTL: " << statement.str();
LOG_ENGINE_DEBUG_ << "CleanUpFilesWithTTL: " << statement.str();
mysqlpp::StoreQueryResult res = statement.store();
@ -2258,7 +2258,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); // remove the last " OR "
statement << "DELETE FROM " << META_TABLES << " WHERE " << idsToDeleteStr << ";";
ENGINE_LOG_DEBUG << "CleanUpFilesWithTTL: " << statement.str();
LOG_ENGINE_DEBUG_ << "CleanUpFilesWithTTL: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to clean up with ttl", statement.error());
@ -2266,7 +2266,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
}
if (remove_collections > 0) {
ENGINE_LOG_DEBUG << "Remove " << remove_collections << " collections from meta";
LOG_ENGINE_DEBUG_ << "Remove " << remove_collections << " collections from meta";
}
} // Scoped Connection
} catch (std::exception& e) {
@ -2296,7 +2296,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
<< " FROM " << META_TABLEFILES << " WHERE table_id = " << mysqlpp::quote << collection_id
<< ";";
ENGINE_LOG_DEBUG << "CleanUpFilesWithTTL: " << statement.str();
LOG_ENGINE_DEBUG_ << "CleanUpFilesWithTTL: " << statement.str();
mysqlpp::StoreQueryResult res = statement.store();
@ -2306,7 +2306,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
}
if (collection_ids.size() > 0) {
ENGINE_LOG_DEBUG << "Remove " << collection_ids.size() << " collections folder";
LOG_ENGINE_DEBUG_ << "Remove " << collection_ids.size() << " collections folder";
}
}
} catch (std::exception& e) {
@ -2337,7 +2337,7 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
<< " FROM " << META_TABLEFILES << " WHERE segment_id = " << mysqlpp::quote << segment_id.first
<< ";";
ENGINE_LOG_DEBUG << "CleanUpFilesWithTTL: " << statement.str();
LOG_ENGINE_DEBUG_ << "CleanUpFilesWithTTL: " << statement.str();
mysqlpp::StoreQueryResult res = statement.store();
@ -2345,13 +2345,13 @@ MySQLMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/)
utils::DeleteSegment(options_, segment_id.second);
std::string segment_dir;
utils::GetParentPath(segment_id.second.location_, segment_dir);
ENGINE_LOG_DEBUG << "Remove segment directory: " << segment_dir;
LOG_ENGINE_DEBUG_ << "Remove segment directory: " << segment_dir;
++remove_segments;
}
}
if (remove_segments > 0) {
ENGINE_LOG_DEBUG << "Remove " << remove_segments << " segments folder";
LOG_ENGINE_DEBUG_ << "Remove " << remove_segments << " segments folder";
}
}
} catch (std::exception& e) {
@ -2392,7 +2392,7 @@ MySQLMetaImpl::Count(const std::string& collection_id, uint64_t& result) {
<< " OR file_type = " << std::to_string(SegmentSchema::TO_INDEX)
<< " OR file_type = " << std::to_string(SegmentSchema::INDEX) << ");";
ENGINE_LOG_DEBUG << "Count: " << statement.str();
LOG_ENGINE_DEBUG_ << "Count: " << statement.str();
res = statement.store();
} // Scoped Connection
@ -2412,7 +2412,7 @@ MySQLMetaImpl::Count(const std::string& collection_id, uint64_t& result) {
Status
MySQLMetaImpl::DropAll() {
try {
ENGINE_LOG_DEBUG << "Drop all mysql meta";
LOG_ENGINE_DEBUG_ << "Drop all mysql meta";
mysqlpp::ScopedConnection connectionPtr(*mysql_connection_pool_, safe_grab_);
bool is_null_connection = (connectionPtr == nullptr);
@ -2425,7 +2425,7 @@ MySQLMetaImpl::DropAll() {
mysqlpp::Query statement = connectionPtr->query();
statement << "DROP TABLE IF EXISTS " << TABLES_SCHEMA.name() << ", " << TABLEFILES_SCHEMA.name() << ";";
ENGINE_LOG_DEBUG << "DropAll: " << statement.str();
LOG_ENGINE_DEBUG_ << "DropAll: " << statement.str();
if (statement.exec()) {
return Status::OK();
@ -2441,7 +2441,7 @@ MySQLMetaImpl::DiscardFiles(int64_t to_discard_size) {
if (to_discard_size <= 0) {
return Status::OK();
}
ENGINE_LOG_DEBUG << "About to discard size=" << to_discard_size;
LOG_ENGINE_DEBUG_ << "About to discard size=" << to_discard_size;
try {
server::MetricCollector metric;
@ -2462,7 +2462,7 @@ MySQLMetaImpl::DiscardFiles(int64_t to_discard_size) {
<< std::to_string(SegmentSchema::TO_DELETE) << " ORDER BY id ASC "
<< " LIMIT 10;";
ENGINE_LOG_DEBUG << "DiscardFiles: " << statement.str();
LOG_ENGINE_DEBUG_ << "DiscardFiles: " << statement.str();
mysqlpp::StoreQueryResult res = statement.store();
if (res.num_rows() == 0) {
@ -2478,8 +2478,8 @@ MySQLMetaImpl::DiscardFiles(int64_t to_discard_size) {
collection_file.id_ = resRow["id"];
collection_file.file_size_ = resRow["file_size"];
idsToDiscardSS << "id = " << std::to_string(collection_file.id_) << " OR ";
ENGINE_LOG_DEBUG << "Discard file id=" << collection_file.file_id_
<< " file size=" << collection_file.file_size_;
LOG_ENGINE_DEBUG_ << "Discard file id=" << collection_file.file_id_
<< " file size=" << collection_file.file_size_;
to_discard_size -= collection_file.file_size_;
}
@ -2490,7 +2490,7 @@ MySQLMetaImpl::DiscardFiles(int64_t to_discard_size) {
<< " ,updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) << " WHERE "
<< idsToDiscardStr << ";";
ENGINE_LOG_DEBUG << "DiscardFiles: " << statement.str();
LOG_ENGINE_DEBUG_ << "DiscardFiles: " << statement.str();
status = statement.exec();
if (!status) {
@ -2528,7 +2528,7 @@ MySQLMetaImpl::SetGlobalLastLSN(uint64_t lsn) {
if (first_create) { // first time to get global lsn
mysqlpp::Query statement = connectionPtr->query();
statement << "INSERT INTO " << META_ENVIRONMENT << " VALUES(" << lsn << ");";
ENGINE_LOG_DEBUG << "SetGlobalLastLSN: " << statement.str();
LOG_ENGINE_DEBUG_ << "SetGlobalLastLSN: " << statement.str();
if (!statement.exec()) {
return HandleException("QUERY ERROR WHEN SET GLOBAL LSN", statement.error());
@ -2536,7 +2536,7 @@ MySQLMetaImpl::SetGlobalLastLSN(uint64_t lsn) {
} else {
mysqlpp::Query statement = connectionPtr->query();
statement << "UPDATE " << META_ENVIRONMENT << " SET global_lsn = " << lsn << ";";
ENGINE_LOG_DEBUG << "SetGlobalLastLSN: " << statement.str();
LOG_ENGINE_DEBUG_ << "SetGlobalLastLSN: " << statement.str();
if (!statement.exec()) {
return HandleException("Failed to set global lsn", statement.error());
@ -2544,7 +2544,7 @@ MySQLMetaImpl::SetGlobalLastLSN(uint64_t lsn) {
}
} // Scoped Connection
ENGINE_LOG_DEBUG << "Successfully update global_lsn: " << lsn;
LOG_ENGINE_DEBUG_ << "Successfully update global_lsn: " << lsn;
} catch (std::exception& e) {
return HandleException("Failed to set global lsn", e.what());
}
@ -2567,7 +2567,7 @@ MySQLMetaImpl::GetGlobalLastLSN(uint64_t& lsn) {
mysqlpp::Query statement = connectionPtr->query();
statement << "SELECT global_lsn FROM " << META_ENVIRONMENT << ";";
ENGINE_LOG_DEBUG << "GetGlobalLastLSN: " << statement.str();
LOG_ENGINE_DEBUG_ << "GetGlobalLastLSN: " << statement.str();
res = statement.store();
} // Scoped Connection
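
For readers tracking the rename above: the new LOG_ENGINE_* / LOG_SERVER_* / LOG_WAL_* macros are presumably thin, module-tagged wrappers over easylogging++'s LOG(LEVEL) streams. The sketch below is a minimal illustration only; the macro names come from this diff, but the exact definitions and the header they live in are assumptions, not the project's actual code.

// Sketch (assumption): module-tagged wrappers over easylogging++ streams.
#include <easylogging++.h>

#define LOG_ENGINE_DEBUG_ LOG(DEBUG) << "[ENGINE] "
#define LOG_ENGINE_TRACE_ LOG(TRACE) << "[ENGINE] "
#define LOG_ENGINE_ERROR_ LOG(ERROR) << "[ENGINE] "
#define LOG_SERVER_DEBUG_ LOG(DEBUG) << "[SERVER] "
#define LOG_SERVER_ERROR_ LOG(ERROR) << "[SERVER] "
#define LOG_WAL_INFO_ LOG(INFO) << "[WAL] "

// A call site then reads, for example:
//   LOG_ENGINE_DEBUG_ << "GetGlobalLastLSN: " << statement.str();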

View File

@ -46,11 +46,11 @@ namespace {
Status
HandleException(const std::string& desc, const char* what = nullptr) {
if (what == nullptr) {
ENGINE_LOG_ERROR << desc;
LOG_ENGINE_ERROR_ << desc;
return Status(DB_META_TRANSACTION_FAILED, desc);
} else {
std::string msg = desc + ":" + what;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_META_TRANSACTION_FAILED, msg);
}
}
@ -145,7 +145,7 @@ SqliteMetaImpl::Initialize() {
fiu_do_on("SqliteMetaImpl.Initialize.fail_create_directory", ret = false);
if (!ret) {
std::string msg = "Failed to create db directory " + options_.path_;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(DB_INVALID_PATH, msg);
}
}
@ -198,7 +198,7 @@ SqliteMetaImpl::CreateCollection(CollectionSchema& collection_schema) {
return HandleException("Encounter exception when create collection", e.what());
}
ENGINE_LOG_DEBUG << "Successfully create collection: " << collection_schema.collection_id_;
LOG_ENGINE_DEBUG_ << "Successfully create collection: " << collection_schema.collection_id_;
return utils::CreateCollectionPath(options_, collection_schema.collection_id_);
} catch (std::exception& e) {
@ -319,7 +319,7 @@ SqliteMetaImpl::DropCollection(const std::string& collection_id) {
set(c(&CollectionSchema::state_) = (int)CollectionSchema::TO_DELETE),
where(c(&CollectionSchema::collection_id_) == collection_id and c(&CollectionSchema::state_) != (int)CollectionSchema::TO_DELETE));
ENGINE_LOG_DEBUG << "Successfully delete collection, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully delete collection, collection id = " << collection_id;
} catch (std::exception& e) {
return HandleException("Encounter exception when delete collection", e.what());
}
@ -343,7 +343,7 @@ SqliteMetaImpl::DeleteCollectionFiles(const std::string& collection_id) {
where(c(&SegmentSchema::collection_id_) == collection_id and
c(&SegmentSchema::file_type_) != (int)SegmentSchema::TO_DELETE));
ENGINE_LOG_DEBUG << "Successfully delete collection files, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully delete collection files, collection id = " << collection_id;
} catch (std::exception& e) {
return HandleException("Encounter exception when delete collection files", e.what());
}
@ -387,7 +387,7 @@ SqliteMetaImpl::CreateCollectionFile(SegmentSchema& file_schema) {
auto id = ConnectorPtr->insert(file_schema);
file_schema.id_ = id;
ENGINE_LOG_DEBUG << "Successfully create collection file, file id = " << file_schema.file_id_;
LOG_ENGINE_DEBUG_ << "Successfully create collection file, file id = " << file_schema.file_id_;
return utils::CreateCollectionFilePath(options_, file_schema);
} catch (std::exception& e) {
return HandleException("Encounter exception when create collection file", e.what());
@ -439,7 +439,7 @@ SqliteMetaImpl::GetCollectionFiles(const std::string& collection_id, const std::
collection_files.emplace_back(file_schema);
}
ENGINE_LOG_DEBUG << "Get collection files by id";
LOG_ENGINE_DEBUG_ << "Get collection files by id";
return result;
} catch (std::exception& e) {
return HandleException("Encounter exception when lookup collection files", e.what());
@ -489,7 +489,7 @@ SqliteMetaImpl::GetCollectionFilesBySegmentId(const std::string& segment_id,
}
}
ENGINE_LOG_DEBUG << "Get collection files by segment id";
LOG_ENGINE_DEBUG_ << "Get collection files by segment id";
return Status::OK();
} catch (std::exception& e) {
return HandleException("Encounter exception when lookup collection files by segment id", e.what());
@ -504,7 +504,7 @@ SqliteMetaImpl::UpdateCollectionFlag(const std::string& collection_id, int64_t f
// set all backup file to raw
ConnectorPtr->update_all(set(c(&CollectionSchema::flag_) = flag), where(c(&CollectionSchema::collection_id_) == collection_id));
ENGINE_LOG_DEBUG << "Successfully update collection flag, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully update collection flag, collection id = " << collection_id;
} catch (std::exception& e) {
std::string msg = "Encounter exception when update collection flag: collection_id = " + collection_id;
return HandleException(msg, e.what());
@ -520,7 +520,7 @@ SqliteMetaImpl::UpdateCollectionFlushLSN(const std::string& collection_id, uint6
ConnectorPtr->update_all(set(c(&CollectionSchema::flush_lsn_) = flush_lsn),
where(c(&CollectionSchema::collection_id_) == collection_id));
ENGINE_LOG_DEBUG << "Successfully update collection flush_lsn, collection id = " << collection_id << " flush_lsn = " << flush_lsn;;
LOG_ENGINE_DEBUG_ << "Successfully update collection flush_lsn, collection id = " << collection_id << " flush_lsn = " << flush_lsn;;
} catch (std::exception& e) {
std::string msg = "Encounter exception when update collection lsn: collection_id = " + collection_id;
return HandleException(msg, e.what());
@ -571,7 +571,7 @@ SqliteMetaImpl::UpdateCollectionFile(SegmentSchema& file_schema) {
ConnectorPtr->update(file_schema);
ENGINE_LOG_DEBUG << "Update single collection file, file id = " << file_schema.file_id_;
LOG_ENGINE_DEBUG_ << "Update single collection file, file id = " << file_schema.file_id_;
} catch (std::exception& e) {
std::string msg =
"Exception update collection file: collection_id = " + file_schema.collection_id_ + " file_id = " + file_schema.file_id_;
@ -621,7 +621,7 @@ SqliteMetaImpl::UpdateCollectionFiles(SegmentsSchema& files) {
return HandleException("UpdateCollectionFiles error: sqlite transaction failed");
}
ENGINE_LOG_DEBUG << "Update " << files.size() << " collection files";
LOG_ENGINE_DEBUG_ << "Update " << files.size() << " collection files";
} catch (std::exception& e) {
return HandleException("Encounter exception when update collection files", e.what());
}
@ -640,7 +640,7 @@ SqliteMetaImpl::UpdateCollectionFilesRowCount(SegmentsSchema& files) {
ConnectorPtr->update_all(set(c(&SegmentSchema::row_count_) = file.row_count_,
c(&SegmentSchema::updated_time_) = utils::GetMicroSecTimeStamp()),
where(c(&SegmentSchema::file_id_) == file.file_id_));
ENGINE_LOG_DEBUG << "Update file " << file.file_id_ << " row count to " << file.row_count_;
LOG_ENGINE_DEBUG_ << "Update file " << file.file_id_ << " row count to " << file.row_count_;
}
} catch (std::exception& e) {
return HandleException("Encounter exception when update collection files row count", e.what());
@ -692,7 +692,7 @@ SqliteMetaImpl::UpdateCollectionIndex(const std::string& collection_id, const Co
where(c(&SegmentSchema::collection_id_) == collection_id and
c(&SegmentSchema::file_type_) == (int)SegmentSchema::BACKUP));
ENGINE_LOG_DEBUG << "Successfully update collection index, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully update collection index, collection id = " << collection_id;
} catch (std::exception& e) {
std::string msg = "Encounter exception when update collection index: collection_id = " + collection_id;
return HandleException(msg, e.what());
@ -715,7 +715,7 @@ SqliteMetaImpl::UpdateCollectionFilesToIndex(const std::string& collection_id) {
c(&SegmentSchema::row_count_) >= meta::BUILD_INDEX_THRESHOLD and
c(&SegmentSchema::file_type_) == (int)SegmentSchema::RAW));
ENGINE_LOG_DEBUG << "Update files to to_index, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Update files to to_index, collection id = " << collection_id;
} catch (std::exception& e) {
return HandleException("Encounter exception when update collection files to to_index", e.what());
}
@ -783,7 +783,7 @@ SqliteMetaImpl::DropCollectionIndex(const std::string& collection_id) {
set(c(&CollectionSchema::engine_type_) = raw_engine_type, c(&CollectionSchema::index_params_) = "{}"),
where(c(&CollectionSchema::collection_id_) == collection_id));
ENGINE_LOG_DEBUG << "Successfully drop collection index, collection id = " << collection_id;
LOG_ENGINE_DEBUG_ << "Successfully drop collection index, collection id = " << collection_id;
} catch (std::exception& e) {
return HandleException("Encounter exception when delete collection index files", e.what());
}
@ -968,11 +968,11 @@ SqliteMetaImpl::FilesToSearch(const std::string& collection_id, SegmentsSchema&
files.emplace_back(collection_file);
}
if (files.empty()) {
ENGINE_LOG_ERROR << "No file to search for collection: " << collection_id;
LOG_ENGINE_ERROR_ << "No file to search for collection: " << collection_id;
}
if (selected.size() > 0) {
ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-search files";
LOG_ENGINE_DEBUG_ << "Collect " << selected.size() << " to-search files";
}
return ret;
} catch (std::exception& e) {
@ -1038,7 +1038,7 @@ SqliteMetaImpl::FilesToMerge(const std::string& collection_id, SegmentsSchema& f
}
if (to_merge_files > 0) {
ENGINE_LOG_TRACE << "Collect " << to_merge_files << " to-merge files";
LOG_ENGINE_TRACE_ << "Collect " << to_merge_files << " to-merge files";
}
return result;
} catch (std::exception& e) {
@ -1102,7 +1102,7 @@ SqliteMetaImpl::FilesToIndex(SegmentsSchema& files) {
}
if (selected.size() > 0) {
ENGINE_LOG_DEBUG << "Collect " << selected.size() << " to-index files";
LOG_ENGINE_DEBUG_ << "Collect " << selected.size() << " to-index files";
}
return ret;
} catch (std::exception& e) {
@ -1206,7 +1206,7 @@ SqliteMetaImpl::FilesByType(const std::string& collection_id, const std::vector<
default:break;
}
}
ENGINE_LOG_DEBUG << msg;
LOG_ENGINE_DEBUG_ << msg;
}
} catch (std::exception& e) {
return HandleException("Encounter exception when check non index files", e.what());
@ -1284,9 +1284,9 @@ SqliteMetaImpl::FilesByID(const std::vector<size_t>& ids, SegmentsSchema& files)
}
if (files.empty()) {
ENGINE_LOG_ERROR << "No file to search in file id list";
LOG_ENGINE_ERROR_ << "No file to search in file id list";
} else {
ENGINE_LOG_DEBUG << "Collect " << selected.size() << " files by id";
LOG_ENGINE_DEBUG_ << "Collect " << selected.size() << " files by id";
}
return ret;
@ -1323,7 +1323,7 @@ SqliteMetaImpl::Archive() {
return HandleException("Encounter exception when update collection files", e.what());
}
ENGINE_LOG_DEBUG << "Archive old files";
LOG_ENGINE_DEBUG_ << "Archive old files";
}
if (criteria == engine::ARCHIVE_CONF_DISK) {
uint64_t sum = 0;
@ -1332,7 +1332,7 @@ SqliteMetaImpl::Archive() {
int64_t to_delete = (int64_t)sum - limit * G;
DiscardFiles(to_delete);
ENGINE_LOG_DEBUG << "Archive files to free disk";
LOG_ENGINE_DEBUG_ << "Archive files to free disk";
}
}
@ -1375,7 +1375,7 @@ SqliteMetaImpl::CleanUpShadowFiles() {
auto commited = ConnectorPtr->transaction([&]() mutable {
for (auto& file : files) {
ENGINE_LOG_DEBUG << "Remove collection file type as NEW";
LOG_ENGINE_DEBUG_ << "Remove collection file type as NEW";
ConnectorPtr->remove<SegmentSchema>(std::get<0>(file));
}
return true;
@ -1388,7 +1388,7 @@ SqliteMetaImpl::CleanUpShadowFiles() {
}
if (files.size() > 0) {
ENGINE_LOG_DEBUG << "Clean " << files.size() << " files";
LOG_ENGINE_DEBUG_ << "Clean " << files.size() << " files";
}
} catch (std::exception& e) {
return HandleException("Encounter exception when clean collection file", e.what());
@ -1439,7 +1439,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
// check if the file can be deleted
if (OngoingFileChecker::GetInstance().IsIgnored(collection_file)) {
ENGINE_LOG_DEBUG << "File:" << collection_file.file_id_
LOG_ENGINE_DEBUG_ << "File:" << collection_file.file_id_
<< " currently is in use, not able to delete now";
continue; // ignore this file, don't delete it
}
@ -1457,7 +1457,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
// delete file from disk storage
utils::DeleteCollectionFilePath(options_, collection_file);
ENGINE_LOG_DEBUG << "Remove file id:" << collection_file.file_id_ << " location:"
LOG_ENGINE_DEBUG_ << "Remove file id:" << collection_file.file_id_ << " location:"
<< collection_file.location_;
collection_ids.insert(collection_file.collection_id_);
segment_ids.insert(std::make_pair(collection_file.segment_id_, collection_file));
@ -1474,7 +1474,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
}
if (clean_files > 0) {
ENGINE_LOG_DEBUG << "Clean " << clean_files << " files expired in " << seconds << " seconds";
LOG_ENGINE_DEBUG_ << "Clean " << clean_files << " files expired in " << seconds << " seconds";
}
} catch (std::exception& e) {
return HandleException("Encounter exception when clean collection files", e.what());
@ -1506,7 +1506,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
}
if (collections.size() > 0) {
ENGINE_LOG_DEBUG << "Remove " << collections.size() << " collections from meta";
LOG_ENGINE_DEBUG_ << "Remove " << collections.size() << " collections from meta";
}
} catch (std::exception& e) {
return HandleException("Encounter exception when clean collection files", e.what());
@ -1529,7 +1529,7 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
}
if (remove_collections) {
ENGINE_LOG_DEBUG << "Remove " << remove_collections << " collections folder";
LOG_ENGINE_DEBUG_ << "Remove " << remove_collections << " collections folder";
}
} catch (std::exception& e) {
return HandleException("Encounter exception when delete collection folder", e.what());
@ -1549,13 +1549,13 @@ SqliteMetaImpl::CleanUpFilesWithTTL(uint64_t seconds /*, CleanUpFilter* filter*/
utils::DeleteSegment(options_, segment_id.second);
std::string segment_dir;
utils::GetParentPath(segment_id.second.location_, segment_dir);
ENGINE_LOG_DEBUG << "Remove segment directory: " << segment_dir;
LOG_ENGINE_DEBUG_ << "Remove segment directory: " << segment_dir;
++remove_segments;
}
}
if (remove_segments > 0) {
ENGINE_LOG_DEBUG << "Remove " << remove_segments << " segments folder";
LOG_ENGINE_DEBUG_ << "Remove " << remove_segments << " segments folder";
}
} catch (std::exception& e) {
return HandleException("Encounter exception when delete collection folder", e.what());
@ -1597,7 +1597,7 @@ SqliteMetaImpl::Count(const std::string& collection_id, uint64_t& result) {
Status
SqliteMetaImpl::DropAll() {
ENGINE_LOG_DEBUG << "Drop all sqlite meta";
LOG_ENGINE_DEBUG_ << "Drop all sqlite meta";
try {
ConnectorPtr->drop_table(META_TABLES);
@ -1615,7 +1615,7 @@ SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) {
return Status::OK();
}
ENGINE_LOG_DEBUG << "About to discard size=" << to_discard_size;
LOG_ENGINE_DEBUG_ << "About to discard size=" << to_discard_size;
try {
fiu_do_on("SqliteMetaImpl.DiscardFiles.throw_exception", throw std::exception());
@ -1640,7 +1640,7 @@ SqliteMetaImpl::DiscardFiles(int64_t to_discard_size) {
collection_file.id_ = std::get<0>(file);
collection_file.file_size_ = std::get<1>(file);
ids.push_back(collection_file.id_);
ENGINE_LOG_DEBUG << "Discard file id=" << collection_file.file_id_
LOG_ENGINE_DEBUG_ << "Discard file id=" << collection_file.file_id_
<< " file size=" << collection_file.file_size_;
to_discard_size -= collection_file.file_size_;
}
@ -1685,7 +1685,7 @@ SqliteMetaImpl::SetGlobalLastLSN(uint64_t lsn) {
ConnectorPtr->update_all(set(c(&EnvironmentSchema::global_lsn_) = lsn));
}
ENGINE_LOG_DEBUG << "Update global lsn = " << lsn;
LOG_ENGINE_DEBUG_ << "Update global lsn = " << lsn;
} catch (std::exception& e) {
std::string msg = "Exception update global lsn = " + lsn;
return HandleException(msg, e.what());

View File

@ -50,7 +50,7 @@ MXLogBuffer::~MXLogBuffer() {
*/
bool
MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
WAL_LOG_DEBUG << "start_lsn " << start_lsn << " end_lsn " << end_lsn;
LOG_WAL_DEBUG_ << "start_lsn " << start_lsn << " end_lsn " << end_lsn;
ParserLsn(start_lsn, mxlog_buffer_reader_.file_no, mxlog_buffer_reader_.buf_offset);
ParserLsn(end_lsn, mxlog_buffer_writer_.file_no, mxlog_buffer_writer_.buf_offset);
@ -72,7 +72,7 @@ MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
file_handler.SetFileName(ToFileName(i));
auto file_size = file_handler.GetFileSize();
if (file_size == 0) {
WAL_LOG_ERROR << "bad wal file " << i;
LOG_WAL_ERROR_ << "bad wal file " << i;
return false;
}
if (file_size > buffer_size_need) {
@ -85,7 +85,7 @@ MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
if (buffer_size_need > mxlog_buffer_size_) {
mxlog_buffer_size_ = buffer_size_need;
WAL_LOG_INFO << "recovery will need more buffer, buffer size changed " << mxlog_buffer_size_;
LOG_WAL_INFO_ << "recovery will need more buffer, buffer size changed " << mxlog_buffer_size_;
}
}
@ -104,14 +104,14 @@ MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
} else {
mxlog_writer_.SetFileOpenMode("r+");
if (!mxlog_writer_.FileExists()) {
WAL_LOG_ERROR << "wal file not exist " << mxlog_buffer_writer_.file_no;
LOG_WAL_ERROR_ << "wal file not exist " << mxlog_buffer_writer_.file_no;
return false;
}
auto read_offset = mxlog_buffer_reader_.buf_offset;
auto read_size = mxlog_buffer_writer_.buf_offset - mxlog_buffer_reader_.buf_offset;
if (!mxlog_writer_.Load(buf_[0].get() + read_offset, read_offset, read_size)) {
WAL_LOG_ERROR << "load wal file error " << read_offset << " " << read_size;
LOG_WAL_ERROR_ << "load wal file error " << read_offset << " " << read_size;
return false;
}
}
@ -135,11 +135,11 @@ MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
mxlog_writer_.SetFileName(ToFileName(mxlog_buffer_writer_.file_no));
mxlog_writer_.SetFileOpenMode("r+");
if (!mxlog_writer_.FileExists()) {
WAL_LOG_ERROR << "wal file not exist " << mxlog_buffer_writer_.file_no;
LOG_WAL_ERROR_ << "wal file not exist " << mxlog_buffer_writer_.file_no;
return false;
}
if (!mxlog_writer_.Load(buf_[1].get(), 0, mxlog_buffer_writer_.buf_offset)) {
WAL_LOG_ERROR << "load wal file error " << mxlog_buffer_writer_.file_no;
LOG_WAL_ERROR_ << "load wal file error " << mxlog_buffer_writer_.file_no;
return false;
}
}
@ -151,7 +151,7 @@ MXLogBuffer::Init(uint64_t start_lsn, uint64_t end_lsn) {
void
MXLogBuffer::Reset(uint64_t lsn) {
WAL_LOG_DEBUG << "reset lsn " << lsn;
LOG_WAL_DEBUG_ << "reset lsn " << lsn;
buf_[0] = BufferPtr(new char[mxlog_buffer_size_]);
buf_[1] = BufferPtr(new char[mxlog_buffer_size_]);
@ -206,7 +206,7 @@ MXLogBuffer::Append(MXLogRecord& record) {
// Reborn means close old wal file and open new wal file
if (!mxlog_writer_.ReBorn(ToFileName(mxlog_buffer_writer_.file_no), "w")) {
WAL_LOG_ERROR << "ReBorn wal file error " << mxlog_buffer_writer_.file_no;
LOG_WAL_ERROR_ << "ReBorn wal file error " << mxlog_buffer_writer_.file_no;
return WAL_FILE_ERROR;
}
}
@ -247,7 +247,7 @@ MXLogBuffer::Append(MXLogRecord& record) {
bool write_rst = mxlog_writer_.Write(current_write_buf + mxlog_buffer_writer_.buf_offset, record_size);
if (!write_rst) {
WAL_LOG_ERROR << "write wal file error";
LOG_WAL_ERROR_ << "write wal file error";
return WAL_FILE_ERROR;
}
@ -289,7 +289,7 @@ MXLogBuffer::Next(const uint64_t last_applied_lsn, MXLogRecord& record) {
mxlog_reader.SetFileOpenMode("r");
uint32_t file_size = mxlog_reader.Load(buf_[mxlog_buffer_reader_.buf_idx].get(), 0);
if (file_size == 0) {
WAL_LOG_ERROR << "load wal file error " << mxlog_buffer_reader_.file_no;
LOG_WAL_ERROR_ << "load wal file error " << mxlog_buffer_reader_.file_no;
return WAL_FILE_ERROR;
}
mxlog_buffer_reader_.max_offset = file_size;
@ -346,29 +346,29 @@ MXLogBuffer::GetReadLsn() {
bool
MXLogBuffer::ResetWriteLsn(uint64_t lsn) {
WAL_LOG_INFO << "reset write lsn " << lsn;
LOG_WAL_INFO_ << "reset write lsn " << lsn;
int32_t old_file_no = mxlog_buffer_writer_.file_no;
ParserLsn(lsn, mxlog_buffer_writer_.file_no, mxlog_buffer_writer_.buf_offset);
if (old_file_no == mxlog_buffer_writer_.file_no) {
WAL_LOG_DEBUG << "file No. is not changed";
LOG_WAL_DEBUG_ << "file No. is not changed";
return true;
}
std::unique_lock<std::mutex> lck(mutex_);
if (mxlog_buffer_writer_.file_no == mxlog_buffer_reader_.file_no) {
mxlog_buffer_writer_.buf_idx = mxlog_buffer_reader_.buf_idx;
WAL_LOG_DEBUG << "file No. is the same as reader";
LOG_WAL_DEBUG_ << "file No. is the same as reader";
return true;
}
lck.unlock();
if (!mxlog_writer_.ReBorn(ToFileName(mxlog_buffer_writer_.file_no), "r+")) {
WAL_LOG_ERROR << "reborn file error " << mxlog_buffer_writer_.file_no;
LOG_WAL_ERROR_ << "reborn file error " << mxlog_buffer_writer_.file_no;
return false;
}
if (!mxlog_writer_.Load(buf_[mxlog_buffer_writer_.buf_idx].get(), 0, mxlog_buffer_writer_.buf_offset)) {
WAL_LOG_ERROR << "load file error";
LOG_WAL_ERROR_ << "load file error";
return false;
}
@ -387,7 +387,7 @@ MXLogBuffer::SetFileNoFrom(uint32_t file_no) {
if (!file_handler.FileExists()) {
break;
}
WAL_LOG_INFO << "Delete wal file " << file_no;
LOG_WAL_INFO_ << "Delete wal file " << file_no;
file_handler.DeleteFile();
} while (file_no > 0);
}
@ -402,7 +402,7 @@ MXLogBuffer::RemoveOldFiles(uint64_t flushed_lsn) {
MXLogFileHandler file_handler(mxlog_writer_.GetFilePath());
do {
file_handler.SetFileName(ToFileName(file_no_from_));
WAL_LOG_INFO << "Delete wal file " << file_no_from_;
LOG_WAL_INFO_ << "Delete wal file " << file_no_from_;
file_handler.DeleteFile();
} while (++file_no_from_ < file_no);
}

View File

@ -41,7 +41,7 @@ WalManager::WalManager(const MXLogConfiguration& config) {
auto status = server::CommonUtil::CreateDirectory(mxlog_config_.mxlog_path);
if (!status.ok()) {
std::string msg = "failed to create wal directory " + mxlog_config_.mxlog_path;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
throw Exception(WAL_PATH_ERROR, msg);
}
}
@ -148,8 +148,8 @@ WalManager::GetNextRecovery(MXLogRecord& record) {
}
}
WAL_LOG_INFO << "record type " << (int32_t)record.type << " record lsn " << record.lsn << " error code "
<< error_code;
LOG_WAL_INFO_ << "record type " << (int32_t)record.type << " record lsn " << record.lsn << " error code "
<< error_code;
return error_code;
}
@ -166,7 +166,7 @@ WalManager::GetNextRecord(MXLogRecord& record) {
record.lsn = flush_info_.lsn_;
flush_info_.Clear();
WAL_LOG_INFO << "record flush collection " << record.collection_id << " lsn " << record.lsn;
LOG_WAL_INFO_ << "record flush collection " << record.collection_id << " lsn " << record.lsn;
return true;
}
}
@ -195,14 +195,14 @@ WalManager::GetNextRecord(MXLogRecord& record) {
}
}
WAL_LOG_INFO << "record type " << (int32_t)record.type << " collection " << record.collection_id << " lsn "
<< record.lsn;
LOG_WAL_INFO_ << "record type " << (int32_t)record.type << " collection " << record.collection_id << " lsn "
<< record.lsn;
return error_code;
}
uint64_t
WalManager::CreateCollection(const std::string& collection_id) {
WAL_LOG_INFO << "create collection " << collection_id << " " << last_applied_lsn_;
LOG_WAL_INFO_ << "create collection " << collection_id << " " << last_applied_lsn_;
std::lock_guard<std::mutex> lck(mutex_);
uint64_t applied_lsn = last_applied_lsn_;
tables_[collection_id] = {applied_lsn, applied_lsn};
@ -211,7 +211,7 @@ WalManager::CreateCollection(const std::string& collection_id) {
void
WalManager::DropCollection(const std::string& collection_id) {
WAL_LOG_INFO << "drop collection " << collection_id;
LOG_WAL_INFO_ << "drop collection " << collection_id;
std::lock_guard<std::mutex> lck(mutex_);
tables_.erase(collection_id);
}
@ -225,7 +225,7 @@ WalManager::CollectionFlushed(const std::string& collection_id, uint64_t lsn) {
}
lck.unlock();
WAL_LOG_INFO << collection_id << " is flushed by lsn " << lsn;
LOG_WAL_INFO_ << collection_id << " is flushed by lsn " << lsn;
}
template <typename T>
@ -243,7 +243,7 @@ WalManager::Insert(const std::string& collection_id, const std::string& partitio
size_t vector_num = vector_ids.size();
if (vector_num == 0) {
WAL_LOG_ERROR << LogOut("[%s][%ld] The ids is empty.", "insert", 0);
LOG_WAL_ERROR_ << LogOut("[%s][%ld] The ids are empty.", "insert", 0);
return false;
}
size_t dim = vectors.size() / vector_num;
@ -265,8 +265,8 @@ WalManager::Insert(const std::string& collection_id, const std::string& partitio
max_rcd_num = (mxlog_config_.buffer_size - head_size) / unit_size;
}
if (max_rcd_num == 0) {
WAL_LOG_ERROR << LogOut("[%s][%ld]", "insert", 0) << "Wal buffer size is too small "
<< mxlog_config_.buffer_size << " unit " << unit_size;
LOG_WAL_ERROR_ << LogOut("[%s][%ld]", "insert", 0) << "Wal buffer size is too small "
<< mxlog_config_.buffer_size << " unit " << unit_size;
return false;
}
@ -291,8 +291,8 @@ WalManager::Insert(const std::string& collection_id, const std::string& partitio
}
lck.unlock();
WAL_LOG_INFO << LogOut("[%s][%ld]", "insert", 0) << collection_id << " insert in part " << partition_tag
<< " with lsn " << new_lsn;
LOG_WAL_INFO_ << LogOut("[%s][%ld]", "insert", 0) << collection_id << " insert in part " << partition_tag
<< " with lsn " << new_lsn;
return p_meta_handler_->SetMXLogInternalMeta(new_lsn);
}
@ -301,7 +301,7 @@ bool
WalManager::DeleteById(const std::string& collection_id, const IDNumbers& vector_ids) {
size_t vector_num = vector_ids.size();
if (vector_num == 0) {
WAL_LOG_ERROR << "The ids is empty.";
LOG_WAL_ERROR_ << "The ids is empty.";
return false;
}
@ -344,7 +344,7 @@ WalManager::DeleteById(const std::string& collection_id, const IDNumbers& vector
}
lck.unlock();
WAL_LOG_INFO << collection_id << " delete rows by id, lsn " << new_lsn;
LOG_WAL_INFO_ << collection_id << " delete rows by id, lsn " << new_lsn;
return p_meta_handler_->SetMXLogInternalMeta(new_lsn);
}
@ -381,7 +381,7 @@ WalManager::Flush(const std::string& collection_id) {
flush_info_.lsn_ = lsn;
}
WAL_LOG_INFO << collection_id << " want to be flush, lsn " << lsn;
LOG_WAL_INFO_ << collection_id << " want to be flush, lsn " << lsn;
return lsn;
}

View File

@ -40,7 +40,7 @@ KnowhereResource::Initialize() {
faiss::faiss_use_avx512 = use_avx512;
std::string cpu_flag;
if (faiss::hook_init(cpu_flag)) {
ENGINE_LOG_DEBUG << "FAISS hook " << cpu_flag;
LOG_ENGINE_DEBUG_ << "FAISS hook " << cpu_flag;
} else {
return Status(KNOWHERE_UNEXPECTED_ERROR, "FAISS hook fail, CPU not supported!");
}

View File

@ -63,13 +63,13 @@ SystemInfo::Init() {
nvmlresult = nvmlInit();
fiu_do_on("SystemInfo.Init.nvmInit_fail", nvmlresult = NVML_ERROR_NOT_FOUND);
if (NVML_SUCCESS != nvmlresult) {
SERVER_LOG_ERROR << "System information initilization failed";
LOG_SERVER_ERROR_ << "System information initilization failed";
return;
}
nvmlresult = nvmlDeviceGetCount(&num_device_);
fiu_do_on("SystemInfo.Init.nvm_getDevice_fail", nvmlresult = NVML_ERROR_NOT_FOUND);
if (NVML_SUCCESS != nvmlresult) {
SERVER_LOG_ERROR << "Unable to get devidce number";
LOG_SERVER_ERROR_ << "Unable to get devidce number";
return;
}
#endif
@ -158,7 +158,7 @@ SystemInfo::getTotalCpuTime(std::vector<uint64_t>& work_time_array) {
FILE* file = fopen("/proc/stat", "r");
fiu_do_on("SystemInfo.getTotalCpuTime.open_proc", file = NULL);
if (file == NULL) {
SERVER_LOG_ERROR << "Could not open stat file";
LOG_SERVER_ERROR_ << "Could not open stat file";
return total_time_array;
}
@ -170,7 +170,7 @@ SystemInfo::getTotalCpuTime(std::vector<uint64_t>& work_time_array) {
char* ret = fgets(buffer, sizeof(buffer) - 1, file);
fiu_do_on("SystemInfo.getTotalCpuTime.read_proc", ret = NULL);
if (ret == NULL) {
SERVER_LOG_ERROR << "Could not read stat file";
LOG_SERVER_ERROR_ << "Could not read stat file";
fclose(file);
return total_time_array;
}
@ -265,7 +265,7 @@ SystemInfo::CPUTemperature() {
dir = opendir(path.c_str());
fiu_do_on("SystemInfo.CPUTemperature.opendir", dir = NULL);
if (!dir) {
SERVER_LOG_ERROR << "Could not open hwmon directory";
LOG_SERVER_ERROR_ << "Could not open hwmon directory";
return result;
}
@ -283,7 +283,7 @@ SystemInfo::CPUTemperature() {
FILE* file = fopen(object.c_str(), "r");
fiu_do_on("SystemInfo.CPUTemperature.openfile", file = NULL);
if (file == nullptr) {
SERVER_LOG_ERROR << "Could not open temperature file";
LOG_SERVER_ERROR_ << "Could not open temperature file";
return result;
}
float temp;

View File

@ -49,7 +49,7 @@ PrometheusMetrics::Init() {
// Pushgateway Registry
gateway_->RegisterCollectable(registry_);
} catch (std::exception& ex) {
SERVER_LOG_ERROR << "Failed to connect prometheus server: " << std::string(ex.what());
LOG_SERVER_ERROR_ << "Failed to connect prometheus server: " << std::string(ex.what());
return Status(SERVER_UNEXPECTED_ERROR, ex.what());
}

View File

@ -293,7 +293,7 @@ class PrometheusMetrics : public MetricsBase {
PushToGateway() override {
if (startup_) {
if (gateway_->Push() != 200) {
ENGINE_LOG_WARNING << "Metrics pushgateway failed";
LOG_ENGINE_WARNING_ << "Metrics pushgateway failed";
}
}
}

View File

@ -68,6 +68,7 @@ JobMgr::Put(const JobPtr& job) {
void
JobMgr::worker_function() {
SetThreadName("jobmgr_thread");
while (running_) {
std::unique_lock<std::mutex> lock(mutex_);
cv_.wait(lock, [this] { return !queue_.empty(); });

View File

@ -19,8 +19,8 @@ namespace scheduler {
void
ResourceMgr::Start() {
if (not check_resource_valid()) {
ENGINE_LOG_ERROR << "Resources invalid, cannot start ResourceMgr.";
ENGINE_LOG_ERROR << Dump();
LOG_ENGINE_ERROR_ << "Resources invalid, cannot start ResourceMgr.";
LOG_ENGINE_ERROR_ << Dump();
return;
}
@ -54,7 +54,7 @@ ResourceMgr::Add(ResourcePtr&& resource) {
std::lock_guard<std::mutex> lck(resources_mutex_);
if (running_) {
ENGINE_LOG_ERROR << "ResourceMgr is running, not allow to add resource";
LOG_ENGINE_ERROR_ << "ResourceMgr is running, not allow to add resource";
return ret;
}
@ -97,7 +97,7 @@ void
ResourceMgr::Clear() {
std::lock_guard<std::mutex> lck(resources_mutex_);
if (running_) {
ENGINE_LOG_ERROR << "ResourceMgr is running, cannot clear.";
LOG_ENGINE_ERROR_ << "ResourceMgr is running, cannot clear.";
return;
}
disk_resources_.clear();
@ -237,6 +237,7 @@ ResourceMgr::post_event(const EventPtr& event) {
void
ResourceMgr::event_process() {
SetThreadName("resevt_thread");
while (running_) {
std::unique_lock<std::mutex> lock(event_mutex_);
event_cv_.wait(lock, [this] { return !queue_.empty(); });

View File

@ -110,14 +110,14 @@ class OptimizerInst {
for (auto build_id : build_gpus) {
build_msg.append(" gpu" + std::to_string(build_id));
}
SERVER_LOG_DEBUG << LogOut("[%s][%d] %s", "search", 0, build_msg.c_str());
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] %s", "search", 0, build_msg.c_str());
std::string search_msg = "Search gpu:";
for (auto search_id : search_gpus) {
search_msg.append(" gpu" + std::to_string(search_id));
}
search_msg.append(". gpu_search_threshold:" + std::to_string(gpu_search_threshold));
SERVER_LOG_DEBUG << LogOut("[%s][%d] %s", "search", 0, build_msg.c_str());
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] %s", "search", 0, build_msg.c_str());
pass_list.push_back(std::make_shared<BuildIndexPass>());
pass_list.push_back(std::make_shared<FaissFlatPass>());
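
The LogOut(...) expressions streamed into these macros look like a printf-style formatter that returns a string; the variadic arguments must therefore appear in the same order as the format specifiers. A minimal sketch under that assumption (buffer size and truncation behavior are illustrative, not taken from the source):

// Sketch (assumption): LogOut formats printf-style arguments into a
// std::string so the result can be streamed into the LOG_*_ macros.
#include <cstdarg>
#include <cstdio>
#include <string>

std::string
LogOut(const char* pattern, ...) {
    char buffer[256] = {'\0'};  // illustrative size only
    va_list args;
    va_start(args, pattern);
    vsnprintf(buffer, sizeof(buffer), pattern, args);
    va_end(args);
    return std::string(buffer);
}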

View File

@ -79,6 +79,7 @@ Scheduler::process(const EventPtr& event) {
void
Scheduler::worker_function() {
SetThreadName("schedevt_thread");
while (running_) {
std::unique_lock<std::mutex> lock(event_mutex_);
event_cv_.wait(lock, [this] { return !event_queue_.empty(); });

View File

@ -173,7 +173,7 @@ TaskTable::PickToLoad(uint64_t limit) {
// if task is a build index task, limit it
if (task->Type() == TaskType::BuildIndexTask && task->path().Current() == "cpu") {
if (BuildMgrInst::GetInstance()->NumOfAvailable() < 1) {
SERVER_LOG_WARNING << "BuildMgr doesnot have available place for building index";
LOG_SERVER_WARNING_ << "BuildMgr doesnot have available place for building index";
continue;
}
}
@ -188,7 +188,7 @@ TaskTable::PickToLoad(uint64_t limit) {
size_t count = 0;
for (uint64_t j = last_finish_ + 1; j < table_.size(); ++j) {
if (not table_[j]) {
SERVER_LOG_WARNING << "collection[" << j << "] is nullptr";
LOG_SERVER_WARNING_ << "collection[" << j << "] is nullptr";
}
if (table_[j]->task->path().Current() == "cpu") {

View File

@ -31,8 +31,8 @@ BuildIndexJob::AddToIndexFiles(const engine::meta::SegmentSchemaPtr& to_index_fi
return false;
}
SERVER_LOG_DEBUG << "BuildIndexJob " << id() << " add to_index file: " << to_index_file->id_
<< ", location: " << to_index_file->location_;
LOG_SERVER_DEBUG_ << "BuildIndexJob " << id() << " add to_index file: " << to_index_file->id_
<< ", location: " << to_index_file->location_;
to_index_files_[to_index_file->id_] = to_index_file;
return true;
@ -42,7 +42,7 @@ void
BuildIndexJob::WaitBuildIndexFinish() {
std::unique_lock<std::mutex> lock(mutex_);
cv_.wait(lock, [this] { return to_index_files_.empty(); });
SERVER_LOG_DEBUG << "BuildIndexJob " << id() << " all done";
LOG_SERVER_DEBUG_ << "BuildIndexJob " << id() << " all done";
}
void
@ -50,7 +50,7 @@ BuildIndexJob::BuildIndexDone(size_t to_index_id) {
std::unique_lock<std::mutex> lock(mutex_);
to_index_files_.erase(to_index_id);
cv_.notify_all();
SERVER_LOG_DEBUG << "BuildIndexJob " << id() << " finish index file: " << to_index_id;
LOG_SERVER_DEBUG_ << "BuildIndexJob " << id() << " finish index file: " << to_index_id;
}
json

View File

@ -28,7 +28,7 @@ SearchJob::AddIndexFile(const SegmentSchemaPtr& index_file) {
return false;
}
SERVER_LOG_DEBUG << LogOut("[%s][%ld] SearchJob %ld add index file: %ld", "search", 0, id(), index_file->id_);
LOG_SERVER_DEBUG_ << LogOut("[%s][%ld] SearchJob %ld add index file: %ld", "search", 0, id(), index_file->id_);
index_files_[index_file->id_] = index_file;
return true;
@ -38,7 +38,7 @@ void
SearchJob::WaitResult() {
std::unique_lock<std::mutex> lock(mutex_);
cv_.wait(lock, [this] { return index_files_.empty(); });
SERVER_LOG_DEBUG << LogOut("[%s][%ld] SearchJob %ld all done", "search", 0, id());
LOG_SERVER_DEBUG_ << LogOut("[%s][%ld] SearchJob %ld all done", "search", 0, id());
}
void
@ -49,7 +49,7 @@ SearchJob::SearchDone(size_t index_id) {
cv_.notify_all();
}
SERVER_LOG_DEBUG << LogOut("[%s][%ld] SearchJob %ld finish index file: %ld", "search", 0, id(), index_id);
LOG_SERVER_DEBUG_ << LogOut("[%s][%ld] SearchJob %ld finish index file: %ld", "search", 0, id(), index_id);
}
ResultIds&

View File

@ -39,19 +39,19 @@ BuildIndexPass::Run(const TaskPtr& task) {
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << "Gpu disabled, specify cpu to build index!";
LOG_SERVER_DEBUG_ << "Gpu disabled, specify cpu to build index!";
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
fiu_do_on("BuildIndexPass.Run.empty_gpu_ids", build_gpus_.clear());
if (build_gpus_.empty()) {
SERVER_LOG_WARNING << "BuildIndexPass cannot get build index gpu!";
LOG_SERVER_WARNING_ << "BuildIndexPass cannot get build index gpu!";
return false;
}
if (specified_gpu_id_ >= build_gpus_.size()) {
specified_gpu_id_ = specified_gpu_id_ % build_gpus_.size();
}
SERVER_LOG_DEBUG << "Specify gpu" << specified_gpu_id_ << " to build index!";
LOG_SERVER_DEBUG_ << "Specify gpu" << specified_gpu_id_ << " to build index!";
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, build_gpus_[specified_gpu_id_]);
specified_gpu_id_ = (specified_gpu_id_ + 1) % build_gpus_.size();
}

View File

@ -54,16 +54,16 @@ FaissFlatPass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissFlatPass: gpu disable, specify cpu to search!", "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissFlatPass: gpu disable, specify cpu to search!", "search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissFlatPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissFlatPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % search_gpus_.size();
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissFlatPass: nq > gpu_search_threshold, specify gpu %d to search!",
best_device_id, "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissFlatPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
++count_;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]);
}

View File

@ -55,16 +55,16 @@ FaissIVFFlatPass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFFlatPass: gpu disable, specify cpu to search!", "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFFlatPass: gpu disable, specify cpu to search!", "search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFFlatPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFFlatPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % search_gpus_.size();
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFFlatPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFFlatPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]);
}

View File

@ -57,16 +57,16 @@ FaissIVFPQPass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFPQPass: gpu disable, specify cpu to search!", "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFPQPass: gpu disable, specify cpu to search!", "search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFPQPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFPQPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % search_gpus_.size();
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFPQPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFPQPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
++count_;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]);
}

View File

@ -54,17 +54,17 @@ FaissIVFSQ8HPass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8HPass: gpu disable, specify cpu to search!", "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8HPass: gpu disable, specify cpu to search!", "search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
}
if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8HPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8HPass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % search_gpus_.size();
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8HPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8HPass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
++count_;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]);
}

View File

@ -55,16 +55,16 @@ FaissIVFSQ8Pass::Run(const TaskPtr& task) {
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (!gpu_enable_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8Pass: gpu disable, specify cpu to search!", "search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8Pass: gpu disable, specify cpu to search!", "search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8Pass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8Pass: nq < gpu_search_threshold, specify cpu to search!",
"search", 0);
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % search_gpus_.size();
SERVER_LOG_DEBUG << LogOut("[%s][%d] FaissIVFSQ8Pass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
LOG_SERVER_DEBUG_ << LogOut("[%s][%d] FaissIVFSQ8Pass: nq > gpu_search_threshold, specify gpu %d to search!",
"search", 0, best_device_id);
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, search_gpus_[best_device_id]);
}

View File

@ -27,7 +27,7 @@ FallbackPass::Run(const TaskPtr& task) {
return false;
}
// NEVER be empty
SERVER_LOG_DEBUG << "FallbackPass!";
LOG_SERVER_DEBUG_ << "FallbackPass!";
auto cpu = ResMgrInst::GetInstance()->GetCpuResources()[0];
auto label = std::make_shared<SpecResLabel>(cpu);
task->label() = label;

View File

@ -153,6 +153,7 @@ Resource::pick_task_execute() {
void
Resource::loader_function() {
SetThreadName("taskloader_th");
while (running_) {
std::unique_lock<std::mutex> lock(load_mutex_);
load_cv_.wait(lock, [&] { return load_flag_; });
@ -165,7 +166,7 @@ Resource::loader_function() {
}
if (task_item->task->Type() == TaskType::BuildIndexTask && name() == "cpu") {
BuildMgrInst::GetInstance()->Take();
SERVER_LOG_DEBUG << name() << " load BuildIndexTask";
LOG_SERVER_DEBUG_ << name() << " load BuildIndexTask";
}
LoadFile(task_item->task);
task_item->Loaded();
@ -183,6 +184,7 @@ Resource::loader_function() {
void
Resource::executor_function() {
SetThreadName("taskexector_th");
if (subscriber_) {
auto event = std::make_shared<StartUpEvent>(shared_from_this());
subscriber_(std::static_pointer_cast<Event>(event));

View File

@ -136,7 +136,7 @@ XBuildIndexTask::Execute() {
fiu_do_on("XBuildIndexTask.Execute.create_table_success", status = Status::OK());
if (!status.ok()) {
ENGINE_LOG_ERROR << "Failed to create collection file: " << status.ToString();
LOG_ENGINE_ERROR_ << "Failed to create collection file: " << status.ToString();
build_index_job->BuildIndexDone(to_index_id_);
build_index_job->GetStatus() = status;
to_index_engine_ = nullptr;
@ -146,7 +146,7 @@ XBuildIndexTask::Execute() {
auto failed_build_index = [&](std::string log_msg, std::string err_msg) {
table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
status = meta_ptr->UpdateCollectionFile(table_file);
ENGINE_LOG_ERROR << log_msg;
LOG_ENGINE_ERROR_ << log_msg;
build_index_job->BuildIndexDone(to_index_id_);
build_index_job->GetStatus() = Status(DB_ERROR, err_msg);
@ -155,7 +155,7 @@ XBuildIndexTask::Execute() {
// step 2: build index
try {
ENGINE_LOG_DEBUG << "Begin build index for file:" + table_file.location_;
LOG_ENGINE_DEBUG_ << "Begin build index for file:" + table_file.location_;
index = to_index_engine_->BuildIndex(table_file.location_, (EngineType)table_file.engine_type_);
fiu_do_on("XBuildIndexTask.Execute.build_index_fail", index = nullptr);
if (index == nullptr) {
@ -215,9 +215,9 @@ XBuildIndexTask::Execute() {
fiu_do_on("XBuildIndexTask.Execute.update_table_file_fail", status = Status(SERVER_UNEXPECTED_ERROR, ""));
if (status.ok()) {
ENGINE_LOG_DEBUG << "New index file " << table_file.file_id_ << " of size " << table_file.file_size_
<< " bytes"
<< " from file " << origin_file.file_id_;
LOG_ENGINE_DEBUG_ << "New index file " << table_file.file_id_ << " of size " << table_file.file_size_
<< " bytes"
<< " from file " << origin_file.file_id_;
if (build_index_job->options().insert_cache_immediately_) {
index->Cache();
}
@ -225,12 +225,13 @@ XBuildIndexTask::Execute() {
// failed to update meta, mark the new file as to_delete, don't delete old file
origin_file.file_type_ = engine::meta::SegmentSchema::TO_INDEX;
status = meta_ptr->UpdateCollectionFile(origin_file);
ENGINE_LOG_DEBUG << "Failed to update file to index, mark file: " << origin_file.file_id_ << " to to_index";
LOG_ENGINE_DEBUG_ << "Failed to update file to index, mark file: " << origin_file.file_id_
<< " to to_index";
table_file.file_type_ = engine::meta::SegmentSchema::TO_DELETE;
status = meta_ptr->UpdateCollectionFile(table_file);
ENGINE_LOG_DEBUG << "Failed to up date file to index, mark file: " << table_file.file_id_
<< " to to_delete";
LOG_ENGINE_DEBUG_ << "Failed to up date file to index, mark file: " << table_file.file_id_
<< " to to_delete";
}
build_index_job->BuildIndexDone(to_index_id_);

View File

@ -57,7 +57,7 @@ static constexpr size_t PARALLEL_REDUCE_BATCH = 1000;
// if (thread_count > 0) {
// reduce_batch = max_index / thread_count + 1;
// }
// ENGINE_LOG_DEBUG << "use " << thread_count <<
// LOG_ENGINE_DEBUG_ << "use " << thread_count <<
// " thread parallelly do reduce, each thread process " << reduce_batch << " vectors";
//
// std::vector<std::shared_ptr<std::thread> > thread_array;
@ -159,7 +159,7 @@ XSearchTask::Load(LoadType type, uint8_t device_id) {
} catch (std::exception& ex) {
// typical error: out of disk space or permission denied
error_msg = "Failed to load index file: " + std::string(ex.what());
ENGINE_LOG_ERROR << LogOut("[%s][%ld] Encounter execption: %s", "search", 0, error_msg.c_str());
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] Encounter execption: %s", "search", 0, error_msg.c_str());
stat = Status(SERVER_UNEXPECTED_ERROR, error_msg);
}
fiu_do_on("XSearchTask.Load.out_of_memory", stat = Status(SERVER_UNEXPECTED_ERROR, "out of memory"));
@ -202,7 +202,7 @@ void
XSearchTask::Execute() {
milvus::server::ContextFollower tracer(context_, "XSearchTask::Execute " + std::to_string(index_id_));
// ENGINE_LOG_DEBUG << "Searching in file id:" << index_id_ << " with "
// LOG_ENGINE_DEBUG_ << "Searching in file id:" << index_id_ << " with "
// << search_contexts_.size() << " tasks";
// TimeRecorder rc("DoSearch file id:" + std::to_string(index_id_));
@ -266,8 +266,8 @@ XSearchTask::Execute() {
// step 3: pick up topk result
auto spec_k = file_->row_count_ < topk ? file_->row_count_ : topk;
if (spec_k == 0) {
ENGINE_LOG_WARNING << LogOut("[%s][%ld] Searching in an empty file. file location = %s", "search", 0,
file_->location_.c_str());
LOG_ENGINE_WARNING_ << LogOut("[%s][%ld] Searching in an empty file. file location = %s", "search", 0,
file_->location_.c_str());
}
{
@ -288,7 +288,7 @@ XSearchTask::Execute() {
span = rc.RecordSection(hdr + ", reduce topk");
// search_job->AccumReduceCost(span);
} catch (std::exception& ex) {
ENGINE_LOG_ERROR << LogOut("[%s][%ld] SearchTask encounter exception: %s", "search", 0, ex.what());
LOG_ENGINE_ERROR_ << LogOut("[%s][%ld] SearchTask encounter exception: %s", "search", 0, ex.what());
// search_job->IndexSearchDone(index_id_);//mark as done avoid dead lock, even search failed
}
@ -307,7 +307,7 @@ XSearchTask::MergeTopkToResultSet(const scheduler::ResultIds& src_ids, const sch
size_t src_k, size_t nq, size_t topk, bool ascending, scheduler::ResultIds& tar_ids,
scheduler::ResultDistances& tar_distances) {
if (src_ids.empty()) {
ENGINE_LOG_DEBUG << LogOut("[%s][%d] Search result is empty.", "search", 0);
LOG_ENGINE_DEBUG_ << LogOut("[%s][%d] Search result is empty.", "search", 0);
return;
}

View File

@ -53,7 +53,7 @@ IdBloomFilter::Add(doc_id_t uid) {
const std::lock_guard<std::mutex> lock(mutex_);
if (scaling_bloom_add(bloom_filter_, s.c_str(), s.size(), uid) == -1) {
// Counter overflow does not affect bloom filter's normal functionality
ENGINE_LOG_WARNING << "Warning adding id=" << s << " to bloom filter: 4 bit counter Overflow";
LOG_ENGINE_WARNING_ << "Warning adding id=" << s << " to bloom filter: 4 bit counter Overflow";
// return Status(DB_BLOOM_FILTER_ERROR, "Bloom filter error: 4 bit counter Overflow");
}
return Status::OK();
@ -65,7 +65,7 @@ IdBloomFilter::Remove(doc_id_t uid) {
const std::lock_guard<std::mutex> lock(mutex_);
if (scaling_bloom_remove(bloom_filter_, s.c_str(), s.size(), uid) == -1) {
// Should never go in here, but just to be safe
ENGINE_LOG_WARNING << "Warning removing id=" << s << " in bloom filter: Decrementing zero in counter";
LOG_ENGINE_WARNING_ << "Warning removing id=" << s << " in bloom filter: Decrementing zero in counter";
// return Status(DB_BLOOM_FILTER_ERROR, "Error removing in bloom filter: Decrementing zero in counter");
}
return Status::OK();

View File

@ -66,7 +66,7 @@ SegmentReader::LoadVectors(off_t offset, size_t num_bytes, std::vector<uint8_t>&
default_codec.GetVectorsFormat()->read_vectors(fs_ptr_, offset, num_bytes, raw_vectors);
} catch (std::exception& e) {
std::string err_msg = "Failed to load raw vectors: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();
@ -80,7 +80,7 @@ SegmentReader::LoadUids(std::vector<doc_id_t>& uids) {
default_codec.GetVectorsFormat()->read_uids(fs_ptr_, uids);
} catch (std::exception& e) {
std::string err_msg = "Failed to load uids: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();
@ -100,7 +100,7 @@ SegmentReader::LoadVectorIndex(const std::string& location, segment::VectorIndex
default_codec.GetVectorIndexFormat()->read(fs_ptr_, location, vector_index_ptr);
} catch (std::exception& e) {
std::string err_msg = "Failed to load vector index: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();
@ -114,7 +114,7 @@ SegmentReader::LoadBloomFilter(segment::IdBloomFilterPtr& id_bloom_filter_ptr) {
default_codec.GetIdBloomFilterFormat()->read(fs_ptr_, id_bloom_filter_ptr);
} catch (std::exception& e) {
std::string err_msg = "Failed to load bloom filter: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();
@ -128,7 +128,7 @@ SegmentReader::LoadDeletedDocs(segment::DeletedDocsPtr& deleted_docs_ptr) {
default_codec.GetDeletedDocsFormat()->read(fs_ptr_, deleted_docs_ptr);
} catch (std::exception& e) {
std::string err_msg = "Failed to load deleted docs: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();
@ -142,7 +142,7 @@ SegmentReader::ReadDeletedDocsSize(size_t& size) {
default_codec.GetDeletedDocsFormat()->readSize(fs_ptr_, size);
} catch (std::exception& e) {
std::string err_msg = "Failed to read deleted docs size: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(DB_ERROR, err_msg);
}
return Status::OK();

View File

@ -62,7 +62,7 @@ SegmentWriter::Serialize() {
auto status = WriteBloomFilter();
if (!status.ok()) {
ENGINE_LOG_ERROR << status.message();
LOG_ENGINE_ERROR_ << status.message();
return status;
}
@ -70,7 +70,7 @@ SegmentWriter::Serialize() {
status = WriteVectors();
if (!status.ok()) {
ENGINE_LOG_ERROR << "Write vectors fail: " << status.message();
LOG_ENGINE_ERROR_ << "Write vectors fail: " << status.message();
return status;
}
@ -92,7 +92,7 @@ SegmentWriter::WriteVectors() {
default_codec.GetVectorsFormat()->write(fs_ptr_, segment_ptr_->vectors_ptr_);
} catch (std::exception& e) {
std::string err_msg = "Failed to write vectors: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
@ -106,7 +106,7 @@ SegmentWriter::WriteVectorIndex(const std::string& location) {
default_codec.GetVectorIndexFormat()->write(fs_ptr_, location, segment_ptr_->vector_index_ptr_);
} catch (std::exception& e) {
std::string err_msg = "Failed to write vector index: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
@ -136,7 +136,7 @@ SegmentWriter::WriteBloomFilter() {
recorder.RecordSection("Writing bloom filter");
} catch (std::exception& e) {
std::string err_msg = "Failed to write vectors: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
@ -151,7 +151,7 @@ SegmentWriter::WriteDeletedDocs() {
default_codec.GetDeletedDocsFormat()->write(fs_ptr_, deleted_docs_ptr);
} catch (std::exception& e) {
std::string err_msg = "Failed to write deleted docs: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
@ -165,7 +165,7 @@ SegmentWriter::WriteDeletedDocs(const DeletedDocsPtr& deleted_docs) {
default_codec.GetDeletedDocsFormat()->write(fs_ptr_, deleted_docs);
} catch (std::exception& e) {
std::string err_msg = "Failed to write deleted docs: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
@ -179,7 +179,7 @@ SegmentWriter::WriteBloomFilter(const IdBloomFilterPtr& id_bloom_filter_ptr) {
default_codec.GetIdBloomFilterFormat()->write(fs_ptr_, id_bloom_filter_ptr);
} catch (std::exception& e) {
std::string err_msg = "Failed to write bloom filter: " + std::string(e.what());
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
return Status(SERVER_WRITE_ERROR, err_msg);
}
return Status::OK();
@ -203,7 +203,7 @@ SegmentWriter::Merge(const std::string& dir_to_merge, const std::string& name) {
return Status(DB_ERROR, "Cannot Merge Self");
}
ENGINE_LOG_DEBUG << "Merging from " << dir_to_merge << " to " << fs_ptr_->operation_ptr_->GetDirectory();
LOG_ENGINE_DEBUG_ << "Merging from " << dir_to_merge << " to " << fs_ptr_->operation_ptr_->GetDirectory();
TimeRecorder recorder("SegmentWriter::Merge");
@ -214,7 +214,7 @@ SegmentWriter::Merge(const std::string& dir_to_merge, const std::string& name) {
status = segment_reader_to_merge.Load();
if (!status.ok()) {
std::string msg = "Failed to load segment from " + dir_to_merge;
ENGINE_LOG_ERROR << msg;
LOG_ENGINE_ERROR_ << msg;
return Status(DB_ERROR, msg);
}
}
@ -238,7 +238,7 @@ SegmentWriter::Merge(const std::string& dir_to_merge, const std::string& name) {
auto rows = segment_to_merge->vectors_ptr_->GetCount();
recorder.RecordSection("Adding " + std::to_string(rows) + " vectors and uids");
ENGINE_LOG_DEBUG << "Merging completed from " << dir_to_merge << " to " << fs_ptr_->operation_ptr_->GetDirectory();
LOG_ENGINE_DEBUG_ << "Merging completed from " << dir_to_merge << " to " << fs_ptr_->operation_ptr_->GetDirectory();
return Status::OK();
}

View File

@ -72,7 +72,7 @@ Vectors::Erase(std::vector<int32_t>& offsets) {
recorder.RecordSection("Deduplicating " + std::to_string(offsets.size()) + " offsets to delete");
// Reconstruct raw vectors and uids
ENGINE_LOG_DEBUG << "Begin erasing...";
LOG_ENGINE_DEBUG_ << "Begin erasing...";
size_t new_size = uids_.size() - offsets.size();
std::vector<doc_id_t> new_uids(new_size);

View File

@ -131,7 +131,7 @@ DBWrapper::StartService() {
if (omp_thread > 0) {
omp_set_num_threads(omp_thread);
SERVER_LOG_DEBUG << "Specify openmp thread number: " << omp_thread;
LOG_SERVER_DEBUG_ << "Specify openmp thread number: " << omp_thread;
} else {
int64_t sys_thread_cnt = 8;
if (CommonUtil::GetSystemAvailableThreads(sys_thread_cnt)) {

View File

@ -58,7 +58,7 @@ Server::Daemonize() {
// std::string log_path(GetLogDirFullPath());
// log_path += "zdb_server.(INFO/WARNNING/ERROR/CRITICAL)";
// SERVER_LOG_INFO << "Log will be exported to: " + log_path);
// LOG_SERVER_INFO_ << "Log will be exported to: " + log_path);
pid_t pid = 0;
@ -187,11 +187,11 @@ Server::Start() {
InitLog(log_config_file_);
// print version information
SERVER_LOG_INFO << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME;
LOG_SERVER_INFO_ << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME;
#ifdef MILVUS_GPU_VERSION
SERVER_LOG_INFO << "GPU edition";
LOG_SERVER_INFO_ << "GPU edition";
#else
SERVER_LOG_INFO << "CPU edition";
LOG_SERVER_INFO_ << "CPU edition";
#endif
/* record config and hardware information into log */
LogConfigInFile(config_filename_);
@ -262,7 +262,7 @@ Server::StartService() {
Status stat;
stat = engine::KnowhereResource::Initialize();
if (!stat.ok()) {
SERVER_LOG_ERROR << "KnowhereResource initialize fail: " << stat.message();
LOG_SERVER_ERROR_ << "KnowhereResource initialize fail: " << stat.message();
goto FAIL;
}
@ -270,7 +270,7 @@ Server::StartService() {
stat = DBWrapper::GetInstance().StartService();
if (!stat.ok()) {
SERVER_LOG_ERROR << "DBWrapper start service fail: " << stat.message();
LOG_SERVER_ERROR_ << "DBWrapper start service fail: " << stat.message();
goto FAIL;
}
@ -279,7 +279,7 @@ Server::StartService() {
// stat = storage::S3ClientWrapper::GetInstance().StartService();
// if (!stat.ok()) {
// SERVER_LOG_ERROR << "S3Client start service fail: " << stat.message();
// LOG_SERVER_ERROR_ << "S3Client start service fail: " << stat.message();
// goto FAIL;
// }

View File

@ -53,7 +53,7 @@ RequestScheduler::Stop() {
return;
}
SERVER_LOG_INFO << "Scheduler gonna stop...";
LOG_SERVER_INFO_ << "Scheduler gonna stop...";
{
std::lock_guard<std::mutex> lock(queue_mtx_);
for (auto& iter : request_groups_) {
@ -71,7 +71,7 @@ RequestScheduler::Stop() {
request_groups_.clear();
execute_threads_.clear();
stopped_ = true;
SERVER_LOG_INFO << "Scheduler stopped";
LOG_SERVER_INFO_ << "Scheduler stopped";
}
Status
@ -90,7 +90,7 @@ RequestScheduler::ExecuteRequest(const BaseRequestPtr& request_ptr) {
fiu_do_on("RequestScheduler.ExecuteRequest.push_queue_fail", status = Status(SERVER_INVALID_ARGUMENT, ""));
if (!status.ok()) {
SERVER_LOG_ERROR << "Put request to queue failed with code: " << status.ToString();
LOG_SERVER_ERROR_ << "Put request to queue failed with code: " << status.ToString();
request_ptr->Done();
return status;
}
@ -109,6 +109,7 @@ RequestScheduler::ExecuteRequest(const BaseRequestPtr& request_ptr) {
void
RequestScheduler::TakeToExecute(RequestQueuePtr request_queue) {
SetThreadName("reqsched_thread");
if (request_queue == nullptr) {
return;
}
@ -116,7 +117,7 @@ RequestScheduler::TakeToExecute(RequestQueuePtr request_queue) {
while (true) {
BaseRequestPtr request = request_queue->TakeRequest();
if (request == nullptr) {
SERVER_LOG_ERROR << "Take null from request queue, stop thread";
LOG_SERVER_ERROR_ << "Take null from request queue, stop thread";
break; // stop the thread
}
@ -126,10 +127,10 @@ RequestScheduler::TakeToExecute(RequestQueuePtr request_queue) {
fiu_do_on("RequestScheduler.TakeToExecute.throw_std_exception", throw std::exception());
fiu_do_on("RequestScheduler.TakeToExecute.execute_fail", status = Status(SERVER_INVALID_ARGUMENT, ""));
if (!status.ok()) {
SERVER_LOG_ERROR << "Request failed with code: " << status.ToString();
LOG_SERVER_ERROR_ << "Request failed with code: " << status.ToString();
}
} catch (std::exception& ex) {
SERVER_LOG_ERROR << "Request failed to execute: " << ex.what();
LOG_SERVER_ERROR_ << "Request failed to execute: " << ex.what();
}
}
}
@ -152,7 +153,7 @@ RequestScheduler::PutToQueue(const BaseRequestPtr& request_ptr) {
fiu_do_on("RequestScheduler.PutToQueue.push_null_thread", execute_threads_.push_back(nullptr));
execute_threads_.push_back(thread);
SERVER_LOG_INFO << "Create new thread for request group: " << group_name;
LOG_SERVER_INFO_ << "Create new thread for request group: " << group_name;
}
return Status::OK();

View File

@ -66,7 +66,7 @@ RequestGroup(BaseRequest::RequestType type) {
auto iter = s_map_type_group.find(type);
if (iter == s_map_type_group.end()) {
SERVER_LOG_ERROR << "Unsupported request type: " << type;
LOG_SERVER_ERROR_ << "Unsupported request type: " << type;
throw Exception(SERVER_NOT_IMPLEMENT, "request group undefined");
}
return iter->second;
@ -125,7 +125,7 @@ void
BaseRequest::set_status(const Status& status) {
status_ = status;
if (!status_.ok()) {
SERVER_LOG_ERROR << status_.message();
LOG_SERVER_ERROR_ << status_.message();
}
}

View File

@ -72,7 +72,7 @@ DeleteByIDRequest::OnExecute() {
collection_schema.engine_type_ == (int32_t)engine::EngineType::SPTAG_KDT) {
std::string err_msg =
"Index type " + std::to_string(collection_schema.engine_type_) + " does not support delete operation";
SERVER_LOG_ERROR << err_msg;
LOG_SERVER_ERROR_ << err_msg;
return Status(SERVER_UNSUPPORTED_ERROR, err_msg);
}

View File

@ -52,7 +52,7 @@ DropPartitionRequest::OnExecute() {
// step 2: check partition tag
if (partition_tag == milvus::engine::DEFAULT_PARTITON_TAG) {
std::string msg = "Default partition cannot be dropped.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_COLLECTION_NAME, msg);
}

View File

@ -47,7 +47,7 @@ FlushRequest::OnExecute() {
TimeRecorderAuto rc(hdr);
Status status = Status::OK();
SERVER_LOG_DEBUG << hdr;
LOG_SERVER_DEBUG_ << hdr;
for (auto& name : collection_names_) {
// only process root collection, ignore partition collection

View File

@ -46,7 +46,7 @@ InsertRequest::Create(const std::shared_ptr<milvus::server::Context>& context, c
Status
InsertRequest::OnExecute() {
SERVER_LOG_INFO << LogOut("[%s][%ld] ", "insert", 0) << "Execute insert request.";
LOG_SERVER_INFO_ << LogOut("[%s][%ld] ", "insert", 0) << "Execute insert request.";
try {
int64_t vector_count = vectors_data_.vector_count_;
fiu_do_on("InsertRequest.OnExecute.throw_std_exception", throw std::exception());
@ -57,12 +57,12 @@ InsertRequest::OnExecute() {
// step 1: check arguments
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%ld] Invalid collection name: %s", "insert", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Invalid collection name: %s", "insert", 0, status.message().c_str());
return status;
}
if (vectors_data_.float_data_.empty() && vectors_data_.binary_data_.empty()) {
std::string msg = "The vector array is empty. Make sure you have entered vector records.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] Invalid records: %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Invalid records: %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg);
}
@ -70,7 +70,7 @@ InsertRequest::OnExecute() {
if (!vectors_data_.id_array_.empty()) {
if (vectors_data_.id_array_.size() != vector_count) {
std::string msg = "The size of vector ID array must be equal to the size of the vector.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] Invalid id array: %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Invalid id array: %s", "insert", 0, msg.c_str());
return Status(SERVER_ILLEGAL_VECTOR_ID, msg);
}
}
@ -85,17 +85,17 @@ InsertRequest::OnExecute() {
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
SERVER_LOG_ERROR << LogOut("[%s][%ld] Collection %s not found", "insert", 0, collection_name_.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Collection %s not found", "insert", 0, collection_name_.c_str());
return Status(SERVER_COLLECTION_NOT_EXIST, CollectionNotExistMsg(collection_name_));
} else {
SERVER_LOG_ERROR << LogOut("[%s][%ld] Describe collection %s fail: %s", "insert", 0,
collection_name_.c_str(), status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Describe collection %s fail: %s", "insert", 0,
collection_name_.c_str(), status.message().c_str());
return status;
}
} else {
if (!collection_schema.owner_collection_.empty()) {
SERVER_LOG_ERROR << LogOut("[%s][%ld] owner collection of %s is empty", "insert", 0,
collection_name_.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] owner collection of %s is empty", "insert", 0,
collection_name_.c_str());
return Status(SERVER_INVALID_COLLECTION_NAME, CollectionNotExistMsg(collection_name_));
}
}
@ -108,7 +108,7 @@ InsertRequest::OnExecute() {
// user already provided id before, all insert action require user id
if ((collection_schema.flag_ & engine::meta::FLAG_MASK_HAS_USERID) != 0 && !user_provide_ids) {
std::string msg = "Entities IDs are user-defined. Please provide IDs for all entities of the collection.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_ILLEGAL_VECTOR_ID, msg);
}
@ -131,40 +131,40 @@ InsertRequest::OnExecute() {
if (!vectors_data_.float_data_.empty()) { // insert float vectors
if (engine::utils::IsBinaryMetricType(collection_schema.metric_type_)) {
std::string msg = "Collection metric type doesn't support float vectors.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg);
}
// check prepared float data
if (vectors_data_.float_data_.size() % vector_count != 0) {
std::string msg = "The vector dimension must be equal to the collection dimension.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg);
}
fiu_do_on("InsertRequest.OnExecute.invalid_dim", collection_schema.dimension_ = -1);
if (vectors_data_.float_data_.size() / vector_count != collection_schema.dimension_) {
std::string msg = "The vector dimension must be equal to the collection dimension.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_VECTOR_DIMENSION, msg);
}
} else if (!vectors_data_.binary_data_.empty()) { // insert binary vectors
if (!engine::utils::IsBinaryMetricType(collection_schema.metric_type_)) {
std::string msg = "Collection metric type doesn't support binary vectors.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg);
}
// check prepared binary data
if (vectors_data_.binary_data_.size() % vector_count != 0) {
std::string msg = "The vector dimension must be equal to the collection dimension.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_ROWRECORD_ARRAY, msg);
}
if (vectors_data_.binary_data_.size() * 8 / vector_count != collection_schema.dimension_) {
std::string msg = "The vector dimension must be equal to the collection dimension.";
SERVER_LOG_ERROR << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] %s", "insert", 0, msg.c_str());
return Status(SERVER_INVALID_VECTOR_DIMENSION, msg);
}
}
@ -176,7 +176,7 @@ InsertRequest::OnExecute() {
status = DBWrapper::DB()->InsertVectors(collection_name_, partition_tag_, vectors_data_);
fiu_do_on("InsertRequest.OnExecute.insert_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%ld] Insert fail: %s", "insert", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Insert fail: %s", "insert", 0, status.message().c_str());
return status;
}
@ -185,7 +185,7 @@ InsertRequest::OnExecute() {
if (ids_size != vec_count) {
std::string msg =
"Add " + std::to_string(vec_count) + " vectors but only return " + std::to_string(ids_size) + " id";
SERVER_LOG_ERROR << LogOut("[%s][%ld] Insert fail: %s", "insert", 0, msg.c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Insert fail: %s", "insert", 0, msg.c_str());
return Status(SERVER_ILLEGAL_VECTOR_ID, msg);
}
@ -201,7 +201,7 @@ InsertRequest::OnExecute() {
rc.RecordSection("add vectors to engine");
rc.ElapseFromBegin("total cost");
} catch (std::exception& ex) {
SERVER_LOG_ERROR << LogOut("[%s][%ld] Encounter exception: %s", "insert", 0, ex.what());
LOG_SERVER_ERROR_ << LogOut("[%s][%ld] Encounter exception: %s", "insert", 0, ex.what());
return Status(SERVER_UNEXPECTED_ERROR, ex.what());
}

View File

@ -111,7 +111,7 @@ SearchByIDRequest::OnExecute() {
config.GetGpuResourceConfigSearchResources(search_resources);
if (!search_resources.empty()) {
std::string err_msg = "SearchByID cannot be executed on GPU";
SERVER_LOG_ERROR << err_msg;
LOG_SERVER_ERROR_ << err_msg;
return Status(SERVER_UNSUPPORTED_ERROR, err_msg);
}
}
@ -125,7 +125,7 @@ SearchByIDRequest::OnExecute() {
collection_schema.engine_type_ != (int32_t)engine::EngineType::FAISS_IVFSQ8) {
std::string err_msg = "Index type " + std::to_string(collection_schema.engine_type_) +
" does not support SearchByID operation";
SERVER_LOG_ERROR << err_msg;
LOG_SERVER_ERROR_ << err_msg;
return Status(SERVER_UNSUPPORTED_ERROR, err_msg);
}

View File

@ -233,8 +233,8 @@ Status
SearchCombineRequest::OnExecute() {
try {
size_t combined_request = request_list_.size();
SERVER_LOG_DEBUG << "SearchCombineRequest execute, request count=" << combined_request
<< ", extra_params=" << extra_params_.dump();
LOG_SERVER_DEBUG_ << "SearchCombineRequest execute, request count=" << combined_request
<< ", extra_params=" << extra_params_.dump();
std::string hdr = "SearchCombineRequest(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(hdr);
@ -309,12 +309,12 @@ SearchCombineRequest::OnExecute() {
// all requests are skipped
if (request_list_.empty()) {
SERVER_LOG_DEBUG << "all combined requests were skipped";
LOG_SERVER_DEBUG_ << "all combined requests were skipped";
return Status::OK();
}
SERVER_LOG_DEBUG << (combined_request - run_request) << " requests were skipped";
SERVER_LOG_DEBUG << "reset topk to " << search_topk_;
LOG_SERVER_DEBUG_ << (combined_request - run_request) << " requests were skipped";
LOG_SERVER_DEBUG_ << "reset topk to " << search_topk_;
rc.RecordSection("check validation");
// step 3: construct vectors_data
@ -348,7 +348,7 @@ SearchCombineRequest::OnExecute() {
}
}
SERVER_LOG_DEBUG << total_count << " query vectors combined";
LOG_SERVER_DEBUG_ << total_count << " query vectors combined";
rc.RecordSection("combined query vectors");
// step 4: search vectors

View File

@ -54,7 +54,7 @@ SearchRequest::Create(const std::shared_ptr<milvus::server::Context>& context, c
Status
SearchRequest::OnPreExecute() {
SERVER_LOG_INFO << LogOut("[%s][%ld] ", "search", 0) << "Search pre-execute. Check search parameters";
LOG_SERVER_INFO_ << LogOut("[%s][%ld] ", "search", 0) << "Search pre-execute. Check search parameters";
std::string hdr = "SearchRequest pre-execute(collection=" + collection_name_ + ")";
TimeRecorderAuto rc(LogOut("[%s][%ld] %s", "search", 0, hdr.c_str()));
@ -62,14 +62,14 @@ SearchRequest::OnPreExecute() {
// step 1: check collection name
auto status = ValidationUtil::ValidateCollectionName(collection_name_);
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
return status;
}
// step 2: check search topk
status = ValidationUtil::ValidateSearchTopk(topk_);
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
return status;
}
@ -77,7 +77,7 @@ SearchRequest::OnPreExecute() {
status = ValidationUtil::ValidatePartitionTags(partition_list_);
fiu_do_on("SearchRequest.OnExecute.invalid_partition_tags", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] %s", "search", 0, status.message().c_str());
return status;
}
@ -86,7 +86,7 @@ SearchRequest::OnPreExecute() {
Status
SearchRequest::OnExecute() {
SERVER_LOG_INFO << LogOut("[%s][%ld] ", "search", 0) << "Search execute.";
LOG_SERVER_INFO_ << LogOut("[%s][%ld] ", "search", 0) << "Search execute.";
try {
uint64_t vector_count = vectors_data_.vector_count_;
fiu_do_on("SearchRequest.OnExecute.throw_std_exception", throw std::exception());
@ -103,17 +103,18 @@ SearchRequest::OnExecute() {
status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
if (status.code() == DB_NOT_FOUND) {
SERVER_LOG_ERROR << LogOut("[%s][%d] Collection %s not found: %s", "search", 0,
collection_name_.c_str(), status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Collection %s not found: %s", "search", 0,
collection_name_.c_str(), status.message().c_str());
return Status(SERVER_COLLECTION_NOT_EXIST, CollectionNotExistMsg(collection_name_));
} else {
SERVER_LOG_ERROR << LogOut("[%s][%d] Error occurred when describing collection %s: %s", "search", 0,
collection_name_.c_str(), status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Error occurred when describing collection %s: %s", "search", 0,
collection_name_.c_str(), status.message().c_str());
return status;
}
} else {
if (!collection_schema_.owner_collection_.empty()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] %s", "search", 0, CollectionNotExistMsg(collection_name_).c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] %s", "search", 0,
CollectionNotExistMsg(collection_name_).c_str());
return Status(SERVER_INVALID_COLLECTION_NAME, CollectionNotExistMsg(collection_name_));
}
}
@ -121,14 +122,14 @@ SearchRequest::OnExecute() {
// step 5: check search parameters
status = ValidationUtil::ValidateSearchParams(extra_params_, collection_schema_, topk_);
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] Invalid search params: %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Invalid search params: %s", "search", 0, status.message().c_str());
return status;
}
// step 6: check vector data according to metric type
status = ValidationUtil::ValidateVectorData(vectors_data_, collection_schema_);
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] Invalid vector data: %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Invalid vector data: %s", "search", 0, status.message().c_str());
return status;
}
@ -158,7 +159,7 @@ SearchRequest::OnExecute() {
#endif
fiu_do_on("SearchRequest.OnExecute.query_fail", status = Status(milvus::SERVER_UNEXPECTED_ERROR, ""));
if (!status.ok()) {
SERVER_LOG_ERROR << LogOut("[%s][%d] Query fail: %s", "search", 0, status.message().c_str());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Query fail: %s", "search", 0, status.message().c_str());
return status;
}
fiu_do_on("SearchRequest.OnExecute.empty_result_ids", result_ids.clear());
@ -173,7 +174,7 @@ SearchRequest::OnExecute() {
result_.distance_list_.swap(result_distances);
rc.RecordSection("construct result");
} catch (std::exception& ex) {
SERVER_LOG_ERROR << LogOut("[%s][%d] Encounter exception: %s", "search", 0, ex.what());
LOG_SERVER_ERROR_ << LogOut("[%s][%d] Encounter exception: %s", "search", 0, ex.what());
return Status(SERVER_UNEXPECTED_ERROR, ex.what());
}

View File

@ -30,7 +30,7 @@ Status
SearchReqStrategy::ReScheduleQueue(const BaseRequestPtr& request, std::queue<BaseRequestPtr>& queue) {
if (request->GetRequestType() != BaseRequest::kSearch) {
std::string msg = "search strategy can only handle search request";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_UNSUPPORTED_ERROR, msg);
}
@ -49,7 +49,7 @@ SearchReqStrategy::ReScheduleQueue(const BaseRequestPtr& request, std::queue<Bas
combine_request->Combine(last_search_req);
combine_request->Combine(new_search_req);
queue.push(combine_request);
SERVER_LOG_DEBUG << "Combine 2 search request";
LOG_SERVER_DEBUG_ << "Combine 2 search request";
} else {
// directly put to queue
queue.push(request);
@ -59,14 +59,14 @@ SearchReqStrategy::ReScheduleQueue(const BaseRequestPtr& request, std::queue<Bas
if (combine_req->CanCombine(new_search_req)) {
// combine request
combine_req->Combine(new_search_req);
SERVER_LOG_DEBUG << "Combine more search request";
LOG_SERVER_DEBUG_ << "Combine more search request";
} else {
// directly put to queue
queue.push(request);
}
} else {
std::string msg = "unsupported request type for search strategy";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_UNSUPPORTED_ERROR, msg);
}

View File

@ -174,7 +174,7 @@ void
set_request_id(::grpc::ServerContext* context, const std::string& request_id) {
if (not context) {
// error
SERVER_LOG_ERROR << "set_request_id: grpc::ServerContext is nullptr" << std::endl;
LOG_SERVER_ERROR_ << "set_request_id: grpc::ServerContext is nullptr" << std::endl;
return;
}
@ -185,7 +185,7 @@ std::string
get_request_id(::grpc::ServerContext* context) {
if (not context) {
// error
SERVER_LOG_ERROR << "get_request_id: grpc::ServerContext is nullptr" << std::endl;
LOG_SERVER_ERROR_ << "get_request_id: grpc::ServerContext is nullptr" << std::endl;
return "INVALID_ID";
}
@ -194,7 +194,7 @@ get_request_id(::grpc::ServerContext* context) {
auto request_id_kv = server_metadata.find(REQ_ID);
if (request_id_kv == server_metadata.end()) {
// error
SERVER_LOG_ERROR << std::string(REQ_ID) << " not found in grpc.server_metadata" << std::endl;
LOG_SERVER_ERROR_ << std::string(REQ_ID) << " not found in grpc.server_metadata" << std::endl;
return "INVALID_ID";
}
@ -242,7 +242,7 @@ GrpcRequestHandler::OnPostRecvInitialMetaData(
auto request_id_kv = client_metadata.find("request_id");
if (request_id_kv != client_metadata.end()) {
request_id = request_id_kv->second.data();
SERVER_LOG_DEBUG << "client provide request_id: " << request_id;
LOG_SERVER_DEBUG_ << "client provide request_id: " << request_id;
// if request_id is being used by another request,
// convert it to request_id_n.
@ -265,7 +265,7 @@ GrpcRequestHandler::OnPostRecvInitialMetaData(
} else {
request_id = std::to_string(get_sequential_id());
set_request_id(server_context, request_id);
SERVER_LOG_DEBUG << "milvus generate request_id: " << request_id;
LOG_SERVER_DEBUG_ << "milvus generate request_id: " << request_id;
}
auto trace_context = std::make_shared<tracing::TraceContext>(span);
@ -282,7 +282,7 @@ GrpcRequestHandler::OnPreSendMessage(::grpc::experimental::ServerRpcInfo* server
if (context_map_.find(request_id) == context_map_.end()) {
// error
SERVER_LOG_ERROR << "request_id " << request_id << " not found in context_map_";
LOG_SERVER_ERROR_ << "request_id " << request_id << " not found in context_map_";
return;
}
context_map_[request_id]->GetTraceContext()->GetSpan()->Finish();
@ -294,7 +294,7 @@ GrpcRequestHandler::GetContext(::grpc::ServerContext* server_context) {
std::lock_guard<std::mutex> lock(context_map_mutex_);
auto request_id = get_request_id(server_context);
if (context_map_.find(request_id) == context_map_.end()) {
SERVER_LOG_ERROR << "GetContext: request_id " << request_id << " not found in context_map_";
LOG_SERVER_ERROR_ << "GetContext: request_id " << request_id << " not found in context_map_";
return nullptr;
}
return context_map_[request_id];
@ -382,7 +382,7 @@ GrpcRequestHandler::Insert(::grpc::ServerContext* context, const ::milvus::grpc:
::milvus::grpc::VectorIds* response) {
CHECK_NULLPTR_RETURN(request);
SERVER_LOG_INFO << LogOut("[%s][%d] Start insert.", "insert", 0);
LOG_SERVER_INFO_ << LogOut("[%s][%d] Start insert.", "insert", 0);
// step 1: copy vector data
engine::VectorsData vectors;
@ -397,7 +397,7 @@ GrpcRequestHandler::Insert(::grpc::ServerContext* context, const ::milvus::grpc:
memcpy(response->mutable_vector_id_array()->mutable_data(), vectors.id_array_.data(),
vectors.id_array_.size() * sizeof(int64_t));
SERVER_LOG_INFO << LogOut("[%s][%d] Insert done.", "insert", 0);
LOG_SERVER_INFO_ << LogOut("[%s][%d] Insert done.", "insert", 0);
SET_RESPONSE(response->mutable_status(), status, context);
return ::grpc::Status::OK;
}
@ -450,7 +450,7 @@ GrpcRequestHandler::Search(::grpc::ServerContext* context, const ::milvus::grpc:
::milvus::grpc::TopKQueryResult* response) {
CHECK_NULLPTR_RETURN(request);
SERVER_LOG_INFO << LogOut("[%s][%d] Search start in gRPC server", "search", 0);
LOG_SERVER_INFO_ << LogOut("[%s][%d] Search start in gRPC server", "search", 0);
// step 1: copy vector data
engine::VectorsData vectors;
CopyRowRecords(request->query_record_array(), google::protobuf::RepeatedField<google::protobuf::int64>(), vectors);
@ -481,7 +481,7 @@ GrpcRequestHandler::Search(::grpc::ServerContext* context, const ::milvus::grpc:
// step 5: construct and return result
ConstructResults(result, response);
SERVER_LOG_INFO << LogOut("[%s][%d] Search done.", "search", 0);
LOG_SERVER_INFO_ << LogOut("[%s][%d] Search done.", "search", 0);
SET_RESPONSE(response->mutable_status(), status, context);

View File

@ -73,6 +73,7 @@ GrpcServer::Stop() {
Status
GrpcServer::StartService() {
SetThreadName("grpcserv_thread");
Config& config = Config::GetInstance();
std::string address, port;

View File

@ -39,6 +39,7 @@ WebServer::Stop() {
Status
WebServer::StartService() {
SetThreadName("webserv_thread");
oatpp::base::Environment::init();
Config& config = Config::GetInstance();

View File

@ -33,7 +33,7 @@ DiskOperation::CreateDirectory() {
auto ret = boost::filesystem::create_directory(dir_path_);
if (!ret) {
std::string err_msg = "Failed to create directory: " + dir_path_;
ENGINE_LOG_ERROR << err_msg;
LOG_ENGINE_ERROR_ << err_msg;
throw Exception(SERVER_CANNOT_CREATE_FOLDER, err_msg);
}
}

View File

@ -38,7 +38,7 @@ S3ClientWrapper::StartService() {
CONFIG_CHECK(config.GetStorageConfigS3Enable(s3_enable));
fiu_do_on("S3ClientWrapper.StartService.s3_disable", s3_enable = false);
if (!s3_enable) {
STORAGE_LOG_INFO << "S3 not enabled!";
LOG_STORAGE_INFO_ << "S3 not enabled!";
return Status::OK();
}
@ -89,12 +89,12 @@ S3ClientWrapper::CreateBucket() {
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
if (err.GetErrorType() != Aws::S3::S3Errors::BUCKET_ALREADY_OWNED_BY_YOU) {
STORAGE_LOG_ERROR << "ERROR: CreateBucket: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: CreateBucket: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
}
STORAGE_LOG_DEBUG << "CreateBucket '" << s3_bucket_ << "' successfully!";
LOG_STORAGE_DEBUG_ << "CreateBucket '" << s3_bucket_ << "' successfully!";
return Status::OK();
}
@ -108,11 +108,11 @@ S3ClientWrapper::DeleteBucket() {
fiu_do_on("S3ClientWrapper.DeleteBucket.outcome.fail", outcome = Aws::S3::Model::DeleteBucketOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: DeleteBucket: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: DeleteBucket: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
STORAGE_LOG_DEBUG << "DeleteBucket '" << s3_bucket_ << "' successfully!";
LOG_STORAGE_DEBUG_ << "DeleteBucket '" << s3_bucket_ << "' successfully!";
return Status::OK();
}
@ -121,7 +121,7 @@ S3ClientWrapper::PutObjectFile(const std::string& object_name, const std::string
struct stat buffer;
if (stat(file_path.c_str(), &buffer) != 0) {
std::string str = "File '" + file_path + "' not exist!";
STORAGE_LOG_ERROR << "ERROR: " << str;
LOG_STORAGE_ERROR_ << "ERROR: " << str;
return Status(SERVER_UNEXPECTED_ERROR, str);
}
@ -137,11 +137,11 @@ S3ClientWrapper::PutObjectFile(const std::string& object_name, const std::string
fiu_do_on("S3ClientWrapper.PutObjectFile.outcome.fail", outcome = Aws::S3::Model::PutObjectOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: PutObject: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: PutObject: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
STORAGE_LOG_DEBUG << "PutObjectFile '" << file_path << "' successfully!";
LOG_STORAGE_DEBUG_ << "PutObjectFile '" << file_path << "' successfully!";
return Status::OK();
}
@ -159,11 +159,11 @@ S3ClientWrapper::PutObjectStr(const std::string& object_name, const std::string&
fiu_do_on("S3ClientWrapper.PutObjectStr.outcome.fail", outcome = Aws::S3::Model::PutObjectOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: PutObject: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: PutObject: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
STORAGE_LOG_DEBUG << "PutObjectStr successfully!";
LOG_STORAGE_DEBUG_ << "PutObjectStr successfully!";
return Status::OK();
}
@ -177,7 +177,7 @@ S3ClientWrapper::GetObjectFile(const std::string& object_name, const std::string
fiu_do_on("S3ClientWrapper.GetObjectFile.outcome.fail", outcome = Aws::S3::Model::GetObjectOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: GetObject: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: GetObject: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
@ -186,7 +186,7 @@ S3ClientWrapper::GetObjectFile(const std::string& object_name, const std::string
output_file << retrieved_file.rdbuf();
output_file.close();
STORAGE_LOG_DEBUG << "GetObjectFile '" << file_path << "' successfully!";
LOG_STORAGE_DEBUG_ << "GetObjectFile '" << file_path << "' successfully!";
return Status::OK();
}
@ -200,7 +200,7 @@ S3ClientWrapper::GetObjectStr(const std::string& object_name, std::string& conte
fiu_do_on("S3ClientWrapper.GetObjectStr.outcome.fail", outcome = Aws::S3::Model::GetObjectOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: GetObject: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: GetObject: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
@ -209,7 +209,7 @@ S3ClientWrapper::GetObjectStr(const std::string& object_name, std::string& conte
ss << retrieved_file.rdbuf();
content = std::move(ss.str());
STORAGE_LOG_DEBUG << "GetObjectStr successfully!";
LOG_STORAGE_DEBUG_ << "GetObjectStr successfully!";
return Status::OK();
}
@ -227,7 +227,7 @@ S3ClientWrapper::ListObjects(std::vector<std::string>& object_list, const std::s
fiu_do_on("S3ClientWrapper.ListObjects.outcome.fail", outcome = Aws::S3::Model::ListObjectsOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: ListObjects: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: ListObjects: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
@ -238,9 +238,9 @@ S3ClientWrapper::ListObjects(std::vector<std::string>& object_list, const std::s
}
if (marker.empty()) {
STORAGE_LOG_DEBUG << "ListObjects '" << s3_bucket_ << "' successfully!";
LOG_STORAGE_DEBUG_ << "ListObjects '" << s3_bucket_ << "' successfully!";
} else {
STORAGE_LOG_DEBUG << "ListObjects '" << s3_bucket_ << ":" << marker << "' successfully!";
LOG_STORAGE_DEBUG_ << "ListObjects '" << s3_bucket_ << ":" << marker << "' successfully!";
}
return Status::OK();
}
@ -255,11 +255,11 @@ S3ClientWrapper::DeleteObject(const std::string& object_name) {
fiu_do_on("S3ClientWrapper.DeleteObject.outcome.fail", outcome = Aws::S3::Model::DeleteObjectOutcome());
if (!outcome.IsSuccess()) {
auto err = outcome.GetError();
STORAGE_LOG_ERROR << "ERROR: DeleteObject: " << err.GetExceptionName() << ": " << err.GetMessage();
LOG_STORAGE_ERROR_ << "ERROR: DeleteObject: " << err.GetExceptionName() << ": " << err.GetMessage();
return Status(SERVER_UNEXPECTED_ERROR, err.GetMessage());
}
STORAGE_LOG_DEBUG << "DeleteObject '" << object_name << "' successfully!";
LOG_STORAGE_DEBUG_ << "DeleteObject '" << object_name << "' successfully!";
return Status::OK();
}

View File

@ -247,7 +247,7 @@ CommonUtil::GetCurrentTimeStr() {
void
CommonUtil::EraseFromCache(const std::string& item_key) {
if (item_key.empty()) {
SERVER_LOG_ERROR << "Empty key cannot be erased from cache";
LOG_SERVER_ERROR_ << "Empty key cannot be erased from cache";
return;
}

View File

@ -31,4 +31,22 @@ LogOut(const char* pattern, ...) {
return std::string(str_p.get());
}
void
SetThreadName(const std::string& name) {
pthread_setname_np(pthread_self(), name.c_str());
}
std::string
GetThreadName() {
std::string thread_name = "unnamed";
char name[16];
size_t len = 16;
auto err = pthread_getname_np(pthread_self(), name, len);
if (not err) {
thread_name = name;
}
return thread_name;
}
} // namespace milvus
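
These helpers are thin wrappers over the pthread naming API; on Linux a thread name is limited to 15 characters plus the terminator, which is why the names set elsewhere in this patch (taskloader_th, reqsched_thread, grpcserv_thread, webserv_thread) stay short. A self-contained sketch of the same idea, for illustration only (assumes Linux/glibc, compile with -pthread; the Sketch suffix marks these as standalone rewrites rather than the patched functions):

#include <pthread.h>

#include <iostream>
#include <string>
#include <thread>

static void
SetThreadNameSketch(const std::string& name) {
    // pthread_setname_np rejects names longer than 15 characters on Linux.
    pthread_setname_np(pthread_self(), name.c_str());
}

static std::string
GetThreadNameSketch() {
    char name[16];
    if (pthread_getname_np(pthread_self(), name, sizeof(name)) == 0) {
        return name;
    }
    return "unnamed";
}

int
main() {
    std::thread t([] {
        SetThreadNameSketch("demo_worker");               // 11 characters, within the limit
        std::cout << GetThreadNameSketch() << std::endl;  // prints "demo_worker"
    });
    t.join();
    return 0;
}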

View File

@ -17,57 +17,172 @@
namespace milvus {
/////////////////////////////////////////////////////////////////////////////////////////////////
#define SERVER_DOMAIN_NAME "[SERVER] "
#define SERVER_LOG_TRACE LOG(TRACE) << SERVER_DOMAIN_NAME
#define SERVER_LOG_DEBUG LOG(DEBUG) << SERVER_DOMAIN_NAME
#define SERVER_LOG_INFO LOG(INFO) << SERVER_DOMAIN_NAME
#define SERVER_LOG_WARNING LOG(WARNING) << SERVER_DOMAIN_NAME
#define SERVER_LOG_ERROR LOG(ERROR) << SERVER_DOMAIN_NAME
#define SERVER_LOG_FATAL LOG(FATAL) << SERVER_DOMAIN_NAME
/*
* Please use the LOG_MODULE_LEVEL_C macros in class member functions
* and the LOG_MODULE_LEVEL_ macros in other functions.
*/
/////////////////////////////////////////////////////////////////////////////////////////////////
#define ENGINE_DOMAIN_NAME "[ENGINE] "
#define SERVER_MODULE_NAME "SERVER"
#define SERVER_MODULE_CLASS_FUNCTION \
LogOut("[%s][%s::%s][%s] ", SERVER_MODULE_NAME, (typeid(*this).name()), __FUNCTION__, GetThreadName().c_str())
#define SERVER_MODULE_FUNCTION LogOut("[%s][%s][%s] ", SERVER_MODULE_NAME, __FUNCTION__, GetThreadName().c_str())
#define ENGINE_LOG_TRACE LOG(TRACE) << ENGINE_DOMAIN_NAME
#define ENGINE_LOG_DEBUG LOG(DEBUG) << ENGINE_DOMAIN_NAME
#define ENGINE_LOG_INFO LOG(INFO) << ENGINE_DOMAIN_NAME
#define ENGINE_LOG_WARNING LOG(WARNING) << ENGINE_DOMAIN_NAME
#define ENGINE_LOG_ERROR LOG(ERROR) << ENGINE_DOMAIN_NAME
#define ENGINE_LOG_FATAL LOG(FATAL) << ENGINE_DOMAIN_NAME
#define LOG_SERVER_TRACE_C LOG(TRACE) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_DEBUG_C LOG(DEBUG) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_INFO_C LOG(INFO) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_WARNING_C LOG(WARNING) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_ERROR_C LOG(ERROR) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_FATAL_C LOG(FATAL) << SERVER_MODULE_CLASS_FUNCTION
#define LOG_SERVER_TRACE_ LOG(TRACE) << SERVER_MODULE_FUNCTION
#define LOG_SERVER_DEBUG_ LOG(DEBUG) << SERVER_MODULE_FUNCTION
#define LOG_SERVER_INFO_ LOG(INFO) << SERVER_MODULE_FUNCTION
#define LOG_SERVER_WARNING_ LOG(WARNING) << SERVER_MODULE_FUNCTION
#define LOG_SERVER_ERROR_ LOG(ERROR) << SERVER_MODULE_FUNCTION
#define LOG_SERVER_FATAL_ LOG(FATAL) << SERVER_MODULE_FUNCTION
/////////////////////////////////////////////////////////////////////////////////////////////////
#define WRAPPER_DOMAIN_NAME "[WRAPPER] "
#define ENGINE_MODULE_NAME "ENGINE"
#define ENGINE_MODULE_CLASS_FUNCTION \
LogOut("[%s][%s::%s][%s] ", ENGINE_MODULE_NAME, (typeid(*this).name()), __FUNCTION__, GetThreadName().c_str())
#define ENGINE_MODULE_FUNCTION LogOut("[%s][%s][%s] ", ENGINE_MODULE_NAME, __FUNCTION__, GetThreadName().c_str())
#define WRAPPER_LOG_TRACE LOG(TRACE) << WRAPPER_DOMAIN_NAME
#define WRAPPER_LOG_DEBUG LOG(DEBUG) << WRAPPER_DOMAIN_NAME
#define WRAPPER_LOG_INFO LOG(INFO) << WRAPPER_DOMAIN_NAME
#define WRAPPER_LOG_WARNING LOG(WARNING) << WRAPPER_DOMAIN_NAME
#define WRAPPER_LOG_ERROR LOG(ERROR) << WRAPPER_DOMAIN_NAME
#define WRAPPER_LOG_FATAL LOG(FATAL) << WRAPPER_DOMAIN_NAME
#define LOG_ENGINE_TRACE_C LOG(TRACE) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_DEBUG_C LOG(DEBUG) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_INFO_C LOG(INFO) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_WARNING_C LOG(WARNING) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_ERROR_C LOG(ERROR) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_FATAL_C LOG(FATAL) << ENGINE_MODULE_CLASS_FUNCTION
#define LOG_ENGINE_TRACE_ LOG(TRACE) << ENGINE_MODULE_FUNCTION
#define LOG_ENGINE_DEBUG_ LOG(DEBUG) << ENGINE_MODULE_FUNCTION
#define LOG_ENGINE_INFO_ LOG(INFO) << ENGINE_MODULE_FUNCTION
#define LOG_ENGINE_WARNING_ LOG(WARNING) << ENGINE_MODULE_FUNCTION
#define LOG_ENGINE_ERROR_ LOG(ERROR) << ENGINE_MODULE_FUNCTION
#define LOG_ENGINE_FATAL_ LOG(FATAL) << ENGINE_MODULE_FUNCTION
/////////////////////////////////////////////////////////////////////////////////////////////////
#define STORAGE_DOMAIN_NAME "[STORAGE] "
#define WRAPPER_MODULE_NAME "WRAPPER"
#define WRAPPER_MODULE_CLASS_FUNCTION \
LogOut("[%s][%s::%s][%s] ", WRAPPER_MODULE_NAME, (typeid(*this).name()), __FUNCTION__, GetThreadName().c_str())
#define WRAPPER_MODULE_FUNCTION LogOut("[%s][%s][%s] ", WRAPPER_MODULE_NAME, __FUNCTION__, GetThreadName().c_str())
#define STORAGE_LOG_TRACE LOG(TRACE) << STORAGE_DOMAIN_NAME
#define STORAGE_LOG_DEBUG LOG(DEBUG) << STORAGE_DOMAIN_NAME
#define STORAGE_LOG_INFO LOG(INFO) << STORAGE_DOMAIN_NAME
#define STORAGE_LOG_WARNING LOG(WARNING) << STORAGE_DOMAIN_NAME
#define STORAGE_LOG_ERROR LOG(ERROR) << STORAGE_DOMAIN_NAME
#define STORAGE_LOG_FATAL LOG(FATAL) << STORAGE_DOMAIN_NAME
#define LOG_WRAPPER_TRACE_C LOG(TRACE) << WRAPPER_MODULE_CLASS_FUNCTION
#define LOG_WRAPPER_DEBUG_C LOG(DEBUG) << WRAPPER_MODULE_CLASS_FUNCTION
#define LOG_WRAPPER_INFO_C LOG(INFO) << WRAPPER_MODULE_CLASS_FUNCTION
#define LOG_WRAPPER_WARNING_C LOG(WARNING) << WRAPPER_MODULE_CLASS_FUNCTION
#define LOG_WRAPPER_ERROR_C LOG(ERROR) << WRAPPER_MODULE_CLASS_FUNCTION
#define LOG_WRAPPER_FATAL_C LOG(FATAL) << WRAPPER_MODULE_CLASS_FUNCTION
#define WAL_DOMAIN_NAME "[WAL] "
#define LOG_WRAPPER_TRACE_ LOG(TRACE) << WRAPPER_MODULE_FUNCTION
#define LOG_WRAPPER_DEBUG_ LOG(DEBUG) << WRAPPER_MODULE_FUNCTION
#define LOG_WRAPPER_INFO_ LOG(INFO) << WRAPPER_MODULE_FUNCTION
#define LOG_WRAPPER_WARNING_ LOG(WARNING) << WRAPPER_MODULE_FUNCTION
#define LOG_WRAPPER_ERROR_ LOG(ERROR) << WRAPPER_MODULE_FUNCTION
#define LOG_WRAPPER_FATAL_ LOG(FATAL) << WRAPPER_MODULE_FUNCTION
#define WAL_LOG_TRACE LOG(TRACE) << WAL_DOMAIN_NAME
#define WAL_LOG_DEBUG LOG(DEBUG) << WAL_DOMAIN_NAME
#define WAL_LOG_INFO LOG(INFO) << WAL_DOMAIN_NAME
#define WAL_LOG_WARNING LOG(WARNING) << WAL_DOMAIN_NAME
#define WAL_LOG_ERROR LOG(ERROR) << WAL_DOMAIN_NAME
#define WAL_LOG_FATAL LOG(FATAL) << WAL_DOMAIN_NAME
/////////////////////////////////////////////////////////////////////////////////////////////////
#define STORAGE_MODULE_NAME "STORAGE"
#define STORAGE_MODULE_CLASS_FUNCTION \
LogOut("[%s][%s::%s][%s] ", STORAGE_MODULE_NAME, (typeid(*this).name()), __FUNCTION__, GetThreadName().c_str())
#define STORAGE_MODULE_FUNCTION LogOut("[%s][%s][%s] ", STORAGE_MODULE_NAME, __FUNCTION__, GetThreadName().c_str())
#define LOG_STORAGE_TRACE_C LOG(TRACE) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_DEBUG_C LOG(DEBUG) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_INFO_C LOG(INFO) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_WARNING_C LOG(WARNING) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_ERROR_C LOG(ERROR) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_FATAL_C LOG(FATAL) << STORAGE_MODULE_CLASS_FUNCTION
#define LOG_STORAGE_TRACE_ LOG(TRACE) << STORAGE_MODULE_FUNCTION
#define LOG_STORAGE_DEBUG_ LOG(DEBUG) << STORAGE_MODULE_FUNCTION
#define LOG_STORAGE_INFO_ LOG(INFO) << STORAGE_MODULE_FUNCTION
#define LOG_STORAGE_WARNING_ LOG(WARNING) << STORAGE_MODULE_FUNCTION
#define LOG_STORAGE_ERROR_ LOG(ERROR) << STORAGE_MODULE_FUNCTION
#define LOG_STORAGE_FATAL_ LOG(FATAL) << STORAGE_MODULE_FUNCTION
/////////////////////////////////////////////////////////////////////////////////////////////////
#define WAL_MODULE_NAME "WAL"
#define WAL_MODULE_CLASS_FUNCTION \
LogOut("[%s][%s::%s][%s] ", WAL_MODULE_NAME, (typeid(*this).name()), __FUNCTION__, GetThreadName().c_str())
#define WAL_MODULE_FUNCTION LogOut("[%s][%s][%s] ", WAL_MODULE_NAME, __FUNCTION__, GetThreadName().c_str())
#define LOG_WAL_TRACE_C LOG(TRACE) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_DEBUG_C LOG(DEBUG) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_INFO_C LOG(INFO) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_WARNING_C LOG(WARNING) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_ERROR_C LOG(ERROR) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_FATAL_C LOG(FATAL) << WAL_MODULE_CLASS_FUNCTION
#define LOG_WAL_TRACE_ LOG(TRACE) << WAL_MODULE_FUNCTION
#define LOG_WAL_DEBUG_ LOG(DEBUG) << WAL_MODULE_FUNCTION
#define LOG_WAL_INFO_ LOG(INFO) << WAL_MODULE_FUNCTION
#define LOG_WAL_WARNING_ LOG(WARNING) << WAL_MODULE_FUNCTION
#define LOG_WAL_ERROR_ LOG(ERROR) << WAL_MODULE_FUNCTION
#define LOG_WAL_FATAL_ LOG(FATAL) << WAL_MODULE_FUNCTION
/*
* Deprecated
*/
/////////////////////////////////////////////////////////////////////////////////////////////////
// #define SERVER_DOMAIN_NAME "[SERVER]"
// #define LOG_SERVER_TRACE_ LOG(TRACE) << SERVER_DOMAIN_NAME
// #define LOG_SERVER_DEBUG_ LOG(DEBUG) << SERVER_DOMAIN_NAME
// #define LOG_SERVER_INFO_ LOG(INFO) << SERVER_DOMAIN_NAME
// #define LOG_SERVER_WARNING_ LOG(WARNING) << SERVER_DOMAIN_NAME
// #define LOG_SERVER_ERROR_ LOG(ERROR) << SERVER_DOMAIN_NAME
// #define LOG_SERVER_FATAL_ LOG(FATAL) << SERVER_DOMAIN_NAME
/////////////////////////////////////////////////////////////////////////////////////////////////
// #define ENGINE_DOMAIN_NAME "[ENGINE]"
// #define LOG_ENGINE_TRACE_ LOG(TRACE) << ENGINE_DOMAIN_NAME
// #define LOG_ENGINE_DEBUG_ LOG(DEBUG) << ENGINE_DOMAIN_NAME
// #define LOG_ENGINE_INFO_ LOG(INFO) << ENGINE_DOMAIN_NAME
// #define LOG_ENGINE_WARNING_ LOG(WARNING) << ENGINE_DOMAIN_NAME
// #define LOG_ENGINE_ERROR_ LOG(ERROR) << ENGINE_DOMAIN_NAME
// #define LOG_ENGINE_FATAL_ LOG(FATAL) << ENGINE_DOMAIN_NAME
/////////////////////////////////////////////////////////////////////////////////////////////////
// #define WRAPPER_DOMAIN_NAME "[WRAPPER]"
// #define WRAPPER_LOG_TRACE LOG(TRACE) << WRAPPER_DOMAIN_NAME
// #define WRAPPER_LOG_DEBUG LOG(DEBUG) << WRAPPER_DOMAIN_NAME
// #define WRAPPER_LOG_INFO LOG(INFO) << WRAPPER_DOMAIN_NAME
// #define WRAPPER_LOG_WARNING LOG(WARNING) << WRAPPER_DOMAIN_NAME
// #define WRAPPER_LOG_ERROR LOG(ERROR) << WRAPPER_DOMAIN_NAME
// #define WRAPPER_LOG_FATAL LOG(FATAL) << WRAPPER_DOMAIN_NAME
/////////////////////////////////////////////////////////////////////////////////////////////////
// #define STORAGE_DOMAIN_NAME "[STORAGE]"
// #define LOG_STORAGE_TRACE_ LOG(TRACE) << STORAGE_DOMAIN_NAME
// #define LOG_STORAGE_DEBUG_ LOG(DEBUG) << STORAGE_DOMAIN_NAME
// #define LOG_STORAGE_INFO_ LOG(INFO) << STORAGE_DOMAIN_NAME
// #define LOG_STORAGE_WARNING_ LOG(WARNING) << STORAGE_DOMAIN_NAME
// #define LOG_STORAGE_ERROR_ LOG(ERROR) << STORAGE_DOMAIN_NAME
// #define LOG_STORAGE_FATAL_ LOG(FATAL) << STORAGE_DOMAIN_NAME
// #define WAL_DOMAIN_NAME "[WAL]"
// #define LOG_WAL_TRACE_ LOG(TRACE) << WAL_DOMAIN_NAME
// #define LOG_WAL_DEBUG_ LOG(DEBUG) << WAL_DOMAIN_NAME
// #define LOG_WAL_INFO_ LOG(INFO) << WAL_DOMAIN_NAME
// #define LOG_WAL_WARNING_ LOG(WARNING) << WAL_DOMAIN_NAME
// #define LOG_WAL_ERROR_ LOG(ERROR) << WAL_DOMAIN_NAME
// #define LOG_WAL_FATAL_ LOG(FATAL) << WAL_DOMAIN_NAME
/////////////////////////////////////////////////////////////////////////////////////////////////////
std::string
LogOut(const char* pattern, ...);
void
SetThreadName(const std::string& name);
std::string
GetThreadName();
} // namespace milvus
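
A rough usage sketch of the refactored macros (illustrative only: DemoHandler, the include path, and the messages are invented, and the header still needs easylogging++, so this compiles only inside the Milvus tree). The *_C variants are intended for class member functions, where typeid(*this) adds the mangled class name to the prefix; the plain *_ variants add only the module, function, and thread name:

#include <string>

#include "utils/Log.h"  // assumed include path for the header above

class DemoHandler {  // hypothetical class, not part of this patch
 public:
    void
    CreateCollection(const std::string& name) {
        // prefix resembles: [SERVER][<mangled DemoHandler>::CreateCollection][reqsched_thread]
        LOG_SERVER_DEBUG_C << "create collection " << name;
    }
};

void
FlushAll() {
    // prefix resembles: [ENGINE][FlushAll][taskloader_th]
    LOG_ENGINE_WARNING_ << LogOut("[%s][%d] flush took %ld ms", "insert", 0, 42L);
}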

View File

@ -97,7 +97,7 @@ LogConfigInFile(const std::string& path) {
auto node = YAML::LoadFile(path);
YAML::Emitter out;
out << node;
SERVER_LOG_DEBUG << "\n\n"
LOG_SERVER_INFO_ << "\n\n"
<< std::string(15, '*') << "Config in file" << std::string(15, '*') << "\n\n"
<< out.c_str();
}
@ -107,7 +107,7 @@ LogConfigInMem() {
auto& config = Config::GetInstance();
std::string config_str;
config.GetConfigJsonStr(config_str, 3);
SERVER_LOG_DEBUG << "\n\n"
LOG_SERVER_INFO_ << "\n\n"
<< std::string(15, '*') << "Config in memory" << std::string(15, '*') << "\n\n"
<< config_str;
}
@ -117,7 +117,7 @@ LogCpuInfo() {
/*CPU information*/
std::fstream fcpu("/proc/cpuinfo", std::ios::in);
if (!fcpu.is_open()) {
SERVER_LOG_WARNING << "Cannot obtain CPU information. Open file /proc/cpuinfo fail: " << strerror(errno);
LOG_SERVER_WARNING_ << "Cannot obtain CPU information. Open file /proc/cpuinfo fail: " << strerror(errno);
return;
}
std::stringstream cpu_info_ss;
@ -127,12 +127,12 @@ LogCpuInfo() {
auto processor_pos = cpu_info.rfind("processor");
if (std::string::npos == processor_pos) {
SERVER_LOG_WARNING << "Cannot obtain CPU information. No sub string \'processor\'";
LOG_SERVER_WARNING_ << "Cannot obtain CPU information. No sub string \'processor\'";
return;
}
auto sub_str = cpu_info.substr(processor_pos);
SERVER_LOG_DEBUG << "\n\n" << std::string(15, '*') << "CPU" << std::string(15, '*') << "\n\n" << sub_str;
LOG_SERVER_INFO_ << "\n\n" << std::string(15, '*') << "CPU" << std::string(15, '*') << "\n\n" << sub_str;
}
} // namespace server
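SetThreadName and GetThreadName are only declared in the header excerpt above; their definitions are not part of these hunks. A minimal sketch, assuming a plain thread_local store rather than the actual implementation:

// Hypothetical sketch only; the real definitions live elsewhere in this commit.
#include <string>

namespace milvus {

thread_local std::string g_thread_name = "main";  // assumed default for the main thread

void
SetThreadName(const std::string& name) {
    g_thread_name = name;  // remember the caller-supplied name for later log lines
}

std::string
GetThreadName() {
    return g_thread_name;
}

}  // namespace milvus

A worker pool would then call SetThreadName with a name of its choice right after spawning, so the name can be attached to every log entry produced by that thread.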

@ -25,7 +25,7 @@ SignalUtil::HandleSignal(int signum) {
switch (signum) {
case SIGINT:
case SIGUSR2: {
SERVER_LOG_INFO << "Server received signal: " << signum;
LOG_SERVER_INFO_ << "Server received signal: " << signum;
server::Server& server = server::Server::GetInstance();
server.Stop();
@ -33,7 +33,7 @@ SignalUtil::HandleSignal(int signum) {
exit(0);
}
default: {
SERVER_LOG_INFO << "Server received critical signal: " << signum;
LOG_SERVER_INFO_ << "Server received critical signal: " << signum;
SignalUtil::PrintStacktrace();
server::Server& server = server::Server::GetInstance();
@ -46,7 +46,7 @@ SignalUtil::HandleSignal(int signum) {
void
SignalUtil::PrintStacktrace() {
SERVER_LOG_INFO << "Call stack:";
LOG_SERVER_INFO_ << "Call stack:";
const int size = 32;
void* array[size];
@ -54,7 +54,7 @@ SignalUtil::PrintStacktrace() {
char** stacktrace = backtrace_symbols(array, stack_num);
for (int i = 0; i < stack_num; ++i) {
std::string info = stacktrace[i];
SERVER_LOG_INFO << info;
LOG_SERVER_INFO_ << info;
}
free(stacktrace);
}
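HandleSignal and PrintStacktrace cover only the handler side; where the handlers get installed is outside this diff. A plausible registration sketch (the signal set and the namespace qualification are assumptions, and HandleSignal is assumed to be a static member with the void(int) signature used above):

// Hypothetical installation code; not taken from this commit.
#include <csignal>

void
InstallSignalHandlers() {
    std::signal(SIGINT, server::SignalUtil::HandleSignal);   // Ctrl-C: stop the server cleanly
    std::signal(SIGUSR2, server::SignalUtil::HandleSignal);  // external stop request
    std::signal(SIGSEGV, server::SignalUtil::HandleSignal);  // crash: print the stack, then exit
    std::signal(SIGABRT, server::SignalUtil::HandleSignal);
}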

@ -40,31 +40,31 @@ TimeRecorder::PrintTimeRecord(const std::string& msg, double span) {
switch (log_level_) {
case 0: {
SERVER_LOG_TRACE << str_log;
LOG_SERVER_TRACE_ << str_log;
break;
}
case 1: {
SERVER_LOG_DEBUG << str_log;
LOG_SERVER_DEBUG_ << str_log;
break;
}
case 2: {
SERVER_LOG_INFO << str_log;
LOG_SERVER_INFO_ << str_log;
break;
}
case 3: {
SERVER_LOG_WARNING << str_log;
LOG_SERVER_WARNING_ << str_log;
break;
}
case 4: {
SERVER_LOG_ERROR << str_log;
LOG_SERVER_ERROR_ << str_log;
break;
}
case 5: {
SERVER_LOG_FATAL << str_log;
LOG_SERVER_FATAL_ << str_log;
break;
}
default: {
SERVER_LOG_INFO << str_log;
LOG_SERVER_INFO_ << str_log;
break;
}
}
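The switch maps the recorder's numeric log level straight onto the new macros: 0 = trace, 1 = debug, 2 = info, 3 = warning, 4 = error, 5 = fatal, with anything else falling back to info. A hedged usage sketch, assuming the constructor takes the message header plus that numeric level:

// Hypothetical call site; the constructor arguments are inferred from the switch above.
milvus::TimeRecorder recorder("load segment", 1);  // level 1 -> emitted through LOG_SERVER_DEBUG_
// ... timed work ...
// Each PrintTimeRecord() call then writes the header and the measured span at DEBUG level.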

@ -24,7 +24,7 @@ print_timestamp(const std::string& message) {
micros %= 1000000;
double millisecond = (double)micros / 1000.0;
SERVER_LOG_DEBUG << std::fixed << " " << millisecond << "(ms) [timestamp]" << message;
LOG_SERVER_DEBUG_ << std::fixed << " " << millisecond << "(ms) [timestamp]" << message;
}
class TimeRecorder {

@ -45,7 +45,7 @@ CheckParameterRange(const milvus::json& json_params, const std::string& param_na
if (json_params.find(param_name) == json_params.end()) {
std::string msg = "Parameter list must contain: ";
msg += param_name;
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_ARGUMENT, msg);
}
@ -57,13 +57,13 @@ CheckParameterRange(const milvus::json& json_params, const std::string& param_na
std::string msg = "Invalid " + param_name + " value: " + std::to_string(value) + ". Valid range is " +
(min_close ? "[" : "(") + std::to_string(min) + ", " + std::to_string(max) +
(max_closed ? "]" : ")");
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_ARGUMENT, msg);
}
} catch (std::exception& e) {
std::string msg = "Invalid " + param_name + ": ";
msg += e.what();
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_ARGUMENT, msg);
}
@ -75,7 +75,7 @@ CheckParameterExistence(const milvus::json& json_params, const std::string& para
if (json_params.find(param_name) == json_params.end()) {
std::string msg = "Parameter list must contain: ";
msg += param_name;
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_ARGUMENT, msg);
}
@ -83,13 +83,13 @@ CheckParameterExistence(const milvus::json& json_params, const std::string& para
int64_t value = json_params[param_name];
if (value < 0) {
std::string msg = "Invalid " + param_name + " value: " + std::to_string(value);
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_ARGUMENT, msg);
}
} catch (std::exception& e) {
std::string msg = "Invalid " + param_name + ": ";
msg += e.what();
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_ARGUMENT, msg);
}
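CheckParameterRange and CheckParameterExistence gate numeric index parameters before they reach the engine. A hedged call-site sketch (the parameter name, bounds, and json payload below are illustrative, not taken from this commit):

// Illustrative only; assumes the helpers are callable as free functions with these arguments.
milvus::json index_params = {{"nlist", 16384}};

auto status = CheckParameterRange(index_params, "nlist", 1, 65536, true, true);
if (!status.ok()) {
    // reject the request; the helper has already logged the reason via LOG_SERVER_ERROR_
}

status = CheckParameterExistence(index_params, "nprobe");  // fails here: the key is missing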
@ -103,7 +103,7 @@ ValidationUtil::ValidateCollectionName(const std::string& collection_name) {
// Collection name shouldn't be empty.
if (collection_name.empty()) {
std::string msg = "Collection name should not be empty.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_COLLECTION_NAME, msg);
}
@ -111,7 +111,7 @@ ValidationUtil::ValidateCollectionName(const std::string& collection_name) {
// Collection name length shouldn't exceed COLLECTION_NAME_SIZE_LIMIT (255 characters).
if (collection_name.size() > COLLECTION_NAME_SIZE_LIMIT) {
std::string msg = invalid_msg + "The length of a collection name must be less than 255 characters.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_COLLECTION_NAME, msg);
}
@ -119,7 +119,7 @@ ValidationUtil::ValidateCollectionName(const std::string& collection_name) {
char first_char = collection_name[0];
if (first_char != '_' && std::isalpha(first_char) == 0) {
std::string msg = invalid_msg + "The first character of a collection name must be an underscore or letter.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_COLLECTION_NAME, msg);
}
@ -128,7 +128,7 @@ ValidationUtil::ValidateCollectionName(const std::string& collection_name) {
char name_char = collection_name[i];
if (name_char != '_' && std::isalnum(name_char) == 0) {
std::string msg = invalid_msg + "Collection name can only contain numbers, letters, and underscores.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_COLLECTION_NAME, msg);
}
}
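Taken together, the checks above require a non-empty name no longer than COLLECTION_NAME_SIZE_LIMIT, a leading letter or underscore, and only letters, digits, and underscores afterwards. Some illustrative outcomes (assuming Status::ok() reports acceptance):

// Expected results under the rules shown above; illustrative only.
ValidationUtil::ValidateCollectionName("my_collection_1");  // ok
ValidationUtil::ValidateCollectionName("_staging");         // ok: leading underscore is allowed
ValidationUtil::ValidateCollectionName("1st_collection");   // rejected: must not start with a digit
ValidationUtil::ValidateCollectionName("bad-name");         // rejected: '-' is not a legal character
ValidationUtil::ValidateCollectionName("");                 // rejected: empty name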
@ -142,7 +142,7 @@ ValidationUtil::ValidateTableDimension(int64_t dimension, int64_t metric_type) {
std::string msg = "Invalid collection dimension: " + std::to_string(dimension) + ". " +
"The collection dimension must be within the range of 1 ~ " +
std::to_string(COLLECTION_DIMENSION_LIMIT) + ".";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_VECTOR_DIMENSION, msg);
}
@ -150,7 +150,7 @@ ValidationUtil::ValidateTableDimension(int64_t dimension, int64_t metric_type) {
if ((dimension % 8) != 0) {
std::string msg = "Invalid collection dimension: " + std::to_string(dimension) + ". " +
"The collection dimension must be a multiple of 8";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_VECTOR_DIMENSION, msg);
}
}
@ -164,7 +164,7 @@ ValidationUtil::ValidateCollectionIndexType(int32_t index_type) {
if (engine_type <= 0 || engine_type > static_cast<int>(engine::EngineType::MAX_VALUE)) {
std::string msg = "Invalid index type: " + std::to_string(index_type) + ". " +
"Make sure the index type is in IndexType list.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_INDEX_TYPE, msg);
}
@ -172,7 +172,7 @@ ValidationUtil::ValidateCollectionIndexType(int32_t index_type) {
// special case: hybrid index is only available in the customized faiss library
if (engine_type == static_cast<int>(engine::EngineType::FAISS_IVFSQ8H)) {
std::string msg = "Unsupported index type: " + std::to_string(index_type);
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_INDEX_TYPE, msg);
}
#endif
@ -215,7 +215,7 @@ ValidationUtil::ValidateIndexParams(const milvus::json& index_params,
int64_t m_value = index_params[knowhere::IndexParams::m];
if (resset.empty()) {
std::string msg = "Invalid collection dimension, unable to get reasonable values for 'm'";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_COLLECTION_DIMENSION, msg);
}
@ -230,7 +230,7 @@ ValidationUtil::ValidateIndexParams(const milvus::json& index_params,
msg += std::to_string(resset[i]);
}
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_ARGUMENT, msg);
}
@ -364,7 +364,7 @@ ValidationUtil::ValidateCollectionIndexFileSize(int64_t index_file_size) {
std::string msg = "Invalid index file size: " + std::to_string(index_file_size) + ". " +
"The index file size must be within the range of 1 ~ " +
std::to_string(INDEX_FILE_SIZE_LIMIT) + ".";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_INDEX_FILE_SIZE, msg);
}
@ -376,7 +376,7 @@ ValidationUtil::ValidateCollectionIndexMetricType(int32_t metric_type) {
if (metric_type <= 0 || metric_type > static_cast<int32_t>(engine::MetricType::MAX_VALUE)) {
std::string msg = "Invalid index metric type: " + std::to_string(metric_type) + ". " +
"Make sure the metric type is in MetricType list.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_INDEX_METRIC_TYPE, msg);
}
return Status::OK();
@ -387,7 +387,7 @@ ValidationUtil::ValidateSearchTopk(int64_t top_k) {
if (top_k <= 0 || top_k > QUERY_MAX_TOPK) {
std::string msg =
"Invalid topk: " + std::to_string(top_k) + ". " + "The topk must be within the range of 1 ~ 2048.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_TOPK, msg);
}
@ -398,7 +398,7 @@ Status
ValidationUtil::ValidatePartitionName(const std::string& partition_name) {
if (partition_name.empty()) {
std::string msg = "Partition name should not be empty.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_COLLECTION_NAME, msg);
}
@ -406,7 +406,7 @@ ValidationUtil::ValidatePartitionName(const std::string& partition_name) {
// Partition name length shouldn't exceed COLLECTION_NAME_SIZE_LIMIT (255 characters).
if (partition_name.size() > COLLECTION_NAME_SIZE_LIMIT) {
std::string msg = invalid_msg + "The length of a partition name must be less than 255 characters.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_COLLECTION_NAME, msg);
}
@ -414,7 +414,7 @@ ValidationUtil::ValidatePartitionName(const std::string& partition_name) {
char first_char = partition_name[0];
if (first_char != '_' && std::isalpha(first_char) == 0) {
std::string msg = invalid_msg + "The first character of a partition name must be an underscore or letter.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_COLLECTION_NAME, msg);
}
@ -423,7 +423,7 @@ ValidationUtil::ValidatePartitionName(const std::string& partition_name) {
char name_char = partition_name[i];
if (name_char != '_' && std::isalnum(name_char) == 0) {
std::string msg = invalid_msg + "Partition name can only contain numbers, letters, and underscores.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_COLLECTION_NAME, msg);
}
}
@ -440,14 +440,14 @@ ValidationUtil::ValidatePartitionTags(const std::vector<std::string>& partition_
StringHelpFunctions::TrimStringBlank(valid_tag);
if (valid_tag.empty()) {
std::string msg = "Invalid partition tag: " + valid_tag + ". " + "Partition tag should not be empty.";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_PARTITION_TAG, msg);
}
// max length of partition tag
if (valid_tag.length() > 255) {
std::string msg = "Invalid partition tag: " + valid_tag + ". " + "Partition tag exceed max length(255).";
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_PARTITION_TAG, msg);
}
}
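ValidatePartitionTags trims each tag and then applies the emptiness and length checks above. Illustrative outcomes, assuming the trimming behaviour shown:

// Illustrative only.
ValidationUtil::ValidatePartitionTags({"2020_04", "default"});   // ok
ValidationUtil::ValidatePartitionTags({"   "});                  // rejected: blank after trimming
ValidationUtil::ValidatePartitionTags({std::string(256, 'a')});  // rejected: longer than 255 characters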
@ -464,13 +464,13 @@ ValidationUtil::ValidateGpuIndex(int32_t gpu_index) {
if (cuda_err != cudaSuccess) {
std::string msg = "Failed to get gpu card number, cuda error:" + std::to_string(cuda_err);
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_UNEXPECTED_ERROR, msg);
}
if (gpu_index >= num_devices) {
std::string msg = "Invalid gpu index: " + std::to_string(gpu_index);
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_ARGUMENT, msg);
}
#endif
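The cuda_err / num_devices pair handled above presumably comes from a device-count query just before this hunk; a hedged sketch of that call:

// Assumed preceding call; only its error handling is visible in the hunk.
int num_devices = 0;
cudaError_t cuda_err = cudaGetDeviceCount(&num_devices);
// cuda_err != cudaSuccess -> report the failure; otherwise compare gpu_index against num_devices.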
@ -489,7 +489,7 @@ ValidationUtil::GetGpuMemory(int32_t gpu_index, size_t& memory) {
if (cuda_err) {
std::string msg = "Failed to get gpu properties for gpu" + std::to_string(gpu_index) +
" , cuda error:" + std::to_string(cuda_err);
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_UNEXPECTED_ERROR, msg);
}
@ -511,12 +511,12 @@ ValidationUtil::ValidateIpAddress(const std::string& ip_address) {
return Status::OK();
case 0: {
std::string msg = "Invalid IP address: " + ip_address;
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_INVALID_ARGUMENT, msg);
}
default: {
std::string msg = "IP address conversion error: " + ip_address;
SERVER_LOG_ERROR << msg;
LOG_SERVER_ERROR_ << msg;
return Status(SERVER_UNEXPECTED_ERROR, msg);
}
}
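The 1 / 0 / other cases handled in the switch match the return contract of inet_pton, so the conversion being checked is presumably something like the following (an assumption; the call itself sits outside this hunk):

// Hypothetical conversion feeding the switch above.
#include <arpa/inet.h>

struct in_addr address;
int result = inet_pton(AF_INET, ip_address.c_str(), &address);
// result == 1 -> valid IPv4 address
// result == 0 -> the string is not a parsable address
// result <  0 -> conversion failed (errno is set)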
@ -583,7 +583,7 @@ ValidationUtil::ValidateDbURI(const std::string& uri) {
std::string dialect = pieces_match[1].str();
std::transform(dialect.begin(), dialect.end(), dialect.begin(), ::tolower);
if (dialect.find("mysql") == std::string::npos && dialect.find("sqlite") == std::string::npos) {
SERVER_LOG_ERROR << "Invalid dialect in URI: dialect = " << dialect;
LOG_SERVER_ERROR_ << "Invalid dialect in URI: dialect = " << dialect;
okay = false;
}
@ -593,7 +593,7 @@ ValidationUtil::ValidateDbURI(const std::string& uri) {
std::string host = pieces_match[4].str();
if (!host.empty() && host != "localhost") {
if (ValidateIpAddress(host) != SERVER_SUCCESS) {
SERVER_LOG_ERROR << "Invalid host ip address in uri = " << host;
LOG_SERVER_ERROR_ << "Invalid host ip address in uri = " << host;
okay = false;
}
}
@ -603,12 +603,12 @@ ValidationUtil::ValidateDbURI(const std::string& uri) {
if (!port.empty()) {
auto status = ValidateStringIsNumber(port);
if (!status.ok()) {
SERVER_LOG_ERROR << "Invalid port in uri = " << port;
LOG_SERVER_ERROR_ << "Invalid port in uri = " << port;
okay = false;
}
}
} else {
SERVER_LOG_ERROR << "Wrong URI format: URI = " << uri;
LOG_SERVER_ERROR_ << "Wrong URI format: URI = " << uri;
okay = false;
}
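ValidateDbURI splits the URI with a regex whose capture groups are referenced above (1 = dialect, 4 = host, 5 = port). The exact pattern is not part of this hunk; a rough equivalent that accepts the usual mysql/sqlite forms could look like this:

// Approximation for illustration only; the real pattern lives earlier in ValidateDbURI.
#include <regex>
#include <string>

std::string uri = "mysql://root:123456@127.0.0.1:3306/milvus";
std::regex uri_regex("^([a-zA-Z0-9]+)://(\\w*):?(\\w*)@?([\\w\\.-]*):?(\\d*)/?(\\w*)$");
std::smatch pieces_match;

if (std::regex_match(uri, pieces_match, uri_regex)) {
    // pieces_match[1] == "mysql", pieces_match[4] == "127.0.0.1", pieces_match[5] == "3306"
}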