mirror of https://github.com/milvus-io/milvus.git
refine error code
Former-commit-id: 616ad6b30b1292a910a93e5fddb9da5787b57ac1
pull/191/head
parent a7dd4f0117
commit 3d1c10d5ee
@@ -100,7 +100,7 @@ Status DBImpl::DropAll() {
 
 Status DBImpl::CreateTable(meta::TableSchema& table_schema) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     meta::TableSchema temp_schema = table_schema;
@@ -110,7 +110,7 @@ Status DBImpl::CreateTable(meta::TableSchema& table_schema) {
 
 Status DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     //dates partly delete files of the table but currently we don't support
@@ -136,7 +136,7 @@ Status DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& date
 
 Status DBImpl::DescribeTable(meta::TableSchema& table_schema) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     auto stat = meta_ptr_->DescribeTable(table_schema);
@@ -146,7 +146,7 @@ Status DBImpl::DescribeTable(meta::TableSchema& table_schema) {
 
 Status DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     return meta_ptr_->HasTable(table_id, has_or_not);
@@ -154,7 +154,7 @@ Status DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
 
 Status DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     return meta_ptr_->AllTables(table_schema_array);
@@ -162,7 +162,7 @@ Status DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
 
 Status DBImpl::PreloadTable(const std::string &table_id) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     meta::DatePartionedTableFilesSchema files;
@@ -184,7 +184,7 @@ Status DBImpl::PreloadTable(const std::string &table_id) {
             ExecutionEnginePtr engine = EngineFactory::Build(file.dimension_, file.location_, (EngineType)file.engine_type_, (MetricType)file.metric_type_, file.nlist_);
             if(engine == nullptr) {
                 ENGINE_LOG_ERROR << "Invalid engine type";
-                return Status::Error("Invalid engine type");
+                return Status(DB_ERROR, "Invalid engine type");
             }
 
             size += engine->PhysicalSize();
@@ -197,7 +197,7 @@ Status DBImpl::PreloadTable(const std::string &table_id) {
             } catch (std::exception &ex) {
                 std::string msg = "Pre-load table encounter exception: " + std::string(ex.what());
                 ENGINE_LOG_ERROR << msg;
-                return Status::Error(msg);
+                return Status(DB_ERROR, msg);
             }
         }
     }
@@ -207,7 +207,7 @@ Status DBImpl::PreloadTable(const std::string &table_id) {
 
 Status DBImpl::UpdateTableFlag(const std::string &table_id, int64_t flag) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     return meta_ptr_->UpdateTableFlag(table_id, flag);
@@ -215,7 +215,7 @@ Status DBImpl::UpdateTableFlag(const std::string &table_id, int64_t flag) {
 
 Status DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     return meta_ptr_->Count(table_id, row_count);
@@ -225,7 +225,7 @@ Status DBImpl::InsertVectors(const std::string& table_id_,
                              uint64_t n, const float* vectors, IDNumbers& vector_ids_) {
     // ENGINE_LOG_DEBUG << "Insert " << n << " vectors to cache";
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     Status status;
@@ -314,7 +314,7 @@ Status DBImpl::DropIndex(const std::string& table_id) {
 Status DBImpl::Query(const std::string &table_id, uint64_t k, uint64_t nq, uint64_t nprobe,
                      const float *vectors, QueryResults &results) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     meta::DatesT dates = {utils::GetDate()};
@@ -326,7 +326,7 @@ Status DBImpl::Query(const std::string &table_id, uint64_t k, uint64_t nq, uint6
 Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq, uint64_t nprobe,
                      const float* vectors, const meta::DatesT& dates, QueryResults& results) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     ENGINE_LOG_DEBUG << "Query by dates for table: " << table_id;
@@ -354,7 +354,7 @@ Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>
                      uint64_t k, uint64_t nq, uint64_t nprobe, const float* vectors,
                      const meta::DatesT& dates, QueryResults& results) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     ENGINE_LOG_DEBUG << "Query by file ids for table: " << table_id;
@@ -382,7 +382,7 @@ Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>
     }
 
     if(file_id_array.empty()) {
-        return Status::Error("Invalid file id");
+        return Status(DB_ERROR, "Invalid file id");
     }
 
     cache::CpuCacheMgr::GetInstance()->PrintInfo(); //print cache info before query
@@ -393,7 +393,7 @@ Status DBImpl::Query(const std::string& table_id, const std::vector<std::string>
 
 Status DBImpl::Size(uint64_t& result) {
     if (shutting_down_.load(std::memory_order_acquire)){
-        return Status::Error("Milsvus server is shutdown!");
+        return Status(DB_ERROR, "Milsvus server is shutdown!");
     }
 
     return meta_ptr_->Size(result);
@@ -600,7 +600,7 @@ Status DBImpl::MergeFiles(const std::string& table_id, const meta::DateT& date,
         std::cout << "ERROR: failed to persist merged index file: " << table_file.location_
                   << ", possible out of disk space" << std::endl;
 
-        return Status::Error(msg);
+        return Status(DB_ERROR, msg);
     }
 
     //step 4: update table files state
@@ -708,7 +708,7 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
                                                    (MetricType)file.metric_type_, file.nlist_);
     if(to_index == nullptr) {
         ENGINE_LOG_ERROR << "Invalid engine type";
-        return Status::Error("Invalid engine type");
+        return Status(DB_ERROR, "Invalid engine type");
     }
 
     try {
@@ -755,7 +755,7 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
 
             std::cout << "ERROR: failed to build index, index file is too large or gpu memory is not enough" << std::endl;
 
-            return Status::Error(msg);
+            return Status(DB_ERROR, msg);
         }
 
         //step 4: if table has been deleted, dont save index file
@@ -781,7 +781,7 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
                 std::cout << "ERROR: failed to persist index file: " << table_file.location_
                           << ", possible out of disk space" << std::endl;
 
-                return Status::Error(msg);
+                return Status(DB_ERROR, msg);
             }
 
             //step 6: update meta
@@ -816,7 +816,7 @@ Status DBImpl::BuildIndex(const meta::TableFileSchema& file) {
     } catch (std::exception& ex) {
         std::string msg = "Build index encounter exception: " + std::string(ex.what());
         ENGINE_LOG_ERROR << msg;
-        return Status::Error(msg);
+        return Status(DB_ERROR, msg);
    }
 
     return Status::OK();
@@ -12,57 +12,62 @@ namespace zilliz {
 namespace milvus {
 namespace engine {
 
-const char* Status::CopyState(const char* state) {
-    uint32_t size;
-    std::memcpy(&size, state, sizeof(size));
-    char* result = new char[size+5];
-    memcpy(result, state, size+5);
-    return result;
+constexpr int CODE_WIDTH = sizeof(ErrorCode);
+
+Status::Status(ErrorCode code, const std::string& msg) {
+    //4 bytes store code
+    //4 bytes store message length
+    //the left bytes store message string
+    const uint32_t length = (uint32_t)msg.size();
+    char* result = new char[length + sizeof(length) + CODE_WIDTH];
+    std::memcpy(result, &code, CODE_WIDTH);
+    std::memcpy(result + CODE_WIDTH, &length, sizeof(length));
+    memcpy(result + sizeof(length) + CODE_WIDTH, msg.data(), length);
+
+    state_ = result;
 }
 
-Status::Status(Code code, const std::string& msg, const std::string& msg2) {
-    assert(code != kOK);
-    const uint32_t len1 = msg.size();
-    const uint32_t len2 = msg2.size();
-    const uint32_t size = len1 + (len2 ? (2+len2) : 0);
-    char* result = new char[size+5];
-    std::memcpy(result, &size, sizeof(size));
-    result[4] = static_cast<char>(code);
-    memcpy(result+5, msg.data(), len1);
-    if (len2) {
-        result[5 + len1] = ':';
-        result[6 + len1] = ' ';
-        memcpy(result + 7 + len1, msg2.data(), len2);
-    }
-    state_ = result;
+Status::Status()
+    : state_(nullptr) {
+
+}
+
+Status::~Status() {
+    delete[] state_;
+}
+
+const char* Status::CopyState(const char* state) {
+    uint32_t length = 0;
+    std::memcpy(&length, state + CODE_WIDTH, sizeof(length));
+    int buff_len = length + sizeof(length) + CODE_WIDTH;
+    char* result = new char[buff_len];
+    memcpy(result, state, buff_len);
+    return result;
 }
 
 std::string Status::ToString() const {
     if (state_ == nullptr) return "OK";
-    char tmp[30];
+    char tmp[32];
     const char* type;
     switch (code()) {
-        case kOK:
+        case DB_SUCCESS:
             type = "OK";
             break;
-        case kNotFound:
-            type = "NotFound: ";
-            break;
-        case kError:
+        case DB_ERROR:
            type = "Error: ";
            break;
-        case kInvalidDBPath:
-            type = "InvalidDBPath: ";
-            break;
-        case kGroupError:
-            type = "GroupError: ";
-            break;
-        case kDBTransactionError:
+        case DB_META_TRANSACTION_FAILED:
            type = "DBTransactionError: ";
            break;
-        case kAlreadyExist:
+        case DB_NOT_FOUND:
+            type = "NotFound: ";
+            break;
+        case DB_ALREADY_EXIST:
            type = "AlreadyExist: ";
            break;
+        case DB_INVALID_PATH:
+            type = "InvalidPath: ";
+            break;
        default:
            snprintf(tmp, sizeof(tmp), "Unkown code(%d): ",
                     static_cast<int>(code()));
@@ -71,9 +76,9 @@ std::string Status::ToString() const {
     }
 
     std::string result(type);
-    uint32_t length;
-    memcpy(&length, state_, sizeof(length));
-    result.append(state_ + 5, length);
+    uint32_t length = 0;
+    memcpy(&length, state_ + CODE_WIDTH, sizeof(length));
+    result.append(state_ + sizeof(length) + CODE_WIDTH, length);
     return result;
 }
 
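Note on the new state layout: the rewritten Status above stores its state in one heap buffer holding a fixed-width error code (sizeof(ErrorCode) bytes), then a 4-byte message length, then the message bytes, replacing the old leveldb-style layout (4-byte length, 1-byte code, optional second message). CopyState and ToString read the same three fields back in that order. A minimal standalone sketch of the packing and the round trip, assuming ErrorCode is a 4-byte integer (an illustration, not the Milvus class itself):

    // Sketch only: PackState mirrors what Status::Status(ErrorCode, msg) does.
    #include <cstdint>
    #include <cstring>
    #include <iostream>
    #include <string>

    using ErrorCode = int32_t;                    // assumption for this sketch
    constexpr int CODE_WIDTH = sizeof(ErrorCode); // 4 bytes for the code

    // Layout: [code (4 bytes)][message length (4 bytes)][message bytes ...]
    const char* PackState(ErrorCode code, const std::string& msg) {
        const uint32_t length = static_cast<uint32_t>(msg.size());
        char* result = new char[length + sizeof(length) + CODE_WIDTH];
        std::memcpy(result, &code, CODE_WIDTH);
        std::memcpy(result + CODE_WIDTH, &length, sizeof(length));
        std::memcpy(result + CODE_WIDTH + sizeof(length), msg.data(), length);
        return result;
    }

    int main() {
        // 0x40002 is DB_ERROR (DB_ERROR_CODE_BASE + 2) in the Error.h hunks below.
        const char* state = PackState(0x40002, "server is shutdown");

        ErrorCode code;                            // first field: the code
        std::memcpy(&code, state, CODE_WIDTH);

        uint32_t length = 0;                       // second field: message length
        std::memcpy(&length, state + CODE_WIDTH, sizeof(length));

        std::string message(state + CODE_WIDTH + sizeof(length), length);
        std::cout << std::hex << code << std::dec << ": " << message << std::endl;

        delete[] state;
        return 0;
    }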
@@ -5,8 +5,9 @@
  ******************************************************************************/
 #pragma once
 
-#include <string>
+#include "utils/Error.h"
+
+#include <string>
 
 namespace zilliz {
 namespace milvus {
@@ -14,9 +15,9 @@ namespace engine {
 
 class Status {
  public:
-    Status() noexcept : state_(nullptr) {}
-
-    ~Status() { delete[] state_; }
+    Status(ErrorCode code, const std::string &msg);
+    Status();
+    ~Status();
 
     Status(const Status &rhs);
 
@@ -31,64 +32,17 @@ class Status {
     static Status
     OK() { return Status(); }
 
-    static Status
-    NotFound(const std::string &msg, const std::string &msg2 = "") {
-        return Status(kNotFound, msg, msg2);
-    }
-    static Status
-    Error(const std::string &msg, const std::string &msg2 = "") {
-        return Status(kError, msg, msg2);
-    }
-
-    static Status
-    InvalidDBPath(const std::string &msg, const std::string &msg2 = "") {
-        return Status(kInvalidDBPath, msg, msg2);
-    }
-    static Status
-    GroupError(const std::string &msg, const std::string &msg2 = "") {
-        return Status(kGroupError, msg, msg2);
-    }
-    static Status
-    DBTransactionError(const std::string &msg, const std::string &msg2 = "") {
-        return Status(kDBTransactionError, msg, msg2);
-    }
-
-    static Status
-    AlreadyExist(const std::string &msg, const std::string &msg2 = "") {
-        return Status(kAlreadyExist, msg, msg2);
-    }
-
-    bool ok() const { return state_ == nullptr; }
-
-    bool IsNotFound() const { return code() == kNotFound; }
-    bool IsError() const { return code() == kError; }
-
-    bool IsInvalidDBPath() const { return code() == kInvalidDBPath; }
-    bool IsGroupError() const { return code() == kGroupError; }
-    bool IsDBTransactionError() const { return code() == kDBTransactionError; }
-    bool IsAlreadyExist() const { return code() == kAlreadyExist; }
+    bool ok() const { return state_ == nullptr || code() == DB_SUCCESS; }
 
     std::string ToString() const;
 
+    ErrorCode code() const {
+        return (state_ == nullptr) ? DB_SUCCESS : *(ErrorCode*)(state_);
+    }
+
  private:
     const char *state_ = nullptr;
 
-    enum Code {
-        kOK = 0,
-        kNotFound,
-        kError,
-
-        kInvalidDBPath,
-        kGroupError,
-        kDBTransactionError,
-
-        kAlreadyExist,
-    };
-
-    Code code() const {
-        return (state_ == nullptr) ? kOK : static_cast<Code>(state_[4]);
-    }
-    Status(Code code, const std::string &msg, const std::string &msg2);
     static const char *CopyState(const char *s);
 
 }; // Status
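Note for callers: with the static factory helpers (Status::Error, Status::NotFound, ...) and the Is...() predicates removed from the header above, call sites construct a Status from a DB_* error code plus a message and branch on code(). A hedged sketch of the resulting pattern with hypothetical function names (assumes the Status class and the DB_* codes from utils/Error.h are in scope):

    #include <string>

    // Producing an error: construct Status directly from a code and a message
    // (previously the Status::NotFound(msg) / Status::Error(msg) factories).
    Status CheckTableExists(bool table_found, const std::string &table_id) {
        if (!table_found) {
            return Status(DB_NOT_FOUND, "Table " + table_id + " not found");
        }
        return Status::OK();
    }

    // Consuming an error: branch on code() (previously status.IsNotFound() etc.).
    std::string DescribeFailure(const Status &status) {
        if (status.ok()) {
            return "ok";
        }
        if (status.code() == DB_NOT_FOUND) {
            return "table does not exist";
        }
        return status.ToString();   // e.g. "Error: ..." for DB_ERROR
    }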
@@ -70,7 +70,7 @@ Status CreateTablePath(const DBMetaOptions& options, const std::string& table_id
     auto status = server::CommonUtil::CreateDirectory(table_path);
     if (status != 0) {
         ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
-        return Status::Error("Failed to create table path");
+        return Status(DB_ERROR, "Failed to create table path");
     }
 
     for(auto& path : options.slave_paths) {
@@ -78,7 +78,7 @@ Status CreateTablePath(const DBMetaOptions& options, const std::string& table_id
         status = server::CommonUtil::CreateDirectory(table_path);
         if (status != 0) {
             ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
-            return Status::Error("Failed to create table path");
+            return Status(DB_ERROR, "Failed to create table path");
         }
     }
 
@@ -110,7 +110,7 @@ Status CreateTableFilePath(const DBMetaOptions& options, meta::TableFileSchema&
     auto status = server::CommonUtil::CreateDirectory(parent_path);
     if (status != 0) {
         ENGINE_LOG_ERROR << "Create directory " << parent_path << " Error";
-        return Status::DBTransactionError("Failed to create partition directory");
+        return Status(DB_ERROR, "Failed to create partition directory");
     }
 
     table_file.location_ = parent_path + "/" + table_file.file_id_;
@@ -137,7 +137,7 @@ Status GetTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& tab
 
     std::string msg = "Table file doesn't exist: " + table_file.file_id_;
     ENGINE_LOG_ERROR << msg;
-    return Status::Error(msg);
+    return Status(DB_ERROR, msg);
 }
 
 Status DeleteTableFilePath(const DBMetaOptions& options, meta::TableFileSchema& table_file) {
@@ -86,7 +86,7 @@ VecIndexPtr ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
 Status ExecutionEngineImpl::AddWithIds(long n, const float *xdata, const long *xids) {
     auto ec = index_->Add(n, xdata, xids);
     if (ec != KNOWHERE_SUCCESS) {
-        return Status::Error("Add error");
+        return Status(DB_ERROR, "Add error");
     }
     return Status::OK();
 }
@@ -118,7 +118,7 @@ size_t ExecutionEngineImpl::PhysicalSize() const {
 Status ExecutionEngineImpl::Serialize() {
     auto ec = write_index(index_, location_);
     if (ec != KNOWHERE_SUCCESS) {
-        return Status::Error("Serialize: write to disk error");
+        return Status(DB_ERROR, "Serialize: write to disk error");
     }
     return Status::OK();
 }
@@ -134,15 +134,15 @@ Status ExecutionEngineImpl::Load(bool to_cache) {
         if(index_ == nullptr) {
             std::string msg = "Failed to load index from " + location_;
             ENGINE_LOG_ERROR << msg;
-            return Status::Error(msg);
+            return Status(DB_ERROR, msg);
         } else {
             ENGINE_LOG_DEBUG << "Disk io from: " << location_;
         }
     } catch (knowhere::KnowhereException &e) {
         ENGINE_LOG_ERROR << e.what();
-        return Status::Error(e.what());
+        return Status(DB_ERROR, e.what());
     } catch (std::exception &e) {
-        return Status::Error(e.what());
+        return Status(DB_ERROR, e.what());
     }
 }
 
@@ -160,7 +160,7 @@ Status ExecutionEngineImpl::CopyToGpu(uint64_t device_id) {
     } else {
         if(index_ == nullptr) {
             ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to copy to gpu";
-            return Status::Error("index is null");
+            return Status(DB_ERROR, "index is null");
         }
 
         try {
@@ -168,9 +168,9 @@ Status ExecutionEngineImpl::CopyToGpu(uint64_t device_id) {
             ENGINE_LOG_DEBUG << "CPU to GPU" << device_id;
         } catch (knowhere::KnowhereException &e) {
             ENGINE_LOG_ERROR << e.what();
-            return Status::Error(e.what());
+            return Status(DB_ERROR, e.what());
         } catch (std::exception &e) {
-            return Status::Error(e.what());
+            return Status(DB_ERROR, e.what());
         }
     }
 
@@ -189,7 +189,7 @@ Status ExecutionEngineImpl::CopyToCpu() {
     } else {
         if(index_ == nullptr) {
             ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to copy to cpu";
-            return Status::Error("index is null");
+            return Status(DB_ERROR, "index is null");
         }
 
        try {
@@ -197,9 +197,9 @@ Status ExecutionEngineImpl::CopyToCpu() {
             ENGINE_LOG_DEBUG << "GPU to CPU";
         } catch (knowhere::KnowhereException &e) {
             ENGINE_LOG_ERROR << e.what();
-            return Status::Error(e.what());
+            return Status(DB_ERROR, e.what());
         } catch (std::exception &e) {
-            return Status::Error(e.what());
+            return Status(DB_ERROR, e.what());
         }
     }
 
@@ -223,7 +223,7 @@ ExecutionEnginePtr ExecutionEngineImpl::Clone() {
 
 Status ExecutionEngineImpl::Merge(const std::string &location) {
     if (location == location_) {
-        return Status::Error("Cannot Merge Self");
+        return Status(DB_ERROR, "Cannot Merge Self");
     }
     ENGINE_LOG_DEBUG << "Merge index file: " << location << " to: " << location_;
 
@@ -235,26 +235,26 @@ Status ExecutionEngineImpl::Merge(const std::string &location) {
             to_merge = read_index(location);
         } catch (knowhere::KnowhereException &e) {
             ENGINE_LOG_ERROR << e.what();
-            return Status::Error(e.what());
+            return Status(DB_ERROR, e.what());
         } catch (std::exception &e) {
-            return Status::Error(e.what());
+            return Status(DB_ERROR, e.what());
         }
     }
 
     if(index_ == nullptr) {
         ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to merge";
-        return Status::Error("index is null");
+        return Status(DB_ERROR, "index is null");
     }
 
     if (auto file_index = std::dynamic_pointer_cast<BFIndex>(to_merge)) {
         auto ec = index_->Add(file_index->Count(), file_index->GetRawVectors(), file_index->GetRawIds());
         if (ec != KNOWHERE_SUCCESS) {
             ENGINE_LOG_ERROR << "Merge: Add Error";
-            return Status::Error("Merge: Add Error");
+            return Status(DB_ERROR, "Merge: Add Error");
         }
         return Status::OK();
     } else {
-        return Status::Error("file index type is not idmap");
+        return Status(DB_ERROR, "file index type is not idmap");
     }
 }
 
@@ -297,7 +297,7 @@ Status ExecutionEngineImpl::Search(long n,
                                    long *labels) const {
     if(index_ == nullptr) {
         ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to search";
-        return Status::Error("index is null");
+        return Status(DB_ERROR, "index is null");
     }
 
     ENGINE_LOG_DEBUG << "Search Params: [k] " << k << " [nprobe] " << nprobe;
@@ -305,7 +305,7 @@ Status ExecutionEngineImpl::Search(long n,
     auto ec = index_->Search(n, data, distances, labels, cfg);
     if (ec != KNOWHERE_SUCCESS) {
         ENGINE_LOG_ERROR << "Search error";
-        return Status::Error("Search: Search Error");
+        return Status(DB_ERROR, "Search: Search Error");
     }
     return Status::OK();
 }
@@ -38,7 +38,7 @@ Status MemTable::Add(VectorSource::Ptr &source, IDNumbers &vector_ids) {
         if (!status.ok()) {
             std::string err_msg = "MemTable::Add failed: " + status.ToString();
             ENGINE_LOG_ERROR << err_msg;
-            return Status::Error(err_msg);
+            return Status(DB_ERROR, err_msg);
         }
     }
     return Status::OK();
@@ -58,7 +58,7 @@ Status MemTable::Serialize() {
         if (!status.ok()) {
             std::string err_msg = "MemTable::Serialize failed: " + status.ToString();
             ENGINE_LOG_ERROR << err_msg;
-            return Status::Error(err_msg);
+            return Status(DB_ERROR, err_msg);
         }
         std::lock_guard<std::mutex> lock(mutex_);
         mem_table_file = mem_table_file_list_.erase(mem_table_file);
@@ -49,7 +49,7 @@ Status MemTableFile::Add(const VectorSource::Ptr &source, IDNumbers& vector_ids)
         std::string err_msg = "MemTableFile::Add: table_file_schema dimension = " +
             std::to_string(table_file_schema_.dimension_) + ", table_id = " + table_file_schema_.table_id_;
         ENGINE_LOG_ERROR << err_msg;
-        return Status::Error(err_msg);
+        return Status(DB_ERROR, err_msg);
     }
 
     size_t single_vector_mem_size = table_file_schema_.dimension_ * VECTOR_TYPE_SIZE;
(One file's diff is suppressed because it is too large.)
@@ -29,9 +29,15 @@ using namespace sqlite_orm;
 
 namespace {
 
-Status HandleException(const std::string& desc, std::exception &e) {
-    ENGINE_LOG_ERROR << desc << ": " << e.what();
-    return Status::DBTransactionError(desc, e.what());
+Status HandleException(const std::string &desc, const char* what = nullptr) {
+    if(what == nullptr) {
+        ENGINE_LOG_ERROR << desc;
+        return Status(DB_META_TRANSACTION_FAILED, desc);
+    } else {
+        std::string msg = desc + ":" + what;
+        ENGINE_LOG_ERROR << msg;
+        return Status(DB_META_TRANSACTION_FAILED, msg);
+    }
 }
 
 }
@@ -97,8 +103,9 @@ Status SqliteMetaImpl::Initialize() {
     if (!boost::filesystem::is_directory(options_.path)) {
         auto ret = boost::filesystem::create_directory(options_.path);
         if (!ret) {
-            ENGINE_LOG_ERROR << "Failed to create db directory " << options_.path;
-            return Status::InvalidDBPath("Failed to create db directory", options_.path);
+            std::string msg = "Failed to create db directory " + options_.path;
+            ENGINE_LOG_ERROR << msg;
+            return Status(DB_INVALID_PATH, msg);
         }
     }
 
@@ -141,7 +148,7 @@ Status SqliteMetaImpl::DropPartitionsByDates(const std::string &table_id,
                 in(&TableFileSchema::date_, dates)
             ));
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when drop partition", e);
+        return HandleException("Encounter exception when drop partition", e.what());
     }
 
     return Status::OK();
@@ -162,10 +169,10 @@ Status SqliteMetaImpl::CreateTable(TableSchema &table_schema) {
                                                      where(c(&TableSchema::table_id_) == table_schema.table_id_));
             if (table.size() == 1) {
                 if(TableSchema::TO_DELETE == std::get<0>(table[0])) {
-                    return Status::Error("Table already exists and it is in delete state, please wait a second");
+                    return Status(DB_ERROR, "Table already exists and it is in delete state, please wait a second");
                 } else {
                     // Change from no error to already exist.
-                    return Status::AlreadyExist("Table already exists");
+                    return Status(DB_ALREADY_EXIST, "Table already exists");
                 }
             }
         }
@@ -176,15 +183,14 @@ Status SqliteMetaImpl::CreateTable(TableSchema &table_schema) {
         try {
             auto id = ConnectorPtr->insert(table_schema);
             table_schema.id_ = id;
-        } catch (...) {
-            ENGINE_LOG_ERROR << "sqlite transaction failed";
-            return Status::DBTransactionError("Add Table Error");
+        } catch (std::exception &e) {
+            return HandleException("Encounter exception when create table", e.what());
        }
 
        return utils::CreateTablePath(options_, table_schema.table_id_);
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when create table", e);
+        return HandleException("Encounter exception when create table", e.what());
     }
 
     return Status::OK();
@@ -208,7 +214,7 @@ Status SqliteMetaImpl::DeleteTable(const std::string& table_id) {
             ));
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when delete table", e);
+        return HandleException("Encounter exception when delete table", e.what());
     }
 
     return Status::OK();
@@ -233,7 +239,7 @@ Status SqliteMetaImpl::DeleteTableFiles(const std::string& table_id) {
             ));
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when delete table files", e);
+        return HandleException("Encounter exception when delete table files", e.what());
     }
 
     return Status::OK();
@@ -266,11 +272,11 @@ Status SqliteMetaImpl::DescribeTable(TableSchema &table_schema) {
             table_schema.nlist_ = std::get<7>(groups[0]);
             table_schema.metric_type_ = std::get<8>(groups[0]);
         } else {
-            return Status::NotFound("Table " + table_schema.table_id_ + " not found");
+            return Status(DB_NOT_FOUND, "Table " + table_schema.table_id_ + " not found");
         }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when describe table", e);
+        return HandleException("Encounter exception when describe table", e.what());
     }
 
     return Status::OK();
@@ -280,7 +286,7 @@ Status SqliteMetaImpl::FilesByType(const std::string& table_id,
                                    const std::vector<int>& file_types,
                                    std::vector<std::string>& file_ids) {
     if(file_types.empty()) {
-        return Status::Error("file types array is empty");
+        return Status(DB_ERROR, "file types array is empty");
     }
 
     try {
@@ -330,7 +336,7 @@ Status SqliteMetaImpl::FilesByType(const std::string& table_id,
         }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when check non index files", e);
+        return HandleException("Encounter exception when check non index files", e.what());
     }
     return Status::OK();
 }
@@ -366,7 +372,7 @@ Status SqliteMetaImpl::UpdateTableIndexParam(const std::string &table_id, const
 
             ConnectorPtr->update(table_schema);
         } else {
-            return Status::NotFound("Table " + table_id + " not found");
+            return Status(DB_NOT_FOUND, "Table " + table_id + " not found");
         }
 
         //set all backup file to raw
@@ -382,7 +388,7 @@ Status SqliteMetaImpl::UpdateTableIndexParam(const std::string &table_id, const
 
     } catch (std::exception &e) {
         std::string msg = "Encounter exception when update table index: table_id = " + table_id;
-        return HandleException(msg, e);
+        return HandleException(msg, e.what());
     }
 
     return Status::OK();
@@ -403,7 +409,7 @@ Status SqliteMetaImpl::UpdateTableFlag(const std::string &table_id, int64_t flag
 
     } catch (std::exception &e) {
         std::string msg = "Encounter exception when update table flag: table_id = " + table_id;
-        return HandleException(msg, e);
+        return HandleException(msg, e.what());
     }
 
     return Status::OK();
@@ -424,11 +430,11 @@ Status SqliteMetaImpl::DescribeTableIndex(const std::string &table_id, TableInde
             index.nlist_ = std::get<1>(groups[0]);
             index.metric_type_ = std::get<2>(groups[0]);
         } else {
-            return Status::NotFound("Table " + table_id + " not found");
+            return Status(DB_NOT_FOUND, "Table " + table_id + " not found");
         }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when describe index", e);
+        return HandleException("Encounter exception when describe index", e.what());
     }
 
     return Status::OK();
@@ -475,7 +481,7 @@ Status SqliteMetaImpl::DropTableIndex(const std::string &table_id) {
             ));
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when delete table index files", e);
+        return HandleException("Encounter exception when delete table index files", e.what());
     }
 
     return Status::OK();
@@ -496,7 +502,7 @@ Status SqliteMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) {
         }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when lookup table", e);
+        return HandleException("Encounter exception when lookup table", e.what());
     }
 
     return Status::OK();
@@ -532,7 +538,7 @@ Status SqliteMetaImpl::AllTables(std::vector<TableSchema>& table_schema_array) {
         }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when lookup all tables", e);
+        return HandleException("Encounter exception when lookup all tables", e.what());
     }
 
     return Status::OK();
@@ -571,8 +577,8 @@ Status SqliteMetaImpl::CreateTableFile(TableFileSchema &file_schema) {
 
         return utils::CreateTableFilePath(options_, file_schema);
 
-    } catch (std::exception& ex) {
-        return HandleException("Encounter exception when create table file", ex);
+    } catch (std::exception& e) {
+        return HandleException("Encounter exception when create table file", e.what());
     }
 
     return Status::OK();
@@ -629,7 +635,7 @@ Status SqliteMetaImpl::FilesToIndex(TableFilesSchema &files) {
        }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when iterate raw files", e);
+        return HandleException("Encounter exception when iterate raw files", e.what());
     }
 
     return Status::OK();
@@ -714,7 +720,7 @@ Status SqliteMetaImpl::FilesToSearch(const std::string &table_id,
             ENGINE_LOG_ERROR << "No file to search for table: " << table_id;
         }
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when iterate index files", e);
+        return HandleException("Encounter exception when iterate index files", e.what());
     }
 
 
@@ -777,7 +783,7 @@ Status SqliteMetaImpl::FilesToMerge(const std::string &table_id,
            files[table_file.date_].push_back(table_file);
        }
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when iterate merge files", e);
+        return HandleException("Encounter exception when iterate merge files", e.what());
     }
 
     return Status::OK();
@@ -828,7 +834,7 @@ Status SqliteMetaImpl::GetTableFiles(const std::string& table_id,
            table_files.emplace_back(file_schema);
        }
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when lookup table files", e);
+        return HandleException("Encounter exception when lookup table files", e.what());
     }
 
     return Status::OK();
@@ -860,7 +866,7 @@ Status SqliteMetaImpl::Archive() {
                     c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE
                 ));
         } catch (std::exception &e) {
-            return HandleException("Encounter exception when update table files", e);
+            return HandleException("Encounter exception when update table files", e.what());
         }
     }
     if (criteria == engine::ARCHIVE_CONF_DISK) {
@@ -890,7 +896,7 @@ Status SqliteMetaImpl::Size(uint64_t &result) {
        }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when calculte db size", e);
+        return HandleException("Encounter exception when calculte db size", e.what());
     }
 
     return Status::OK();
@@ -947,12 +953,11 @@ Status SqliteMetaImpl::DiscardFiles(long to_discard_size) {
        });
 
        if (!commited) {
-            ENGINE_LOG_ERROR << "sqlite transaction failed";
-            return Status::DBTransactionError("Update table file error");
+            return HandleException("DiscardFiles error: sqlite transaction failed");
        }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when discard table file", e);
+        return HandleException("Encounter exception when discard table file", e.what());
     }
 
     return DiscardFiles(to_discard_size);
@@ -980,7 +985,7 @@ Status SqliteMetaImpl::UpdateTableFile(TableFileSchema &file_schema) {
     } catch (std::exception &e) {
         std::string msg = "Exception update table file: table_id = " + file_schema.table_id_
             + " file_id = " + file_schema.file_id_;
-        return HandleException(msg, e);
+        return HandleException(msg, e.what());
     }
     return Status::OK();
 }
@@ -1001,7 +1006,7 @@ Status SqliteMetaImpl::UpdateTableFilesToIndex(const std::string& table_id) {
                 c(&TableFileSchema::file_type_) == (int) TableFileSchema::RAW
             ));
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when update table files to to_index", e);
+        return HandleException("Encounter exception when update table files to to_index", e.what());
     }
 
     return Status::OK();
@@ -1042,12 +1047,11 @@ Status SqliteMetaImpl::UpdateTableFiles(TableFilesSchema &files) {
        });
 
        if (!commited) {
-            ENGINE_LOG_ERROR << "sqlite transaction failed";
-            return Status::DBTransactionError("Update table files error");
+            return HandleException("UpdateTableFiles error: sqlite transaction failed");
        }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when update table files", e);
+        return HandleException("Encounter exception when update table files", e.what());
     }
     return Status::OK();
 }
@@ -1092,12 +1096,11 @@ Status SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
        });
 
        if (!commited) {
-            ENGINE_LOG_ERROR << "sqlite transaction failed";
-            return Status::DBTransactionError("Clean files error");
+            return HandleException("CleanUpFilesWithTTL error: sqlite transaction failed");
        }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when clean table files", e);
+        return HandleException("Encounter exception when clean table files", e.what());
     }
 
     //remove to_delete tables
@@ -1121,12 +1124,11 @@ Status SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
        });
 
        if (!commited) {
-            ENGINE_LOG_ERROR << "sqlite transaction failed";
-            return Status::DBTransactionError("Clean files error");
+            return HandleException("CleanUpFilesWithTTL error: sqlite transaction failed");
        }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when clean table files", e);
+        return HandleException("Encounter exception when clean table files", e.what());
     }
 
     //remove deleted table folder
@@ -1143,7 +1145,7 @@ Status SqliteMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
        }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when delete table folder", e);
+        return HandleException("Encounter exception when delete table folder", e.what());
     }
 
     return Status::OK();
@@ -1172,12 +1174,11 @@ Status SqliteMetaImpl::CleanUp() {
        });
 
        if (!commited) {
-            ENGINE_LOG_ERROR << "sqlite transaction failed";
-            return Status::DBTransactionError("Clean files error");
+            return HandleException("CleanUp error: sqlite transaction failed");
        }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when clean table file", e);
+        return HandleException("Encounter exception when clean table file", e.what());
     }
 
     return Status::OK();
@@ -1211,7 +1212,7 @@ Status SqliteMetaImpl::Count(const std::string &table_id, uint64_t &result) {
        }
 
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when calculate table file size", e);
+        return HandleException("Encounter exception when calculate table file size", e.what());
     }
     return Status::OK();
 }
@@ -1223,7 +1224,7 @@ Status SqliteMetaImpl::DropAll() {
         ConnectorPtr->drop_table("Tables");
         ConnectorPtr->drop_table("TableFiles");
     } catch (std::exception &e) {
-        return HandleException("Encounter exception when drop all meta", e);
+        return HandleException("Encounter exception when drop all meta", e.what());
     }
 
     return Status::OK();
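Note on the reworked helper: HandleException now takes a description plus an optional C string, so the same function serves both the transaction-failed branches (no exception object in scope) and the catch blocks that forward e.what(). A compact standalone sketch of that overload behaviour, with illustrative stand-ins for Status and the logging macro (not the real types):

    #include <cstdint>
    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Stand-ins so the sketch compiles on its own.
    using ErrorCode = int32_t;
    constexpr ErrorCode DB_META_TRANSACTION_FAILED = 0x40001;   // ToDbErrorCode(1)

    struct Status {
        ErrorCode code;
        std::string message;
    };

    // Same shape as the reworked helper: append the exception text when present.
    Status HandleException(const std::string &desc, const char *what = nullptr) {
        if (what == nullptr) {
            std::cerr << desc << std::endl;              // ENGINE_LOG_ERROR in the real code
            return Status{DB_META_TRANSACTION_FAILED, desc};
        }
        std::string msg = desc + ":" + what;
        std::cerr << msg << std::endl;
        return Status{DB_META_TRANSACTION_FAILED, msg};
    }

    int main() {
        // Transaction-failed path: no exception object available.
        HandleException("CleanUp error: sqlite transaction failed");

        // Exception path: forward e.what() as the second argument.
        try {
            throw std::runtime_error("disk I/O error");
        } catch (std::exception &e) {
            HandleException("Encounter exception when clean table file", e.what());
        }
        return 0;
    }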
@@ -134,7 +134,7 @@ Status SearchTask::ClusterResult(const std::vector<long> &output_ids,
         std::string msg = "Invalid id array size: " + std::to_string(output_ids.size()) +
                           " distance array size: " + std::to_string(output_distence.size());
         ENGINE_LOG_ERROR << msg;
-        return Status::Error(msg);
+        return Status(DB_ERROR, msg);
     }
 
     result_set.clear();
@@ -249,7 +249,7 @@ Status SearchTask::TopkResult(SearchContext::ResultSet &result_src,
     if (result_src.size() != result_target.size()) {
         std::string msg = "Invalid result set size";
         ENGINE_LOG_ERROR << msg;
-        return Status::Error(msg);
+        return Status(DB_ERROR, msg);
     }
 
     std::function<void(size_t, size_t)> ReduceWorker = [&](size_t from_index, size_t to_index) {
@@ -202,7 +202,7 @@ Status XSearchTask::ClusterResult(const std::vector<long> &output_ids,
         std::string msg = "Invalid id array size: " + std::to_string(output_ids.size()) +
                           " distance array size: " + std::to_string(output_distence.size());
         ENGINE_LOG_ERROR << msg;
-        return Status::Error(msg);
+        return Status(DB_ERROR, msg);
     }
 
     result_set.clear();
@@ -317,7 +317,7 @@ Status XSearchTask::TopkResult(SearchContext::ResultSet &result_src,
     if (result_src.size() != result_target.size()) {
         std::string msg = "Invalid result set size";
         ENGINE_LOG_ERROR << msg;
-        return Status::Error(msg);
+        return Status(DB_ERROR, msg);
     }
 
     std::function<void(size_t, size_t)> ReduceWorker = [&](size_t from_index, size_t to_index) {
@@ -160,7 +160,7 @@ CreateTableTask::OnExecute() {
     engine::Status stat = DBWrapper::DB()->CreateTable(table_info);
     if (!stat.ok()) {
         //table could exist
-        if(stat.IsAlreadyExist()) {
+        if(stat.code() == DB_ALREADY_EXIST) {
             return SetError(SERVER_INVALID_TABLE_NAME, stat.ToString());
         }
         return SetError(DB_META_TRANSACTION_FAILED, stat.ToString());
@@ -351,7 +351,7 @@ DropTableTask::OnExecute() {
     table_info.table_id_ = table_name_;
     engine::Status stat = DBWrapper::DB()->DescribeTable(table_info);
     if (!stat.ok()) {
-        if (stat.IsNotFound()) {
+        if (stat.code() == DB_NOT_FOUND) {
             return SetError(SERVER_TABLE_NOT_EXIST, "Table " + table_name_ + " not exists");
         } else {
             return SetError(DB_META_TRANSACTION_FAILED, stat.ToString());
@@ -450,7 +450,7 @@ InsertTask::OnExecute() {
     table_info.table_id_ = insert_param_->table_name();
     engine::Status stat = DBWrapper::DB()->DescribeTable(table_info);
     if (!stat.ok()) {
-        if (stat.IsNotFound()) {
+        if (stat.code() == DB_NOT_FOUND) {
             return SetError(SERVER_TABLE_NOT_EXIST,
                             "Table " + insert_param_->table_name() + " not exists");
         } else {
@@ -586,7 +586,7 @@ SearchTask::OnExecute() {
     table_info.table_id_ = table_name_;
     engine::Status stat = DBWrapper::DB()->DescribeTable(table_info);
     if (!stat.ok()) {
-        if (stat.IsNotFound()) {
+        if (stat.code() == DB_NOT_FOUND) {
             return SetError(SERVER_TABLE_NOT_EXIST, "Table " + table_name_ + " not exists");
         } else {
             return SetError(DB_META_TRANSACTION_FAILED, stat.ToString());
@@ -811,7 +811,7 @@ DeleteByRangeTask::OnExecute() {
     table_info.table_id_ = table_name;
     engine::Status stat = DBWrapper::DB()->DescribeTable(table_info);
     if (!stat.ok()) {
-        if (stat.IsNotFound()) {
+        if (stat.code() == DB_NOT_FOUND) {
             return SetError(SERVER_TABLE_NOT_EXIST, "Table " + table_name + " not exists");
         } else {
             return SetError(DB_META_TRANSACTION_FAILED, stat.ToString());
@@ -21,11 +21,11 @@ ToServerErrorCode(const ErrorCode error_code) {
     return SERVER_ERROR_CODE_BASE + error_code;
 }
 
-constexpr ErrorCode ENGINE_SUCCESS = 0;
-constexpr ErrorCode ENGINE_ERROR_CODE_BASE = 0x40000;
+constexpr ErrorCode DB_SUCCESS = 0;
+constexpr ErrorCode DB_ERROR_CODE_BASE = 0x40000;
 
 constexpr ErrorCode
-ToEngineErrorCode(const ErrorCode error_code) {
-    return ENGINE_ERROR_CODE_BASE + error_code;
+ToDbErrorCode(const ErrorCode error_code) {
+    return DB_ERROR_CODE_BASE + error_code;
 }
 
 constexpr ErrorCode KNOWHERE_SUCCESS = 0;
@@ -67,9 +67,12 @@ constexpr ErrorCode SERVER_INVALID_INDEX_NLIST = ToServerErrorCode(114);
 constexpr ErrorCode SERVER_INVALID_INDEX_METRIC_TYPE = ToServerErrorCode(115);
 constexpr ErrorCode SERVER_INVALID_INDEX_FILE_SIZE = ToServerErrorCode(116);
 
-//engine error code
-constexpr ErrorCode DB_META_TRANSACTION_FAILED = ToEngineErrorCode(1);
-constexpr ErrorCode DB_TABLE_NOT_FOUND = ToEngineErrorCode(2);
+//db error code
+constexpr ErrorCode DB_META_TRANSACTION_FAILED = ToDbErrorCode(1);
+constexpr ErrorCode DB_ERROR = ToDbErrorCode(2);
+constexpr ErrorCode DB_NOT_FOUND = ToDbErrorCode(3);
+constexpr ErrorCode DB_ALREADY_EXIST = ToDbErrorCode(4);
+constexpr ErrorCode DB_INVALID_PATH = ToDbErrorCode(5);
 
 //knowhere error code
 constexpr ErrorCode KNOWHERE_ERROR = ToKnowhereErrorCode(1);
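Note on the renamed constants: the former ENGINE_* base becomes a db-level base, and the db error codes are simple offsets from 0x40000. A quick standalone check of the resulting values (a sketch under the assumption that ErrorCode is a 32-bit integer typedef):

    #include <cstdint>

    using ErrorCode = int32_t;   // assumption for this sketch

    constexpr ErrorCode DB_SUCCESS = 0;
    constexpr ErrorCode DB_ERROR_CODE_BASE = 0x40000;

    constexpr ErrorCode ToDbErrorCode(const ErrorCode error_code) {
        return DB_ERROR_CODE_BASE + error_code;
    }

    constexpr ErrorCode DB_META_TRANSACTION_FAILED = ToDbErrorCode(1);  // 0x40001
    constexpr ErrorCode DB_ERROR = ToDbErrorCode(2);                    // 0x40002
    constexpr ErrorCode DB_NOT_FOUND = ToDbErrorCode(3);                // 0x40003
    constexpr ErrorCode DB_ALREADY_EXIST = ToDbErrorCode(4);            // 0x40004
    constexpr ErrorCode DB_INVALID_PATH = ToDbErrorCode(5);             // 0x40005

    static_assert(DB_ERROR == 0x40002, "DB_ERROR is base + 2");
    static_assert(DB_INVALID_PATH == 0x40005, "DB_INVALID_PATH is base + 5");

    int main() { return 0; }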
@@ -15,6 +15,7 @@
 #include "db/Utils.h"
 #include "db/meta/MetaConsts.h"
 
+using namespace zilliz::milvus;
 using namespace zilliz::milvus::engine;
 
 TEST_F(MetaTest, TABLE_TEST) {
@@ -38,7 +39,7 @@ TEST_F(MetaTest, TABLE_TEST) {
 
     table.table_id_ = table_id;
     status = impl_->CreateTable(table);
-    ASSERT_TRUE(status.IsAlreadyExist());
+    ASSERT_EQ(status.code(), DB_ALREADY_EXIST);
 
     table.table_id_ = "";
     status = impl_->CreateTable(table);
@@ -41,24 +41,29 @@ TEST(DBMiscTest, STATUS_TEST) {
     std::string str = status.ToString();
     ASSERT_FALSE(str.empty());
 
-    status = engine::Status::Error("wrong", "mistake");
-    ASSERT_TRUE(status.IsError());
+    status = engine::Status(DB_ERROR, "mistake");
+    ASSERT_EQ(status.code(), DB_ERROR);
     str = status.ToString();
     ASSERT_FALSE(str.empty());
 
-    status = engine::Status::NotFound("wrong", "mistake");
-    ASSERT_TRUE(status.IsNotFound());
+    status = engine::Status(DB_NOT_FOUND, "mistake");
+    ASSERT_EQ(status.code(), DB_NOT_FOUND);
     str = status.ToString();
     ASSERT_FALSE(str.empty());
 
-    status = engine::Status::DBTransactionError("wrong", "mistake");
-    ASSERT_TRUE(status.IsDBTransactionError());
+    status = engine::Status(DB_ALREADY_EXIST, "mistake");
+    ASSERT_EQ(status.code(), DB_ALREADY_EXIST);
+    str = status.ToString();
+    ASSERT_FALSE(str.empty());
+
+    status = engine::Status(DB_META_TRANSACTION_FAILED, "mistake");
+    ASSERT_EQ(status.code(), DB_META_TRANSACTION_FAILED);
     str = status.ToString();
     ASSERT_FALSE(str.empty());
 
     engine::Status status_copy = engine::Status::OK();
     CopyStatus(status_copy, status);
-    ASSERT_TRUE(status.IsDBTransactionError());
+    ASSERT_EQ(status.code(), DB_META_TRANSACTION_FAILED);
 }
 
 TEST(DBMiscTest, OPTIONS_TEST) {
@@ -19,6 +19,7 @@
 
 #include <iostream>
 
+using namespace zilliz::milvus;
 using namespace zilliz::milvus::engine;
 
 TEST_F(MySqlMetaTest, TABLE_TEST) {
@@ -42,7 +43,7 @@ TEST_F(MySqlMetaTest, TABLE_TEST) {
 
     table.table_id_ = table_id;
     status = impl_->CreateTable(table);
-    ASSERT_TRUE(status.IsAlreadyExist());
+    ASSERT_EQ(status.code(), DB_ALREADY_EXIST);
 
     table.table_id_ = "";
     status = impl_->CreateTable(table);
@@ -127,7 +127,7 @@ TEST_F(MetricTest, Metric_Tes) {
 TEST_F(MetricTest, Collector_Metrics_Test){
     engine::Status status = engine::Status::OK();
     server::CollectInsertMetrics insert_metrics0(0, status);
-    status = engine::Status::Error("error");
+    status = engine::Status(DB_ERROR, "error");
     server::CollectInsertMetrics insert_metrics1(0, status);
 
     server::CollectQueryMetrics query_metrics(10);