enhance: Enhance and correct exception module (#33705)

#33704

Signed-off-by: luzhang <luzhang@zilliz.com>
Co-authored-by: luzhang <luzhang@zilliz.com>
pull/34042/head
zhagnlu 2024-06-23 21:22:01 +08:00 committed by GitHub
parent 6c1d815894
commit 0d7ea8ec42
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
34 changed files with 147 additions and 339 deletions

View File

@ -47,7 +47,7 @@ class Channel {
inner_.pop(result);
if (!result.has_value()) {
if (ex_.has_value()) {
throw ex_.value();
std::rethrow_exception(ex_.value());
}
return false;
}
@ -56,7 +56,7 @@ class Channel {
}
void
close(std::optional<MilvusException> ex = std::nullopt) {
close(std::optional<std::exception_ptr> ex = std::nullopt) {
if (ex.has_value()) {
ex_ = std::move(ex);
}
@ -65,6 +65,6 @@ class Channel {
private:
oneapi::tbb::concurrent_bounded_queue<std::optional<T>> inner_{};
std::optional<MilvusException> ex_{};
std::optional<std::exception_ptr> ex_{};
};
} // namespace milvus

View File

@ -45,15 +45,15 @@ EasyAssertInfo(bool value,
if (!value) {
std::string info;
if (!expr_str.empty()) {
info += fmt::format("Assert \"{}\" at {}:{}\n",
expr_str,
std::string(filename),
std::to_string(lineno));
info += fmt::format("Assert \"{}\" ", expr_str);
}
if (!extra_info.empty()) {
info += " => " + std::string(extra_info);
}
info += fmt::format(
" at {}:{}\n", std::string(filename), std::to_string(lineno));
std::cout << info << std::endl;
throw SegcoreError(error_code, std::string(info));
}
}

View File

@ -118,10 +118,9 @@ FailureCStatus(int code, const std::string& msg) {
inline CStatus
FailureCStatus(const std::exception* ex) {
if (dynamic_cast<const SegcoreError*>(ex) != nullptr) {
auto segcore_error = dynamic_cast<const SegcoreError*>(ex);
return CStatus{static_cast<int>(segcore_error->get_error_code()),
strdup(ex->what())};
if (auto segcore_err = dynamic_cast<const SegcoreError*>(ex)) {
return CStatus{static_cast<int>(segcore_err->get_error_code()),
strdup(segcore_err->what())};
}
return CStatus{static_cast<int>(UnexpectedError), strdup(ex->what())};
}

View File

@ -20,186 +20,6 @@
namespace milvus {
// Base exception type for the milvus module. Stores a copy of the message
// and returns it verbatim from what().
class MilvusException : public std::exception {
 public:
    // msg: human-readable description; copied so the exception owns it.
    explicit MilvusException(const std::string& msg)
        : std::exception(), exception_message_(msg) {
    }
    // Overrides std::exception::what(); `override` makes the intent explicit
    // and lets the compiler catch signature drift.
    const char*
    what() const noexcept override {
        return exception_message_.c_str();
    }
    // Base dtor is virtual, so a defaulted override suffices (Rule of Zero).
    ~MilvusException() override = default;

 private:
    std::string exception_message_;
};
// Thrown by code paths that are declared but intentionally not implemented.
class NotImplementedException : public std::exception {
 public:
    // msg: human-readable description; copied so the exception owns it.
    explicit NotImplementedException(const std::string& msg)
        : std::exception(), exception_message_(msg) {
    }
    // Overrides std::exception::what(); marked `override` so the compiler
    // verifies the signature matches the base.
    const char*
    what() const noexcept override {
        return exception_message_.c_str();
    }
    // Base dtor is virtual, so a defaulted override suffices.
    ~NotImplementedException() override = default;

 private:
    std::string exception_message_;
};
// Thrown when an operation receives a data type it does not support.
class NotSupportedDataTypeException : public std::exception {
 public:
    // msg: human-readable description; copied so the exception owns it.
    explicit NotSupportedDataTypeException(const std::string& msg)
        : std::exception(), exception_message_(msg) {
    }
    // Overrides std::exception::what(); marked `override` so the compiler
    // verifies the signature matches the base.
    const char*
    what() const noexcept override {
        return exception_message_.c_str();
    }
    // Base dtor is virtual, so a defaulted override suffices.
    ~NotSupportedDataTypeException() override = default;

 private:
    std::string exception_message_;
};
// Reports failures from low-level (unistd-style) system calls; the caller
// supplies the message, which std::runtime_error stores and exposes via what().
class UnistdException : public std::runtime_error {
 public:
    explicit UnistdException(const std::string& msg) : std::runtime_error(msg) {
    }
    // std::runtime_error's dtor is virtual; default the override instead of
    // hand-writing an empty body.
    ~UnistdException() override = default;
};
// Exceptions for storage module
// Base exception for the local chunk manager (storage module); message
// handling is inherited from std::runtime_error.
class LocalChunkManagerException : public std::runtime_error {
 public:
    explicit LocalChunkManagerException(const std::string& msg)
        : std::runtime_error(msg) {
    }
    // std::runtime_error's dtor is virtual; default the override instead of
    // hand-writing an empty body.
    ~LocalChunkManagerException() override = default;
};
class InvalidPathException : public LocalChunkManagerException {
public:
explicit InvalidPathException(const std::string& msg)
: LocalChunkManagerException(msg) {
}
virtual ~InvalidPathException() {
}
};
class OpenFileException : public LocalChunkManagerException {
public:
explicit OpenFileException(const std::string& msg)
: LocalChunkManagerException(msg) {
}
virtual ~OpenFileException() {
}
};
class CreateFileException : public LocalChunkManagerException {
public:
explicit CreateFileException(const std::string& msg)
: LocalChunkManagerException(msg) {
}
virtual ~CreateFileException() {
}
};
class ReadFileException : public LocalChunkManagerException {
public:
explicit ReadFileException(const std::string& msg)
: LocalChunkManagerException(msg) {
}
virtual ~ReadFileException() {
}
};
class WriteFileException : public LocalChunkManagerException {
public:
explicit WriteFileException(const std::string& msg)
: LocalChunkManagerException(msg) {
}
virtual ~WriteFileException() {
}
};
class PathAlreadyExistException : public LocalChunkManagerException {
public:
explicit PathAlreadyExistException(const std::string& msg)
: LocalChunkManagerException(msg) {
}
virtual ~PathAlreadyExistException() {
}
};
class DirNotExistException : public LocalChunkManagerException {
public:
explicit DirNotExistException(const std::string& msg)
: LocalChunkManagerException(msg) {
}
virtual ~DirNotExistException() {
}
};
// Base exception for MinIO/object-storage errors (storage module); message
// handling is inherited from std::runtime_error.
class MinioException : public std::runtime_error {
 public:
    explicit MinioException(const std::string& msg) : std::runtime_error(msg) {
    }
    // std::runtime_error's dtor is virtual; default the override instead of
    // hand-writing an empty body.
    ~MinioException() override = default;
};
class InvalidBucketNameException : public MinioException {
public:
explicit InvalidBucketNameException(const std::string& msg)
: MinioException(msg) {
}
virtual ~InvalidBucketNameException() {
}
};
class ObjectNotExistException : public MinioException {
public:
explicit ObjectNotExistException(const std::string& msg)
: MinioException(msg) {
}
virtual ~ObjectNotExistException() {
}
};
class S3ErrorException : public MinioException {
public:
explicit S3ErrorException(const std::string& msg) : MinioException(msg) {
}
virtual ~S3ErrorException() {
}
};
// Reports failures in the DiskANN file manager; message handling is
// inherited from std::runtime_error.
class DiskANNFileManagerException : public std::runtime_error {
 public:
    explicit DiskANNFileManagerException(const std::string& msg)
        : std::runtime_error(msg) {
    }
    // std::runtime_error's dtor is virtual; default the override instead of
    // hand-writing an empty body.
    ~DiskANNFileManagerException() override = default;
};
// Reports failures surfaced by Arrow-related code paths; message handling is
// inherited from std::runtime_error.
class ArrowException : public std::runtime_error {
 public:
    explicit ArrowException(const std::string& msg) : std::runtime_error(msg) {
    }
    // std::runtime_error's dtor is virtual; default the override instead of
    // hand-writing an empty body.
    ~ArrowException() override = default;
};
// Exceptions for executor module
class ExecDriverException : public std::exception {
public:

View File

@ -172,10 +172,10 @@ FieldDataImpl<Type, is_type_entire_row>::FillFieldData(
return FillFieldData(values.data(), element_count);
}
default: {
throw SegcoreError(DataTypeInvalid,
GetName() + "::FillFieldData" +
" not support data type " +
GetDataTypeName(data_type_));
PanicInfo(DataTypeInvalid,
GetName() + "::FillFieldData" +
" not support data type " +
GetDataTypeName(data_type_));
}
}
}
@ -223,9 +223,9 @@ InitScalarFieldData(const DataType& type, int64_t cap_rows) {
case DataType::JSON:
return std::make_shared<FieldData<Json>>(type, cap_rows);
default:
throw NotSupportedDataTypeException(
"InitScalarFieldData not support data type " +
GetDataTypeName(type));
PanicInfo(DataTypeInvalid,
"InitScalarFieldData not support data type " +
GetDataTypeName(type));
}
}

View File

@ -133,8 +133,10 @@ GetDataTypeSize(DataType data_type, int dim = 1) {
// method must handle this case themselves and must not pass
// VECTOR_SPARSE_FLOAT data_type.
default: {
throw SegcoreError(DataTypeInvalid,
fmt::format("invalid type is {}", data_type));
PanicInfo(
DataTypeInvalid,
fmt::format("failed to get data type size, invalid type {}",
data_type));
}
}
}

View File

@ -66,7 +66,7 @@ class BaseConfig {
virtual const std::unordered_map<std::string, std::string>&
values() const {
throw NotImplementedException("method values() is not supported");
PanicInfo(NotImplemented, "method values() is not supported");
}
virtual ~BaseConfig() = default;

View File

@ -20,6 +20,7 @@
#include <string>
#include <vector>
#include "common/EasyAssert.h"
#include "common/Types.h"
#include "common/Vector.h"
#include "exec/Driver.h"
@ -182,14 +183,13 @@ class SourceOperator : public Operator {
void
AddInput(RowVectorPtr& /* unused */) override {
throw NotImplementedException(
"SourceOperator does not support addInput()");
PanicInfo(NotImplemented, "SourceOperator does not support addInput()");
}
void
NoMoreInput() override {
throw NotImplementedException(
"SourceOperator does not support noMoreInput()");
PanicInfo(NotImplemented,
"SourceOperator does not support noMoreInput()");
}
};

View File

@ -50,8 +50,8 @@ struct ExprInfo {
case GenericValue::VAL_NOT_SET:
return true;
default:
throw NotImplementedException(
"Not supported GenericValue type");
PanicInfo(NotImplemented,
"Not supported GenericValue type");
}
}
};
@ -77,8 +77,8 @@ struct ExprInfo {
case GenericValue::VAL_NOT_SET:
break;
default:
throw NotImplementedException(
"Not supported GenericValue type");
PanicInfo(NotImplemented,
"Not supported GenericValue type");
}
return h;
}

View File

@ -80,8 +80,7 @@ BitmapIndex<T>::Build(size_t n, const T* data) {
return;
}
if (n == 0) {
throw SegcoreError(DataIsEmpty,
"BitmapIndex can not build null values");
PanicInfo(DataIsEmpty, "BitmapIndex can not build null values");
}
T* p = const_cast<T*>(data);
@ -151,8 +150,7 @@ BitmapIndex<T>::BuildWithFieldData(
total_num_rows += field_data->get_num_rows();
}
if (total_num_rows == 0) {
throw SegcoreError(DataIsEmpty,
"scalar bitmap index can not build null values");
PanicInfo(DataIsEmpty, "scalar bitmap index can not build null values");
}
total_num_rows_ = total_num_rows;
@ -590,8 +588,8 @@ BitmapIndex<T>::RangeForBitset(const T value, const OpType op) {
break;
}
default: {
throw SegcoreError(OpTypeInvalid,
fmt::format("Invalid OperatorType: {}", op));
PanicInfo(OpTypeInvalid,
fmt::format("Invalid OperatorType: {}", op));
}
}
@ -660,8 +658,8 @@ BitmapIndex<T>::RangeForRoaring(const T value, const OpType op) {
break;
}
default: {
throw SegcoreError(OpTypeInvalid,
fmt::format("Invalid OperatorType: {}", op));
PanicInfo(OpTypeInvalid,
fmt::format("Invalid OperatorType: {}", op));
}
}
@ -825,11 +823,10 @@ BitmapIndex<T>::Reverse_Lookup(size_t idx) const {
}
}
}
throw SegcoreError(
UnexpectedError,
fmt::format(
"scalar bitmap index can not lookup target value of index {}",
idx));
PanicInfo(UnexpectedError,
fmt::format(
"scalar bitmap index can not lookup target value of index {}",
idx));
}
template <typename T>
@ -867,11 +864,10 @@ BitmapIndex<T>::ShouldSkip(const T lower_value,
break;
}
default:
throw SegcoreError(
OpTypeInvalid,
fmt::format("Invalid OperatorType for "
"checking scalar index optimization: {}",
op));
PanicInfo(OpTypeInvalid,
fmt::format("Invalid OperatorType for "
"checking scalar index optimization: {}",
op));
}
return should_skip;
};

View File

@ -68,7 +68,7 @@ IndexFactory::CreatePrimitiveScalarIndex<std::string>(
}
return CreateStringIndexMarisa(file_manager_context);
#else
throw SegcoreError(Unsupported, "unsupported platform");
PanicInfo(Unsupported, "unsupported platform");
#endif
}
@ -106,7 +106,7 @@ IndexFactory::CreatePrimitiveScalarIndex<std::string>(
}
return CreateStringIndexMarisa(file_manager_context, space);
#else
throw SegcoreError(Unsupported, "unsupported platform");
PanicInfo(Unsupported, "unsupported platform");
#endif
}
@ -169,7 +169,7 @@ IndexFactory::CreatePrimitiveScalarIndex(
return CreatePrimitiveScalarIndex<std::string>(
index_type, file_manager_context);
default:
throw SegcoreError(
PanicInfo(
DataTypeInvalid,
fmt::format("invalid data type to build index: {}", data_type));
}
@ -261,7 +261,7 @@ IndexFactory::CreateVectorIndex(
index_type, metric_type, version, file_manager_context);
}
default:
throw SegcoreError(
PanicInfo(
DataTypeInvalid,
fmt::format("invalid data type to build disk index: {}",
data_type));
@ -286,7 +286,7 @@ IndexFactory::CreateVectorIndex(
index_type, metric_type, version, file_manager_context);
}
default:
throw SegcoreError(
PanicInfo(
DataTypeInvalid,
fmt::format("invalid data type to build mem index: {}",
data_type));
@ -347,7 +347,7 @@ IndexFactory::CreateVectorIndex(
file_manager_context);
}
default:
throw SegcoreError(
PanicInfo(
DataTypeInvalid,
fmt::format("invalid data type to build disk index: {}",
data_type));
@ -372,7 +372,7 @@ IndexFactory::CreateVectorIndex(
create_index_info, file_manager_context, space);
}
default:
throw SegcoreError(
PanicInfo(
DataTypeInvalid,
fmt::format("invalid data type to build mem index: {}",
data_type));

View File

@ -290,8 +290,8 @@ InvertedIndexTantivy<T>::Range(T value, OpType op) {
apply_hits(bitset, array, true);
} break;
default:
throw SegcoreError(OpTypeInvalid,
fmt::format("Invalid OperatorType: {}", op));
PanicInfo(OpTypeInvalid,
fmt::format("Invalid OperatorType: {}", op));
}
return bitset;

View File

@ -65,9 +65,8 @@ ScalarIndex<T>::Query(const DatasetPtr& dataset) {
case OpType::PrefixMatch:
case OpType::PostfixMatch:
default:
throw SegcoreError(
OpTypeInvalid,
fmt::format("unsupported operator type: {}", op));
PanicInfo(OpTypeInvalid,
fmt::format("unsupported operator type: {}", op));
}
}

View File

@ -82,8 +82,7 @@ ScalarIndexSort<T>::BuildV2(const Config& config) {
total_num_rows += data->get_num_rows();
}
if (total_num_rows == 0) {
throw SegcoreError(DataIsEmpty,
"ScalarIndexSort cannot build null values!");
PanicInfo(DataIsEmpty, "ScalarIndexSort cannot build null values!");
}
data_.reserve(total_num_rows);
@ -126,8 +125,7 @@ ScalarIndexSort<T>::Build(size_t n, const T* values) {
if (is_built_)
return;
if (n == 0) {
throw SegcoreError(DataIsEmpty,
"ScalarIndexSort cannot build null values!");
PanicInfo(DataIsEmpty, "ScalarIndexSort cannot build null values!");
}
data_.reserve(n);
idx_to_offsets_.resize(n);
@ -151,8 +149,7 @@ ScalarIndexSort<T>::BuildWithFieldData(
total_num_rows += data->get_num_rows();
}
if (total_num_rows == 0) {
throw SegcoreError(DataIsEmpty,
"ScalarIndexSort cannot build null values!");
PanicInfo(DataIsEmpty, "ScalarIndexSort cannot build null values!");
}
data_.reserve(total_num_rows);
@ -386,8 +383,8 @@ ScalarIndexSort<T>::Range(const T value, const OpType op) {
data_.begin(), data_.end(), IndexStructure<T>(value));
break;
default:
throw SegcoreError(OpTypeInvalid,
fmt::format("Invalid OperatorType: {}", op));
PanicInfo(OpTypeInvalid,
fmt::format("Invalid OperatorType: {}", op));
}
for (; lb < ub; ++lb) {
bitset[lb->idx_] = true;
@ -475,11 +472,10 @@ ScalarIndexSort<T>::ShouldSkip(const T lower_value,
break;
}
default:
throw SegcoreError(
OpTypeInvalid,
fmt::format("Invalid OperatorType for "
"checking scalar index optimization: {}",
op));
PanicInfo(OpTypeInvalid,
fmt::format("Invalid OperatorType for "
"checking scalar index optimization: {}",
op));
}
return shouldSkip;
}

View File

@ -123,7 +123,7 @@ StringIndexMarisa::BuildV2(const Config& config) {
void
StringIndexMarisa::Build(const Config& config) {
if (built_) {
throw SegcoreError(IndexAlreadyBuild, "index has been built");
PanicInfo(IndexAlreadyBuild, "index has been built");
}
auto insert_files =
@ -175,7 +175,7 @@ StringIndexMarisa::BuildWithFieldData(
void
StringIndexMarisa::Build(size_t n, const std::string* values) {
if (built_) {
throw SegcoreError(IndexAlreadyBuild, "index has been built");
PanicInfo(IndexAlreadyBuild, "index has been built");
}
marisa::Keyset keyset;
@ -267,9 +267,8 @@ StringIndexMarisa::LoadWithoutAssemble(const BinarySet& set,
if (written != len) {
file.Close();
remove(file_name.c_str());
throw SegcoreError(
ErrorCode::UnistdError,
fmt::format("write index to fd error: {}", strerror(errno)));
PanicInfo(ErrorCode::UnistdError,
fmt::format("write index to fd error: {}", strerror(errno)));
}
file.Seek(0, SEEK_SET);
@ -442,7 +441,7 @@ StringIndexMarisa::Range(std::string value, OpType op) {
break;
}
default:
throw SegcoreError(
PanicInfo(
OpTypeInvalid,
fmt::format("Invalid OperatorType: {}", static_cast<int>(op)));
}

View File

@ -327,10 +327,9 @@ ReadDataFromFD(int fd, void* buf, size_t size, size_t chunk_size) {
const size_t count = (size < chunk_size) ? size : chunk_size;
const ssize_t size_read = read(fd, buf, count);
if (size_read != count) {
throw SegcoreError(
ErrorCode::UnistdError,
"read data from fd error, returned read size is " +
std::to_string(size_read));
PanicInfo(ErrorCode::UnistdError,
"read data from fd error, returned read size is " +
std::to_string(size_read));
}
buf = static_cast<char*>(buf) + size_read;

View File

@ -67,9 +67,9 @@ VectorDiskAnnIndex<T>::VectorDiskAnnIndex(
} else {
auto err = get_index_obj.error();
if (err == knowhere::Status::invalid_index_error) {
throw SegcoreError(ErrorCode::Unsupported, get_index_obj.what());
PanicInfo(ErrorCode::Unsupported, get_index_obj.what());
}
throw SegcoreError(ErrorCode::KnowhereError, get_index_obj.what());
PanicInfo(ErrorCode::KnowhereError, get_index_obj.what());
}
}
@ -106,9 +106,9 @@ VectorDiskAnnIndex<T>::VectorDiskAnnIndex(
} else {
auto err = get_index_obj.error();
if (err == knowhere::Status::invalid_index_error) {
throw SegcoreError(ErrorCode::Unsupported, get_index_obj.what());
PanicInfo(ErrorCode::Unsupported, get_index_obj.what());
}
throw SegcoreError(ErrorCode::KnowhereError, get_index_obj.what());
PanicInfo(ErrorCode::KnowhereError, get_index_obj.what());
}
}

View File

@ -65,9 +65,10 @@ class VectorIndex : public IndexBase {
VectorIterators(const DatasetPtr dataset,
const knowhere::Json& json,
const BitsetView& bitset) const {
throw std::runtime_error("VectorIndex:" + this->GetIndexType() +
" didn't implement VectorIterator interface, "
"there must be sth wrong in the code");
PanicInfo(NotImplemented,
"VectorIndex:" + this->GetIndexType() +
" didn't implement VectorIterator interface, "
"there must be sth wrong in the code");
}
virtual const bool

View File

@ -77,9 +77,9 @@ VectorMemIndex<T>::VectorMemIndex(
} else {
auto err = get_index_obj.error();
if (err == knowhere::Status::invalid_index_error) {
throw SegcoreError(ErrorCode::Unsupported, get_index_obj.what());
PanicInfo(ErrorCode::Unsupported, get_index_obj.what());
}
throw SegcoreError(ErrorCode::KnowhereError, get_index_obj.what());
PanicInfo(ErrorCode::KnowhereError, get_index_obj.what());
}
}
@ -110,9 +110,9 @@ VectorMemIndex<T>::VectorMemIndex(
} else {
auto err = get_index_obj.error();
if (err == knowhere::Status::invalid_index_error) {
throw SegcoreError(ErrorCode::Unsupported, get_index_obj.what());
PanicInfo(ErrorCode::Unsupported, get_index_obj.what());
}
throw SegcoreError(ErrorCode::KnowhereError, get_index_obj.what());
PanicInfo(ErrorCode::KnowhereError, get_index_obj.what());
}
}

View File

@ -70,9 +70,8 @@ class IndexFactory {
case DataType::VECTOR_SPARSE_FLOAT:
return std::make_unique<VecIndexCreator>(type, config, context);
default:
throw SegcoreError(
DataTypeInvalid,
fmt::format("invalid type is {}", invalid_dtype_msg));
PanicInfo(DataTypeInvalid,
fmt::format("invalid type is {}", invalid_dtype_msg));
}
}
@ -107,7 +106,7 @@ class IndexFactory {
return std::make_unique<VecIndexCreator>(
type, field_name, dim, config, file_manager_context, space);
default:
throw std::invalid_argument(invalid_dtype_msg);
PanicInfo(ErrorCode::DataTypeInvalid, invalid_dtype_msg);
}
}
};

View File

@ -457,15 +457,24 @@ class SparseFloatColumn : public ColumnBase {
return static_cast<const char*>(static_cast<const void*>(vec_.data()));
}
// This is used to advice mmap prefetch, we don't currently support mmap for
// sparse float vector thus not implemented for now.
size_t
ByteSize() const override {
PanicInfo(ErrorCode::Unsupported,
"ByteSize not supported for sparse float column");
}
size_t
Capacity() const override {
throw std::runtime_error(
"Capacity not supported for sparse float column");
PanicInfo(ErrorCode::Unsupported,
"Capacity not supported for sparse float column");
}
SpanBase
Span() const override {
throw std::runtime_error("Span not supported for sparse float column");
PanicInfo(ErrorCode::Unsupported,
"Span not supported for sparse float column");
}
void
@ -481,8 +490,8 @@ class SparseFloatColumn : public ColumnBase {
void
Append(const char* data, size_t size) override {
throw std::runtime_error(
"Append not supported for sparse float column");
PanicInfo(ErrorCode::Unsupported,
"Append not supported for sparse float column");
}
int64_t

View File

@ -81,18 +81,11 @@ WriteFieldData(File& file,
break;
}
case DataType::VECTOR_SPARSE_FLOAT: {
for (size_t i = 0; i < data->get_num_rows(); ++i) {
auto vec =
static_cast<const knowhere::sparse::SparseRow<float>*>(
data->RawValue(i));
ssize_t written =
file.Write(vec->data(), vec->data_byte_size());
if (written < vec->data_byte_size()) {
break;
}
total_written += written;
}
break;
// TODO(SPARSE): this is for mmap to write data to disk so that
// the file can be mmaped into memory.
PanicInfo(
ErrorCode::NotImplemented,
"WriteFieldData for VECTOR_SPARSE_FLOAT not implemented");
}
default:
PanicInfo(DataTypeInvalid,

View File

@ -157,9 +157,10 @@ PrepareVectorIteratorsFromIndex(const SearchInfo& search_info,
"group_by: "
"group_by operation will be terminated",
e.what());
throw std::runtime_error(
PanicInfo(
ErrorCode::Unsupported,
"Failed to groupBy, current index:" + index.GetIndexType() +
" doesn't support search_group_by");
" doesn't support search_group_by");
}
return true;
}

View File

@ -61,7 +61,7 @@ ParsePlaceholderGroup(const Plan* plan,
} else {
auto line_size = info.values().Get(0).size();
if (field_meta.get_sizeof() != line_size) {
throw SegcoreError(
PanicInfo(
DimNotMatch,
fmt::format("vector dimension mismatch, expected vector "
"size(byte) {}, actual {}.",
@ -102,7 +102,7 @@ CreateRetrievePlanByExpr(const Schema& schema,
auto res = plan_node.ParsePartialFromCodedStream(&input_stream);
if (!res) {
throw SegcoreError(UnexpectedError, "parse plan node proto failed");
PanicInfo(UnexpectedError, "parse plan node proto failed");
}
return ProtoParser(schema).CreateRetrievePlan(plan_node);
}

View File

@ -180,9 +180,8 @@ BruteForceSearch(const dataset::SearchDataset& dataset,
}
milvus::tracer::AddEvent("knowhere_finish_BruteForce_SearchWithBuf");
if (stat != knowhere::Status::success) {
throw SegcoreError(
KnowhereError,
"Brute force search fail: " + KnowhereStatusString(stat));
PanicInfo(KnowhereError,
"Brute force search fail: " + KnowhereStatusString(stat));
}
}
sub_result.round_values();

View File

@ -99,7 +99,7 @@ SegmentInternalInterface::Retrieve(tracer::TraceContext* trace_ctx,
output_data_size += get_field_avg_size(field_id) * result_rows;
}
if (output_data_size > limit_size) {
throw SegcoreError(
PanicInfo(
RetrieveError,
fmt::format("query results exceed the limit size ", limit_size));
}
@ -258,7 +258,7 @@ SegmentInternalInterface::get_field_avg_size(FieldId field_id) const {
return sizeof(int64_t);
}
throw SegcoreError(FieldIDInvalid, "unsupported system field id");
PanicInfo(FieldIDInvalid, "unsupported system field id");
}
auto schema = get_schema();

View File

@ -820,7 +820,7 @@ LoadFieldDatasFromRemote(const std::vector<std::string>& remote_files,
channel->close();
} catch (std::exception& e) {
LOG_INFO("failed to load data from remote: {}", e.what());
channel->close(MilvusException(e.what()));
channel->close(std::current_exception());
}
}

View File

@ -44,8 +44,8 @@ AppendLoadFieldInfo(CLoadFieldDataInfo c_load_field_data_info,
static_cast<LoadFieldDataInfo*>(c_load_field_data_info);
auto iter = load_field_data_info->field_infos.find(field_id);
if (iter != load_field_data_info->field_infos.end()) {
throw milvus::SegcoreError(milvus::FieldAlreadyExist,
"append same field info multi times");
PanicInfo(milvus::ErrorCode::FieldAlreadyExist,
"append same field info multi times");
}
FieldBinlogInfo binlog_info;
binlog_info.field_id = field_id;
@ -67,8 +67,8 @@ AppendLoadFieldDataPath(CLoadFieldDataInfo c_load_field_data_info,
static_cast<LoadFieldDataInfo*>(c_load_field_data_info);
auto iter = load_field_data_info->field_infos.find(field_id);
if (iter == load_field_data_info->field_infos.end()) {
throw milvus::SegcoreError(milvus::FieldIDInvalid,
"please append field info first");
PanicInfo(milvus::ErrorCode::FieldIDInvalid,
"please append field info first");
}
std::string file_path(c_file_path);
load_field_data_info->field_infos[field_id].insert_files.emplace_back(

View File

@ -70,8 +70,7 @@ class AzureChunkManager : public ChunkManager {
uint64_t offset,
void* buf,
uint64_t len) {
throw SegcoreError(NotImplemented,
GetName() + "Read with offset not implement");
PanicInfo(NotImplemented, GetName() + "Read with offset not implement");
}
virtual void
@ -79,8 +78,8 @@ class AzureChunkManager : public ChunkManager {
uint64_t offset,
void* buf,
uint64_t len) {
throw SegcoreError(NotImplemented,
GetName() + "Write with offset not implement");
PanicInfo(NotImplemented,
GetName() + "Write with offset not implement");
}
virtual uint64_t

View File

@ -49,8 +49,7 @@ LocalChunkManager::Size(const std::string& filepath) {
boost::filesystem::path absPath(filepath);
if (!Exist(filepath)) {
throw SegcoreError(PathNotExist,
"invalid local path:" + absPath.string());
PanicInfo(PathNotExist, "invalid local path:" + absPath.string());
}
boost::system::error_code err;
int64_t size = boost::filesystem::file_size(absPath, err);
@ -86,7 +85,7 @@ LocalChunkManager::Read(const std::string& filepath,
std::stringstream err_msg;
err_msg << "Error: open local file '" << filepath << " failed, "
<< strerror(errno);
throw SegcoreError(FileOpenFailed, err_msg.str());
PanicInfo(FileOpenFailed, err_msg.str());
}
infile.seekg(offset, std::ios::beg);
@ -95,7 +94,7 @@ LocalChunkManager::Read(const std::string& filepath,
std::stringstream err_msg;
err_msg << "Error: read local file '" << filepath << " failed, "
<< strerror(errno);
throw SegcoreError(FileReadFailed, err_msg.str());
PanicInfo(FileReadFailed, err_msg.str());
}
}
return infile.gcount();
@ -116,13 +115,13 @@ LocalChunkManager::Write(const std::string& absPathStr,
std::stringstream err_msg;
err_msg << "Error: open local file '" << absPathStr << " failed, "
<< strerror(errno);
throw SegcoreError(FileOpenFailed, err_msg.str());
PanicInfo(FileOpenFailed, err_msg.str());
}
if (!outfile.write(reinterpret_cast<char*>(buf), size)) {
std::stringstream err_msg;
err_msg << "Error: write local file '" << absPathStr << " failed, "
<< strerror(errno);
throw SegcoreError(FileWriteFailed, err_msg.str());
PanicInfo(FileWriteFailed, err_msg.str());
}
}
@ -144,7 +143,7 @@ LocalChunkManager::Write(const std::string& absPathStr,
std::stringstream err_msg;
err_msg << "Error: open local file '" << absPathStr << " failed, "
<< strerror(errno);
throw SegcoreError(FileOpenFailed, err_msg.str());
PanicInfo(FileOpenFailed, err_msg.str());
}
outfile.seekp(offset, std::ios::beg);
@ -152,14 +151,14 @@ LocalChunkManager::Write(const std::string& absPathStr,
std::stringstream err_msg;
err_msg << "Error: write local file '" << absPathStr << " failed, "
<< strerror(errno);
throw SegcoreError(FileWriteFailed, err_msg.str());
PanicInfo(FileWriteFailed, err_msg.str());
}
}
std::vector<std::string>
LocalChunkManager::ListWithPrefix(const std::string& filepath) {
throw SegcoreError(NotImplemented,
GetName() + "::ListWithPrefix" + " not implement now");
PanicInfo(NotImplemented,
GetName() + "::ListWithPrefix" + " not implement now");
}
bool
@ -175,7 +174,7 @@ LocalChunkManager::CreateFile(const std::string& filepath) {
std::stringstream err_msg;
err_msg << "Error: create new local file '" << absPathStr << " failed, "
<< strerror(errno);
throw SegcoreError(FileCreateFailed, err_msg.str());
PanicInfo(FileCreateFailed, err_msg.str());
}
file.close();
return true;
@ -196,12 +195,12 @@ void
LocalChunkManager::CreateDir(const std::string& dir) {
bool isExist = DirExist(dir);
if (isExist) {
throw SegcoreError(PathAlreadyExist, "dir:" + dir + " already exists");
PanicInfo(PathAlreadyExist, "dir:" + dir + " already exists");
}
boost::filesystem::path dirPath(dir);
auto create_success = boost::filesystem::create_directories(dirPath);
if (!create_success) {
throw SegcoreError(FileCreateFailed, "create dir failed" + dir);
PanicInfo(FileCreateFailed, "create dir failed" + dir);
}
}
@ -220,7 +219,7 @@ LocalChunkManager::GetSizeOfDir(const std::string& dir) {
boost::filesystem::path dirPath(dir);
bool is_dir = boost::filesystem::is_directory(dirPath);
if (!is_dir) {
throw SegcoreError(PathNotExist, "dir:" + dir + " not exists");
PanicInfo(PathNotExist, "dir:" + dir + " not exists");
}
using boost::filesystem::directory_entry;

View File

@ -118,8 +118,7 @@ class MinioChunkManager : public ChunkManager {
uint64_t offset,
void* buf,
uint64_t len) {
throw SegcoreError(NotImplemented,
GetName() + "Read with offset not implement");
PanicInfo(NotImplemented, GetName() + "Read with offset not implement");
}
virtual void
@ -127,8 +126,8 @@ class MinioChunkManager : public ChunkManager {
uint64_t offset,
void* buf,
uint64_t len) {
throw SegcoreError(NotImplemented,
GetName() + "Write with offset not implement");
PanicInfo(NotImplemented,
GetName() + "Write with offset not implement");
}
virtual uint64_t
@ -316,7 +315,7 @@ class GoogleHttpClientFactory : public Aws::Http::HttpClientFactory {
request->SetResponseStreamFactory(streamFactory);
auto auth_header = credentials_->AuthorizationHeader();
if (!auth_header.ok()) {
throw SegcoreError(
PanicInfo(
S3Error,
fmt::format("get authorization failed, errcode: {}",
StatusCodeToString(auth_header.status().code())));

View File

@ -183,7 +183,7 @@ OpenDALChunkManager::Read(const std::string& filepath,
}
}
if (buf_index != size) {
throw SegcoreError(
PanicInfo(
S3Error,
fmt::format(
"Read size mismatch, target size is {}, actual size is {}",

View File

@ -45,8 +45,7 @@ class OpenDALChunkManager : public ChunkManager {
uint64_t offset,
void* buf,
uint64_t len) override {
throw SegcoreError(NotImplemented,
GetName() + "Read with offset not implement");
PanicInfo(NotImplemented, GetName() + "Read with offset not implement");
}
void
@ -54,8 +53,8 @@ class OpenDALChunkManager : public ChunkManager {
uint64_t offset,
void* buf,
uint64_t len) override {
throw SegcoreError(NotImplemented,
GetName() + "Write with offset not implement");
PanicInfo(NotImplemented,
GetName() + "Write with offset not implement");
}
uint64_t

View File

@ -819,9 +819,9 @@ CreateFieldData(const DataType& type, int64_t dim, int64_t total_num_rows) {
return std::make_shared<FieldData<SparseFloatVector>>(
type, total_num_rows);
default:
throw SegcoreError(DataTypeInvalid,
"CreateFieldData not support data type " +
GetDataTypeName(type));
PanicInfo(DataTypeInvalid,
"CreateFieldData not support data type " +
GetDataTypeName(type));
}
}