mirror of https://github.com/milvus-io/milvus.git
Merge branch 'branch-0.3.0' into 'branch-0.3.0'
MS-45 - Implement DeleteTable interface

See merge request megasearch/vecwise_engine!77

Former-commit-id: db5775f82ff3923dda39ec04070b1ec7433c37f8

commit cb489d015d
@@ -20,6 +20,8 @@ Please mark all change in change log and use the ticket from JIRA.
 - MS-71 - cmake: fix faiss dependency
 - MS-72 - cmake: change prometheus source to git
 - MS-73 - cmake: delete civetweb
+- MS-65 - Implement GetTableRowCount interface
+- MS-45 - Implement DeleteTable interface

 ## Task

@@ -23,19 +23,22 @@ public:
 static void Open(const Options& options, DB** dbptr);

 virtual Status CreateTable(meta::TableSchema& table_schema_) = 0;
+virtual Status DeleteTable(const std::string& table_id, const meta::DatesT& dates) = 0;
 virtual Status DescribeTable(meta::TableSchema& table_schema_) = 0;
-virtual Status HasTable(const std::string& table_id_, bool& has_or_not_) = 0;
+virtual Status HasTable(const std::string& table_id, bool& has_or_not_) = 0;
+virtual Status AllTables(std::vector<meta::TableSchema>& table_schema_array) = 0;
+virtual Status GetTableRowCount(const std::string& table_id, uint64_t& row_count) = 0;

 virtual Status InsertVectors(const std::string& table_id_,
-size_t n, const float* vectors, IDNumbers& vector_ids_) = 0;
+uint64_t n, const float* vectors, IDNumbers& vector_ids_) = 0;

-virtual Status Query(const std::string& table_id, size_t k, size_t nq,
+virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
 const float* vectors, QueryResults& results) = 0;

-virtual Status Query(const std::string& table_id, size_t k, size_t nq,
+virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
 const float* vectors, const meta::DatesT& dates, QueryResults& results) = 0;

-virtual Status Size(long& result) = 0;
+virtual Status Size(uint64_t& result) = 0;

 virtual Status DropAll() = 0;

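For orientation, a minimal caller-side sketch of the interface above. This is not code from the repository: the table id, options setup, and the wrapper function are hypothetical; only the signatures shown in this hunk are assumed.

    // call-shape sketch, assuming the engine headers from this change set are available
    #include "DB.h"

    void DeleteTableExample() {
        using namespace zilliz::vecwise;
        engine::Options options;                      // assumed: defaults suffice for a sketch
        engine::DB* db = nullptr;
        engine::DB::Open(options, &db);               // factory-style open, as declared above

        bool exists = false;
        db->HasTable("example_table", exists);        // hypothetical table id

        uint64_t rows = 0;
        db->GetTableRowCount("example_table", rows);  // added by MS-65

        engine::meta::DatesT dates;                   // empty vector == drop all date partitions
        db->DeleteTable("example_table", dates);      // added by MS-45
    }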
@@ -6,6 +6,7 @@
 #include "DBImpl.h"
 #include "DBMetaImpl.h"
 #include "Env.h"
+#include "Log.h"
 #include "EngineFactory.h"
 #include "metrics/Metrics.h"
 #include "scheduler/SearchScheduler.h"
@@ -15,8 +16,8 @@
 #include <thread>
 #include <iostream>
 #include <cstring>
-#include <easylogging++.h>
 #include <cache/CpuCacheMgr.h>
+#include <boost/filesystem.hpp>

 namespace zilliz {
 namespace vecwise {
@@ -88,6 +89,34 @@ Status DBImpl::CreateTable(meta::TableSchema& table_schema) {
 return pMeta_->CreateTable(table_schema);
 }

+Status DBImpl::DeleteTable(const std::string& table_id, const meta::DatesT& dates) {
+meta::DatePartionedTableFilesSchema files;
+auto status = pMeta_->FilesToDelete(table_id, dates, files);
+if (!status.ok()) { return status; }
+
+for (auto &day_files : files) {
+for (auto &file : day_files.second) {
+boost::filesystem::remove(file.location_);
+}
+}
+
+//dates empty means delete all files of the table
+if(dates.empty()) {
+meta::TableSchema table_schema;
+table_schema.table_id_ = table_id;
+status = DescribeTable(table_schema);
+
+pMeta_->DeleteTable(table_id);
+boost::system::error_code ec;
+boost::filesystem::remove_all(table_schema.location_, ec);
+if(ec.failed()) {
+ENGINE_LOG_WARNING << "Failed to remove table folder";
+}
+}
+
+return Status::OK();
+}
+
 Status DBImpl::DescribeTable(meta::TableSchema& table_schema) {
 return pMeta_->DescribeTable(table_schema);
 }
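A small usage sketch of the two delete modes implemented above (hypothetical table id; `db` is a `DB*` obtained from `DB::Open`, and the lines mirror the convention in the hunk, written as they would appear inside the engine namespace): explicit dates remove only those partitions' files, while an empty `DatesT` also drops the meta rows and the table folder.

    // sketch only, not from the repository
    meta::DatesT today = { meta::Meta::GetDate() };   // just today's partition
    db->DeleteTable("example_table", today);          // files of that date are removed

    meta::DatesT none;                                // empty == delete everything for the table
    db->DeleteTable("example_table", none);           // meta rows and table folder removed too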
@@ -96,8 +125,16 @@ Status DBImpl::HasTable(const std::string& table_id, bool& has_or_not) {
 return pMeta_->HasTable(table_id, has_or_not);
 }

+Status DBImpl::AllTables(std::vector<meta::TableSchema>& table_schema_array) {
+return pMeta_->AllTables(table_schema_array);
+}
+
+Status DBImpl::GetTableRowCount(const std::string& table_id, uint64_t& row_count) {
+return pMeta_->Count(table_id, row_count);
+}
+
 Status DBImpl::InsertVectors(const std::string& table_id_,
-size_t n, const float* vectors, IDNumbers& vector_ids_) {
+uint64_t n, const float* vectors, IDNumbers& vector_ids_) {

 auto start_time = METRICS_NOW_TIME;
 Status status = pMemMgr_->InsertVectors(table_id_, n, vectors, vector_ids_);

@@ -111,7 +148,7 @@ Status DBImpl::InsertVectors(const std::string& table_id_,

 }

-Status DBImpl::Query(const std::string &table_id, size_t k, size_t nq,
+Status DBImpl::Query(const std::string &table_id, uint64_t k, uint64_t nq,
 const float *vectors, QueryResults &results) {
 auto start_time = METRICS_NOW_TIME;
 meta::DatesT dates = {meta::Meta::GetDate()};

@@ -124,7 +161,7 @@ Status DBImpl::Query(const std::string &table_id, size_t k, size_t nq,
 return result;
 }

-Status DBImpl::Query(const std::string& table_id, size_t k, size_t nq,
+Status DBImpl::Query(const std::string& table_id, uint64_t k, uint64_t nq,
 const float* vectors, const meta::DatesT& dates, QueryResults& results) {
 #if 0
 return QuerySync(table_id, k, nq, vectors, dates, results);

@@ -133,13 +170,13 @@ Status DBImpl::Query(const std::string& table_id, size_t k, size_t nq,
 #endif
 }

-Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
+Status DBImpl::QuerySync(const std::string& table_id, uint64_t k, uint64_t nq,
 const float* vectors, const meta::DatesT& dates, QueryResults& results) {
 meta::DatePartionedTableFilesSchema files;
 auto status = pMeta_->FilesToSearch(table_id, dates, files);
 if (!status.ok()) { return status; }

-LOG(DEBUG) << "Search DateT Size=" << files.size();
+ENGINE_LOG_DEBUG << "Search DateT Size = " << files.size();

 meta::TableFilesSchema index_files;
 meta::TableFilesSchema raw_files;

@@ -156,7 +193,7 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
 } else if (!raw_files.empty()) {
 dim = raw_files[0].dimension_;
 } else {
-LOG(DEBUG) << "no files to search";
+ENGINE_LOG_DEBUG << "no files to search";
 return Status::OK();
 }

@@ -192,7 +229,7 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
 auto file_size = index->PhysicalSize();
 search_set_size += file_size;

-LOG(DEBUG) << "Search file_type " << file.file_type_ << " Of Size: "
+ENGINE_LOG_DEBUG << "Search file_type " << file.file_type_ << " Of Size: "
 << file_size/(1024*1024) << " M";

 int inner_k = index->Count() < k ? index->Count() : k;

@@ -254,7 +291,7 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
 search_in_index(raw_files);
 search_in_index(index_files);

-LOG(DEBUG) << "Search Overall Set Size=" << search_set_size << " M";
+ENGINE_LOG_DEBUG << "Search Overall Set Size = " << search_set_size << " M";
 cluster_topk();

 free(output_distence);

@@ -267,7 +304,7 @@ Status DBImpl::QuerySync(const std::string& table_id, size_t k, size_t nq,
 return Status::OK();
 }

-Status DBImpl::QueryAsync(const std::string& table_id, size_t k, size_t nq,
+Status DBImpl::QueryAsync(const std::string& table_id, uint64_t k, uint64_t nq,
 const float* vectors, const meta::DatesT& dates, QueryResults& results) {

 //step 1: get files to search

@@ -275,7 +312,7 @@ Status DBImpl::QueryAsync(const std::string& table_id, size_t k, size_t nq,
 auto status = pMeta_->FilesToSearch(table_id, dates, files);
 if (!status.ok()) { return status; }

-LOG(DEBUG) << "Search DateT Size=" << files.size();
+ENGINE_LOG_DEBUG << "Search DateT Size=" << files.size();

 SearchContextPtr context = std::make_shared<SearchContext>(k, nq, vectors);

@@ -312,11 +349,12 @@ void DBImpl::BackgroundTimerTask(int interval) {
 if (shutting_down_.load(std::memory_order_acquire)) break;

 std::this_thread::sleep_for(std::chrono::seconds(interval));

 server::Metrics::GetInstance().KeepingAliveCounterIncrement(interval);
 int64_t cache_usage = cache::CpuCacheMgr::GetInstance()->CacheUsage();
 int64_t cache_total = cache::CpuCacheMgr::GetInstance()->CacheCapacity();
 server::Metrics::GetInstance().CacheUsageGaugeSet(cache_usage*100/cache_total);
-long size;
+uint64_t size;
 Size(size);
 server::Metrics::GetInstance().DataFileSizeGaugeSet(size);
 server::Metrics::GetInstance().CPUUsagePercentSet();
@@ -517,7 +555,7 @@ Status DBImpl::DropAll() {
 return pMeta_->DropAll();
 }

-Status DBImpl::Size(long& result) {
+Status DBImpl::Size(uint64_t& result) {
 return pMeta_->Size(result);
 }

@@ -33,29 +33,32 @@ public:
 DBImpl(const Options& options);

 virtual Status CreateTable(meta::TableSchema& table_schema) override;
+virtual Status DeleteTable(const std::string& table_id, const meta::DatesT& dates) override;
 virtual Status DescribeTable(meta::TableSchema& table_schema) override;
 virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
+virtual Status AllTables(std::vector<meta::TableSchema>& table_schema_array) override;
+virtual Status GetTableRowCount(const std::string& table_id, uint64_t& row_count) override;

 virtual Status InsertVectors(const std::string& table_id,
-size_t n, const float* vectors, IDNumbers& vector_ids) override;
+uint64_t n, const float* vectors, IDNumbers& vector_ids) override;

-virtual Status Query(const std::string& table_id, size_t k, size_t nq,
+virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
 const float* vectors, QueryResults& results) override;

-virtual Status Query(const std::string& table_id, size_t k, size_t nq,
+virtual Status Query(const std::string& table_id, uint64_t k, uint64_t nq,
 const float* vectors, const meta::DatesT& dates, QueryResults& results) override;

 virtual Status DropAll() override;

-virtual Status Size(long& result) override;
+virtual Status Size(uint64_t& result) override;

 virtual ~DBImpl();

 private:
-Status QuerySync(const std::string& table_id, size_t k, size_t nq,
+Status QuerySync(const std::string& table_id, uint64_t k, uint64_t nq,
 const float* vectors, const meta::DatesT& dates, QueryResults& results);

-Status QueryAsync(const std::string& table_id, size_t k, size_t nq,
+Status QueryAsync(const std::string& table_id, uint64_t k, uint64_t nq,
 const float* vectors, const meta::DatesT& dates, QueryResults& results);

@@ -6,6 +6,7 @@
 #include "DBMetaImpl.h"
 #include "IDGenerator.h"
 #include "Utils.h"
+#include "Log.h"
 #include "MetaConsts.h"
 #include "Factories.h"
 #include "metrics/Metrics.h"
@@ -17,7 +18,6 @@
 #include <chrono>
 #include <fstream>
 #include <sqlite_orm.h>
-#include <easylogging++.h>


 namespace zilliz {
@@ -27,6 +27,15 @@ namespace meta {

 using namespace sqlite_orm;

+namespace {
+
+void HandleException(std::exception &e) {
+ENGINE_LOG_DEBUG << "Engine meta exception: " << e.what();
+throw e;
+}
+
+}
+
 inline auto StoragePrototype(const std::string &path) {
 return make_storage(path,
 make_table("Table",
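The helper above centralizes the repeated `LOG(DEBUG) << e.what(); throw e;` pattern that this file previously used; the catch blocks later in this diff all collapse to the same shape. A minimal sketch of that call-site shape (illustrative only):

    try {
        // any sqlite_orm meta operation on ConnectorPtr, e.g. a select or remove
    } catch (std::exception &e) {
        HandleException(e);   // logs via ENGINE_LOG_DEBUG, then rethrows the same exception
    }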
@@ -100,7 +109,7 @@ Status DBMetaImpl::Initialize() {
 if (!boost::filesystem::is_directory(options_.path)) {
 auto ret = boost::filesystem::create_directory(options_.path);
 if (!ret) {
-LOG(ERROR) << "Create directory " << options_.path << " Error";
+ENGINE_LOG_ERROR << "Create directory " << options_.path << " Error";
 }
 assert(ret);
 }

@@ -148,8 +157,7 @@ Status DBMetaImpl::DropPartitionsByDates(const std::string &table_id,
 in(&TableFileSchema::date_, dates)
 ));
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }
 return Status::OK();
 }

@@ -175,12 +183,12 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
 auto total_time = METRICS_MICROSECONDS(start_time, end_time);
 server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);

-auto group_path = GetTablePath(table_schema.table_id_);
-
-if (!boost::filesystem::is_directory(group_path)) {
-auto ret = boost::filesystem::create_directories(group_path);
+auto table_path = GetTablePath(table_schema.table_id_);
+table_schema.location_ = table_path;
+if (!boost::filesystem::is_directory(table_path)) {
+auto ret = boost::filesystem::create_directories(table_path);
 if (!ret) {
-LOG(ERROR) << "Create directory " << group_path << " Error";
+ENGINE_LOG_ERROR << "Create directory " << table_path << " Error";
 }
 assert(ret);
 }

@@ -188,6 +196,21 @@ Status DBMetaImpl::CreateTable(TableSchema &table_schema) {
 return Status::OK();
 }

+Status DBMetaImpl::DeleteTable(const std::string& table_id) {
+try {
+//drop the table from meta
+auto tables = ConnectorPtr->select(columns(&TableSchema::id_),
+where(c(&TableSchema::table_id_) == table_id));
+for (auto &table : tables) {
+ConnectorPtr->remove<TableSchema>(std::get<0>(table));
+}
+} catch (std::exception &e) {
+HandleException(e);
+}
+
+return Status::OK();
+}
+
 Status DBMetaImpl::DescribeTable(TableSchema &table_schema) {
 try {
 server::Metrics::GetInstance().MetaAccessTotalIncrement();

@@ -212,9 +235,12 @@ Status DBMetaImpl::DescribeTable(TableSchema &table_schema) {
 } else {
 return Status::NotFound("Table " + table_schema.table_id_ + " not found");
 }
+
+auto table_path = GetTablePath(table_schema.table_id_);
+table_schema.location_ = table_path;
+
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }

 return Status::OK();

@@ -237,12 +263,42 @@ Status DBMetaImpl::HasTable(const std::string &table_id, bool &has_or_not) {
 has_or_not = false;
 }
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }
 return Status::OK();
 }

+Status DBMetaImpl::AllTables(std::vector<TableSchema>& table_schema_array) {
+try {
+server::Metrics::GetInstance().MetaAccessTotalIncrement();
+auto start_time = METRICS_NOW_TIME;
+auto selected = ConnectorPtr->select(columns(&TableSchema::id_,
+&TableSchema::table_id_,
+&TableSchema::files_cnt_,
+&TableSchema::dimension_,
+&TableSchema::engine_type_,
+&TableSchema::store_raw_data_));
+auto end_time = METRICS_NOW_TIME;
+auto total_time = METRICS_MICROSECONDS(start_time, end_time);
+server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
+for (auto &table : selected) {
+TableSchema schema;
+schema.id_ = std::get<0>(table);
+schema.table_id_ = std::get<1>(table);
+schema.files_cnt_ = std::get<2>(table);
+schema.dimension_ = std::get<3>(table);
+schema.engine_type_ = std::get<4>(table);
+schema.store_raw_data_ = std::get<5>(table);
+
+table_schema_array.emplace_back(schema);
+}
+} catch (std::exception &e) {
+HandleException(e);
+}
+
+return Status::OK();
+}
+
 Status DBMetaImpl::CreateTableFile(TableFileSchema &file_schema) {
 if (file_schema.date_ == EmptyDate) {
 file_schema.date_ = Meta::GetDate();

@@ -282,7 +338,7 @@ Status DBMetaImpl::CreateTableFile(TableFileSchema &file_schema) {
 if (!boost::filesystem::is_directory(partition_path)) {
 auto ret = boost::filesystem::create_directory(partition_path);
 if (!ret) {
-LOG(ERROR) << "Create directory " << partition_path << " Error";
+ENGINE_LOG_ERROR << "Create directory " << partition_path << " Error";
 }
 assert(ret);
 }

@@ -336,8 +392,7 @@ Status DBMetaImpl::FilesToIndex(TableFilesSchema &files) {
 files.push_back(table_file);
 }
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }

 return Status::OK();

@@ -438,8 +493,7 @@ Status DBMetaImpl::FilesToSearch(const std::string &table_id,

 }
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }

 return Status::OK();

@@ -488,8 +542,79 @@ Status DBMetaImpl::FilesToMerge(const std::string &table_id,
 files[table_file.date_].push_back(table_file);
 }
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }

 return Status::OK();
 }

+Status DBMetaImpl::FilesToDelete(const std::string& table_id,
+const DatesT& partition,
+DatePartionedTableFilesSchema& files) {
+auto now = utils::GetMicroSecTimeStamp();
+try {
+if(partition.empty()) {
+//step 1: get table files by dates
+auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_,
+&TableFileSchema::table_id_,
+&TableFileSchema::file_id_,
+&TableFileSchema::size_,
+&TableFileSchema::date_),
+where(c(&TableFileSchema::file_type_) !=
+(int) TableFileSchema::TO_DELETE
+and c(&TableFileSchema::table_id_) == table_id));
+
+//step 2: erase table files from meta
+for (auto &file : selected) {
+TableFileSchema table_file;
+table_file.id_ = std::get<0>(file);
+table_file.table_id_ = std::get<1>(file);
+table_file.file_id_ = std::get<2>(file);
+table_file.size_ = std::get<3>(file);
+table_file.date_ = std::get<4>(file);
+GetTableFilePath(table_file);
+auto dateItr = files.find(table_file.date_);
+if (dateItr == files.end()) {
+files[table_file.date_] = TableFilesSchema();
+}
+files[table_file.date_].push_back(table_file);
+
+ConnectorPtr->remove<TableFileSchema>(std::get<0>(file));
+}
+
+} else {
+//step 1: get all table files
+auto selected = ConnectorPtr->select(columns(&TableFileSchema::id_,
+&TableFileSchema::table_id_,
+&TableFileSchema::file_id_,
+&TableFileSchema::size_,
+&TableFileSchema::date_),
+where(c(&TableFileSchema::file_type_) !=
+(int) TableFileSchema::TO_DELETE
+and in(&TableFileSchema::date_, partition)
+and c(&TableFileSchema::table_id_) == table_id));
+
+//step 2: erase table files from meta
+for (auto &file : selected) {
+TableFileSchema table_file;
+table_file.id_ = std::get<0>(file);
+table_file.table_id_ = std::get<1>(file);
+table_file.file_id_ = std::get<2>(file);
+table_file.size_ = std::get<3>(file);
+table_file.date_ = std::get<4>(file);
+GetTableFilePath(table_file);
+auto dateItr = files.find(table_file.date_);
+if (dateItr == files.end()) {
+files[table_file.date_] = TableFilesSchema();
+}
+files[table_file.date_].push_back(table_file);
+
+ConnectorPtr->remove<TableFileSchema>(std::get<0>(file));
+}
+}
+
+} catch (std::exception &e) {
+HandleException(e);
+}
+
+return Status::OK();
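Both branches of FilesToDelete above follow the same sqlite_orm idiom used throughout this file. A condensed sketch of that idiom (column set shortened for brevity; only calls that already appear above are used, and `table_id` stands in for the caller's argument):

    auto selected = ConnectorPtr->select(
            columns(&TableFileSchema::id_, &TableFileSchema::date_),
            where(c(&TableFileSchema::table_id_) == table_id
                  and c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE));
    for (auto &row : selected) {
        // std::get<N>(row) maps to the N-th column listed above
        ConnectorPtr->remove<TableFileSchema>(std::get<0>(row));   // erase the row by primary key
    }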
@@ -520,8 +645,7 @@ Status DBMetaImpl::GetTableFile(TableFileSchema &file_schema) {
 " File:" + file_schema.file_id_ + " not found");
 }
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }

 return Status::OK();

@@ -550,12 +674,11 @@ Status DBMetaImpl::Archive() {
 c(&TableFileSchema::file_type_) != (int) TableFileSchema::TO_DELETE
 ));
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }
 }
 if (criteria == "disk") {
-long sum = 0;
+uint64_t sum = 0;
 Size(sum);

 auto to_delete = (sum - limit * G);

@@ -566,7 +689,7 @@ Status DBMetaImpl::Archive() {
 return Status::OK();
 }

-Status DBMetaImpl::Size(long &result) {
+Status DBMetaImpl::Size(uint64_t &result) {
 result = 0;
 try {
 auto selected = ConnectorPtr->select(columns(sum(&TableFileSchema::size_)),

@@ -578,11 +701,10 @@ Status DBMetaImpl::Size(long &result) {
 if (!std::get<0>(sub_query)) {
 continue;
 }
-result += (long) (*std::get<0>(sub_query));
+result += (uint64_t) (*std::get<0>(sub_query));
 }
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }

 return Status::OK();

@@ -609,7 +731,8 @@ Status DBMetaImpl::DiscardFiles(long to_discard_size) {
 table_file.id_ = std::get<0>(file);
 table_file.size_ = std::get<1>(file);
 ids.push_back(table_file.id_);
-LOG(DEBUG) << "Discard table_file.id=" << table_file.file_id_ << " table_file.size=" << table_file.size_;
+ENGINE_LOG_DEBUG << "Discard table_file.id=" << table_file.file_id_
+<< " table_file.size=" << table_file.size_;
 to_discard_size -= table_file.size_;
 }

@@ -626,11 +749,9 @@ Status DBMetaImpl::DiscardFiles(long to_discard_size) {
 ));

 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }

 return DiscardFiles(to_discard_size);
 }

@@ -644,9 +765,8 @@ Status DBMetaImpl::UpdateTableFile(TableFileSchema &file_schema) {
 auto total_time = METRICS_MICROSECONDS(start_time, end_time);
 server::Metrics::GetInstance().MetaAccessDurationSecondsHistogramObserve(total_time);
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-LOG(DEBUG) << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_;
-throw e;
+ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_;
+HandleException(e);
 }
 return Status::OK();
 }

@@ -669,8 +789,7 @@ Status DBMetaImpl::UpdateTableFiles(TableFilesSchema &files) {
 return Status::DBTransactionError("Update files Error");
 }
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }
 return Status::OK();
 }

@@ -708,8 +827,7 @@ Status DBMetaImpl::CleanUpFilesWithTTL(uint16_t seconds) {
 /* LOG(DEBUG) << "Removing deleted id=" << table_file.id << " location=" << table_file.location << std::endl; */
 }
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }

 return Status::OK();
@@ -747,14 +865,13 @@ Status DBMetaImpl::CleanUp() {
 /* LOG(DEBUG) << "Removing id=" << table_file.id << " location=" << table_file.location << std::endl; */
 }
 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }

 return Status::OK();
 }

-Status DBMetaImpl::Count(const std::string &table_id, long &result) {
+Status DBMetaImpl::Count(const std::string &table_id, uint64_t &result) {

 try {

@@ -785,10 +902,10 @@ Status DBMetaImpl::Count(const std::string &table_id, long &result) {
 }

 result /= table_schema.dimension_;
 result /= sizeof(float);

 } catch (std::exception &e) {
-LOG(DEBUG) << e.what();
-throw e;
+HandleException(e);
 }
 return Status::OK();
 }
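For a sense of the conversion in Count() above (illustrative numbers, not from the repository): the summed size_ column is in bytes, so a table of dimension 256 holding 1,048,576 bytes of raw float vectors yields 1,048,576 / 256 / sizeof(float) = 1,048,576 / 256 / 4 = 1,024 rows, which is the value GetTableRowCount reports.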
@@ -20,8 +20,10 @@ public:
 DBMetaImpl(const DBMetaOptions& options_);

 virtual Status CreateTable(TableSchema& table_schema) override;
+virtual Status DeleteTable(const std::string& table_id) override;
 virtual Status DescribeTable(TableSchema& group_info_) override;
 virtual Status HasTable(const std::string& table_id, bool& has_or_not) override;
+virtual Status AllTables(std::vector<TableSchema>& table_schema_array) override;

 virtual Status CreateTableFile(TableFileSchema& file_schema) override;
 virtual Status DropPartitionsByDates(const std::string& table_id,

@@ -40,11 +42,15 @@ public:
 virtual Status FilesToMerge(const std::string& table_id,
 DatePartionedTableFilesSchema& files) override;

+virtual Status FilesToDelete(const std::string& table_id,
+const DatesT& partition,
+DatePartionedTableFilesSchema& files) override;
+
 virtual Status FilesToIndex(TableFilesSchema&) override;

 virtual Status Archive() override;

-virtual Status Size(long& result) override;
+virtual Status Size(uint64_t& result) override;

 virtual Status CleanUp() override;

@@ -52,7 +58,7 @@ public:

 virtual Status DropAll() override;

-virtual Status Count(const std::string& table_id, long& result) override;
+virtual Status Count(const std::string& table_id, uint64_t& result) override;

 virtual ~DBMetaImpl();

@@ -24,8 +24,10 @@ public:
 using Ptr = std::shared_ptr<Meta>;

 virtual Status CreateTable(TableSchema& table_schema) = 0;
+virtual Status DeleteTable(const std::string& table_id) = 0;
 virtual Status DescribeTable(TableSchema& table_schema) = 0;
 virtual Status HasTable(const std::string& table_id, bool& has_or_not) = 0;
+virtual Status AllTables(std::vector<TableSchema>& table_schema_array) = 0;

 virtual Status CreateTableFile(TableFileSchema& file_schema) = 0;
 virtual Status DropPartitionsByDates(const std::string& table_id,

@@ -43,7 +45,11 @@ public:
 virtual Status FilesToMerge(const std::string& table_id,
 DatePartionedTableFilesSchema& files) = 0;

-virtual Status Size(long& result) = 0;
+virtual Status FilesToDelete(const std::string& table_id,
+const DatesT& partition,
+DatePartionedTableFilesSchema& files) = 0;
+
+virtual Status Size(uint64_t& result) = 0;

 virtual Status Archive() = 0;

@@ -54,7 +60,7 @@ public:

 virtual Status DropAll() = 0;

-virtual Status Count(const std::string& table_id, long& result) = 0;
+virtual Status Count(const std::string& table_id, uint64_t& result) = 0;

 static DateT GetDate(const std::time_t& t, int day_delta = 0);
 static DateT GetDate();

@@ -111,7 +111,7 @@ bool SearchTask::DoSearch() {
 return false;
 }

-server::TimeRecorder rc("DoSearch");
+server::TimeRecorder rc("DoSearch index(" + std::to_string(index_id_) + ")");

 std::vector<long> output_ids;
 std::vector<float> output_distence;

@@ -28,7 +28,7 @@
 std::cout << "Table name: " << tb_schema.table_name << std::endl;
 std::cout << "Table index type: " << (int)tb_schema.index_type << std::endl;
 std::cout << "Table dimension: " << tb_schema.dimension << std::endl;
-std::cout << "Table store raw data: " << tb_schema.store_raw_vector << std::endl;
+std::cout << "Table store raw data: " << (tb_schema.store_raw_vector ? "true" : "false") << std::endl;
 BLOCK_SPLITER
 }

@@ -148,7 +148,9 @@ ClientTest::Test(const std::string& address, const std::string& port) {
 std::cout << "ShowTables function call status: " << stat.ToString() << std::endl;
 std::cout << "All tables: " << std::endl;
 for(auto& table : tables) {
-std::cout << "\t" << table << std::endl;
+int64_t row_count = 0;
+stat = conn->GetTableRowCount(table, row_count);
+std::cout << "\t" << table << "(" << row_count << " rows)" << std::endl;
 }
 }

@@ -192,10 +194,10 @@ ClientTest::Test(const std::string& address, const std::string& port) {
 PrintSearchResult(topk_query_result_array);
 }

-// {//delete table
-// Status stat = conn->DeleteTable(TABLE_NAME);
-// std::cout << "DeleteTable function call status: " << stat.ToString() << std::endl;
-// }
+{//delete table
+Status stat = conn->DeleteTable(TABLE_NAME);
+std::cout << "DeleteTable function call status: " << stat.ToString() << std::endl;
+}

 {//server status
 std::string status = conn->ServerStatus();

@@ -72,7 +72,7 @@ class Status {
 * @return, the status is assigned.
 *
 */
-inline Status &operator=(const Status &s);
+Status &operator=(const Status &s);

 /**
 * @brief Status

@@ -93,7 +93,7 @@ class Status {
 * @return, the status is moved.
 *
 */
-inline Status &operator=(Status &&s) noexcept;
+Status &operator=(Status &&s) noexcept;

 /**
 * @brief Status

@@ -77,7 +77,7 @@ ClientProxy::Disconnect() {

 std::string
 ClientProxy::ClientVersion() const {
-return std::string("v1.0");
+return "";
 }

 Status

@@ -221,6 +221,8 @@ ClientProxy::DescribeTable(const std::string &table_name, TableSchema &table_sch

 table_schema.table_name = thrift_schema.table_name;
+table_schema.index_type = (IndexType)thrift_schema.index_type;
 table_schema.dimension = thrift_schema.dimension;
+table_schema.store_raw_vector = thrift_schema.store_raw_vector;

 } catch ( std::exception& ex) {
 return Status(StatusCode::UnknownError, "failed to describe table: " + std::string(ex.what()));

@@ -4,6 +4,7 @@
 * Proprietary and confidential.
 ******************************************************************************/
 #include "ConnectionImpl.h"
+#include "version.h"

 namespace megasearch {

@@ -47,7 +48,7 @@ ConnectionImpl::Disconnect() {

 std::string
 ConnectionImpl::ClientVersion() const {
-return client_proxy_->ClientVersion();
+return MEGASEARCH_VERSION;
 }

 Status

@@ -21,9 +21,6 @@ static const std::string DQL_TASK_GROUP = "dql";
 static const std::string DDL_DML_TASK_GROUP = "ddl_dml";
 static const std::string PING_TASK_GROUP = "ping";

-static const std::string VECTOR_UID = "uid";
-static const uint64_t USE_MT = 5000;
-
 using DB_META = zilliz::vecwise::engine::meta::Meta;
 using DB_DATE = zilliz::vecwise::engine::meta::DateT;

@@ -76,6 +73,20 @@ namespace {
 return map_type[type];
 }

+int IndexType(engine::EngineType type) {
+static std::map<engine::EngineType, int> map_type = {
+{engine::EngineType::INVALID, 0},
+{engine::EngineType::FAISS_IDMAP, 1},
+{engine::EngineType::FAISS_IVFFLAT, 2},
+};
+
+if(map_type.find(type) == map_type.end()) {
+return 0;
+}
+
+return map_type[type];
+}
+
 ServerError
 ConvertRowRecordToFloatArray(const std::vector<thrift::RowRecord>& record_array,
 uint64_t dimension,

@@ -174,16 +185,17 @@ ServerError CreateTableTask::OnExecute() {
 //step 2: create table
 engine::Status stat = DB()->CreateTable(table_info);
 if(!stat.ok()) {//table could exist
 error_code_ = SERVER_UNEXPECTED_ERROR;
 error_msg_ = "Engine failed: " + stat.ToString();
 SERVER_LOG_ERROR << error_msg_;
-return SERVER_SUCCESS;
+return error_code_;
 }

 } catch (std::exception& ex) {
 error_code_ = SERVER_UNEXPECTED_ERROR;
 error_msg_ = ex.what();
 SERVER_LOG_ERROR << error_msg_;
-return SERVER_UNEXPECTED_ERROR;
+return error_code_;
 }

 rc.Record("done");

@@ -215,10 +227,13 @@ ServerError DescribeTableTask::OnExecute() {
 error_msg_ = "Engine failed: " + stat.ToString();
 SERVER_LOG_ERROR << error_msg_;
 return error_code_;
-} else {
-
-}
+}

 schema_.table_name = table_info.table_id_;
+schema_.index_type = IndexType((engine::EngineType)table_info.engine_type_);
 schema_.dimension = table_info.dimension_;
+schema_.store_raw_vector = table_info.store_raw_data_;

 } catch (std::exception& ex) {
 error_code_ = SERVER_UNEXPECTED_ERROR;
 error_msg_ = ex.what();

@@ -243,16 +258,53 @@ BaseTaskPtr DeleteTableTask::Create(const std::string& group_id) {
 }

 ServerError DeleteTableTask::OnExecute() {
-error_code_ = SERVER_NOT_IMPLEMENT;
-error_msg_ = "delete table not implemented";
-SERVER_LOG_ERROR << error_msg_;
+try {
+TimeRecorder rc("DeleteTableTask");

-return SERVER_NOT_IMPLEMENT;
+//step 1: check validation
+if (table_name_.empty()) {
+error_code_ = SERVER_INVALID_ARGUMENT;
+error_msg_ = "Table name cannot be empty";
+SERVER_LOG_ERROR << error_msg_;
+return error_code_;
+}
+
+//step 2: check table existence
+engine::meta::TableSchema table_info;
+table_info.table_id_ = table_name_;
+engine::Status stat = DB()->DescribeTable(table_info);
+if(!stat.ok()) {
+error_code_ = SERVER_TABLE_NOT_EXIST;
+error_msg_ = "Engine failed: " + stat.ToString();
+SERVER_LOG_ERROR << error_msg_;
+return error_code_;
+}
+
+rc.Record("check validation");
+
+//step 3: delete table
+std::vector<DB_DATE> dates;
+stat = DB()->DeleteTable(table_name_, dates);
+if(!stat.ok()) {
+SERVER_LOG_ERROR << "Engine failed: " << stat.ToString();
+return SERVER_UNEXPECTED_ERROR;
+}
+
+rc.Record("deleta table");
+rc.Elapse("totally cost");
+} catch (std::exception& ex) {
+error_code_ = SERVER_UNEXPECTED_ERROR;
+error_msg_ = ex.what();
+SERVER_LOG_ERROR << error_msg_;
+return error_code_;
+}
+
+return SERVER_SUCCESS;
 }

 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 ShowTablesTask::ShowTablesTask(std::vector<std::string>& tables)
-: BaseTask(PING_TASK_GROUP),
+: BaseTask(DQL_TASK_GROUP),
 tables_(tables) {

 }
@@ -262,6 +314,19 @@ BaseTaskPtr ShowTablesTask::Create(std::vector<std::string>& tables) {
 }

 ServerError ShowTablesTask::OnExecute() {
+std::vector<engine::meta::TableSchema> schema_array;
+engine::Status stat = DB()->AllTables(schema_array);
+if(!stat.ok()) {
+error_code_ = SERVER_UNEXPECTED_ERROR;
+error_msg_ = "Engine failed: " + stat.ToString();
+SERVER_LOG_ERROR << error_msg_;
+return error_code_;
+}
+
+tables_.clear();
+for(auto& schema : schema_array) {
+tables_.push_back(schema.table_id_);
+}

 return SERVER_SUCCESS;
 }

@@ -468,17 +533,39 @@ BaseTaskPtr GetTableRowCountTask::Create(const std::string& table_name, int64_t&
 }

 ServerError GetTableRowCountTask::OnExecute() {
-if(table_name_.empty()) {
+try {
+TimeRecorder rc("GetTableRowCountTask");
+
+//step 1: check validation
+if (table_name_.empty()) {
 error_code_ = SERVER_INVALID_ARGUMENT;
 error_msg_ = "Table name cannot be empty";
 SERVER_LOG_ERROR << error_msg_;
 return error_code_;
 }
+
+//step 2: get row count
+uint64_t row_count = 0;
+engine::Status stat = DB()->GetTableRowCount(table_name_, row_count);
+if (!stat.ok()) {
+error_code_ = SERVER_UNEXPECTED_ERROR;
+error_msg_ = "Engine failed: " + stat.ToString();
+SERVER_LOG_ERROR << error_msg_;
+return error_code_;
+}
+
+row_count_ = (int64_t) row_count;
+
+rc.Elapse("totally cost");
+
+} catch (std::exception& ex) {
+error_code_ = SERVER_UNEXPECTED_ERROR;
-error_msg_ = "Table name cannot be empty";
+error_msg_ = ex.what();
 SERVER_LOG_ERROR << error_msg_;
 return error_code_;
 }

-error_code_ = SERVER_NOT_IMPLEMENT;
-error_msg_ = "Not implemented";
-SERVER_LOG_ERROR << error_msg_;
-return error_code_;
+return SERVER_SUCCESS;
 }

 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -64,7 +64,7 @@ TEST_F(DBTest2, ARHIVE_DISK_CHECK) {

 static const std::string group_name = "test_group";
 static const int group_dim = 256;
-long size;
+uint64_t size;

 engine::meta::TableSchema group_info;
 group_info.dimension_ = group_dim;

@@ -149,8 +149,8 @@ TEST_F(DBTest, DB_TEST) {

 INIT_TIMER;
 std::stringstream ss;
-long count = 0;
-long prev_count = -1;
+uint64_t count = 0;
+uint64_t prev_count = 0;

 for (auto j=0; j<10; ++j) {
 ss.str("");

@@ -72,8 +72,8 @@ TEST_F(DBTest, Metric_Tes) {

 INIT_TIMER;
 std::stringstream ss;
-long count = 0;
-long prev_count = -1;
+uint64_t count = 0;
+uint64_t prev_count = 0;

 for (auto j=0; j<10; ++j) {
 ss.str("");