mirror of https://github.com/milvus-io/milvus.git
fix mysql scoped connection concurrency issue
Former-commit-id: ecb3cf07b5f49f2f87e3972a126452ec2950caf8
Branch: pull/191/head
parent 511a71f476
commit 7f24723ec7
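The recurring change in this commit is to hold a pooled connection only while a query runs: results go into a StoreQueryResult declared outside a small block, and the ScopedConnection lives only inside that block, so the connection returns to the pool before the rows are consumed. A minimal sketch of that pattern, assuming MySQL++ 3.x; the pool reference, function name, and include path are illustrative, not Milvus code:

    #include <string>
    #include <vector>
    #include <mysql++/mysql++.h>   // include path may differ per install

    // Sketch only: 'pool' stands in for MySQLMetaImpl's mySQLConnectionPool_;
    // error handling is reduced to a bool return.
    bool LoadTableIds(mysqlpp::ConnectionPool &pool, std::vector<std::string> &table_ids) {
        mysqlpp::StoreQueryResult res;          // declared outside: rows outlive the connection

        {                                       // scoped connection block
            mysqlpp::ScopedConnection conn(pool, true /* safe grab */);
            mysqlpp::Query query = conn->query();
            query << "SELECT table_id FROM Tables;";
            res = query.store();                // copies all rows to the client
        }                                       // connection released here, before rows are used

        if (!res) {
            return false;
        }
        for (const auto &row : res) {
            table_ids.emplace_back(row["table_id"].c_str());
        }
        return true;
    }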
@@ -271,7 +271,7 @@ endif()
 if(DEFINED ENV{MILVUS_MYSQLPP_URL})
     set(MYSQLPP_SOURCE_URL "$ENV{MILVUS_MYSQLPP_URL}")
 else()
-    set(MYSQLPP_SOURCE_URL "https://github.com/youny626/mysqlpp.git")
+    set(MYSQLPP_SOURCE_URL "https://tangentsoft.com/mysqlpp/releases/mysql++-${MYSQLPP_VERSION}.tar.gz")
 endif()

 if (DEFINED ENV{MILVUS_OPENBLAS_URL})

@@ -1095,18 +1095,18 @@ macro(build_mysqlpp)
     "LDFLAGS=-pthread")

 externalproject_add(mysqlpp_ep
-    # URL
-    # ${MYSQLPP_SOURCE_URL}
-    GIT_REPOSITORY
+    URL
     ${MYSQLPP_SOURCE_URL}
-    GIT_TAG
-    ${MYSQLPP_VERSION}
-    GIT_SHALLOW
-    TRUE
+    # GIT_REPOSITORY
+    # ${MYSQLPP_SOURCE_URL}
+    # GIT_TAG
+    # ${MYSQLPP_VERSION}
+    # GIT_SHALLOW
+    # TRUE
     ${EP_LOG_OPTIONS}
     CONFIGURE_COMMAND
-    "./bootstrap"
-    COMMAND
+    # "./bootstrap"
+    # COMMAND
     "./configure"
     ${MYSQLPP_CONFIGURE_ARGS}
     BUILD_COMMAND
@@ -10,7 +10,7 @@ db_config:
     db_path: /tmp/milvus
     #URI format: dialect://username:password@host:port/database
     #All parts except dialect are optional, but you MUST include the delimiters
-    db_backend_url: sqlite://:@:/
+    db_backend_url: mysql://root:1234@:/test
     index_building_threshold: 1024 #build index file when raw data file size larger than this value, unit: MB

 metric_config:
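The comment in the hunk above defines the backend URI as dialect://username:password@host:port/database, with every part except the dialect optional. A self-contained sketch of splitting such a URI with std::regex; this is illustrative only and not the parser Milvus itself uses:

    #include <iostream>
    #include <regex>
    #include <string>

    // Illustrative parser for dialect://username:password@host:port/database,
    // e.g. "mysql://root:1234@:/test" or "sqlite://:@:/".
    struct BackendUri {
        std::string dialect, username, password, host, port, database;
    };

    bool ParseBackendUri(const std::string &uri, BackendUri &out) {
        static const std::regex pattern(R"(^([A-Za-z0-9]+)://([^:]*):([^@]*)@([^:]*):([^/]*)/(.*)$)");
        std::smatch m;
        if (!std::regex_match(uri, m, pattern)) {
            return false;
        }
        out = {m[1].str(), m[2].str(), m[3].str(), m[4].str(), m[5].str(), m[6].str()};
        return true;
    }

    int main() {
        BackendUri uri;
        if (ParseBackendUri("mysql://root:1234@:/test", uri)) {
            std::cout << uri.dialect << " database=" << uri.database << std::endl;
        }
        return 0;
    }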
@@ -183,7 +183,10 @@ endif ()

 install(TARGETS milvus_server DESTINATION bin)

-install(FILES ${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}
+install(FILES
+        ${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}
+        ${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3
+        ${CMAKE_BINARY_DIR}/mysqlpp_ep-prefix/src/mysqlpp_ep/lib/${CMAKE_SHARED_LIBRARY_PREFIX}mysqlpp${CMAKE_SHARED_LIBRARY_SUFFIX}.3.2.4
         DESTINATION bin) #need to copy libmysqlpp.so

 add_subdirectory(sdk)
@@ -3,6 +3,8 @@
 #include <string>
 #include <unistd.h>

+#include "Log.h"
+
 class MySQLConnectionPool : public mysqlpp::ConnectionPool {

 public:

@@ -43,6 +45,7 @@ public:
             sleep(1);
         }

+        ENGINE_LOG_DEBUG << "conns_in_use_ in grab: " << conns_in_use_ << std::endl;
         ++conns_in_use_;
         return mysqlpp::ConnectionPool::grab();
     }

@@ -50,7 +53,11 @@ public:
     // Other half of in-use conn count limit
     void release(const mysqlpp::Connection* pc) override {
         mysqlpp::ConnectionPool::release(pc);
+        ENGINE_LOG_DEBUG << "conns_in_use_ in release: " << conns_in_use_ << std::endl;
         --conns_in_use_;
+        if (conns_in_use_ < 0) {
+            ENGINE_LOG_DEBUG << "conns_in_use_ in release < 0: " << conns_in_use_ << std::endl;
+        }
     }

     void set_max_idle_time(int max_idle) {
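Only the tail of grab() appears in the hunk above; the sleep(1) loop and the conns_in_use_ counter imply a cap on concurrently grabbed connections. A hedged reconstruction of that idea, assuming MySQL++ 3.x; the class name, the max_conns_ member, and the trivial create()/destroy() bodies are illustrative, not the actual Milvus pool:

    #include <unistd.h>
    #include <atomic>
    #include <mysql++/mysql++.h>

    class BoundedConnectionPool : public mysqlpp::ConnectionPool {
    public:
        explicit BoundedConnectionPool(int max_conns) : max_conns_(max_conns) {}

        // Block callers once the cap is reached, mirroring the sleep(1) wait shown above.
        mysqlpp::Connection *grab() override {
            while (conns_in_use_ >= max_conns_) {
                sleep(1);
            }
            ++conns_in_use_;
            return mysqlpp::ConnectionPool::grab();
        }

        // Other half of the in-use count, as in the release() override above.
        void release(const mysqlpp::Connection *pc) override {
            mysqlpp::ConnectionPool::release(pc);
            --conns_in_use_;
        }

    protected:
        // ConnectionPool's pure virtuals; a real pool would connect with configured credentials.
        mysqlpp::Connection *create() override { return new mysqlpp::Connection(); }
        void destroy(mysqlpp::Connection *c) override { delete c; }
        unsigned int max_idle_time() override { return 10; }

    private:
        int max_conns_;
        std::atomic<int> conns_in_use_{0};
    };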
@@ -160,6 +160,10 @@ namespace meta {
        // std::cout << "MySQL++ thread aware:" << std::to_string(connectionPtr->thread_aware()) << std::endl;

        try {

+           CleanUp();
+
+           {
+               ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);
                // if (!connectionPtr->connect(dbName, serverAddress, username, password, port)) {
                // return Status::Error("DB connection failed: ", connectionPtr->error());

@@ -168,8 +172,6 @@
                    ENGINE_LOG_ERROR << "MySQL++ wasn't built with thread awareness! Can't run without it.";
                    return Status::Error("MySQL++ wasn't built with thread awareness! Can't run without it.");
                }
-
-           CleanUp();
                Query InitializeQuery = connectionPtr->query();

                // InitializeQuery << "SET max_allowed_packet=67108864;";

@@ -177,8 +179,8 @@
                // return Status::DBTransactionError("Initialization Error", InitializeQuery.error());
                // }

-               // InitializeQuery << "DROP TABLE IF EXISTS meta, metaFile;";
-               InitializeQuery << "CREATE TABLE IF NOT EXISTS meta (" <<
+               // InitializeQuery << "DROP TABLE IF EXISTS Tables, TableFiles;";
+               InitializeQuery << "CREATE TABLE IF NOT EXISTS Tables (" <<
                                   "id BIGINT PRIMARY KEY AUTO_INCREMENT, " <<
                                   "table_id VARCHAR(255) UNIQUE NOT NULL, " <<
                                   "state INT NOT NULL, " <<

@@ -191,7 +193,7 @@
                    return Status::DBTransactionError("Initialization Error", InitializeQuery.error());
                }

-               InitializeQuery << "CREATE TABLE IF NOT EXISTS metaFile (" <<
+               InitializeQuery << "CREATE TABLE IF NOT EXISTS TableFiles (" <<
                                   "id BIGINT PRIMARY KEY AUTO_INCREMENT, " <<
                                   "table_id VARCHAR(255) NOT NULL, " <<
                                   "engine_type INT DEFAULT 1 NOT NULL, " <<

@@ -204,6 +206,7 @@
                if (!InitializeQuery.exec()) {
                    return Status::DBTransactionError("Initialization Error", InitializeQuery.error());
                }
+           } //Scoped Connection

            // //Consume all results to avoid "Commands out of sync" error
            // while (InitializeQuery.more_results()) {
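Initialize() bails out when MySQL++ lacks thread awareness (the ENGINE_LOG_ERROR line in the hunk above). A standalone probe of that capability, assuming MySQL++ 3.x; the default-constructed Connection here exists only for the check and never connects to a server:

    #include <iostream>
    #include <mysql++/mysql++.h>

    int main() {
        mysqlpp::Connection probe(false);  // false: report errors instead of throwing
        // thread_aware() is true only when MySQL++ and the MySQL client library
        // were both built with thread support, which the pooled meta backend needs.
        if (!probe.thread_aware()) {
            std::cerr << "MySQL++ wasn't built with thread awareness! Can't run without it." << std::endl;
            return 1;
        }
        std::cout << "MySQL++ is thread aware." << std::endl;
        return 0;
    }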
@@ -247,7 +250,7 @@

        // std::lock_guard<std::recursive_mutex> lock(mysql_mutex);

-       if (dates.size() == 0) {
+       if (dates.empty()) {
            return Status::OK();
        }

@@ -260,8 +263,6 @@

        try {

-           ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);
-
            auto yesterday = GetDateWithDelta(-1);

            for (auto &date : dates) {

@@ -270,8 +271,6 @@
                }
            }

-           Query dropPartitionsByDatesQuery = connectionPtr->query();
-
            std::stringstream dateListSS;
            for (auto &date : dates) {
                dateListSS << std::to_string(date) << ", ";

@@ -279,15 +278,22 @@
            std::string dateListStr = dateListSS.str();
            dateListStr = dateListStr.substr(0, dateListStr.size() - 2); //remove the last ", "

-           dropPartitionsByDatesQuery << "UPDATE metaFile " <<
+           {
+               ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);
+
+               Query dropPartitionsByDatesQuery = connectionPtr->query();
+
+               dropPartitionsByDatesQuery << "UPDATE TableFiles " <<
                                              "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << " " <<
                                              "WHERE table_id = " << quote << table_id << " AND " <<
                                              "date in (" << dateListStr << ");";

                if (!dropPartitionsByDatesQuery.exec()) {
                    ENGINE_LOG_ERROR << "QUERY ERROR WHEN DROPPING PARTITIONS BY DATES";
-                   return Status::DBTransactionError("QUERY ERROR WHEN DROPPING PARTITIONS BY DATES", dropPartitionsByDatesQuery.error());
+                   return Status::DBTransactionError("QUERY ERROR WHEN DROPPING PARTITIONS BY DATES",
+                                                     dropPartitionsByDatesQuery.error());
                }
+           } //Scoped Connection

        } catch (const BadQuery& er) {
            // Handle any query errors
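DropPartitionsByDates() (and later CleanUpFilesWithTTL()) builds its SQL list by streaming values and trimming the trailing separator. A small helper in the same style; the function name is illustrative and not part of the file:

    #include <sstream>
    #include <string>
    #include <vector>

    // Join dates into "d1, d2, d3" for a "date in (...)" clause, mirroring the
    // dateListSS / substr(0, size - 2) idiom used in the hunk above.
    std::string JoinDates(const std::vector<int> &dates) {
        std::stringstream ss;
        for (auto &date : dates) {
            ss << std::to_string(date) << ", ";
        }
        std::string joined = ss.str();
        if (!joined.empty()) {
            joined = joined.substr(0, joined.size() - 2);  // remove the last ", "
        }
        return joined;
    }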
@@ -310,25 +316,29 @@

            MetricCollector metric;

+           {
+               ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);
+
                Query createTableQuery = connectionPtr->query();

                ENGINE_LOG_DEBUG << "Create Table in";
                if (table_schema.table_id_.empty()) {
                    NextTableId(table_schema.table_id_);
-               }
-               else {
-                   createTableQuery << "SELECT state FROM meta " <<
+               } else {
+                   createTableQuery << "SELECT state FROM Tables " <<
                                        "WHERE table_id = " << quote << table_schema.table_id_ << ";";
                    ENGINE_LOG_DEBUG << "Create Table : " << createTableQuery.str();
                    StoreQueryResult res = createTableQuery.store();
                    assert(res && res.num_rows() <= 1);
                    if (res.num_rows() == 1) {
                        int state = res[0]["state"];
                        std::string msg = (TableSchema::TO_DELETE == state) ?
-                           "Table already exists and it is in delete state, please wait a second" : "Table already exists";
+                           "Table already exists and it is in delete state, please wait a second"
+                           : "Table already exists";
                        ENGINE_LOG_WARNING << "MySQLMetaImpl::CreateTable: " << msg;
                        return Status::Error(msg);
                    }
                }
                ENGINE_LOG_DEBUG << "Create Table start";

                table_schema.files_cnt_ = 0;
                table_schema.id_ = -1;

@@ -345,11 +355,10 @@
                std::string engine_type = std::to_string(table_schema.engine_type_);
                std::string store_raw_data = table_schema.store_raw_data_ ? "true" : "false";

-               createTableQuery << "INSERT INTO meta VALUES" <<
+               createTableQuery << "INSERT INTO Tables VALUES" <<
                                    "(" << id << ", " << quote << table_id << ", " << state << ", " << dimension << ", " <<
-                                   created_on << ", " << files_cnt << ", " << engine_type << ", " << store_raw_data
-                                   << ");";
+                                   created_on << ", " << files_cnt << ", " << engine_type << ", " << store_raw_data << ");";
                ENGINE_LOG_DEBUG << "Create Table : " << createTableQuery.str();
                if (SimpleResult res = createTableQuery.execute()) {
                    table_schema.id_ = res.insert_id(); //Might need to use SELECT LAST_INSERT_ID()?
                    // std::cout << table_schema.id_ << std::endl;

@@ -357,11 +366,11 @@
                    // while (createTableQuery.more_results()) {
                    //     createTableQuery.store_next();
                    // }
-               }
-               else {
+               } else {
                    ENGINE_LOG_ERROR << "Add Table Error";
                    return Status::DBTransactionError("Add Table Error", createTableQuery.error());
                }
+           } //Scoped Connection

            // auto end_time = METRICS_NOW_TIME;
            // auto total_time = METRICS_MICROSECONDS(start_time, end_time);
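CreateTable() reads the generated key back through SimpleResult::insert_id(); the inline comment wonders whether SELECT LAST_INSERT_ID() is needed, but insert_id() already reports the last auto-increment value on that connection. A reduced sketch of the round trip, assuming MySQL++ 3.x; the column list and pool argument are illustrative:

    #include <iostream>
    #include <string>
    #include <mysql++/mysql++.h>

    // Insert a row and return its auto-increment id, or -1 on failure.
    long long InsertTableRow(mysqlpp::ConnectionPool &pool, const std::string &table_id) {
        mysqlpp::ScopedConnection conn(pool, true);
        mysqlpp::Query query = conn->query();
        query << "INSERT INTO Tables (table_id, state) VALUES ("
              << mysqlpp::quote << table_id << ", 0);";

        if (mysqlpp::SimpleResult res = query.execute()) {
            return static_cast<long long>(res.insert_id());  // id assigned by the server
        }
        std::cerr << "Add Table Error: " << query.error() << std::endl;
        return -1;
    }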
@@ -399,12 +408,13 @@

            MetricCollector metric;

+           {
+               ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                //soft delete table
                Query deleteTableQuery = connectionPtr->query();
                //
-               deleteTableQuery << "UPDATE meta " <<
+               deleteTableQuery << "UPDATE Tables " <<
                                    "SET state = " << std::to_string(TableSchema::TO_DELETE) << " " <<
                                    "WHERE table_id = " << quote << table_id << ";";

@@ -412,6 +422,8 @@
                    ENGINE_LOG_ERROR << "QUERY ERROR WHEN DELETING TABLE";
                    return Status::DBTransactionError("QUERY ERROR WHEN DELETING TABLE", deleteTableQuery.error());
                }

+           } //Scoped Connection
        } catch (const BadQuery& er) {
            // Handle any query errors
            ENGINE_LOG_ERROR << "GENERAL ERROR WHEN DELETING TABLE" << ": " << er.what();
@@ -429,21 +441,22 @@
        try {
            MetricCollector metric;

+           {
+               ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                //soft delete table files
                Query deleteTableFilesQuery = connectionPtr->query();
                //
-               deleteTableFilesQuery << "UPDATE metaFile " <<
+               deleteTableFilesQuery << "UPDATE TableFiles " <<
                                         "SET state = " << std::to_string(TableSchema::TO_DELETE) << ", " <<
-                                        "updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) << " "
+                                        "updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) << " " <<
                                         "WHERE table_id = " << quote << table_id << ";";

                if (!deleteTableFilesQuery.exec()) {
                    ENGINE_LOG_ERROR << "QUERY ERROR WHEN DELETING TABLE FILES";
                    return Status::DBTransactionError("QUERY ERROR WHEN DELETING TABLE", deleteTableFilesQuery.error());
                }

+           } //Scoped Connection
        } catch (const BadQuery& er) {
            // Handle any query errors
            ENGINE_LOG_ERROR << "QUERY ERROR WHEN DELETING TABLE FILES" << ": " << er.what();
@@ -465,14 +478,18 @@

            MetricCollector metric;

+           StoreQueryResult res;
+
+           {
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query describeTableQuery = connectionPtr->query();
                describeTableQuery << "SELECT id, dimension, files_cnt, engine_type, store_raw_data " <<
-                                     "FROM meta " <<
+                                     "FROM Tables " <<
                                      "WHERE table_id = " << quote << table_schema.table_id_ << " " <<
                                      "AND state <> " << std::to_string(TableSchema::TO_DELETE) << ";";
-               StoreQueryResult res = describeTableQuery.store();
+               res = describeTableQuery.store();
+           } //Scoped Connection

            assert(res && res.num_rows() <= 1);
            if (res.num_rows() == 1) {
@@ -516,16 +533,20 @@

            MetricCollector metric;

+           StoreQueryResult res;
+
+           {
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query hasTableQuery = connectionPtr->query();
                //since table_id is a unique column we just need to check whether it exists or not
                hasTableQuery << "SELECT EXISTS " <<
-                                "(SELECT 1 FROM meta " <<
+                                "(SELECT 1 FROM Tables " <<
                                 "WHERE table_id = " << quote << table_id << " " <<
                                 "AND state <> " << std::to_string(TableSchema::TO_DELETE) << ") " <<
                                 "AS " << quote << "check" << ";";
-               StoreQueryResult res = hasTableQuery.store();
+               res = hasTableQuery.store();
+           } //Scoped Connection

            assert(res && res.num_rows() == 1);
            int check = res[0]["check"];
@@ -552,13 +573,17 @@

            MetricCollector metric;

+           StoreQueryResult res;
+
+           {
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query allTablesQuery = connectionPtr->query();
                allTablesQuery << "SELECT id, table_id, dimension, files_cnt, engine_type, store_raw_data " <<
-                                 "FROM meta " <<
+                                 "FROM Tables " <<
                                  "WHERE state <> " << std::to_string(TableSchema::TO_DELETE) << ";";
-               StoreQueryResult res = allTablesQuery.store();
+               res = allTablesQuery.store();
+           } //Scoped Connection

            for (auto& resRow : res) {
                TableSchema table_schema;
@@ -610,8 +635,6 @@

            MetricCollector metric;

-           ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);
-
            NextFileId(file_schema.file_id_);
            file_schema.file_type_ = TableFileSchema::NEW;
            file_schema.dimension_ = table_schema.dimension_;

@@ -621,7 +644,6 @@
            file_schema.engine_type_ = table_schema.engine_type_;
            GetTableFilePath(file_schema);

-           Query createTableFileQuery = connectionPtr->query();
            std::string id = "NULL"; //auto-increment
            std::string table_id = file_schema.table_id_;
            std::string engine_type = std::to_string(file_schema.engine_type_);

@@ -632,7 +654,12 @@
            std::string created_on = std::to_string(file_schema.created_on_);
            std::string date = std::to_string(file_schema.date_);

-           createTableFileQuery << "INSERT INTO metaFile VALUES" <<
+           {
+               ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);
+
+               Query createTableFileQuery = connectionPtr->query();
+
+               createTableFileQuery << "INSERT INTO TableFiles VALUES" <<
                                        "(" << id << ", " << quote << table_id << ", " << engine_type << ", " <<
                                        quote << file_id << ", " << file_type << ", " << size << ", " <<
                                        updated_time << ", " << created_on << ", " << date << ");";

@@ -644,11 +671,11 @@
                    // while (createTableFileQuery.more_results()) {
                    //     createTableFileQuery.store_next();
                    // }
-               }
-               else {
+               } else {
                    ENGINE_LOG_ERROR << "QUERY ERROR WHEN ADDING TABLE FILE";
                    return Status::DBTransactionError("Add file Error", createTableFileQuery.error());
                }
+           } // Scoped Connection

            auto partition_path = GetTableDatePartitionPath(file_schema.table_id_, file_schema.date_);

@@ -685,13 +712,17 @@

            MetricCollector metric;

+           StoreQueryResult res;
+
+           {
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query filesToIndexQuery = connectionPtr->query();
                filesToIndexQuery << "SELECT id, table_id, engine_type, file_id, file_type, size, date " <<
-                                    "FROM metaFile " <<
+                                    "FROM TableFiles " <<
                                     "WHERE file_type = " << std::to_string(TableFileSchema::TO_INDEX) << ";";
-               StoreQueryResult res = filesToIndexQuery.store();
+               res = filesToIndexQuery.store();
+           } //Scoped Connection

            std::map<std::string, TableSchema> groups;
            TableFileSchema table_file;
@@ -757,23 +788,23 @@

            MetricCollector metric;

-           ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);
-
            StoreQueryResult res;

+           {
+               ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                if (partition.empty()) {

                    Query filesToSearchQuery = connectionPtr->query();
                    filesToSearchQuery << "SELECT id, table_id, engine_type, file_id, file_type, size, date " <<
-                                         "FROM metaFile " <<
+                                         "FROM TableFiles " <<
                                          "WHERE table_id = " << quote << table_id << " AND " <<
                                          "(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " <<
                                          "file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " OR " <<
                                          "file_type = " << std::to_string(TableFileSchema::INDEX) << ");";
                    res = filesToSearchQuery.store();

-               }
-               else {
+               } else {

                    Query filesToSearchQuery = connectionPtr->query();

@@ -785,7 +816,7 @@
                    partitionListStr = partitionListStr.substr(0, partitionListStr.size() - 2); //remove the last ", "

                    filesToSearchQuery << "SELECT id, table_id, engine_type, file_id, file_type, size, date " <<
-                                         "FROM metaFile " <<
+                                         "FROM TableFiles " <<
                                          "WHERE table_id = " << quote << table_id << " AND " <<
                                          "date IN (" << partitionListStr << ") AND " <<
                                          "(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " <<

@@ -794,6 +825,7 @@
                    res = filesToSearchQuery.store();

                }
+           } //Scoped Connection

            TableSchema table_schema;
            table_schema.table_id_ = table_id;
@@ -857,15 +889,19 @@
        try {
            MetricCollector metric;

+           StoreQueryResult res;
+
+           {
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query filesToMergeQuery = connectionPtr->query();
                filesToMergeQuery << "SELECT id, table_id, file_id, file_type, size, date " <<
-                                    "FROM metaFile " <<
+                                    "FROM TableFiles " <<
                                     "WHERE table_id = " << quote << table_id << " AND " <<
                                     "file_type = " << std::to_string(TableFileSchema::RAW) << " " <<
                                     "ORDER BY size DESC" << ";";
-               StoreQueryResult res = filesToMergeQuery.store();
+               res = filesToMergeQuery.store();
+           } //Scoped Connection

            TableSchema table_schema;
            table_schema.table_id_ = table_id;
@@ -934,14 +970,18 @@

        try {

+           StoreQueryResult res;
+
+           {
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query getTableFileQuery = connectionPtr->query();
                getTableFileQuery << "SELECT engine_type, file_id, file_type, size, date " <<
-                                    "FROM metaFile " <<
+                                    "FROM TableFiles " <<
                                     "WHERE table_id = " << quote << table_id << " AND " <<
                                     "(" << idStr << ");";
-               StoreQueryResult res = getTableFileQuery.store();
+               res = getTableFileQuery.store();
+           } //Scoped Connection

            assert(res);

@@ -1011,7 +1051,7 @@
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query archiveQuery = connectionPtr->query();
-               archiveQuery << "UPDATE metaFile " <<
+               archiveQuery << "UPDATE TableFiles " <<
                                "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << " " <<
                                "WHERE created_on < " << std::to_string(now - usecs) << " AND " <<
                                "file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";";
@@ -1048,13 +1088,17 @@
        result = 0;
        try {

+           StoreQueryResult res;
+
+           {
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query getSizeQuery = connectionPtr->query();
                getSizeQuery << "SELECT SUM(size) AS sum " <<
-                               "FROM metaFile " <<
+                               "FROM TableFiles " <<
                                "WHERE file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << ";";
-               StoreQueryResult res = getSizeQuery.store();
+               res = getSizeQuery.store();
+           } //Scoped Connection

            assert(res && res.num_rows() == 1);
            // if (!res) {
@@ -1097,11 +1141,14 @@

            MetricCollector metric;

+           bool status;
+
+           {
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query discardFilesQuery = connectionPtr->query();
                discardFilesQuery << "SELECT id, size " <<
-                                    "FROM metaFile " <<
+                                    "FROM TableFiles " <<
                                     "WHERE file_type <> " << std::to_string(TableFileSchema::TO_DELETE) << " " <<
                                     "ORDER BY id ASC " <<
                                     "LIMIT 10;";

@@ -1130,18 +1177,19 @@
                std::string idsToDiscardStr = idsToDiscardSS.str();
                idsToDiscardStr = idsToDiscardStr.substr(0, idsToDiscardStr.size() - 4); //remove the last " OR "

-               discardFilesQuery << "UPDATE metaFile " <<
+               discardFilesQuery << "UPDATE TableFiles " <<
                                     "SET file_type = " << std::to_string(TableFileSchema::TO_DELETE) << ", " <<
                                     "updated_time = " << std::to_string(utils::GetMicroSecTimeStamp()) << " " <<
                                     "WHERE " << idsToDiscardStr << ";";

-               if (discardFilesQuery.exec()) {
-                   return DiscardFiles(to_discard_size);
-               }
-               else {
+               status = discardFilesQuery.exec();
+               if (!status) {
                    ENGINE_LOG_ERROR << "QUERY ERROR WHEN DISCARDING FILES";
                    return Status::DBTransactionError("QUERY ERROR WHEN DISCARDING FILES", discardFilesQuery.error());
                }
+           } //Scoped Connection
+
+           return DiscardFiles(to_discard_size);

        } catch (const BadQuery& er) {
            // Handle any query errors
@@ -1164,23 +1212,24 @@

            MetricCollector metric;

+           {
+               ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query updateTableFileQuery = connectionPtr->query();

                //if the table has been deleted, just mark the table file as TO_DELETE
                //clean thread will delete the file later
-               updateTableFileQuery << "SELECT state FROM meta " <<
+               updateTableFileQuery << "SELECT state FROM Tables " <<
                                        "WHERE table_id = " << quote << file_schema.table_id_ << ";";
                StoreQueryResult res = updateTableFileQuery.store();

                assert(res && res.num_rows() <= 1);
                if (res.num_rows() == 1) {
                    int state = res[0]["state"];
                    if (state == TableSchema::TO_DELETE) {
                        file_schema.file_type_ = TableFileSchema::TO_DELETE;
                    }
-               }
-               else {
+               } else {
                    file_schema.file_type_ = TableFileSchema::TO_DELETE;
                }

@@ -1194,7 +1243,7 @@
                std::string created_on = std::to_string(file_schema.created_on_);
                std::string date = std::to_string(file_schema.date_);

-               updateTableFileQuery << "UPDATE metaFile " <<
+               updateTableFileQuery << "UPDATE TableFiles " <<
                                        "SET table_id = " << quote << table_id << ", " <<
                                        "engine_type = " << engine_type << ", " <<
                                        "file_id = " << quote << file_id << ", " <<

@@ -1210,8 +1259,10 @@
                if (!updateTableFileQuery.exec()) {
                    ENGINE_LOG_DEBUG << "table_id= " << file_schema.table_id_ << " file_id=" << file_schema.file_id_;
                    ENGINE_LOG_ERROR << "QUERY ERROR WHEN UPDATING TABLE FILE";
-                   return Status::DBTransactionError("QUERY ERROR WHEN UPDATING TABLE FILE", updateTableFileQuery.error());
+                   return Status::DBTransactionError("QUERY ERROR WHEN UPDATING TABLE FILE",
+                                                     updateTableFileQuery.error());
                }
+           } //Scoped Connection

        } catch (const BadQuery& er) {
            // Handle any query errors
@@ -1234,6 +1285,7 @@
        try {
            MetricCollector metric;

+           {
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query updateTableFilesQuery = connectionPtr->query();

@@ -1246,7 +1298,7 @@
                }

                updateTableFilesQuery << "SELECT EXISTS " <<
-                                        "(SELECT 1 FROM meta " <<
+                                        "(SELECT 1 FROM Tables " <<
                                         "WHERE table_id = " << quote << file_schema.table_id_ << " " <<
                                         "AND state <> " << std::to_string(TableSchema::TO_DELETE) << ") " <<
                                         "AS " << quote << "check" << ";";

@@ -1274,7 +1326,7 @@
                    std::string created_on = std::to_string(file_schema.created_on_);
                    std::string date = std::to_string(file_schema.date_);

-                   updateTableFilesQuery << "UPDATE metaFile " <<
+                   updateTableFilesQuery << "UPDATE TableFiles " <<
                                             "SET table_id = " << quote << table_id << ", " <<
                                             "engine_type = " << engine_type << ", " <<
                                             "file_id = " << quote << file_id << ", " <<

@@ -1285,12 +1337,13 @@
                                             "date = " << date << " " <<
                                             "WHERE id = " << id << ";";

                }

                if (!updateTableFilesQuery.exec()) {
                    ENGINE_LOG_ERROR << "QUERY ERROR WHEN UPDATING TABLE FILES";
-                   return Status::DBTransactionError("QUERY ERROR WHEN UPDATING TABLE FILES", updateTableFilesQuery.error());
+                   return Status::DBTransactionError("QUERY ERROR WHEN UPDATING TABLE FILES",
+                                                     updateTableFilesQuery.error());
                }
+           } //Scoped Connection

        } catch (const BadQuery& er) {
            // Handle any query errors
@@ -1314,11 +1367,12 @@
        try {
            MetricCollector metric;

+           {
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query cleanUpFilesWithTTLQuery = connectionPtr->query();
                cleanUpFilesWithTTLQuery << "SELECT id, table_id, file_id, date " <<
-                                           "FROM metaFile " <<
+                                           "FROM TableFiles " <<
                                            "WHERE file_type = " << std::to_string(TableFileSchema::TO_DELETE) << " AND " <<
                                            "updated_time < " << std::to_string(now - seconds * US_PS) << ";";
                StoreQueryResult res = cleanUpFilesWithTTLQuery.store();

@@ -1344,7 +1398,8 @@

                    GetTableFilePath(table_file);

-                   ENGINE_LOG_DEBUG << "Removing deleted id =" << table_file.id_ << " location = " << table_file.location_ << std::endl;
+                   ENGINE_LOG_DEBUG << "Removing deleted id =" << table_file.id_ << " location = "
+                                    << table_file.location_ << std::endl;
                    boost::filesystem::remove(table_file.location_);

                    idsToDelete.emplace_back(std::to_string(table_file.id_));

@@ -1356,12 +1411,13 @@
                }
                std::string idsToDeleteStr = idsToDeleteSS.str();
                idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); //remove the last " OR "
-               cleanUpFilesWithTTLQuery << "DELETE FROM metaFile WHERE " <<
+               cleanUpFilesWithTTLQuery << "DELETE FROM TableFiles WHERE " <<
                                            idsToDeleteStr << ";";
                if (!cleanUpFilesWithTTLQuery.exec()) {
                    ENGINE_LOG_ERROR << "QUERY ERROR WHEN CLEANING UP FILES WITH TTL";
                    return Status::DBTransactionError("CleanUpFilesWithTTL Error", cleanUpFilesWithTTLQuery.error());
                }
+           } //Scoped Connection

        } catch (const BadQuery& er) {
            // Handle any query errors
@@ -1376,11 +1432,12 @@
        try {
            MetricCollector metric;

+           {
                ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

                Query cleanUpFilesWithTTLQuery = connectionPtr->query();
                cleanUpFilesWithTTLQuery << "SELECT id, table_id " <<
-                                           "FROM meta " <<
+                                           "FROM Tables " <<
                                            "WHERE state = " << std::to_string(TableSchema::TO_DELETE) << ";";
                StoreQueryResult res = cleanUpFilesWithTTLQuery.store();
                assert(res);

@@ -1400,13 +1457,14 @@
                }
                std::string idsToDeleteStr = idsToDeleteSS.str();
                idsToDeleteStr = idsToDeleteStr.substr(0, idsToDeleteStr.size() - 4); //remove the last " OR "
-               cleanUpFilesWithTTLQuery << "DELETE FROM meta WHERE " <<
+               cleanUpFilesWithTTLQuery << "DELETE FROM Tables WHERE " <<
                                            idsToDeleteStr << ";";
                if (!cleanUpFilesWithTTLQuery.exec()) {
                    ENGINE_LOG_ERROR << "QUERY ERROR WHEN CLEANING UP FILES WITH TTL";
-                   return Status::DBTransactionError("QUERY ERROR WHEN CLEANING UP FILES WITH TTL", cleanUpFilesWithTTLQuery.error());
+                   return Status::DBTransactionError("QUERY ERROR WHEN CLEANING UP FILES WITH TTL",
+                                                     cleanUpFilesWithTTLQuery.error());
                }

+           } //Scoped Connection

        } catch (const BadQuery& er) {
            // Handle any query errors
@@ -1430,7 +1488,7 @@

            ENGINE_LOG_DEBUG << "Remove table file type as NEW";
            Query cleanUpQuery = connectionPtr->query();
-           cleanUpQuery << "DELETE FROM metaFile WHERE file_type = " << std::to_string(TableFileSchema::NEW) << ";";
+           cleanUpQuery << "DELETE FROM TableFiles WHERE file_type = " << std::to_string(TableFileSchema::NEW) << ";";

            if (!cleanUpQuery.exec()) {
                ENGINE_LOG_ERROR << "QUERY ERROR WHEN CLEANING UP FILES";
@@ -1457,17 +1515,6 @@
        try {
            MetricCollector metric;

-           ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);
-
-           Query countQuery = connectionPtr->query();
-           countQuery << "SELECT size " <<
-                         "FROM metaFile " <<
-                         "WHERE table_id = " << quote << table_id << " AND " <<
-                         "(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " <<
-                         "file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " OR " <<
-                         "file_type = " << std::to_string(TableFileSchema::INDEX) << ");";
-           StoreQueryResult res = countQuery.store();
-
            TableSchema table_schema;
            table_schema.table_id_ = table_id;
            auto status = DescribeTable(table_schema);

@@ -1476,6 +1523,21 @@
                return status;
            }

+           StoreQueryResult res;
+
+           {
+               ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);
+
+               Query countQuery = connectionPtr->query();
+               countQuery << "SELECT size " <<
+                             "FROM TableFiles " <<
+                             "WHERE table_id = " << quote << table_id << " AND " <<
+                             "(file_type = " << std::to_string(TableFileSchema::RAW) << " OR " <<
+                             "file_type = " << std::to_string(TableFileSchema::TO_INDEX) << " OR " <<
+                             "file_type = " << std::to_string(TableFileSchema::INDEX) << ");";
+               res = countQuery.store();
+           } //Scoped Connection

            result = 0;
            for (auto &resRow : res) {
                size_t size = resRow["size"];
@@ -1510,7 +1572,7 @@
            ScopedConnection connectionPtr(*mySQLConnectionPool_, safe_grab);

            Query dropTableQuery = connectionPtr->query();
-           dropTableQuery << "DROP TABLE IF EXISTS meta, metaFile;";
+           dropTableQuery << "DROP TABLE IF EXISTS Tables, TableFiles;";
            if (dropTableQuery.exec()) {
                return Status::OK();
            }
@@ -7,7 +7,7 @@ GTEST_VERSION=1.8.1
 JSONCONS_VERSION=0.126.0
 LAPACK_VERSION=v3.8.0
 LZ4_VERSION=v1.9.1
-MYSQLPP_VERSION=zilliz
+MYSQLPP_VERSION=3.2.4
 OPENBLAS_VERSION=v0.3.6
 PROMETHEUS_VERSION=v0.7.0
 ROCKSDB_VERSION=v6.0.2