Update the logic of insert and delete

Signed-off-by: shengjh <1572099106@qq.com>
pull/4973/head^2
shengjh 2020-09-05 19:17:08 +08:00 committed by yefu.chen
parent 559c160dd7
commit bfd4fe5a82
25 changed files with 892 additions and 893 deletions

.gitignore vendored
View File

@@ -27,10 +27,6 @@ proxy/cmake_build
proxy/cmake-build-debug
proxy/thirdparty/grpc-src
proxy/thirdparty/grpc-build
proxy/milvus/
proxy/milvus/*
proxy/suvlim/
proxy/suvlim/*
# Compiled source
*.a

View File

@@ -3,7 +3,6 @@ set(src-cpp
ClientV2.cpp
Consumer.cpp
Producer.cpp
${PROJECT_SOURCE_DIR}/src/grpc/gen-milvus/suvlim.pb.cc
)
add_library(message_client_cpp SHARED

View File

@@ -1,87 +1,92 @@
#include "ClientV2.h"
#include "pulsar/Result.h"

namespace {
int64_t gen_channe_id(int64_t uid) {
    // TODO: murmur3 hash from pulsar source code
    return 0;
}
}

namespace milvus::message_client {

MsgClientV2 &MsgClientV2::GetInstance() {
    // TODO: do not hardcode pulsar message configure and init
    std::string pulsar_server_addr = "pulsar://localhost:6650";
    int64_t client_id = 0;
    static MsgClientV2 msg_client(client_id, pulsar_server_addr);
    return msg_client;
}

MsgClientV2::MsgClientV2(int64_t client_id, std::string &service_url, const pulsar::ClientConfiguration &config)
    : client_id_(client_id), service_url_(service_url) {}

Status MsgClientV2::Init(const std::string &mut_topic, const std::string &query_topic,
                         const std::string &result_topic) {
    auto pulsar_client = std::make_shared<pulsar::Client>(service_url_);
    mut_producer_ = std::make_shared<MsgProducer>(pulsar_client, mut_topic);
    query_producer_ = std::make_shared<MsgProducer>(pulsar_client, mut_topic);
    consumer_ = std::make_shared<MsgConsumer>(pulsar_client, result_topic);

    auto result = consumer_->subscribe(result_topic);
    if (result != pulsar::Result::ResultOk) {
        return Status(SERVER_UNEXPECTED_ERROR,
                      "Pulsar message client init occur error, " + std::string(pulsar::strResult(result)));
    }
    return Status::OK();
}

void MsgClientV2::GetQueryResult(int64_t query_id) {
    throw std::exception();
}

Status MsgClientV2::SendMutMessage(const milvus::grpc::InsertParam &request) {
    // may have retry policy?
    auto row_count = request.rows_data_size();
    // TODO: Get the segment from master
    int64_t segment = 0;
    milvus::grpc::InsertOrDeleteMsg mut_msg;
    for (auto i = 0; i < row_count; i++) {
        mut_msg.set_op(milvus::grpc::OpType::INSERT);
        mut_msg.set_uid(GetUniqueQId());
        mut_msg.set_client_id(client_id_);
        auto channel_id = gen_channe_id(request.entity_id_array(i));
        mut_msg.set_channel_id(channel_id);
        mut_msg.set_collection_name(request.collection_name());
        mut_msg.set_partition_tag(request.partition_tag());
        mut_msg.set_segment_id(segment);
        mut_msg.mutable_rows_data()->CopyFrom(request.rows_data(i));
        mut_msg.mutable_extra_params()->CopyFrom(request.extra_params());

        auto result = mut_producer_->send(mut_msg.SerializeAsString());
        if (result != pulsar::ResultOk) {
            // TODO: error code
            return Status(DB_ERROR, pulsar::strResult(result));
        }
    }
    return Status::OK();
}

Status MsgClientV2::SendMutMessage(const milvus::grpc::DeleteByIDParam &request) {
    milvus::grpc::InsertOrDeleteMsg mut_msg;
    for (auto id : request.id_array()) {
        mut_msg.set_op(milvus::grpc::OpType::DELETE);
        mut_msg.set_uid(GetUniqueQId());
        mut_msg.set_client_id(client_id_);
        mut_msg.set_uid(id);
        mut_msg.set_collection_name(request.collection_name());

        auto result = mut_producer_->send(mut_msg.SerializeAsString());
        if (result != pulsar::ResultOk) {
            // TODO: error code
            return Status(DB_ERROR, pulsar::strResult(result));
        }
    }
    return Status::OK();
}

MsgClientV2::~MsgClientV2() {
    mut_producer_->close();
    query_producer_->close();
    consumer_->close();
}
}
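
The gen_channe_id stub above leaves the hash as a TODO referencing pulsar's murmur3 routing. As a rough sketch of what that could become, the MurmurHash3 64-bit finalizer spreads uids evenly across channels; kNumChannels and the function name are hypothetical, nothing here is defined by this commit:

#include <cstdint>

constexpr uint64_t kNumChannels = 16;  // hypothetical; the real value would come from configuration

// Sketch only: MurmurHash3's fmix64 finalizer, mapping a uid onto a channel.
int64_t gen_channe_id_sketch(int64_t uid) {
    uint64_t h = static_cast<uint64_t>(uid);
    h ^= h >> 33;
    h *= 0xff51afd7ed558ccdULL;
    h ^= h >> 33;
    h *= 0xc4ceb9fe1a85ec53ULL;
    h ^= h >> 33;
    return static_cast<int64_t>(h % kNumChannels);
}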

View File

@@ -1,42 +1,46 @@
#pragma once

#include "src/utils/Status.h"
#include "Producer.h"
#include "Consumer.h"
#include "grpc/gen-milvus/suvlim.pb.h"

namespace milvus::message_client {

class MsgClientV2 {
 public:
    static MsgClientV2 &GetInstance();

    ~MsgClientV2();

    // When using MsgClient, make sure it init successfully
    Status Init(const std::string &mut_topic,
                const std::string &query_topic, const std::string &result_topic);

    // unpackage batch insert or delete request, and delivery message to pulsar per row
    Status SendMutMessage(const milvus::grpc::InsertParam &request);

    Status SendMutMessage(const milvus::grpc::DeleteByIDParam &request);

    //
    Status SendQueryMessage(const milvus::grpc::SearchParam &request);

    void GetQueryResult(int64_t query_id);

 private:
    MsgClientV2(int64_t client_id,
                std::string &service_url,
                const pulsar::ClientConfiguration &config = pulsar::ClientConfiguration());

    int64_t GetUniqueQId() {
        return q_id_.fetch_add(1);
    }

 private:
    std::atomic<int64_t> q_id_ = 0;
    int64_t client_id_;
    std::string service_url_;
    std::shared_ptr<MsgConsumer> consumer_;
    std::shared_ptr<MsgProducer> mut_producer_;
    std::shared_ptr<MsgProducer> query_producer_;
};
}
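
Read together with the ClientV2.cpp hunk above, the intended usage is: fetch the singleton, Init it once at startup, then hand gRPC requests to SendMutMessage. A minimal sketch, assuming the topic names hard-coded in Server.cpp further down and eliding real error handling:

#include "pulsar/message_client/ClientV2.h"

void sketch_send_insert(const milvus::grpc::InsertParam &insert_param) {
    auto &client = milvus::message_client::MsgClientV2::GetInstance();
    // Topic names taken from Server::StartService() in this commit.
    auto status = client.Init("topic-insert", "topic-query", "topic-result");
    if (!status.ok()) {
        return;  // pulsar connection failed; real code logs and aborts startup
    }
    // Publishes one InsertOrDeleteMsg per row of the batch.
    status = client.SendMutMessage(insert_param);
}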

View File

@@ -1,6 +1,6 @@
#include "Consumer.h"
#include "grpc/gen-milvus/suvlim.pb.h"
#include "src/grpc/gen-milvus/suvlim.pb.h"
namespace milvus {
namespace message_client {

View File

@@ -2,7 +2,7 @@
#include "pulsar/Producer.h"
#include "Client.h"
#include "grpc/gen-milvus/suvlim.pb.h"
#include "src/grpc/gen-milvus/suvlim.pb.h"
namespace milvus {
namespace message_client {

View File

@@ -26,7 +26,7 @@ aux_source_directory( ${MILVUS_ENGINE_SRC}/server/init SERVER_INIT_
aux_source_directory( ${MILVUS_ENGINE_SRC}/server/delivery/request DELIVERY_REQUEST_FILES )
aux_source_directory( ${MILVUS_ENGINE_SRC}/server/delivery/strategy DELIVERY_STRATEGY_FILES )
aux_source_directory( ${MILVUS_ENGINE_SRC}/server/delivery DELIVERY_FILES )
aux_source_directory( ${MILVUS_ENGINE_SRC}/server/timesync TIME_SYNC_FILES )
aux_source_directory( ${MILVUS_ENGINE_SRC}/server/tso TSO_FILES)
set( SERVER_FILES ${SERVER_INIT_FILES}
${SERVER_SERVICE_FILES}
@@ -34,7 +34,7 @@ set( SERVER_FILES ${SERVER_INIT_FILES}
${DELIVERY_REQUEST_FILES}
${DELIVERY_STRATEGY_FILES}
${DELIVERY_FILES}
${TIME_SYNC_FILES}
${TSO_FILES}
)
aux_source_directory( ${MILVUS_ENGINE_SRC}/server/grpc_impl GRPC_IMPL_FILES )

View File

@@ -28,6 +28,7 @@
#include "server/init/StorageChecker.h"
#include "src/version.h"
#include <yaml-cpp/yaml.h>
#include "src/pulsar/message_client/ClientV2.h"
#include "utils/Log.h"
#include "utils/SignalHandler.h"
#include "utils/TimeRecorder.h"
@@ -290,11 +291,11 @@ Server::StartService() {
grpc::GrpcServer::GetInstance().Start();
// stat = storage::S3ClientWrapper::GetInstance().StartService();
// if (!stat.ok()) {
// LOG_SERVER_ERROR_ << "S3Client start service fail: " << stat.message();
// goto FAIL;
// }
stat = message_client::MsgClientV2::GetInstance().Init("topic-insert","topic-query","topic-result");
if (!stat.ok()) {
LOG_SERVER_ERROR_ << "Pulsar message client start service fail: " << stat.message();
goto FAIL;
}
return Status::OK();
FAIL:

View File

@@ -149,9 +149,8 @@ ReqHandler::DropIndex(const ContextPtr& context, const std::string& collection_n
}
Status
ReqHandler::Insert(const ContextPtr& context, const std::string& collection_name, const std::string& partition_name,
const int64_t& row_count, std::unordered_map<std::string, std::vector<uint8_t>>& chunk_data) {
BaseReqPtr req_ptr = InsertReq::Create(context, collection_name, partition_name, row_count, chunk_data);
ReqHandler::Insert(const ContextPtr& context, const ::milvus::grpc::InsertParam* insert_param) {
BaseReqPtr req_ptr = InsertReq::Create(context, insert_param);
ReqScheduler::ExecReq(req_ptr);
return req_ptr->status();
}
@@ -167,9 +166,8 @@ ReqHandler::GetEntityByID(const ContextPtr& context, const std::string& collecti
}
Status
ReqHandler::DeleteEntityByID(const ContextPtr& context, const std::string& collection_name,
const engine::IDNumbers& ids) {
BaseReqPtr req_ptr = DeleteEntityByIDReq::Create(context, collection_name, ids);
ReqHandler::DeleteEntityByID(const ContextPtr& context, const ::milvus::grpc::DeleteByIDParam *param) {
BaseReqPtr req_ptr = DeleteEntityByIDReq::Create(context, param);
ReqScheduler::ExecReq(req_ptr);
return req_ptr->status();
}
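
With these signatures the raw gRPC message now travels end to end inside the request object. A hedged sketch of the resulting insert path; the surrounding handler plumbing is assumed rather than shown in this hunk:

// Sketch only: the glue around these signatures is assumed.
Status sketch_insert_path(const ContextPtr &context,
                          const ::milvus::grpc::InsertParam *insert_param,
                          ReqHandler &req_handler) {
    // ReqHandler::Insert wraps the pointer in an InsertReq, runs it through
    // the ReqScheduler, and InsertReq::OnExecute forwards *insert_param to
    // MsgClientV2::SendMutMessage, one pulsar message per row.
    return req_handler.Insert(context, insert_param);
}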

View File

@@ -79,8 +79,7 @@ class ReqHandler {
const std::string& index_name);
Status
Insert(const ContextPtr& context, const std::string& collection_name, const std::string& partition_name,
const int64_t& row_count, std::unordered_map<std::string, std::vector<uint8_t>>& chunk_data);
Insert(const ContextPtr& context, const ::milvus::grpc::InsertParam* insert_param);
Status
GetEntityByID(const ContextPtr& context, const std::string& collection_name, const engine::IDNumbers& ids,
@@ -88,7 +87,7 @@
engine::DataChunkPtr& data_chunk);
Status
DeleteEntityByID(const ContextPtr& context, const std::string& collection_name, const engine::IDNumbers& ids);
DeleteEntityByID(const ContextPtr& context, const ::milvus::grpc::DeleteByIDParam *param);
Status
Search(const ContextPtr& context, const query::QueryPtr& query_ptr, const milvus::json& json_params,

View File

@@ -11,6 +11,7 @@
#include "server/delivery/ReqScheduler.h"
#include "utils/Log.h"
#include "server/tso/TSO.h"
#include <unistd.h>
#include <utility>
@@ -134,6 +135,9 @@ Status
ReqScheduler::PutToQueue(const BaseReqPtr& req_ptr) {
std::lock_guard<std::mutex> lock(queue_mtx_);
auto &tso = TSOracle::GetInstance();
req_ptr->SetTimestamp(tso.GetTimeStamp());
std::string group_name = req_ptr->req_group();
if (req_groups_.count(group_name) > 0) {
req_groups_[group_name]->PutReq(req_ptr);
@@ -152,5 +156,13 @@ ReqScheduler::PutToQueue(const BaseReqPtr& req_ptr) {
return Status::OK();
}
int64_t ReqScheduler::GetLatestReqDeliveredTime() {
return latest_req_time_.load();
}
void ReqScheduler::UpdateLatestDeliveredReqTime(int64_t time) {
latest_req_time_.store(time);
}
} // namespace server
} // namespace milvus
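
The new latest_req_time_ atomic is a delivered-time watermark: PutToQueue stamps every request from the TSO under the queue mutex, and OnPostExecute in the insert and delete requests publishes the stamp back after the pulsar send. Nothing in this commit reads the watermark yet; a hypothetical consumer might look like:

// Hypothetical consumer; not wired up anywhere in this commit.
auto &scheduler = milvus::server::ReqScheduler::GetInstance();
int64_t watermark = scheduler.GetLatestReqDeliveredTime();
// Every mutation stamped at or before `watermark` has already been
// handed to pulsar, so `watermark` is safe to broadcast as a sync point.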

View File

@@ -44,6 +44,11 @@ class ReqScheduler {
static void
ExecReq(const BaseReqPtr& req_ptr);
void UpdateLatestDeliveredReqTime(int64_t time);
int64_t GetLatestReqDeliveredTime();
protected:
ReqScheduler();
@@ -58,6 +63,8 @@
private:
mutable std::mutex queue_mtx_;
std::atomic<int64_t > latest_req_time_;
std::map<std::string, ReqQueuePtr> req_groups_;

View File

@@ -77,5 +77,9 @@ BaseReq::WaitToFinish() {
return status_;
}
void BaseReq::SetTimestamp(uint64_t ts) {
timestamp_ = ts;
}
} // namespace server
} // namespace milvus

View File

@@ -14,6 +14,7 @@
#include "server/context/Context.h"
#include "server/delivery/request/Types.h"
#include "utils/Status.h"
#include "pulsar/message_client/ClientV2.h"
#include <condition_variable>
#include <memory>
@@ -74,6 +75,9 @@ class BaseReq {
void
SetStatus(const Status& status);
void
SetTimestamp(uint64_t ts);
protected:
virtual Status
OnPreExecute();
@@ -90,6 +94,8 @@
std::string req_group_;
bool async_;
Status status_;
uint64_t timestamp_;
private:
mutable std::mutex finish_mtx_;

View File

@@ -16,6 +16,7 @@
// under the License.
#include "server/delivery/request/DeleteEntityByIDReq.h"
#include "src/server/delivery/ReqScheduler.h"
#include <memory>
#include <string>
@@ -29,21 +30,25 @@
namespace milvus {
namespace server {
DeleteEntityByIDReq::DeleteEntityByIDReq(const ContextPtr& context, const std::string& collection_name,
const engine::IDNumbers& entity_ids)
: BaseReq(context, ReqType::kDeleteEntityByID), collection_name_(collection_name), entity_ids_(entity_ids) {
DeleteEntityByIDReq::DeleteEntityByIDReq(const ContextPtr& context, const ::milvus::grpc::DeleteByIDParam *request)
: BaseReq(context, ReqType::kDeleteEntityByID), request_(request) {
}
BaseReqPtr
DeleteEntityByIDReq::Create(const ContextPtr& context, const std::string& collection_name,
const engine::IDNumbers& entity_ids) {
return std::shared_ptr<BaseReq>(new DeleteEntityByIDReq(context, collection_name, entity_ids));
DeleteEntityByIDReq::Create(const ContextPtr& context, const ::milvus::grpc::DeleteByIDParam *request) {
return std::shared_ptr<BaseReq>(new DeleteEntityByIDReq(context, request));
}
Status
DeleteEntityByIDReq::OnExecute() {
auto &msg_client = message_client::MsgClientV2::GetInstance();
Status status = msg_client.SendMutMessage(*request_);
return status;
}
Status DeleteEntityByIDReq::OnPostExecute() {
ReqScheduler::GetInstance().UpdateLatestDeliveredReqTime(timestamp_);
return Status::OK();
}
} // namespace server

View File

@@ -29,18 +29,17 @@ namespace server {
class DeleteEntityByIDReq : public BaseReq {
public:
static BaseReqPtr
Create(const ContextPtr& context, const std::string& collection_name, const engine::IDNumbers& entity_ids);
Create(const ContextPtr& context, const ::milvus::grpc::DeleteByIDParam *request);
protected:
DeleteEntityByIDReq(const ContextPtr& context, const std::string& collection_name,
const engine::IDNumbers& entity_ids);
DeleteEntityByIDReq(const ContextPtr& context, const ::milvus::grpc::DeleteByIDParam *request);
Status
OnExecute() override;
private:
const std::string collection_name_;
const engine::IDNumbers& entity_ids_;
const ::milvus::grpc::DeleteByIDParam *request_;
Status OnPostExecute();
};
} // namespace server

View File

@@ -14,6 +14,7 @@
#include "utils/CommonUtil.h"
#include "utils/Log.h"
#include "utils/TimeRecorder.h"
#include "server/delivery/ReqScheduler.h"
#include <memory>
#include <string>
@@ -28,27 +29,27 @@
namespace milvus {
namespace server {
InsertReq::InsertReq(const ContextPtr& context, const std::string& collection_name, const std::string& partition_name,
const int64_t& row_count, std::unordered_map<std::string, std::vector<uint8_t>>& chunk_data)
InsertReq::InsertReq(const ContextPtr &context, const ::milvus::grpc::InsertParam *insert_param)
: BaseReq(context, ReqType::kInsert),
collection_name_(collection_name),
partition_name_(partition_name),
row_count_(row_count),
chunk_data_(chunk_data) {
insert_param_(insert_param) {
}
BaseReqPtr
InsertReq::Create(const ContextPtr& context, const std::string& collection_name, const std::string& partition_name,
const int64_t& row_count, std::unordered_map<std::string, std::vector<uint8_t>>& chunk_data) {
return std::shared_ptr<BaseReq>(new InsertReq(context, collection_name, partition_name, row_count, chunk_data));
InsertReq::Create(const ContextPtr &context, const ::milvus::grpc::InsertParam *insert_param) {
return std::shared_ptr<BaseReq>(new InsertReq(context, insert_param));
}
Status
InsertReq::OnExecute() {
LOG_SERVER_INFO_ << LogOut("[%s][%ld] ", "insert", 0) << "Execute InsertReq.";
auto &msg_client = message_client::MsgClientV2::GetInstance();
Status status = msg_client.SendMutMessage(*insert_param_);
return status;
}
Status InsertReq::OnPostExecute() {
ReqScheduler::GetInstance().UpdateLatestDeliveredReqTime(timestamp_);
return Status::OK();
}
} // namespace server

View File

@@ -23,22 +23,20 @@ namespace server {
class InsertReq : public BaseReq {
public:
static BaseReqPtr
Create(const ContextPtr& context, const std::string& collection_name, const std::string& partition_name,
const int64_t& row_count, std::unordered_map<std::string, std::vector<uint8_t>>& chunk_data);
static BaseReqPtr
Create(const ContextPtr &context, const ::milvus::grpc::InsertParam *chunk_data);
protected:
InsertReq(const ContextPtr& context, const std::string& collection_name, const std::string& partition_name,
const int64_t& row_count, std::unordered_map<std::string, std::vector<uint8_t>>& chunk_data);
InsertReq(const ContextPtr &context, const ::milvus::grpc::InsertParam *chunk_data);
Status
OnExecute() override;
Status
OnPostExecute() override;
private:
const std::string collection_name_;
const std::string partition_name_;
const int64_t row_count_;
std::unordered_map<std::string, std::vector<uint8_t>>& chunk_data_;
const ::milvus::grpc::InsertParam *insert_param_;
};
} // namespace server

File diff suppressed because it is too large

View File

@@ -337,9 +337,6 @@ class GrpcRequestHandler final : public ::milvus::grpc::MilvusService::Service,
// May remove req_handler ?
ReqHandler req_handler_;
// delivery and receive pulsar message
std::shared_ptr<message_client::MsgClientV2> msg_client_;
std::unordered_map<std::string, std::shared_ptr<Context>> context_map_;
std::shared_ptr<opentracing::Tracer> tracer_;

View File

@@ -38,6 +38,7 @@
// #include "server/DBWrapper.h"
#include "server/grpc_impl/interceptor/SpanInterceptor.h"
#include "utils/Log.h"
#include "pulsar/message_client/ClientV2.h"
namespace milvus {
namespace server {
@@ -99,14 +100,15 @@ GrpcServer::StartService() {
HelloService helloService;
builder.RegisterService(&helloService);
// report address to master
auto reportClient = new ReportClient(::grpc::CreateChannel("192.168.2.28:50051",
::grpc::InsecureChannelCredentials()));
auto status = reportClient->ReportAddress();
delete(reportClient);
if (!status.ok()){
return Status(milvus::DB_ERROR, "");
}
// report address to master, test only for now
// auto reportClient = new ReportClient(::grpc::CreateChannel("192.168.2.28:50051",
// ::grpc::InsecureChannelCredentials()));
// auto status = reportClient->ReportAddress();
// delete(reportClient);
// if (!status.ok()){
// return Status(milvus::DB_ERROR, "");
// }
// Add gRPC interceptor
using InterceptorI = ::grpc::experimental::ServerInterceptorFactoryInterface;

View File

@@ -1,67 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include <chrono>
#include "TimeSync.h"
#include "pulsar/message_client/Producer.h"
namespace milvus {
namespace timesync {
TimeSync::TimeSync(int64_t id,
std::function<uint64_t()> timestamp,
const int interval,
const std::string &pulsar_addr,
const std::string &time_sync_topic) :
timestamp_(timestamp), interval_(interval), pulsar_addr_(pulsar_addr), time_sync_topic_(time_sync_topic) {
sync_msg_.set_peer_id(id);
auto timer = [&]() {
std::shared_ptr<pulsar::Client>
client = std::make_shared<milvus::message_client::MsgClient>(this->pulsar_addr_);
milvus::message_client::MsgProducer producer(client, this->time_sync_topic_);
for (;;) {
if (this->stop_) break;
this->sync_msg_.set_timestamp(this->timestamp_());
this->sync_msg_.set_sync_type(milvus::grpc::READ);
auto rst = producer.send(sync_msg_.SerializeAsString());
if (rst != pulsar::ResultOk) {
//TODO, add log
}
std::this_thread::sleep_for(std::chrono::milliseconds(this->interval_));
}
auto rst = producer.close();
if (rst != pulsar::ResultOk) {
//TODO, add log or throw exception
}
rst = client->close();
if (rst != pulsar::ResultOk) {
//TODO, add log or throw exception
}
};
timer_ = std::thread(timer);
}
TimeSync::~TimeSync() {
stop_ = true;
timer_.join();
}
void TimeSync::Stop() {
stop_ = true;
}
bool TimeSync::IsStop() const {
return stop_;
}
} // namespace timesync
} // namespace milvus

View File

@@ -1,45 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#pragma once
#include <functional>
#include <cstdint>
#include <thread>
#include <string>
#include "suvlim.pb.h"
namespace milvus {
namespace timesync {
class TimeSync {
public:
TimeSync(int64_t id,
std::function<uint64_t()> timestamp,
const int interval,
const std::string &pulsar_addr,
const std::string &time_sync_topic);
virtual ~TimeSync();
void Stop();
bool IsStop() const;
private:
std::function<int64_t()> timestamp_;
const int interval_;
const std::string pulsar_addr_;
const std::string time_sync_topic_;
bool stop_ = false;
std::thread timer_;
milvus::grpc::TimeSyncMsg sync_msg_;
};
} // namespace timesync
} // namespace milvus

View File

@@ -0,0 +1,36 @@
#include "TSO.h"

namespace milvus {
namespace server {

TSOracle& TSOracle::GetInstance() {
    static TSOracle oracle;
    return oracle;
}

uint64_t TSOracle::GetTimeStamp() {
    std::lock_guard lock(mutex_);
    auto now = std::chrono::high_resolution_clock::now();
    uint64_t physical = GetPhysical(now);
    uint64_t ts = ComposeTs(physical, 0);

    if (last_time_stamp_ == ts) {
        logical_++;
        return ts + logical_;
    }
    last_time_stamp_ = ts;
    logical_ = 0;
    return ts;
}

uint64_t TSOracle::GetPhysical(const std::chrono::high_resolution_clock::time_point &t) {
    auto nano_time = std::chrono::duration_cast<std::chrono::nanoseconds>(t.time_since_epoch());
    return nano_time / std::chrono::microseconds(1);
}

uint64_t TSOracle::ComposeTs(uint64_t physical, uint64_t logical) {
    return uint64_t((physical << physical_shift_bits) + logical);
}
}
}
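
For reference, with physical_shift_bits = 18 (declared in the header below) a timestamp packs the physical clock, microseconds per GetPhysical above, into the high bits and an 18-bit logical counter, 262144 values per tick, into the low bits. A small sketch of the inverse of ComposeTs under that assumed layout:

#include <cstdint>

constexpr uint32_t kPhysicalShiftBits = 18;  // mirrors physical_shift_bits in TSO.h

uint64_t physical_part(uint64_t ts) { return ts >> kPhysicalShiftBits; }
uint64_t logical_part(uint64_t ts) { return ts & ((1ULL << kPhysicalShiftBits) - 1); }

// e.g. ComposeTs(5, 3) == (5 << 18) + 3 == 1310723;
// physical_part(1310723) == 5 and logical_part(1310723) == 3.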

View File

@@ -0,0 +1,29 @@
#pragma once

#include <mutex>
#include <chrono>

namespace milvus {
namespace server {

const uint32_t physical_shift_bits = 18;

class TSOracle {
 public:
    static TSOracle& GetInstance();

    uint64_t GetTimeStamp();

 private:
    uint64_t GetPhysical(const std::chrono::high_resolution_clock::time_point &t);

    uint64_t ComposeTs(uint64_t physical, uint64_t logical);

 private:
    TSOracle() = default;

 private:
    std::mutex mutex_;
    uint64_t last_time_stamp_;
    uint64_t logical_;
};
}
}