Add test for proxy-go

Signed-off-by: neza2017 <yefu.chen@zilliz.com>
pull/4973/head^2
neza2017 2020-10-12 13:54:00 +08:00 committed by yefu.chen
parent d4e485d37a
commit bb517eeca1
72 changed files with 4521 additions and 1557 deletions

.gitignore

@@ -26,19 +26,19 @@ cmake_build
proxy/milvus
proxy/cmake_build
proxy/cmake-build-debug
proxy/cmake-build-release
proxy/cmake_build_release
proxy/thirdparty/grpc-src
proxy/thirdparty/grpc-build
proxy/milvus/*
proxy/suvlim/
proxy/suvlim/*
proxy-go/proxy-go
# sdk
sdk/cmake_build
sdk/cmake-build-debug
sdk/cmake-build-release
sdk/cmake_build_release
# Compiled source
*.a


@@ -42,19 +42,42 @@ type StorageConfig struct {
}
type PulsarConfig struct {
Authentication bool
User string
Token string
Address string
Port int32
TopicNum int
Address string
Port int32
TopicNum int
}
//type ProxyConfig struct {
// Timezone string
// Address string
// Port int32
//}
type ProxyConfig struct {
Timezone string `yaml:"timezone"`
ProxyId int `yaml:"proxy_id"`
NumReaderNodes int `yaml:"numReaderNodes"`
TosSaveInterval int `yaml:"tsoSaveInterval"`
TimeTickInterval int `yaml:"timeTickInterval"`
PulsarTopics struct {
ReaderTopicPrefix string `yaml:"readerTopicPrefix"`
NumReaderTopics int `yaml:"numReaderTopics"`
DeleteTopic string `yaml:"deleteTopic"`
QueryTopic string `yaml:"queryTopic"`
ResultTopic string `yaml:"resultTopic"`
ResultGroup string `yaml:"resultGroup"`
TimeTickTopic string `yaml:"timeTickTopic"`
} `yaml:"pulsarTopics"`
Network struct {
Address string `yaml:"address"`
Port int `yaml:"port"`
} `yaml:"network"`
Logs struct {
Level string `yaml:"level"`
TraceEnable bool `yaml:"trace.enable"`
Path string `yaml:"path"`
MaxLogFileSize string `yaml:"max_log_file_size"`
LogRotateNum int `yaml:"log_rotate_num"`
} `yaml:"logs"`
Storage struct {
Path string `yaml:"path"`
AutoFlushInterval int `yaml:"auto_flush_interval"`
} `yaml:"storage"`
}
type Reader struct {
ClientId int
@@ -71,10 +94,8 @@ type Writer struct {
StopFlag int64
ReaderQueueSize int
SearchByIdChanSize int
Parallelism int
TopicStart int
TopicEnd int
Bucket string
}
type ServerConfig struct {
@@ -85,7 +106,7 @@ type ServerConfig struct {
Pulsar PulsarConfig
Writer Writer
Reader Reader
//Proxy ProxyConfig
Proxy ProxyConfig
}
var Config ServerConfig
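
The ProxyConfig block above maps onto the new proxy: section of the YAML shown further down. As a hedged sketch (the package name, file path, and the use of gopkg.in/yaml.v2 are assumptions, not part of this commit; the yaml struct tags above follow that library's conventions), the whole file could be loaded into the global Config like this:

// Hypothetical loader, not part of this commit.
// Package name is an assumption; this is meant to sit beside the
// ServerConfig declaration above.
package conf

import (
	"io/ioutil"

	yaml "gopkg.in/yaml.v2"
)

// LoadConfig reads the YAML config file at path and fills the global
// Config, including the new Proxy section.
func LoadConfig(path string) error {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	return yaml.Unmarshal(data, &Config)
}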


@@ -10,11 +10,11 @@
# or implied. See the License for the specific language governing permissions and limitations under the License.
master:
address: localhost
adress: localhost
port: 53100
pulsarmoniterinterval: 1
pulsartopic: "monitor-topic"
segmentthreshole: 1073741824
segmentthreshole: 104857600
proxyidlist: [0]
querynodenum: 1
writenodenum: 1
@@ -25,7 +25,7 @@ etcd:
rootpath: by-dev
segthreshold: 10000
timesync:
timesync:
interval: 400
storage:
@@ -36,9 +36,6 @@ storage:
secretkey:
pulsar:
authentication: false
user: user-default
token: eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY
address: localhost
port: 6650
topicnum: 128
@@ -57,14 +54,24 @@ writer:
stopflag: -2
readerqueuesize: 10000
searchbyidchansize: 10000
parallelism: 100
topicstart: 0
topicend: 128
bucket: "zilliz-hz"
proxy:
timezone: UTC+8
proxy_id: 0
proxy_id: 1
numReaderNodes: 2
tsoSaveInterval: 200
timeTickInterval: 200
pulsarTopics:
readerTopicPrefix: "milvusReader"
numReaderTopics: 2
deleteTopic: "milvusDeleter"
queryTopic: "milvusQuery"
resultTopic: "milvusResult"
resultGroup: "milvusResultGroup"
timeTickTopic: "milvusTimeTick"
network:
address: 0.0.0.0


@@ -5,8 +5,8 @@ if [[ ! ${jobs+1} ]]; then
jobs=$(nproc)
fi
BUILD_OUTPUT_DIR="cmake-build-release"
BUILD_TYPE="Release"
BUILD_OUTPUT_DIR="cmake-build-debug"
BUILD_TYPE="Debug"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX=$(pwd)/milvus
MAKE_CLEAN="OFF"


@@ -1,6 +1,5 @@
aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR}/../pb PB_SRC_FILES)
# add_definitions(-DBOOST_STACKTRACE_USE_ADDR2LINE)
set(DOG_SEGMENT_FILES
SegmentNaive.cpp
IndexMeta.cpp
@@ -10,7 +9,6 @@ set(DOG_SEGMENT_FILES
collection_c.cpp
partition_c.cpp
segment_c.cpp
EasyAssert.cpp
${PB_SRC_FILES}
)
add_library(milvus_dog_segment SHARED
@@ -20,5 +18,4 @@ add_library(milvus_dog_segment SHARED
#add_dependencies( segment sqlite mysqlpp )
target_link_libraries(milvus_dog_segment tbb utils pthread knowhere log libprotobuf dl backtrace
)
target_link_libraries(milvus_dog_segment tbb utils pthread knowhere log libprotobuf)


@@ -1,26 +0,0 @@
#include <iostream>
#include "EasyAssert.h"
// #define BOOST_STACKTRACE_USE_ADDR2LINE
#define BOOST_STACKTRACE_USE_BACKTRACE
#include <boost/stacktrace.hpp>
namespace milvus::impl {
void EasyAssertInfo(bool value, std::string_view expr_str, std::string_view filename, int lineno,
std::string_view extra_info) {
if (!value) {
std::string info;
info += "Assert \"" + std::string(expr_str) + "\"";
info += " at " + std::string(filename) + ":" + std::to_string(lineno) + "\n";
if(!extra_info.empty()) {
info += " => " + std::string(extra_info);
}
auto fuck = boost::stacktrace::stacktrace();
std::cout << fuck;
// std::string s = fuck;
// info += ;
throw std::runtime_error(info);
}
}
}


@@ -1,13 +1,18 @@
#pragma once
#include <string_view>
#include <stdio.h>
#include <stdlib.h>
/* Paste this on the file you want to debug. */
namespace milvus::impl {
inline
void EasyAssertInfo(bool value, std::string_view expr_str, std::string_view filename, int lineno,
std::string_view extra_info);
std::string_view extra_info) {
if (!value) {
std::string info;
info += "Assert \"" + std::string(expr_str) + "\"";
info += " at " + std::string(filename) + ":" + std::to_string(lineno);
info += " => " + std::string(extra_info);
throw std::runtime_error(info);
}
}
}
#define AssertInfo(expr, info) impl::EasyAssertInfo(bool(expr), #expr, __FILE__, __LINE__, (info))


@@ -171,7 +171,9 @@ class Schema {
const FieldMeta&
operator[](const std::string& field_name) const {
auto offset_iter = offsets_.find(field_name);
AssertInfo(offset_iter != offsets_.end(), "Cannot found field_name: " + field_name);
if (offset_iter == offsets_.end()) {
throw std::runtime_error("Cannot found field_name: " + field_name);
}
auto offset = offset_iter->second;
return (*this)[offset];
}


@@ -96,6 +96,9 @@ auto SegmentNaive::get_deleted_bitmap(int64_t del_barrier, Timestamp query_times
if (offset >= insert_barrier) {
continue;
}
if (offset >= insert_barrier) {
continue;
}
if (record_.timestamps_[offset] < query_timestamp) {
Assert(offset < insert_barrier);
the_offset = std::max(the_offset, offset);


@@ -24,34 +24,6 @@ using std::cin;
using std::cout;
using std::endl;
namespace {
auto
generate_data(int N) {
std::vector<char> raw_data;
std::vector<uint64_t> timestamps;
std::vector<int64_t> uids;
std::default_random_engine er(42);
std::normal_distribution<> distribution(0.0, 1.0);
std::default_random_engine ei(42);
for (int i = 0; i < N; ++i) {
uids.push_back(10 * N + i);
timestamps.push_back(0);
// append vec
float vec[16];
for (auto& x : vec) {
x = distribution(er);
}
raw_data.insert(
raw_data.end(), (const char*)std::begin(vec), (const char*)std::end(vec));
int age = ei() % 100;
raw_data.insert(
raw_data.end(), (const char*)&age, ((const char*)&age) + sizeof(age));
}
return std::make_tuple(raw_data, timestamps, uids);
}
} // namespace
TEST(DogSegmentTest, TestABI) {
using namespace milvus::engine;
@@ -60,20 +32,6 @@ TEST(DogSegmentTest, TestABI) {
assert(true);
}
TEST(DogSegmentTest, NormalDistributionTest) {
using namespace milvus::dog_segment;
using namespace milvus::engine;
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("age", DataType::INT32);
int N = 1000* 1000;
auto [raw_data, timestamps, uids] = generate_data(N);
auto segment = CreateSegment(schema);
segment->PreInsert(N);
segment->PreDelete(N);
}
TEST(DogSegmentTest, MockTest) {
using namespace milvus::dog_segment;

go.mod

@@ -4,7 +4,7 @@ go 1.15
require (
cloud.google.com/go/bigquery v1.4.0 // indirect
code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48 // indirect
code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48
github.com/99designs/keyring v1.1.5 // indirect
github.com/BurntSushi/toml v0.3.1
github.com/DataDog/zstd v1.4.6-0.20200617134701-89f69fb7df32 // indirect
@@ -26,7 +26,7 @@ require (
github.com/google/btree v1.0.0
github.com/google/martian/v3 v3.0.0 // indirect
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 // indirect
github.com/google/uuid v1.1.1
github.com/google/uuid v1.1.1 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.10
github.com/julienschmidt/httprouter v1.3.0 // indirect
@@ -47,12 +47,14 @@ require (
github.com/prometheus/procfs v0.1.3 // indirect
github.com/rs/xid v1.2.1
github.com/sirupsen/logrus v1.6.0
github.com/spaolacci/murmur3 v1.1.0
github.com/stretchr/testify v1.6.1
github.com/tikv/client-go v0.0.0-20200824032810-95774393107b
github.com/tikv/pd v2.1.19+incompatible
github.com/yahoo/athenz v1.9.16 // indirect
go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738
go.opencensus.io v0.22.4 // indirect
go.uber.org/atomic v1.6.0
go.uber.org/zap v1.15.0
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a // indirect
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect

go.sum

@@ -160,6 +160,8 @@ github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaI
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/envoyproxy/data-plane-api v0.0.0-20200904023242-f4d8a28107ca h1:EvL1gA7uyPU2JVN93HbQwYOXyUjUJKYGStDN8eKD/Ss=
github.com/envoyproxy/data-plane-api v0.0.0-20200909004014-2bb47b2b6fb0 h1:0edaQ8F7kgXmqz/tFjjl5rW/nAKUZ5Zg0Rv5vKiE6+U=
github.com/envoyproxy/data-plane-api v0.0.0-20200923192109-df3147960318 h1:fdyLKTIP2g4GinIlHcG/8E1dDLgZIkyLIwV1mjvOxXk=
github.com/envoyproxy/data-plane-api v0.0.0-20200924222414-c0b715aedb66 h1:c5a7hsMEcrARmUgB2N/gkgJDPCiNKXCXzTBXKvF3SVI=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=


@@ -263,6 +263,100 @@ func (OpType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{4}
}
type ReqType int32
const (
// general operations
ReqType_kCmd ReqType = 0
// collection operations
ReqType_kCreateCollection ReqType = 100
ReqType_kDropCollection ReqType = 101
ReqType_kHasCollection ReqType = 102
ReqType_kListCollections ReqType = 103
ReqType_kGetCollectionInfo ReqType = 104
ReqType_kGetCollectionStats ReqType = 105
ReqType_kCountEntities ReqType = 106
// partition operations
ReqType_kCreatePartition ReqType = 200
ReqType_kDropPartition ReqType = 201
ReqType_kHasPartition ReqType = 202
ReqType_kListPartitions ReqType = 203
// index operations
ReqType_kCreateIndex ReqType = 300
ReqType_kDropIndex ReqType = 301
ReqType_kDescribeIndex ReqType = 302
// data operations
ReqType_kInsert ReqType = 400
ReqType_kGetEntityByID ReqType = 401
ReqType_kDeleteEntityByID ReqType = 402
ReqType_kSearch ReqType = 403
ReqType_kListIDInSegment ReqType = 404
// other operations
ReqType_kLoadCollection ReqType = 500
ReqType_kFlush ReqType = 501
ReqType_kCompact ReqType = 502
)
var ReqType_name = map[int32]string{
0: "kCmd",
100: "kCreateCollection",
101: "kDropCollection",
102: "kHasCollection",
103: "kListCollections",
104: "kGetCollectionInfo",
105: "kGetCollectionStats",
106: "kCountEntities",
200: "kCreatePartition",
201: "kDropPartition",
202: "kHasPartition",
203: "kListPartitions",
300: "kCreateIndex",
301: "kDropIndex",
302: "kDescribeIndex",
400: "kInsert",
401: "kGetEntityByID",
402: "kDeleteEntityByID",
403: "kSearch",
404: "kListIDInSegment",
500: "kLoadCollection",
501: "kFlush",
502: "kCompact",
}
var ReqType_value = map[string]int32{
"kCmd": 0,
"kCreateCollection": 100,
"kDropCollection": 101,
"kHasCollection": 102,
"kListCollections": 103,
"kGetCollectionInfo": 104,
"kGetCollectionStats": 105,
"kCountEntities": 106,
"kCreatePartition": 200,
"kDropPartition": 201,
"kHasPartition": 202,
"kListPartitions": 203,
"kCreateIndex": 300,
"kDropIndex": 301,
"kDescribeIndex": 302,
"kInsert": 400,
"kGetEntityByID": 401,
"kDeleteEntityByID": 402,
"kSearch": 403,
"kListIDInSegment": 404,
"kLoadCollection": 500,
"kFlush": 501,
"kCompact": 502,
}
func (x ReqType) String() string {
return proto.EnumName(ReqType_name, int32(x))
}
func (ReqType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{5}
}
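
The generated ReqType_name and ReqType_value maps above make it easy to translate between wire values and their symbolic names; a small illustrative helper (not part of the generated file, meant to sit beside it) might look like:

// describeReqType round-trips a ReqType through the generated maps.
// Illustrative only; not part of the generated code.
func describeReqType(t ReqType) (name string, value int32, known bool) {
	name = ReqType_name[int32(t)] // e.g. ReqType_kSearch -> "kSearch"
	value, known = ReqType_value[name]
	return name, value, known
}

For example, describeReqType(ReqType_kInsert) yields ("kInsert", 400, true).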
type SyncType int32
const (
@@ -285,7 +379,7 @@ func (x SyncType) String() string {
}
func (SyncType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{5}
return fileDescriptor_0802b3a25fb57244, []int{6}
}
type Status struct {
@@ -1328,8 +1422,8 @@ type QueryResult struct {
Scores []float32 `protobuf:"fixed32,4,rep,packed,name=scores,proto3" json:"scores,omitempty"`
Distances []float32 `protobuf:"fixed32,5,rep,packed,name=distances,proto3" json:"distances,omitempty"`
ExtraParams []*KeyValuePair `protobuf:"bytes,6,rep,name=extra_params,json=extraParams,proto3" json:"extra_params,omitempty"`
QueryId int64 `protobuf:"varint,7,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
ClientId int64 `protobuf:"varint,8,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
QueryId uint64 `protobuf:"varint,7,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
ProxyId int64 `protobuf:"varint,8,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -1402,16 +1496,16 @@ func (m *QueryResult) GetExtraParams() []*KeyValuePair {
return nil
}
func (m *QueryResult) GetQueryId() int64 {
func (m *QueryResult) GetQueryId() uint64 {
if m != nil {
return m.QueryId
}
return 0
}
func (m *QueryResult) GetClientId() int64 {
func (m *QueryResult) GetProxyId() int64 {
if m != nil {
return m.ClientId
return m.ProxyId
}
return 0
}
@@ -2729,6 +2823,220 @@ func (m *SearchParamPB) GetExtraParams() []*KeyValuePair {
return nil
}
type QueryReqMsg struct {
CollectionName string `protobuf:"bytes,1,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
VectorParam []*VectorParam `protobuf:"bytes,2,rep,name=vector_param,json=vectorParam,proto3" json:"vector_param,omitempty"`
PartitionTags []string `protobuf:"bytes,3,rep,name=partition_tags,json=partitionTags,proto3" json:"partition_tags,omitempty"`
Dsl string `protobuf:"bytes,4,opt,name=dsl,proto3" json:"dsl,omitempty"`
ExtraParams []*KeyValuePair `protobuf:"bytes,5,rep,name=extra_params,json=extraParams,proto3" json:"extra_params,omitempty"`
Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,7,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
QueryId uint64 `protobuf:"varint,8,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
ReqType ReqType `protobuf:"varint,9,opt,name=req_type,json=reqType,proto3,enum=milvus.grpc.ReqType" json:"req_type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *QueryReqMsg) Reset() { *m = QueryReqMsg{} }
func (m *QueryReqMsg) String() string { return proto.CompactTextString(m) }
func (*QueryReqMsg) ProtoMessage() {}
func (*QueryReqMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{43}
}
func (m *QueryReqMsg) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_QueryReqMsg.Unmarshal(m, b)
}
func (m *QueryReqMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_QueryReqMsg.Marshal(b, m, deterministic)
}
func (m *QueryReqMsg) XXX_Merge(src proto.Message) {
xxx_messageInfo_QueryReqMsg.Merge(m, src)
}
func (m *QueryReqMsg) XXX_Size() int {
return xxx_messageInfo_QueryReqMsg.Size(m)
}
func (m *QueryReqMsg) XXX_DiscardUnknown() {
xxx_messageInfo_QueryReqMsg.DiscardUnknown(m)
}
var xxx_messageInfo_QueryReqMsg proto.InternalMessageInfo
func (m *QueryReqMsg) GetCollectionName() string {
if m != nil {
return m.CollectionName
}
return ""
}
func (m *QueryReqMsg) GetVectorParam() []*VectorParam {
if m != nil {
return m.VectorParam
}
return nil
}
func (m *QueryReqMsg) GetPartitionTags() []string {
if m != nil {
return m.PartitionTags
}
return nil
}
func (m *QueryReqMsg) GetDsl() string {
if m != nil {
return m.Dsl
}
return ""
}
func (m *QueryReqMsg) GetExtraParams() []*KeyValuePair {
if m != nil {
return m.ExtraParams
}
return nil
}
func (m *QueryReqMsg) GetTimestamp() uint64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *QueryReqMsg) GetProxyId() int64 {
if m != nil {
return m.ProxyId
}
return 0
}
func (m *QueryReqMsg) GetQueryId() uint64 {
if m != nil {
return m.QueryId
}
return 0
}
func (m *QueryReqMsg) GetReqType() ReqType {
if m != nil {
return m.ReqType
}
return ReqType_kCmd
}
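
For reference, a hedged sketch of how a caller might fill in the new QueryReqMsg for a search and serialize it for a message queue; proto.Marshal comes from the protobuf runtime this generated file already imports, and the partition tag below is a placeholder:

// buildSearchRequest is illustrative only and not part of the generated file.
func buildSearchRequest(collection, dsl string, proxyID int64, queryID, ts uint64) ([]byte, error) {
	msg := &QueryReqMsg{
		CollectionName: collection,
		Dsl:            dsl,
		PartitionTags:  []string{"_default"}, // placeholder partition tag
		Timestamp:      ts,
		ProxyId:        proxyID,
		QueryId:        queryID,
		ReqType:        ReqType_kSearch,
	}
	return proto.Marshal(msg) // bytes suitable for a message payload
}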
type ManipulationReqMsg struct {
CollectionName string `protobuf:"bytes,1,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
PartitionTag string `protobuf:"bytes,2,opt,name=partition_tag,json=partitionTag,proto3" json:"partition_tag,omitempty"`
PrimaryKeys []uint64 `protobuf:"varint,3,rep,packed,name=primary_keys,json=primaryKeys,proto3" json:"primary_keys,omitempty"`
RowsData []*RowData `protobuf:"bytes,4,rep,name=rows_data,json=rowsData,proto3" json:"rows_data,omitempty"`
Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
SegmentId uint64 `protobuf:"varint,6,opt,name=segment_id,json=segmentId,proto3" json:"segment_id,omitempty"`
ChannelId uint64 `protobuf:"varint,7,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
ReqType ReqType `protobuf:"varint,8,opt,name=req_type,json=reqType,proto3,enum=milvus.grpc.ReqType" json:"req_type,omitempty"`
ProxyId int64 `protobuf:"varint,9,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
ExtraParams []*KeyValuePair `protobuf:"bytes,10,rep,name=extra_params,json=extraParams,proto3" json:"extra_params,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ManipulationReqMsg) Reset() { *m = ManipulationReqMsg{} }
func (m *ManipulationReqMsg) String() string { return proto.CompactTextString(m) }
func (*ManipulationReqMsg) ProtoMessage() {}
func (*ManipulationReqMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{44}
}
func (m *ManipulationReqMsg) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ManipulationReqMsg.Unmarshal(m, b)
}
func (m *ManipulationReqMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ManipulationReqMsg.Marshal(b, m, deterministic)
}
func (m *ManipulationReqMsg) XXX_Merge(src proto.Message) {
xxx_messageInfo_ManipulationReqMsg.Merge(m, src)
}
func (m *ManipulationReqMsg) XXX_Size() int {
return xxx_messageInfo_ManipulationReqMsg.Size(m)
}
func (m *ManipulationReqMsg) XXX_DiscardUnknown() {
xxx_messageInfo_ManipulationReqMsg.DiscardUnknown(m)
}
var xxx_messageInfo_ManipulationReqMsg proto.InternalMessageInfo
func (m *ManipulationReqMsg) GetCollectionName() string {
if m != nil {
return m.CollectionName
}
return ""
}
func (m *ManipulationReqMsg) GetPartitionTag() string {
if m != nil {
return m.PartitionTag
}
return ""
}
func (m *ManipulationReqMsg) GetPrimaryKeys() []uint64 {
if m != nil {
return m.PrimaryKeys
}
return nil
}
func (m *ManipulationReqMsg) GetRowsData() []*RowData {
if m != nil {
return m.RowsData
}
return nil
}
func (m *ManipulationReqMsg) GetTimestamp() uint64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *ManipulationReqMsg) GetSegmentId() uint64 {
if m != nil {
return m.SegmentId
}
return 0
}
func (m *ManipulationReqMsg) GetChannelId() uint64 {
if m != nil {
return m.ChannelId
}
return 0
}
func (m *ManipulationReqMsg) GetReqType() ReqType {
if m != nil {
return m.ReqType
}
return ReqType_kCmd
}
func (m *ManipulationReqMsg) GetProxyId() int64 {
if m != nil {
return m.ProxyId
}
return 0
}
func (m *ManipulationReqMsg) GetExtraParams() []*KeyValuePair {
if m != nil {
return m.ExtraParams
}
return nil
}
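
Similarly, an insert on the write path can be described with the new ManipulationReqMsg; a minimal sketch with placeholder ids (how segment and channel ids are actually assigned is outside this diff):

// buildInsertRequest is illustrative only and not part of the generated file.
// RowData construction is elided because its fields are not shown in this hunk.
func buildInsertRequest(collection string, keys []uint64, rows []*RowData, ts uint64, proxyID int64) *ManipulationReqMsg {
	return &ManipulationReqMsg{
		CollectionName: collection,
		PartitionTag:   "_default", // placeholder partition
		PrimaryKeys:    keys,
		RowsData:       rows,
		Timestamp:      ts,
		SegmentId:      0, // placeholder; assigned by the write path
		ChannelId:      0, // placeholder; assigned by the write path
		ReqType:        ReqType_kInsert,
		ProxyId:        proxyID,
	}
}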
type InsertOrDeleteMsg struct {
CollectionName string `protobuf:"bytes,1,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
RowsData *RowData `protobuf:"bytes,2,opt,name=rows_data,json=rowsData,proto3" json:"rows_data,omitempty"`
@@ -2749,7 +3057,7 @@ func (m *InsertOrDeleteMsg) Reset() { *m = InsertOrDeleteMsg{} }
func (m *InsertOrDeleteMsg) String() string { return proto.CompactTextString(m) }
func (*InsertOrDeleteMsg) ProtoMessage() {}
func (*InsertOrDeleteMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{43}
return fileDescriptor_0802b3a25fb57244, []int{45}
}
func (m *InsertOrDeleteMsg) XXX_Unmarshal(b []byte) error {
@@ -2859,7 +3167,7 @@ func (m *SearchMsg) Reset() { *m = SearchMsg{} }
func (m *SearchMsg) String() string { return proto.CompactTextString(m) }
func (*SearchMsg) ProtoMessage() {}
func (*SearchMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{44}
return fileDescriptor_0802b3a25fb57244, []int{46}
}
func (m *SearchMsg) XXX_Unmarshal(b []byte) error {
@@ -2956,7 +3264,7 @@ func (m *TimeSyncMsg) Reset() { *m = TimeSyncMsg{} }
func (m *TimeSyncMsg) String() string { return proto.CompactTextString(m) }
func (*TimeSyncMsg) ProtoMessage() {}
func (*TimeSyncMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{45}
return fileDescriptor_0802b3a25fb57244, []int{47}
}
func (m *TimeSyncMsg) XXX_Unmarshal(b []byte) error {
@@ -3011,7 +3319,7 @@ func (m *Key2SegMsg) Reset() { *m = Key2SegMsg{} }
func (m *Key2SegMsg) String() string { return proto.CompactTextString(m) }
func (*Key2SegMsg) ProtoMessage() {}
func (*Key2SegMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{46}
return fileDescriptor_0802b3a25fb57244, []int{48}
}
func (m *Key2SegMsg) XXX_Unmarshal(b []byte) error {
@@ -3059,6 +3367,7 @@ func init() {
proto.RegisterEnum("milvus.grpc.CompareOperator", CompareOperator_name, CompareOperator_value)
proto.RegisterEnum("milvus.grpc.Occur", Occur_name, Occur_value)
proto.RegisterEnum("milvus.grpc.OpType", OpType_name, OpType_value)
proto.RegisterEnum("milvus.grpc.ReqType", ReqType_name, ReqType_value)
proto.RegisterEnum("milvus.grpc.SyncType", SyncType_name, SyncType_value)
proto.RegisterType((*Status)(nil), "milvus.grpc.Status")
proto.RegisterType((*KeyValuePair)(nil), "milvus.grpc.KeyValuePair")
@@ -3103,6 +3412,8 @@ func init() {
proto.RegisterType((*BooleanQuery)(nil), "milvus.grpc.BooleanQuery")
proto.RegisterType((*GeneralQuery)(nil), "milvus.grpc.GeneralQuery")
proto.RegisterType((*SearchParamPB)(nil), "milvus.grpc.SearchParamPB")
proto.RegisterType((*QueryReqMsg)(nil), "milvus.grpc.QueryReqMsg")
proto.RegisterType((*ManipulationReqMsg)(nil), "milvus.grpc.ManipulationReqMsg")
proto.RegisterType((*InsertOrDeleteMsg)(nil), "milvus.grpc.InsertOrDeleteMsg")
proto.RegisterType((*SearchMsg)(nil), "milvus.grpc.SearchMsg")
proto.RegisterType((*TimeSyncMsg)(nil), "milvus.grpc.TimeSyncMsg")
@@ -3114,190 +3425,213 @@ }
}
var fileDescriptor_0802b3a25fb57244 = []byte{
// 2918 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x3a, 0xcd, 0x72, 0xdb, 0xc8,
0xd1, 0x04, 0x49, 0x91, 0x44, 0x83, 0x92, 0xa0, 0xb1, 0x6c, 0xcb, 0xf6, 0xfa, 0xb3, 0x17, 0x5f,
0x25, 0xf1, 0x3a, 0x55, 0xb6, 0x97, 0xbb, 0xb1, 0x9d, 0x78, 0x77, 0xb3, 0xfc, 0x81, 0x24, 0xc4,
0x14, 0x29, 0x0f, 0x21, 0x7b, 0x7f, 0x2a, 0x85, 0x40, 0xc4, 0x98, 0x46, 0x96, 0x00, 0x18, 0x00,
0x94, 0xcc, 0x1c, 0x73, 0x4f, 0xaa, 0x72, 0xcc, 0x31, 0x79, 0x84, 0xe4, 0x0d, 0x72, 0xcc, 0x0b,
0xec, 0x61, 0xab, 0x72, 0x4c, 0xe5, 0x31, 0x52, 0xf3, 0x03, 0x12, 0xa0, 0x48, 0x59, 0x5c, 0x25,
0x27, 0x0d, 0x7b, 0xa6, 0x7b, 0xfa, 0x7f, 0xba, 0x1b, 0x82, 0x1f, 0x8c, 0xbe, 0x19, 0x3c, 0xf4,
0xec, 0x28, 0x26, 0xe1, 0xc3, 0x41, 0x38, 0xea, 0x3f, 0xf4, 0x48, 0x14, 0xd9, 0x03, 0x92, 0xfc,
0x7d, 0x30, 0x0a, 0x83, 0x38, 0x40, 0x8a, 0xe7, 0x0e, 0x4f, 0xc6, 0xd1, 0x03, 0x7a, 0x44, 0x7b,
0x05, 0xa5, 0x5e, 0x6c, 0xc7, 0xe3, 0x08, 0xfd, 0x04, 0x80, 0x84, 0x61, 0x10, 0x5a, 0xfd, 0xc0,
0x21, 0x3b, 0xd2, 0x5d, 0xe9, 0xde, 0x46, 0xed, 0xda, 0x83, 0xd4, 0xd9, 0x07, 0x3a, 0xdd, 0x6e,
0x06, 0x0e, 0xc1, 0x32, 0x49, 0x96, 0xe8, 0x1a, 0x94, 0x42, 0x62, 0x47, 0x81, 0xbf, 0x93, 0xbf,
0x2b, 0xdd, 0x93, 0xb1, 0xf8, 0xa5, 0x3d, 0x86, 0xea, 0x73, 0x32, 0x79, 0x69, 0x0f, 0xc7, 0xe4,
0xd0, 0x76, 0x43, 0xa4, 0x42, 0xe1, 0x1b, 0x32, 0x61, 0x74, 0x65, 0x4c, 0x97, 0x68, 0x1b, 0xd6,
0x4e, 0xe8, 0xb6, 0x40, 0xe4, 0x3f, 0xb4, 0x9f, 0xc2, 0x46, 0x33, 0x18, 0x0e, 0x49, 0x3f, 0x76,
0x03, 0xbf, 0x63, 0x7b, 0x04, 0xfd, 0x08, 0x36, 0xfb, 0x53, 0x88, 0xe5, 0xdb, 0x1e, 0x11, 0x54,
0x36, 0xfa, 0x99, 0x83, 0xda, 0x10, 0x50, 0x16, 0xb5, 0xed, 0x46, 0x31, 0xfa, 0x31, 0x94, 0x22,
0x26, 0x21, 0xc3, 0x52, 0x6a, 0x57, 0x32, 0x32, 0x71, 0xe1, 0xb1, 0x38, 0x82, 0x3e, 0x00, 0x75,
0xee, 0xae, 0x68, 0x27, 0x7f, 0xb7, 0x70, 0x4f, 0xc6, 0x9b, 0xd9, 0xcb, 0x22, 0xad, 0x07, 0xf2,
0xae, 0x4b, 0x86, 0xce, 0x4a, 0x3c, 0xa2, 0xdb, 0x00, 0xaf, 0x29, 0x16, 0x3f, 0xc3, 0x25, 0x97,
0x5f, 0x27, 0x74, 0xb4, 0x7f, 0x48, 0x50, 0x3e, 0xb0, 0x47, 0x23, 0xd7, 0x1f, 0xac, 0xc6, 0xf8,
0x02, 0x06, 0xf2, 0x0b, 0x19, 0xa0, 0x54, 0xfb, 0x6f, 0x88, 0x67, 0xef, 0x14, 0x16, 0x51, 0x65,
0x5b, 0x58, 0x1c, 0x41, 0x9f, 0x40, 0x95, 0xbc, 0x8d, 0x43, 0xdb, 0x1a, 0xd9, 0xa1, 0xed, 0x45,
0x3b, 0xc5, 0xbb, 0x85, 0x7b, 0x4a, 0xed, 0x46, 0x06, 0x25, 0x6d, 0x65, 0xac, 0xb0, 0xe3, 0x87,
0xec, 0xb4, 0x16, 0x81, 0x22, 0x64, 0x59, 0xdd, 0x10, 0x4f, 0xa0, 0xea, 0x71, 0x5c, 0x6b, 0xe8,
0x46, 0x31, 0x33, 0x82, 0x52, 0xdb, 0xce, 0xa0, 0x08, 0xe2, 0x58, 0xf1, 0x66, 0xb7, 0x68, 0xcf,
0x61, 0xe3, 0xd0, 0x0e, 0x63, 0x97, 0x0a, 0xcc, 0xf8, 0xb8, 0xb8, 0x6d, 0x54, 0x28, 0xc4, 0xf6,
0x40, 0xe8, 0x8d, 0x2e, 0xb5, 0x21, 0xac, 0x4f, 0x89, 0xad, 0x2e, 0xc3, 0x03, 0xb8, 0x32, 0x4a,
0xb0, 0xad, 0xd8, 0x1e, 0x58, 0x76, 0x18, 0xda, 0x13, 0xe1, 0x4f, 0x5b, 0xd3, 0x2d, 0xd3, 0x1e,
0xd4, 0xe9, 0x86, 0xf6, 0x02, 0x36, 0x5f, 0x92, 0x7e, 0x1c, 0x84, 0x38, 0x38, 0xc5, 0xa4, 0x1f,
0x84, 0x0e, 0x73, 0x97, 0x61, 0x60, 0xc7, 0x96, 0x63, 0xc7, 0xf6, 0x8e, 0x74, 0xb7, 0x70, 0x2f,
0x8f, 0x65, 0x06, 0x69, 0xd9, 0xb1, 0x8d, 0xee, 0x80, 0x72, 0xec, 0xfa, 0x76, 0x38, 0xe1, 0xfb,
0x94, 0xf3, 0x2a, 0x06, 0x0e, 0xa2, 0x07, 0xb4, 0x5f, 0x81, 0xac, 0xfb, 0xb1, 0x1b, 0x4f, 0x0c,
0x27, 0x5a, 0x8d, 0xf9, 0x1f, 0xc2, 0x26, 0x61, 0x98, 0x96, 0xeb, 0xa4, 0x18, 0x2f, 0xe0, 0x75,
0x22, 0x08, 0x72, 0xa6, 0x77, 0xa1, 0x2a, 0x98, 0xe6, 0x1c, 0x3f, 0x86, 0x72, 0xc8, 0x56, 0x11,
0x63, 0x57, 0xa9, 0xbd, 0x97, 0xb9, 0x65, 0x4e, 0x40, 0x9c, 0x1c, 0xd6, 0xbe, 0x06, 0x85, 0xef,
0x71, 0xa3, 0x21, 0x28, 0xfe, 0x9a, 0x26, 0x15, 0x6e, 0x29, 0xb6, 0x46, 0x4f, 0x01, 0xc2, 0xe0,
0xd4, 0xe2, 0x18, 0x4c, 0xd8, 0x79, 0x5f, 0x4c, 0x73, 0x82, 0xe5, 0x30, 0xb9, 0x45, 0x1b, 0x88,
0x58, 0x3d, 0x20, 0xb1, 0x3d, 0x17, 0x82, 0xd2, 0x5c, 0x08, 0xa2, 0x0f, 0xa0, 0x18, 0x4f, 0x46,
0x3c, 0x7c, 0x36, 0x6a, 0x57, 0x33, 0xf4, 0xa9, 0x4e, 0xcd, 0xc9, 0x88, 0x60, 0x76, 0x84, 0x3a,
0x8c, 0xe3, 0x7a, 0x2c, 0x90, 0x0a, 0x98, 0x2e, 0xb5, 0x3a, 0x94, 0x78, 0x08, 0xa1, 0x27, 0xa0,
0xf0, 0x5b, 0x3c, 0x12, 0xdb, 0x89, 0x2e, 0xb2, 0xf9, 0x74, 0xca, 0x12, 0xe6, 0x0c, 0xd1, 0x65,
0xa4, 0xdd, 0x86, 0x32, 0x0e, 0x4e, 0x99, 0x79, 0x11, 0x14, 0x8f, 0x87, 0xc1, 0x31, 0xe3, 0xb1,
0x8a, 0xd9, 0x5a, 0xfb, 0x73, 0x1e, 0x14, 0xc3, 0x8f, 0x48, 0x18, 0xaf, 0xe8, 0xdd, 0xb3, 0xc0,
0xcf, 0xbf, 0x3b, 0xf0, 0x3f, 0x04, 0xaa, 0xbd, 0x88, 0xbb, 0x55, 0x61, 0x41, 0xec, 0x09, 0x16,
0x71, 0x85, 0x1e, 0x63, 0xcc, 0x2e, 0x70, 0x98, 0xe2, 0x02, 0x87, 0x41, 0xff, 0x0f, 0xeb, 0x99,
0xa8, 0xd8, 0x59, 0x63, 0xec, 0x56, 0xd3, 0xf1, 0x70, 0x26, 0xf1, 0x94, 0x56, 0x4a, 0x3c, 0xff,
0x96, 0x40, 0xe9, 0x11, 0x3b, 0xec, 0xbf, 0x59, 0x51, 0x47, 0xcf, 0xa0, 0x7a, 0xc2, 0x5c, 0x88,
0xdf, 0x2b, 0xb2, 0xce, 0xce, 0x02, 0x1f, 0x63, 0x84, 0xb1, 0x72, 0x92, 0x72, 0x59, 0xea, 0x0d,
0xd1, 0x90, 0x79, 0x83, 0x8c, 0xe9, 0xf2, 0xac, 0xa8, 0x45, 0x16, 0xfa, 0xe7, 0x8b, 0xba, 0xb6,
0x92, 0xa8, 0xa7, 0xb0, 0xcd, 0x25, 0x35, 0xfc, 0x1e, 0x19, 0x78, 0xc4, 0x17, 0x6e, 0xa1, 0xc1,
0xfa, 0x6b, 0x77, 0x48, 0x66, 0xb6, 0x90, 0xd8, 0xd5, 0x0a, 0x05, 0x26, 0x96, 0x78, 0x06, 0xd5,
0x88, 0xe1, 0x4e, 0xa5, 0x95, 0xce, 0x48, 0x9b, 0x52, 0x23, 0x56, 0xa2, 0xd9, 0x0f, 0xed, 0x4f,
0x12, 0x54, 0x58, 0x6a, 0x71, 0xc9, 0x8a, 0x99, 0x45, 0x85, 0x82, 0xeb, 0x44, 0x22, 0x9b, 0xd0,
0x25, 0xba, 0x05, 0xf2, 0x89, 0x3d, 0x74, 0x1d, 0x2b, 0x0c, 0x4e, 0x99, 0xb7, 0x55, 0x70, 0x85,
0x01, 0x70, 0x70, 0x9a, 0x75, 0xc5, 0xe2, 0x45, 0x5c, 0x51, 0xfb, 0x6b, 0x1e, 0x94, 0x17, 0x63,
0x12, 0x4e, 0x30, 0x89, 0xc6, 0xc3, 0x15, 0xb3, 0xf6, 0x87, 0x50, 0x21, 0x42, 0x2e, 0xa1, 0x91,
0x6c, 0x0e, 0x48, 0x84, 0xc6, 0xd3, 0x63, 0xe8, 0x3a, 0x94, 0x69, 0x62, 0xf2, 0xc7, 0x49, 0x2e,
0x28, 0x85, 0xc1, 0x69, 0x67, 0xec, 0xd1, 0xe2, 0x28, 0xea, 0x07, 0x21, 0xe1, 0x2f, 0x67, 0x1e,
0x8b, 0x5f, 0xe8, 0x3d, 0x90, 0x1d, 0x37, 0x8a, 0x6d, 0xbf, 0x4f, 0xb8, 0xc1, 0xf3, 0x78, 0x06,
0xb8, 0x9c, 0xf3, 0xa3, 0x1b, 0x50, 0xf9, 0x0d, 0x95, 0xdd, 0x72, 0x9d, 0x9d, 0x32, 0xe3, 0xa6,
0xcc, 0x7e, 0x1b, 0x0e, 0xd5, 0x73, 0x7f, 0xe8, 0x12, 0x3f, 0xa6, 0x7b, 0x15, 0xb6, 0x57, 0xe1,
0x00, 0xc3, 0xd1, 0x7e, 0x09, 0x4a, 0x2f, 0x0e, 0xe9, 0x7b, 0x4a, 0x46, 0xc3, 0xc9, 0x6a, 0x3a,
0x7b, 0x1f, 0xaa, 0x11, 0xc3, 0xb5, 0x42, 0x8a, 0x2c, 0x9e, 0x50, 0x25, 0x9a, 0xd1, 0xd3, 0x5e,
0x81, 0xdc, 0x08, 0x82, 0xe1, 0xf7, 0x20, 0x7e, 0x1b, 0xe0, 0x38, 0x08, 0x86, 0x29, 0xd2, 0x15,
0x2c, 0x1f, 0x27, 0xb4, 0xb4, 0x28, 0x5d, 0xf5, 0xe1, 0xe0, 0xb4, 0x19, 0x8c, 0xfd, 0x15, 0x4d,
0xfe, 0x08, 0xb6, 0x53, 0xf9, 0x81, 0x9a, 0xb2, 0x4f, 0x89, 0xb0, 0xbb, 0x0a, 0x18, 0xf5, 0xcf,
0x90, 0xd7, 0x6e, 0x41, 0xb9, 0x19, 0x78, 0x9e, 0xed, 0x3b, 0xd4, 0x9d, 0xfb, 0x9e, 0x93, 0x14,
0xb6, 0x7d, 0xcf, 0xd1, 0xfe, 0x29, 0x01, 0x18, 0xbe, 0x43, 0xde, 0xf2, 0x50, 0xfc, 0xdf, 0xd4,
0x71, 0xd9, 0x57, 0xac, 0x30, 0xff, 0x8a, 0xdd, 0x06, 0x70, 0x29, 0x0b, 0x7c, 0xbb, 0xc8, 0xb7,
0x19, 0x84, 0x6d, 0x5f, 0x2e, 0xe9, 0x7c, 0x0e, 0xb0, 0x3b, 0x1c, 0x47, 0x22, 0xbb, 0xd6, 0xe0,
0xea, 0x1c, 0xcb, 0x99, 0x94, 0x73, 0x25, 0xcb, 0x38, 0xaf, 0x1a, 0x8e, 0xa0, 0xda, 0x0c, 0xbc,
0x91, 0xdd, 0x5f, 0xf5, 0x15, 0x7b, 0x0f, 0xe4, 0xf8, 0x4d, 0x48, 0xa2, 0x37, 0xc1, 0x90, 0x97,
0x00, 0x12, 0x9e, 0x01, 0xb4, 0x23, 0xd8, 0x6c, 0x91, 0x21, 0x89, 0x49, 0x63, 0x62, 0xb4, 0x56,
0xa4, 0x7c, 0x03, 0x2a, 0x73, 0x95, 0x4e, 0xd9, 0x15, 0x35, 0xce, 0x57, 0xe9, 0x9e, 0xc4, 0xf0,
0x5f, 0x07, 0xab, 0xd9, 0xf4, 0x16, 0xc8, 0xb4, 0x7e, 0xb1, 0x5c, 0xff, 0x75, 0x20, 0xac, 0x59,
0xa1, 0x00, 0x4a, 0x49, 0xfb, 0x1a, 0xb6, 0xf6, 0x48, 0x2c, 0x8a, 0xb4, 0x56, 0xb4, 0x22, 0xd3,
0xb7, 0x01, 0x22, 0x9e, 0xf6, 0x69, 0x48, 0x73, 0x7f, 0x95, 0x05, 0xc4, 0x70, 0xb4, 0x31, 0x6c,
0x24, 0xe5, 0x1f, 0x7f, 0x84, 0xff, 0x1b, 0xea, 0xa0, 0x55, 0xe7, 0xcc, 0xf5, 0x22, 0x96, 0xb0,
0x65, 0x51, 0xc2, 0xf0, 0xd6, 0xe8, 0x11, 0xa8, 0xfc, 0x95, 0x64, 0x15, 0x0e, 0x17, 0x89, 0xa5,
0x3c, 0x8f, 0xf8, 0x91, 0x2b, 0xaa, 0xba, 0x02, 0x9e, 0x01, 0xb4, 0x3f, 0x48, 0xa2, 0x42, 0xa3,
0xd5, 0x15, 0xfa, 0x18, 0x64, 0x9a, 0xed, 0x2d, 0x56, 0x87, 0x49, 0xe7, 0xd4, 0x61, 0xfb, 0x39,
0x5c, 0x71, 0xc4, 0x1a, 0x35, 0xce, 0x3c, 0xde, 0xd4, 0x32, 0xb7, 0x17, 0x3c, 0xde, 0x33, 0xb6,
0xf6, 0x73, 0x99, 0x37, 0xbc, 0x51, 0x16, 0x3d, 0xa9, 0xf6, 0xad, 0x04, 0x90, 0xe2, 0x7e, 0x03,
0xf2, 0x2e, 0x8f, 0xf1, 0x22, 0xce, 0xbb, 0x0e, 0xad, 0xcc, 0x52, 0xb1, 0xc9, 0xd6, 0xd3, 0xc2,
0xb1, 0xf0, 0xee, 0xc2, 0xf1, 0x13, 0xa8, 0xf2, 0xe8, 0xbc, 0x70, 0x5f, 0xe5, 0x4e, 0xf3, 0x49,
0x74, 0xc9, 0xe0, 0xdd, 0x83, 0xad, 0x94, 0x16, 0x44, 0xd5, 0x5e, 0x4b, 0x7a, 0xf1, 0x8b, 0xd4,
0xec, 0x42, 0x45, 0xdf, 0x49, 0x20, 0x9b, 0x24, 0xf4, 0xd8, 0x4b, 0xfb, 0xae, 0xaa, 0xfa, 0x16,
0xc8, 0xae, 0x1f, 0x5b, 0x49, 0xc3, 0x4f, 0xfd, 0xa9, 0xe2, 0xfa, 0x31, 0xe3, 0x91, 0x3e, 0x1f,
0x4e, 0x30, 0x3e, 0x1e, 0x12, 0xb1, 0x4f, 0x3d, 0x4a, 0xc2, 0x0a, 0x87, 0xf1, 0x23, 0xbc, 0x44,
0x18, 0x13, 0xf6, 0xc8, 0x16, 0xf9, 0xd3, 0xc5, 0x00, 0xf4, 0x99, 0xdd, 0x86, 0xb5, 0xe3, 0x20,
0x88, 0x62, 0x56, 0x4a, 0xe6, 0x31, 0xff, 0x71, 0xc9, 0x1a, 0xd2, 0x06, 0x85, 0x65, 0xa8, 0x90,
0xe8, 0x6f, 0x47, 0x21, 0x7a, 0x0a, 0x95, 0x60, 0x44, 0x42, 0x3b, 0x0e, 0x42, 0xe1, 0x91, 0x59,
0x1d, 0x89, 0xb3, 0x5d, 0x71, 0x06, 0x4f, 0x4f, 0xa3, 0x1d, 0x28, 0xb3, 0xb5, 0xef, 0x08, 0x6f,
0x49, 0x7e, 0x6a, 0x7f, 0x93, 0x00, 0xb0, 0xed, 0x0f, 0xc8, 0x85, 0x34, 0x58, 0x4b, 0xd3, 0x39,
0x5b, 0x96, 0xa6, 0x98, 0x9d, 0xde, 0x30, 0x53, 0x4c, 0xe1, 0x3c, 0xc5, 0xac, 0xd6, 0xd5, 0x7f,
0x2b, 0x25, 0x9d, 0xda, 0x85, 0xd8, 0xbe, 0x03, 0x0a, 0x2f, 0x47, 0x38, 0x23, 0x79, 0xc6, 0x08,
0x30, 0x50, 0x83, 0x71, 0x93, 0x6a, 0x18, 0x0b, 0x2b, 0x34, 0x8c, 0x34, 0x04, 0xe3, 0x60, 0xf4,
0x8d, 0x70, 0x06, 0xb6, 0xbe, 0x64, 0x64, 0xbc, 0x85, 0x2a, 0x2d, 0x51, 0x88, 0xed, 0x73, 0xc9,
0xee, 0xc1, 0x5a, 0xd0, 0xef, 0x8f, 0x13, 0x83, 0xa3, 0x0c, 0x99, 0x2e, 0xdd, 0xc1, 0xfc, 0x00,
0xfa, 0x0c, 0xd6, 0x07, 0xc4, 0x27, 0xa1, 0x3d, 0xb4, 0x98, 0x64, 0xc2, 0x42, 0xd9, 0x8b, 0xf7,
0xf8, 0x09, 0x5e, 0x98, 0x56, 0x07, 0xa9, 0x5f, 0xda, 0xef, 0xf3, 0x50, 0x4d, 0x6f, 0xa3, 0xcf,
0x61, 0xfd, 0x98, 0xb3, 0x22, 0x08, 0x4a, 0x0b, 0xba, 0xdd, 0x34, 0xb3, 0xfb, 0x39, 0x5c, 0x3d,
0x4e, 0x33, 0xff, 0x04, 0x20, 0x26, 0xa1, 0x37, 0xe5, 0x47, 0x3a, 0xd3, 0x7e, 0x4e, 0x63, 0x77,
0x3f, 0x87, 0xe5, 0x78, 0x1a, 0xc8, 0x3f, 0x03, 0x25, 0xa4, 0x4e, 0x29, 0x30, 0xf9, 0x94, 0xe8,
0x7a, 0xb6, 0xe2, 0x9e, 0x3a, 0xed, 0x7e, 0x0e, 0x43, 0x38, 0x73, 0xe1, 0x4f, 0xa7, 0x29, 0x98,
0x23, 0x17, 0x17, 0x74, 0x14, 0x29, 0xdf, 0x99, 0x65, 0x5f, 0xf6, 0x93, 0x66, 0x5f, 0x86, 0xa7,
0xfd, 0x4b, 0x82, 0xf5, 0x54, 0xe7, 0x71, 0xd8, 0xb8, 0xf8, 0xbb, 0xb5, 0xe2, 0xd0, 0xe5, 0xac,
0xe9, 0x0a, 0x0b, 0x34, 0xbd, 0xdc, 0x74, 0x97, 0x0c, 0xa6, 0xdf, 0x15, 0x60, 0x8b, 0x77, 0xf3,
0xdd, 0x90, 0x57, 0x2e, 0x07, 0xd1, 0xe0, 0xe2, 0xc2, 0x66, 0x7a, 0x23, 0x6e, 0xe3, 0x77, 0xb5,
0xe9, 0x2a, 0x14, 0xc6, 0xae, 0x93, 0xcc, 0x2c, 0xc6, 0xae, 0xb3, 0xa8, 0x4b, 0x3d, 0xdb, 0x90,
0xd3, 0xba, 0xcb, 0xf5, 0x48, 0x14, 0xdb, 0xde, 0x88, 0xa5, 0xd9, 0x22, 0x9e, 0x01, 0xe6, 0xca,
0x90, 0xd2, 0x5c, 0x19, 0x42, 0xb7, 0xfb, 0x6f, 0x6c, 0xdf, 0x27, 0xc3, 0x59, 0x53, 0x22, 0x0b,
0x88, 0x41, 0x19, 0xc8, 0x07, 0x23, 0xd6, 0x8f, 0x6c, 0xcc, 0x15, 0x52, 0xdd, 0x11, 0x7b, 0x34,
0xf3, 0xc1, 0x28, 0xdb, 0xbb, 0xc8, 0xd9, 0xde, 0xe5, 0x8c, 0x11, 0x60, 0x25, 0x23, 0xfc, 0x3d,
0x0f, 0x32, 0xf7, 0xb6, 0x95, 0x94, 0x9f, 0x4a, 0x5c, 0x5c, 0xf5, 0x17, 0x4c, 0x5c, 0x67, 0xf4,
0x5d, 0x58, 0x30, 0x15, 0x10, 0x66, 0x2a, 0xce, 0xcc, 0x74, 0xbe, 0x05, 0x32, 0xea, 0x29, 0xbd,
0x43, 0x3d, 0xe5, 0x95, 0x1a, 0xca, 0x64, 0x14, 0x57, 0x61, 0x6c, 0xf2, 0x51, 0x9c, 0x98, 0x75,
0xc8, 0xd3, 0x59, 0x87, 0xf6, 0x16, 0x14, 0xd3, 0xf5, 0x48, 0x6f, 0xe2, 0xf7, 0xa9, 0x16, 0xaf,
0x43, 0x79, 0x44, 0x48, 0x68, 0x19, 0x8e, 0x28, 0xf6, 0x4a, 0xf4, 0xa7, 0xc1, 0xc4, 0x30, 0xa7,
0x62, 0xe4, 0xb9, 0x18, 0x53, 0x00, 0xaa, 0x81, 0x1c, 0x4d, 0xfc, 0xbe, 0xb5, 0xb4, 0x90, 0xa2,
0xf4, 0x99, 0x4f, 0x54, 0x22, 0xb1, 0xd2, 0xbe, 0x06, 0x78, 0x4e, 0x26, 0xb5, 0x1e, 0x19, 0xd0,
0x8b, 0x85, 0xe2, 0xa4, 0x25, 0x8a, 0xcb, 0x9f, 0xef, 0xba, 0x05, 0x56, 0x99, 0xcc, 0x5c, 0xf7,
0xfe, 0x5f, 0x8a, 0x20, 0x4f, 0xbf, 0x7b, 0x20, 0x05, 0xca, 0xbd, 0xa3, 0x66, 0x53, 0xef, 0xf5,
0xd4, 0x1c, 0xda, 0x06, 0xf5, 0xa8, 0xa3, 0x7f, 0x71, 0xa8, 0x37, 0x4d, 0xbd, 0x65, 0xe9, 0x18,
0x77, 0xb1, 0x2a, 0x21, 0x04, 0x1b, 0xcd, 0x6e, 0xa7, 0xa3, 0x37, 0x4d, 0x6b, 0xb7, 0x6e, 0xb4,
0xf5, 0x96, 0x9a, 0x47, 0x57, 0x61, 0xeb, 0x50, 0xc7, 0x07, 0x46, 0xaf, 0x67, 0x74, 0x3b, 0x56,
0x4b, 0xef, 0x18, 0x7a, 0x4b, 0x2d, 0xa0, 0x1b, 0x70, 0xb5, 0xd9, 0x6d, 0xb7, 0xf5, 0xa6, 0x49,
0xc1, 0x9d, 0xae, 0x69, 0xe9, 0x5f, 0x18, 0x3d, 0xb3, 0xa7, 0x16, 0x29, 0x6d, 0xa3, 0xdd, 0xd6,
0xf7, 0xea, 0x6d, 0xab, 0x8e, 0xf7, 0x8e, 0x0e, 0xf4, 0x8e, 0xa9, 0xae, 0x51, 0x3a, 0x09, 0xb4,
0x65, 0x1c, 0xe8, 0x1d, 0x4a, 0x4e, 0x2d, 0xa3, 0x6b, 0x80, 0x12, 0xb0, 0xd1, 0x69, 0xe9, 0x5f,
0x58, 0xe6, 0x97, 0x87, 0xba, 0x5a, 0x41, 0xb7, 0xe0, 0x7a, 0x02, 0x4f, 0xdf, 0x53, 0x3f, 0xd0,
0x55, 0x19, 0xa9, 0x50, 0x4d, 0x36, 0xcd, 0xee, 0xe1, 0x73, 0x15, 0xd2, 0xd4, 0x71, 0xf7, 0x15,
0xd6, 0x9b, 0x5d, 0xdc, 0x52, 0x95, 0x34, 0xf8, 0xa5, 0xde, 0x34, 0xbb, 0xd8, 0x32, 0x5a, 0x6a,
0x95, 0x32, 0x9f, 0x80, 0x7b, 0x7a, 0x1d, 0x37, 0xf7, 0x2d, 0xac, 0xf7, 0x8e, 0xda, 0xa6, 0xba,
0x4e, 0x55, 0xb0, 0x6b, 0xb4, 0x75, 0x26, 0xd1, 0x6e, 0xf7, 0xa8, 0xd3, 0x52, 0x37, 0xd0, 0x26,
0x28, 0x07, 0xba, 0x59, 0x4f, 0x74, 0xb2, 0x49, 0xef, 0x6f, 0xd6, 0x9b, 0xfb, 0x7a, 0x02, 0x51,
0xd1, 0x0e, 0x6c, 0x37, 0xeb, 0x1d, 0x8a, 0xd4, 0xc4, 0x7a, 0xdd, 0xd4, 0xad, 0xdd, 0x6e, 0xbb,
0xa5, 0x63, 0x75, 0x8b, 0x0a, 0x38, 0xb7, 0x63, 0xb4, 0x75, 0x15, 0xa5, 0x30, 0x5a, 0x7a, 0x5b,
0x9f, 0x61, 0x5c, 0x49, 0x61, 0x24, 0x3b, 0x14, 0x63, 0x9b, 0x0a, 0xd3, 0x38, 0x32, 0xda, 0x2d,
0xa1, 0x28, 0x6e, 0xb4, 0xab, 0x68, 0x0b, 0xd6, 0x13, 0x61, 0x3a, 0x6d, 0xa3, 0x67, 0xaa, 0xd7,
0xd0, 0x75, 0xb8, 0x92, 0x80, 0x0e, 0x74, 0x13, 0x1b, 0x4d, 0xae, 0xd5, 0xeb, 0xf4, 0x6c, 0xf7,
0xc8, 0xb4, 0xba, 0xbb, 0xd6, 0x81, 0x7e, 0xd0, 0xc5, 0x5f, 0xaa, 0x3b, 0xf7, 0xff, 0x28, 0x41,
0x25, 0xa9, 0xf0, 0x51, 0x05, 0x8a, 0x9d, 0x6e, 0x47, 0x57, 0x73, 0x74, 0xd5, 0xe8, 0x76, 0xdb,
0xaa, 0x44, 0x57, 0x46, 0xc7, 0x7c, 0xaa, 0xe6, 0x91, 0x0c, 0x6b, 0x46, 0xc7, 0xfc, 0xf0, 0xb1,
0x5a, 0x10, 0xcb, 0x8f, 0x6a, 0x6a, 0x51, 0x2c, 0x1f, 0x7f, 0xac, 0xae, 0xd1, 0xe5, 0x6e, 0xbb,
0x5b, 0x37, 0x55, 0x40, 0x00, 0xa5, 0x56, 0xf7, 0xa8, 0xd1, 0xd6, 0x55, 0x85, 0xae, 0x7b, 0x26,
0x36, 0x3a, 0x7b, 0xea, 0x36, 0xe5, 0x40, 0x58, 0xa2, 0x61, 0x74, 0xea, 0xf8, 0x4b, 0xd5, 0xa1,
0xda, 0x14, 0x20, 0x8e, 0x4c, 0xee, 0x37, 0x61, 0x73, 0xae, 0x26, 0x45, 0x25, 0xc8, 0xb7, 0x4d,
0x35, 0x87, 0xca, 0x50, 0x68, 0x9b, 0xba, 0x2a, 0x51, 0x80, 0xfe, 0x42, 0xcd, 0xd3, 0xbf, 0x7b,
0xa6, 0x5a, 0xa0, 0x1b, 0x7b, 0xa6, 0xae, 0x16, 0x29, 0xa0, 0xa3, 0xab, 0x6b, 0xf7, 0x9f, 0xc2,
0x1a, 0xab, 0x73, 0xa8, 0xe3, 0x1b, 0x9d, 0x97, 0xf5, 0xb6, 0xd1, 0xe2, 0x72, 0x1d, 0x1c, 0xf5,
0x4c, 0x55, 0x62, 0x5c, 0xed, 0x77, 0x8f, 0xda, 0xd4, 0xc9, 0xab, 0x50, 0xa1, 0x50, 0x6a, 0x75,
0xb5, 0x70, 0xff, 0x2e, 0x94, 0x78, 0xf2, 0xa6, 0x67, 0x8c, 0x4e, 0x4f, 0xc7, 0xf4, 0x66, 0x2a,
0x11, 0xb3, 0x87, 0x2a, 0xdd, 0xbf, 0x03, 0x95, 0x24, 0x98, 0x29, 0x45, 0xac, 0xd7, 0x29, 0x6d,
0x19, 0xd6, 0x5e, 0x61, 0x83, 0x1e, 0xa8, 0x7d, 0xb7, 0x0e, 0xeb, 0x07, 0x2c, 0xf4, 0x7b, 0x24,
0x3c, 0x71, 0xfb, 0x04, 0xfd, 0x1c, 0xd4, 0x66, 0x48, 0xec, 0x98, 0xcc, 0xba, 0x71, 0xb4, 0xf0,
0x93, 0xd0, 0xcd, 0x45, 0xfd, 0xb8, 0x96, 0x43, 0xbb, 0xb0, 0xbe, 0x6f, 0x47, 0x29, 0xec, 0x5b,
0x73, 0x35, 0x74, 0x3a, 0xc1, 0xdf, 0xbc, 0x76, 0xa6, 0xda, 0xe2, 0x13, 0xa7, 0x1c, 0x32, 0x00,
0xb5, 0x48, 0xd4, 0x0f, 0xdd, 0x63, 0x72, 0x51, 0x62, 0x0b, 0xf9, 0xd4, 0x72, 0xe8, 0x05, 0xb5,
0xd3, 0xd8, 0x8f, 0x2f, 0x4a, 0xe7, 0xce, 0x92, 0xcd, 0xe9, 0x68, 0x2a, 0x87, 0x7e, 0x01, 0x9b,
0xbd, 0x37, 0xf4, 0x67, 0xb2, 0x17, 0xcd, 0x69, 0x49, 0x8c, 0xae, 0x96, 0xd2, 0x4a, 0xbe, 0x9d,
0x6a, 0x39, 0x74, 0x08, 0x28, 0x4b, 0x8b, 0x8d, 0x3f, 0xce, 0xe5, 0x70, 0xd9, 0x26, 0x1b, 0x77,
0xe4, 0x50, 0x0b, 0x36, 0x5a, 0x61, 0x30, 0xba, 0xa8, 0xbc, 0x4b, 0x2c, 0xf9, 0x29, 0x28, 0xdc,
0x15, 0xd8, 0xa0, 0x0d, 0x65, 0xeb, 0xd3, 0xd9, 0xf0, 0x6d, 0x19, 0x7a, 0x13, 0xd6, 0x13, 0x03,
0xbe, 0x83, 0xc0, 0xb2, 0x0d, 0x2d, 0x87, 0x9e, 0x81, 0x4c, 0x25, 0xf9, 0x7e, 0x1c, 0xe8, 0xb0,
0xc9, 0x05, 0x98, 0x7e, 0x60, 0x9c, 0xd3, 0x43, 0xf6, 0x2b, 0xe6, 0x72, 0x32, 0xd5, 0x7d, 0x3b,
0xba, 0x20, 0x8d, 0xe5, 0x0e, 0xfd, 0x1c, 0x36, 0xa8, 0x99, 0xa7, 0xe7, 0xa3, 0xf3, 0x8d, 0x72,
0x73, 0xf1, 0x2d, 0xc2, 0x67, 0xa8, 0x72, 0xc3, 0x60, 0x74, 0x39, 0xc1, 0x3e, 0x81, 0x12, 0x2f,
0x8c, 0xd1, 0xce, 0x9c, 0x66, 0xa7, 0xdf, 0xbe, 0xe6, 0xe4, 0x99, 0x7e, 0xe8, 0x64, 0x6a, 0x59,
0x9f, 0x4e, 0xd5, 0x1a, 0x13, 0xa3, 0x35, 0xc7, 0x42, 0x76, 0x28, 0x76, 0x73, 0xf1, 0x80, 0x5f,
0xcb, 0xa1, 0x7d, 0xda, 0x96, 0xcd, 0x86, 0x73, 0xe8, 0xff, 0xe6, 0xba, 0x82, 0xb9, 0xb9, 0xdd,
0x39, 0x0c, 0x7d, 0x06, 0x25, 0x5e, 0x62, 0xa2, 0xa5, 0xdf, 0x57, 0x6e, 0x66, 0x77, 0x52, 0x1f,
0x30, 0x58, 0x1c, 0x6e, 0xce, 0x7d, 0xe7, 0x41, 0xef, 0x2f, 0x20, 0x94, 0xfd, 0x0a, 0x74, 0x2e,
0xc5, 0x27, 0x50, 0x68, 0x7a, 0xce, 0x92, 0xcc, 0x30, 0xc7, 0x64, 0x6a, 0x8e, 0x9f, 0x43, 0x75,
0x80, 0xd9, 0x90, 0x15, 0x65, 0x8b, 0xde, 0xb9, 0xe9, 0xeb, 0x32, 0xe3, 0xee, 0xc1, 0xd6, 0x61,
0x48, 0x86, 0x81, 0xed, 0x5c, 0x32, 0x0d, 0x3c, 0x81, 0x35, 0x36, 0x89, 0x9e, 0x0b, 0xbf, 0xd9,
0x74, 0x7a, 0x19, 0xe2, 0x33, 0x36, 0xc0, 0x1f, 0xd9, 0xfd, 0x18, 0xdd, 0x38, 0x3b, 0x47, 0x11,
0x63, 0xe9, 0x65, 0xc8, 0x0d, 0xa8, 0x08, 0xbb, 0x35, 0xd0, 0xcd, 0x65, 0xe6, 0x3c, 0x6c, 0x9c,
0xa7, 0xfe, 0x46, 0xed, 0xab, 0x47, 0x03, 0x37, 0x7e, 0x33, 0x3e, 0x7e, 0xd0, 0x0f, 0xbc, 0x87,
0xfd, 0xdf, 0x46, 0x8f, 0x1e, 0x3d, 0x79, 0x18, 0x8d, 0x4f, 0x86, 0xae, 0xf7, 0x70, 0xc9, 0xff,
0xf1, 0x1c, 0x97, 0xd8, 0x3f, 0xf0, 0x7c, 0xf4, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xae, 0x2c,
0xe7, 0xd4, 0xe9, 0x23, 0x00, 0x00,
// 3284 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x3a, 0x4d, 0x73, 0x1b, 0xc9,
0x75, 0x18, 0x0c, 0x08, 0x60, 0xde, 0x00, 0x64, 0xb3, 0x45, 0x49, 0x94, 0xb4, 0x8a, 0xb4, 0xb3,
0xb5, 0x89, 0x56, 0xa9, 0x92, 0xb4, 0xdc, 0x8d, 0xa4, 0x44, 0xbb, 0x9b, 0x05, 0x81, 0x21, 0x39,
0x11, 0x08, 0x50, 0x0d, 0x50, 0xda, 0x8f, 0x4a, 0x21, 0x43, 0x4c, 0x0b, 0x9c, 0x25, 0x80, 0x81,
0x66, 0x06, 0xa2, 0x90, 0x63, 0xee, 0x49, 0x65, 0x93, 0x5c, 0x72, 0x71, 0x95, 0x7d, 0xb6, 0x5d,
0xae, 0xf2, 0x3f, 0xf0, 0xc9, 0x5e, 0x1f, 0x7c, 0xdc, 0xc3, 0x56, 0xf9, 0xe8, 0xf2, 0xcd, 0x17,
0xdb, 0x67, 0x57, 0x7f, 0x0c, 0x30, 0x03, 0x02, 0x14, 0x21, 0xda, 0x3e, 0xa1, 0xe7, 0x75, 0xf7,
0xeb, 0xf7, 0xdd, 0xef, 0xbd, 0x06, 0xbc, 0x3b, 0x38, 0xea, 0xdc, 0xed, 0xd9, 0x41, 0x48, 0xfd,
0xbb, 0x1d, 0x7f, 0xd0, 0xbe, 0xdb, 0xa3, 0x41, 0x60, 0x77, 0x68, 0xf4, 0x7b, 0x67, 0xe0, 0x7b,
0xa1, 0x87, 0xf5, 0x9e, 0xdb, 0x7d, 0x39, 0x0c, 0xee, 0xb0, 0x25, 0xc6, 0x33, 0xc8, 0x36, 0x42,
0x3b, 0x1c, 0x06, 0xf8, 0x1f, 0x00, 0xa8, 0xef, 0x7b, 0x7e, 0xab, 0xed, 0x39, 0x74, 0x5d, 0xb9,
0xa9, 0xdc, 0x5a, 0xde, 0xb8, 0x74, 0x27, 0xb6, 0xf6, 0x8e, 0xc9, 0xa6, 0xcb, 0x9e, 0x43, 0x89,
0x46, 0xa3, 0x21, 0xbe, 0x04, 0x59, 0x9f, 0xda, 0x81, 0xd7, 0x5f, 0x4f, 0xdf, 0x54, 0x6e, 0x69,
0x44, 0x7e, 0x19, 0xf7, 0xa1, 0xf0, 0x98, 0x8e, 0x9e, 0xda, 0xdd, 0x21, 0xdd, 0xb3, 0x5d, 0x1f,
0x23, 0x50, 0x8f, 0xe8, 0x88, 0xe3, 0xd5, 0x08, 0x1b, 0xe2, 0x35, 0x58, 0x7a, 0xc9, 0xa6, 0xe5,
0x46, 0xf1, 0x61, 0xfc, 0x23, 0x2c, 0x97, 0xbd, 0x6e, 0x97, 0xb6, 0x43, 0xd7, 0xeb, 0xd7, 0xec,
0x1e, 0xc5, 0x7f, 0x07, 0x2b, 0xed, 0x31, 0xa4, 0xd5, 0xb7, 0x7b, 0x54, 0x62, 0x59, 0x6e, 0x27,
0x16, 0x1a, 0x5d, 0xc0, 0xc9, 0xad, 0x55, 0x37, 0x08, 0xf1, 0xdf, 0x43, 0x36, 0xe0, 0x1c, 0xf2,
0x5d, 0xfa, 0xc6, 0x85, 0x04, 0x4f, 0x82, 0x79, 0x22, 0x97, 0xe0, 0xf7, 0x00, 0x4d, 0x9d, 0x15,
0xac, 0xa7, 0x6f, 0xaa, 0xb7, 0x34, 0xb2, 0x92, 0x3c, 0x2c, 0x30, 0x1a, 0xa0, 0x6d, 0xb9, 0xb4,
0xeb, 0x2c, 0x44, 0x23, 0xbe, 0x0e, 0xf0, 0x9c, 0xed, 0x12, 0x6b, 0x04, 0xe7, 0xda, 0xf3, 0x08,
0x8f, 0xf1, 0x8d, 0x02, 0xb9, 0x5d, 0x7b, 0x30, 0x70, 0xfb, 0x9d, 0xc5, 0x08, 0x9f, 0x41, 0x40,
0x7a, 0x26, 0x01, 0x0c, 0x6b, 0xfb, 0x90, 0xf6, 0xec, 0x75, 0x75, 0x16, 0x56, 0x3e, 0x45, 0xe4,
0x12, 0xfc, 0x11, 0x14, 0xe8, 0xab, 0xd0, 0xb7, 0x5b, 0x03, 0xdb, 0xb7, 0x7b, 0xc1, 0x7a, 0xe6,
0xa6, 0x7a, 0x4b, 0xdf, 0xb8, 0x92, 0xd8, 0x12, 0xd7, 0x32, 0xd1, 0xf9, 0xf2, 0x3d, 0xbe, 0xda,
0x08, 0x40, 0x97, 0xbc, 0x2c, 0xae, 0x88, 0x07, 0x50, 0xe8, 0x89, 0xbd, 0xad, 0xae, 0x1b, 0x84,
0x5c, 0x09, 0xfa, 0xc6, 0x5a, 0x62, 0x8b, 0x44, 0x4e, 0xf4, 0xde, 0xe4, 0x14, 0xe3, 0x31, 0x2c,
0xef, 0xd9, 0x7e, 0xe8, 0x32, 0x86, 0x39, 0x1d, 0x67, 0xd7, 0x0d, 0x02, 0x35, 0xb4, 0x3b, 0x52,
0x6e, 0x6c, 0x68, 0x74, 0xa1, 0x38, 0x46, 0xb6, 0x38, 0x0f, 0x77, 0xe0, 0xc2, 0x20, 0xda, 0xdd,
0x0a, 0xed, 0x4e, 0xcb, 0xf6, 0x7d, 0x7b, 0x24, 0xed, 0x69, 0x75, 0x3c, 0xd5, 0xb4, 0x3b, 0x25,
0x36, 0x61, 0x3c, 0x81, 0x95, 0xa7, 0xb4, 0x1d, 0x7a, 0x3e, 0xf1, 0x8e, 0x09, 0x6d, 0x7b, 0xbe,
0xc3, 0xcd, 0xa5, 0xeb, 0xd9, 0x61, 0xcb, 0xb1, 0x43, 0x7b, 0x5d, 0xb9, 0xa9, 0xde, 0x4a, 0x13,
0x8d, 0x43, 0x2a, 0x76, 0x68, 0xe3, 0x1b, 0xa0, 0x1f, 0xb8, 0x7d, 0xdb, 0x1f, 0x89, 0x79, 0x46,
0x79, 0x81, 0x80, 0x00, 0xb1, 0x05, 0xc6, 0xbf, 0x81, 0x66, 0xf6, 0x43, 0x37, 0x1c, 0x59, 0x4e,
0xb0, 0x18, 0xf1, 0x7f, 0x0b, 0x2b, 0x94, 0xef, 0x6c, 0xb9, 0x4e, 0x8c, 0x70, 0x95, 0x14, 0xa9,
0x44, 0x28, 0x88, 0xde, 0x82, 0x82, 0x24, 0x5a, 0x50, 0x7c, 0x1f, 0x72, 0x3e, 0x1f, 0x05, 0x9c,
0x5c, 0x7d, 0xe3, 0xad, 0xc4, 0x29, 0x53, 0x0c, 0x92, 0x68, 0xb1, 0xf1, 0x25, 0xe8, 0x62, 0x4e,
0x28, 0x0d, 0x43, 0xe6, 0x2b, 0x16, 0x54, 0x84, 0xa6, 0xf8, 0x18, 0x3f, 0x04, 0xf0, 0xbd, 0xe3,
0x96, 0xd8, 0xc1, 0x99, 0x9d, 0xb6, 0xc5, 0x38, 0x25, 0x44, 0xf3, 0xa3, 0x53, 0x8c, 0x8e, 0xf4,
0xd5, 0x5d, 0x1a, 0xda, 0x53, 0x2e, 0xa8, 0x4c, 0xb9, 0x20, 0x7e, 0x0f, 0x32, 0xe1, 0x68, 0x20,
0xdc, 0x67, 0x79, 0xe3, 0x62, 0x02, 0x3f, 0x93, 0x69, 0x73, 0x34, 0xa0, 0x84, 0x2f, 0x61, 0x06,
0xe3, 0xb8, 0x3d, 0xee, 0x48, 0x2a, 0x61, 0x43, 0xa3, 0x04, 0x59, 0xe1, 0x42, 0xf8, 0x01, 0xe8,
0xe2, 0x94, 0x1e, 0x0d, 0xed, 0x48, 0x16, 0xc9, 0x78, 0x3a, 0x26, 0x89, 0x08, 0x82, 0xd8, 0x30,
0x30, 0xae, 0x43, 0x8e, 0x78, 0xc7, 0x5c, 0xbd, 0x18, 0x32, 0x07, 0x5d, 0xef, 0x80, 0xd3, 0x58,
0x20, 0x7c, 0x6c, 0x7c, 0x3f, 0x0d, 0xba, 0xd5, 0x0f, 0xa8, 0x1f, 0x2e, 0x68, 0xdd, 0x13, 0xc7,
0x4f, 0xbf, 0xde, 0xf1, 0xdf, 0x07, 0x26, 0xbd, 0x40, 0x98, 0x95, 0x3a, 0xc3, 0xf7, 0x24, 0x89,
0x24, 0xcf, 0x96, 0x71, 0x62, 0x67, 0x18, 0x4c, 0x66, 0x86, 0xc1, 0xe0, 0x77, 0xa0, 0x98, 0xf0,
0x8a, 0xf5, 0x25, 0x4e, 0x6e, 0x21, 0xee, 0x0f, 0x27, 0x02, 0x4f, 0x76, 0xa1, 0xc0, 0xf3, 0x5b,
0x05, 0xf4, 0x06, 0xb5, 0xfd, 0xf6, 0xe1, 0x82, 0x32, 0x7a, 0x04, 0x85, 0x97, 0xdc, 0x84, 0xc4,
0xb9, 0x32, 0xea, 0xac, 0xcf, 0xb0, 0x31, 0x8e, 0x98, 0xe8, 0x2f, 0x63, 0x26, 0xcb, 0xac, 0x21,
0xe8, 0x72, 0x6b, 0xd0, 0x08, 0x1b, 0x9e, 0x64, 0x35, 0xc3, 0x5d, 0xff, 0x74, 0x56, 0x97, 0x16,
0x62, 0xf5, 0x18, 0xd6, 0x04, 0xa7, 0x56, 0xbf, 0x41, 0x3b, 0x3d, 0xda, 0x97, 0x66, 0x61, 0x40,
0xf1, 0xb9, 0xdb, 0xa5, 0x13, 0x5d, 0x28, 0xfc, 0x68, 0x9d, 0x01, 0x23, 0x4d, 0x3c, 0x82, 0x42,
0xc0, 0xf7, 0x8e, 0xb9, 0x55, 0x4e, 0x70, 0x1b, 0x13, 0x23, 0xd1, 0x83, 0xc9, 0x87, 0xf1, 0xff,
0x0a, 0xe4, 0x79, 0x68, 0x71, 0xe9, 0x82, 0x91, 0x05, 0x81, 0xea, 0x3a, 0x81, 0x8c, 0x26, 0x6c,
0x88, 0xaf, 0x81, 0xf6, 0xd2, 0xee, 0xba, 0x4e, 0xcb, 0xf7, 0x8e, 0xb9, 0xb5, 0xe5, 0x49, 0x9e,
0x03, 0x88, 0x77, 0x9c, 0x34, 0xc5, 0xcc, 0x59, 0x4c, 0xd1, 0xf8, 0x49, 0x1a, 0xf4, 0x27, 0x43,
0xea, 0x8f, 0x08, 0x0d, 0x86, 0xdd, 0x05, 0xa3, 0xf6, 0xfb, 0x90, 0xa7, 0x92, 0x2f, 0x29, 0x91,
0x64, 0x0c, 0x88, 0x98, 0x26, 0xe3, 0x65, 0xf8, 0x32, 0xe4, 0x58, 0x60, 0xea, 0x0f, 0xa3, 0x58,
0x90, 0xf5, 0xbd, 0xe3, 0xda, 0xb0, 0xc7, 0x92, 0xa3, 0xa0, 0xed, 0xf9, 0x54, 0xdc, 0x9c, 0x69,
0x22, 0xbf, 0xf0, 0x5b, 0xa0, 0x39, 0x6e, 0x10, 0xda, 0xfd, 0x36, 0x15, 0x0a, 0x4f, 0x93, 0x09,
0xe0, 0x7c, 0xc6, 0x8f, 0xaf, 0x40, 0xfe, 0x05, 0xe3, 0xbd, 0xe5, 0x3a, 0xeb, 0xb9, 0x9b, 0xca,
0xad, 0x0c, 0xc9, 0xf1, 0x6f, 0xcb, 0x61, 0x53, 0x03, 0xdf, 0x7b, 0xc5, 0xa7, 0xf2, 0x9c, 0xd0,
0x1c, 0xff, 0xb6, 0x1c, 0xe3, 0x5f, 0x41, 0x6f, 0x84, 0x3e, 0xbb, 0x4d, 0xe9, 0xa0, 0x3b, 0x5a,
0x4c, 0x62, 0x6f, 0x43, 0x21, 0xe0, 0x7b, 0x5b, 0x3e, 0xdb, 0x2c, 0x2f, 0x50, 0x3d, 0x98, 0xe0,
0x33, 0x9e, 0x81, 0xb6, 0xe9, 0x79, 0xdd, 0x37, 0x40, 0x7e, 0x1d, 0xe0, 0xc0, 0xf3, 0xba, 0x31,
0xd4, 0x79, 0xa2, 0x1d, 0x44, 0xb8, 0x8c, 0x20, 0x9e, 0xf3, 0x11, 0xef, 0xb8, 0xec, 0x0d, 0xfb,
0x0b, 0x2a, 0xfc, 0x1e, 0xac, 0xc5, 0xa2, 0x03, 0x53, 0x64, 0x9b, 0x21, 0xe1, 0x67, 0xa9, 0x04,
0xb7, 0x4f, 0xa0, 0x37, 0xae, 0x41, 0xae, 0xec, 0xf5, 0x7a, 0x76, 0xdf, 0x61, 0xc6, 0xdc, 0xee,
0x39, 0x51, 0x5a, 0xdb, 0xee, 0x39, 0xc6, 0xaf, 0x15, 0x00, 0xab, 0xef, 0xd0, 0x57, 0xc2, 0x11,
0xff, 0x32, 0x59, 0x5c, 0xf2, 0x0e, 0x53, 0xa7, 0xef, 0xb0, 0xeb, 0x00, 0x2e, 0x23, 0x41, 0x4c,
0x67, 0xc4, 0x34, 0x87, 0xf0, 0xe9, 0xf3, 0x85, 0x9c, 0x4f, 0x01, 0xb6, 0xba, 0xc3, 0x40, 0xc6,
0xd6, 0x0d, 0xb8, 0x38, 0x45, 0x72, 0x22, 0xe0, 0x5c, 0x48, 0x12, 0x2e, 0x72, 0x86, 0x7d, 0x28,
0x94, 0xbd, 0xde, 0xc0, 0x6e, 0x2f, 0x7a, 0x87, 0xbd, 0x05, 0x5a, 0x78, 0xe8, 0xd3, 0xe0, 0xd0,
0xeb, 0x8a, 0x04, 0x40, 0x21, 0x13, 0x80, 0xb1, 0x0f, 0x2b, 0x15, 0xda, 0xa5, 0x21, 0xdd, 0x1c,
0x59, 0x95, 0x05, 0x31, 0x5f, 0x81, 0xfc, 0x54, 0x9e, 0x93, 0x73, 0x65, 0x86, 0xf3, 0x45, 0xbc,
0x22, 0xb1, 0xfa, 0xcf, 0xbd, 0xc5, 0x74, 0x7a, 0x0d, 0x34, 0x96, 0xbd, 0xb4, 0xdc, 0xfe, 0x73,
0x4f, 0x6a, 0x33, 0xcf, 0x00, 0x0c, 0x93, 0xf1, 0x25, 0xac, 0x6e, 0xd3, 0x50, 0xa6, 0x68, 0x95,
0x60, 0x41, 0xa2, 0xaf, 0x03, 0x04, 0x22, 0xe8, 0x33, 0x8f, 0x16, 0xf6, 0xaa, 0x49, 0x88, 0xe5,
0x18, 0x43, 0x58, 0x8e, 0x92, 0x3f, 0x71, 0x05, 0xff, 0x39, 0xc4, 0xc1, 0x72, 0xce, 0x89, 0xe9,
0x05, 0x3c, 0x5c, 0x6b, 0x32, 0x81, 0x11, 0x85, 0xd1, 0x3d, 0x40, 0xe2, 0x8e, 0xe4, 0xf9, 0x8d,
0x60, 0x89, 0x07, 0xbc, 0x1e, 0xed, 0x07, 0xae, 0xcc, 0xe9, 0x54, 0x32, 0x01, 0x18, 0xff, 0xa5,
0xc8, 0xfc, 0x8c, 0xe5, 0x56, 0xf8, 0x43, 0xd0, 0x58, 0xac, 0x6f, 0xf1, 0x2c, 0x4c, 0x39, 0x25,
0x0b, 0xdb, 0x49, 0x91, 0xbc, 0x23, 0xc7, 0x78, 0xf3, 0xc4, 0xd5, 0xcd, 0x34, 0x73, 0x7d, 0xc6,
0xd5, 0x3d, 0x21, 0x6b, 0x27, 0x95, 0xb8, 0xc1, 0x37, 0x73, 0xb2, 0x22, 0x35, 0xbe, 0x55, 0x00,
0x62, 0xd4, 0x2f, 0x43, 0xda, 0x15, 0x3e, 0x9e, 0x21, 0x69, 0xd7, 0x61, 0x79, 0x59, 0xcc, 0x37,
0xf9, 0x78, 0x9c, 0x36, 0xaa, 0xaf, 0x4f, 0x1b, 0x3f, 0x82, 0x82, 0xf0, 0xce, 0x33, 0x57, 0x55,
0xee, 0x38, 0x9e, 0x04, 0xe7, 0x74, 0xde, 0x6d, 0x58, 0x8d, 0x49, 0x41, 0xe6, 0xec, 0x1b, 0x51,
0x25, 0x7e, 0x96, 0x8c, 0x5d, 0x8a, 0xe8, 0x3b, 0x05, 0xb4, 0x26, 0xf5, 0x7b, 0xfc, 0x9e, 0x7d,
0x5d, 0x4e, 0x7d, 0x0d, 0x34, 0xb7, 0x1f, 0xb6, 0xa2, 0x72, 0x9f, 0xd9, 0x53, 0xde, 0xed, 0x87,
0x9c, 0x46, 0x76, 0x7d, 0x38, 0xde, 0xf0, 0xa0, 0x4b, 0xe5, 0x3c, 0xb3, 0x28, 0x85, 0xe8, 0x02,
0x26, 0x96, 0x88, 0x04, 0x61, 0x48, 0xf9, 0x15, 0x9b, 0xe1, 0xe6, 0x93, 0xe7, 0x00, 0x76, 0xc9,
0xae, 0xc1, 0xd2, 0x81, 0xe7, 0x05, 0x21, 0x4f, 0x24, 0xd3, 0x44, 0x7c, 0x9c, 0x33, 0x83, 0xb4,
0x41, 0xe7, 0x11, 0xca, 0xa7, 0xe6, 0xab, 0x81, 0x8f, 0x1f, 0x42, 0xde, 0x1b, 0x50, 0xdf, 0x0e,
0x3d, 0x5f, 0x5a, 0x64, 0x52, 0x46, 0x72, 0x6d, 0x5d, 0xae, 0x21, 0xe3, 0xd5, 0x78, 0x1d, 0x72,
0x7c, 0xdc, 0x77, 0xa4, 0xb5, 0x44, 0x9f, 0xc6, 0x4f, 0x15, 0x00, 0x62, 0xf7, 0x3b, 0xf4, 0x4c,
0x12, 0xdc, 0x88, 0xe3, 0x39, 0x99, 0x94, 0xc6, 0x88, 0x1d, 0x9f, 0x30, 0x11, 0x8c, 0x7a, 0x9a,
0x60, 0x16, 0xab, 0xe9, 0xbf, 0x55, 0xa2, 0x3a, 0xed, 0x4c, 0x64, 0xdf, 0x00, 0x5d, 0x24, 0x23,
0x82, 0x90, 0x34, 0x27, 0x04, 0x38, 0x68, 0x93, 0x53, 0x13, 0x2b, 0x17, 0xd5, 0x05, 0xca, 0x45,
0xe6, 0x82, 0xa1, 0x37, 0x38, 0x92, 0xc6, 0xc0, 0xc7, 0xe7, 0xf4, 0x8c, 0x57, 0x50, 0x60, 0x29,
0x0a, 0xb5, 0xfb, 0x82, 0xb3, 0x5b, 0xb0, 0xe4, 0xb5, 0xdb, 0xc3, 0x48, 0xe1, 0x38, 0x81, 0xa6,
0xce, 0x66, 0x88, 0x58, 0x80, 0x3f, 0x81, 0x62, 0x87, 0xf6, 0xa9, 0x6f, 0x77, 0x5b, 0x9c, 0x33,
0xa9, 0xa1, 0xe4, 0xc1, 0xdb, 0x62, 0x85, 0x48, 0x4b, 0x0b, 0x9d, 0xd8, 0x97, 0xf1, 0x9f, 0x69,
0x28, 0xc4, 0xa7, 0xf1, 0xa7, 0x50, 0x3c, 0x10, 0xa4, 0x48, 0x84, 0xca, 0x8c, 0x5a, 0x37, 0x4e,
0xec, 0x4e, 0x8a, 0x14, 0x0e, 0xe2, 0xc4, 0x3f, 0x00, 0x08, 0xa9, 0xdf, 0x1b, 0xd3, 0xa3, 0x9c,
0x28, 0x3e, 0xc7, 0xbe, 0xbb, 0x93, 0x22, 0x5a, 0x38, 0x76, 0xe4, 0x7f, 0x02, 0xdd, 0x67, 0x46,
0x29, 0x77, 0x8a, 0x1e, 0xd1, 0xe5, 0x64, 0xbe, 0x3d, 0x36, 0xda, 0x9d, 0x14, 0x01, 0x7f, 0x62,
0xc2, 0x1f, 0x8f, 0x43, 0xb0, 0xd8, 0x9c, 0x99, 0x51, 0x4f, 0xc4, 0x6c, 0x67, 0x12, 0x7d, 0xf9,
0x27, 0x8b, 0xbe, 0x7c, 0x9f, 0xf1, 0x1b, 0x05, 0x8a, 0xb1, 0xba, 0x63, 0x6f, 0xf3, 0xec, 0xf7,
0xd6, 0x82, 0x2d, 0x97, 0x93, 0xaa, 0x53, 0x67, 0x48, 0x7a, 0xbe, 0xea, 0xce, 0xe9, 0x4c, 0xbf,
0x9b, 0xd4, 0x29, 0x2f, 0x76, 0x83, 0xce, 0x5f, 0xa9, 0x4e, 0x7d, 0x17, 0x96, 0x13, 0x32, 0x8a,
0xee, 0xf0, 0x62, 0x5c, 0x3c, 0x41, 0x54, 0xce, 0x66, 0x26, 0xe5, 0xec, 0xb9, 0xfc, 0x8b, 0xe7,
0x6e, 0x6e, 0x8f, 0x06, 0xa1, 0xdd, 0x1b, 0xac, 0x67, 0xf9, 0x5d, 0x3a, 0x01, 0x24, 0x4a, 0x93,
0x5c, 0xa2, 0x34, 0x49, 0x14, 0x34, 0xf9, 0x64, 0x41, 0x73, 0x17, 0xf2, 0x3e, 0x7d, 0x21, 0x32,
0x05, 0x8d, 0xbb, 0xe9, 0x54, 0x69, 0x48, 0x5f, 0xf0, 0x7b, 0x37, 0xe7, 0x8b, 0x81, 0xf1, 0x3d,
0x15, 0xf0, 0xae, 0xdd, 0x77, 0x07, 0xc3, 0xae, 0xcd, 0x53, 0xfa, 0x05, 0x05, 0x7f, 0xa2, 0xa2,
0x4f, 0xcf, 0x68, 0x5e, 0xbc, 0x0d, 0x85, 0x81, 0xef, 0xf6, 0x6c, 0x7f, 0xd4, 0x3a, 0xa2, 0x23,
0x21, 0xde, 0x0c, 0xd1, 0x25, 0xec, 0x31, 0x1d, 0x05, 0x6f, 0x50, 0xd4, 0x26, 0xe5, 0xb7, 0x34,
0x2d, 0xbf, 0x64, 0x2a, 0x28, 0xc5, 0x3b, 0x4e, 0x05, 0xd9, 0x74, 0xfb, 0xd0, 0xee, 0xf7, 0x69,
0x77, 0x52, 0x16, 0x6a, 0x12, 0x32, 0x25, 0xc7, 0xfc, 0x19, 0xe4, 0x98, 0x50, 0x97, 0x96, 0x54,
0xd7, 0xb4, 0x95, 0xc0, 0x42, 0x2e, 0xf1, 0x1f, 0x2a, 0xac, 0x8a, 0xf6, 0x56, 0xdd, 0x17, 0xc9,
0xfc, 0x42, 0xfa, 0x49, 0xc8, 0x55, 0x84, 0xbd, 0xd7, 0xc9, 0x15, 0x81, 0x3a, 0x74, 0x9d, 0xa8,
0x89, 0x37, 0x74, 0x9d, 0x59, 0x6d, 0x9b, 0x93, 0x4a, 0x5e, 0x54, 0x1d, 0xea, 0xe9, 0xea, 0x50,
0xe3, 0xea, 0x78, 0x07, 0xd2, 0xde, 0x40, 0x2a, 0x22, 0x59, 0x5b, 0xd4, 0x07, 0x5c, 0x0f, 0x69,
0x6f, 0xc0, 0x72, 0xa2, 0x76, 0xd7, 0x95, 0x27, 0x08, 0x1d, 0xe4, 0x05, 0xe0, 0xdc, 0x4a, 0xf8,
0x59, 0x1a, 0x34, 0x11, 0x80, 0x17, 0x12, 0x7e, 0xec, 0x2e, 0x17, 0xa2, 0x3f, 0xe3, 0x5d, 0x7e,
0x42, 0xde, 0xea, 0x8c, 0x36, 0x99, 0x54, 0x53, 0x66, 0xa2, 0xa6, 0xd3, 0x35, 0x90, 0x10, 0x4f,
0xf6, 0x35, 0xe2, 0xc9, 0x2d, 0x14, 0xc9, 0xa2, 0xde, 0x74, 0x9e, 0x93, 0x29, 0x7a, 0xd3, 0x32,
0x5a, 0x6a, 0xe3, 0x68, 0x69, 0xbc, 0x02, 0xbd, 0xe9, 0xf6, 0x68, 0x63, 0xd4, 0x6f, 0x33, 0x29,
0x5e, 0x86, 0xdc, 0x80, 0x52, 0xbf, 0x65, 0x39, 0xb2, 0xfe, 0xc9, 0xb2, 0x4f, 0x8b, 0xb3, 0xd1,
0x1c, 0xb3, 0x91, 0x16, 0x6c, 0x8c, 0x01, 0x78, 0x03, 0xb4, 0x60, 0xd4, 0x6f, 0xb7, 0xe6, 0xd6,
0x16, 0x0c, 0x3f, 0xb7, 0x89, 0x7c, 0x20, 0x47, 0xc6, 0x97, 0x00, 0x8f, 0xe9, 0x68, 0xa3, 0x41,
0x3b, 0xec, 0x60, 0x29, 0x38, 0x65, 0x8e, 0xe0, 0xd2, 0xa7, 0x9b, 0xae, 0xca, 0x93, 0xf5, 0x89,
0xe9, 0xde, 0xfe, 0x41, 0x06, 0xb4, 0xf1, 0x43, 0x20, 0xd6, 0x21, 0xd7, 0xd8, 0x2f, 0x97, 0xcd,
0x46, 0x03, 0xa5, 0xf0, 0x1a, 0xa0, 0xfd, 0x9a, 0xf9, 0xd9, 0x9e, 0x59, 0x6e, 0x9a, 0x95, 0x96,
0x49, 0x48, 0x9d, 0x20, 0x05, 0x63, 0x58, 0x2e, 0xd7, 0x6b, 0x35, 0xb3, 0xdc, 0x6c, 0x6d, 0x95,
0xac, 0xaa, 0x59, 0x41, 0x69, 0x7c, 0x11, 0x56, 0xf7, 0x4c, 0xb2, 0x6b, 0x35, 0x1a, 0x56, 0xbd,
0xd6, 0xaa, 0x98, 0x35, 0xcb, 0xac, 0x20, 0x15, 0x5f, 0x81, 0x8b, 0xe5, 0x7a, 0xb5, 0x6a, 0x96,
0x9b, 0x0c, 0x5c, 0xab, 0x37, 0x5b, 0xe6, 0x67, 0x56, 0xa3, 0xd9, 0x40, 0x19, 0x86, 0xdb, 0xaa,
0x56, 0xcd, 0xed, 0x52, 0xb5, 0x55, 0x22, 0xdb, 0xfb, 0xbb, 0x66, 0xad, 0x89, 0x96, 0x18, 0x9e,
0x08, 0x5a, 0xb1, 0x76, 0xcd, 0x1a, 0x43, 0x87, 0x72, 0xf8, 0x12, 0xe0, 0x08, 0x6c, 0xd5, 0x2a,
0xe6, 0x67, 0xad, 0xe6, 0xe7, 0x7b, 0x26, 0xca, 0xe3, 0x6b, 0x70, 0x39, 0x82, 0xc7, 0xcf, 0x29,
0xed, 0x9a, 0x48, 0xc3, 0x08, 0x0a, 0xd1, 0x64, 0xb3, 0xbe, 0xf7, 0x18, 0x41, 0x1c, 0x3b, 0xa9,
0x3f, 0x23, 0x66, 0xb9, 0x4e, 0x2a, 0x48, 0x8f, 0x83, 0x9f, 0x9a, 0xe5, 0x66, 0x9d, 0xb4, 0xac,
0x0a, 0x2a, 0x30, 0xe2, 0x23, 0x70, 0xc3, 0x2c, 0x91, 0xf2, 0x4e, 0x8b, 0x98, 0x8d, 0xfd, 0x6a,
0x13, 0x15, 0x99, 0x08, 0xb6, 0xac, 0xaa, 0xc9, 0x39, 0xda, 0xaa, 0xef, 0xd7, 0x2a, 0x68, 0x19,
0xaf, 0x80, 0xbe, 0x6b, 0x36, 0x4b, 0x91, 0x4c, 0x56, 0xd8, 0xf9, 0xe5, 0x52, 0x79, 0xc7, 0x8c,
0x20, 0x08, 0xaf, 0xc3, 0x5a, 0xb9, 0x54, 0x63, 0x9b, 0xca, 0xc4, 0x2c, 0x35, 0xcd, 0xd6, 0x56,
0xbd, 0x5a, 0x31, 0x09, 0x5a, 0x65, 0x0c, 0x4e, 0xcd, 0x58, 0x55, 0x13, 0xe1, 0xd8, 0x8e, 0x8a,
0x59, 0x35, 0x27, 0x3b, 0x2e, 0xc4, 0x76, 0x44, 0x33, 0x6c, 0xc7, 0x1a, 0x63, 0x66, 0x73, 0xdf,
0xaa, 0x56, 0xa4, 0xa0, 0x84, 0xd2, 0x2e, 0xe2, 0x55, 0x28, 0x46, 0xcc, 0xd4, 0xaa, 0x56, 0xa3,
0x89, 0x2e, 0xe1, 0xcb, 0x70, 0x21, 0x02, 0xed, 0x9a, 0x4d, 0x62, 0x95, 0x85, 0x54, 0x2f, 0xb3,
0xb5, 0xf5, 0xfd, 0x66, 0xab, 0xbe, 0xd5, 0xda, 0x35, 0x77, 0xeb, 0xe4, 0x73, 0xb4, 0x7e, 0xfb,
0x6b, 0x05, 0xf2, 0x51, 0xd1, 0x8b, 0xf3, 0x90, 0xa9, 0xd5, 0x6b, 0x26, 0x4a, 0xb1, 0xd1, 0x66,
0xbd, 0x5e, 0x45, 0x0a, 0x1b, 0x59, 0xb5, 0xe6, 0x43, 0x94, 0xc6, 0x1a, 0x2c, 0x59, 0xb5, 0xe6,
0xfb, 0xf7, 0x91, 0x2a, 0x87, 0x1f, 0x6c, 0xa0, 0x8c, 0x1c, 0xde, 0xff, 0x10, 0x2d, 0xb1, 0xe1,
0x56, 0xb5, 0x5e, 0x6a, 0x22, 0xc0, 0x00, 0xd9, 0x4a, 0x7d, 0x7f, 0xb3, 0x6a, 0x22, 0x9d, 0x8d,
0x1b, 0x4d, 0x62, 0xd5, 0xb6, 0xd1, 0x1a, 0xa3, 0x40, 0x6a, 0x62, 0xd3, 0xaa, 0x95, 0xc8, 0xe7,
0xc8, 0x61, 0xd2, 0x94, 0x20, 0xb1, 0x99, 0xde, 0x2e, 0xc3, 0xca, 0x54, 0x99, 0x86, 0xb3, 0x90,
0xae, 0x36, 0x51, 0x0a, 0xe7, 0x40, 0xad, 0x36, 0x4d, 0xa4, 0x30, 0x80, 0xf9, 0x04, 0xa5, 0xd9,
0xef, 0x76, 0x13, 0xa9, 0x6c, 0x62, 0xbb, 0x69, 0xa2, 0x0c, 0x03, 0xd4, 0x4c, 0xb4, 0x74, 0xfb,
0x21, 0x2c, 0xf1, 0xd4, 0x9f, 0x19, 0xbe, 0x55, 0x7b, 0x5a, 0xaa, 0x5a, 0x15, 0xc1, 0xd7, 0xee,
0x7e, 0xa3, 0x89, 0x14, 0x4e, 0xd5, 0x4e, 0x7d, 0xbf, 0xca, 0x8c, 0xbc, 0x00, 0x79, 0x06, 0x65,
0x5a, 0x47, 0xea, 0xed, 0x9b, 0x90, 0x15, 0xc1, 0x9b, 0xad, 0xb1, 0x6a, 0x0d, 0x93, 0xb0, 0x93,
0x19, 0x47, 0x5c, 0x1f, 0x48, 0xb9, 0xfd, 0x2b, 0x15, 0x72, 0xf2, 0xa2, 0x65, 0x18, 0x8f, 0xca,
0x3d, 0x07, 0xa5, 0x98, 0x82, 0x8e, 0xca, 0x3e, 0xb5, 0x43, 0x3a, 0x69, 0x42, 0x21, 0x07, 0x5f,
0x80, 0x95, 0xa3, 0x8a, 0xef, 0x0d, 0x62, 0x40, 0xca, 0xec, 0xec, 0x68, 0xc7, 0x0e, 0x62, 0xb0,
0xe7, 0xcc, 0x71, 0x8e, 0xaa, 0x6e, 0x10, 0x4e, 0x80, 0x01, 0xea, 0x30, 0x73, 0x38, 0xda, 0xa6,
0x61, 0xb2, 0xaf, 0x85, 0x0e, 0x99, 0x92, 0x93, 0xf0, 0x46, 0x68, 0x87, 0x01, 0x72, 0x39, 0x6a,
0xde, 0xfc, 0x8c, 0xda, 0xdf, 0xe8, 0x2b, 0x7c, 0x11, 0x90, 0x24, 0x6d, 0xfc, 0x48, 0x8a, 0x7e,
0xae, 0xe0, 0x0b, 0xb0, 0xcc, 0x49, 0x9b, 0x00, 0x7f, 0xc1, 0xa2, 0x40, 0x91, 0x91, 0x36, 0x81,
0x7d, 0xa3, 0xe0, 0x35, 0x58, 0xe1, 0xa4, 0x8d, 0x81, 0x01, 0xfa, 0xa5, 0x82, 0x57, 0xa1, 0x20,
0xb1, 0xf2, 0x2e, 0x2a, 0xfa, 0x61, 0x1a, 0xaf, 0x00, 0x70, 0x8c, 0x02, 0xf0, 0xa3, 0xb4, 0x38,
0x82, 0x06, 0x6d, 0xdf, 0x3d, 0x90, 0xab, 0x7e, 0xcc, 0xe4, 0x9d, 0x3b, 0x12, 0xa9, 0x03, 0xfa,
0x6f, 0x95, 0x2f, 0x19, 0xb7, 0xd6, 0x36, 0x47, 0x56, 0x05, 0x7d, 0xad, 0xe2, 0x4b, 0xb0, 0x7a,
0x24, 0xb2, 0x8a, 0x18, 0xfc, 0x7f, 0x54, 0xbe, 0x55, 0x5c, 0x78, 0xe8, 0x7f, 0x55, 0xce, 0x17,
0xa3, 0xcb, 0xaa, 0x8c, 0x1f, 0x55, 0xd0, 0xff, 0xa9, 0x82, 0x5c, 0xcf, 0x76, 0x62, 0xe2, 0xfd,
0xbd, 0x8a, 0x75, 0xc8, 0x1e, 0xf1, 0x76, 0x28, 0xfa, 0x83, 0x8a, 0x8b, 0x90, 0x3f, 0x92, 0x9d,
0x4d, 0xf4, 0x47, 0xf5, 0xf6, 0x0d, 0xc8, 0x47, 0xe1, 0x99, 0x69, 0x94, 0x98, 0x25, 0x66, 0x2d,
0x1a, 0x2c, 0x3d, 0x23, 0x16, 0x53, 0xf9, 0xc6, 0x77, 0x45, 0x28, 0xee, 0xf2, 0x60, 0xde, 0xa0,
0xfe, 0x4b, 0xb7, 0x4d, 0xf1, 0x3f, 0x03, 0x9a, 0xd6, 0x36, 0x9e, 0xf9, 0xea, 0x7d, 0x75, 0x56,
0xd3, 0xd1, 0x48, 0xe1, 0x2d, 0x28, 0x26, 0x4c, 0x00, 0x5f, 0x9b, 0x6a, 0x14, 0xc4, 0xaf, 0xec,
0xab, 0x97, 0x4e, 0x94, 0x94, 0xa2, 0xad, 0x9e, 0xc2, 0x16, 0xe0, 0x48, 0xc2, 0x67, 0x45, 0x36,
0x93, 0x4e, 0x23, 0x85, 0x9f, 0x30, 0xcf, 0x1b, 0xf6, 0xc3, 0xb3, 0xe2, 0xb9, 0x31, 0x67, 0x72,
0xdc, 0x7f, 0x4f, 0xe1, 0x7f, 0x81, 0x95, 0xc6, 0x21, 0xfb, 0x1c, 0x1b, 0xf5, 0x94, 0x94, 0x64,
0x7f, 0x7e, 0x2e, 0xae, 0xe8, 0xef, 0x21, 0x46, 0x0a, 0xef, 0x01, 0x4e, 0xe2, 0xe2, 0x3d, 0xde,
0x53, 0x29, 0x9c, 0x37, 0xc9, 0x7b, 0xba, 0x29, 0x5c, 0x81, 0xe5, 0xa4, 0x6f, 0x9e, 0x8e, 0x6d,
0x8e, 0x26, 0x3f, 0x06, 0x3d, 0xe6, 0x07, 0x38, 0x59, 0x84, 0x4f, 0x5e, 0x18, 0xe6, 0x6d, 0x2f,
0x43, 0x31, 0xe1, 0x22, 0xf3, 0x11, 0xcc, 0x9b, 0x30, 0x52, 0xf8, 0x11, 0x68, 0x63, 0xc7, 0x5b,
0x98, 0x02, 0x13, 0x56, 0xa6, 0xc2, 0xc3, 0x94, 0x1c, 0x92, 0x7f, 0xd4, 0x98, 0x8f, 0xa6, 0x10,
0x8f, 0x1c, 0xa7, 0xe3, 0x98, 0x6f, 0xd0, 0x8f, 0x61, 0x99, 0xa9, 0x79, 0x12, 0x6c, 0x4e, 0x57,
0xca, 0xd5, 0xd9, 0xa7, 0x48, 0x9b, 0x61, 0xc2, 0x8d, 0x87, 0xb8, 0x37, 0x62, 0xec, 0x23, 0xc8,
0x8a, 0x78, 0x85, 0xd7, 0xa7, 0x24, 0x3b, 0x7e, 0xde, 0x9f, 0xe2, 0x67, 0xfc, 0x5f, 0x0e, 0x2e,
0x96, 0x62, 0x22, 0xbe, 0x4d, 0x91, 0x90, 0xec, 0xfc, 0x5f, 0x9d, 0xfd, 0x86, 0x69, 0xa4, 0xf0,
0x0e, 0x14, 0xe2, 0x2f, 0x10, 0xf8, 0x6f, 0xa6, 0x5a, 0x1f, 0x53, 0x8f, 0x13, 0xa7, 0x10, 0xf4,
0x09, 0x64, 0x45, 0x0c, 0xc5, 0x73, 0x9f, 0x90, 0xaf, 0x26, 0x67, 0x62, 0x6f, 0xb4, 0xdc, 0x0f,
0x57, 0xa6, 0x9e, 0xb2, 0xf1, 0xdb, 0x33, 0x10, 0x25, 0x1f, 0xba, 0x4f, 0xc5, 0xf8, 0x00, 0xd4,
0x72, 0xcf, 0x99, 0x13, 0x19, 0xa6, 0x88, 0x8c, 0x3d, 0x56, 0xa6, 0x70, 0x09, 0x60, 0xf2, 0x92,
0x84, 0x93, 0x65, 0xcc, 0xd4, 0x13, 0xd3, 0x3c, 0xe5, 0x6e, 0xc3, 0xea, 0x9e, 0x4f, 0xbb, 0x89,
0xfb, 0xe2, 0x8d, 0xc2, 0xc0, 0x03, 0x58, 0xe2, 0xf7, 0xcb, 0x94, 0xfb, 0x4d, 0x9e, 0xe0, 0xe6,
0x6d, 0x7c, 0xc4, 0x5f, 0x29, 0xd9, 0x5d, 0x84, 0xaf, 0x9c, 0x6c, 0x16, 0xcb, 0xb7, 0xb7, 0x79,
0x9b, 0x37, 0x21, 0x2f, 0xf5, 0xb6, 0x89, 0xaf, 0xce, 0x53, 0xe7, 0xde, 0xe6, 0x69, 0xe2, 0xdf,
0xdc, 0xf8, 0xe2, 0x5e, 0xc7, 0x0d, 0x0f, 0x87, 0x07, 0x77, 0xda, 0x5e, 0xef, 0x6e, 0xfb, 0xdf,
0x83, 0x7b, 0xf7, 0x1e, 0xdc, 0x0d, 0x86, 0x2f, 0xbb, 0x6e, 0xef, 0xee, 0x9c, 0xbf, 0x2a, 0x1e,
0x64, 0xf9, 0x7f, 0x14, 0x3f, 0xf8, 0x53, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8f, 0x0a, 0x17, 0x37,
0xcc, 0x28, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.

View File

@ -208,8 +208,8 @@ message QueryResult {
repeated float scores = 4;
repeated float distances = 5;
repeated KeyValuePair extra_params = 6;
int64 query_id = 7;
int64 client_id = 8;
uint64 query_id = 7;
int64 proxy_id = 8;
}
/**
@ -675,6 +675,69 @@ enum OpType {
DELETE = 1;
}
enum ReqType {
// general operations
kCmd = 0;
/* collection operations */
kCreateCollection = 100;
kDropCollection = 101;
kHasCollection = 102;
kListCollections = 103;
kGetCollectionInfo = 104;
kGetCollectionStats = 105;
kCountEntities = 106;
/* partition operations */
kCreatePartition = 200;
kDropPartition = 201;
kHasPartition = 202;
kListPartitions = 203;
/* index operations */
kCreateIndex = 300;
kDropIndex = 301;
kDescribeIndex = 302;
/* data operations */
kInsert = 400;
kGetEntityByID = 401;
kDeleteEntityByID = 402;
kSearch = 403;
kListIDInSegment = 404;
/* other operations */
kLoadCollection = 500;
kFlush = 501;
kCompact = 502;
}
message QueryReqMsg {
string collection_name = 1;
repeated VectorParam vector_param = 2;
repeated string partition_tags = 3;
string dsl = 4;
repeated KeyValuePair extra_params = 5;
uint64 timestamp =6;
int64 proxy_id = 7;
uint64 query_id = 8;
ReqType req_type = 9;
}
message ManipulationReqMsg {
string collection_name = 1;
string partition_tag = 2;
repeated uint64 primary_keys = 3;
repeated RowData rows_data = 4;
uint64 timestamp =5;
uint64 segment_id = 6;
uint64 channel_id = 7;
ReqType req_type = 8;
int64 proxy_id = 9;
repeated KeyValuePair extra_params = 10;
}
message InsertOrDeleteMsg {
string collection_name = 1;
RowData rows_data = 2;

View File

@ -3,11 +3,13 @@ package informer
import (
"context"
"fmt"
"github.com/apache/pulsar-client-go/pulsar"
"github.com/czs007/suvlim/conf"
"github.com/czs007/suvlim/pkg/master/mock"
"log"
"strconv"
"time"
"github.com/apache/pulsar-client-go/pulsar"
"github.com/czs007/suvlim/pkg/master/mock"
)
func NewPulsarClient() PulsarClient {
@ -15,25 +17,11 @@ func NewPulsarClient() PulsarClient {
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
var client pulsar.Client
var err error
if conf.Config.Pulsar.Authentication {
client, err = pulsar.NewClient(pulsar.ClientOptions{
URL: pulsarAddr,
Authentication: pulsar.NewAuthenticationToken(conf.Config.Pulsar.Token),
//OperationTimeout: 30 * time.Second,
//ConnectionTimeout: 30 * time.Second,
})
} else {
client, err = pulsar.NewClient(pulsar.ClientOptions{
URL: pulsarAddr,
//OperationTimeout: 30 * time.Second,
//ConnectionTimeout: 30 * time.Second,
})
}
client, err := pulsar.NewClient(pulsar.ClientOptions{
URL: pulsarAddr,
OperationTimeout: 30 * time.Second,
ConnectionTimeout: 30 * time.Second,
})
if err != nil {
log.Fatalf("Could not instantiate Pulsar client: %v", err)
}
@ -65,10 +53,8 @@ func (pc PulsarClient) Listener(ssChan chan mock.SegmentStats) error {
if err != nil {
log.Println("SegmentUnMarshal Failed")
}
//fmt.Printf("Received message msgId: %#v -- content: '%s'\n",
// msg.ID(), m.SegementID)
fmt.Println("Received SegmentStats -- segmentID:", m.SegementID,
",memSize:", m.MemorySize, ",memRate:", m.MemoryRate, ",numRows:", m.Rows, ",status:", m.Status)
fmt.Printf("Received message msgId: %#v -- content: '%s'\n",
msg.ID(), m.SegementID)
ssChan <- m
consumer.Ack(msg)
}

View File

@ -3,8 +3,10 @@ package mock
import (
"bytes"
"encoding/gob"
masterpb "github.com/czs007/suvlim/pkg/master/grpc/master"
"github.com/golang/protobuf/proto"
"time"
masterpb "github.com/czs007/suvlim/pkg/master/grpc/master"
)
type SegmentStats struct {
@ -57,7 +59,7 @@ type Segment struct {
Rows int64 `json:"rows"`
}
func NewSegment(id uint64, collectioID uint64, cName string, ptag string, chStart int, chEnd int, openTime uint64, closeTime uint64) Segment {
func NewSegment(id uint64, collectioID uint64, cName string, ptag string, chStart int, chEnd int, openTime time.Time, closeTime time.Time) Segment {
return Segment{
SegmentID: id,
CollectionID: collectioID,
@ -65,8 +67,8 @@ func NewSegment(id uint64, collectioID uint64, cName string, ptag string, chStar
PartitionTag: ptag,
ChannelStart: chStart,
ChannelEnd: chEnd,
OpenTimeStamp: openTime,
CloseTimeStamp: closeTime,
OpenTimeStamp: uint64(openTime.Unix()),
CloseTimeStamp: uint64(closeTime.Unix()),
}
}
func Segment2JSON(s Segment) (string, error) {

View File

@ -61,19 +61,16 @@ func SegmentStatsController() {
}
}
func GetPhysicalTimeNow() uint64 {
return uint64(time.Now().UnixNano() / int64(time.Millisecond))
}
func ComputeCloseTime(segmentCloseLog *map[uint64]uint64, ss mock.SegmentStats, kvbase kv.Base) error {
segmentID := ss.SegementID
if _, ok := (*segmentCloseLog)[segmentID]; ok {
// This segment has been closed
log.Println("Segment", segmentID, "has been closed")
return nil
}
if int(ss.MemorySize) > int(conf.Config.Master.SegmentThreshole*0.8) {
currentTime := GetPhysicalTimeNow()
currentTime := time.Now()
memRate := int(ss.MemoryRate)
if memRate == 0 {
//memRate = 1
@ -83,54 +80,34 @@ func ComputeCloseTime(segmentCloseLog *map[uint64]uint64, ss mock.SegmentStats,
sec := float64(conf.Config.Master.SegmentThreshole*0.2) / float64(memRate)
data, err := kvbase.Load("segment/" + strconv.Itoa(int(ss.SegementID)))
if err != nil {
log.Println("Load segment failed")
return err
}
seg, err := mock.JSON2Segment(data)
if err != nil {
log.Println("JSON2Segment failed")
return err
}
seg.CloseTimeStamp = currentTime + uint64(sec * 1000)
// Reduce time gap between Proxy and Master
seg.CloseTimeStamp = seg.CloseTimeStamp + uint64(5 * 1000)
fmt.Println("Close segment = ", seg.SegmentID, ",Close time = ", seg.CloseTimeStamp)
segmentLogicTime := seg.CloseTimeStamp << 46 >> 46
seg.CloseTimeStamp = uint64(currentTime.Add(time.Duration(sec) * time.Second).Unix()) << 18 + segmentLogicTime
fmt.Println("memRate = ", memRate, ",sec = ", sec ,",Close time = ", seg.CloseTimeStamp)
updateData, err := mock.Segment2JSON(*seg)
if err != nil {
log.Println("Update segment, Segment2JSON failed")
return err
}
err = kvbase.Save("segment/"+strconv.Itoa(int(ss.SegementID)), updateData)
if err != nil {
log.Println("Save segment failed")
return err
}
kvbase.Save("segment/"+strconv.Itoa(int(ss.SegementID)), updateData)
(*segmentCloseLog)[segmentID] = seg.CloseTimeStamp
//create new segment
newSegID := id.New().Uint64()
newSeg := mock.NewSegment(newSegID, seg.CollectionID, seg.CollectionName, "default", seg.ChannelStart, seg.ChannelEnd, currentTime, 1 << 46 - 1)
newSeg := mock.NewSegment(newSegID, seg.CollectionID, seg.CollectionName, "default", seg.ChannelStart, seg.ChannelEnd, currentTime, time.Unix(1<<36-1, 0))
newSegData, err := mock.Segment2JSON(*&newSeg)
if err != nil {
log.Println("Create new segment, Segment2JSON failed")
return err
}
//save to kv store
err = kvbase.Save("segment/"+strconv.Itoa(int(newSegID)), newSegData)
if err != nil {
log.Println("Save segment failed")
return err
}
kvbase.Save("segment/"+strconv.Itoa(int(newSegID)), newSegData)
// update collection data
c, _ := kvbase.Load("collection/" + strconv.Itoa(int(seg.CollectionID)))
collection, err := mock.JSON2Collection(c)
if err != nil {
log.Println("JSON2Segment failed")
return err
}
segIDs := collection.SegmentIDs
@ -138,14 +115,9 @@ func ComputeCloseTime(segmentCloseLog *map[uint64]uint64, ss mock.SegmentStats,
collection.SegmentIDs = segIDs
cData, err := mock.Collection2JSON(*collection)
if err != nil {
log.Println("Collection2JSON failed")
return err
}
err = kvbase.Save("collection/"+strconv.Itoa(int(seg.CollectionID)), cData)
if err != nil {
log.Println("Save collection failed")
return err
}
kvbase.Save("segment/"+strconv.Itoa(int(seg.CollectionID)), cData)
}
return nil
}
@ -175,7 +147,7 @@ func UpdateSegmentStatus(ss mock.SegmentStats, kvbase kv.Base) error {
if err != nil {
return err
}
err = kvbase.Save("segment/"+strconv.Itoa(int(seg.SegmentID)), segData)
err = kvbase.Save("segment/"+strconv.Itoa(int(seg.CollectionID)), segData)
if err != nil {
return err
}
@ -262,8 +234,8 @@ func CollectionController(ch chan *messagepb.Mapping) {
time.Now(), fieldMetas, []uint64{sID, s2ID},
[]string{"default"})
cm := mock.GrpcMarshal(&c)
s := mock.NewSegment(sID, cID, collection.CollectionName, "default", 0, 511, GetPhysicalTimeNow(), 1 << 46 - 1)
s2 := mock.NewSegment(s2ID, cID, collection.CollectionName, "default", 512, 1023, GetPhysicalTimeNow(), 1 << 46 - 1)
s := mock.NewSegment(sID, cID, collection.CollectionName, "default", 0, 511, time.Now(), time.Unix(1<<36-1, 0))
s2 := mock.NewSegment(s2ID, cID, collection.CollectionName, "default", 512, 1023, time.Now(), time.Unix(1<<36-1, 0))
collectionData, _ := mock.Collection2JSON(*cm)
segmentData, err := mock.Segment2JSON(s)
if err != nil {
@ -298,75 +270,37 @@ func WriteCollection2Datastore(collection *messagepb.Mapping) error {
})
defer cli.Close()
kvbase := kv.NewEtcdKVBase(cli, conf.Config.Etcd.Rootpath)
sID := id.New().Uint64()
cID := id.New().Uint64()
fieldMetas := []*messagepb.FieldMeta{}
if collection.Schema != nil {
fieldMetas = collection.Schema.FieldMetas
}
queryNodeNum := conf.Config.Master.QueryNodeNum
topicNum := conf.Config.Pulsar.TopicNum
var topicNumPerQueryNode int
if topicNum % queryNodeNum != 0 {
topicNumPerQueryNode = topicNum / queryNodeNum + 1
} else {
topicNumPerQueryNode = topicNum / queryNodeNum
}
fmt.Println("QueryNodeNum = ", queryNodeNum)
fmt.Println("TopicNum = ", topicNum)
fmt.Println("TopicNumPerQueryNode = ", topicNumPerQueryNode)
sIDs := make([]uint64, queryNodeNum)
for i := 0; i < queryNodeNum; i++ {
// For generating different id
time.Sleep(1000 * time.Millisecond)
sIDs[i] = id.New().Uint64()
}
c := mock.NewCollection(cID, collection.CollectionName,
time.Now(), fieldMetas, sIDs,
time.Now(), fieldMetas, []uint64{sID},
[]string{"default"})
cm := mock.GrpcMarshal(&c)
s := mock.NewSegment(sID, cID, collection.CollectionName, "default", 0, conf.Config.Pulsar.TopicNum, time.Now(), time.Unix(1<<46-1, 0))
collectionData, err := mock.Collection2JSON(*cm)
if err != nil {
log.Fatal(err)
return err
}
segmentData, err := mock.Segment2JSON(s)
if err != nil {
log.Fatal(err)
return err
}
err = kvbase.Save("collection/"+strconv.FormatUint(cID, 10), collectionData)
if err != nil {
log.Fatal(err)
return err
}
for i := 0; i < queryNodeNum; i++ {
chStart := i * topicNumPerQueryNode
chEnd := (i + 1) * topicNumPerQueryNode
if chEnd > topicNum {
chEnd = topicNum - 1
}
s := mock.NewSegment(sIDs[i], cID, collection.CollectionName, "default", chStart, chEnd, GetPhysicalTimeNow(), 1 << 46 - 1)
segmentData, err := mock.Segment2JSON(s)
if err != nil {
log.Fatal(err)
return err
}
err = kvbase.Save("segment/"+strconv.FormatUint(sIDs[i], 10), segmentData)
if err != nil {
log.Fatal(err)
return err
}
err = kvbase.Save("segment/"+strconv.FormatUint(sID, 10), segmentData)
if err != nil {
log.Fatal(err)
return err
}
return nil
}

View File

@ -1,7 +1,7 @@
syntax = "proto3";
package milvus.grpc;
option go_package="msgpb";
option go_package="master/grpc/message";
enum ErrorCode {
SUCCESS = 0;
@ -208,8 +208,8 @@ message QueryResult {
repeated float scores = 4;
repeated float distances = 5;
repeated KeyValuePair extra_params = 6;
int64 query_id = 7;
int64 client_id = 8;
uint64 query_id = 7;
int64 proxy_id = 8;
}
/**
@ -675,6 +675,68 @@ enum OpType {
DELETE = 1;
}
enum ReqType {
// general operations
kCmd = 0;
/* collection operations */
kCreateCollection = 100;
kDropCollection = 101;
kHasCollection = 102;
kListCollections = 103;
kGetCollectionInfo = 104;
kGetCollectionStats = 105;
kCountEntities = 106;
/* partition operations */
kCreatePartition = 200;
kDropPartition = 201;
kHasPartition = 202;
kListPartitions = 203;
/* index operations */
kCreateIndex = 300;
kDropIndex = 301;
kDescribeIndex = 302;
/* data operations */
kInsert = 400;
kGetEntityByID = 401;
kDeleteEntityByID = 402;
kSearch = 403;
kListIDInSegment = 404;
/* other operations */
kLoadCollection = 500;
kFlush = 501;
kCompact = 502;
}
message QueryReqMsg {
string collection_name = 1;
repeated VectorParam vector_param = 2;
repeated string partition_tags = 3;
string dsl = 4;
repeated KeyValuePair extra_params = 5;
uint64 timestamp =6;
int64 proxy_id = 7;
uint64 query_id = 8;
ReqType req_type = 9;
}
message ManipulationReqMsg {
string collection_name = 1;
string partition_tag = 2;
repeated uint64 primary_keys = 3;
repeated RowData rows_data = 4;
uint64 timestamp =5;
uint64 segment_id = 6;
uint64 channel_id = 7;
ReqType req_type = 8;
int64 proxy_id = 9;
repeated KeyValuePair extra_params = 10;
}
message InsertOrDeleteMsg {
string collection_name = 1;
RowData rows_data = 2;

17
proxy-go/main.go Normal file
View File

@ -0,0 +1,17 @@
package main
import (
proxy "github.com/czs007/suvlim/proxy-go/proxy_node"
"log"
)
func main() {
cfg, err := proxy.ReadProxyOptionsFromConfig()
if err != nil {
log.Fatalf("read proxy options form config file , error = %v", err)
}
err = proxy.StartProxy(cfg)
if err != nil {
log.Fatalf("start proxy failed, error = %v", err)
}
}

View File

@ -0,0 +1,130 @@
package proxy_node
import (
"github.com/apache/pulsar-client-go/pulsar"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"log"
"sync"
)
type manipulationReq struct {
pb.ManipulationReqMsg
wg sync.WaitGroup
proxy *proxyServer
}
// TsMsg interfaces
func (req *manipulationReq) Ts() Timestamp {
return Timestamp(req.Timestamp)
}
func (req *manipulationReq) SetTs(ts Timestamp) {
req.Timestamp = uint64(ts)
}
// BaseRequest interfaces
func (req *manipulationReq) Type() pb.ReqType {
return req.ReqType
}
func (req *manipulationReq) PreExecute() pb.Status {
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *manipulationReq) Execute() pb.Status {
req.proxy.reqSch.manipulationsChan <- req
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *manipulationReq) PostExecute() pb.Status { // send into pulsar
req.wg.Add(1)
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *manipulationReq) WaitToFinish() pb.Status { // wait until the message has been sent into pulsar
req.wg.Wait()
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
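// Illustrative sketch only (an assumption, not part of this change): the grpc handlers are
// expected to drive every BaseRequest through the same four phases in order. The helper
// name execRequest is hypothetical and exists only to show the assumed call sequence.
func execRequest(req BaseRequest) pb.Status {
	if st := req.PreExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
		return st
	}
	// Execute hands the request over to the scheduler channel consumed by a background routine.
	if st := req.Execute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
		return st
	}
	// PostExecute arms the WaitGroup that the background routine releases after publishing.
	if st := req.PostExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
		return st
	}
	// WaitToFinish blocks until the message has actually been sent to pulsar.
	return req.WaitToFinish()
}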
func (s *proxyServer) restartManipulationRoutine(buf_size int) error {
s.reqSch.manipulationsChan = make(chan *manipulationReq, buf_size)
pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: s.pulsarAddr})
if err != nil {
return err
}
readers := make([]pulsar.Producer, len(s.readerTopics))
for i, t := range s.readerTopics {
p, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: t})
if err != nil {
return err
}
readers[i] = p
}
deleter, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: s.deleteTopic})
if err != nil {
return err
}
go func() {
for {
select {
case <-s.ctx.Done():
deleter.Close()
for _, r := range readers {
r.Close()
}
pulsarClient.Close()
return
case ip := <-s.reqSch.manipulationsChan:
ts, st := s.getTimestamp(1)
if st.ErrorCode != pb.ErrorCode_SUCCESS {
log.Printf("get time stamp failed, error code = %d, msg = %s, drop inset rows = %d", st.ErrorCode, st.Reason, len(ip.RowsData))
continue
}
mq := pb.ManipulationReqMsg{
CollectionName: ip.CollectionName,
PartitionTag: ip.PartitionTag,
PrimaryKeys: ip.PrimaryKeys,
RowsData: ip.RowsData,
Timestamp: uint64(ts[0]),
SegmentId: ip.SegmentId,
ChannelId: ip.ChannelId,
ReqType: ip.ReqType,
ProxyId: ip.ProxyId,
ExtraParams: ip.ExtraParams,
}
mb, err := proto.Marshal(&mq)
if err != nil {
log.Printf("Marshal ManipulationReqMsg failed, error = %v", err)
continue
}
switch ip.ReqType {
case pb.ReqType_kInsert:
if _, err := readers[mq.ChannelId].Send(s.ctx, &pulsar.ProducerMessage{Payload: mb}); err != nil {
log.Printf("post into puslar failed, error = %v", err)
}
break
case pb.ReqType_kDeleteEntityByID:
if _, err = deleter.Send(s.ctx, &pulsar.ProducerMessage{Payload: mb}); err != nil {
log.Printf("post into pulsar filed, error = %v", err)
}
default:
log.Printf("post unexpect ReqType = %d", ip.ReqType)
break
}
s.reqSch.m_timestamp_mux.Lock()
if s.reqSch.m_timestamp <= ts[0] {
s.reqSch.m_timestamp = ts[0]
} else {
log.Printf("there is some wrong with m_timestamp, it goes back, current = %d, previous = %d", ts[0], s.reqSch.m_timestamp)
}
s.reqSch.m_timestamp_mux.Unlock()
ip.wg.Done()
}
}
}()
return nil
}

View File

@ -0,0 +1,171 @@
package proxy_node
import (
"context"
"fmt"
"github.com/apache/pulsar-client-go/pulsar"
"github.com/czs007/suvlim/conf"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
etcd "go.etcd.io/etcd/clientv3"
"strconv"
"time"
)
type BaseRequest interface {
Type() pb.ReqType
PreExecute() pb.Status
Execute() pb.Status
PostExecute() pb.Status
WaitToFinish() pb.Status
}
type ProxyOptions struct {
//proxy server
address string //grpc server address
master_address string //master server address
collectionMetaRootPath string // etcd root path,read metas of collections and segments from etcd
pulsarAddr string // pulsar address for reader
readerTopicsPrefix string
numReadTopics int
deleteTopic string
queryTopic string
resultTopic string
resultGroup string
numReaderNode int
proxyId int64 //start from 1
etcdEndpoints []string
//timestamporacle
tsoRootPath string //etcd root path, store timestamp into this key
tsoSaveInterval uint64
//timetick
timeTickInterval uint64
timeTickTopic string
timeTickPeerId int64 //start from 1
// inner member
proxyServer *proxyServer
tso *timestampOracle
timeTick *timeTick
ctx context.Context
cancel context.CancelFunc
}
func ReadProxyOptionsFromConfig() (*ProxyOptions, error) {
etcdRootpath := conf.Config.Etcd.Rootpath
if etcdRootpath[len(etcdRootpath)-1] == '/' {
etcdRootpath = etcdRootpath[0 : len(etcdRootpath)-1]
}
return &ProxyOptions{
address: conf.Config.Proxy.Network.Address + ":" + strconv.Itoa(conf.Config.Proxy.Network.Port),
master_address: conf.Config.Master.Address + ":" + strconv.Itoa(int(conf.Config.Master.Port)),
collectionMetaRootPath: etcdRootpath,
pulsarAddr: conf.Config.Pulsar.Address + ":" + strconv.Itoa(int(conf.Config.Pulsar.Port)),
readerTopicsPrefix: conf.Config.Proxy.PulsarTopics.ReaderTopicPrefix,
numReadTopics: conf.Config.Proxy.PulsarTopics.NumReaderTopics,
deleteTopic: conf.Config.Proxy.PulsarTopics.DeleteTopic,
queryTopic: conf.Config.Proxy.PulsarTopics.QueryTopic,
resultTopic: conf.Config.Proxy.PulsarTopics.ResultTopic,
resultGroup: conf.Config.Proxy.PulsarTopics.ResultGroup,
numReaderNode: conf.Config.Proxy.NumReaderNodes,
proxyId: int64(conf.Config.Proxy.ProxyId),
etcdEndpoints: []string{conf.Config.Etcd.Address + ":" + strconv.Itoa(int(conf.Config.Etcd.Port))},
tsoRootPath: etcdRootpath,
tsoSaveInterval: uint64(conf.Config.Proxy.TosSaveInterval),
timeTickInterval: uint64(conf.Config.Proxy.TimeTickInterval),
timeTickTopic: conf.Config.Proxy.PulsarTopics.TimeTickTopic,
timeTickPeerId: int64(conf.Config.Proxy.ProxyId),
}, nil
}
func StartProxy(opt *ProxyOptions) error {
//global context
opt.ctx, opt.cancel = context.WithCancel(context.Background())
///////////////////// timestamporacle //////////////////////////
etcdTso, err := etcd.New(etcd.Config{Endpoints: opt.etcdEndpoints})
if err != nil {
return err
}
tso := &timestampOracle{
client: etcdTso,
ctx: opt.ctx,
rootPath: opt.tsoRootPath,
saveInterval: opt.tsoSaveInterval,
}
tso.Restart(opt.proxyId)
/////////////////// proxy server ///////////////////////////////
//readerTopics: insert and delete messages are sent into these topics
readerTopics := make([]string, 0, opt.numReadTopics)
for i := 0; i < opt.numReadTopics; i++ {
readerTopics = append(readerTopics, opt.readerTopicsPrefix+strconv.Itoa(i))
}
etcdProxy, err := etcd.New(etcd.Config{Endpoints: opt.etcdEndpoints})
if err != nil {
return err
}
srv := &proxyServer{
address: opt.address,
master_address: opt.master_address,
rootPath: opt.collectionMetaRootPath,
pulsarAddr: opt.pulsarAddr,
readerTopics: readerTopics,
deleteTopic: opt.deleteTopic,
queryTopic: opt.queryTopic,
resultTopic: opt.resultTopic,
resultGroup: opt.resultGroup,
numReaderNode: opt.numReaderNode,
proxyId: opt.proxyId,
getTimestamp: tso.GetTimestamp,
client: etcdProxy,
ctx: opt.ctx,
}
errChan := make(chan error, 1)
go func() {
err := startProxyServer(srv)
errChan <- err
}()
time.Sleep(100 * time.Millisecond) //wait until grpc server has started
if len(errChan) > 0 {
return <-errChan
}
////////////////////////// time tick /////////////////////////////////
ttClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: opt.pulsarAddr})
if err != nil {
return err
}
ttProducer, err := ttClient.CreateProducer(pulsar.ProducerOptions{Topic: opt.timeTickTopic})
if err != nil {
return err
}
tt := &timeTick{
interval: opt.timeTickInterval,
pulsarProducer: ttProducer,
peer_id: opt.timeTickPeerId,
ctx: opt.ctx,
areRequestsDelivered: func(ts Timestamp) bool { return srv.reqSch.AreRequestsDelivered(ts, 2) },
getTimestamp: func() (Timestamp, pb.Status) {
ts, st := tso.GetTimestamp(1)
return ts[0], st
},
}
s := tt.Restart()
if s.ErrorCode != pb.ErrorCode_SUCCESS {
return fmt.Errorf(s.Reason)
}
opt.proxyServer = srv
opt.tso = tso
opt.timeTick = tt
return nil
}

View File

@ -0,0 +1,435 @@
package proxy_node
import (
"context"
"encoding/binary"
"encoding/json"
"github.com/apache/pulsar-client-go/pulsar"
mpb "github.com/czs007/suvlim/pkg/master/grpc/master"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
etcd "go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
"sort"
"strconv"
"testing"
"time"
)
func TestProxyNode(t *testing.T) {
startTestMaster("localhost:11000", t)
testOpt := ProxyOptions{
address: "localhost:11001",
master_address: "localhost:11000",
collectionMetaRootPath: "/collections/meta",
pulsarAddr: "pulsar://localhost:6650",
readerTopicsPrefix: "reader-",
numReadTopics: 2,
deleteTopic: "deleteT",
queryTopic: "queryT",
resultTopic: "resultT",
resultGroup: "resultG",
numReaderNode: 2,
proxyId: 1,
etcdEndpoints: []string{"127.0.0.1:2379"},
tsoRootPath: "/tso",
tsoSaveInterval: 200,
timeTickInterval: 200,
timeTickTopic: "timetick",
timeTickPeerId: 1,
}
if err := StartProxy(&testOpt); err != nil {
t.Fatal(err)
}
startTime := uint64(time.Now().UnixNano()) / uint64(1e6)
t.Logf("start time stamp = %d", startTime)
etcdClient, err := etcd.New(etcd.Config{Endpoints: testOpt.etcdEndpoints})
assert.Nil(t, err)
//defer etcdClient.Close()
pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: testOpt.pulsarAddr})
assert.Nil(t, err)
defer pulsarClient.Close()
go func() {
time.Sleep(time.Second)
for {
ts, err := etcdClient.Get(testOpt.ctx, testOpt.tsoRootPath+tsoKeyPath)
assert.Nil(t, err)
if len(ts.Kvs) != 1 {
t.Fatalf("save tso into etcd falied")
}
value, err := strconv.ParseUint(string(ts.Kvs[0].Value), 10, 64)
assert.Nil(t, err)
curValue, st := testOpt.tso.GetTimestamp(1)
assert.Equalf(t, st.ErrorCode, pb.ErrorCode_SUCCESS, "%s", st.Reason)
curTime := ToPhysicalTime(uint64(curValue[0]))
t.Logf("current time stamp = %d, saved time stamp = %d", curTime, value)
assert.GreaterOrEqual(t, uint64(curValue[0]), value)
assert.GreaterOrEqual(t, value, startTime)
time.Sleep(time.Duration(testOpt.tsoSaveInterval) * time.Millisecond)
}
}()
tickComsumer, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: testOpt.timeTickTopic,
SubscriptionName: testOpt.timeTickTopic + "G",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer tickComsumer.Close()
reader, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topics: testOpt.proxyServer.readerTopics,
SubscriptionName: testOpt.readerTopicsPrefix + "G",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer reader.Close()
query, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: testOpt.queryTopic,
SubscriptionName: testOpt.queryTopic + "G",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer query.Close()
deleteC, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: testOpt.deleteTopic,
SubscriptionName: testOpt.deleteTopic + "G",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer deleteC.Close()
result, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: testOpt.resultTopic})
assert.Nil(t, err)
defer result.Close()
tick := time.Tick(500 * time.Millisecond)
// read pulsar channel until empty
func() {
cnt := 0
for {
select {
case <-tick:
cnt++
if cnt >= 3 {
return
}
case cm, ok := <-tickComsumer.Chan():
assert.Truef(t, ok, "time tick consumer topic has closed")
tickComsumer.AckID(cm.ID())
case cm, ok := <-reader.Chan():
assert.Truef(t, ok, "reader comsumer topic has closed")
reader.AckID(cm.ID())
case cm, ok := <-deleteC.Chan():
assert.Truef(t, ok, "delete topic has closed")
deleteC.AckID(cm.ID())
case cm, ok := <-query.Chan():
assert.Truef(t, ok, "query topic has closed")
query.AckID(cm.ID())
}
}
}()
go func() {
lastT := startTime
for {
cm, ok := <-tickComsumer.Chan()
assert.Truef(t, ok, "time tick consumer topic has closed")
tickComsumer.AckID(cm.ID())
var tsm pb.TimeSyncMsg
if err := proto.Unmarshal(cm.Payload(), &tsm); err != nil {
t.Fatal(err)
}
curT := ToPhysicalTime(tsm.Timestamp)
t.Logf("time tick = %d", curT)
assert.Greater(t, curT, lastT)
lastT = curT
}
}()
cm100 := mpb.Collection{
Id: 100,
Name: "cm100",
Schema: nil,
CreateTime: 0,
SegmentIds: []uint64{101, 102},
PartitionTags: nil,
Indexes: nil,
}
sm101 := mpb.Segment{
SegmentId: 101,
CollectionId: 100,
ChannelStart: 0,
ChannelEnd: 1,
Status: mpb.SegmentStatus_OPENED,
}
sm102 := mpb.Segment{
SegmentId: 102,
CollectionId: 100,
ChannelStart: 1,
ChannelEnd: 2,
Status: mpb.SegmentStatus_OPENED,
}
if cm100b, err := json.Marshal(&cm100); err != nil {
t.Fatal(err)
} else if _, err := etcdClient.Put(testOpt.ctx, testOpt.collectionMetaRootPath+"/"+keyCollectionPath+"/100", string(cm100b)); err != nil {
t.Fatal(err)
}
if sm101b, err := json.Marshal(&sm101); err != nil {
t.Fatal(err)
} else if _, err := etcdClient.Put(testOpt.ctx, testOpt.collectionMetaRootPath+"/"+keySegmentPath+"/101", string(sm101b)); err != nil {
t.Fatal(err)
}
if sm102b, err := json.Marshal(&sm102); err != nil {
t.Fatal(err)
} else if _, err := etcdClient.Put(testOpt.ctx, testOpt.collectionMetaRootPath+"/"+keySegmentPath+"/102", string(sm102b)); err != nil {
t.Fatal(err)
}
ctx1, _ := context.WithTimeout(testOpt.ctx, time.Second)
grpcConn, err := grpc.DialContext(ctx1, testOpt.address, grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer grpcConn.Close()
proxyClient := pb.NewMilvusServiceClient(grpcConn)
insertParm := pb.InsertParam{
CollectionName: "cm100",
Schema: nil,
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(10)},
{Blob: uint64ToBytes(11)},
{Blob: uint64ToBytes(12)},
{Blob: uint64ToBytes(13)},
{Blob: uint64ToBytes(14)},
{Blob: uint64ToBytes(15)},
},
EntityIdArray: []int64{10, 11, 12, 13, 14, 15},
PartitionTag: "",
ExtraParams: nil,
}
deleteParm := pb.DeleteByIDParam{
CollectionName: "cm100",
IdArray: []int64{20, 21},
}
searchParm := pb.SearchParam{
CollectionName: "cm100",
VectorParam: nil,
Dsl: "",
PartitionTag: nil,
ExtraParams: nil,
}
go func() {
cm, ok := <-query.Chan()
assert.Truef(t, ok, "query topic has closed")
query.AckID(cm.ID())
var qm pb.QueryReqMsg
if err := proto.Unmarshal(cm.Payload(), &qm); err != nil {
t.Fatal(err)
}
assert.Equal(t, qm.ProxyId, testOpt.proxyId)
assert.Equal(t, qm.CollectionName, "cm100")
t.Logf("query time stamp = %d", ToPhysicalTime(qm.Timestamp))
assert.Greater(t, ToPhysicalTime(qm.Timestamp), startTime)
r1 := pb.QueryResult{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Entities: &pb.Entities{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Ids: []int64{11, 13, 15},
ValidRow: []bool{true, true, true},
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(11)},
{Blob: uint64ToBytes(13)},
{Blob: uint64ToBytes(15)},
},
},
RowNum: 3,
Scores: []float32{11, 13, 15},
Distances: []float32{11, 13, 15},
ExtraParams: nil,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}
r2 := pb.QueryResult{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Entities: &pb.Entities{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Ids: []int64{12, 14, 16},
ValidRow: []bool{true, false, true},
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(12)},
{Blob: uint64ToBytes(14)},
{Blob: uint64ToBytes(16)},
},
},
RowNum: 3,
Scores: []float32{12, 14, 16},
Distances: []float32{12, 14, 16},
ExtraParams: nil,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}
if b1, err := proto.Marshal(&r1); err != nil {
t.Fatal(err)
} else if _, err := result.Send(testOpt.ctx, &pulsar.ProducerMessage{Payload: b1}); err != nil {
t.Fatal(err)
}
if b2, err := proto.Marshal(&r2); err != nil {
t.Fatal(err)
} else if _, err := result.Send(testOpt.ctx, &pulsar.ProducerMessage{Payload: b2}); err != nil {
t.Fatal(err)
}
}()
insertR, err := proxyClient.Insert(testOpt.ctx, &insertParm)
assert.Nil(t, err)
assert.Equalf(t, insertR.Status.ErrorCode, pb.ErrorCode_SUCCESS, "%s", insertR.Status.Reason)
assert.Equal(t, len(insertR.EntityIdArray), 6)
sort.Slice(insertR.EntityIdArray, func(i, j int) bool {
return insertR.EntityIdArray[i] < insertR.EntityIdArray[j]
})
for i := 0; i < len(insertR.EntityIdArray); i++ {
assert.Equal(t, insertR.EntityIdArray[i], int64(i+10))
}
var insertPrimaryKey []uint64
readerM1, ok := <-reader.Chan()
assert.True(t, ok)
reader.AckID(readerM1.ID())
var m1 pb.ManipulationReqMsg
if err := proto.UnmarshalMerge(readerM1.Payload(), &m1); err != nil {
t.Fatal(err)
}
assert.Equal(t, m1.CollectionName, "cm100")
assert.Equal(t, len(m1.PrimaryKeys), len(m1.RowsData))
t.Logf("reader time stamp = %d", ToPhysicalTime(m1.Timestamp))
assert.GreaterOrEqual(t, ToPhysicalTime(m1.Timestamp), startTime)
for i, k := range m1.PrimaryKeys {
insertPrimaryKey = append(insertPrimaryKey, k)
rowValue := binary.LittleEndian.Uint64(m1.RowsData[i].Blob)
t.Logf("insert primary key = %d, row data= %d", k, rowValue)
assert.Equal(t, k, rowValue)
}
readerM2, ok := <-reader.Chan()
assert.True(t, ok)
reader.AckID(readerM2.ID())
var m2 pb.ManipulationReqMsg
if err := proto.UnmarshalMerge(readerM2.Payload(), &m2); err != nil {
t.Fatal(err)
}
assert.Equal(t, m2.CollectionName, "cm100")
assert.Equal(t, len(m2.PrimaryKeys), len(m2.RowsData))
t.Logf("read time stamp = %d", ToPhysicalTime(m2.Timestamp))
assert.GreaterOrEqual(t, ToPhysicalTime(m2.Timestamp), startTime)
for i, k := range m2.PrimaryKeys {
insertPrimaryKey = append(insertPrimaryKey, k)
rowValue := binary.LittleEndian.Uint64(m2.RowsData[i].Blob)
t.Logf("insert primary key = %d, row data= %d", k, rowValue)
assert.Equal(t, k, rowValue)
}
sort.Slice(insertPrimaryKey, func(i, j int) bool {
return insertPrimaryKey[i] < insertPrimaryKey[j]
})
assert.Equal(t, len(insertPrimaryKey), 6)
for i := 0; i < len(insertPrimaryKey); i++ {
assert.Equal(t, insertPrimaryKey[i], uint64(i+10))
}
deleteR, err := proxyClient.DeleteByID(testOpt.ctx, &deleteParm)
assert.Nil(t, err)
assert.Equal(t, deleteR.ErrorCode, pb.ErrorCode_SUCCESS)
deleteM, ok := <-deleteC.Chan()
assert.True(t, ok)
deleteC.AckID(deleteM.ID())
var dm pb.ManipulationReqMsg
if err := proto.UnmarshalMerge(deleteM.Payload(), &dm); err != nil {
t.Fatal(err)
}
assert.Equal(t, dm.CollectionName, "cm100")
assert.Equal(t, len(dm.PrimaryKeys), 2)
t.Logf("delete time stamp = %d", ToPhysicalTime(dm.Timestamp))
assert.GreaterOrEqual(t, ToPhysicalTime(dm.Timestamp), startTime)
for i := 0; i < len(dm.PrimaryKeys); i++ {
assert.Equal(t, dm.PrimaryKeys[i], uint64(i+20))
}
searchR, err := proxyClient.Search(testOpt.ctx, &searchParm)
assert.Nil(t, err)
assert.Equal(t, searchR.Status.ErrorCode, pb.ErrorCode_SUCCESS)
assert.Equal(t, searchR.Entities.Status.ErrorCode, pb.ErrorCode_SUCCESS)
assert.Equal(t, len(searchR.Entities.Ids), 3)
assert.Equal(t, searchR.Entities.Ids, []int64{16, 15, 13})
assert.Equal(t, len(searchR.Entities.ValidRow), 3)
assert.Equal(t, searchR.Entities.ValidRow, []bool{true, true, true})
assert.Equal(t, len(searchR.Entities.RowsData), 3)
assert.Equal(t, searchR.Entities.RowsData, []*pb.RowData{
{Blob: uint64ToBytes(16)},
{Blob: uint64ToBytes(15)},
{Blob: uint64ToBytes(13)},
})
assert.Equal(t, len(searchR.Scores), 3)
assert.Equal(t, searchR.Scores, []float32{16, 15, 13})
assert.Equal(t, len(searchR.Distances), 3)
assert.Equal(t, searchR.Distances, []float32{16, 15, 13})
time.Sleep(time.Second)
}
func TestReadProxyOptionsFromConfig(t *testing.T) {
conf, err := ReadProxyOptionsFromConfig()
assert.Nil(t, err)
t.Log(conf.address)
t.Log(conf.master_address)
t.Log(conf.collectionMetaRootPath)
t.Log(conf.pulsarAddr)
t.Log(conf.readerTopicsPrefix)
t.Log(conf.numReadTopics)
t.Log(conf.deleteTopic)
t.Log(conf.queryTopic)
t.Log(conf.resultTopic)
t.Log(conf.resultGroup)
t.Log(conf.numReaderNode)
t.Log(conf.proxyId)
t.Log(conf.etcdEndpoints)
t.Log(conf.tsoRootPath)
t.Log(conf.tsoSaveInterval)
t.Log(conf.timeTickInterval)
t.Log(conf.timeTickTopic)
t.Log(conf.timeTickPeerId)
}

View File

@ -0,0 +1,252 @@
package proxy_node
import (
"fmt"
"github.com/apache/pulsar-client-go/pulsar"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"log"
"sort"
"sync"
)
type queryReq struct {
pb.QueryReqMsg
result []*pb.QueryResult
wg sync.WaitGroup
proxy *proxyServer
}
// BaseRequest interfaces
func (req *queryReq) Type() pb.ReqType {
return req.ReqType
}
func (req *queryReq) PreExecute() pb.Status {
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *queryReq) Execute() pb.Status {
req.proxy.reqSch.queryChan <- req
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *queryReq) PostExecute() pb.Status { // send into pulsar
req.wg.Add(1)
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *queryReq) WaitToFinish() pb.Status { // wait until the message has been sent into pulsar
req.wg.Wait()
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (s *proxyServer) restartQueryRoutine(buf_size int) error {
s.reqSch.queryChan = make(chan *queryReq, buf_size)
pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: s.pulsarAddr})
if err != nil {
return err
}
query, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: s.queryTopic})
if err != nil {
return err
}
result, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: s.resultTopic,
SubscriptionName: s.resultGroup,
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
if err != nil {
return err
}
resultMap := make(map[uint64]*queryReq)
go func() {
defer result.Close()
defer query.Close()
defer pulsarClient.Close()
for {
select {
case <-s.ctx.Done():
return
case qm := <-s.reqSch.queryChan:
ts, st := s.getTimestamp(1)
if st.ErrorCode != pb.ErrorCode_SUCCESS {
log.Printf("get time stamp failed, error code = %d, msg = %s", st.ErrorCode, st.Reason)
break
}
q := pb.QueryReqMsg{
CollectionName: qm.CollectionName,
VectorParam: qm.VectorParam,
PartitionTags: qm.PartitionTags,
Dsl: qm.Dsl,
ExtraParams: qm.ExtraParams,
Timestamp: uint64(ts[0]),
ProxyId: qm.ProxyId,
QueryId: qm.QueryId,
ReqType: qm.ReqType,
}
qb, err := proto.Marshal(&q)
if err != nil {
log.Printf("Marshal QueryReqMsg failed, error = %v", err)
continue
}
if _, err := query.Send(s.ctx, &pulsar.ProducerMessage{Payload: qb}); err != nil {
log.Printf("post into puslar failed, error = %v", err)
}
s.reqSch.q_timestamp_mux.Lock()
if s.reqSch.q_timestamp <= ts[0] {
s.reqSch.q_timestamp = ts[0]
} else {
log.Printf("there is some wrong with q_timestamp, it goes back, current = %d, previous = %d", ts[0], s.reqSch.q_timestamp)
}
s.reqSch.q_timestamp_mux.Unlock()
resultMap[qm.QueryId] = qm
//log.Printf("start search, query id = %d", qm.QueryId)
case cm, ok := <-result.Chan():
if !ok {
log.Printf("consumer of result topic has closed")
return
}
var rm pb.QueryResult
if err := proto.Unmarshal(cm.Message.Payload(), &rm); err != nil {
log.Printf("Unmarshal QueryReqMsg failed, error = %v", err)
break
}
if rm.ProxyId != s.proxyId {
break
}
qm, ok := resultMap[rm.QueryId]
if !ok {
log.Printf("unknown query id = %d", rm.QueryId)
break
}
qm.result = append(qm.result, &rm)
if len(qm.result) == s.numReaderNode {
qm.wg.Done()
delete(resultMap, rm.QueryId)
}
result.AckID(cm.ID())
}
}
}()
return nil
}
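// Sketch of the assumed flow above (not asserted by this change): each queryReq taken from
// queryChan is stamped with a fresh timestamp, published to the query topic, and remembered
// in resultMap keyed by QueryId; partial results arriving on the result topic are matched
// back to the pending request, and the request is released (wg.Done) once numReaderNode
// partial results have been collected. resultMap is only touched from this single goroutine,
// so it needs no extra locking here.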
func (s *proxyServer) reduceResult(query *queryReq) *pb.QueryResult {
if s.numReaderNode == 1 {
return query.result[0]
}
var result []*pb.QueryResult
for _, r := range query.result {
if r.Status.ErrorCode == pb.ErrorCode_SUCCESS {
result = append(result, r)
}
}
if len(result) == 0 {
return query.result[0]
}
if len(result) == 1 {
return result[0]
}
var entities []*struct {
Ids int64
ValidRow bool
RowsData *pb.RowData
Scores float32
Distances float32
}
var rows int
result_err := func(msg string) *pb.QueryResult {
return &pb.QueryResult{
Status: &pb.Status{
ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR,
Reason: msg,
},
}
}
for _, r := range result {
if len(r.Entities.Ids) > rows {
rows = len(r.Entities.Ids)
}
if len(r.Entities.Ids) != len(r.Entities.ValidRow) {
return result_err(fmt.Sprintf("len(Entities.Ids)=%d, len(Entities.ValidRow)=%d", len(r.Entities.Ids), len(r.Entities.ValidRow)))
}
if len(r.Entities.Ids) != len(r.Entities.RowsData) {
return result_err(fmt.Sprintf("len(Entities.Ids)=%d, len(Entities.RowsData)=%d", len(r.Entities.Ids), len(r.Entities.RowsData)))
}
if len(r.Entities.Ids) != len(r.Scores) {
return result_err(fmt.Sprintf("len(Entities.Ids)=%d, len(Scores)=%d", len(r.Entities.Ids), len(r.Scores)))
}
if len(r.Entities.Ids) != len(r.Distances) {
return result_err(fmt.Sprintf("len(Entities.Ids)=%d, len(Distances)=%d", len(r.Entities.Ids), len(r.Distances)))
}
for i := 0; i < len(r.Entities.Ids); i++ {
entity := struct {
Ids int64
ValidRow bool
RowsData *pb.RowData
Scores float32
Distances float32
}{
Ids: r.Entities.Ids[i],
ValidRow: r.Entities.ValidRow[i],
RowsData: r.Entities.RowsData[i],
Scores: r.Scores[i],
Distances: r.Distances[i],
}
entities = append(entities, &entity)
}
}
sort.Slice(entities, func(i, j int) bool {
if entities[i].ValidRow == true {
if entities[j].ValidRow == false {
return true
}
return entities[i].Scores > entities[j].Scores
} else {
return false
}
})
rIds := make([]int64, 0, rows)
rValidRow := make([]bool, 0, rows)
rRowsData := make([]*pb.RowData, 0, rows)
rScores := make([]float32, 0, rows)
rDistances := make([]float32, 0, rows)
for i := 0; i < rows; i++ {
rIds = append(rIds, entities[i].Ids)
rValidRow = append(rValidRow, entities[i].ValidRow)
rRowsData = append(rRowsData, entities[i].RowsData)
rScores = append(rScores, entities[i].Scores)
rDistances = append(rDistances, entities[i].Distances)
}
return &pb.QueryResult{
Status: &pb.Status{
ErrorCode: pb.ErrorCode_SUCCESS,
},
Entities: &pb.Entities{
Status: &pb.Status{
ErrorCode: pb.ErrorCode_SUCCESS,
},
Ids: rIds,
ValidRow: rValidRow,
RowsData: rRowsData,
},
RowNum: int64(rows),
Scores: rScores,
Distances: rDistances,
ExtraParams: result[0].ExtraParams,
QueryId: query.QueryId,
ProxyId: query.ProxyId,
}
}
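// Note on the merge above (an assumption about intent, not stated in this change): every
// reader node returns its own ranked hits, so reduceResult concatenates all entities,
// orders valid rows first by descending score, and keeps only `rows` entries, where `rows`
// is the largest per-reader result size. With two readers each returning k hits, the
// reduced answer is the global top-k of the 2k candidates, which matches the ordering
// checked in the proxy test (ids 16, 15, 13).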

View File

@ -0,0 +1,372 @@
package proxy_node
import (
"context"
"fmt"
"github.com/apache/pulsar-client-go/pulsar"
"github.com/czs007/suvlim/conf"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"log"
"sort"
"strconv"
"sync"
)
const ReadStopFlagEnd int64 = 0
type ReaderTimeSync interface {
Start() error
Close()
TimeSync() <-chan TimeSyncMsg
ManipulationReqMsg() <-chan *pb.ManipulationReqMsg
IsManipulationReqMsgChanFull() bool
}
type TimeSyncMsg struct {
Timestamp uint64
NumRecorders int64
}
type ReaderTimeSyncOption func(*ReaderTimeSyncCfg)
type ReaderTimeSyncCfg struct {
pulsarAddr string
pulsarClient pulsar.Client
timeSyncConsumer pulsar.Consumer
readerConsumer pulsar.Consumer
readerProducer []pulsar.Producer
timesyncMsgChan chan TimeSyncMsg
manipulationReqMsgChan chan *pb.ManipulationReqMsg //output insert or delete msg
readStopFlagClientId int64
interval int64
proxyIdList []int64
readerQueueSize int
revTimesyncFromReader map[uint64]int
ctx context.Context
cancel context.CancelFunc
}
/*
layout of timestamp
    physical time (ms)      logical counter
/-------46 bit-----------\/------18bit-----\
+-------------------------+================+
*/
func toMillisecond(ts *pb.TimeSyncMsg) int {
// extract the physical part (milliseconds) of the hybrid timestamp
return int(ts.GetTimestamp() >> 18)
}
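// Illustrative helpers (an assumption, not part of this change) showing how a hybrid
// timestamp is composed and split under the 46-bit physical / 18-bit logical layout
// sketched above. The names composeTimestamp, physicalPart and logicalPart are hypothetical.
const logicalBits = 18

func composeTimestamp(physicalMs, logical uint64) uint64 {
	return physicalMs<<logicalBits | (logical & (1<<logicalBits - 1))
}

func physicalPart(ts uint64) uint64 { return ts >> logicalBits }          // physical time in ms
func logicalPart(ts uint64) uint64  { return ts & (1<<logicalBits - 1) }  // logical counter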
func NewReaderTimeSync(
timeSyncTopic string,
timeSyncSubName string,
readTopics []string,
readSubName string,
proxyIdList []int64,
readStopFlagClientId int64,
opts ...ReaderTimeSyncOption,
) (ReaderTimeSync, error) {
//pulsarAddr := "pulsar://"
//pulsarAddr += conf.Config.Pulsar.Address
//pulsarAddr += ":"
//pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
//interval := int64(conf.Config.Timesync.Interval)
//check whether proxyIdList contains duplicate ids
if len(proxyIdList) == 0 {
return nil, fmt.Errorf("proxy id list is empty")
}
if len(proxyIdList) > 1 {
sort.Slice(proxyIdList, func(i int, j int) bool { return proxyIdList[i] < proxyIdList[j] })
}
for i := 1; i < len(proxyIdList); i++ {
if proxyIdList[i] == proxyIdList[i-1] {
return nil, fmt.Errorf("there are two proxies have the same id = %d", proxyIdList[i])
}
}
r := &ReaderTimeSyncCfg{
//interval: interval,
proxyIdList: proxyIdList,
}
for _, opt := range opts {
opt(r)
}
if r.interval == 0 {
r.interval = int64(conf.Config.Timesync.Interval)
if r.interval == 0 {
return nil, fmt.Errorf("interval is unsetted")
}
}
if len(r.pulsarAddr) == 0 {
pulsarAddr := "pulsar://"
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
r.pulsarAddr = pulsarAddr
}
//check if read topic is empty
if len(readTopics) == 0 {
return nil, fmt.Errorf("read topic is empyt")
}
//set default value
if r.readerQueueSize == 0 {
r.readerQueueSize = 1024
}
if readStopFlagClientId >= ReadStopFlagEnd {
return nil, fmt.Errorf("read stop flag client id should less than %d", ReadStopFlagEnd)
}
r.readStopFlagClientId = readStopFlagClientId
r.timesyncMsgChan = make(chan TimeSyncMsg, len(readTopics)*r.readerQueueSize)
r.manipulationReqMsgChan = make(chan *pb.ManipulationReqMsg, len(readTopics)*r.readerQueueSize)
r.revTimesyncFromReader = make(map[uint64]int)
r.ctx, r.cancel = context.WithCancel(context.Background())
client, err := pulsar.NewClient(pulsar.ClientOptions{URL: r.pulsarAddr})
if err != nil {
return nil, fmt.Errorf("connect pulsar failed, %v", err)
}
r.pulsarClient = client
timeSyncChan := make(chan pulsar.ConsumerMessage, len(r.proxyIdList))
if r.timeSyncConsumer, err = r.pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: timeSyncTopic,
SubscriptionName: timeSyncSubName,
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
MessageChannel: timeSyncChan,
}); err != nil {
return nil, fmt.Errorf("failed to subscribe topic %s, error = %v", timeSyncTopic, err)
}
readerChan := make(chan pulsar.ConsumerMessage, len(readTopics)*r.readerQueueSize)
if r.readerConsumer, err = r.pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topics: readTopics,
SubscriptionName: readSubName,
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
MessageChannel: readerChan,
}); err != nil {
return nil, fmt.Errorf("failed to subscrive reader topics : %v, error = %v", readTopics, err)
}
r.readerProducer = make([]pulsar.Producer, 0, len(readTopics))
for i := 0; i < len(readTopics); i++ {
rp, err := r.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: readTopics[i]})
if err != nil {
return nil, fmt.Errorf("failed to create reader producer %s, error = %v", readTopics[i], err)
}
r.readerProducer = append(r.readerProducer, rp)
}
return r, nil
}
func (r *ReaderTimeSyncCfg) Close() {
r.cancel()
r.timeSyncConsumer.Close()
r.readerConsumer.Close()
for i := 0; i < len(r.readerProducer); i++ {
r.readerProducer[i].Close()
}
r.pulsarClient.Close()
}
func (r *ReaderTimeSyncCfg) Start() error {
go r.startReadTopics()
go r.startTimeSync()
return r.ctx.Err()
}
func (r *ReaderTimeSyncCfg) ManipulationReqMsg() <-chan *pb.ManipulationReqMsg {
return r.manipulationReqMsgChan
}
func (r *ReaderTimeSyncCfg) TimeSync() <-chan TimeSyncMsg {
return r.timesyncMsgChan
}
func (r *ReaderTimeSyncCfg) TimeSyncChanLen() int {
return len(r.timesyncMsgChan)
}
func (r *ReaderTimeSyncCfg) IsManipulationReqMsgChanFull() bool {
return len(r.manipulationReqMsgChan) == len(r.readerProducer)*r.readerQueueSize
}
func (r *ReaderTimeSyncCfg) alignTimeSync(ts []*pb.TimeSyncMsg) []*pb.TimeSyncMsg {
if len(r.proxyIdList) > 1 {
if len(ts) > 1 {
for i := 1; i < len(r.proxyIdList); i++ {
curIdx := len(ts) - 1 - i
preIdx := len(ts) - i
timeGap := toMillisecond(ts[curIdx]) - toMillisecond(ts[preIdx])
if int64(timeGap) >= (r.interval/2) || int64(timeGap) <= (-r.interval/2) {
ts = ts[preIdx:]
return ts
}
}
ts = ts[len(ts)-len(r.proxyIdList):]
sort.Slice(ts, func(i int, j int) bool { return ts[i].Peer_Id < ts[j].Peer_Id })
for i := 0; i < len(r.proxyIdList); i++ {
if ts[i].Peer_Id != r.proxyIdList[i] {
ts = ts[:0]
return ts
}
}
}
} else {
if len(ts) > 1 {
ts = ts[len(ts)-1:]
}
}
return ts
}
func (r *ReaderTimeSyncCfg) readTimeSync(ctx context.Context, ts []*pb.TimeSyncMsg, n int) ([]*pb.TimeSyncMsg, error) {
for i := 0; i < n; i++ {
select {
case <-ctx.Done():
return nil, ctx.Err()
case cm, ok := <-r.timeSyncConsumer.Chan():
if ok == false {
return nil, fmt.Errorf("timesync consumer closed")
}
msg := cm.Message
var tsm pb.TimeSyncMsg
if err := proto.Unmarshal(msg.Payload(), &tsm); err != nil {
return nil, err
}
ts = append(ts, &tsm)
r.timeSyncConsumer.AckID(msg.ID())
}
}
return ts, nil
}
func (r *ReaderTimeSyncCfg) sendEOFMsg(ctx context.Context, msg *pulsar.ProducerMessage, index int, wg *sync.WaitGroup) {
if _, err := r.readerProducer[index].Send(ctx, msg); err != nil {
//TODO, log error
log.Printf("Send timesync flag error %v", err)
}
wg.Done()
}
func (r *ReaderTimeSyncCfg) startTimeSync() {
tsm := make([]*pb.TimeSyncMsg, 0, len(r.proxyIdList)*2)
ctx, _ := context.WithCancel(r.ctx)
var err error
for {
//var start time.Time
for len(tsm) != len(r.proxyIdList) {
tsm = r.alignTimeSync(tsm)
tsm, err = r.readTimeSync(ctx, tsm, len(r.proxyIdList)-len(tsm))
if err != nil {
if ctx.Err() != nil {
return
} else {
//TODO, log error msg
log.Printf("read time sync error %v", err)
}
}
}
ts := tsm[0].Timestamp
for i := 1; i < len(tsm); i++ {
if tsm[i].Timestamp < ts {
ts = tsm[i].Timestamp
}
}
tsm = tsm[:0]
//send timestamp flag to reader channel
msg := pb.ManipulationReqMsg{Timestamp: ts, ProxyId: r.readStopFlagClientId}
payload, err := proto.Marshal(&msg)
if err != nil {
//TODO log error
log.Printf("Marshal timesync flag error %v", err)
} else {
wg := sync.WaitGroup{}
wg.Add(len(r.readerProducer))
for index := range r.readerProducer {
go r.sendEOFMsg(ctx, &pulsar.ProducerMessage{Payload: payload}, index, &wg)
}
wg.Wait()
}
}
}
func (r *ReaderTimeSyncCfg) isReadStopFlag(imsg *pb.ManipulationReqMsg) bool {
return imsg.ProxyId < ReadStopFlagEnd
}
func (r *ReaderTimeSyncCfg) startReadTopics() {
ctx, _ := context.WithCancel(r.ctx)
tsm := TimeSyncMsg{Timestamp: 0, NumRecorders: 0}
for {
select {
case <-ctx.Done():
return
case cm, ok := <-r.readerConsumer.Chan():
if ok == false {
log.Printf("reader consumer closed")
return
}
msg := cm.Message
var imsg pb.ManipulationReqMsg
if err := proto.Unmarshal(msg.Payload(), &imsg); err != nil {
//TODO, log error
log.Printf("unmarshal InsertOrDeleteMsg error %v", err)
break
}
if r.isReadStopFlag(&imsg) { //timestamp flag
if imsg.ProxyId == r.readStopFlagClientId {
gval := r.revTimesyncFromReader[imsg.Timestamp]
gval++
if gval >= len(r.readerProducer) {
if imsg.Timestamp >= tsm.Timestamp {
tsm.Timestamp = imsg.Timestamp
r.timesyncMsgChan <- tsm
tsm.NumRecorders = 0
}
delete(r.revTimesyncFromReader, imsg.Timestamp)
} else {
r.revTimesyncFromReader[imsg.Timestamp] = gval
}
}
} else {
if r.IsManipulationReqMsgChanFull() {
log.Printf("WARN : Insert or delete chan is full ...")
}
tsm.NumRecorders++
r.manipulationReqMsgChan <- &imsg
}
r.readerConsumer.AckID(msg.ID())
}
}
}
func WithReaderQueueSize(size int) ReaderTimeSyncOption {
return func(r *ReaderTimeSyncCfg) {
r.readerQueueSize = size
}
}
func WithPulsarAddress(addr string) ReaderTimeSyncOption {
return func(r *ReaderTimeSyncCfg) {
r.pulsarAddr = addr
}
}
func WithInterval(interval int64) ReaderTimeSyncOption {
return func(r *ReaderTimeSyncCfg) {
r.interval = interval
}
}

View File

@ -0,0 +1,564 @@
package proxy_node
import (
"context"
"github.com/apache/pulsar-client-go/pulsar"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"log"
"sync"
"testing"
"time"
)
const (
timeSyncTopic = "rtimesync"
timeSyncTopic2 = "rtimesync2"
timeSyncTopic3 = "rtimesync3"
timeSyncSubName = "rtimesync-g"
timeSyncSubName1 = "rtimesync-g1"
timeSyncSubName2 = "rtimesync-g2"
timeSyncSubName3 = "rtimesync-g3"
readerTopic1 = "rreader1"
readerTopic12 = "rreader12"
readerTopic13 = "rreader13"
readerTopic2 = "rreader2"
readerTopic22 = "rreader22"
readerTopic23 = "rreader23"
readerTopic3 = "rreader3"
readerTopic32 = "rreader32"
readerTopic33 = "rreader33"
readerTopic4 = "rreader4"
readerTopic42 = "rreader42"
readerTopic43 = "rreader43"
readerSubName = "rreader-g"
readerSubName1 = "rreader-g1"
readerSubName2 = "rreader-g2"
readerSubName3 = "rreader-g3"
interval = 200
readStopFlag int64 = -1
readStopFlag1 int64 = -1
readStopFlag2 int64 = -2
readStopFlag3 int64 = -3
)
func TestAlignTimeSync(t *testing.T) {
r := &ReaderTimeSyncCfg{
proxyIdList: []int64{1, 2, 3},
interval: 200,
}
ts := []*pb.TimeSyncMsg{
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 3,
Timestamp: toTimestamp(15),
},
{
Peer_Id: 2,
Timestamp: toTimestamp(20),
},
}
r.alignTimeSync(ts)
assert.Equalf(t, len(r.proxyIdList), 3, "proxyIdList should be : 1 2 3")
for i := 0; i < len(r.proxyIdList); i++ {
assert.Equal(t, r.proxyIdList[i], ts[i].Peer_Id)
}
}
func TestAlignTimeSync2(t *testing.T) {
r := &ReaderTimeSyncCfg{
proxyIdList: []int64{1, 2, 3},
interval: 200,
}
ts := []*pb.TimeSyncMsg{
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 3,
Timestamp: toTimestamp(150),
},
{
Peer_Id: 2,
Timestamp: toTimestamp(20),
},
}
ts = r.alignTimeSync(ts)
assert.Equalf(t, len(r.proxyIdList), 3, "proxyIdList should be : 1 2 3")
assert.Equal(t, len(ts), 1)
assert.Equal(t, ts[0].Peer_Id, int64(2))
}
func TestAlignTimeSync3(t *testing.T) {
r := &ReaderTimeSyncCfg{
proxyIdList: []int64{1, 2, 3},
interval: 200,
}
ts := []*pb.TimeSyncMsg{
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 3,
Timestamp: toTimestamp(15),
},
{
Peer_Id: 2,
Timestamp: toTimestamp(20),
},
}
ts = r.alignTimeSync(ts)
assert.Equalf(t, len(r.proxyIdList), 3, "proxyIdList should be : 1 2 3")
for i := 0; i < len(r.proxyIdList); i++ {
assert.Equal(t, r.proxyIdList[i], ts[i].Peer_Id)
}
}
func TestAlignTimeSync4(t *testing.T) {
r := &ReaderTimeSyncCfg{
proxyIdList: []int64{1},
interval: 200,
}
ts := []*pb.TimeSyncMsg{
{
Peer_Id: 1,
Timestamp: toTimestamp(15),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(25),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(35),
},
}
ts = r.alignTimeSync(ts)
assert.Equalf(t, len(r.proxyIdList), 1, "proxyIdList should be : 1")
assert.Equal(t, len(ts), 1)
assert.Equal(t, getMillisecond(ts[0].Timestamp), uint64(35))
}
func TestAlignTimeSync5(t *testing.T) {
r := &ReaderTimeSyncCfg{
proxyIdList: []int64{1, 2, 3},
interval: 200,
}
ts := []*pb.TimeSyncMsg{
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 3,
Timestamp: toTimestamp(15),
},
{
Peer_Id: 3,
Timestamp: toTimestamp(20),
},
}
ts = r.alignTimeSync(ts)
assert.Zero(t, len(ts))
}
func TestNewReaderTimeSync(t *testing.T) {
r, err := NewReaderTimeSync(
timeSyncTopic,
timeSyncSubName,
[]string{readerTopic1, readerTopic2, readerTopic3, readerTopic4},
readerSubName,
[]int64{2, 1},
readStopFlag,
WithPulsarAddress("pulsar://localhost:6650"),
WithInterval(interval),
WithReaderQueueSize(8),
)
assert.Nil(t, err)
rr := r.(*ReaderTimeSyncCfg)
assert.NotNil(t, rr.pulsarClient)
assert.NotNil(t, rr.timeSyncConsumer)
assert.NotNil(t, rr.readerConsumer)
assert.NotNil(t, rr.readerProducer)
assert.Equal(t, rr.interval, int64(interval))
assert.Equal(t, rr.readStopFlagClientId, int64(readStopFlag))
assert.Equal(t, rr.readerQueueSize, 8)
assert.Equal(t, len(rr.proxyIdList), 2)
assert.Equal(t, rr.proxyIdList[0], int64(1))
assert.Equal(t, rr.proxyIdList[1], int64(2))
r.Close()
}
func TestPulsarClient(t *testing.T) {
t.Skip("skip pulsar client")
client, err := pulsar.NewClient(pulsar.ClientOptions{URL: "pulsar://localhost:6650"})
assert.Nil(t, err)
ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
go startWriteTimeSync(1, timeSyncTopic, client, 2*time.Second, t)
go startWriteTimeSync(2, timeSyncTopic, client, 2*time.Second, t)
timeSyncChan := make(chan pulsar.ConsumerMessage)
consumer, err := client.Subscribe(pulsar.ConsumerOptions{
Topic: timeSyncTopic,
SubscriptionName: timeSyncSubName,
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
MessageChannel: timeSyncChan,
})
assert.Nil(t, err)
for {
select {
case cm := <-timeSyncChan:
msg := cm.Message
var tsm pb.TimeSyncMsg
if err := proto.Unmarshal(msg.Payload(), &tsm); err != nil {
log.Fatal(err)
}
consumer.AckID(msg.ID())
log.Printf("read time stamp, id = %d, time stamp = %d\n", tsm.Peer_Id, tsm.Timestamp)
case <-ctx.Done():
break
}
if ctx.Err() != nil {
break
}
}
}
func TestReaderTimesync(t *testing.T) {
r, err := NewReaderTimeSync(timeSyncTopic,
timeSyncSubName,
[]string{readerTopic1, readerTopic2, readerTopic3, readerTopic4},
readerSubName,
[]int64{2, 1},
readStopFlag,
WithPulsarAddress("pulsar://localhost:6650"),
WithInterval(interval),
WithReaderQueueSize(1024),
)
assert.Nil(t, err)
rr := r.(*ReaderTimeSyncCfg)
pt1, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: timeSyncTopic})
assert.Nil(t, err)
pt2, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: timeSyncTopic})
assert.Nil(t, err)
pr1, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic1})
assert.Nil(t, err)
pr2, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic2})
assert.Nil(t, err)
pr3, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic3})
assert.Nil(t, err)
pr4, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic4})
assert.Nil(t, err)
go startProxy(pt1, 1, pr1, 1, pr2, 2, 2*time.Second, t)
go startProxy(pt2, 2, pr3, 3, pr4, 4, 2*time.Second, t)
ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
r.Start()
var tsm1, tsm2 TimeSyncMsg
var totalRecords int64 = 0
for {
if ctx.Err() != nil {
break
}
select {
case <-ctx.Done():
tsm1.NumRecorders = 0
break
case tsm1 = <-r.TimeSync():
}
if tsm1.NumRecorders > 0 {
log.Printf("timestamp %d, num records = %d", getMillisecond(tsm1.Timestamp), tsm1.NumRecorders)
totalRecords += tsm1.NumRecorders
for i := int64(0); i < tsm1.NumRecorders; i++ {
im := <-r.ManipulationReqMsg()
//log.Printf("%d - %d", getMillisecond(im.Timestamp), getMillisecond(tsm2.Timestamp))
if im.Timestamp < tsm2.Timestamp {
t.Fatalf("time sync error , im.Timestamp = %d, tsm2.Timestamp = %d", im.Timestamp, tsm2.Timestamp)
}
}
tsm2 = tsm1
}
}
log.Printf("total recordes = %d", totalRecordes)
if totalRecordes != 800 {
t.Fatalf("total records should be 800")
}
r.Close()
pt1.Close()
pt2.Close()
pr1.Close()
pr2.Close()
pr3.Close()
pr4.Close()
}
func TestReaderTimesync2(t *testing.T) {
client, _ := pulsar.NewClient(pulsar.ClientOptions{URL: "pulsar://localhost:6650"})
pt1, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: timeSyncTopic2})
pt2, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: timeSyncTopic2})
pr1, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic12})
pr2, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic22})
pr3, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic32})
pr4, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic42})
go startProxy(pt1, 1, pr1, 1, pr2, 2, 2*time.Second, t)
go startProxy(pt2, 2, pr3, 3, pr4, 4, 2*time.Second, t)
r1, _ := NewReaderTimeSync(timeSyncTopic2,
timeSyncSubName1,
[]string{readerTopic12, readerTopic22, readerTopic32, readerTopic42},
readerSubName1,
[]int64{2, 1},
readStopFlag1,
WithPulsarAddress("pulsar://localhost:6650"),
WithInterval(interval),
WithReaderQueueSize(1024),
)
r2, _ := NewReaderTimeSync(timeSyncTopic2,
timeSyncSubName2,
[]string{readerTopic12, readerTopic22, readerTopic32, readerTopic42},
readerSubName2,
[]int64{2, 1},
readStopFlag2,
WithPulsarAddress("pulsar://localhost:6650"),
WithInterval(interval),
WithReaderQueueSize(1024),
)
ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
rt := []ReaderTimeSync{r1, r2}
var wg sync.WaitGroup
for _, r := range rt {
r := r
_ = r.Start()
wg.Add(1)
go func() {
var tsm1, tsm2 TimeSyncMsg
var totalRecords int64 = 0
work := false
defer wg.Done()
for {
if ctx.Err() != nil {
break
}
select {
case tsm1 = <-r.TimeSync():
work = true
default:
work = false
}
if work {
if tsm1.NumRecorders > 0 {
//log.Printf("timestamp %d, num records = %d", getMillisecond(tsm1.Timestamp), tsm1.NumRecorders)
totalRecords += tsm1.NumRecorders
for i := int64(0); i < tsm1.NumRecorders; i++ {
im := <-r.ManipulationReqMsg()
//log.Printf("%d - %d", getMillisecond(im.Timestamp), getMillisecond(tsm2.Timestamp))
assert.GreaterOrEqual(t, im.Timestamp, tsm2.Timestamp)
}
tsm2 = tsm1
}
}
}
log.Printf("total recordes = %d", totalRecordes)
assert.Equal(t, totalRecordes, int64(800))
}()
}
wg.Wait()
r1.Close()
r2.Close()
pt1.Close()
pt2.Close()
pr1.Close()
pr2.Close()
pr3.Close()
pr4.Close()
}
func TestReaderTimesync3(t *testing.T) {
client, _ := pulsar.NewClient(pulsar.ClientOptions{URL: "pulsar://localhost:6650"})
pt, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: timeSyncTopic3})
pr1, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic13})
pr2, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic23})
pr3, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic33})
pr4, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic43})
defer func() {
pr1.Close()
pr2.Close()
pr3.Close()
pr4.Close()
pt.Close()
client.Close()
}()
go func() {
total := 2 * 1000 / 10
ticker := time.Tick(10 * time.Millisecond)
var timestamp uint64 = 0
prlist := []pulsar.Producer{pr1, pr2, pr3, pr4}
for i := 1; i <= total; i++ {
<-ticker
timestamp += 10
for idx, pr := range prlist {
msg := pb.ManipulationReqMsg{ProxyId: int64(idx + 1), Timestamp: toTimestamp(timestamp)}
mb, err := proto.Marshal(&msg)
assert.Nil(t, err)
if _, err := pr.Send(context.Background(), &pulsar.ProducerMessage{Payload: mb}); err != nil {
t.Fatal(err)
}
}
if i%20 == 0 {
tm := pb.TimeSyncMsg{Peer_Id: 1, Timestamp: toTimestamp(timestamp)}
tb, err := proto.Marshal(&tm)
assert.Nil(t, err)
if _, err := pt.Send(context.Background(), &pulsar.ProducerMessage{Payload: tb}); err != nil {
t.Fatal(err)
}
}
}
}()
r, err := NewReaderTimeSync(timeSyncTopic3,
timeSyncSubName3,
[]string{readerTopic13, readerTopic23, readerTopic33, readerTopic43},
readerSubName3,
[]int64{1},
readStopFlag3,
WithPulsarAddress("pulsar://localhost:6650"),
WithInterval(interval),
WithReaderQueueSize(1024))
assert.Nil(t, err)
defer r.Close()
ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
if err := r.Start(); err != nil {
t.Fatal(err)
}
var tsm1, tsm2 TimeSyncMsg
var totalRecords int64 = 0
for {
if ctx.Err() != nil {
break
}
select {
case <-ctx.Done():
tsm1.NumRecorders = 0
break
case tsm1 = <-r.TimeSync():
}
if tsm1.NumRecorders > 0 {
totalRecords += tsm1.NumRecorders
for i := int64(0); i < tsm1.NumRecorders; i++ {
im := <-r.ManipulationReqMsg()
assert.GreaterOrEqual(t, im.Timestamp, tsm2.Timestamp)
}
tsm2 = tsm1
}
}
log.Printf("total records = %d", totalRecords)
assert.Equal(t, totalRecords, int64(800))
}
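// getMillisecond and toTimestamp convert between plain millisecond values and the hybrid timestamps used in these tests: the physical time sits above bit 18, while the low 18 bits are reserved for the logical counter and peer id (see timestampOracle further below).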
func getMillisecond(ts uint64) uint64 {
return ts >> 18
}
func toTimestamp(ts uint64) uint64 {
return ts << 18
}
func startWriteTimeSync(id int64, topic string, client pulsar.Client, duration time.Duration, t *testing.T) {
p, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: topic})
ticker := time.Tick(interval * time.Millisecond)
numSteps := int(duration / (interval * time.Millisecond))
var tm uint64 = 0
for i := 0; i < numSteps; i++ {
<-ticker
tm += interval
tsm := pb.TimeSyncMsg{Timestamp: toTimestamp(tm), Peer_Id: id}
tb, _ := proto.Marshal(&tsm)
if _, err := p.Send(context.Background(), &pulsar.ProducerMessage{Payload: tb}); err != nil {
t.Fatalf("send failed tsm id=%d, timestamp=%d, err=%v", tsm.Peer_Id, tsm.Timestamp, err)
} else {
//log.Printf("send tsm id=%d, timestamp=%d", tsm.Peer_Id, tsm.Timestamp)
}
}
}
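// startProxy emulates a proxy node: every 10ms it publishes one manipulation message on each of its two reader topics, and every 20th tick (every 200ms) one time-sync message. Over a 2s run that is 2 topics x 200 ticks = 400 records per proxy, which is where the expected total of 800 records in the tests above comes from.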
func startProxy(pt pulsar.Producer, ptid int64, pr1 pulsar.Producer, prid1 int64, pr2 pulsar.Producer, prid2 int64, duration time.Duration, t *testing.T) {
total := int(duration / (10 * time.Millisecond))
ticker := time.Tick(10 * time.Millisecond)
var timestamp uint64 = 0
for i := 1; i <= total; i++ {
<-ticker
timestamp += 10
msg := pb.ManipulationReqMsg{ProxyId: int64(prid1), Timestamp: toTimestamp(timestamp)}
mb, err := proto.Marshal(&msg)
if err != nil {
t.Fatalf("marshal error %v", err)
}
if _, err := pr1.Send(context.Background(), &pulsar.ProducerMessage{Payload: mb}); err != nil {
t.Fatalf("send msg error %v", err)
}
msg.ProxyId = prid2
mb, err = proto.Marshal(&msg)
if err != nil {
t.Fatalf("marshal error %v", err)
}
if _, err := pr2.Send(context.Background(), &pulsar.ProducerMessage{Payload: mb}); err != nil {
t.Fatalf("send msg error %v", err)
}
//log.Printf("send msg id = [ %d %d ], timestamp = %d", prid1, prid2, timestamp)
if i%20 == 0 {
tm := pb.TimeSyncMsg{Peer_Id: ptid, Timestamp: toTimestamp(timestamp)}
tb, err := proto.Marshal(&tm)
if err != nil {
t.Fatalf("marshal error %v", err)
}
if _, err := pt.Send(context.Background(), &pulsar.ProducerMessage{Payload: tb}); err != nil {
t.Fatalf("send msg error %v", err)
}
//log.Printf("send timestamp id = %d, timestamp = %d", ptid, timestamp)
}
}
}

View File

@ -0,0 +1,56 @@
package proxy_node
import "sync"
type requestScheduler struct {
//definitions requestQueue
//manipulations requestQueue
manipulationsChan chan *manipulationReq // manipulation queue
m_timestamp Timestamp
m_timestamp_mux sync.Mutex
//queries requestQueue
queryChan chan *queryReq
q_timestamp Timestamp
q_timestamp_mux sync.Mutex
}
// @param selection
// bit_0 = 1: select definition queue
// bit_1 = 1: select manipulation queue
// bit_2 = 1: select query queue
// example: if mode = 3, then both definition and manipulation queues are selected
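// Note that only bit_1 and bit_2 are checked below; the definition queue is still commented out in the struct above, so bit_0 is currently ignored.
// Example: AreRequestsDelivered(ts, 6) reports whether both the manipulation and the query queues have caught up to ts (or are empty).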
func (rs *requestScheduler) AreRequestsDelivered(ts Timestamp, selection uint32) bool {
r1 := func() bool {
if selection&uint32(2) == 0 {
return true
}
rs.m_timestamp_mux.Lock()
defer rs.m_timestamp_mux.Unlock()
if rs.m_timestamp >= ts {
return true
}
if len(rs.manipulationsChan) == 0 {
return true
}
return false
}()
r2 := func() bool {
if selection&uint32(4) == 0 {
return true
}
rs.q_timestamp_mux.Lock()
defer rs.q_timestamp_mux.Unlock()
if rs.q_timestamp >= ts {
return true
}
if len(rs.queryChan) == 0 {
return true
}
return false
}()
return r1 && r2
}

View File

@ -0,0 +1,454 @@
package proxy_node
import (
"context"
"encoding/json"
"fmt"
mpb "github.com/czs007/suvlim/pkg/master/grpc/master"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
etcd "go.etcd.io/etcd/clientv3"
"go.uber.org/atomic"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"log"
"net"
"sync"
"time"
)
const (
keyCollectionPath = "collection"
keySegmentPath = "segment"
)
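// proxyServer implements the Milvus gRPC service on the proxy side: DDL requests (CreateCollection, CreateIndex) are forwarded to the master, DML and query requests are routed to Pulsar via the request scheduler, and collection/segment metadata is cached locally and kept in sync with etcd by WatchEtcd (CountCollection is answered from this cache).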
type proxyServer struct {
pb.UnimplementedMilvusServiceServer
address string
master_address string
rootPath string // etcd root path
pulsarAddr string // pulsar address for reader
readerTopics []string //reader topics
deleteTopic string
queryTopic string
resultTopic string
resultGroup string
numReaderNode int
proxyId int64
getTimestamp func(count uint32) ([]Timestamp, pb.Status)
client *etcd.Client
ctx context.Context
////////////////////////////////////////////////////////////////
master_conn *grpc.ClientConn
master_client mpb.MasterClient
grpcServer *grpc.Server
reqSch *requestScheduler
///////////////////////////////////////////////////////////////
collection_list map[uint64]*mpb.Collection
name_collection_id map[string]uint64
segment_list map[uint64]*mpb.Segment
collection_mux sync.Mutex
queryId atomic.Uint64
}
func (s *proxyServer) CreateCollection(ctx context.Context, req *pb.Mapping) (*pb.Status, error) {
log.Printf("create collection %s", req.CollectionName)
return s.master_client.CreateCollection(ctx, req)
}
func (s *proxyServer) CountCollection(ctx context.Context, req *pb.CollectionName) (*pb.CollectionRowCount, error) {
s.collection_mux.Lock()
defer s.collection_mux.Unlock()
collection_id, ok := s.name_collection_id[req.CollectionName]
if !ok {
return &pb.CollectionRowCount{
CollectionRowCount: 0,
Status: &pb.Status{
ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR,
Reason: fmt.Sprintf("unable to get collection %s", req.CollectionName),
},
}, nil
}
if info, ok := s.collection_list[collection_id]; ok {
count := int64(0)
for _, seg_id := range info.SegmentIds {
if seg, ok := s.segment_list[seg_id]; ok {
count += seg.Rows
}
}
return &pb.CollectionRowCount{
CollectionRowCount: count,
Status: &pb.Status{
ErrorCode: pb.ErrorCode_SUCCESS,
},
}, nil
}
return &pb.CollectionRowCount{
CollectionRowCount: 0,
Status: &pb.Status{
ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR,
Reason: fmt.Sprintf("unable to get collection %s", req.CollectionName),
},
}, nil
}
func (s *proxyServer) CreateIndex(ctx context.Context, req *pb.IndexParam) (*pb.Status, error) {
log.Printf("create index, collection name = %s, index name = %s, filed_name = %s", req.CollectionName, req.IndexName, req.FieldName)
return s.master_client.CreateIndex(ctx, req)
}
func (s *proxyServer) DeleteByID(ctx context.Context, req *pb.DeleteByIDParam) (*pb.Status, error) {
log.Printf("delete entites, total = %d", len(req.IdArray))
pm := &manipulationReq{
ManipulationReqMsg: pb.ManipulationReqMsg{
CollectionName: req.CollectionName,
ReqType: pb.ReqType_kDeleteEntityByID,
ProxyId: s.proxyId,
},
proxy: s,
}
for _, id := range req.IdArray {
pm.PrimaryKeys = append(pm.PrimaryKeys, uint64(id))
}
if len(pm.PrimaryKeys) > 1 {
if st := pm.PreExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &st, nil
}
if st := pm.Execute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &st, nil
}
if st := pm.PostExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &st, nil
}
if st := pm.WaitToFinish(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &st, nil
}
}
return &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}, nil
}
func (s *proxyServer) Insert(ctx context.Context, req *pb.InsertParam) (*pb.EntityIds, error) {
log.Printf("Insert Entities, total = %d", len(req.RowsData))
ipm := make(map[uint32]*manipulationReq)
//TODO
if len(req.EntityIdArray) == 0 { //primary key is empty, set primary key by server
log.Printf("Set primary key")
}
if len(req.EntityIdArray) != len(req.RowsData) {
return &pb.EntityIds{
Status: &pb.Status{
ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR,
Reason: fmt.Sprintf("length of EntityIdArray not equal to lenght of RowsData"),
},
EntityIdArray: req.EntityIdArray,
}, nil
}
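// Group the rows by reader channel: each entity id is hashed with murmur3 onto one of the reader topics, and all rows landing on the same channel share a single manipulation request that targets an open segment covering that channel.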
for i := 0; i < len(req.EntityIdArray); i++ {
key := uint64(req.EntityIdArray[i])
hash, err := Hash32_Uint64(key)
if err != nil {
return nil, status.Errorf(codes.Unknown, "hash failed on %d", key)
}
hash = hash % uint32(len(s.readerTopics))
ip, ok := ipm[hash]
if !ok {
segId, err := s.getSegmentId(int32(hash), req.CollectionName)
if err != nil {
return nil, err
}
ipm[hash] = &manipulationReq{
ManipulationReqMsg: pb.ManipulationReqMsg{
CollectionName: req.CollectionName,
PartitionTag: req.PartitionTag,
SegmentId: segId,
ChannelId: uint64(hash),
ReqType: pb.ReqType_kInsert,
ProxyId: s.proxyId,
ExtraParams: req.ExtraParams,
},
proxy: s,
}
ip = ipm[hash]
}
ip.PrimaryKeys = append(ip.PrimaryKeys, key)
ip.RowsData = append(ip.RowsData, req.RowsData[i])
}
for _, ip := range ipm {
if st := ip.PreExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS { //do nothing
return &pb.EntityIds{
Status: &st,
EntityIdArray: req.EntityIdArray,
}, nil
}
if st := ip.Execute(); st.ErrorCode != pb.ErrorCode_SUCCESS { // push into chan
return &pb.EntityIds{
Status: &st,
EntityIdArray: req.EntityIdArray,
}, nil
}
if st := ip.PostExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS { //post to pulsar
return &pb.EntityIds{
Status: &st,
EntityIdArray: req.EntityIdArray,
}, nil
}
}
for _, ip := range ipm {
if st := ip.WaitToFinish(); st.ErrorCode != pb.ErrorCode_SUCCESS {
log.Printf("Wait to finish failed, error code = %d", st.ErrorCode)
}
}
return &pb.EntityIds{
Status: &pb.Status{
ErrorCode: pb.ErrorCode_SUCCESS,
},
EntityIdArray: req.EntityIdArray,
}, nil
}
func (s *proxyServer) Search(ctx context.Context, req *pb.SearchParam) (*pb.QueryResult, error) {
qm := &queryReq{
QueryReqMsg: pb.QueryReqMsg{
CollectionName: req.CollectionName,
VectorParam: req.VectorParam,
PartitionTags: req.PartitionTag,
Dsl: req.Dsl,
ExtraParams: req.ExtraParams,
ProxyId: s.proxyId,
QueryId: s.queryId.Add(1),
ReqType: pb.ReqType_kSearch,
},
proxy: s,
}
log.Printf("search on collection %s, proxy id = %d, query id = %d", req.CollectionName, qm.ProxyId, qm.QueryId)
if st := qm.PreExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &pb.QueryResult{
Status: &st,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}, nil
}
if st := qm.Execute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &pb.QueryResult{
Status: &st,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}, nil
}
if st := qm.PostExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &pb.QueryResult{
Status: &st,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}, nil
}
if st := qm.WaitToFinish(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &pb.QueryResult{
Status: &st,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}, nil
}
return s.reduceResult(qm), nil
}
// check if proxyServer is set up correctly
func (s *proxyServer) check() error {
if len(s.address) == 0 {
return fmt.Errorf("proxy address is unset")
}
if len(s.master_address) == 0 {
return fmt.Errorf("master address is unset")
}
if len(s.rootPath) == 0 {
return fmt.Errorf("root path for etcd is unset")
}
if len(s.pulsarAddr) == 0 {
return fmt.Errorf("pulsar address is unset")
}
if len(s.readerTopics) == 0 {
return fmt.Errorf("reader topics is unset")
}
if len(s.deleteTopic) == 0 {
return fmt.Errorf("delete topic is unset")
}
if len(s.queryTopic) == 0 {
return fmt.Errorf("query topic is unset")
}
if len(s.resultTopic) == 0 {
return fmt.Errorf("result topic is unset")
}
if len(s.resultGroup) == 0 {
return fmt.Errorf("result group is unset")
}
if s.numReaderNode <= 0 {
return fmt.Errorf("number of reader nodes is unset")
}
if s.proxyId <= 0 {
return fmt.Errorf("proxyId is unset")
}
log.Printf("proxy id = %d", s.proxyId)
if s.getTimestamp == nil {
return fmt.Errorf("getTimestamp is unset")
}
if s.client == nil {
return fmt.Errorf("etcd client is unset")
}
if s.ctx == nil {
return fmt.Errorf("context is unset")
}
return nil
}
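// getSegmentId returns, under the collection lock, an OPENED segment of the given collection whose [ChannelStart, ChannelEnd) range contains channelId.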
func (s *proxyServer) getSegmentId(channelId int32, colName string) (uint64, error) {
s.collection_mux.Lock()
defer s.collection_mux.Unlock()
colId, ok := s.name_collection_id[colName]
if !ok {
return 0, status.Errorf(codes.Unknown, "can't get collection id of %s", colName)
}
colInfo, ok := s.collection_list[colId]
if !ok {
return 0, status.Errorf(codes.Unknown, "can't get collection, name = %s, id = %d", colName, colId)
}
for _, segId := range colInfo.SegmentIds {
seg, ok := s.segment_list[segId]
if !ok {
return 0, status.Errorf(codes.Unknown, "can't get segment of %d", segId)
}
if seg.Status == mpb.SegmentStatus_OPENED {
if seg.ChannelStart <= channelId && channelId < seg.ChannelEnd {
return segId, nil
}
}
}
return 0, status.Errorf(codes.Unknown, "can't get segment id, channel id = %d", channelId)
}
func (s *proxyServer) connectMaster() error {
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
conn, err := grpc.DialContext(ctx, s.master_address, grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
return err
}
s.master_conn = conn
s.master_client = mpb.NewMasterClient(conn)
return nil
}
func (s *proxyServer) Close() {
s.client.Close()
s.master_conn.Close()
s.grpcServer.Stop()
}
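// WatchEtcd loads the current collection and segment metadata from etcd, then keeps the local caches up to date by watching both key prefixes starting from the revision right after the initial read.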
func (s *proxyServer) WatchEtcd() error {
s.collection_mux.Lock()
defer s.collection_mux.Unlock()
cos, err := s.client.Get(s.ctx, s.rootPath+"/"+keyCollectionPath, etcd.WithPrefix())
if err != nil {
return err
}
for _, cob := range cos.Kvs {
var co mpb.Collection
if err := json.Unmarshal(cob.Value, &co); err != nil {
return err
}
s.name_collection_id[co.Name] = co.Id
s.collection_list[co.Id] = &co
log.Printf("watch collection, name = %s, id = %d", co.Name, co.Id)
}
segs, err := s.client.Get(s.ctx, s.rootPath+"/"+keySegmentPath, etcd.WithPrefix())
if err != nil {
return err
}
for _, segb := range segs.Kvs {
var seg mpb.Segment
if err := json.Unmarshal(segb.Value, &seg); err != nil {
return err
}
s.segment_list[seg.SegmentId] = &seg
log.Printf("watch segment id = %d\n", seg.SegmentId)
}
cow := s.client.Watch(s.ctx, s.rootPath+"/"+keyCollectionPath, etcd.WithPrefix(), etcd.WithRev(cos.Header.Revision+1))
segw := s.client.Watch(s.ctx, s.rootPath+"/"+keySegmentPath, etcd.WithPrefix(), etcd.WithRev(segs.Header.Revision+1))
go func() {
for {
select {
case <-s.ctx.Done():
return
case coe := <-cow:
func() {
s.collection_mux.Lock()
defer s.collection_mux.Unlock()
for _, e := range coe.Events {
var co mpb.Collection
if err := json.Unmarshal(e.Kv.Value, &co); err != nil {
log.Printf("unmarshal Collection failed, error = %v", err)
} else {
s.name_collection_id[co.Name] = co.Id
s.collection_list[co.Id] = &co
log.Printf("watch collection, name = %s, id = %d", co.Name, co.Id)
}
}
}()
case sege := <-segw:
func() {
s.collection_mux.Lock()
defer s.collection_mux.Unlock()
for _, e := range sege.Events {
var seg mpb.Segment
if err := json.Unmarshal(e.Kv.Value, &seg); err != nil {
log.Printf("unmarshal Segment failed, error = %v", err)
} else {
s.segment_list[seg.SegmentId] = &seg
log.Printf("watch segment id = %d\n", seg.SegmentId)
}
}
}()
}
}
}()
return nil
}
func startProxyServer(srv *proxyServer) error {
if err := srv.check(); err != nil {
return err
}
srv.reqSch = &requestScheduler{}
if err := srv.restartManipulationRoutine(1024); err != nil {
return err
}
if err := srv.restartQueryRoutine(1024); err != nil {
return err
}
srv.name_collection_id = make(map[string]uint64)
srv.collection_list = make(map[uint64]*mpb.Collection)
srv.segment_list = make(map[uint64]*mpb.Segment)
if err := srv.connectMaster(); err != nil {
return err
}
if err := srv.WatchEtcd(); err != nil {
return err
}
srv.queryId.Store(uint64(time.Now().UnixNano()))
lis, err := net.Listen("tcp", srv.address)
if err != nil {
return err
}
s := grpc.NewServer()
pb.RegisterMilvusServiceServer(s, srv)
return s.Serve(lis)
}

View File

@ -0,0 +1,522 @@
package proxy_node
import (
"context"
"encoding/binary"
"encoding/json"
"github.com/apache/pulsar-client-go/pulsar"
mpb "github.com/czs007/suvlim/pkg/master/grpc/master"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
"net"
"sort"
"testing"
"time"
"unsafe"
)
type testMasterServer struct {
mpb.UnimplementedMasterServer
}
func (*testMasterServer) CreateCollection(ctx context.Context, req *pb.Mapping) (*pb.Status, error) {
return &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS, Reason: req.CollectionName}, nil
}
func (*testMasterServer) CreateIndex(ctx context.Context, req *pb.IndexParam) (*pb.Status, error) {
return &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS, Reason: req.IndexName}, nil
}
func startTestMaster(master_addr string, t *testing.T) *grpc.Server {
lis, err := net.Listen("tcp", master_addr)
assert.Nil(t, err)
s := grpc.NewServer()
mpb.RegisterMasterServer(s, &testMasterServer{})
go func() {
if err := s.Serve(lis); err != nil {
t.Fatal(err)
}
}()
return s
}
func startTestProxyServer(proxy_addr string, master_addr string, t *testing.T) *proxyServer {
client, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
assert.Nil(t, err)
ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
var timestamp uint64 = 1000
p := &proxyServer{
address: proxy_addr,
master_address: master_addr,
rootPath: "/proxy/root",
pulsarAddr: "pulsar://localhost:6650",
readerTopics: []string{"reader1", "reader2"},
deleteTopic: "deleteT",
queryTopic: "queryer",
resultTopic: "resulter",
resultGroup: "reusltG",
numReaderNode: 2,
proxyId: 1,
getTimestamp: func(count uint32) ([]Timestamp, pb.Status) {
timestamp += 100
t := make([]Timestamp, count)
for i := 0; i < int(count); i++ {
t[i] = Timestamp(timestamp)
}
return t, pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
},
client: client,
ctx: ctx,
}
go func() {
if err := startProxyServer(p); err != nil {
t.Fatal(err)
}
}()
return p
}
func uint64ToBytes(v uint64) []byte {
b := make([]byte, unsafe.Sizeof(v))
binary.LittleEndian.PutUint64(b, v)
return b
}
func TestProxyServer_CreateCollectionAndIndex(t *testing.T) {
_ = startTestMaster("localhost:10000", t)
//defer ms.Stop()
time.Sleep(100 * time.Millisecond)
ps := startTestProxyServer("localhost:10001", "localhost:10000", t)
//defer ps.Close()
time.Sleep(100 * time.Millisecond)
ctx := ps.ctx
conn, err := grpc.DialContext(ctx, "localhost:10001", grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
cli := pb.NewMilvusServiceClient(conn)
st, err := cli.CreateCollection(ctx, &pb.Mapping{CollectionName: "testCollectionName"})
assert.Nil(t, err)
assert.Equalf(t, st.ErrorCode, pb.ErrorCode_SUCCESS, "CreateCollection failed")
assert.Equalf(t, st.Reason, "testCollectionName", "CreateCollection failed")
st, err = cli.CreateIndex(ctx, &pb.IndexParam{IndexName: "testIndexName"})
assert.Nil(t, err)
assert.Equalf(t, st.ErrorCode, pb.ErrorCode_SUCCESS, "CreateIndex failed")
assert.Equalf(t, st.Reason, "testIndexName", "CreateIndex failed")
}
func TestProxyServer_WatchEtcd(t *testing.T) {
client, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
assert.Nil(t, err)
defer client.Close()
ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
col1 := mpb.Collection{
Id: 1,
Name: "c1",
SegmentIds: []uint64{2, 3},
}
seg2 := mpb.Segment{
SegmentId: 2,
Rows: 10,
}
seg3 := mpb.Segment{
SegmentId: 3,
Rows: 10,
}
if cb1, err := json.Marshal(&col1); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keyCollectionPath+"/1", string(cb1)); err != nil {
t.Fatal(err)
}
if sb2, err := json.Marshal(&seg2); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keySegmentPath+"/2", string(sb2)); err != nil {
t.Fatal(err)
}
if sb3, err := json.Marshal(&seg3); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keySegmentPath+"/3", string(sb3)); err != nil {
t.Fatal(err)
}
_ = startTestMaster("localhost:10002", t)
//defer ms.Stop()
time.Sleep(100 * time.Millisecond)
ps := startTestProxyServer("localhost:10003", "localhost:10002", t)
//defer ps.Close()
time.Sleep(100 * time.Millisecond)
conn, err := grpc.DialContext(ps.ctx, "localhost:10003", grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
cli := pb.NewMilvusServiceClient(conn)
cr, err := cli.CountCollection(ps.ctx, &pb.CollectionName{CollectionName: "c1"})
assert.Nil(t, err)
assert.Equalf(t, cr.Status.ErrorCode, pb.ErrorCode_SUCCESS, "CountCollection failed : %s", cr.Status.Reason)
assert.Equalf(t, cr.CollectionRowCount, int64(20), "collection count expected to be 20, count = %d", cr.CollectionRowCount)
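// Write a new collection and segment after the proxy has started so that they arrive through the etcd watch rather than the initial load.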
col4 := mpb.Collection{
Id: 4,
Name: "c4",
SegmentIds: []uint64{5},
}
seg5 := mpb.Segment{
SegmentId: 5,
Rows: 10,
}
if cb4, err := json.Marshal(&col4); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ps.ctx, "/proxy/root/"+keyCollectionPath+"/4", string(cb4)); err != nil {
t.Fatal(err)
}
if sb5, err := json.Marshal(&seg5); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ps.ctx, "/proxy/root/"+keySegmentPath+"/5", string(sb5)); err != nil {
t.Fatal(err)
}
cr, err = cli.CountCollection(ps.ctx, &pb.CollectionName{CollectionName: "c4"})
assert.Nil(t, err)
assert.Equalf(t, cr.Status.ErrorCode, pb.ErrorCode_SUCCESS, "CountCollection failed : %s", cr.Status.Reason)
assert.Equalf(t, cr.CollectionRowCount, int64(10), "collection count expected to be 10, count = %d", cr.CollectionRowCount)
}
func TestProxyServer_InsertAndDelete(t *testing.T) {
client, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
assert.Nil(t, err)
defer client.Close()
ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
col10 := mpb.Collection{
Id: 10,
Name: "col10",
Schema: nil,
CreateTime: 0,
SegmentIds: []uint64{11, 12},
PartitionTags: nil,
Indexes: nil,
}
seg11 := mpb.Segment{
SegmentId: 11,
CollectionId: 10,
ChannelStart: 0,
ChannelEnd: 1,
Status: mpb.SegmentStatus_OPENED,
}
seg12 := mpb.Segment{
SegmentId: 12,
CollectionId: 10,
ChannelStart: 1,
ChannelEnd: 2,
Status: mpb.SegmentStatus_OPENED,
}
if cb10, err := json.Marshal(&col10); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keyCollectionPath+"/10", string(cb10)); err != nil {
t.Fatal(err)
}
if sb11, err := json.Marshal(&seg11); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keySegmentPath+"/11", string(sb11)); err != nil {
t.Fatal(err)
}
if sb12, err := json.Marshal(&seg12); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keySegmentPath+"/12", string(sb12)); err != nil {
t.Fatal(err)
}
_ = startTestMaster("localhost:10004", t)
time.Sleep(100 * time.Millisecond)
ps := startTestProxyServer("localhost:10005", "localhost:10004", t)
//defer ps.Close()
time.Sleep(100 * time.Millisecond)
conn, err := grpc.DialContext(ps.ctx, "localhost:10005", grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: ps.pulsarAddr})
assert.Nil(t, err)
defer pulsarClient.Close()
reader, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topics: ps.readerTopics,
SubscriptionName: "reader-group",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer reader.Close()
deleter, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: ps.deleteTopic,
SubscriptionName: "delete-group",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer deleter.Close()
pctx, _ := context.WithTimeout(ps.ctx, time.Second)
isbreak := false
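// Drain whatever is already queued on the reader and delete topics (the subscriptions start from the earliest position) before issuing this test's requests, so the checks below only see freshly produced messages.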
for {
if isbreak {
break
}
select {
case <-pctx.Done():
isbreak = true
break
case cm, ok := <-reader.Chan():
if !ok {
t.Fatalf("reader closed")
}
reader.AckID(cm.ID())
break
case cm, ok := <-deleter.Chan():
assert.Truef(t, ok, "deleter closed")
deleter.AckID(cm.ID())
}
}
ip := pb.InsertParam{
CollectionName: "col10",
Schema: nil,
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(1)},
{Blob: uint64ToBytes(2)},
{Blob: uint64ToBytes(3)},
{Blob: uint64ToBytes(4)},
{Blob: uint64ToBytes(5)},
},
EntityIdArray: []int64{1, 2, 3, 4, 5},
PartitionTag: "",
ExtraParams: nil,
}
dp := pb.DeleteByIDParam{
CollectionName: "deleteCollection",
IdArray: []int64{1, 2, 3, 4, 5},
}
serverClient := pb.NewMilvusServiceClient(conn)
ir, err := serverClient.Insert(ps.ctx, &ip)
assert.Nil(t, err)
assert.Equalf(t, ir.Status.ErrorCode, pb.ErrorCode_SUCCESS, "Insert failed, error code = %d, reason = %s", ir.Status.ErrorCode, ir.Status.Reason)
assert.Equalf(t, len(ir.EntityIdArray), 5, "insert failed, len(ir.EntityIdArray) expected to be 5")
sort.Slice(ir.EntityIdArray, func(i int, j int) bool { return ir.EntityIdArray[i] < ir.EntityIdArray[j] })
for i := 0; i < 5; i++ {
assert.Equal(t, ir.EntityIdArray[i], int64(i+1))
}
dr, err := serverClient.DeleteByID(ps.ctx, &dp)
assert.Nil(t, err)
assert.Equalf(t, dr.ErrorCode, pb.ErrorCode_SUCCESS, "delete failed, error code = %d, reason = %s", dr.ErrorCode, dr.Reason)
var primaryKey []uint64
isbreak = false
for {
if isbreak {
break
}
select {
case <-ps.ctx.Done():
isbreak = true
break
case cm, ok := <-reader.Chan():
assert.Truef(t, ok, "reader closed")
msg := cm.Message
var m pb.ManipulationReqMsg
if err := proto.Unmarshal(msg.Payload(), &m); err != nil {
t.Fatal(err)
}
for i, k := range m.PrimaryKeys {
primaryKey = append(primaryKey, k)
rowValue := binary.LittleEndian.Uint64(m.RowsData[i].Blob)
t.Logf("primary key = %d, rowvalue =%d", k, rowValue)
assert.Equalf(t, k, rowValue, "key expect equal to row value")
}
reader.AckID(cm.ID())
break
case cm, ok := <-deleter.Chan():
assert.Truef(t, ok, "deleter closed")
var m pb.ManipulationReqMsg
if err := proto.Unmarshal(cm.Message.Payload(), &m); err != nil {
t.Fatal(err)
}
assert.Equalf(t, m.CollectionName, "deleteCollection", "delete failed, collection name = %s", m.CollectionName)
assert.Equalf(t, len(m.PrimaryKeys), 5, "delete failed,len(m.PrimaryKeys) = %d", len(m.PrimaryKeys))
for i, v := range m.PrimaryKeys {
assert.Equalf(t, v, uint64(i+1), "delete failed")
}
}
}
assert.Equalf(t, len(primaryKey), 5, "Receive from pulsar failed")
sort.Slice(primaryKey, func(i int, j int) bool { return primaryKey[i] < primaryKey[j] })
for i := 0; i < 5; i++ {
assert.Equalf(t, primaryKey[i], uint64(i+1), "insert failed")
}
t.Logf("m_timestamp = %d", ps.reqSch.m_timestamp)
assert.Equalf(t, ps.reqSch.m_timestamp, Timestamp(1300), "insert failed")
}
func TestProxyServer_Search(t *testing.T) {
_ = startTestMaster("localhost:10006", t)
time.Sleep(100 * time.Millisecond)
ps := startTestProxyServer("localhost:10007", "localhost:10006", t)
time.Sleep(100 * time.Millisecond)
conn, err := grpc.DialContext(ps.ctx, "localhost:10007", grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: ps.pulsarAddr})
assert.Nil(t, err)
defer pulsarClient.Close()
query, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: ps.queryTopic,
SubscriptionName: "query-group",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer query.Close()
result, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: ps.resultTopic})
assert.Nil(t, err)
defer result.Close()
pctx, _ := context.WithTimeout(ps.ctx, time.Second)
func() {
for {
select {
case <-pctx.Done():
return
case cm, ok := <-query.Chan():
if !ok {
t.Fatal("query topic is closed")
}
query.AckID(cm.ID())
}
}
}()
go func() {
cm, ok := <-query.Chan()
assert.Truef(t, ok, "query topic is closed")
query.AckID(cm.ID())
var qm pb.QueryReqMsg
if err := proto.Unmarshal(cm.Payload(), &qm); err != nil {
t.Fatal(err)
}
if qm.ProxyId != ps.proxyId {
t.Fatalf("search failed, incorrect proxy id = %d", qm.ProxyId)
}
if qm.CollectionName != "collection_search" {
t.Fatalf("search failed, incorrect collection name = %s", qm.CollectionName)
}
r1 := pb.QueryResult{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Entities: &pb.Entities{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Ids: []int64{1, 3, 5},
ValidRow: []bool{true, true, true},
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(1)},
{Blob: uint64ToBytes(3)},
{Blob: uint64ToBytes(5)},
},
},
RowNum: 3,
Scores: []float32{1, 3, 5},
Distances: []float32{1, 3, 5},
ExtraParams: nil,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}
r2 := pb.QueryResult{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Entities: &pb.Entities{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Ids: []int64{2, 4, 6},
ValidRow: []bool{true, false, true},
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(2)},
{Blob: uint64ToBytes(4)},
{Blob: uint64ToBytes(6)},
},
},
RowNum: 3,
Scores: []float32{2, 4, 6},
Distances: []float32{2, 4, 6},
ExtraParams: nil,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}
b1, err := proto.Marshal(&r1)
assert.Nil(t, err)
b2, err := proto.Marshal(&r2)
assert.Nil(t, err)
if _, err := result.Send(ps.ctx, &pulsar.ProducerMessage{Payload: b1}); err != nil {
t.Fatal(err)
}
if _, err := result.Send(ps.ctx, &pulsar.ProducerMessage{Payload: b2}); err != nil {
t.Fatal(err)
}
}()
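// The assertions below expect the proxy to reduce the two partial results into the top valid rows ordered by descending score: ids 6, 5 and 3 survive, id 4 is dropped because its ValidRow flag is false, and ids 1 and 2 fall outside the top three.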
sm := pb.SearchParam{
CollectionName: "collection_search",
VectorParam: nil,
Dsl: "",
PartitionTag: nil,
ExtraParams: nil,
}
serverClient := pb.NewMilvusServiceClient(conn)
qr, err := serverClient.Search(ps.ctx, &sm)
assert.Nil(t, err)
assert.Equalf(t, qr.Status.ErrorCode, pb.ErrorCode_SUCCESS, "query failed")
assert.Equalf(t, qr.Entities.Status.ErrorCode, pb.ErrorCode_SUCCESS, "query failed")
assert.Equalf(t, len(qr.Entities.Ids), 3, "query failed")
assert.Equalf(t, qr.Entities.Ids, []int64{6, 5, 3}, "query failed")
assert.Equalf(t, len(qr.Entities.ValidRow), 3, "query failed")
assert.Equalf(t, qr.Entities.ValidRow, []bool{true, true, true}, "query failed")
assert.Equalf(t, len(qr.Entities.RowsData), 3, "query failed")
assert.Equalf(t, qr.Entities.RowsData, []*pb.RowData{
{Blob: uint64ToBytes(6)},
{Blob: uint64ToBytes(5)},
{Blob: uint64ToBytes(3)},
}, "query failed")
assert.Equalf(t, len(qr.Scores), 3, "query failed")
assert.Equalf(t, qr.Scores, []float32{6, 5, 3}, "query failed")
assert.Equalf(t, len(qr.Distances), 3, "query failed")
assert.Equalf(t, qr.Distances, []float32{6, 5, 3}, "query failed")
}

View File

@ -0,0 +1,119 @@
package proxy_node
import (
"context"
"fmt"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
etcd "go.etcd.io/etcd/clientv3"
"log"
"strconv"
"sync"
"time"
)
const (
tsoKeyPath string = "/timestampOracle"
)
type timestamp struct {
physical uint64 // 18-63 bits
logical uint64 // 8-17 bits
id uint64 // 0-7 bits
}
type Timestamp uint64
type timestampOracle struct {
client *etcd.Client // client of a reliable meta service, i.e. etcd client
ctx context.Context
rootPath string // this timestampOracle's working root path on the reliable kv service
saveInterval uint64
lastSavedTime uint64
tso timestamp // monotonically increasing m_timestamp
mux sync.Mutex
}
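// ToTimeStamp packs a timestamp into a single uint64: physical milliseconds in bits 18-63, the logical counter in bits 8-17 and the oracle id in the low 8 bits; ToPhysicalTime recovers the physical part.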
func ToTimeStamp(t *timestamp) Timestamp {
ts := (t.physical << 18) + (t.logical << 8) + (t.id & uint64(0xFF))
return Timestamp(ts)
}
func ToPhysicalTime(t uint64) uint64 {
return t >> 18
}
func (tso *timestampOracle) Restart(id int64) {
go func() {
tso.loadTimestamp()
tso.tso.id = uint64(id)
ticker := time.Tick(time.Duration(tso.saveInterval) * time.Millisecond)
for {
select {
case <-ticker:
_, s := tso.GetTimestamp(1)
if s.ErrorCode == pb.ErrorCode_SUCCESS {
_ = tso.saveTimestamp()
}
break
case <-tso.ctx.Done():
if err := tso.client.Close(); err != nil {
log.Printf("close etcd client error %v", err)
}
return
}
}
}()
}
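// GetTimestamp advances the physical part to the current wall-clock millisecond if it lags behind, then hands out count timestamps that differ only in their logical counter.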
func (tso *timestampOracle) GetTimestamp(count uint32) ([]Timestamp, pb.Status) {
physical := uint64(time.Now().UnixNano()) / uint64(1e6)
var ctso timestamp
tso.mux.Lock()
if tso.tso.physical < physical {
tso.tso.physical = physical
}
ctso = tso.tso
tso.mux.Unlock()
tt := make([]Timestamp, 0, count)
for i := uint32(0); i < count; i++ {
ctso.logical = uint64(i)
tt = append(tt, ToTimeStamp(&ctso))
}
return tt, pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (tso *timestampOracle) saveTimestamp() pb.Status {
tso.mux.Lock()
physical := tso.tso.physical
tso.mux.Unlock()
if _, err := tso.client.Put(tso.ctx, tso.rootPath+tsoKeyPath, strconv.FormatUint(physical, 10)); err != nil {
return pb.Status{ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR, Reason: fmt.Sprintf("put into etcd failed, error = %v", err)}
}
tso.mux.Lock()
tso.lastSavedTime = physical
tso.mux.Unlock()
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (tso *timestampOracle) loadTimestamp() pb.Status {
ts, err := tso.client.Get(tso.ctx, tso.rootPath+tsoKeyPath)
if err != nil {
return pb.Status{ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR, Reason: fmt.Sprintf("get from etcd failed, error = %v", err)}
}
if len(ts.Kvs) != 0 {
n, err := strconv.ParseUint(string(ts.Kvs[0].Value), 10, 64)
if err != nil {
return pb.Status{ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR, Reason: fmt.Sprintf("ParseUint failed, error = %v", err)}
}
tso.mux.Lock()
tso.tso.physical = n
tso.lastSavedTime = n
tso.mux.Unlock()
} else {
tso.mux.Lock()
tso.tso.physical = uint64(time.Now().UnixNano()) / uint64(1e6)
tso.lastSavedTime = tso.tso.physical
tso.mux.Unlock()
}
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}

View File

@ -0,0 +1,34 @@
package proxy_node
import (
"context"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/clientv3"
"testing"
"time"
)
func TestTimestampOracle(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
assert.Nil(t, err)
defer cli.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tso := timestampOracle{
client: cli,
ctx: ctx,
rootPath: "/proxy/tso",
saveInterval: 200,
}
tso.Restart(0)
time.Sleep(time.Second)
tso.loadTimestamp()
tso.mux.Lock()
assert.GreaterOrEqualf(t, tso.tso.physical, uint64(100), "physical error")
t.Log("physical = ", tso.tso.physical)
tso.mux.Unlock()
ts, _ := tso.GetTimestamp(1)
t.Log("Timestamp = ", ts[0])
}

View File

@ -0,0 +1,74 @@
package proxy_node
import (
"context"
"fmt"
"github.com/apache/pulsar-client-go/pulsar"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"log"
"time"
)
type timeTick struct {
lastTick Timestamp
currentTick Timestamp
interval uint64
pulsarProducer pulsar.Producer
peer_id int64
ctx context.Context
areRequestsDelivered func(ts Timestamp) bool
getTimestamp func() (Timestamp, pb.Status)
}
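// tick publishes the current read timestamp to Pulsar, but only once every request up to that timestamp has been delivered (areRequestsDelivered); a fresh timestamp is fetched whenever the previous tick was actually sent.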
func (tt *timeTick) tick() pb.Status {
if tt.lastTick == tt.currentTick {
ts, s := tt.getTimestamp()
if s.ErrorCode != pb.ErrorCode_SUCCESS {
return s
}
tt.currentTick = ts
}
if !tt.areRequestsDelivered(tt.currentTick) {
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
tsm := pb.TimeSyncMsg{
Timestamp: uint64(tt.currentTick),
Peer_Id: tt.peer_id,
SyncType: pb.SyncType_READ,
}
payload, err := proto.Marshal(&tsm)
if err != nil {
return pb.Status{ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR, Reason: fmt.Sprintf("marshal TimeSync failed, error = %v", err)}
}
if _, err := tt.pulsarProducer.Send(tt.ctx, &pulsar.ProducerMessage{Payload: payload}); err != nil {
return pb.Status{ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR, Reason: fmt.Sprintf("send into pulsar failed, error = %v", err)}
}
tt.lastTick = tt.currentTick
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (tt *timeTick) Restart() pb.Status {
tt.lastTick = 0
ts, s := tt.getTimestamp()
if s.ErrorCode != pb.ErrorCode_SUCCESS {
return s
}
tt.currentTick = ts
tick := time.Tick(time.Millisecond * time.Duration(tt.interval))
go func() {
for {
select {
case <-tick:
if s := tt.tick(); s.ErrorCode != pb.ErrorCode_SUCCESS {
log.Printf("timeTick error ,status = %d", int(s.ErrorCode))
}
case <-tt.ctx.Done():
tt.pulsarProducer.Close()
return
}
}
}()
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}

View File

@ -0,0 +1,85 @@
package proxy_node
import (
"context"
"github.com/apache/pulsar-client-go/pulsar"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"testing"
"time"
)
func TestTimeTick(t *testing.T) {
client, err := pulsar.NewClient(pulsar.ClientOptions{URL: "pulsar://localhost:6650"})
assert.Nil(t, err)
producer, err := client.CreateProducer(pulsar.ProducerOptions{Topic: "timesync"})
assert.Nil(t, err)
consumer, err := client.Subscribe(pulsar.ConsumerOptions{
Topic: "timesync",
SubscriptionName: "timesync_group",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
ctx, _ := context.WithTimeout(context.Background(), 4*time.Second)
var curTs Timestamp
curTs = 0
tt := timeTick{
interval: 200,
pulsarProducer: producer,
peer_id: 1,
ctx: ctx,
areRequestsDelivered: func(ts Timestamp) bool { return true },
getTimestamp: func() (Timestamp, pb.Status) {
curTs = curTs + 100
return curTs, pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
},
}
tt.Restart()
ctx2, _ := context.WithTimeout(context.Background(), time.Second*2)
isbreak := false
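// Discard the ticks produced during the first two seconds, then verify below that subsequent time-sync messages carry strictly increasing timestamps.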
for {
if isbreak {
break
}
select {
case <-ctx2.Done():
isbreak = true
break
case cm, ok := <-consumer.Chan():
if !ok {
t.Fatalf("consumer closed")
}
consumer.AckID(cm.ID())
break
}
}
var lastTimestamp uint64 = 0
for {
select {
case <-ctx.Done():
return
case cm, ok := <-consumer.Chan():
if !ok {
return
}
msg := cm.Message
var tsm pb.TimeSyncMsg
if err := proto.Unmarshal(msg.Payload(), &tsm); err != nil {
return
}
if tsm.Timestamp <= lastTimestamp {
t.Fatalf("current = %d, last = %d", uint64(tsm.Timestamp), uint64(lastTimestamp))
}
t.Log("current = ", tsm.Timestamp)
lastTimestamp = tsm.Timestamp
}
}
}

View File

@ -0,0 +1,21 @@
package proxy_node
import (
"encoding/binary"
"github.com/spaolacci/murmur3"
"unsafe"
)
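// Hash32_Bytes and Hash32_Uint64 wrap murmur3 and mask the result to 31 bits; the proxy uses them to spread entity ids across reader topics (see proxyServer.Insert).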
func Hash32_Bytes(b []byte) (uint32, error) {
h := murmur3.New32()
if _, err := h.Write(b); err != nil {
return 0, err
}
return h.Sum32() & 0x7fffffff, nil
}
func Hash32_Uint64(v uint64) (uint32, error) {
b := make([]byte, unsafe.Sizeof(v))
binary.LittleEndian.PutUint64(b, v)
return Hash32_Bytes(b)
}

View File

@ -0,0 +1,30 @@
package proxy_node
import (
"github.com/stretchr/testify/assert"
"testing"
"unsafe"
)
func TestUint64(t *testing.T) {
var i int64 = -1
var u uint64 = uint64(i)
t.Log(i)
t.Log(u)
}
func TestHash32_Uint64(t *testing.T) {
var u uint64 = 0x12
h, err := Hash32_Uint64(u)
assert.Nil(t, err)
t.Log(h)
b := make([]byte, unsafe.Sizeof(u))
b[0] = 0x12
h2, err := Hash32_Bytes(b)
assert.Nil(t, err)
t.Log(h2)
assert.Equal(t, h, h2)
}

View File

@ -5,7 +5,7 @@ if [[ ! ${jobs+1} ]]; then
jobs=$(nproc)
fi
BUILD_OUTPUT_DIR="cmake_build_release"
BUILD_OUTPUT_DIR="cmake_build"
BUILD_TYPE="Release"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX=$(pwd)/milvus

View File

@ -81,12 +81,6 @@ ConfigMgr::ConfigMgr() {
/* pulsar */
{"pulsar.authentication", CreateBoolConfig("pulsar.authentication", false, &config.pulsar.authentication,
false, nullptr, nullptr)},
{"pulsar.user", CreateStringConfig("pulsar.user", false, &config.pulsar.user.value,
"user-default", nullptr, nullptr)},
{"pulsar.token", CreateStringConfig("pulsar.token", false, &config.pulsar.token.value,
"fake-token", nullptr, nullptr)},
{"pulsar.address", CreateStringConfig("pulsar.address", false, &config.pulsar.address.value,
"localhost", nullptr, nullptr)},
{"pulsar.port", CreateIntegerConfig("pulsar.port", false, 0, 65535, &config.pulsar.port.value,

View File

@ -74,9 +74,6 @@ struct ServerConfig {
} network;
struct Pulsar{
bool authentication{false};
String user{"user-default"};
String token{"fake-token"};
String address{"localhost"};
Integer port{6650};
Integer topicnum{1024};

View File

@ -7,8 +7,6 @@
#include <omp.h>
#include <numeric>
#include <algorithm>
#include <unistd.h>
#include "nlohmann/json.hpp"
#include "log/Log.h"
namespace milvus::message_client {
@ -28,15 +26,7 @@ Status MsgClientV2::Init(const std::string &insert_delete,
const std::string &search_by_id,
const std::string &search_result) {
//create pulsar client
std::shared_ptr<MsgClient> pulsar_client;
if (config.pulsar.authentication) {
pulsar::ClientConfiguration clientConfig;
clientConfig.setAuth(pulsar::AuthToken::createWithToken(config.pulsar.token.value));
pulsar_client = std::make_shared<MsgClient>(service_url_, clientConfig);
} else {
pulsar_client = std::make_shared<MsgClient>(service_url_);
}
auto pulsar_client = std::make_shared<MsgClient>(service_url_);
//create pulsar producer
ProducerConfiguration producerConfiguration;
producerConfiguration.setPartitionsRoutingMode(ProducerConfiguration::CustomPartition);
@ -177,11 +167,7 @@ Status MsgClientV2::SendMutMessage(const milvus::grpc::InsertParam &request,
const std::function<uint64_t(const std::string &collection_name,
uint64_t channel_id,
uint64_t timestamp)> &segment_id) {
const uint64_t num_records_log = 100 * 10000;
static uint64_t num_inserted = 0;
static uint64_t size_inserted = 0;
using stdclock = std::chrono::high_resolution_clock;
static stdclock::duration time_cost;
auto start = stdclock::now();
// may have retry policy?
auto row_count = request.rows_data_size();
@ -200,14 +186,11 @@ Status MsgClientV2::SendMutMessage(const milvus::grpc::InsertParam &request,
mut_msg.set_collection_name(request.collection_name());
mut_msg.set_partition_tag(request.partition_tag());
uint64_t uid = request.entity_id_array(i);
// auto channel_id = makeHash(&uid, sizeof(uint64_t)) % topic_num;
//TODO:: don't prove the correction
auto channel_id = this_thread;
auto channel_id = makeHash(&uid, sizeof(uint64_t)) % topic_num;
try {
mut_msg.set_segment_id(segment_id(request.collection_name(), channel_id, timestamp));
mut_msg.mutable_rows_data()->CopyFrom(request.rows_data(i));
mut_msg.mutable_extra_params()->CopyFrom(request.extra_params());
mut_msg.set_channel_id(channel_id);
auto callback = [&stats, &msg_sended, this_thread](Result result, const pulsar::MessageId &messageId) {
msg_sended += 1;
@ -215,7 +198,7 @@ Status MsgClientV2::SendMutMessage(const milvus::grpc::InsertParam &request,
stats[this_thread] = Status(DB_ERROR, pulsar::strResult(result));
}
};
paralle_mut_producers_[channel_id]->sendAsync(mut_msg, callback);
paralle_mut_producers_[this_thread]->sendAsync(mut_msg, callback);
}
catch (const std::exception &e) {
msg_sended += 1;
@ -226,35 +209,10 @@ Status MsgClientV2::SendMutMessage(const milvus::grpc::InsertParam &request,
}
auto end = stdclock::now();
time_cost += (end - start);
num_inserted += row_count;
size_inserted += request.ByteSize();
if (num_inserted >= num_records_log) {
// char buff[128];
// auto r = getcwd(buff, 128);
auto path = std::string("/tmp");
std::ofstream file(path + "/proxy2pulsar.benchmark", std::fstream::app);
nlohmann::json json;
json["InsertTime"] = milvus::CommonUtil::TimeToString(start);
json["DurationInMilliseconds"] = std::chrono::duration_cast<std::chrono::milliseconds>(time_cost).count();
json["SizeInMB"] = size_inserted / 1024.0 / 1024.0;
json["ThroughputInMB"] = double(size_inserted) / std::chrono::duration_cast<std::chrono::milliseconds>(time_cost).count() * 1000 / 1024.0 / 1024;
json["NumRecords"] = num_inserted;
file << json.dump() << std::endl;
/*
file << "[" << milvus::CommonUtil::TimeToString(start) << "]"
<< " Insert " << num_inserted << " records, "
<< "size:" << size_inserted / 1024.0 / 1024.0 << "M, "
<< "cost" << std::chrono::duration_cast<std::chrono::milliseconds>(time_cost).count() / 1000.0 << "s, "
<< "throughput: "
<< double(size_inserted) / std::chrono::duration_cast<std::chrono::milliseconds>(time_cost).count() * 1000 / 1024.0
/ 1024
<< "M/s" << std::endl;
*/
time_cost = stdclock::duration(0);
num_inserted = 0;
size_inserted = 0;
}
auto data_size = request.ByteSize();
LOG_SERVER_INFO_ << "InsertReq Batch size:" << data_size / 1024.0 / 1024.0 << "M, "
<< "throughput: " << data_size / std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() * 1000 / 1024.0 / 1024
<< "M/s";
for (auto &stat : stats) {
if (!stat.ok()) {
@ -301,9 +259,7 @@ Status MsgClientV2::SendMutMessage(const milvus::grpc::DeleteByIDParam &request,
auto end = stdclock::now();
auto data_size = request.ByteSize();
LOG_SERVER_INFO_ << "InsertReq Batch size:" << data_size / 1024.0 / 1024.0 << "M, "
<< "throughput: "
<< data_size / std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() * 1000
/ 1024.0 / 1024
<< "throughput: " << data_size / std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() * 1000 / 1024.0 / 1024
<< "M/s";
for (auto &stat : stats) {

View File

@ -45,11 +45,15 @@ namespace message_client {
}
Result MsgProducer::send(milvus::grpc::InsertOrDeleteMsg &msg) {
int32_t channel_id = makeHash(std::to_string(msg.uid())) % 1024;
msg.set_channel_id(channel_id);
auto msg_str = msg.SerializeAsString();
return send(msg_str, msg.uid());
}
void MsgProducer::sendAsync(milvus::grpc::InsertOrDeleteMsg &msg, pulsar::SendCallback callback) {
int32_t channel_id = makeHash(std::to_string(msg.uid())) % 1024;
msg.set_channel_id(channel_id);
auto msg_str = msg.SerializeAsString();
return sendAsync(msg_str, msg.uid(), callback);
}

View File

@ -15,29 +15,13 @@ Status MessageWrapper::Init() {
(std::string{"pulsar://"} + config.pulsar.address() + ":" + std::to_string(config.pulsar.port()));
int client_id = config.proxy_id();
msg_client_ = std::make_shared<message_client::MsgClientV2>(client_id, pulsar_server_addr, config.pulsar.topicnum());
Status status;
if (config.pulsar.authentication) {
std::string insert_or_delete_topic_name = "InsertOrDelete-" + config.pulsar.user.value;
std::string search_topic_name = "Search-" + config.pulsar.user.value;
std::string search_by_id_topic_name = "SearchById-" + config.pulsar.user.value;
std::string search_result = "SearchResult-" + config.pulsar.user.value;
status = msg_client_->Init(insert_or_delete_topic_name,
search_topic_name,
"TimeSync",
search_by_id_topic_name,
search_result);
} else {
status = msg_client_->Init("InsertOrDelete", "Search", "TimeSync", "SearchById", "SearchResult");
}
// timeSync
time_sync_ = std::make_shared<timesync::TimeSync>(client_id, GetMessageTimeSyncTime, config.timesync.interval(), pulsar_server_addr, "TimeSync");
auto status = msg_client_->Init("InsertOrDelete", "Search", "TimeSync", "SearchById", "SearchResult");
if (!status.ok()){
return status;
}
// timeSync
time_sync_ = std::make_shared<timesync::TimeSync>(client_id, GetMessageTimeSyncTime, config.timesync.interval(), pulsar_server_addr, "TimeSync");
return status;
}
const std::shared_ptr<message_client::MsgClientV2> &MessageWrapper::MessageClient() {

View File

@ -72,7 +72,7 @@ Status MetaWrapper::Init() {
auto f = [&](const etcdserverpb::WatchResponse &res) {
UpdateMeta(res);
};
watcher_ = std::make_shared<milvus::master::Watcher>(etcd_addr, etcd_root_path_, f, true);
watcher_ = std::make_shared<milvus::master::Watcher>(etcd_addr, segment_path_, f, true);
return SyncMeta();
}
catch (const std::exception &e) {

View File

@ -23,9 +23,6 @@
#include <unordered_map>
#include <utility>
#include <vector>
#include <unistd.h>
#include "utils/CommonUtil.h"
#include "nlohmann/json.hpp"
#ifdef ENABLE_CPU_PROFILING
#include <gperftools/profiler.h>
@ -46,84 +43,15 @@ InsertReq::Create(const ContextPtr &context, const ::milvus::grpc::InsertParam *
Status
InsertReq::OnExecute() {
#ifndef BENCHMARK
#define BENCHMARK
#endif
#ifdef BENCHMARK
const uint64_t count_msg_num = 50000 * 10;
const double MB = 1024 * 1024;
using stdclock = std::chrono::high_resolution_clock;
static uint64_t inserted_count, inserted_size = 0;
static stdclock::time_point start, end;
const int interval = 2;
const int per_log_records = 10000 * 100;
static uint64_t ready_log_records = 0;
static int log_flag = 0;
static bool shouldBenchmark = false;
static std::stringstream log;
// char buff[128];
// auto r = getcwd(buff, 128);
auto path = std::string("/tmp");
std::ofstream file(path + "/proxy.benchmark", std::fstream::app);
#endif
LOG_SERVER_INFO_ << LogOut("[%s][%ld] ", "insert", 0) << "Execute InsertReq.";
auto &msg_client = MessageWrapper::GetInstance().MessageClient();
auto segment_id = [](const std::string &collection_name,
uint64_t channel_id,
uint64_t timestamp) {
return MetaWrapper::GetInstance().AskSegmentId(collection_name, channel_id, timestamp);
return MetaWrapper::GetInstance().AskSegmentId(collection_name, channel_id, timestamp);
};
#ifdef BENCHMARK
if (inserted_count >= count_msg_num && !shouldBenchmark) {
shouldBenchmark = true;
start = stdclock::now();
inserted_count = 0;
inserted_size = 0;
}
#endif
Status status;
status = msg_client->SendMutMessage(*insert_param_, timestamp_, segment_id);
#ifdef BENCHMARK
inserted_count += insert_param_->rows_data_size();
inserted_size += insert_param_->ByteSize();
if (shouldBenchmark) {
end = stdclock::now();
ready_log_records += inserted_count;
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() / 1000.0;
if (duration > interval) {
nlohmann::json json;
json["InsertTime"] = milvus::CommonUtil::TimeToString(start);
json["DurationInMilliseconds"] = duration * 1000;
json["SizeInMB"] = inserted_size / MB;
json["ThroughputInMB"] = double(inserted_size) / duration / MB;
json["NumRecords"] = inserted_count;
file << json.dump() << std::endl;
// log << "[" << milvus::CommonUtil::TimeToString(start) << "] "
// << "Insert "
// << inserted_count << " records, "
// << "size: " << inserted_size / MB << "MB, "
// << "cost: " << duration << "s, "
// << "throughput: "
// << double(inserted_size) / duration / MB
// << "M/s\n";
auto new_flag = ready_log_records / per_log_records;
if (new_flag != log_flag) {
log_flag = new_flag;
file << log.str();
file.flush();
log.str("");
}
inserted_size = 0;
inserted_count = 0;
start = stdclock::now();
}
}
#endif
return status;
}

View File

@ -25,17 +25,9 @@ TimeSync::TimeSync(int64_t id,
timestamp_(timestamp), interval_(interval), pulsar_addr_(pulsar_addr), time_sync_topic_(time_sync_topic) {
sync_msg_.set_peer_id(id);
auto timer = [&]() {
//create pulsar client
std::shared_ptr<milvus::message_client::MsgClient> pulsar_client;
if (config.pulsar.authentication) {
pulsar::ClientConfiguration clientConfig;
clientConfig.setAuth(pulsar::AuthToken::createWithToken(config.pulsar.token.value));
pulsar_client = std::make_shared<milvus::message_client::MsgClient>(this->pulsar_addr_, clientConfig);
} else {
pulsar_client = std::make_shared<milvus::message_client::MsgClient>(this->pulsar_addr_);
}
milvus::message_client::MsgProducer producer(pulsar_client, this->time_sync_topic_);
std::shared_ptr<milvus::message_client::MsgClient>
client = std::make_shared<milvus::message_client::MsgClient>(this->pulsar_addr_);
milvus::message_client::MsgProducer producer(client, this->time_sync_topic_);
for (;;) {
if (this->stop_) break;
@ -52,7 +44,7 @@ TimeSync::TimeSync(int64_t id,
if (rst != pulsar::ResultOk) {
//TODO, add log or throw exception
}
rst = pulsar_client->close();
rst = client->close();
if (rst != pulsar::ResultOk) {
//TODO, add log or throw exception
}

View File

@ -175,14 +175,6 @@ CommonUtil::TimeStrToTime(const std::string& time_str, time_t& time_integer, tm&
return true;
}
std::string CommonUtil::TimeToString(std::chrono::high_resolution_clock::time_point t) {
std::time_t tt = std::chrono::system_clock::to_time_t(t);
char buf[100] = {0};
std::strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", std::localtime(&tt));
return std::string(buf);
}
void
CommonUtil::ConvertTime(time_t time_integer, tm& time_struct) {
localtime_r(&time_integer, &time_struct);

View File

@ -15,7 +15,6 @@
#include <time.h>
#include <string>
#include <chrono>
namespace milvus {
@ -41,9 +40,6 @@ class CommonUtil {
TimeStrToTime(const std::string& time_str, time_t& time_integer, tm& time_struct,
const std::string& format = "%d-%d-%d %d:%d:%d");
static std::string
TimeToString(std::chrono::high_resolution_clock::time_point t);
static void
ConvertTime(time_t time_integer, tm& time_struct);
static void

View File

@ -42,7 +42,11 @@ type MessageClient struct {
}
func (mc *MessageClient) GetTimeNow() uint64 {
return mc.timestampBatchEnd
msg, ok := <-mc.timeSyncCfg.TimeSync()
if !ok {
fmt.Println("cnn't get data from timesync chan")
}
return msg.Timestamp
}
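
Note that the two-value receive above yields the element's zero value when the channel is closed, so GetTimeNow would print the warning and then return timestamp 0 as if it were valid. A minimal sketch of a variant that reports the condition to the caller (type and function names are illustrative, not part of this change):

package main

import (
	"errors"
	"fmt"
)

// timeSyncMsg stands in for the real timesync message type; illustrative only.
type timeSyncMsg struct{ Timestamp uint64 }

// getTimeNow surfaces a closed timesync channel instead of silently returning 0.
func getTimeNow(ch <-chan timeSyncMsg) (uint64, error) {
	msg, ok := <-ch
	if !ok {
		return 0, errors.New("timesync channel closed")
	}
	return msg.Timestamp, nil
}

func main() {
	ch := make(chan timeSyncMsg, 1)
	ch <- timeSyncMsg{Timestamp: 42}
	ts, err := getTimeNow(ch)
	fmt.Println(ts, err) // 42 <nil>

	close(ch)
	_, err = getTimeNow(ch)
	fmt.Println(err) // timesync channel closed
}
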
func (mc *MessageClient) TimeSyncStart() uint64 {
@ -138,20 +142,7 @@ func (mc *MessageClient) createConsumer(topicName string) pulsar.Consumer {
}
func (mc *MessageClient) createClient(url string) pulsar.Client {
if conf.Config.Pulsar.Authentication {
// create client with Authentication
client, err := pulsar.NewClient(pulsar.ClientOptions{
URL: url,
Authentication: pulsar.NewAuthenticationToken(conf.Config.Pulsar.Token),
})
if err != nil {
log.Fatal(err)
}
return client
}
// create client without Authentication
// create client
client, err := pulsar.NewClient(pulsar.ClientOptions{
URL: url,
})
@ -170,23 +161,8 @@ func (mc *MessageClient) InitClient(url string) {
//create producer
mc.searchResultProducers = make(map[int64]pulsar.Producer)
proxyIdList := conf.Config.Master.ProxyIdList
searchResultTopicName := "SearchResult-"
searchTopicName := "Search"
key2SegTopicName := "Key2Seg"
timeSyncTopicName := "TimeSync"
insertOrDeleteTopicName := "InsertOrDelete-"
if conf.Config.Pulsar.Authentication {
searchResultTopicName = "SearchResult-" + conf.Config.Pulsar.User + "-"
searchTopicName = "Search-" + conf.Config.Pulsar.User
key2SegTopicName = "Key2Seg-" + conf.Config.Pulsar.User
// timeSyncTopicName = "TimeSync-" + conf.Config.Pulsar.User
insertOrDeleteTopicName = "InsertOrDelete-" + conf.Config.Pulsar.User + "-"
}
for _, key := range proxyIdList{
topic := searchResultTopicName
topic := "SearchResult-"
topic = topic + strconv.Itoa(int(key))
mc.searchResultProducers[key] = mc.creatProducer(topic)
}
@ -195,8 +171,8 @@ func (mc *MessageClient) InitClient(url string) {
mc.segmentsStatisticProducer = mc.creatProducer(SegmentsStatisticTopicName)
//create consumer
mc.searchConsumer = mc.createConsumer(searchTopicName)
mc.key2segConsumer = mc.createConsumer(key2SegTopicName)
mc.searchConsumer = mc.createConsumer("Search")
mc.key2segConsumer = mc.createConsumer("Key2Seg")
// init channel
mc.searchChan = make(chan *msgpb.SearchMsg, conf.Config.Reader.SearchChanSize)
@ -206,11 +182,11 @@ func (mc *MessageClient) InitClient(url string) {
mc.Key2SegMsg = make([]*msgpb.Key2SegMsg, 0)
//init timesync
timeSyncTopic := timeSyncTopicName
timeSyncTopic := "TimeSync"
timeSyncSubName := "reader" + strconv.Itoa(mc.MessageClientID)
readTopics := make([]string, 0)
for i := conf.Config.Reader.TopicStart; i < conf.Config.Reader.TopicEnd; i++ {
str := insertOrDeleteTopicName
str := "ManipulationReqMsg-"
str = str + strconv.Itoa(i)
readTopics = append(readTopics, str)
}
@ -228,7 +204,6 @@ func (mc *MessageClient) InitClient(url string) {
log.Fatal(err)
}
mc.timeSyncCfg = timeSync.(*timesync.ReaderTimeSyncCfg)
mc.timeSyncCfg.RoleType = timesync.Reader
mc.timestampBatchStart = 0
mc.timestampBatchEnd = 0

View File

@ -36,9 +36,9 @@ func GetSegmentObjId(key string) string {
func isCollectionObj(key string) bool {
prefix := path.Join(conf.Config.Etcd.Rootpath, CollectonPrefix) + "/"
prefix = strings.TrimSpace(prefix)
// println("prefix is :$", prefix)
println("prefix is :$", prefix)
index := strings.Index(key, prefix)
// println("index is :", index)
println("index is :", index)
return index == 0
}
@ -54,15 +54,8 @@ func isSegmentChannelRangeInQueryNodeChannelRange(segment *mock.Segment) bool {
log.Printf("Illegal segment channel range")
return false
}
var queryNodeChannelStart = conf.Config.Reader.TopicStart
var queryNodeChannelEnd = conf.Config.Reader.TopicEnd
if segment.ChannelStart >= queryNodeChannelStart && segment.ChannelEnd <= queryNodeChannelEnd {
return true
}
return false
// TODO: add query node channel range check
return true
}
func printCollectionStruct(obj *mock.Collection) {
@ -95,7 +88,7 @@ func (node *QueryNode) processCollectionCreate(id string, value string) {
println("error of json 2 collection")
println(err.Error())
}
//printCollectionStruct(collection)
printCollectionStruct(collection)
newCollection := node.NewCollection(collection.ID, collection.Name, collection.GrpcMarshalString)
for _, partitionTag := range collection.PartitionTags {
newCollection.NewPartition(partitionTag)
@ -109,11 +102,12 @@ func (node *QueryNode) processSegmentCreate(id string, value string) {
println("error of json 2 segment")
println(err.Error())
}
//printSegmentStruct(segment)
printSegmentStruct(segment)
if !isSegmentChannelRangeInQueryNodeChannelRange(segment) {
return
}
// TODO: fix this after channel range config finished
//if !isSegmentChannelRangeInQueryNodeChannelRange(segment) {
// return
//}
collection := node.GetCollectionByID(segment.CollectionID)
if collection != nil {
@ -131,7 +125,7 @@ func (node *QueryNode) processSegmentCreate(id string, value string) {
}
func (node *QueryNode) processCreate(key string, msg string) {
println("process create", key)
println("process create", key, ":", msg)
if isCollectionObj(key) {
objID := GetCollectionObjId(key)
node.processCollectionCreate(objID, msg)
@ -144,18 +138,19 @@ func (node *QueryNode) processCreate(key string, msg string) {
}
func (node *QueryNode) processSegmentModify(id string, value string) {
// println("Modify Segment: ", id)
println("Modify Segment: ", id)
segment, err := mock.JSON2Segment(value)
if err != nil {
println("error of json 2 segment")
println(err.Error())
}
// printSegmentStruct(segment)
printSegmentStruct(segment)
if !isSegmentChannelRangeInQueryNodeChannelRange(segment) {
return
}
// TODO: fix this after channel range config finished
//if !isSegmentChannelRangeInQueryNodeChannelRange(segment) {
// return
//}
seg, err := node.GetSegmentBySegmentID(int64(segment.SegmentID)) // todo change to uint64
if seg != nil {
@ -164,13 +159,13 @@ func (node *QueryNode) processSegmentModify(id string, value string) {
}
func (node *QueryNode) processCollectionModify(id string, value string) {
// println("Modify Collection: ", id)
println("Modify Collection: ", id)
collection, err := mock.JSON2Collection(value)
if err != nil {
println("error of json 2 collection")
println(err.Error())
}
// printCollectionStruct(collection)
printCollectionStruct(collection)
goCollection := node.GetCollectionByID(collection.ID)
if goCollection != nil {
@ -180,7 +175,7 @@ func (node *QueryNode) processCollectionModify(id string, value string) {
}
func (node *QueryNode) processModify(key string, msg string) {
// println("process modify")
println("process modify")
if isCollectionObj(key) {
objID := GetCollectionObjId(key)
node.processCollectionModify(objID, msg)
@ -219,7 +214,7 @@ func (node *QueryNode) processResp(resp clientv3.WatchResponse) error {
if err != nil {
return err
}
// println("processResp!!!!!\n")
println("processResp!!!!!\n")
for _, ev := range resp.Events {
if ev.IsCreate() {

View File

@ -16,10 +16,6 @@ import "C"
import (
"encoding/json"
"fmt"
"github.com/czs007/suvlim/conf"
msgPb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/czs007/suvlim/pkg/master/kv"
"github.com/czs007/suvlim/reader/message_client"
"github.com/stretchr/testify/assert"
"log"
"sort"
@ -27,6 +23,9 @@ import (
"sync/atomic"
"time"
msgPb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/czs007/suvlim/pkg/master/kv"
"github.com/czs007/suvlim/reader/message_client"
//"github.com/stretchr/testify/assert"
)
@ -69,21 +68,8 @@ type QueryInfo struct {
type MsgCounter struct {
InsertCounter int64
InsertTime time.Time
DeleteCounter int64
DeleteTime time.Time
SearchCounter int64
SearchTime time.Time
}
type InsertLog struct {
MsgLength int
DurationInMilliseconds int64
InsertTime time.Time
NumSince int64
Speed float64
}
type QueryNode struct {
@ -99,7 +85,6 @@ type QueryNode struct {
insertData InsertData
kvBase *kv.EtcdKVBase
msgCounter *MsgCounter
InsertLogs []InsertLog
}
func NewQueryNode(queryNodeId uint64, timeSync uint64) *QueryNode {
@ -109,7 +94,7 @@ func NewQueryNode(queryNodeId uint64, timeSync uint64) *QueryNode {
ReadTimeSyncMin: timeSync,
ReadTimeSyncMax: timeSync,
WriteTimeSync: timeSync,
ServiceTimeSync: timeSync,
ServiceTimeSync: timeSync,
TSOTimeSync: timeSync,
}
@ -149,7 +134,7 @@ func CreateQueryNode(queryNodeId uint64, timeSync uint64, mc *message_client.Mes
ReadTimeSyncMin: timeSync,
ReadTimeSyncMax: timeSync,
WriteTimeSync: timeSync,
ServiceTimeSync: timeSync,
ServiceTimeSync: timeSync,
TSOTimeSync: timeSync,
}
@ -164,11 +149,8 @@ func CreateQueryNode(queryNodeId uint64, timeSync uint64, mc *message_client.Mes
msgCounter := MsgCounter{
InsertCounter: 0,
InsertTime: time.Now(),
DeleteCounter: 0,
DeleteTime: time.Now(),
SearchCounter: 0,
SearchTime: time.Now(),
}
return &QueryNode{
@ -179,7 +161,6 @@ func CreateQueryNode(queryNodeId uint64, timeSync uint64, mc *message_client.Mes
queryNodeTimeSync: queryNodeTimeSync,
buffer: buffer,
msgCounter: &msgCounter,
InsertLogs: make([]InsertLog, 0),
}
}
@ -264,37 +245,13 @@ func (node *QueryNode) InitQueryNodeCollection() {
func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
const Debug = true
const CountInsertMsgBaseline = 1000 * 1000
var BaselineCounter int64 = 0
const CountMsgNum = 1000 * 1000
if Debug {
for {
var msgLen = node.PrepareBatchMsg()
var timeRange = TimeRange{node.messageClient.TimeSyncStart(), node.messageClient.TimeSyncEnd()}
assert.NotEqual(nil, 0, timeRange.timestampMin)
assert.NotEqual(nil, 0, timeRange.timestampMax)
var printFlag = true
var startTime = true
var start time.Time
if node.msgCounter.InsertCounter/CountInsertMsgBaseline != BaselineCounter {
node.WriteQueryLog()
BaselineCounter = node.msgCounter.InsertCounter/CountInsertMsgBaseline
}
if msgLen[0] == 0 && len(node.buffer.InsertDeleteBuffer) <= 0 {
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
continue
}
node.QueryNodeDataInit()
node.MessagesPreprocess(node.messageClient.InsertOrDeleteMsg, timeRange)
//fmt.Println("MessagesPreprocess Done")
node.WriterDelete()
node.PreInsertAndDelete()
//fmt.Println("PreInsertAndDelete Done")
node.DoInsertAndDelete()
//fmt.Println("DoInsertAndDelete Done")
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
}
} else {
for {
var msgLen = node.PrepareBatchMsg()
var timeRange = TimeRange{node.messageClient.TimeSyncStart(), node.messageClient.TimeSyncEnd()}
@ -306,6 +263,12 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
continue
}
if startTime {
fmt.Println("============> Start Test <============")
startTime = false
start = time.Now()
}
node.QueryNodeDataInit()
node.MessagesPreprocess(node.messageClient.InsertOrDeleteMsg, timeRange)
//fmt.Println("MessagesPreprocess Done")
@ -315,8 +278,37 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
node.DoInsertAndDelete()
//fmt.Println("DoInsertAndDelete Done")
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
// Test insert time
if printFlag && node.msgCounter.InsertCounter >= CountMsgNum {
printFlag = false
timeSince := time.Since(start)
fmt.Println("============> Do", node.msgCounter.InsertCounter, "Insert in", timeSince, "<============")
}
}
}
for {
var msgLen = node.PrepareBatchMsg()
var timeRange = TimeRange{node.messageClient.TimeSyncStart(), node.messageClient.TimeSyncEnd()}
assert.NotEqual(nil, 0, timeRange.timestampMin)
assert.NotEqual(nil, 0, timeRange.timestampMax)
if msgLen[0] == 0 && len(node.buffer.InsertDeleteBuffer) <= 0 {
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
continue
}
node.QueryNodeDataInit()
node.MessagesPreprocess(node.messageClient.InsertOrDeleteMsg, timeRange)
//fmt.Println("MessagesPreprocess Done")
node.WriterDelete()
node.PreInsertAndDelete()
//fmt.Println("PreInsertAndDelete Done")
node.DoInsertAndDelete()
//fmt.Println("DoInsertAndDelete Done")
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
}
wg.Done()
}
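
Earlier in RunInsertDelete, one branch flushes the query log whenever node.msgCounter.InsertCounter/CountInsertMsgBaseline changes, which happens roughly once per CountInsertMsgBaseline inserts. The same integer-division checkpoint in isolation (hypothetical names, sketch only):

package main

import "fmt"

const countInsertMsgBaseline = 1000 * 1000

func main() {
	var counter, lastCheckpoint int64
	flushLog := func() { fmt.Println("flush insert log at", counter) }

	for i := 0; i < 3; i++ {
		counter += 1200 * 1000 // pretend another 1.2M inserts were processed
		if counter/countInsertMsgBaseline != lastCheckpoint {
			flushLog()
			lastCheckpoint = counter / countInsertMsgBaseline
		}
	}
}
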
@ -340,16 +332,16 @@ func (node *QueryNode) RunSearch(wg *sync.WaitGroup) {
node.messageClient.SearchMsg = node.messageClient.SearchMsg[:0]
node.messageClient.SearchMsg = append(node.messageClient.SearchMsg, msg)
fmt.Println("Do Search...")
//for {
//if node.messageClient.SearchMsg[0].Timestamp < node.queryNodeTimeSync.ServiceTimeSync {
var status = node.Search(node.messageClient.SearchMsg)
if status.ErrorCode != 0 {
fmt.Println("Search Failed")
node.PublishFailedSearchResult()
for {
if node.messageClient.SearchMsg[0].Timestamp < node.queryNodeTimeSync.ServiceTimeSync {
var status = node.Search(node.messageClient.SearchMsg)
if status.ErrorCode != 0 {
fmt.Println("Search Failed")
node.PublishFailedSearchResult()
}
break
}
}
//break
//}
//}
default:
}
}
@ -492,9 +484,9 @@ func (node *QueryNode) PreInsertAndDelete() msgPb.Status {
func (node *QueryNode) DoInsertAndDelete() msgPb.Status {
var wg sync.WaitGroup
// Do insert
for segmentID := range node.insertData.insertRecords {
for segmentID, records := range node.insertData.insertRecords {
wg.Add(1)
go node.DoInsert(segmentID, &wg)
go node.DoInsert(segmentID, &records, &wg)
}
// Do delete
@ -512,7 +504,7 @@ func (node *QueryNode) DoInsertAndDelete() msgPb.Status {
return msgPb.Status{ErrorCode: msgPb.ErrorCode_SUCCESS}
}
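
In the variant of DoInsertAndDelete that passes &records to each goroutine, the pointer refers to the range variable; with the Go toolchains current when this commit was made (pre-1.22 loop semantics) that variable is reused on every iteration, so several goroutines can end up reading the last segment's records. A sketch of the usual workaround, copying into per-iteration locals (types are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	insertRecords := map[int64][][]byte{
		1: {[]byte("a")},
		2: {[]byte("b"), []byte("c")},
	}

	var wg sync.WaitGroup
	for segmentID, records := range insertRecords {
		segmentID, records := segmentID, records // per-iteration copies; safe to share with a goroutine
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("segment", segmentID, "rows", len(records))
		}()
	}
	wg.Wait()
}
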
func (node *QueryNode) DoInsert(segmentID int64, wg *sync.WaitGroup) msgPb.Status {
func (node *QueryNode) DoInsert(segmentID int64, records *[][]byte, wg *sync.WaitGroup) msgPb.Status {
fmt.Println("Doing insert..., len = ", len(node.insertData.insertIDs[segmentID]))
var targetSegment, err = node.GetSegmentBySegmentID(segmentID)
if err != nil {
@ -522,13 +514,10 @@ func (node *QueryNode) DoInsert(segmentID int64, wg *sync.WaitGroup) msgPb.Statu
ids := node.insertData.insertIDs[segmentID]
timestamps := node.insertData.insertTimestamps[segmentID]
records := node.insertData.insertRecords[segmentID]
offsets := node.insertData.insertOffset[segmentID]
err = targetSegment.SegmentInsert(offsets, &ids, &timestamps, &records)
node.QueryLog(len(ids))
node.msgCounter.InsertCounter += int64(len(ids))
err = targetSegment.SegmentInsert(offsets, &ids, &timestamps, records)
if err != nil {
fmt.Println(err.Error())
return msgPb.Status{ErrorCode: 1}
@ -567,7 +556,7 @@ func (node *QueryNode) QueryJson2Info(queryJson *string) *QueryInfo {
return nil
}
//fmt.Println(query)
fmt.Println(query)
return &query
}
@ -594,8 +583,8 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
// So the ServiceTimeSync is always less than searchTimestamp.
// Here, we manually subtract `conf.Config.Timesync.Interval` milliseconds from searchTimestamp's logic time.
// Which means `searchTimestamp.logicTime = searchTimestamp.logicTime - conf.Config.Timesync.Interval`.
var logicTimestamp = searchTimestamp << 46 >> 46
searchTimestamp = (searchTimestamp>>18-uint64(conf.Config.Timesync.Interval+600))<<18 + logicTimestamp
// var logicTimestamp = searchTimestamp << 46 >> 46
// searchTimestamp = (searchTimestamp >> 18 - uint64(conf.Config.Timesync.Interval)) << 18 + logicTimestamp
var vector = msg.Records
// For now, only the first Json is valid.
@ -604,7 +593,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
// 1. Timestamp check
// TODO: return or wait? Or adding graceful time
if searchTimestamp > node.queryNodeTimeSync.ServiceTimeSync {
fmt.Println("Invalid query time, timestamp = ", searchTimestamp>>18, ", SearchTimeSync = ", node.queryNodeTimeSync.ServiceTimeSync>>18)
fmt.Println("Invalid query time, timestamp = ", searchTimestamp >> 18, ", SearchTimeSync = ", node.queryNodeTimeSync.ServiceTimeSync >> 18)
return msgPb.Status{ErrorCode: 1}
}
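
The shift arithmetic in Search treats the timestamp as a hybrid value: judging from << 46 >> 46 on a 64-bit word, the low 18 bits hold a logical counter and the remaining high bits hold physical time, and subtracting from the >> 18 part moves the read point earlier by that many time units. A small sketch of splitting and recomposing such a value under that assumption (the layout is inferred from the code, not stated elsewhere in this diff):

package main

import "fmt"

const logicalBits = 18

// split separates the physical part (high bits) from the 18-bit logical counter (low bits).
func split(ts uint64) (physical, logical uint64) {
	return ts >> logicalBits, ts & ((1 << logicalBits) - 1)
}

// compose packs the two parts back into a single hybrid timestamp.
func compose(physical, logical uint64) uint64 {
	return physical<<logicalBits | logical
}

func main() {
	ts := compose(1602480840000, 7)
	physical, logical := split(ts)
	fmt.Println(physical, logical) // 1602480840000 7

	// Moving the read point earlier, as Search does with the timesync interval:
	earlier := compose(physical-400, logical)
	fmt.Println(split(earlier))
}
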
@ -613,12 +602,6 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
// 3. Do search in all segments
for _, segment := range node.SegmentsMap {
if segment.GetRowCount() <= 0 {
// Skip empty segment
continue
}
//fmt.Println("Search in segment:", segment.SegmentId, ",segment rows:", segment.GetRowCount())
var res, err = segment.SegmentSearch(query, searchTimestamp, vector)
if err != nil {
fmt.Println(err.Error())
@ -634,9 +617,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
sort.Slice(resultsTmp, func(i, j int) bool {
return resultsTmp[i].ResultDistance < resultsTmp[j].ResultDistance
})
if len(resultsTmp) > query.TopK {
resultsTmp = resultsTmp[:query.TopK]
}
resultsTmp = resultsTmp[:query.TopK]
var entities = msgPb.Entities{
Ids: make([]int64, 0),
}
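
Re-slicing with resultsTmp[:query.TopK] assumes at least TopK hits were collected; when fewer come back and TopK exceeds the slice's capacity, the expression panics. A defensive variant for comparison (the helper name is made up):

package main

import "fmt"

// truncateTopK keeps at most k results without risking an out-of-range slice expression.
func truncateTopK(results []float32, k int) []float32 {
	if k > len(results) {
		k = len(results)
	}
	return results[:k]
}

func main() {
	distances := []float32{0.12, 0.37}               // only two hits came back
	fmt.Println(truncateTopK(distances, 10))         // [0.12 0.37], no panic
}
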

View File

@ -16,7 +16,6 @@ import (
"fmt"
"github.com/czs007/suvlim/errors"
msgPb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/stretchr/testify/assert"
"strconv"
"unsafe"
)
@ -84,16 +83,15 @@ func (s *Segment) CloseSegment(collection* Collection) error {
}
// Build index after closing segment
//s.SegmentStatus = SegmentIndexing
//fmt.Println("Building index...")
//s.buildIndex(collection)
s.SegmentStatus = SegmentIndexing
fmt.Println("Building index...")
s.buildIndex(collection)
// TODO: remove redundant segment indexed status
// Change segment status to indexed
//s.SegmentStatus = SegmentIndexed
//fmt.Println("Segment closed and indexed")
s.SegmentStatus = SegmentIndexed
fmt.Println("Segment closed and indexed")
fmt.Println("Segment closed")
return nil
}
@ -144,13 +142,9 @@ func (s *Segment) SegmentInsert(offset int64, entityIDs *[]int64, timestamps *[]
var numOfRow = len(*entityIDs)
var sizeofPerRow = len((*records)[0])
assert.Equal(nil, numOfRow, len(*records))
var rawData = make([]byte, numOfRow * sizeofPerRow)
var copyOffset = 0
var rawData = make([]byte, numOfRow*sizeofPerRow)
for i := 0; i < len(*records); i++ {
copy(rawData[copyOffset:], (*records)[i])
copyOffset += sizeofPerRow
copy(rawData, (*records)[i])
}
var cOffset = C.long(offset)
@ -242,7 +236,7 @@ func (s *Segment) SegmentSearch(query *QueryInfo, timestamp uint64, vectorRecord
return nil, errors.New("Search failed, error code = " + strconv.Itoa(int(status)))
}
//fmt.Println("Search Result---- Ids =", resultIds, ", Distances =", resultDistances)
fmt.Println("Search Result---- Ids =", resultIds, ", Distances =", resultDistances)
return &SearchResult{ResultIds: resultIds, ResultDistances: resultDistances}, nil
}
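
In the SegmentInsert hunk above, one variant copies every record to the start of rawData inside the loop, so with more than one row the later copies overwrite the first sizeofPerRow bytes instead of packing rows back to back; the other variant advances a copy offset per row. The offset-based packing in isolation (standalone sketch with hypothetical data):

package main

import "fmt"

// packRows lays fixed-size rows out contiguously in a single buffer.
func packRows(records [][]byte, rowSize int) []byte {
	raw := make([]byte, len(records)*rowSize)
	offset := 0
	for _, rec := range records {
		copy(raw[offset:], rec)
		offset += rowSize
	}
	return raw
}

func main() {
	rows := [][]byte{{1, 2}, {3, 4}, {5, 6}}
	fmt.Println(packRows(rows, 2)) // [1 2 3 4 5 6]
}
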

View File

@ -19,10 +19,11 @@ func (node *QueryNode) SegmentsManagement() {
for _, partition := range collection.Partitions {
for _, segment := range partition.Segments {
if segment.SegmentStatus != SegmentOpened {
log.Println("Segment have been closed")
continue
}
// fmt.Println("timeNow = ", timeNow, "SegmentCloseTime = ", segment.SegmentCloseTime)
fmt.Println("timeNow = ", timeNow, "SegmentCloseTime = ", segment.SegmentCloseTime)
if timeNow >= segment.SegmentCloseTime {
go segment.CloseSegment(collection)
}

View File

@ -1,13 +1,8 @@
package reader
import (
"encoding/json"
"errors"
"fmt"
log "github.com/apache/pulsar/pulsar-client-go/logutil"
"os"
"strconv"
"time"
)
// Function `GetSegmentByEntityId` should return entityIDs, timestamps and segmentIDs
@ -73,54 +68,3 @@ func (c *Collection) GetPartitionByName(partitionName string) (partition *Partit
return nil
// TODO: remove from c.Partitions
}
func (node *QueryNode) QueryLog(length int) {
node.msgCounter.InsertCounter += int64(length)
timeNow := time.Now()
duration := timeNow.Sub(node.msgCounter.InsertTime)
speed := float64(length) / duration.Seconds()
insertLog := InsertLog{
MsgLength: length,
DurationInMilliseconds: duration.Milliseconds(),
InsertTime: timeNow,
NumSince: node.msgCounter.InsertCounter,
Speed: speed,
}
node.InsertLogs = append(node.InsertLogs, insertLog)
node.msgCounter.InsertTime = timeNow
}
func (node *QueryNode) WriteQueryLog() {
f, err := os.OpenFile("/tmp/query_node_insert.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Fatal(err)
}
// write logs
for _, insertLog := range node.InsertLogs {
insertLogJson, err := json.Marshal(&insertLog)
if err != nil {
log.Fatal(err)
}
writeString := string(insertLogJson) + "\n"
fmt.Println(writeString)
_, err2 := f.WriteString(writeString)
if err2 != nil {
log.Fatal(err2)
}
}
// reset InsertLogs buffer
node.InsertLogs = make([]InsertLog, 0)
err = f.Close()
if err != nil {
log.Fatal(err)
}
fmt.Println("write log done")
}

View File

@ -62,8 +62,8 @@ function collect() {
# official go code ship with the crate, so we need to generate it manually.
cd ${ROOT_DIR}/proto
PB_FILES=("message.proto")
GRPC_FILES=("pdpb.proto" "metapb.proto")
PB_FILES=()
GRPC_FILES=("message.proto" "pdpb.proto" "metapb.proto")
ALL_FILES=("${PB_FILES[@]}")
ALL_FILES+=("${GRPC_FILES[@]}")

View File

@ -1,7 +1,7 @@
#!/bin/bash
BUILD_OUTPUT_DIR="cmake_build_release"
BUILD_TYPE="Release"
BUILD_OUTPUT_DIR="cmake_build"
BUILD_TYPE="Debug"
MAKE_CLEAN="OFF"
RUN_CPPLINT="OFF"

View File

@ -25,12 +25,21 @@ struct TestParameters {
std::string port_;
std::string collection_name_;
int64_t id_start_ = -1;
int64_t id_count_ = 0;
int64_t loop_ = 0;
// collection parameters, only works when collection_name_ is empty
int64_t index_type_ = (int64_t)milvus::IndexType::IVFSQ8; // sq8
int64_t index_file_size_ = 1024; // 1024 MB
int64_t nlist_ = 16384;
int64_t metric_type_ = (int64_t)milvus::MetricType::L2; // L2
int64_t dimensions_ = 128;
int64_t row_count_ = 1; // 1 million
// query parameters
int64_t concurrency_ = 20; // 20 connections
int64_t query_count_ = 1000;
int64_t nq_ = 1;
int64_t topk_ = 10;
int64_t nprobe_ = 16;
bool print_result_ = false;
bool is_valid = true;
};

View File

@ -0,0 +1,29 @@
#include <Status.h>
#include <Field.h>
#include <MilvusApi.h>
#include <interface/ConnectionImpl.h>
#include "utils/Utils.h"
int main(int argc , char**argv) {
TestParameters parameters = milvus_sdk::Utils::ParseTestParameters(argc, argv);
if (!parameters.is_valid) {
return 0;
}
auto client = milvus::ConnectionImpl();
milvus::ConnectParam connect_param;
connect_param.ip_address = parameters.address_.empty() ? "127.0.0.1" : parameters.address_;
connect_param.port = parameters.port_.empty() ? "19530" : parameters.port_;
client.Connect(connect_param);
milvus::Status stat;
const std::string collection_name = "collection1";
int64_t count = 0;
stat = client.CountEntities(collection_name, count);
if (!stat.ok()){
std::cerr << "Error: " << stat.message() << std::endl;
}
std::cout << "Collection " << collectin_name << " rows: " << count << std::endl;
}

View File

@ -0,0 +1,55 @@
#include <Status.h>
#include <Field.h>
#include <MilvusApi.h>
#include <interface/ConnectionImpl.h>
#include "utils/Utils.h"
int main(int argc , char**argv) {
TestParameters parameters = milvus_sdk::Utils::ParseTestParameters(argc, argv);
if (!parameters.is_valid){
return 0;
}
auto client = milvus::ConnectionImpl();
milvus::ConnectParam connect_param;
connect_param.ip_address = parameters.address_.empty() ? "127.0.0.1":parameters.address_;
connect_param.port = parameters.port_.empty() ? "19530":parameters.port_ ;
client.Connect(connect_param);
milvus::Status stat;
const std::string collection_name = "collection0";
// Create
// milvus::FieldPtr field_ptr1 = std::make_shared<milvus::Field>();
// milvus::FieldPtr field_ptr2 = std::make_shared<milvus::Field>();
milvus::FieldPtr field_ptr3 = std::make_shared<milvus::Field>();
milvus::FieldPtr field_ptr4 = std::make_shared<milvus::Field>();
// field_ptr1->field_name = "field_1";
// field_ptr1->field_type = milvus::DataType::INT64;
//
// field_ptr2->field_name = "field_2";
// field_ptr2->field_type = milvus::DataType::FLOAT;
field_ptr3->field_name = "field_3";
field_ptr3->field_type = milvus::DataType::INT32;
field_ptr3->dim = 1;
field_ptr4->field_name = "field_vec";
field_ptr4->field_type = milvus::DataType::VECTOR_FLOAT;
field_ptr4->dim = 16;
// milvus::Mapping mapping = {collection_name, {field_ptr1, field_ptr2, field_ptr3, field_ptr4}};
milvus::Mapping mapping = {collection_name, {field_ptr3, field_ptr4}};
stat = client.CreateCollection(mapping, "test_extra_params");
// Get Collection info
milvus::Mapping map;
client.GetCollectionInfo(collection_name, map);
for (auto &f : map.fields) {
std::cout << f->field_name << ":" << int(f->field_type) << ":" << f->dim << "DIM" << std::endl;
}
}

View File

@ -1,101 +0,0 @@
#include <Status.h>
#include <Field.h>
#include <MilvusApi.h>
#include <interface/ConnectionImpl.h>
#include "utils/Utils.h"
const int DIM = 128;
bool check_field(milvus::FieldPtr left, milvus::FieldPtr right){
if (left->field_name != right->field_name){
std::cout<<"filed_name not match! want "<< left->field_name << " but get "<<right->field_name << std::endl;
return false;
}
if (left->field_type != right->field_type){
std::cout<<"filed_type not match! want "<< int(left->field_type) << " but get "<< int(right->field_type) << std::endl;
return false;
}
if (left->dim != right->dim){
std::cout<<"dim not match! want "<< left->dim << " but get "<<right->dim << std::endl;
return false;
}
return true;
}
bool check_schema(const milvus::Mapping & map){
// Get Collection info
bool ret = false;
milvus::FieldPtr field_ptr1 = std::make_shared<milvus::Field>();
milvus::FieldPtr field_ptr2 = std::make_shared<milvus::Field>();
field_ptr1->field_name = "age";
field_ptr1->field_type = milvus::DataType::INT32;
field_ptr1->dim = 1;
field_ptr2->field_name = "field_vec";
field_ptr2->field_type = milvus::DataType::VECTOR_FLOAT;
field_ptr2->dim = DIM;
std::vector<milvus::FieldPtr> fields{field_ptr1, field_ptr2};
auto size_ = map.fields.size();
for ( int i =0; i != size_; ++ i){
auto ret = check_field(fields[i], map.fields[i]);
if (!ret){
return false;
}
}
for (auto &f : map.fields) {
std::cout << f->field_name << ":" << int(f->field_type) << ":" << f->dim << "DIM" << std::endl;
}
return true;
}
int main(int argc , char**argv) {
TestParameters parameters = milvus_sdk::Utils::ParseTestParameters(argc, argv);
if (!parameters.is_valid) {
return 0;
}
if (parameters.collection_name_.empty()){
std::cout<< "should specify collection name!" << std::endl;
milvus_sdk::Utils::PrintHelp(argc, argv);
return 0;
}
const std::string collection_name = parameters.collection_name_;
auto client = milvus::ConnectionImpl();
milvus::ConnectParam connect_param;
connect_param.ip_address = parameters.address_.empty() ? "127.0.0.1" : parameters.address_;
connect_param.port = parameters.port_.empty() ? "19530" : parameters.port_;
client.Connect(connect_param);
milvus::Mapping map;
client.GetCollectionInfo(collection_name, map);
auto check_ret = check_schema(map);
if (!check_ret){
std::cout<<" Schema is not right!"<< std::endl;
return 0;
}
milvus::Status stat;
int64_t count = 0;
stat = client.CountEntities(collection_name, count);
if (!stat.ok()){
std::cerr << "Error: " << stat.message() << std::endl;
}
std::cout << "Collection " <<collection_name<< " rows: " << count << std::endl;
return 0;
}

View File

@ -1,64 +0,0 @@
#include <Status.h>
#include <Field.h>
#include <MilvusApi.h>
#include <interface/ConnectionImpl.h>
#include "utils/Utils.h"
const int DIM = 512;
int main(int argc , char**argv) {
TestParameters parameters = milvus_sdk::Utils::ParseTestParameters(argc, argv);
if (!parameters.is_valid){
return 0;
}
if (parameters.collection_name_.empty()){
std::cout<< "should specify collection name!" << std::endl;
milvus_sdk::Utils::PrintHelp(argc, argv);
return 0;
}
auto client = milvus::ConnectionImpl();
milvus::ConnectParam connect_param;
connect_param.ip_address = parameters.address_.empty() ? "127.0.0.1":parameters.address_;
connect_param.port = parameters.port_.empty() ? "19530":parameters.port_ ;
client.Connect(connect_param);
const std::string collection_name = parameters.collection_name_;
// Create
milvus::FieldPtr field_ptr1 = std::make_shared<milvus::Field>();
milvus::FieldPtr field_ptr2 = std::make_shared<milvus::Field>();
field_ptr1->field_name = "age";
field_ptr1->field_type = milvus::DataType::INT32;
field_ptr1->dim = 1;
field_ptr2->field_name = "field_vec";
field_ptr2->field_type = milvus::DataType::VECTOR_FLOAT;
field_ptr2->dim = DIM;
milvus::Mapping mapping = {collection_name, {field_ptr1, field_ptr2}};
milvus::Status stat;
stat = client.CreateCollection(mapping, "extra_params");
if (!stat.ok()){
std::cout << "create collection failed!" << std::endl;
return 0;
}
std::cout << "create collection done!" << std::endl;
// Get Collection info
milvus::Mapping map;
client.GetCollectionInfo(collection_name, map);
for (auto &f : map.fields) {
std::cout << f->field_name << ":" << int(f->field_type) << ":" << f->dim << "DIM" << std::endl;
}
return 0;
}

View File

@ -15,31 +15,12 @@
#include <string>
#include "interface/ConnectionImpl.h"
#include "utils/Utils.h"
int ID_START = 0;
void generate_ids(std::vector<int64_t> & ids_array, int count);
void generate_ids(std::vector<int64_t>& ids_array, int count) {
for (int i = 0; i < count; i++) {
ids_array.push_back(ID_START++);
}
}
int
main(int argc, char *argv[]) {
TestParameters parameters = milvus_sdk::Utils::ParseTestParameters(argc, argv);
if (!parameters.is_valid){
return 0;
}
if (parameters.collection_name_.empty()){
std::cout<< "should specify collection name!" << std::endl;
milvus_sdk::Utils::PrintHelp(argc, argv);
return 0;
}
const std::string collection_name = parameters.collection_name_;
auto client = milvus::ConnectionImpl();
milvus::ConnectParam connect_param;
connect_param.ip_address = parameters.address_.empty() ? "127.0.0.1":parameters.address_;
@ -48,8 +29,10 @@ main(int argc, char *argv[]) {
client.Connect(connect_param);
std::vector<int64_t> delete_ids;
generate_ids(delete_ids, 3);
client.DeleteEntityByID(collection_name, delete_ids);
delete_ids.push_back(1);
delete_ids.push_back(2);
delete_ids.push_back(3);
client.DeleteEntityByID("collection1", delete_ids);
return 0;
}

View File

@ -21,33 +21,21 @@
#include "utils/TimeRecorder.h"
#include <random>
int N = 6000000;
int DIM = 512;
int LOOP = 2000;
const int N = 200000;
const int DIM = 16;
const int LOOP = 10;
int ID_START = 0;
std::default_random_engine eng(42);
void generate_ids(std::vector<int64_t> & ids_array, int count);
void generate_ids(std::vector<int64_t>& ids_array, int count) {
for (int i = 0; i < count; i++) {
ids_array.push_back(ID_START++);
}
}
const milvus::FieldValue GetData(int count) {
const milvus::FieldValue GetData() {
milvus::FieldValue value_map;
std::vector<int32_t> int32_data;
for (int i = 0; i < count; i++) {
int32_data.push_back(ID_START++);
for (int i = 0; i < N; i++) {
int32_data.push_back(i);
}
std::default_random_engine eng(rand() % 20);
std::normal_distribution<float> dis(0, 1);
std::vector<milvus::VectorData> vector_data;
for (int i = 0; i < count; i++) {
for (int i = 0; i < N; i++) {
std::vector<float> float_data(DIM);
for(auto &x: float_data) {
x = dis(eng);
@ -59,64 +47,10 @@ const milvus::FieldValue GetData(int count) {
value_map.int32_value["INT32"] = int32_data;
value_map.vector_value["VECTOR"] = vector_data;
value_map.row_num = count;
value_map.row_num = N;
return value_map;
}
bool check_field(milvus::FieldPtr left, milvus::FieldPtr right){
if (left->field_name != right->field_name){
std::cout<<"filed_name not match! want "<< left->field_name << " but get "<<right->field_name << std::endl;
return false;
}
if (left->field_type != right->field_type){
std::cout<<"filed_type not match! want "<< int(left->field_type) << " but get "<< int(right->field_type) << std::endl;
return false;
}
if (left->dim != right->dim){
std::cout<<"dim not match! want "<< left->dim << " but get "<<right->dim << std::endl;
return false;
}
return true;
}
bool check_schema(const milvus::Mapping & map){
// Get Collection info
bool ret = false;
milvus::FieldPtr field_ptr1 = std::make_shared<milvus::Field>();
milvus::FieldPtr field_ptr2 = std::make_shared<milvus::Field>();
field_ptr1->field_name = "age";
field_ptr1->field_type = milvus::DataType::INT32;
field_ptr1->dim = 1;
field_ptr2->field_name = "field_vec";
field_ptr2->field_type = milvus::DataType::VECTOR_FLOAT;
field_ptr2->dim = DIM;
std::vector<milvus::FieldPtr> fields{field_ptr1, field_ptr2};
auto size_ = map.fields.size();
for ( int i =0; i != size_; ++ i){
auto ret = check_field(fields[i], map.fields[i]);
if (!ret){
return false;
}
}
for (auto &f : map.fields) {
std::cout << f->field_name << ":" << int(f->field_type) << ":" << f->dim << "DIM" << std::endl;
}
return true;
}
int
main(int argc, char* argv[]) {
TestParameters parameters = milvus_sdk::Utils::ParseTestParameters(argc, argv);
@ -124,83 +58,24 @@ main(int argc, char* argv[]) {
return 0;
}
if (parameters.collection_name_.empty()){
std::cout<< "should specify collection name!" << std::endl;
milvus_sdk::Utils::PrintHelp(argc, argv);
return 0;
}
if (parameters.id_start_ < 0){
std::cout<< "id_start should >= 0 !" << std::endl;
milvus_sdk::Utils::PrintHelp(argc, argv);
return 0;
}
if (parameters.id_count_ <= 0){
std::cout<< "id_count should > 0 !" << std::endl;
milvus_sdk::Utils::PrintHelp(argc, argv);
return 0;
}
if (parameters.loop_ <= 0){
std::cout<< "loop should > 0 !" << std::endl;
milvus_sdk::Utils::PrintHelp(argc, argv);
return 0;
}
N = parameters.id_count_;
ID_START = parameters.id_start_;
LOOP = parameters.loop_;
std::cout<<"N: " << N << std::endl;
std::cout<<"ID_START: " << ID_START << std::endl;
std::cout<<"LOOP: " << LOOP << std::endl;
const std::string collection_name = parameters.collection_name_;
auto client = milvus::ConnectionImpl();
milvus::ConnectParam connect_param;
connect_param.ip_address = parameters.address_.empty() ? "127.0.0.1":parameters.address_;
connect_param.port = parameters.port_.empty() ? "19530":parameters.port_ ;
client.Connect(connect_param);
milvus::Mapping map;
client.GetCollectionInfo(collection_name, map);
auto check_ret = check_schema(map);
if (!check_ret){
std::cout<<" Schema is not right!"<< std::endl;
return 0;
std::vector<int64_t> ids_array;
auto data = GetData();
for (int64_t i = 0; i < N; i++) {
ids_array.push_back(i);
}
int per_count = N / LOOP;
int failed_count = 0;
std::cout<<"PER_COUNT: " << per_count << std::endl;
milvus_sdk::TimeRecorder insert_timer("insert");
for (int64_t i = 0, j=0; j < N;) {
i=j;
j += per_count;
if( j > N ) j = N;
std::vector<int64_t> ids_array;
generate_ids(ids_array, j - i);
auto data = GetData(j - i);
insert_timer.Start();
auto status = client.Insert(collection_name, "default", data, ids_array);
if (!status.ok()){
failed_count += 1;
}
insert_timer.End();
milvus_sdk::TimeRecorder insert("insert");
for (int j = 0; j < LOOP; ++j) {
auto status = client.Insert("collection0", "tag01", data, ids_array);
if (!status.ok()){
return -1;
}
}
if (failed_count > 0) {
std::cout <<" test done, failed_count is :" << failed_count<< std::endl;
}
insert_timer.Print(LOOP);
return 0;
}

View File

@ -1,3 +1,5 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
@ -18,24 +20,34 @@
#include <random>
const int TOP_K = 10;
const int LOOP = 1000;
const int DIM = 512;
int main(int argc , char**argv) {
TestParameters parameters = milvus_sdk::Utils::ParseTestParameters(argc, argv);
if (!parameters.is_valid){
return 0;
}
std::default_random_engine eng(42);
const milvus::VectorParam
get_vector_param() {
auto client = milvus::ConnectionImpl();
milvus::ConnectParam connect_param;
connect_param.ip_address = parameters.address_.empty() ? "127.0.0.1":parameters.address_;
connect_param.port = parameters.port_.empty() ? "19530":parameters.port_ ;
client.Connect(connect_param);
std::vector<int64_t> ids_array;
std::vector<std::string> partition_list;
partition_list.emplace_back("partition-1");
partition_list.emplace_back("partition-2");
partition_list.emplace_back("partition-3");
milvus::VectorParam vectorParam;
std::vector<milvus::VectorData> vector_records;
std::default_random_engine eng(rand() % 20);
std::normal_distribution<float> dis(0, 1);
for (int j = 0; j < 1; ++j) {
for (int j = 0; j < 10; ++j) {
milvus::VectorData vectorData;
std::vector<float> float_data;
for (int i = 0; i < DIM; ++i) {
for (int i = 0; i < 100; ++i) {
float_data.emplace_back(dis(eng));
}
@ -52,106 +64,11 @@ get_vector_param() {
vectorParam.json_param = vector_param_json_string;
vectorParam.vector_records = vector_records;
return vectorParam;
}
bool check_field(milvus::FieldPtr left, milvus::FieldPtr right){
if (left->field_name != right->field_name){
std::cout<<"filed_name not match! want "<< left->field_name << " but get "<<right->field_name << std::endl;
return false;
}
if (left->field_type != right->field_type){
std::cout<<"filed_type not match! want "<< int(left->field_type) << " but get "<< int(right->field_type) << std::endl;
return false;
}
if (left->dim != right->dim){
std::cout<<"dim not match! want "<< left->dim << " but get "<<right->dim << std::endl;
return false;
}
return true;
}
bool check_schema(const milvus::Mapping & map){
// Get Collection info
bool ret = false;
milvus::FieldPtr field_ptr1 = std::make_shared<milvus::Field>();
milvus::FieldPtr field_ptr2 = std::make_shared<milvus::Field>();
field_ptr1->field_name = "age";
field_ptr1->field_type = milvus::DataType::INT32;
field_ptr1->dim = 1;
field_ptr2->field_name = "field_vec";
field_ptr2->field_type = milvus::DataType::VECTOR_FLOAT;
field_ptr2->dim = DIM;
std::vector<milvus::FieldPtr> fields{field_ptr1, field_ptr2};
auto size_ = map.fields.size();
for ( int i =0; i != size_; ++ i){
auto ret = check_field(fields[i], map.fields[i]);
if (!ret){
return false;
}
}
for (auto &f : map.fields) {
std::cout << f->field_name << ":" << int(f->field_type) << ":" << f->dim << "DIM" << std::endl;
}
return true;
}
int main(int argc , char**argv) {
TestParameters parameters = milvus_sdk::Utils::ParseTestParameters(argc, argv);
if (!parameters.is_valid){
return 0;
}
if (parameters.collection_name_.empty()){
std::cout<< "should specify collection name!" << std::endl;
milvus_sdk::Utils::PrintHelp(argc, argv);
return 0;
}
const std::string collection_name = parameters.collection_name_;
auto client = milvus::ConnectionImpl();
milvus::ConnectParam connect_param;
connect_param.ip_address = parameters.address_.empty() ? "127.0.0.1":parameters.address_;
connect_param.port = parameters.port_.empty() ? "19530":parameters.port_ ;
client.Connect(connect_param);
milvus::Mapping map;
client.GetCollectionInfo(collection_name, map);
auto check_ret = check_schema(map);
if (!check_ret){
std::cout<<" Schema is not right!"<< std::endl;
return 0;
}
std::vector<std::string> partition_list;
partition_list.emplace_back("default");
auto vectorParam = get_vector_param();
milvus::TopKQueryResult result;
milvus_sdk::TimeRecorder test_search("search");
for (int k = 0; k < LOOP; ++k) {
test_search.Start();
auto status = client.Search(collection_name, partition_list, "dsl", vectorParam, result);
test_search.End();
}
test_search.Print(LOOP);
return 0;
milvus_sdk::TimeRecorder test_search("search");
auto status = client.Search("collection0", partition_list, "dsl", vectorParam, result);
return 0;
}

View File

@ -16,26 +16,14 @@
namespace milvus_sdk {
TimeRecorder::TimeRecorder(const std::string& title) : title_(title) {
start_ = std::chrono::system_clock::now();
std::cout << title_ << " begin..." << std::endl;
}
void TimeRecorder::Start() {
start_ = std::chrono::system_clock::now();
}
void TimeRecorder::End() {
end_ = std::chrono::system_clock::now();
int64_t span = (std::chrono::duration_cast<std::chrono::milliseconds>(end_ - start_)).count();
total_time_ = total_time_ + span;
}
void TimeRecorder::Print(int loop) {
uint64_t per_cost = total_time_ / loop;
std::cout << title_ << " totally cost: " << total_time_ << " ms" << std::endl;
std::cout << title_ << " per cost: " << per_cost << " ms" << std::endl;
}
TimeRecorder::~TimeRecorder() {
std::chrono::system_clock::time_point end = std::chrono::system_clock::now();
int64_t span = (std::chrono::duration_cast<std::chrono::milliseconds>(end - start_)).count();
std::cout << title_ << " totally cost: " << span << " ms" << std::endl;
}
} // namespace milvus_sdk

View File

@ -19,16 +19,12 @@ namespace milvus_sdk {
class TimeRecorder {
public:
explicit TimeRecorder(const std::string& title);
void Start();
void End();
void Print(int loop);
~TimeRecorder();
private:
std::string title_;
std::chrono::system_clock::time_point start_;
std::chrono::system_clock::time_point end_;
int64_t total_time_ = 0;
};
} // namespace milvus_sdk

View File

@ -38,11 +38,21 @@ print_help(const std::string& app_name) {
printf(" -t --collection_name target collection name, specify this will ignore collection parameters, "
"default empty\n");
printf(" -h --help Print help information\n");
printf(" -i --id_start "
"id_start, default:-1\n");
printf(" -c --count id count, default:0\n");
printf(" -l --loop loop, default:0\n");
printf(" -i --index "
"Collection index type(1=IDMAP, 2=IVFLAT, 3=IVFSQ8, 5=IVFSQ8H), default:3\n");
printf(" -f --index_file_size Collection index file size, default:1024\n");
printf(" -l --nlist Collection index nlist, default:16384\n");
printf(" -m --metric "
"Collection metric type(1=L2, 2=IP, 3=HAMMING, 4=JACCARD, 5=TANIMOTO, 6=SUBSTRUCTURE, 7=SUPERSTRUCTURE), "
"default:1\n");
printf(" -d --dimension Collection dimension, default:128\n");
printf(" -r --rowcount Collection total row count(unit:million), default:1\n");
printf(" -c --concurrency Max client connections, default:20\n");
printf(" -q --query_count Query total count, default:1000\n");
printf(" -n --nq nq of each query, default:1\n");
printf(" -k --topk topk of each query, default:10\n");
printf(" -b --nprobe nprobe of each query, default:16\n");
printf(" -v --print_result Print query result, default:false\n");
printf("\n");
}
@ -462,9 +472,10 @@ Utils::PrintTopKQueryResult(milvus::TopKQueryResult& topk_query_result) {
void
Utils::PrintHelp(int argc, char* argv[]) {
std::string app_name = basename(argv[0]);
print_help(app_name);
Utils::HAHE(int argc){
std::cout<<"FUCK"<<std::endl;
}
@ -476,10 +487,18 @@ static struct option long_options[] = {{"server", optional_argument, nullptr, 's
{"port", optional_argument, nullptr, 'p'},
{"help", no_argument, nullptr, 'h'},
{"collection_name", no_argument, nullptr, 't'},
{"id_start", optional_argument, nullptr, 'i'},
{"count", optional_argument, nullptr, 'c'},
{"loop", optional_argument, nullptr, 'l'},
{"index", optional_argument, nullptr, 'i'},
{"index_file_size", optional_argument, nullptr, 'f'},
{"nlist", optional_argument, nullptr, 'l'},
{"metric", optional_argument, nullptr, 'm'},
{"dimension", optional_argument, nullptr, 'd'},
{"rowcount", optional_argument, nullptr, 'r'},
{"concurrency", optional_argument, nullptr, 'c'},
{"query_count", optional_argument, nullptr, 'q'},
{"nq", optional_argument, nullptr, 'n'},
{"topk", optional_argument, nullptr, 'k'},
{"nprobe", optional_argument, nullptr, 'b'},
{"print", optional_argument, nullptr, 'v'},
{nullptr, 0, nullptr, 0}};
int option_index = 0;
@ -487,7 +506,7 @@ static struct option long_options[] = {{"server", optional_argument, nullptr, 's
TestParameters parameters;
int value;
while ((value = getopt_long(argc, argv, "s:p:t:i:l:c:k:h", long_options, &option_index)) != -1) {
while ((value = getopt_long(argc, argv, "s:p:t:i:f:l:m:d:r:c:q:n:k:b:vh", long_options, &option_index)) != -1) {
switch (value) {
case 's': {
char* address_ptr = strdup(optarg);
@ -509,29 +528,74 @@ static struct option long_options[] = {{"server", optional_argument, nullptr, 's
}
case 'i': {
char* ptr = strdup(optarg);
parameters.id_start_ = atol(ptr);
parameters.index_type_ = atol(ptr);
free(ptr);
break;
}
case 'c': {
case 'f': {
char* ptr = strdup(optarg);
parameters.id_count_ = atol(ptr);
parameters.index_file_size_ = atol(ptr);
free(ptr);
break;
}
case 'l': {
char* ptr = strdup(optarg);
parameters.loop_ = atol(ptr);
parameters.nlist_ = atol(ptr);
free(ptr);
break;
}
case 'm': {
char* ptr = strdup(optarg);
parameters.metric_type_ = atol(ptr);
free(ptr);
break;
}
case 'd': {
char* ptr = strdup(optarg);
parameters.dimensions_ = atol(ptr);
free(ptr);
break;
}
case 'r': {
char* ptr = strdup(optarg);
parameters.row_count_ = atol(ptr);
free(ptr);
break;
}
case 'c': {
char* ptr = strdup(optarg);
parameters.concurrency_ = atol(ptr);
free(ptr);
break;
}
case 'q': {
char* ptr = strdup(optarg);
parameters.query_count_ = atol(ptr);
free(ptr);
break;
}
case 'n': {
char* ptr = strdup(optarg);
parameters.nq_ = atol(ptr);
free(ptr);
break;
}
case 'k': {
char* ptr = strdup(optarg);
parameters.topk_ = atol(ptr);
free(ptr);
break;
}
case 'b': {
char* ptr = strdup(optarg);
parameters.nprobe_ = atol(ptr);
free(ptr);
break;
}
case 'v': {
parameters.print_result_ = true;
break;
}
case 'h':
default:
print_help(app_name);

View File

@ -94,8 +94,8 @@ class Utils {
static TestParameters
ParseTestParameters(int argc, char* argv[]);
static
void PrintHelp(int argc, char* argv[]);
static void
HAHE(int argc);
};

View File

@ -6,13 +6,12 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/czs007/suvlim/conf"
// "github.com/aws/aws-sdk-go/service/s3/s3manager"
// "github.com/aws/aws-sdk-go/service/s3/s3manager"
. "github.com/czs007/suvlim/storage/pkg/types"
"io"
)
var bucketName = conf.Config.Writer.Bucket
var bucketName = "zilliz-hz"
type S3Store struct {
client *s3.S3

View File

@ -3,13 +3,12 @@ package minio_driver
import (
"bytes"
"context"
"github.com/czs007/suvlim/conf"
. "github.com/czs007/suvlim/storage/pkg/types"
"github.com/minio/minio-go/v7"
"io"
. "github.com/czs007/suvlim/storage/pkg/types"
)
var bucketName = conf.Config.Writer.Bucket
var bucketName = "zilliz"
type minioStore struct {
client *minio.Client

View File

@ -2,33 +2,15 @@ package readertimesync
import (
"context"
"encoding/json"
"fmt"
"github.com/apache/pulsar-client-go/pulsar"
"github.com/czs007/suvlim/conf"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"log"
"os"
"sort"
"strconv"
"sync"
"time"
)
type InsertLog struct {
MsgLength int
DurationInMilliseconds int64
InsertTime time.Time
NumSince int64
Speed float64
}
type TimeSyncRole int
const (
Reader TimeSyncRole = 0
Writer TimeSyncRole = 1
)
const ReadStopFlagEnd int64 = 0
@ -65,10 +47,8 @@ type ReaderTimeSyncCfg struct {
revTimesyncFromReader map[uint64]int
ctx context.Context
cancel context.CancelFunc
InsertLogs []InsertLog
RoleType TimeSyncRole
ctx context.Context
cancel context.CancelFunc
}
/*
@ -135,17 +115,7 @@ func NewReaderTimeSync(
r.revTimesyncFromReader = make(map[uint64]int)
r.ctx, r.cancel = context.WithCancel(context.Background())
var client pulsar.Client
var err error
if conf.Config.Pulsar.Authentication {
client, err = pulsar.NewClient(pulsar.ClientOptions{
URL: pulsarAddr,
Authentication: pulsar.NewAuthenticationToken(conf.Config.Pulsar.Token),
})
} else {
client, err = pulsar.NewClient(pulsar.ClientOptions{URL: pulsarAddr})
}
client, err := pulsar.NewClient(pulsar.ClientOptions{URL: pulsarAddr})
if err != nil {
return nil, fmt.Errorf("connect pulsar failed, %v", err)
}
@ -323,56 +293,9 @@ func (r *ReaderTimeSyncCfg) isReadStopFlag(imsg *pb.InsertOrDeleteMsg) bool {
return imsg.ClientId < ReadStopFlagEnd
}
func (r *ReaderTimeSyncCfg) WriteInsertLog() {
fileName := "/tmp/reader_get_pulsar.txt"
if r.RoleType == Writer {
fileName = "/tmp/writer_get_pulsar.txt"
}
f, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Fatal(err)
}
// write logs
for _, insertLog := range r.InsertLogs {
insertLogJson, err := json.Marshal(&insertLog)
if err != nil {
log.Fatal(err)
}
writeString := string(insertLogJson) + "\n"
//fmt.Println(writeString)
_, err2 := f.WriteString(writeString)
if err2 != nil {
log.Fatal(err2)
}
}
// reset InsertLogs buffer
r.InsertLogs = make([]InsertLog, 0)
err = f.Close()
if err != nil {
log.Fatal(err)
}
fmt.Println("write get pulsar log done")
}
func (r *ReaderTimeSyncCfg) startReadTopics() {
ctx, _ := context.WithCancel(r.ctx)
tsm := TimeSyncMsg{Timestamp: 0, NumRecorders: 0}
const Debug = true
const WriterBaseline = 1000 * 1000
const LogBaseline = 100000
var Counter int64 = 0
var LastCounter int64 = 0
r.InsertLogs = make([]InsertLog, 0)
InsertTime := time.Now()
var BaselineCounter int64 = 0
for {
select {
case <-ctx.Done():
@ -409,31 +332,7 @@ func (r *ReaderTimeSyncCfg) startReadTopics() {
log.Printf("WARN : Insert or delete chan is full ...")
}
tsm.NumRecorders++
if Debug {
r.insertOrDeleteChan <- &imsg
Counter++
if Counter%LogBaseline == 0 {
timeNow := time.Now()
duration := timeNow.Sub(InsertTime)
speed := float64(Counter-LastCounter) / duration.Seconds()
insertLog := InsertLog{
MsgLength: int(Counter - LastCounter),
DurationInMilliseconds: duration.Milliseconds(),
InsertTime: timeNow,
NumSince: Counter,
Speed: speed,
}
r.InsertLogs = append(r.InsertLogs, insertLog)
LastCounter = Counter
InsertTime = timeNow
}
if Counter/WriterBaseline != BaselineCounter {
r.WriteInsertLog()
BaselineCounter = Counter / WriterBaseline
}
} else {
r.insertOrDeleteChan <- &imsg
}
r.insertOrDeleteChan <- &imsg
}
r.readerConsumer.AckID(msg.ID())
}

View File

@ -9,6 +9,8 @@ import (
"github.com/czs007/suvlim/writer/write_node"
"log"
"strconv"
"sync"
"time"
)
func main() {
@ -22,6 +24,7 @@ func main() {
//TODO::close client / consumer/ producer
mc.ReceiveMessage()
wg := sync.WaitGroup{}
ctx := context.Background()
kv, err := storage.NewStore(ctx, conf.Config.Storage.Driver)
// TODO:: if err != nil, should retry link
@ -42,25 +45,34 @@ func main() {
}
const Debug = true
const CountMsgNum = 1000 * 1000
if Debug {
const CountInsertMsgBaseline = 1000 * 1000
var BaselineCounter int64 = 0
var printFlag = true
var startTime = true
var start time.Time
for {
if ctx.Err() != nil {
break
}
msgLength := wn.MessageClient.PrepareBatchMsg()
if msgLength > 0 {
if startTime {
fmt.Println("============> Start Test <============")
startTime = false
start = time.Now()
}
if wn.MsgCounter.InsertCounter/CountInsertMsgBaseline != BaselineCounter {
wn.WriteWriterLog()
BaselineCounter = wn.MsgCounter.InsertCounter/CountInsertMsgBaseline
wn.DoWriteNode(ctx, &wg)
fmt.Println("write node do a batch message, storage len: ", msgLength)
}
if msgLength > 0 {
wn.DoWriteNode(ctx)
fmt.Println("write node do a batch message, storage len: ", msgLength)
// Test insert time
if printFlag && wn.MsgCounter.InsertCounter >= CountMsgNum {
printFlag = false
timeSince := time.Since(start)
fmt.Println("============> Do", wn.MsgCounter.InsertCounter, "Insert in", timeSince, "<============")
}
}
}
@ -72,7 +84,7 @@ func main() {
}
msgLength := wn.MessageClient.PrepareBatchMsg()
if msgLength > 0 {
wn.DoWriteNode(ctx)
wn.DoWriteNode(ctx, &wg)
fmt.Println("write node do a batch message, storage len: ", msgLength)
}
}

View File

@ -87,7 +87,7 @@ func (mc *MessageClient) creatProducer(topicName string) pulsar.Producer {
func (mc *MessageClient) createConsumer(topicName string) pulsar.Consumer {
consumer, err := mc.client.Subscribe(pulsar.ConsumerOptions{
Topic: topicName,
SubscriptionName: "writer" + strconv.Itoa(mc.MessageClientID),
SubscriptionName: "writer",
})
if err != nil {
@ -97,20 +97,7 @@ func (mc *MessageClient) createConsumer(topicName string) pulsar.Consumer {
}
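
Dropping the client ID from the subscription name means every writer subscribes to its topics under the same name, "writer". In Pulsar, consumers that share a subscription name share one cursor: with the Go client's default Exclusive type a second writer's Subscribe call fails, and with the Shared type messages are divided among writers rather than delivered to each. If each writer is meant to see the full stream, a per-client name is the usual choice; a sketch (illustrative helper, not part of this diff):

package writer

import (
	"fmt"

	"github.com/apache/pulsar-client-go/pulsar"
)

// subscribeForWriter gives each writer its own subscription so every writer
// receives the full stream; use one shared name plus pulsar.Shared instead
// if the intent is to split the work across writers.
func subscribeForWriter(client pulsar.Client, topic string, writerID int) (pulsar.Consumer, error) {
	return client.Subscribe(pulsar.ConsumerOptions{
		Topic:            topic,
		SubscriptionName: fmt.Sprintf("writer-%d", writerID),
		Type:             pulsar.Exclusive,
	})
}
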
func (mc *MessageClient) createClient(url string) pulsar.Client {
if conf.Config.Pulsar.Authentication {
// create client with Authentication
client, err := pulsar.NewClient(pulsar.ClientOptions{
URL: url,
Authentication: pulsar.NewAuthenticationToken(conf.Config.Pulsar.Token),
})
if err != nil {
log.Fatal(err)
}
return client
}
// create client without Authentication
// create client
client, err := pulsar.NewClient(pulsar.ClientOptions{
URL: url,
})
@ -126,23 +113,11 @@ func (mc *MessageClient) InitClient(url string) {
mc.client = mc.createClient(url)
mc.MessageClientID = conf.Config.Writer.ClientId
key2SegTopicName := "Key2Seg"
searchByIdTopicName := "SearchById"
timeSyncTopicName := "TimeSync"
insertOrDeleteTopicName := "InsertOrDelete-"
if conf.Config.Pulsar.Authentication {
key2SegTopicName = "Key2Seg-" + conf.Config.Pulsar.User
searchByIdTopicName = "Search-" + conf.Config.Pulsar.User
// timeSyncTopicName = "TimeSync-" + conf.Config.Pulsar.User
insertOrDeleteTopicName = "InsertOrDelete-" + conf.Config.Pulsar.User + "-"
}
//create producer
mc.key2segProducer = mc.creatProducer(key2SegTopicName)
mc.key2segProducer = mc.creatProducer("Key2Seg")
//create consumer
mc.searchByIdConsumer = mc.createConsumer(searchByIdTopicName)
mc.searchByIdConsumer = mc.createConsumer("SearchById")
//init channel
mc.searchByIdChan = make(chan *msgpb.EntityIdentity, conf.Config.Writer.SearchByIdChanSize)
@ -152,11 +127,11 @@ func (mc *MessageClient) InitClient(url string) {
mc.DeleteMsg = make([]*msgpb.InsertOrDeleteMsg, 0)
//init timesync
timeSyncTopic := timeSyncTopicName
timeSyncTopic := "TimeSync"
timeSyncSubName := "writer" + strconv.Itoa(mc.MessageClientID)
readTopics := make([]string, 0)
for i := conf.Config.Writer.TopicStart; i < conf.Config.Writer.TopicEnd; i++ {
str := insertOrDeleteTopicName
str := "ManipulationReqMsg-"
str = str + strconv.Itoa(i)
readTopics = append(readTopics, str)
}
@ -174,7 +149,6 @@ func (mc *MessageClient) InitClient(url string) {
log.Fatal(err)
}
mc.timeSyncCfg = timeSync.(*timesync.ReaderTimeSyncCfg)
mc.timeSyncCfg.RoleType = timesync.Writer
mc.timestampBatchStart = 0
mc.timestampBatchEnd = 0

View File

@ -2,19 +2,13 @@ package write_node
import (
"context"
"encoding/binary"
"encoding/json"
"fmt"
"github.com/czs007/suvlim/conf"
msgpb "github.com/czs007/suvlim/pkg/master/grpc/message"
storage "github.com/czs007/suvlim/storage/pkg"
"github.com/czs007/suvlim/storage/pkg/types"
"github.com/czs007/suvlim/writer/message_client"
"log"
"os"
"strconv"
"sync"
"time"
)
type SegmentIdInfo struct {
@ -25,19 +19,7 @@ type SegmentIdInfo struct {
type MsgCounter struct {
InsertCounter int64
InsertTime time.Time
// InsertedRecordSize float64
DeleteCounter int64
DeleteTime time.Time
}
type InsertLog struct {
MsgLength int
DurationInMilliseconds int64
InsertTime time.Time
NumSince int64
Speed float64
}
type WriteNode struct {
@ -45,7 +27,6 @@ type WriteNode struct {
MessageClient *message_client.MessageClient
TimeSync uint64
MsgCounter *MsgCounter
InsertLogs []InsertLog
}
func (wn *WriteNode) Close() {
@ -61,10 +42,7 @@ func NewWriteNode(ctx context.Context,
msgCounter := MsgCounter{
InsertCounter: 0,
InsertTime: time.Now(),
DeleteCounter: 0,
DeleteTime: time.Now(),
// InsertedRecordSize: 0,
}
return &WriteNode{
@ -72,7 +50,6 @@ func NewWriteNode(ctx context.Context,
MessageClient: &mc,
TimeSync: timeSync,
MsgCounter: &msgCounter,
InsertLogs: make([]InsertLog, 0),
}, err
}
@ -83,19 +60,18 @@ func (wn *WriteNode) InsertBatchData(ctx context.Context, data []*msgpb.InsertOr
var suffixKeys []string
var binaryData [][]byte
var timeStamp []uint64
byteArr := make([]byte, 8)
intData := uint64(0)
binary.BigEndian.PutUint64(byteArr, intData)
for i := 0; i < len(data); i++ {
prefixKey = data[i].CollectionName + "-" + strconv.FormatUint(uint64(data[i].Uid), 10)
suffixKey = strconv.FormatUint(uint64(data[i].SegmentId), 10)
prefixKeys = append(prefixKeys, []byte(prefixKey))
suffixKeys = append(suffixKeys, suffixKey)
binaryData = append(binaryData, byteArr)
binaryData = append(binaryData, []byte(data[i].String()))
timeStamp = append(timeStamp, uint64(data[i].Timestamp))
}
wn.MsgCounter.InsertCounter += int64(len(timeStamp))
error := (*wn.KvStore).PutRows(ctx, prefixKeys, binaryData, suffixKeys, timeStamp)
if error != nil {
fmt.Println("Can't insert data!")
@ -106,7 +82,7 @@ func (wn *WriteNode) InsertBatchData(ctx context.Context, data []*msgpb.InsertOr
return nil
}
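
The row payload written above is []byte(data[i].String()), i.e. the protobuf text form of the whole message, which is human-readable but not the compact wire encoding. If a binary, round-trippable payload were wanted, proto.Marshal is the usual alternative; a sketch under that assumption (this is not what the diff does):

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	msgpb "github.com/czs007/suvlim/pkg/master/grpc/message"
)

// encodeRow serializes a message with the binary wire format; the result can
// be restored later with proto.Unmarshal, unlike the text form from String().
func encodeRow(msg *msgpb.InsertOrDeleteMsg) []byte {
	b, err := proto.Marshal(msg)
	if err != nil {
		log.Fatal(err)
	}
	return b
}

func main() {
	row := encodeRow(&msgpb.InsertOrDeleteMsg{CollectionName: "collection0"})
	fmt.Println(len(row))
}
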
func (wn *WriteNode) DeleteBatchData(ctx context.Context, data []*msgpb.InsertOrDeleteMsg) error {
func (wn *WriteNode) DeleteBatchData(ctx context.Context, data []*msgpb.InsertOrDeleteMsg, wg *sync.WaitGroup) error {
var prefixKey string
var prefixKeys [][]byte
var timeStamps []uint64
@ -139,8 +115,10 @@ func (wn *WriteNode) DeleteBatchData(ctx context.Context, data []*msgpb.InsertOr
err := (*wn.KvStore).DeleteRows(ctx, prefixKeys, timeStamps)
if err != nil {
fmt.Println("Can't delete data")
wg.Done()
return err
}
wg.Done()
return nil
}
@ -148,79 +126,10 @@ func (wn *WriteNode) UpdateTimeSync(timeSync uint64) {
wn.TimeSync = timeSync
}
func (wn *WriteNode) DoWriteNode(ctx context.Context) {
numInsertData := len(wn.MessageClient.InsertMsg)
numGoRoute := conf.Config.Writer.Parallelism
batchSize := numInsertData / numGoRoute
if numInsertData%numGoRoute != 0 {
batchSize += 1
}
start := 0
end := 0
wg := sync.WaitGroup{}
for end < numInsertData {
if end+batchSize >= numInsertData {
end = numInsertData
} else {
end = end + batchSize
}
wg.Add(1)
go wn.InsertBatchData(ctx, wn.MessageClient.InsertMsg[start:end], &wg)
start = end
}
func (wn *WriteNode) DoWriteNode(ctx context.Context, wg *sync.WaitGroup) {
wg.Add(2)
go wn.InsertBatchData(ctx, wn.MessageClient.InsertMsg, wg)
go wn.DeleteBatchData(ctx, wn.MessageClient.DeleteMsg, wg)
wg.Wait()
wn.WriterLog(numInsertData)
wn.DeleteBatchData(ctx, wn.MessageClient.DeleteMsg)
wn.UpdateTimeSync(wn.MessageClient.TimeSync())
}
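
The replaced DoWriteNode split the pending insert messages into conf.Config.Writer.Parallelism batches with a ceiling division and fanned each batch out to its own InsertBatchData goroutine, whereas the version kept here starts exactly two goroutines. The batching arithmetic in isolation (hypothetical helper, sketch only):

package main

import "fmt"

// splitBatches cuts msgs into at most parallelism contiguous batches,
// each of size ceil(len(msgs)/parallelism), mirroring the removed batching math.
func splitBatches(msgs []int, parallelism int) [][]int {
	if len(msgs) == 0 || parallelism <= 0 {
		return nil
	}
	batchSize := len(msgs) / parallelism
	if len(msgs)%parallelism != 0 {
		batchSize++
	}
	var batches [][]int
	for start := 0; start < len(msgs); start += batchSize {
		end := start + batchSize
		if end > len(msgs) {
			end = len(msgs)
		}
		batches = append(batches, msgs[start:end])
	}
	return batches
}

func main() {
	fmt.Println(splitBatches([]int{1, 2, 3, 4, 5, 6, 7}, 3)) // [[1 2 3] [4 5 6] [7]]
}
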
func (wn *WriteNode) WriterLog(length int) {
wn.MsgCounter.InsertCounter += int64(length)
timeNow := time.Now()
duration := timeNow.Sub(wn.MsgCounter.InsertTime)
speed := float64(length) / duration.Seconds()
insertLog := InsertLog{
MsgLength: length,
DurationInMilliseconds: duration.Milliseconds(),
InsertTime: timeNow,
NumSince: wn.MsgCounter.InsertCounter,
Speed: speed,
}
wn.InsertLogs = append(wn.InsertLogs, insertLog)
wn.MsgCounter.InsertTime = timeNow
}
func (wn *WriteNode) WriteWriterLog() {
f, err := os.OpenFile("/tmp/write_node_insert.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Fatal(err)
}
// write logs
for _, insertLog := range wn.InsertLogs {
insertLogJson, err := json.Marshal(&insertLog)
if err != nil {
log.Fatal(err)
}
writeString := string(insertLogJson) + "\n"
//fmt.Println(writeString)
_, err2 := f.WriteString(writeString)
if err2 != nil {
log.Fatal(err2)
}
}
// reset InsertLogs buffer
wn.InsertLogs = make([]InsertLog, 0)
err = f.Close()
if err != nil {
log.Fatal(err)
}
fmt.Println("write write node log done")
}