mirror of https://github.com/milvus-io/milvus.git

Update msgstream/unmarshal interface

Signed-off-by: Xiangyu Wang <xiangyu.wang@zilliz.com>

pull/4973/head^2
parent abffdbff62
commit fd9e42d3ed
@@ -9,5 +9,5 @@ dataservice:
|
|||
defaultSizePerRecord: 1024
|
||||
# old name: segmentExpireDuration: 2000
|
||||
IDAssignExpiration: 2000 # ms
|
||||
insertChannelNumPerCollection: 4
|
||||
insertChannelNum: 16
|
||||
dataNodeNum: 1
|
3
go.mod
3
go.mod
|
@@ -18,6 +18,7 @@ require (
|
|||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||
github.com/go-basic/ipv4 v1.0.0
|
||||
github.com/go-kit/kit v0.9.0
|
||||
github.com/gogo/protobuf v1.2.1
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
|
||||
github.com/golang/mock v1.3.1
|
||||
github.com/golang/protobuf v1.3.2
|
||||
|
@@ -26,6 +27,7 @@ require (
|
|||
github.com/klauspost/compress v1.10.11 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/minio/minio-go/v7 v7.0.5
|
||||
github.com/modern-go/reflect2 v1.0.1
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
|
||||
github.com/oklog/run v1.1.0
|
||||
github.com/onsi/ginkgo v1.12.1 // indirect
|
||||
|
@@ -57,6 +59,7 @@ require (
|
|||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
|
||||
golang.org/x/net v0.0.0-20200904194848-62affa334b73
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f // indirect
|
||||
golang.org/x/text v0.3.3
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
|
|
|
@@ -1144,7 +1144,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_milvus_2eproto::offsets[] PROT
|
|||
~0u, // no _weak_field_map_
|
||||
PROTOBUF_FIELD_OFFSET(::milvus::proto::milvus::FlushRequest, base_),
|
||||
PROTOBUF_FIELD_OFFSET(::milvus::proto::milvus::FlushRequest, db_name_),
|
||||
PROTOBUF_FIELD_OFFSET(::milvus::proto::milvus::FlushRequest, collection_name_),
|
||||
PROTOBUF_FIELD_OFFSET(::milvus::proto::milvus::FlushRequest, collection_names_),
|
||||
~0u, // no _has_bits_
|
||||
PROTOBUF_FIELD_OFFSET(::milvus::proto::milvus::RegisterLinkResponse, _internal_metadata_),
|
||||
~0u, // no _extensions_
|
||||
|
@@ -1359,72 +1359,72 @@ const char descriptor_table_protodef_milvus_2eproto[] PROTOBUF_SECTION_VARIABLE(
|
|||
"s\022\013\n\003IDs\030\001 \003(\003\022\020\n\010row_data\030\002 \003(\014\022\016\n\006scor"
|
||||
"es\030\003 \003(\002\"J\n\rSearchResults\022+\n\006status\030\001 \001("
|
||||
"\0132\033.milvus.proto.common.Status\022\014\n\004hits\030\002"
|
||||
" \003(\014\"d\n\014FlushRequest\022*\n\004base\030\001 \001(\0132\034.mil"
|
||||
" \003(\014\"e\n\014FlushRequest\022*\n\004base\030\001 \001(\0132\034.mil"
|
||||
"vus.proto.common.MsgBase\022\017\n\007db_name\030\002 \001("
|
||||
"\t\022\027\n\017collection_name\030\003 \001(\t\"r\n\024RegisterLi"
|
||||
"nkResponse\022-\n\007address\030\001 \001(\0132\034.milvus.pro"
|
||||
"to.common.Address\022+\n\006status\030\002 \001(\0132\033.milv"
|
||||
"us.proto.common.Status*@\n\017PlaceholderTyp"
|
||||
"e\022\010\n\004NONE\020\000\022\021\n\rVECTOR_BINARY\020d\022\020\n\014VECTOR"
|
||||
"_FLOAT\020e2\357\020\n\rMilvusService\022_\n\020CreateColl"
|
||||
"ection\022,.milvus.proto.milvus.CreateColle"
|
||||
"ctionRequest\032\033.milvus.proto.common.Statu"
|
||||
"s\"\000\022[\n\016DropCollection\022*.milvus.proto.mil"
|
||||
"vus.DropCollectionRequest\032\033.milvus.proto"
|
||||
".common.Status\"\000\022_\n\rHasCollection\022).milv"
|
||||
"us.proto.milvus.HasCollectionRequest\032!.m"
|
||||
"ilvus.proto.milvus.BoolResponse\"\000\022[\n\016Loa"
|
||||
"dCollection\022*.milvus.proto.milvus.LoadCo"
|
||||
"llectionRequest\032\033.milvus.proto.common.St"
|
||||
"atus\"\000\022a\n\021ReleaseCollection\022-.milvus.pro"
|
||||
"to.milvus.ReleaseCollectionRequest\032\033.mil"
|
||||
"vus.proto.common.Status\"\000\022w\n\022DescribeCol"
|
||||
"lection\022..milvus.proto.milvus.DescribeCo"
|
||||
"llectionRequest\032/.milvus.proto.milvus.De"
|
||||
"scribeCollectionResponse\"\000\022v\n\027GetCollect"
|
||||
"ionStatistics\022+.milvus.proto.milvus.Coll"
|
||||
"ectionStatsRequest\032,.milvus.proto.milvus"
|
||||
".CollectionStatsResponse\"\000\022l\n\017ShowCollec"
|
||||
"tions\022*.milvus.proto.milvus.ShowCollecti"
|
||||
"onRequest\032+.milvus.proto.milvus.ShowColl"
|
||||
"ectionResponse\"\000\022]\n\017CreatePartition\022+.mi"
|
||||
"lvus.proto.milvus.CreatePartitionRequest"
|
||||
"\032\033.milvus.proto.common.Status\"\000\022Y\n\rDropP"
|
||||
"artition\022).milvus.proto.milvus.DropParti"
|
||||
"tionRequest\032\033.milvus.proto.common.Status"
|
||||
"\"\000\022]\n\014HasPartition\022(.milvus.proto.milvus"
|
||||
".HasPartitionRequest\032!.milvus.proto.milv"
|
||||
"us.BoolResponse\"\000\022Y\n\016LoadPartitions\022(.mi"
|
||||
"lvus.proto.milvus.LoadPartitonRequest\032\033."
|
||||
"milvus.proto.common.Status\"\000\022`\n\021ReleaseP"
|
||||
"artitions\022,.milvus.proto.milvus.ReleaseP"
|
||||
"artitionRequest\032\033.milvus.proto.common.St"
|
||||
"atus\"\000\022s\n\026GetPartitionStatistics\022*.milvu"
|
||||
"s.proto.milvus.PartitionStatsRequest\032+.m"
|
||||
"ilvus.proto.milvus.PartitionStatsRespons"
|
||||
"e\"\000\022i\n\016ShowPartitions\022).milvus.proto.mil"
|
||||
"vus.ShowPartitionRequest\032*.milvus.proto."
|
||||
"milvus.ShowPartitionResponse\"\000\022U\n\013Create"
|
||||
"Index\022\'.milvus.proto.milvus.CreateIndexR"
|
||||
"equest\032\033.milvus.proto.common.Status\"\000\022h\n"
|
||||
"\rDescribeIndex\022).milvus.proto.milvus.Des"
|
||||
"cribeIndexRequest\032*.milvus.proto.milvus."
|
||||
"DescribeIndexResponse\"\000\022b\n\rGetIndexState"
|
||||
"\022&.milvus.proto.milvus.IndexStateRequest"
|
||||
"\032\'.milvus.proto.milvus.IndexStateRespons"
|
||||
"e\"\000\022S\n\006Insert\022\".milvus.proto.milvus.Inse"
|
||||
"rtRequest\032#.milvus.proto.milvus.InsertRe"
|
||||
"sponse\"\000\022R\n\006Search\022\".milvus.proto.milvus"
|
||||
".SearchRequest\032\".milvus.proto.milvus.Sea"
|
||||
"rchResults\"\000\022I\n\005Flush\022!.milvus.proto.mil"
|
||||
"vus.FlushRequest\032\033.milvus.proto.common.S"
|
||||
"tatus\"\000\022Q\n\014GetDdChannel\022\032.milvus.proto.c"
|
||||
"ommon.Empty\032#.milvus.proto.milvus.String"
|
||||
"Response\"\0002g\n\014ProxyService\022W\n\014RegisterLi"
|
||||
"nk\022\032.milvus.proto.common.Empty\032).milvus."
|
||||
"proto.milvus.RegisterLinkResponse\"\000BBZ@g"
|
||||
"ithub.com/zilliztech/milvus-distributed/"
|
||||
"internal/proto/milvuspbb\006proto3"
|
||||
"\t\022\030\n\020collection_names\030\003 \003(\t\"r\n\024RegisterL"
|
||||
"inkResponse\022-\n\007address\030\001 \001(\0132\034.milvus.pr"
|
||||
"oto.common.Address\022+\n\006status\030\002 \001(\0132\033.mil"
|
||||
"vus.proto.common.Status*@\n\017PlaceholderTy"
|
||||
"pe\022\010\n\004NONE\020\000\022\021\n\rVECTOR_BINARY\020d\022\020\n\014VECTO"
|
||||
"R_FLOAT\020e2\357\020\n\rMilvusService\022_\n\020CreateCol"
|
||||
"lection\022,.milvus.proto.milvus.CreateColl"
|
||||
"ectionRequest\032\033.milvus.proto.common.Stat"
|
||||
"us\"\000\022[\n\016DropCollection\022*.milvus.proto.mi"
|
||||
"lvus.DropCollectionRequest\032\033.milvus.prot"
|
||||
"o.common.Status\"\000\022_\n\rHasCollection\022).mil"
|
||||
"vus.proto.milvus.HasCollectionRequest\032!."
|
||||
"milvus.proto.milvus.BoolResponse\"\000\022[\n\016Lo"
|
||||
"adCollection\022*.milvus.proto.milvus.LoadC"
|
||||
"ollectionRequest\032\033.milvus.proto.common.S"
|
||||
"tatus\"\000\022a\n\021ReleaseCollection\022-.milvus.pr"
|
||||
"oto.milvus.ReleaseCollectionRequest\032\033.mi"
|
||||
"lvus.proto.common.Status\"\000\022w\n\022DescribeCo"
|
||||
"llection\022..milvus.proto.milvus.DescribeC"
|
||||
"ollectionRequest\032/.milvus.proto.milvus.D"
|
||||
"escribeCollectionResponse\"\000\022v\n\027GetCollec"
|
||||
"tionStatistics\022+.milvus.proto.milvus.Col"
|
||||
"lectionStatsRequest\032,.milvus.proto.milvu"
|
||||
"s.CollectionStatsResponse\"\000\022l\n\017ShowColle"
|
||||
"ctions\022*.milvus.proto.milvus.ShowCollect"
|
||||
"ionRequest\032+.milvus.proto.milvus.ShowCol"
|
||||
"lectionResponse\"\000\022]\n\017CreatePartition\022+.m"
|
||||
"ilvus.proto.milvus.CreatePartitionReques"
|
||||
"t\032\033.milvus.proto.common.Status\"\000\022Y\n\rDrop"
|
||||
"Partition\022).milvus.proto.milvus.DropPart"
|
||||
"itionRequest\032\033.milvus.proto.common.Statu"
|
||||
"s\"\000\022]\n\014HasPartition\022(.milvus.proto.milvu"
|
||||
"s.HasPartitionRequest\032!.milvus.proto.mil"
|
||||
"vus.BoolResponse\"\000\022Y\n\016LoadPartitions\022(.m"
|
||||
"ilvus.proto.milvus.LoadPartitonRequest\032\033"
|
||||
".milvus.proto.common.Status\"\000\022`\n\021Release"
|
||||
"Partitions\022,.milvus.proto.milvus.Release"
|
||||
"PartitionRequest\032\033.milvus.proto.common.S"
|
||||
"tatus\"\000\022s\n\026GetPartitionStatistics\022*.milv"
|
||||
"us.proto.milvus.PartitionStatsRequest\032+."
|
||||
"milvus.proto.milvus.PartitionStatsRespon"
|
||||
"se\"\000\022i\n\016ShowPartitions\022).milvus.proto.mi"
|
||||
"lvus.ShowPartitionRequest\032*.milvus.proto"
|
||||
".milvus.ShowPartitionResponse\"\000\022U\n\013Creat"
|
||||
"eIndex\022\'.milvus.proto.milvus.CreateIndex"
|
||||
"Request\032\033.milvus.proto.common.Status\"\000\022h"
|
||||
"\n\rDescribeIndex\022).milvus.proto.milvus.De"
|
||||
"scribeIndexRequest\032*.milvus.proto.milvus"
|
||||
".DescribeIndexResponse\"\000\022b\n\rGetIndexStat"
|
||||
"e\022&.milvus.proto.milvus.IndexStateReques"
|
||||
"t\032\'.milvus.proto.milvus.IndexStateRespon"
|
||||
"se\"\000\022S\n\006Insert\022\".milvus.proto.milvus.Ins"
|
||||
"ertRequest\032#.milvus.proto.milvus.InsertR"
|
||||
"esponse\"\000\022R\n\006Search\022\".milvus.proto.milvu"
|
||||
"s.SearchRequest\032\".milvus.proto.milvus.Se"
|
||||
"archResults\"\000\022I\n\005Flush\022!.milvus.proto.mi"
|
||||
"lvus.FlushRequest\032\033.milvus.proto.common."
|
||||
"Status\"\000\022Q\n\014GetDdChannel\022\032.milvus.proto."
|
||||
"common.Empty\032#.milvus.proto.milvus.Strin"
|
||||
"gResponse\"\0002g\n\014ProxyService\022W\n\014RegisterL"
|
||||
"ink\022\032.milvus.proto.common.Empty\032).milvus"
|
||||
".proto.milvus.RegisterLinkResponse\"\000BBZ@"
|
||||
"github.com/zilliztech/milvus-distributed"
|
||||
"/internal/proto/milvuspbb\006proto3"
|
||||
;
|
||||
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_milvus_2eproto_deps[2] = {
|
||||
&::descriptor_table_common_2eproto,
|
||||
|
@@ -1476,7 +1476,7 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_mil
|
|||
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_milvus_2eproto_once;
|
||||
static bool descriptor_table_milvus_2eproto_initialized = false;
|
||||
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_milvus_2eproto = {
|
||||
&descriptor_table_milvus_2eproto_initialized, descriptor_table_protodef_milvus_2eproto, "milvus.proto", 7311,
|
||||
&descriptor_table_milvus_2eproto_initialized, descriptor_table_protodef_milvus_2eproto, "milvus.proto", 7312,
|
||||
&descriptor_table_milvus_2eproto_once, descriptor_table_milvus_2eproto_sccs, descriptor_table_milvus_2eproto_deps, 41, 2,
|
||||
schemas, file_default_instances, TableStruct_milvus_2eproto::offsets,
|
||||
file_level_metadata_milvus_2eproto, 41, file_level_enum_descriptors_milvus_2eproto, file_level_service_descriptors_milvus_2eproto,
|
||||
|
@@ -17153,16 +17153,13 @@ FlushRequest::FlushRequest()
|
|||
}
|
||||
FlushRequest::FlushRequest(const FlushRequest& from)
|
||||
: ::PROTOBUF_NAMESPACE_ID::Message(),
|
||||
_internal_metadata_(nullptr) {
|
||||
_internal_metadata_(nullptr),
|
||||
collection_names_(from.collection_names_) {
|
||||
_internal_metadata_.MergeFrom(from._internal_metadata_);
|
||||
db_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
|
||||
if (!from.db_name().empty()) {
|
||||
db_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.db_name_);
|
||||
}
|
||||
collection_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
|
||||
if (!from.collection_name().empty()) {
|
||||
collection_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.collection_name_);
|
||||
}
|
||||
if (from.has_base()) {
|
||||
base_ = new ::milvus::proto::common::MsgBase(*from.base_);
|
||||
} else {
|
||||
|
@@ -17174,7 +17171,6 @@ FlushRequest::FlushRequest(const FlushRequest& from)
|
|||
void FlushRequest::SharedCtor() {
|
||||
::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_FlushRequest_milvus_2eproto.base);
|
||||
db_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
|
||||
collection_name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
|
||||
base_ = nullptr;
|
||||
}
|
||||
|
||||
|
@@ -17185,7 +17181,6 @@ FlushRequest::~FlushRequest() {
|
|||
|
||||
void FlushRequest::SharedDtor() {
|
||||
db_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
|
||||
collection_name_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
|
||||
if (this != internal_default_instance()) delete base_;
|
||||
}
|
||||
|
||||
|
@@ -17204,8 +17199,8 @@ void FlushRequest::Clear() {
|
|||
// Prevent compiler warnings about cached_has_bits being unused
|
||||
(void) cached_has_bits;
|
||||
|
||||
collection_names_.Clear();
|
||||
db_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
|
||||
collection_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
|
||||
if (GetArenaNoVirtual() == nullptr && base_ != nullptr) {
|
||||
delete base_;
|
||||
}
|
||||
|
@@ -17235,11 +17230,16 @@ const char* FlushRequest::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_I
|
|||
CHK_(ptr);
|
||||
} else goto handle_unusual;
|
||||
continue;
|
||||
// string collection_name = 3;
|
||||
// repeated string collection_names = 3;
|
||||
case 3:
|
||||
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
|
||||
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_collection_name(), ptr, ctx, "milvus.proto.milvus.FlushRequest.collection_name");
|
||||
CHK_(ptr);
|
||||
ptr -= 1;
|
||||
do {
|
||||
ptr += 1;
|
||||
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_collection_names(), ptr, ctx, "milvus.proto.milvus.FlushRequest.collection_names");
|
||||
CHK_(ptr);
|
||||
if (!ctx->DataAvailable(ptr)) break;
|
||||
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 26);
|
||||
} else goto handle_unusual;
|
||||
continue;
|
||||
default: {
|
||||
|
@@ -17298,15 +17298,16 @@ bool FlushRequest::MergePartialFromCodedStream(
|
|||
break;
|
||||
}
|
||||
|
||||
// string collection_name = 3;
|
||||
// repeated string collection_names = 3;
|
||||
case 3: {
|
||||
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
|
||||
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
|
||||
input, this->mutable_collection_name()));
|
||||
input, this->add_collection_names()));
|
||||
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
|
||||
this->collection_name().data(), static_cast<int>(this->collection_name().length()),
|
||||
this->collection_names(this->collection_names_size() - 1).data(),
|
||||
static_cast<int>(this->collection_names(this->collection_names_size() - 1).length()),
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
|
||||
"milvus.proto.milvus.FlushRequest.collection_name"));
|
||||
"milvus.proto.milvus.FlushRequest.collection_names"));
|
||||
} else {
|
||||
goto handle_unusual;
|
||||
}
|
||||
|
@@ -17356,14 +17357,14 @@ void FlushRequest::SerializeWithCachedSizes(
|
|||
2, this->db_name(), output);
|
||||
}
|
||||
|
||||
// string collection_name = 3;
|
||||
if (this->collection_name().size() > 0) {
|
||||
// repeated string collection_names = 3;
|
||||
for (int i = 0, n = this->collection_names_size(); i < n; i++) {
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
|
||||
this->collection_name().data(), static_cast<int>(this->collection_name().length()),
|
||||
this->collection_names(i).data(), static_cast<int>(this->collection_names(i).length()),
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
|
||||
"milvus.proto.milvus.FlushRequest.collection_name");
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
|
||||
3, this->collection_name(), output);
|
||||
"milvus.proto.milvus.FlushRequest.collection_names");
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
|
||||
3, this->collection_names(i), output);
|
||||
}
|
||||
|
||||
if (_internal_metadata_.have_unknown_fields()) {
|
||||
|
@@ -17397,15 +17398,14 @@ void FlushRequest::SerializeWithCachedSizes(
|
|||
2, this->db_name(), target);
|
||||
}
|
||||
|
||||
// string collection_name = 3;
|
||||
if (this->collection_name().size() > 0) {
|
||||
// repeated string collection_names = 3;
|
||||
for (int i = 0, n = this->collection_names_size(); i < n; i++) {
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
|
||||
this->collection_name().data(), static_cast<int>(this->collection_name().length()),
|
||||
this->collection_names(i).data(), static_cast<int>(this->collection_names(i).length()),
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
|
||||
"milvus.proto.milvus.FlushRequest.collection_name");
|
||||
target =
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
|
||||
3, this->collection_name(), target);
|
||||
"milvus.proto.milvus.FlushRequest.collection_names");
|
||||
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
|
||||
WriteStringToArray(3, this->collection_names(i), target);
|
||||
}
|
||||
|
||||
if (_internal_metadata_.have_unknown_fields()) {
|
||||
|
@@ -17429,6 +17429,14 @@ size_t FlushRequest::ByteSizeLong() const {
|
|||
// Prevent compiler warnings about cached_has_bits being unused
|
||||
(void) cached_has_bits;
|
||||
|
||||
// repeated string collection_names = 3;
|
||||
total_size += 1 *
|
||||
::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->collection_names_size());
|
||||
for (int i = 0, n = this->collection_names_size(); i < n; i++) {
|
||||
total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
|
||||
this->collection_names(i));
|
||||
}
|
||||
|
||||
// string db_name = 2;
|
||||
if (this->db_name().size() > 0) {
|
||||
total_size += 1 +
|
||||
|
@@ -17436,13 +17444,6 @@ size_t FlushRequest::ByteSizeLong() const {
|
|||
this->db_name());
|
||||
}
|
||||
|
||||
// string collection_name = 3;
|
||||
if (this->collection_name().size() > 0) {
|
||||
total_size += 1 +
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
|
||||
this->collection_name());
|
||||
}
|
||||
|
||||
// .milvus.proto.common.MsgBase base = 1;
|
||||
if (this->has_base()) {
|
||||
total_size += 1 +
|
||||
|
@@ -17477,14 +17478,11 @@ void FlushRequest::MergeFrom(const FlushRequest& from) {
|
|||
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
|
||||
(void) cached_has_bits;
|
||||
|
||||
collection_names_.MergeFrom(from.collection_names_);
|
||||
if (from.db_name().size() > 0) {
|
||||
|
||||
db_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.db_name_);
|
||||
}
|
||||
if (from.collection_name().size() > 0) {
|
||||
|
||||
collection_name_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.collection_name_);
|
||||
}
|
||||
if (from.has_base()) {
|
||||
mutable_base()->::milvus::proto::common::MsgBase::MergeFrom(from.base());
|
||||
}
|
||||
|
@@ -17511,10 +17509,9 @@ bool FlushRequest::IsInitialized() const {
|
|||
void FlushRequest::InternalSwap(FlushRequest* other) {
|
||||
using std::swap;
|
||||
_internal_metadata_.Swap(&other->_internal_metadata_);
|
||||
collection_names_.InternalSwap(CastToBase(&other->collection_names_));
|
||||
db_name_.Swap(&other->db_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
|
||||
GetArenaNoVirtual());
|
||||
collection_name_.Swap(&other->collection_name_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
|
||||
GetArenaNoVirtual());
|
||||
swap(base_, other->base_);
|
||||
}
|
||||
|
||||
|
|
|
@@ -6689,10 +6689,27 @@ class FlushRequest :
|
|||
// accessors -------------------------------------------------------
|
||||
|
||||
enum : int {
|
||||
kCollectionNamesFieldNumber = 3,
|
||||
kDbNameFieldNumber = 2,
|
||||
kCollectionNameFieldNumber = 3,
|
||||
kBaseFieldNumber = 1,
|
||||
};
|
||||
// repeated string collection_names = 3;
|
||||
int collection_names_size() const;
|
||||
void clear_collection_names();
|
||||
const std::string& collection_names(int index) const;
|
||||
std::string* mutable_collection_names(int index);
|
||||
void set_collection_names(int index, const std::string& value);
|
||||
void set_collection_names(int index, std::string&& value);
|
||||
void set_collection_names(int index, const char* value);
|
||||
void set_collection_names(int index, const char* value, size_t size);
|
||||
std::string* add_collection_names();
|
||||
void add_collection_names(const std::string& value);
|
||||
void add_collection_names(std::string&& value);
|
||||
void add_collection_names(const char* value);
|
||||
void add_collection_names(const char* value, size_t size);
|
||||
const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& collection_names() const;
|
||||
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_collection_names();
|
||||
|
||||
// string db_name = 2;
|
||||
void clear_db_name();
|
||||
const std::string& db_name() const;
|
||||
|
@@ -6704,17 +6721,6 @@ class FlushRequest :
|
|||
std::string* release_db_name();
|
||||
void set_allocated_db_name(std::string* db_name);
|
||||
|
||||
// string collection_name = 3;
|
||||
void clear_collection_name();
|
||||
const std::string& collection_name() const;
|
||||
void set_collection_name(const std::string& value);
|
||||
void set_collection_name(std::string&& value);
|
||||
void set_collection_name(const char* value);
|
||||
void set_collection_name(const char* value, size_t size);
|
||||
std::string* mutable_collection_name();
|
||||
std::string* release_collection_name();
|
||||
void set_allocated_collection_name(std::string* collection_name);
|
||||
|
||||
// .milvus.proto.common.MsgBase base = 1;
|
||||
bool has_base() const;
|
||||
void clear_base();
|
||||
|
@@ -6728,8 +6734,8 @@ class FlushRequest :
|
|||
class _Internal;
|
||||
|
||||
::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_;
|
||||
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> collection_names_;
|
||||
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr db_name_;
|
||||
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr collection_name_;
|
||||
::milvus::proto::common::MsgBase* base_;
|
||||
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
|
||||
friend struct ::TableStruct_milvus_2eproto;
|
||||
|
@@ -12613,55 +12619,69 @@ inline void FlushRequest::set_allocated_db_name(std::string* db_name) {
|
|||
// @@protoc_insertion_point(field_set_allocated:milvus.proto.milvus.FlushRequest.db_name)
|
||||
}
|
||||
|
||||
// string collection_name = 3;
|
||||
inline void FlushRequest::clear_collection_name() {
|
||||
collection_name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
|
||||
// repeated string collection_names = 3;
|
||||
inline int FlushRequest::collection_names_size() const {
|
||||
return collection_names_.size();
|
||||
}
|
||||
inline const std::string& FlushRequest::collection_name() const {
|
||||
// @@protoc_insertion_point(field_get:milvus.proto.milvus.FlushRequest.collection_name)
|
||||
return collection_name_.GetNoArena();
|
||||
inline void FlushRequest::clear_collection_names() {
|
||||
collection_names_.Clear();
|
||||
}
|
||||
inline void FlushRequest::set_collection_name(const std::string& value) {
|
||||
|
||||
collection_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value);
|
||||
// @@protoc_insertion_point(field_set:milvus.proto.milvus.FlushRequest.collection_name)
|
||||
inline const std::string& FlushRequest::collection_names(int index) const {
|
||||
// @@protoc_insertion_point(field_get:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
return collection_names_.Get(index);
|
||||
}
|
||||
inline void FlushRequest::set_collection_name(std::string&& value) {
|
||||
|
||||
collection_name_.SetNoArena(
|
||||
&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value));
|
||||
// @@protoc_insertion_point(field_set_rvalue:milvus.proto.milvus.FlushRequest.collection_name)
|
||||
inline std::string* FlushRequest::mutable_collection_names(int index) {
|
||||
// @@protoc_insertion_point(field_mutable:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
return collection_names_.Mutable(index);
|
||||
}
|
||||
inline void FlushRequest::set_collection_name(const char* value) {
|
||||
inline void FlushRequest::set_collection_names(int index, const std::string& value) {
|
||||
// @@protoc_insertion_point(field_set:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
collection_names_.Mutable(index)->assign(value);
|
||||
}
|
||||
inline void FlushRequest::set_collection_names(int index, std::string&& value) {
|
||||
// @@protoc_insertion_point(field_set:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
collection_names_.Mutable(index)->assign(std::move(value));
|
||||
}
|
||||
inline void FlushRequest::set_collection_names(int index, const char* value) {
|
||||
GOOGLE_DCHECK(value != nullptr);
|
||||
|
||||
collection_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
|
||||
// @@protoc_insertion_point(field_set_char:milvus.proto.milvus.FlushRequest.collection_name)
|
||||
collection_names_.Mutable(index)->assign(value);
|
||||
// @@protoc_insertion_point(field_set_char:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
}
|
||||
inline void FlushRequest::set_collection_name(const char* value, size_t size) {
|
||||
|
||||
collection_name_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
|
||||
::std::string(reinterpret_cast<const char*>(value), size));
|
||||
// @@protoc_insertion_point(field_set_pointer:milvus.proto.milvus.FlushRequest.collection_name)
|
||||
inline void FlushRequest::set_collection_names(int index, const char* value, size_t size) {
|
||||
collection_names_.Mutable(index)->assign(
|
||||
reinterpret_cast<const char*>(value), size);
|
||||
// @@protoc_insertion_point(field_set_pointer:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
}
|
||||
inline std::string* FlushRequest::mutable_collection_name() {
|
||||
|
||||
// @@protoc_insertion_point(field_mutable:milvus.proto.milvus.FlushRequest.collection_name)
|
||||
return collection_name_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
|
||||
inline std::string* FlushRequest::add_collection_names() {
|
||||
// @@protoc_insertion_point(field_add_mutable:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
return collection_names_.Add();
|
||||
}
|
||||
inline std::string* FlushRequest::release_collection_name() {
|
||||
// @@protoc_insertion_point(field_release:milvus.proto.milvus.FlushRequest.collection_name)
|
||||
|
||||
return collection_name_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
|
||||
inline void FlushRequest::add_collection_names(const std::string& value) {
|
||||
collection_names_.Add()->assign(value);
|
||||
// @@protoc_insertion_point(field_add:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
}
|
||||
inline void FlushRequest::set_allocated_collection_name(std::string* collection_name) {
|
||||
if (collection_name != nullptr) {
|
||||
|
||||
} else {
|
||||
|
||||
}
|
||||
collection_name_.SetAllocatedNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), collection_name);
|
||||
// @@protoc_insertion_point(field_set_allocated:milvus.proto.milvus.FlushRequest.collection_name)
|
||||
inline void FlushRequest::add_collection_names(std::string&& value) {
|
||||
collection_names_.Add(std::move(value));
|
||||
// @@protoc_insertion_point(field_add:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
}
|
||||
inline void FlushRequest::add_collection_names(const char* value) {
|
||||
GOOGLE_DCHECK(value != nullptr);
|
||||
collection_names_.Add()->assign(value);
|
||||
// @@protoc_insertion_point(field_add_char:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
}
|
||||
inline void FlushRequest::add_collection_names(const char* value, size_t size) {
|
||||
collection_names_.Add()->assign(reinterpret_cast<const char*>(value), size);
|
||||
// @@protoc_insertion_point(field_add_pointer:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
}
|
||||
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
|
||||
FlushRequest::collection_names() const {
|
||||
// @@protoc_insertion_point(field_list:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
return collection_names_;
|
||||
}
|
||||
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
|
||||
FlushRequest::mutable_collection_names() {
|
||||
// @@protoc_insertion_point(field_mutable_list:milvus.proto.milvus.FlushRequest.collection_names)
|
||||
return &collection_names_;
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
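The regenerated accessors above correspond to a schema change in milvus.proto: FlushRequest's singular "string collection_name = 3" field becomes "repeated string collection_names = 3", so one Flush call can now cover several collections. A minimal caller-side sketch against the matching Go bindings follows; the Go field names are assumed from the usual protoc-gen-go naming and are not shown in this diff.

package main

import (
	"fmt"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
)

func main() {
	// Before this change the request carried a single CollectionName; now a
	// slice of collection names is flushed in one call. Field names here are
	// assumed from standard protoc-gen-go output for the .proto above.
	req := &milvuspb.FlushRequest{
		Base:            &commonpb.MsgBase{MsgType: commonpb.MsgType_kFlush},
		DbName:          "default",
		CollectionNames: []string{"col1", "col2"},
	}
	fmt.Println(req.GetCollectionNames())
}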
|
||||
|
|
|
@@ -1,137 +0,0 @@
|
|||
package writerclient
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
"github.com/zilliztech/milvus-distributed/internal/kv"
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
pb "github.com/zilliztech/milvus-distributed/internal/proto/writerpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
|
||||
)
|
||||
|
||||
type UniqueID = typeutil.UniqueID
|
||||
|
||||
type Timestamp = typeutil.Timestamp
|
||||
|
||||
type Client struct {
|
||||
kvClient kv.TxnBase // client of a reliable kv service, i.e. etcd client
|
||||
kvPrefix string
|
||||
|
||||
flushStream msgstream.MsgStream
|
||||
}
|
||||
|
||||
func NewWriterClient(etcdAddress string, kvRootPath string, writeNodeSegKvSubPath string, flushStream msgstream.MsgStream) (*Client, error) {
|
||||
// init kv client
|
||||
etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kvClient := etcdkv.NewEtcdKV(etcdClient, kvRootPath)
|
||||
|
||||
return &Client{
|
||||
kvClient: kvClient,
|
||||
kvPrefix: writeNodeSegKvSubPath,
|
||||
flushStream: flushStream,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type SegmentDescription struct {
|
||||
SegmentID UniqueID
|
||||
IsClosed bool
|
||||
OpenTime Timestamp
|
||||
CloseTime Timestamp
|
||||
}
|
||||
|
||||
func (c *Client) FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error {
|
||||
baseMsg := msgstream.BaseMsg{
|
||||
BeginTimestamp: 0,
|
||||
EndTimestamp: 0,
|
||||
HashValues: []uint32{0},
|
||||
}
|
||||
|
||||
flushMsg := internalpb2.FlushMsg{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kFlush,
|
||||
Timestamp: timestamp,
|
||||
},
|
||||
SegmentID: segmentID,
|
||||
CollectionID: collectionID,
|
||||
PartitionTag: partitionTag,
|
||||
}
|
||||
|
||||
fMsg := &msgstream.FlushMsg{
|
||||
BaseMsg: baseMsg,
|
||||
FlushMsg: flushMsg,
|
||||
}
|
||||
msgPack := msgstream.MsgPack{}
|
||||
msgPack.Msgs = append(msgPack.Msgs, fMsg)
|
||||
|
||||
err := c.flushStream.Produce(&msgPack)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Client) DescribeSegment(segmentID UniqueID) (*SegmentDescription, error) {
|
||||
// query etcd
|
||||
ret := &SegmentDescription{
|
||||
SegmentID: segmentID,
|
||||
IsClosed: false,
|
||||
}
|
||||
|
||||
key := c.kvPrefix + strconv.FormatInt(segmentID, 10)
|
||||
|
||||
etcdKV, ok := c.kvClient.(*etcdkv.EtcdKV)
|
||||
if !ok {
|
||||
return nil, errors.New("type assertion failed for etcd kv")
|
||||
}
|
||||
count, err := etcdKV.GetCount(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if count <= 0 {
|
||||
ret.IsClosed = false
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
value, err := c.kvClient.Load(key)
|
||||
if err != nil {
|
||||
return ret, err
|
||||
}
|
||||
|
||||
flushMeta := pb.SegmentFlushMeta{}
|
||||
err = proto.UnmarshalText(value, &flushMeta)
|
||||
if err != nil {
|
||||
return ret, err
|
||||
}
|
||||
ret.IsClosed = flushMeta.IsClosed
|
||||
ret.OpenTime = flushMeta.OpenTime
|
||||
ret.CloseTime = flushMeta.CloseTime
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (c *Client) GetInsertBinlogPaths(segmentID UniqueID) (map[int64][]string, error) {
|
||||
key := c.kvPrefix + strconv.FormatInt(segmentID, 10)
|
||||
|
||||
value, err := c.kvClient.Load(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
flushMeta := pb.SegmentFlushMeta{}
|
||||
err = proto.UnmarshalText(value, &flushMeta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ret := make(map[int64][]string)
|
||||
for _, field := range flushMeta.Fields {
|
||||
ret[field.FieldID] = field.BinlogPaths
|
||||
}
|
||||
return ret, nil
|
||||
}
|
|
@@ -2,6 +2,7 @@ package datanode
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"time"
|
||||
|
@@ -82,9 +83,9 @@ func NewDataNode(ctx context.Context) *DataNode {
|
|||
node := &DataNode{
|
||||
ctx: ctx2,
|
||||
cancel: cancel2,
|
||||
NodeID: Params.NodeID, // GOOSE TODO How to init
|
||||
NodeID: Params.NodeID, // GOOSE TODO: How to init
|
||||
Role: typeutil.DataNodeRole,
|
||||
State: internalpb2.StateCode_INITIALIZING,
|
||||
State: internalpb2.StateCode_INITIALIZING, // GOOSE TODO: atomic
|
||||
dataSyncService: nil,
|
||||
metaService: nil,
|
||||
masterService: nil,
|
||||
|
@@ -96,15 +97,26 @@ func NewDataNode(ctx context.Context) *DataNode {
|
|||
}
|
||||
|
||||
func (node *DataNode) SetMasterServiceInterface(ms MasterServiceInterface) error {
|
||||
node.masterService = ms
|
||||
return nil
|
||||
switch {
|
||||
case ms == nil, node.masterService != nil:
|
||||
return errors.New("Nil parameter or repeatly set")
|
||||
default:
|
||||
node.masterService = ms
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (node *DataNode) SetDataServiceInterface(ds DataServiceInterface) error {
|
||||
node.dataService = ds
|
||||
return nil
|
||||
switch {
|
||||
case ds == nil, node.dataService != nil:
|
||||
return errors.New("Nil parameter or repeatly set")
|
||||
default:
|
||||
node.dataService = ds
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Init assumes the data service is still in the INITIALIZING state
|
||||
func (node *DataNode) Init() error {
|
||||
|
||||
req := &datapb.RegisterNodeRequest{
|
||||
|
@@ -145,11 +157,15 @@ func (node *DataNode) Init() error {
|
|||
}
|
||||
|
||||
var alloc allocator = newAllocatorImpl(node.masterService)
|
||||
|
||||
chanSize := 100
|
||||
node.flushChan = make(chan *flushMsg, chanSize)
|
||||
|
||||
node.dataSyncService = newDataSyncService(node.ctx, node.flushChan, replica, alloc)
|
||||
node.metaService = newMetaService(node.ctx, replica, node.masterService)
|
||||
|
||||
node.replica = replica
|
||||
node.dataSyncService.initNodes()
|
||||
|
||||
// --- Opentracing ---
|
||||
cfg := &config.Configuration{
|
||||
|
@@ -174,19 +190,38 @@ func (node *DataNode) Init() error {
|
|||
}
|
||||
|
||||
func (node *DataNode) Start() error {
|
||||
|
||||
go node.dataSyncService.start()
|
||||
node.metaService.init()
|
||||
node.State = internalpb2.StateCode_HEALTHY
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (node *DataNode) WatchDmChannels(in *datapb.WatchDmChannelRequest) (*commonpb.Status, error) {
|
||||
log.Println("Init insert channel names:", in.GetChannelNames())
|
||||
Params.InsertChannelNames = append(Params.InsertChannelNames, in.GetChannelNames()...)
|
||||
// StartSync initializes and starts the data sync service, then marks the DataNode HEALTHY
|
||||
func (node *DataNode) StartSync() {
|
||||
node.dataSyncService.init()
|
||||
go node.dataSyncService.start()
|
||||
node.State = internalpb2.StateCode_HEALTHY
|
||||
}
|
||||
|
||||
return &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS}, nil
|
||||
func (node *DataNode) WatchDmChannels(in *datapb.WatchDmChannelRequest) (*commonpb.Status, error) {
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
}
|
||||
|
||||
switch {
|
||||
|
||||
case node.State != internalpb2.StateCode_HEALTHY:
|
||||
status.Reason = fmt.Sprintf("DataNode %d not healthy!", node.NodeID)
|
||||
return status, errors.New(status.GetReason())
|
||||
|
||||
case len(Params.InsertChannelNames) != 0:
|
||||
status.Reason = fmt.Sprintf("DataNode has %d already set insert channels!", node.NodeID)
|
||||
return status, errors.New(status.GetReason())
|
||||
|
||||
default:
|
||||
Params.InsertChannelNames = in.GetChannelNames()
|
||||
status.ErrorCode = commonpb.ErrorCode_SUCCESS
|
||||
node.StartSync()
|
||||
return status, nil
|
||||
}
|
||||
}
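WatchDmChannels is now guarded: it rejects the request when the node is not HEALTHY or when insert channels were already assigned, and on success it records the channel names and calls StartSync. A hedged caller-side sketch of that contract; the helper is illustrative and assumed to live in package datanode next to the types in this diff.

package datanode

import (
	"log"

	"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
)

// watchChannelsSketch drives the new WatchDmChannels contract; how the node was
// constructed and brought to a HEALTHY state is assumed, not shown here.
func watchChannelsSketch(node *DataNode, channels []string) error {
	status, err := node.WatchDmChannels(&datapb.WatchDmChannelRequest{
		ChannelNames: channels,
	})
	if err != nil {
		// Rejected: node not HEALTHY, or insert channels already set.
		log.Println("WatchDmChannels rejected:", status.GetReason())
		return err
	}
	// On success Params.InsertChannelNames is populated and StartSync has
	// launched the data sync service in the background.
	return nil
}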
|
||||
|
||||
func (node *DataNode) GetComponentStates() (*internalpb2.ComponentStates, error) {
|
||||
|
|
|
@@ -1,7 +1,10 @@
|
|||
package datanode
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"log"
|
||||
"math"
|
||||
"math/rand"
|
||||
"os"
|
||||
"strconv"
|
||||
|
@@ -10,6 +13,14 @@ import (
|
|||
"go.etcd.io/etcd/clientv3"
|
||||
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
)
|
||||
|
||||
func makeNewChannelNames(names []string, suffix string) []string {
|
||||
|
@@ -77,3 +88,366 @@ func clearEtcd(rootPath string) error {
|
|||
return nil
|
||||
|
||||
}
|
||||
|
||||
type (
|
||||
Factory interface {
|
||||
}
|
||||
|
||||
MetaFactory struct {
|
||||
}
|
||||
|
||||
DataFactory struct {
|
||||
rawData []byte
|
||||
}
|
||||
|
||||
AllocatorFactory struct {
|
||||
ID UniqueID
|
||||
}
|
||||
|
||||
MasterServiceFactory struct {
|
||||
ID UniqueID
|
||||
collectionName string
|
||||
collectionID UniqueID
|
||||
}
|
||||
)
|
||||
|
||||
func (mf *MetaFactory) CollectionMetaFactory(collectionID UniqueID, collectionName string) *etcdpb.CollectionMeta {
|
||||
sch := schemapb.CollectionSchema{
|
||||
Name: collectionName,
|
||||
Description: "test collection by meta factory",
|
||||
AutoID: true,
|
||||
Fields: []*schemapb.FieldSchema{
|
||||
{
|
||||
FieldID: 0,
|
||||
Name: "RowID",
|
||||
Description: "RowID field",
|
||||
DataType: schemapb.DataType_INT64,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "f0_tk1",
|
||||
Value: "f0_tv1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 1,
|
||||
Name: "Timestamp",
|
||||
Description: "Timestamp field",
|
||||
DataType: schemapb.DataType_INT64,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "f1_tk1",
|
||||
Value: "f1_tv1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 100,
|
||||
Name: "float_vector_field",
|
||||
Description: "field 100",
|
||||
DataType: schemapb.DataType_VECTOR_FLOAT,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "2",
|
||||
},
|
||||
},
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "indexkey",
|
||||
Value: "indexvalue",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 101,
|
||||
Name: "binary_vector_field",
|
||||
Description: "field 101",
|
||||
DataType: schemapb.DataType_VECTOR_BINARY,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "32",
|
||||
},
|
||||
},
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "indexkey",
|
||||
Value: "indexvalue",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 102,
|
||||
Name: "bool_field",
|
||||
Description: "field 102",
|
||||
DataType: schemapb.DataType_BOOL,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 103,
|
||||
Name: "int8_field",
|
||||
Description: "field 103",
|
||||
DataType: schemapb.DataType_INT8,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 104,
|
||||
Name: "int16_field",
|
||||
Description: "field 104",
|
||||
DataType: schemapb.DataType_INT16,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 105,
|
||||
Name: "int32_field",
|
||||
Description: "field 105",
|
||||
DataType: schemapb.DataType_INT32,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 106,
|
||||
Name: "int64_field",
|
||||
Description: "field 106",
|
||||
DataType: schemapb.DataType_INT64,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 107,
|
||||
Name: "float32_field",
|
||||
Description: "field 107",
|
||||
DataType: schemapb.DataType_FLOAT,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 108,
|
||||
Name: "float64_field",
|
||||
Description: "field 108",
|
||||
DataType: schemapb.DataType_DOUBLE,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
collection := etcdpb.CollectionMeta{
|
||||
ID: collectionID,
|
||||
Schema: &sch,
|
||||
CreateTime: Timestamp(1),
|
||||
SegmentIDs: make([]UniqueID, 0),
|
||||
PartitionTags: make([]string, 0),
|
||||
}
|
||||
return &collection
|
||||
}
|
||||
|
||||
func NewDataFactory() *DataFactory {
|
||||
return &DataFactory{rawData: GenRowData()}
|
||||
}
|
||||
|
||||
func GenRowData() (rawData []byte) {
|
||||
const DIM = 2
|
||||
const N = 1
|
||||
|
||||
// Float vector
|
||||
var fvector = [DIM]float32{1, 2}
|
||||
for _, ele := range fvector {
|
||||
buf := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
|
||||
rawData = append(rawData, buf...)
|
||||
}
|
||||
|
||||
// Binary vector
|
||||
// Dimension of binary vector is 32
|
||||
// size = 32 / 8 = 4 bytes
|
||||
var bvector = []byte{255, 255, 255, 0}
|
||||
rawData = append(rawData, bvector...)
|
||||
|
||||
// Bool
|
||||
var fieldBool = true
|
||||
buf := new(bytes.Buffer)
|
||||
if err := binary.Write(buf, binary.LittleEndian, fieldBool); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
rawData = append(rawData, buf.Bytes()...)
|
||||
|
||||
// int8
|
||||
var dataInt8 int8 = 100
|
||||
bint8 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint8, binary.LittleEndian, dataInt8); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint8.Bytes()...)
|
||||
|
||||
// int16
|
||||
var dataInt16 int16 = 200
|
||||
bint16 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint16, binary.LittleEndian, dataInt16); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint16.Bytes()...)
|
||||
|
||||
// int32
|
||||
var dataInt32 int32 = 300
|
||||
bint32 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint32, binary.LittleEndian, dataInt32); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint32.Bytes()...)
|
||||
|
||||
// int64
|
||||
var dataInt64 int64 = 400
|
||||
bint64 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint64, binary.LittleEndian, dataInt64); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint64.Bytes()...)
|
||||
|
||||
// float32
|
||||
var datafloat float32 = 1.1
|
||||
bfloat32 := new(bytes.Buffer)
|
||||
if err := binary.Write(bfloat32, binary.LittleEndian, datafloat); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bfloat32.Bytes()...)
|
||||
|
||||
// float64
|
||||
var datafloat64 float64 = 2.2
|
||||
bfloat64 := new(bytes.Buffer)
|
||||
if err := binary.Write(bfloat64, binary.LittleEndian, datafloat64); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bfloat64.Bytes()...)
|
||||
log.Println("Rawdata length:", len(rawData))
|
||||
return
|
||||
}
|
||||
|
||||
// n: number of TsMsg insert messages to generate
|
||||
func (df *DataFactory) GetMsgStreamTsInsertMsgs(n int) (inMsgs []msgstream.TsMsg) {
|
||||
for i := 0; i < n; i++ {
|
||||
var msg msgstream.TsMsg = &msgstream.InsertMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
HashValues: []uint32{uint32(i)},
|
||||
},
|
||||
InsertRequest: internalpb2.InsertRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kInsert,
|
||||
MsgID: 0, // GOOSE TODO
|
||||
Timestamp: Timestamp(i + 1000),
|
||||
SourceID: 0,
|
||||
},
|
||||
CollectionName: "col1", // GOOSE TODO
|
||||
PartitionName: "default",
|
||||
SegmentID: 1, // GOOSE TODO
|
||||
ChannelID: "0", // GOOSE TODO
|
||||
Timestamps: []Timestamp{Timestamp(i + 1000)},
|
||||
RowIDs: []UniqueID{UniqueID(i)},
|
||||
RowData: []*commonpb.Blob{{Value: df.rawData}},
|
||||
},
|
||||
}
|
||||
inMsgs = append(inMsgs, msg)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// n: number of insertMsgs to generate
|
||||
func (df *DataFactory) GetMsgStreamInsertMsgs(n int) (inMsgs []*msgstream.InsertMsg) {
|
||||
for i := 0; i < n; i++ {
|
||||
var msg = &msgstream.InsertMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
HashValues: []uint32{uint32(i)},
|
||||
},
|
||||
InsertRequest: internalpb2.InsertRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kInsert,
|
||||
MsgID: 0, // GOOSE TODO
|
||||
Timestamp: Timestamp(i + 1000),
|
||||
SourceID: 0,
|
||||
},
|
||||
CollectionName: "col1", // GOOSE TODO
|
||||
PartitionName: "default",
|
||||
SegmentID: 1, // GOOSE TODO
|
||||
ChannelID: "0", // GOOSE TODO
|
||||
Timestamps: []Timestamp{Timestamp(i + 1000)},
|
||||
RowIDs: []UniqueID{UniqueID(i)},
|
||||
RowData: []*commonpb.Blob{{Value: df.rawData}},
|
||||
},
|
||||
}
|
||||
inMsgs = append(inMsgs, msg)
|
||||
}
|
||||
return
|
||||
}
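The two generators above differ only in their return type (msgstream.TsMsg interface values versus concrete *msgstream.InsertMsg pointers); both reuse the single raw row produced by GenRowData. A short usage sketch; the function name is illustrative only.

package datanode

// exampleInsertMsgs shows the intended use of the DataFactory helpers above:
// one factory, one shared raw row, n messages with increasing timestamps.
func exampleInsertMsgs() {
	df := NewDataFactory()
	tsMsgs := df.GetMsgStreamTsInsertMsgs(2)   // []msgstream.TsMsg
	insertMsgs := df.GetMsgStreamInsertMsgs(2) // []*msgstream.InsertMsg
	_ = tsMsgs
	_ = insertMsgs
}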
|
||||
|
||||
func NewAllocatorFactory(id ...UniqueID) *AllocatorFactory {
|
||||
f := &AllocatorFactory{}
|
||||
if len(id) == 1 {
|
||||
f.ID = id[0]
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func (alloc AllocatorFactory) setID(id UniqueID) {
|
||||
alloc.ID = id
|
||||
}
|
||||
|
||||
func (alloc AllocatorFactory) allocID() (UniqueID, error) {
|
||||
if alloc.ID == 0 {
|
||||
return UniqueID(0), nil // GOOSE TODO: random ID generating
|
||||
}
|
||||
return alloc.ID, nil
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) setID(id UniqueID) {
|
||||
m.ID = id // GOOSE TODO: random ID generator
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) setCollectionID(id UniqueID) {
|
||||
m.collectionID = id
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) setCollectionName(name string) {
|
||||
m.collectionName = name
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
|
||||
resp := &masterpb.IDResponse{
|
||||
Status: &commonpb.Status{},
|
||||
ID: m.ID,
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
|
||||
resp := &milvuspb.ShowCollectionResponse{
|
||||
Status: &commonpb.Status{},
|
||||
CollectionNames: []string{m.collectionName},
|
||||
}
|
||||
return resp, nil
|
||||
|
||||
}
|
||||
func (m *MasterServiceFactory) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
|
||||
f := MetaFactory{}
|
||||
meta := f.CollectionMetaFactory(m.collectionID, m.collectionName)
|
||||
resp := &milvuspb.DescribeCollectionResponse{
|
||||
Status: &commonpb.Status{},
|
||||
CollectionID: m.collectionID,
|
||||
Schema: meta.Schema,
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) GetComponentStates() (*internalpb2.ComponentStates, error) {
|
||||
return &internalpb2.ComponentStates{
|
||||
State: &internalpb2.ComponentInfo{},
|
||||
SubcomponentStates: make([]*internalpb2.ComponentInfo, 0),
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
},
|
||||
}, nil
|
||||
}
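These factories are test doubles: MasterServiceFactory answers AllocID, ShowCollections, DescribeCollection and GetComponentStates with canned data built from MetaFactory. A hedged wiring sketch, assuming the factory satisfies MasterServiceInterface (the full interface is not shown in this diff); the helper name and canned values are illustrative.

package datanode

import "context"

// newTestDataNodeSketch plugs the mock master service into a DataNode the way
// a unit test might.
func newTestDataNodeSketch(ctx context.Context) (*DataNode, error) {
	node := NewDataNode(ctx)

	ms := &MasterServiceFactory{}
	ms.setID(100)
	ms.setCollectionID(1)
	ms.setCollectionName("col1")

	// SetMasterServiceInterface now rejects nil parameters and double registration.
	if err := node.SetMasterServiceInterface(ms); err != nil {
		return nil, err
	}
	return node, nil
}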
|
||||
|
|
|
@@ -19,18 +19,27 @@ type dataSyncService struct {
|
|||
|
||||
func newDataSyncService(ctx context.Context, flushChan chan *flushMsg,
|
||||
replica collectionReplica, alloc allocator) *dataSyncService {
|
||||
|
||||
return &dataSyncService{
|
||||
service := &dataSyncService{
|
||||
ctx: ctx,
|
||||
fg: nil,
|
||||
flushChan: flushChan,
|
||||
replica: replica,
|
||||
idAllocator: alloc,
|
||||
}
|
||||
return service
|
||||
}
|
||||
|
||||
func (dsService *dataSyncService) init() {
|
||||
if len(Params.InsertChannelNames) == 0 {
|
||||
log.Println("InsertChannels not readly, init datasync service failed")
|
||||
return
|
||||
}
|
||||
|
||||
dsService.initNodes()
|
||||
}
|
||||
|
||||
func (dsService *dataSyncService) start() {
|
||||
dsService.initNodes()
|
||||
log.Println("Data Sync Service Start Successfully")
|
||||
dsService.fg.Start()
|
||||
}
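Splitting init from start changes the service lifecycle: initNodes now also runs from init, which bails out with a log message while Params.InsertChannelNames is still empty, and start then launches the flowgraph. A hedged sketch of the call order the updated test uses; the helper name is illustrative.

package datanode

import "context"

// runDataSyncSketch follows the order exercised by the updated test:
// construct, init (requires insert channels to be configured), then start.
func runDataSyncSketch(ctx context.Context, flushChan chan *flushMsg,
	replica collectionReplica, alloc allocator) *dataSyncService {
	sync := newDataSyncService(ctx, flushChan, replica, alloc)
	sync.init()     // builds the flowgraph nodes; logs and returns if channels are not set
	go sync.start() // run on its own goroutine, as the test does
	return sync
}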
|
||||
|
||||
|
@@ -60,7 +69,6 @@ func (dsService *dataSyncService) initNodes() {
|
|||
var ddStreamNode Node = newDDInputNode(dsService.ctx)
|
||||
|
||||
var filterDmNode Node = newFilteredDmNode()
|
||||
|
||||
var ddNode Node = newDDNode(dsService.ctx, mt, dsService.flushChan, dsService.replica, dsService.idAllocator)
|
||||
var insertBufferNode Node = newInsertBufferNode(dsService.ctx, mt, dsService.replica, dsService.idAllocator)
|
||||
var gcNode Node = newGCNode(dsService.replica)
|
||||
|
|
|
@@ -2,7 +2,6 @@ package datanode
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
@@ -42,116 +41,15 @@ func TestDataSyncService_Start(t *testing.T) {
|
|||
allocFactory := AllocatorFactory{}
|
||||
sync := newDataSyncService(ctx, flushChan, replica, allocFactory)
|
||||
sync.replica.addCollection(collMeta.ID, collMeta.Schema)
|
||||
sync.init()
|
||||
go sync.start()
|
||||
|
||||
// test data generate
|
||||
// GOOSE TODO: organize
|
||||
const DIM = 2
|
||||
const N = 1
|
||||
var rawData []byte
|
||||
|
||||
// Float vector
|
||||
var fvector = [DIM]float32{1, 2}
|
||||
for _, ele := range fvector {
|
||||
buf := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
|
||||
rawData = append(rawData, buf...)
|
||||
}
|
||||
|
||||
// Binary vector
|
||||
// Dimension of binary vector is 32
|
||||
var bvector = [4]byte{255, 255, 255, 0}
|
||||
for _, ele := range bvector {
|
||||
bs := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bs, uint32(ele))
|
||||
rawData = append(rawData, bs...)
|
||||
}
|
||||
|
||||
// Bool
|
||||
bb := make([]byte, 4)
|
||||
var fieldBool = true
|
||||
var fieldBoolInt uint32
|
||||
if fieldBool {
|
||||
fieldBoolInt = 1
|
||||
} else {
|
||||
fieldBoolInt = 0
|
||||
}
|
||||
|
||||
binary.LittleEndian.PutUint32(bb, fieldBoolInt)
|
||||
rawData = append(rawData, bb...)
|
||||
|
||||
// int8
|
||||
var dataInt8 int8 = 100
|
||||
bint8 := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bint8, uint32(dataInt8))
|
||||
rawData = append(rawData, bint8...)
|
||||
|
||||
// int16
|
||||
var dataInt16 int16 = 200
|
||||
bint16 := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bint16, uint32(dataInt16))
|
||||
rawData = append(rawData, bint16...)
|
||||
|
||||
// int32
|
||||
var dataInt32 int32 = 300
|
||||
bint32 := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bint32, uint32(dataInt32))
|
||||
rawData = append(rawData, bint32...)
|
||||
|
||||
// int64
|
||||
var dataInt64 int64 = 300
|
||||
bint64 := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bint64, uint32(dataInt64))
|
||||
rawData = append(rawData, bint64...)
|
||||
|
||||
// float32
|
||||
var datafloat float32 = 1.1
|
||||
bfloat32 := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bfloat32, math.Float32bits(datafloat))
|
||||
rawData = append(rawData, bfloat32...)
|
||||
|
||||
// float64
|
||||
var datafloat64 float64 = 2.2
|
||||
bfloat64 := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(bfloat64, math.Float64bits(datafloat64))
|
||||
rawData = append(rawData, bfloat64...)
|
||||
|
||||
timeRange := TimeRange{
|
||||
timestampMin: 0,
|
||||
timestampMax: math.MaxUint64,
|
||||
}
|
||||
|
||||
// messages generate
|
||||
const MSGLENGTH = 1
|
||||
insertMessages := make([]msgstream.TsMsg, 0)
|
||||
for i := 0; i < MSGLENGTH; i++ {
|
||||
var msg msgstream.TsMsg = &msgstream.InsertMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
HashValues: []uint32{
|
||||
uint32(i),
|
||||
},
|
||||
},
|
||||
InsertRequest: internalpb2.InsertRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kInsert,
|
||||
MsgID: UniqueID(0),
|
||||
Timestamp: Timestamp(i + 1000),
|
||||
SourceID: 0,
|
||||
},
|
||||
CollectionName: "col1",
|
||||
PartitionName: "default",
|
||||
SegmentID: UniqueID(1),
|
||||
ChannelID: "0",
|
||||
Timestamps: []Timestamp{Timestamp(i + 1000)},
|
||||
RowIDs: []UniqueID{UniqueID(i)},
|
||||
|
||||
RowData: []*commonpb.Blob{
|
||||
{Value: rawData},
|
||||
},
|
||||
},
|
||||
}
|
||||
insertMessages = append(insertMessages, msg)
|
||||
}
|
||||
dataFactory := NewDataFactory()
|
||||
insertMessages := dataFactory.GetMsgStreamTsInsertMsgs(2)
|
||||
|
||||
msgPack := msgstream.MsgPack{
|
||||
BeginTs: timeRange.timestampMin,
|
||||
|
@@ -184,11 +82,12 @@ func TestDataSyncService_Start(t *testing.T) {
|
|||
insertChannels := Params.InsertChannelNames
|
||||
ddChannels := Params.DDChannelNames
|
||||
|
||||
insertStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
insertStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
|
||||
insertStream.SetPulsarClient(pulsarURL)
|
||||
insertStream.CreatePulsarProducers(insertChannels)
|
||||
|
||||
ddStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
ddStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
|
||||
ddStream.SetPulsarClient(pulsarURL)
|
||||
ddStream.CreatePulsarProducers(ddChannels)
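This is the interface change the commit title refers to: pulsarms.NewPulsarMsgStream now takes a pulsar buffer size and an unmarshal dispatcher produced by a msgstream factory instead of building its own. A hedged helper capturing the producer-side setup used in this test; the import paths and the concrete return type are assumed, only the call sequence is taken from the test above.

package datanode

import (
	"context"

	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
)

// newProducerStreamSketch mirrors the updated constructor: receive buffer size,
// pulsar buffer size (1024 here, as in the test), and an unmarshal dispatcher
// from msgstream.ProtoUDFactory.
func newProducerStreamSketch(ctx context.Context, pulsarURL string, channels []string) *pulsarms.PulsarMsgStream {
	factory := msgstream.ProtoUDFactory{}
	stream := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
	stream.SetPulsarClient(pulsarURL)
	stream.CreatePulsarProducers(channels)
	return stream
}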
|
||||
|
||||
|
@@ -208,6 +107,7 @@ func TestDataSyncService_Start(t *testing.T) {
|
|||
|
||||
// dataSync
|
||||
Params.FlushInsertBufferSize = 1
|
||||
<-sync.ctx.Done()
|
||||
|
||||
sync.close()
|
||||
}
|
||||
|
|
|
@@ -1,231 +0,0 @@
|
|||
package datanode
|
||||
|
||||
import (
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
)
|
||||
|
||||
type (
|
||||
Factory interface {
|
||||
}
|
||||
|
||||
MetaFactory struct {
|
||||
}
|
||||
|
||||
AllocatorFactory struct {
|
||||
ID UniqueID
|
||||
}
|
||||
|
||||
MasterServiceFactory struct {
|
||||
ID UniqueID
|
||||
collectionName string
|
||||
collectionID UniqueID
|
||||
}
|
||||
)
|
||||
|
||||
func (mf *MetaFactory) CollectionMetaFactory(collectionID UniqueID, collectionName string) *etcdpb.CollectionMeta {
|
||||
sch := schemapb.CollectionSchema{
|
||||
Name: collectionName,
|
||||
Description: "test collection by meta factory",
|
||||
AutoID: true,
|
||||
Fields: []*schemapb.FieldSchema{
|
||||
{
|
||||
FieldID: 0,
|
||||
Name: "RowID",
|
||||
Description: "RowID field",
|
||||
DataType: schemapb.DataType_INT64,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "f0_tk1",
|
||||
Value: "f0_tv1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 1,
|
||||
Name: "Timestamp",
|
||||
Description: "Timestamp field",
|
||||
DataType: schemapb.DataType_INT64,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "f1_tk1",
|
||||
Value: "f1_tv1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 100,
|
||||
Name: "float_vector_field",
|
||||
Description: "field 100",
|
||||
DataType: schemapb.DataType_VECTOR_FLOAT,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "2",
|
||||
},
|
||||
},
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "indexkey",
|
||||
Value: "indexvalue",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 101,
|
||||
Name: "binary_vector_field",
|
||||
Description: "field 101",
|
||||
DataType: schemapb.DataType_VECTOR_BINARY,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "32",
|
||||
},
|
||||
},
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "indexkey",
|
||||
Value: "indexvalue",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 102,
|
||||
Name: "bool_field",
|
||||
Description: "field 102",
|
||||
DataType: schemapb.DataType_BOOL,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 103,
|
||||
Name: "int8_field",
|
||||
Description: "field 103",
|
||||
DataType: schemapb.DataType_INT8,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 104,
|
||||
Name: "int16_field",
|
||||
Description: "field 104",
|
||||
DataType: schemapb.DataType_INT16,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 105,
|
||||
Name: "int32_field",
|
||||
Description: "field 105",
|
||||
DataType: schemapb.DataType_INT32,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 106,
|
||||
Name: "int64_field",
|
||||
Description: "field 106",
|
||||
DataType: schemapb.DataType_INT64,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 107,
|
||||
Name: "float32_field",
|
||||
Description: "field 107",
|
||||
DataType: schemapb.DataType_FLOAT,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 108,
|
||||
Name: "float64_field",
|
||||
Description: "field 108",
|
||||
DataType: schemapb.DataType_DOUBLE,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
collection := etcdpb.CollectionMeta{
|
||||
ID: collectionID,
|
||||
Schema: &sch,
|
||||
CreateTime: Timestamp(1),
|
||||
SegmentIDs: make([]UniqueID, 0),
|
||||
PartitionTags: make([]string, 0),
|
||||
}
|
||||
return &collection
|
||||
}
|
||||
|
||||
func NewAllocatorFactory(id ...UniqueID) *AllocatorFactory {
|
||||
f := &AllocatorFactory{}
|
||||
if len(id) == 1 {
|
||||
f.ID = id[0]
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func (alloc AllocatorFactory) setID(id UniqueID) {
|
||||
alloc.ID = id
|
||||
}
|
||||
|
||||
func (alloc AllocatorFactory) allocID() (UniqueID, error) {
|
||||
if alloc.ID == 0 {
|
||||
return UniqueID(0), nil // GOOSE TODO: random ID generating
|
||||
}
|
||||
return alloc.ID, nil
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) setID(id UniqueID) {
|
||||
m.ID = id // GOOSE TODO: random ID generator
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) setCollectionID(id UniqueID) {
|
||||
m.collectionID = id
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) setCollectionName(name string) {
|
||||
m.collectionName = name
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
|
||||
resp := &masterpb.IDResponse{
|
||||
Status: &commonpb.Status{},
|
||||
ID: m.ID,
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
|
||||
resp := &milvuspb.ShowCollectionResponse{
|
||||
Status: &commonpb.Status{},
|
||||
CollectionNames: []string{m.collectionName},
|
||||
}
|
||||
return resp, nil
|
||||
|
||||
}
|
||||
func (m *MasterServiceFactory) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
|
||||
f := MetaFactory{}
|
||||
meta := f.CollectionMetaFactory(m.collectionID, m.collectionName)
|
||||
resp := &milvuspb.DescribeCollectionResponse{
|
||||
Status: &commonpb.Status{},
|
||||
CollectionID: m.collectionID,
|
||||
Schema: meta.Schema,
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (m *MasterServiceFactory) GetComponentStates() (*internalpb2.ComponentStates, error) {
|
||||
return &internalpb2.ComponentStates{
|
||||
State: &internalpb2.ComponentInfo{},
|
||||
SubcomponentStates: make([]*internalpb2.ComponentInfo, 0),
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
},
|
||||
}, nil
|
||||
}
|
|
@@ -132,7 +132,6 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
		}

	default:
		//log.Println(". default: do nothing ...")
	}

	// generate binlog
@@ -653,22 +653,24 @@ func newInsertBufferNode(ctx context.Context, flushMeta *metaTable,
	}
	minioPrefix := Params.InsertBinlogRootPath

	factory := msgstream.ProtoUDFactory{}

	//input stream, data node time tick
	wTt := pulsarms.NewPulsarMsgStream(ctx, 1024)
	wTt := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
	wTt.SetPulsarClient(Params.PulsarAddress)
	wTt.CreatePulsarProducers([]string{Params.TimeTickChannelName})
	var wTtMsgStream msgstream.MsgStream = wTt
	wTtMsgStream.Start()

	// update statistics channel
	segS := pulsarms.NewPulsarMsgStream(ctx, 1024)
	segS := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
	segS.SetPulsarClient(Params.PulsarAddress)
	segS.CreatePulsarProducers([]string{Params.SegmentStatisticsChannelName})
	var segStatisticsMsgStream msgstream.MsgStream = segS
	segStatisticsMsgStream.Start()

	// segment flush completed channel
	cf := pulsarms.NewPulsarMsgStream(ctx, 1024)
	cf := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
	cf.SetPulsarClient(Params.PulsarAddress)
	cf.CreatePulsarProducers([]string{Params.CompleteFlushChannelName})
	var completeFlushStream msgstream.MsgStream = cf
@@ -1,10 +1,7 @@
package datanode

import (
	"bytes"
	"context"
	"encoding/binary"
	"log"
	"math"
	"testing"
	"time"

@@ -12,8 +9,6 @@ import (
	"github.com/stretchr/testify/require"

	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
	"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)

@@ -43,7 +38,6 @@ func TestFlowGraphInsertBufferNode_Operate(t *testing.T) {
	err = replica.addCollection(collMeta.ID, collMeta.Schema)
	require.NoError(t, err)

	// Params.FlushInsertBufSize = 2
	idFactory := AllocatorFactory{}
	iBNode := newInsertBufferNode(ctx, newMetaTable(), replica, idFactory)
	inMsg := genInsertMsg()

@@ -52,82 +46,6 @@ func TestFlowGraphInsertBufferNode_Operate(t *testing.T) {
	}
func genInsertMsg() insertMsg {
|
||||
// test data generate
|
||||
const DIM = 2
|
||||
const N = 1
|
||||
var rawData []byte
|
||||
|
||||
// Float vector
|
||||
var fvector = [DIM]float32{1, 2}
|
||||
for _, ele := range fvector {
|
||||
buf := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
|
||||
rawData = append(rawData, buf...)
|
||||
}
|
||||
|
||||
// Binary vector
|
||||
// Dimension of binary vector is 32
|
||||
// size := 4, = 32 / 8
|
||||
var bvector = []byte{255, 255, 255, 0}
|
||||
rawData = append(rawData, bvector...)
|
||||
|
||||
// Bool
|
||||
var fieldBool = true
|
||||
buf := new(bytes.Buffer)
|
||||
if err := binary.Write(buf, binary.LittleEndian, fieldBool); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
rawData = append(rawData, buf.Bytes()...)
|
||||
|
||||
// int8
|
||||
var dataInt8 int8 = 100
|
||||
bint8 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint8, binary.LittleEndian, dataInt8); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint8.Bytes()...)
|
||||
|
||||
// int16
|
||||
var dataInt16 int16 = 200
|
||||
bint16 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint16, binary.LittleEndian, dataInt16); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint16.Bytes()...)
|
||||
|
||||
// int32
|
||||
var dataInt32 int32 = 300
|
||||
bint32 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint32, binary.LittleEndian, dataInt32); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint32.Bytes()...)
|
||||
|
||||
// int64
|
||||
var dataInt64 int64 = 400
|
||||
bint64 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint64, binary.LittleEndian, dataInt64); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint64.Bytes()...)
|
||||
|
||||
// float32
|
||||
var datafloat float32 = 1.1
|
||||
bfloat32 := new(bytes.Buffer)
|
||||
if err := binary.Write(bfloat32, binary.LittleEndian, datafloat); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bfloat32.Bytes()...)
|
||||
|
||||
// float64
|
||||
var datafloat64 float64 = 2.2
|
||||
bfloat64 := new(bytes.Buffer)
|
||||
if err := binary.Write(bfloat64, binary.LittleEndian, datafloat64); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bfloat64.Bytes()...)
|
||||
log.Println("Test rawdata length:", len(rawData))
|
||||
|
||||
timeRange := TimeRange{
|
||||
timestampMin: 0,
|
||||
|
@ -143,55 +61,8 @@ func genInsertMsg() insertMsg {
|
|||
},
|
||||
}
|
||||
|
||||
// messages generate
|
||||
const MSGLENGTH = 1
|
||||
// insertMessages := make([]msgstream.TsMsg, 0)
|
||||
for i := 0; i < MSGLENGTH; i++ {
|
||||
var msg = &msgstream.InsertMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
HashValues: []uint32{
|
||||
uint32(i),
|
||||
},
|
||||
},
|
||||
InsertRequest: internalpb2.InsertRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kInsert,
|
||||
MsgID: 0,
|
||||
Timestamp: Timestamp(i + 1000),
|
||||
SourceID: 0,
|
||||
},
|
||||
CollectionName: "col1",
|
||||
PartitionName: "default",
|
||||
CollectionID: 0,
|
||||
PartitionID: 1,
|
||||
SegmentID: UniqueID(1),
|
||||
ChannelID: "0",
|
||||
Timestamps: []Timestamp{
|
||||
Timestamp(i + 1000),
|
||||
Timestamp(i + 1000),
|
||||
Timestamp(i + 1000),
|
||||
Timestamp(i + 1000),
|
||||
Timestamp(i + 1000),
|
||||
},
|
||||
RowIDs: []UniqueID{
|
||||
UniqueID(i),
|
||||
UniqueID(i),
|
||||
UniqueID(i),
|
||||
UniqueID(i),
|
||||
UniqueID(i),
|
||||
},
|
||||
|
||||
RowData: []*commonpb.Blob{
|
||||
{Value: rawData},
|
||||
{Value: rawData},
|
||||
{Value: rawData},
|
||||
{Value: rawData},
|
||||
{Value: rawData},
|
||||
},
|
||||
},
|
||||
}
|
||||
iMsg.insertMessages = append(iMsg.insertMessages, msg)
|
||||
}
|
||||
dataFactory := NewDataFactory()
|
||||
iMsg.insertMessages = append(iMsg.insertMessages, dataFactory.GetMsgStreamInsertMsgs(2)...)
|
||||
|
||||
fmsg := &flushMsg{
|
||||
msgID: 1,
|
||||
|
|
|
@@ -5,47 +5,39 @@ import (

	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
	"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
	"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)

func newDmInputNode(ctx context.Context) *flowgraph.InputNode {
	msgStreamURL := Params.PulsarAddress

	consumeChannels := Params.InsertChannelNames
	consumeSubName := Params.MsgChannelSubName

	insertStream := pulsarms.NewPulsarTtMsgStream(ctx, 1024)

	insertStream.SetPulsarClient(msgStreamURL)
	unmarshalDispatcher := util.NewUnmarshalDispatcher()

	insertStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, 1024)

	var stream msgstream.MsgStream = insertStream

	maxQueueLength := Params.FlowGraphMaxQueueLength
	maxParallelism := Params.FlowGraphMaxParallelism
	consumeChannels := Params.InsertChannelNames
	consumeSubName := Params.MsgChannelSubName

	factory := msgstream.ProtoUDFactory{}
	insertStream := pulsarms.NewPulsarTtMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
	insertStream.SetPulsarClient(Params.PulsarAddress)

	insertStream.CreatePulsarConsumers(consumeChannels, consumeSubName)

	var stream msgstream.MsgStream = insertStream
	node := flowgraph.NewInputNode(&stream, "dmInputNode", maxQueueLength, maxParallelism)
	return node
}

func newDDInputNode(ctx context.Context) *flowgraph.InputNode {

	consumeChannels := Params.DDChannelNames
	consumeSubName := Params.MsgChannelSubName

	ddStream := pulsarms.NewPulsarTtMsgStream(ctx, 1024)
	ddStream.SetPulsarClient(Params.PulsarAddress)
	unmarshalDispatcher := util.NewUnmarshalDispatcher()
	ddStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, 1024)

	var stream msgstream.MsgStream = ddStream

	maxQueueLength := Params.FlowGraphMaxQueueLength
	maxParallelism := Params.FlowGraphMaxParallelism
	consumeSubName := Params.MsgChannelSubName

	factory := msgstream.ProtoUDFactory{}
	tmpStream := pulsarms.NewPulsarTtMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
	tmpStream.SetPulsarClient(Params.PulsarAddress)
	tmpStream.CreatePulsarConsumers(Params.DDChannelNames, consumeSubName)

	var stream msgstream.MsgStream = tmpStream
	node := flowgraph.NewInputNode(&stream, "ddInputNode", maxQueueLength, maxParallelism)
	return node
}
@@ -1,38 +0,0 @@
package dataservice

import (
	"strconv"
	"sync"
)

type (
	insertChannelManager struct {
		mu            sync.RWMutex
		count         int
		channelGroups map[UniqueID][]string // collection id to channel ranges
	}
)

func newInsertChannelManager() *insertChannelManager {
	return &insertChannelManager{
		count:         0,
		channelGroups: make(map[UniqueID][]string),
	}
}

func (cm *insertChannelManager) GetChannels(collectionID UniqueID) ([]string, error) {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	if _, ok := cm.channelGroups[collectionID]; ok {
		return cm.channelGroups[collectionID], nil
	}
	channels := Params.InsertChannelNumPerCollection
	cg := make([]string, channels)
	var i int64 = 0
	for ; i < channels; i++ {
		cg[i] = Params.InsertChannelPrefixName + strconv.Itoa(cm.count)
		cm.count++
	}
	cm.channelGroups[collectionID] = cg
	return cg, nil
}
@@ -1,21 +0,0 @@
package dataservice

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestGetChannel(t *testing.T) {
	Params.Init()
	Params.InsertChannelNumPerCollection = 4
	Params.InsertChannelPrefixName = "channel"
	manager := newInsertChannelManager()
	channels, err := manager.GetChannels(1)
	assert.Nil(t, err)
	assert.EqualValues(t, Params.InsertChannelNumPerCollection, len(channels))
	for i := 0; i < len(channels); i++ {
		assert.EqualValues(t, Params.InsertChannelPrefixName+strconv.Itoa(i), channels[i])
	}
}
@@ -2,7 +2,6 @@ package dataservice

import (
	"log"
	"sort"
	"sync"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"

@@ -23,18 +22,16 @@ type (
		channelNum int
	}
	dataNodeCluster struct {
		mu                sync.RWMutex
		finishCh          chan struct{}
		nodes             []*dataNode
		watchedCollection map[UniqueID]bool
		mu       sync.RWMutex
		finishCh chan struct{}
		nodes    []*dataNode
	}
)

func newDataNodeCluster(finishCh chan struct{}) *dataNodeCluster {
	return &dataNodeCluster{
		finishCh:          finishCh,
		nodes:             make([]*dataNode, 0),
		watchedCollection: make(map[UniqueID]bool),
		finishCh: finishCh,
		nodes:    make([]*dataNode, 0),
	}
}

@@ -72,13 +69,9 @@ func (c *dataNodeCluster) GetNodeIDs() []int64 {
	return ret
}

func (c *dataNodeCluster) WatchInsertChannels(collectionID UniqueID, channels []string) {
func (c *dataNodeCluster) WatchInsertChannels(channels []string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.watchedCollection[collectionID] {
		return
	}
	sort.Slice(c.nodes, func(i, j int) bool { return c.nodes[i].channelNum < c.nodes[j].channelNum })
	var groups [][]string
	if len(channels) < len(c.nodes) {
		groups = make([][]string, len(channels))

@@ -109,7 +102,6 @@ func (c *dataNodeCluster) WatchInsertChannels(collectionID UniqueID, channels []
		}
		c.nodes[i].channelNum += len(group)
	}
	c.watchedCollection[collectionID] = true
}

func (c *dataNodeCluster) GetDataNodeStates() ([]*internalpb2.ComponentInfo, error) {

@@ -153,5 +145,4 @@ func (c *dataNodeCluster) Clear() {
	defer c.mu.Unlock()
	c.finishCh = make(chan struct{})
	c.nodes = make([]*dataNode, 0)
	c.watchedCollection = make(map[UniqueID]bool)
}

@@ -33,7 +33,7 @@ func TestWatchChannels(t *testing.T) {
			channelNum: 0,
		})
	}
	cluster.WatchInsertChannels(c.collectionID, c.channels)
	cluster.WatchInsertChannels(c.channels)
	for i := 0; i < len(cluster.nodes); i++ {
		assert.EqualValues(t, c.channelNums[i], cluster.nodes[i].channelNum)
	}
@@ -24,14 +24,14 @@ type ParamTable struct {
	DefaultRecordSize     int64
	SegIDAssignExpiration int64

	InsertChannelPrefixName       string
	InsertChannelNumPerCollection int64
	StatisticsChannelName         string
	TimeTickChannelName           string
	DataNodeNum                   int
	SegmentInfoChannelName        string
	DataServiceSubscriptionName   string
	K2SChannelNames               []string
	InsertChannelPrefixName     string
	InsertChannelNum            int64
	StatisticsChannelName       string
	TimeTickChannelName         string
	DataNodeNum                 int
	SegmentInfoChannelName      string
	DataServiceSubscriptionName string
	K2SChannelNames             []string

	SegmentFlushMetaPath string
}

@@ -61,7 +61,7 @@ func (p *ParamTable) Init() {
	p.initDefaultRecordSize()
	p.initSegIDAssignExpiration()
	p.initInsertChannelPrefixName()
	p.initInsertChannelNumPerCollection()
	p.initInsertChannelNum()
	p.initStatisticsChannelName()
	p.initTimeTickChannelName()
	p.initDataNodeNum()

@@ -150,8 +150,8 @@ func (p *ParamTable) initInsertChannelPrefixName() {
	}
}

func (p *ParamTable) initInsertChannelNumPerCollection() {
	p.InsertChannelNumPerCollection = p.ParseInt64("dataservice.insertChannelNumPerCollection")
func (p *ParamTable) initInsertChannelNum() {
	p.InsertChannelNum = p.ParseInt64("dataservice.insertChannelNum")
}

func (p *ParamTable) initStatisticsChannelName() {
@ -16,8 +16,6 @@ import (
|
|||
"github.com/golang/protobuf/proto"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
|
||||
|
@ -85,7 +83,6 @@ type (
|
|||
segAllocator segmentAllocator
|
||||
statsHandler *statsHandler
|
||||
ddHandler *ddHandler
|
||||
insertChannelMgr *insertChannelManager
|
||||
allocator allocator
|
||||
cluster *dataNodeCluster
|
||||
msgProducer *timesync.MsgProducer
|
||||
|
@ -95,6 +92,7 @@ type (
|
|||
k2sMsgStream msgstream.MsgStream
|
||||
ddChannelName string
|
||||
segmentInfoStream msgstream.MsgStream
|
||||
insertChannels []string
|
||||
}
|
||||
)
|
||||
|
||||
|
@@ -103,14 +101,23 @@ func CreateServer(ctx context.Context) (*Server, error) {
	ch := make(chan struct{})
	s := &Server{
		ctx:              ctx,
		insertChannelMgr: newInsertChannelManager(),
		registerFinishCh: ch,
		cluster:          newDataNodeCluster(ch),
	}
	s.insertChannels = s.getInsertChannels()
	s.state.Store(internalpb2.StateCode_INITIALIZING)
	return s, nil
}

func (s *Server) getInsertChannels() []string {
	channels := make([]string, Params.InsertChannelNum)
	var i int64 = 0
	for ; i < Params.InsertChannelNum; i++ {
		channels[i] = Params.InsertChannelPrefixName + strconv.FormatInt(i, 10)
	}
	return channels
}

func (s *Server) SetMasterClient(masterClient MasterClient) {
	s.masterClient = masterClient
}

@@ -137,6 +144,7 @@ func (s *Server) Start() error {
	}
	s.startServerLoop()
	s.waitDataNodeRegister()
	s.cluster.WatchInsertChannels(s.insertChannels)
	if err = s.initMsgProducer(); err != nil {
		return err
	}
@ -164,21 +172,23 @@ func (s *Server) initMeta() error {
|
|||
}
|
||||
|
||||
func (s *Server) initSegmentInfoChannel() {
|
||||
segmentInfoStream := pulsarms.NewPulsarMsgStream(s.ctx, 1024)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
segmentInfoStream := pulsarms.NewPulsarMsgStream(s.ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
|
||||
segmentInfoStream.SetPulsarClient(Params.PulsarAddress)
|
||||
segmentInfoStream.CreatePulsarProducers([]string{Params.SegmentInfoChannelName})
|
||||
s.segmentInfoStream = segmentInfoStream
|
||||
s.segmentInfoStream.Start()
|
||||
}
|
||||
func (s *Server) initMsgProducer() error {
|
||||
ttMsgStream := pulsarms.NewPulsarMsgStream(s.ctx, 1024)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
ttMsgStream := pulsarms.NewPulsarMsgStream(s.ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
|
||||
ttMsgStream.SetPulsarClient(Params.PulsarAddress)
|
||||
ttMsgStream.CreatePulsarConsumers([]string{Params.TimeTickChannelName}, Params.DataServiceSubscriptionName, util.NewUnmarshalDispatcher(), 1024)
|
||||
ttMsgStream.CreatePulsarConsumers([]string{Params.TimeTickChannelName}, Params.DataServiceSubscriptionName)
|
||||
s.ttMsgStream = ttMsgStream
|
||||
s.ttMsgStream.Start()
|
||||
timeTickBarrier := timesync.NewHardTimeTickBarrier(s.ttMsgStream, s.cluster.GetNodeIDs())
|
||||
dataNodeTTWatcher := newDataNodeTimeTickWatcher(s.meta, s.segAllocator, s.cluster)
|
||||
k2sStream := pulsarms.NewPulsarMsgStream(s.ctx, 1024)
|
||||
k2sStream := pulsarms.NewPulsarMsgStream(s.ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
|
||||
k2sStream.SetPulsarClient(Params.PulsarAddress)
|
||||
k2sStream.CreatePulsarProducers(Params.K2SChannelNames)
|
||||
s.k2sMsgStream = k2sStream
|
||||
|
@ -300,9 +310,10 @@ func (s *Server) startServerLoop() {
|
|||
|
||||
func (s *Server) startStatsChannel(ctx context.Context) {
|
||||
defer s.serverLoopWg.Done()
|
||||
statsStream := pulsarms.NewPulsarMsgStream(ctx, 1024)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
statsStream := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
|
||||
statsStream.SetPulsarClient(Params.PulsarAddress)
|
||||
statsStream.CreatePulsarConsumers([]string{Params.StatisticsChannelName}, Params.DataServiceSubscriptionName, util.NewUnmarshalDispatcher(), 1024)
|
||||
statsStream.CreatePulsarConsumers([]string{Params.StatisticsChannelName}, Params.DataServiceSubscriptionName)
|
||||
statsStream.Start()
|
||||
defer statsStream.Close()
|
||||
for {
|
||||
|
@ -326,9 +337,10 @@ func (s *Server) startStatsChannel(ctx context.Context) {
|
|||
|
||||
func (s *Server) startSegmentFlushChannel(ctx context.Context) {
|
||||
defer s.serverLoopWg.Done()
|
||||
flushStream := pulsarms.NewPulsarMsgStream(ctx, 1024)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
flushStream := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
|
||||
flushStream.SetPulsarClient(Params.PulsarAddress)
|
||||
flushStream.CreatePulsarConsumers([]string{Params.SegmentInfoChannelName}, Params.DataServiceSubscriptionName, util.NewUnmarshalDispatcher(), 1024)
|
||||
flushStream.CreatePulsarConsumers([]string{Params.SegmentInfoChannelName}, Params.DataServiceSubscriptionName)
|
||||
flushStream.Start()
|
||||
defer flushStream.Close()
|
||||
for {
|
||||
|
@ -361,9 +373,10 @@ func (s *Server) startSegmentFlushChannel(ctx context.Context) {
|
|||
|
||||
func (s *Server) startDDChannel(ctx context.Context) {
|
||||
defer s.serverLoopWg.Done()
|
||||
ddStream := pulsarms.NewPulsarMsgStream(ctx, 1024)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
ddStream := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
|
||||
ddStream.SetPulsarClient(Params.PulsarAddress)
|
||||
ddStream.CreatePulsarConsumers([]string{s.ddChannelName}, Params.DataServiceSubscriptionName, util.NewUnmarshalDispatcher(), 1024)
|
||||
ddStream.CreatePulsarConsumers([]string{s.ddChannelName}, Params.DataServiceSubscriptionName)
|
||||
ddStream.Start()
|
||||
defer ddStream.Close()
|
||||
for {
|
||||
|
@ -675,16 +688,7 @@ func (s *Server) GetInsertBinlogPaths(req *datapb.InsertBinlogPathRequest) (*dat
|
|||
}
|
||||
|
||||
func (s *Server) GetInsertChannels(req *datapb.InsertChannelRequest) ([]string, error) {
|
||||
if !s.checkStateIsHealthy() {
|
||||
return nil, errors.New("server is initializing")
|
||||
}
|
||||
channels, err := s.insertChannelMgr.GetChannels(req.CollectionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.cluster.WatchInsertChannels(req.CollectionID, channels)
|
||||
return channels, nil
|
||||
return s.insertChannels, nil
|
||||
}
|
||||
|
||||
func (s *Server) GetCollectionStatistics(req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error) {
|
||||
|
|
|
@ -13,7 +13,6 @@ import (
|
|||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
msutil "github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
|
||||
|
@ -403,6 +402,8 @@ func (c *Core) tsLoop() {
|
|||
}
|
||||
}
|
||||
func (c *Core) setMsgStreams() error {
|
||||
dispatcherFactory := ms.ProtoUDFactory{}
|
||||
|
||||
if Params.PulsarAddress == "" {
|
||||
return errors.Errorf("PulsarAddress is empty")
|
||||
}
|
||||
|
@ -414,16 +415,16 @@ func (c *Core) setMsgStreams() error {
|
|||
if Params.ProxyTimeTickChannel == "" {
|
||||
return errors.Errorf("ProxyTimeTickChannel is empty")
|
||||
}
|
||||
proxyTimeTickStream := pulsarms.NewPulsarMsgStream(c.ctx, 1024)
|
||||
proxyTimeTickStream := pulsarms.NewPulsarMsgStream(c.ctx, 1024, 1024, dispatcherFactory.NewUnmarshalDispatcher())
|
||||
proxyTimeTickStream.SetPulsarClient(Params.PulsarAddress)
|
||||
proxyTimeTickStream.CreatePulsarConsumers([]string{Params.ProxyTimeTickChannel}, Params.MsgChannelSubName, msutil.NewUnmarshalDispatcher(), 1024)
|
||||
proxyTimeTickStream.CreatePulsarConsumers([]string{Params.ProxyTimeTickChannel}, Params.MsgChannelSubName)
|
||||
proxyTimeTickStream.Start()
|
||||
|
||||
// master time tick channel
|
||||
if Params.TimeTickChannel == "" {
|
||||
return errors.Errorf("TimeTickChannel is empty")
|
||||
}
|
||||
timeTickStream := pulsarms.NewPulsarMsgStream(c.ctx, 1024)
|
||||
timeTickStream := pulsarms.NewPulsarMsgStream(c.ctx, 1024, 1024, dispatcherFactory.NewUnmarshalDispatcher())
|
||||
timeTickStream.SetPulsarClient(Params.PulsarAddress)
|
||||
timeTickStream.CreatePulsarProducers([]string{Params.TimeTickChannel})
|
||||
|
||||
|
@ -431,7 +432,7 @@ func (c *Core) setMsgStreams() error {
|
|||
if Params.DdChannel == "" {
|
||||
return errors.Errorf("DdChannel is empty")
|
||||
}
|
||||
ddStream := pulsarms.NewPulsarMsgStream(c.ctx, 1024)
|
||||
ddStream := pulsarms.NewPulsarMsgStream(c.ctx, 1024, 1024, dispatcherFactory.NewUnmarshalDispatcher())
|
||||
ddStream.SetPulsarClient(Params.PulsarAddress)
|
||||
ddStream.CreatePulsarProducers([]string{Params.DdChannel})
|
||||
|
||||
|
@ -566,9 +567,9 @@ func (c *Core) setMsgStreams() error {
|
|||
if Params.DataServiceSegmentChannel == "" {
|
||||
return errors.Errorf("DataServiceSegmentChannel is empty")
|
||||
}
|
||||
dataServiceStream := pulsarms.NewPulsarMsgStream(c.ctx, 1024)
|
||||
dataServiceStream := pulsarms.NewPulsarMsgStream(c.ctx, 1024, 1024, dispatcherFactory.NewUnmarshalDispatcher())
|
||||
dataServiceStream.SetPulsarClient(Params.PulsarAddress)
|
||||
dataServiceStream.CreatePulsarConsumers([]string{Params.DataServiceSegmentChannel}, Params.MsgChannelSubName, msutil.NewUnmarshalDispatcher(), 1024)
|
||||
dataServiceStream.CreatePulsarConsumers([]string{Params.DataServiceSegmentChannel}, Params.MsgChannelSubName)
|
||||
dataServiceStream.Start()
|
||||
c.DataServiceSegmentChan = make(chan *datapb.SegmentInfo, 1024)
|
||||
c.DataNodeSegmentFlushCompletedChan = make(chan typeutil.UniqueID, 1024)
|
||||
|
|
|
@ -12,7 +12,6 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
msutil "github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
|
||||
|
@ -147,22 +146,23 @@ func TestMasterService(t *testing.T) {
|
|||
err = core.Start()
|
||||
assert.Nil(t, err)
|
||||
|
||||
proxyTimeTickStream := pulsarms.NewPulsarMsgStream(ctx, 1024)
|
||||
factory := ms.ProtoUDFactory{}
|
||||
proxyTimeTickStream := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
|
||||
proxyTimeTickStream.SetPulsarClient(Params.PulsarAddress)
|
||||
proxyTimeTickStream.CreatePulsarProducers([]string{Params.ProxyTimeTickChannel})
|
||||
|
||||
dataServiceSegmentStream := pulsarms.NewPulsarMsgStream(ctx, 1024)
|
||||
dataServiceSegmentStream := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
|
||||
dataServiceSegmentStream.SetPulsarClient(Params.PulsarAddress)
|
||||
dataServiceSegmentStream.CreatePulsarProducers([]string{Params.DataServiceSegmentChannel})
|
||||
|
||||
timeTickStream := pulsarms.NewPulsarMsgStream(ctx, 1024)
|
||||
timeTickStream := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
|
||||
timeTickStream.SetPulsarClient(Params.PulsarAddress)
|
||||
timeTickStream.CreatePulsarConsumers([]string{Params.TimeTickChannel}, Params.MsgChannelSubName, msutil.NewUnmarshalDispatcher(), 1024)
|
||||
timeTickStream.CreatePulsarConsumers([]string{Params.TimeTickChannel}, Params.MsgChannelSubName)
|
||||
timeTickStream.Start()
|
||||
|
||||
ddStream := pulsarms.NewPulsarMsgStream(ctx, 1024)
|
||||
ddStream := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
|
||||
ddStream.SetPulsarClient(Params.PulsarAddress)
|
||||
ddStream.CreatePulsarConsumers([]string{Params.DdChannel}, Params.MsgChannelSubName, msutil.NewUnmarshalDispatcher(), 1024)
|
||||
ddStream.CreatePulsarConsumers([]string{Params.DdChannel}, Params.MsgChannelSubName)
|
||||
ddStream.Start()
|
||||
|
||||
time.Sleep(time.Second)
|
||||
|
@ -657,10 +657,18 @@ func TestMasterService(t *testing.T) {
|
|||
rsp, err := core.DescribeIndex(req)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
|
||||
assert.Equal(t, len(rsp.IndexDescriptions), 3)
|
||||
assert.Equal(t, rsp.IndexDescriptions[0].IndexName, Params.DefaultIndexName)
|
||||
assert.Equal(t, rsp.IndexDescriptions[1].IndexName, "index_field_100_0")
|
||||
assert.Equal(t, rsp.IndexDescriptions[2].IndexName, "index_field_100_1")
|
||||
indexNames := make([]string, 0)
|
||||
for _, d := range rsp.IndexDescriptions {
|
||||
indexNames = append(indexNames, d.IndexName)
|
||||
}
|
||||
|
||||
assert.ElementsMatch(t, indexNames, []string{
|
||||
"index_field_100_0",
|
||||
"index_field_100_1",
|
||||
Params.DefaultIndexName,
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("drop partition", func(t *testing.T) {
|
||||
|
|
|
@ -4,12 +4,14 @@ import (
|
|||
"context"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
)
|
||||
|
||||
type MsgType = commonpb.MsgType
|
||||
type MarshalType = interface{}
|
||||
|
||||
type TsMsg interface {
|
||||
GetMsgContext() context.Context
|
||||
|
@ -18,8 +20,8 @@ type TsMsg interface {
|
|||
EndTs() Timestamp
|
||||
Type() MsgType
|
||||
HashKeys() []uint32
|
||||
Marshal(TsMsg) ([]byte, error)
|
||||
Unmarshal([]byte) (TsMsg, error)
|
||||
Marshal(TsMsg) (MarshalType, error)
|
||||
Unmarshal(MarshalType) (TsMsg, error)
|
||||
Position() *MsgPosition
|
||||
SetPosition(*MsgPosition)
|
||||
}
|
||||
|
@ -52,6 +54,15 @@ func (bm *BaseMsg) SetPosition(position *MsgPosition) {
|
|||
bm.MsgPosition = position
|
||||
}
|
||||
|
||||
func ConvertToByteArray(input interface{}) ([]byte, error) {
|
||||
switch output := input.(type) {
|
||||
case []byte:
|
||||
return output, nil
|
||||
default:
|
||||
return nil, errors.New("Cannot convert interface{} to []byte")
|
||||
}
|
||||
}
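
Note (illustration only, not part of this change set): under the updated interface, a custom message keeps returning proto bytes from Marshal and narrows the generic MarshalType payload back to []byte in Unmarshal via the ConvertToByteArray helper above. This mirrors the InsertTask wrapper used in the pulsar stream tests later in this diff; the TaggedTickMsg type, its Tag field, and the package name are hypothetical.

	package demomsg

	import (
		"github.com/golang/protobuf/proto"

		"github.com/zilliztech/milvus-distributed/internal/msgstream"
		"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
	)

	// TaggedTickMsg wraps the built-in TimeTickMsg; embedding it supplies the
	// rest of the TsMsg methods, and only Unmarshal is overridden here.
	type TaggedTickMsg struct {
		Tag string
		msgstream.TimeTickMsg
	}

	func (t *TaggedTickMsg) Unmarshal(input msgstream.MarshalType) (msgstream.TsMsg, error) {
		// The payload arrives as an interface{}; narrow it to []byte first.
		in, err := msgstream.ConvertToByteArray(input)
		if err != nil {
			return nil, err
		}
		tick := internalpb2.TimeTickMsg{}
		if err := proto.Unmarshal(in, &tick); err != nil {
			return nil, err
		}
		return &TaggedTickMsg{
			Tag:         "demo", // hypothetical extra field
			TimeTickMsg: msgstream.TimeTickMsg{TimeTickMsg: tick},
		}, nil
	}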
|
||||
|
||||
/////////////////////////////////////////Insert//////////////////////////////////////////
|
||||
type InsertMsg struct {
|
||||
BaseMsg
|
||||
|
@ -70,7 +81,7 @@ func (it *InsertMsg) SetMsgContext(ctx context.Context) {
|
|||
it.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (it *InsertMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (it *InsertMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
insertMsg := input.(*InsertMsg)
|
||||
insertRequest := &insertMsg.InsertRequest
|
||||
mb, err := proto.Marshal(insertRequest)
|
||||
|
@ -80,9 +91,13 @@ func (it *InsertMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (it *InsertMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (it *InsertMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
insertRequest := internalpb2.InsertRequest{}
|
||||
err := proto.Unmarshal(input, &insertRequest)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &insertRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -121,7 +136,7 @@ func (fl *FlushCompletedMsg) SetMsgContext(ctx context.Context) {
|
|||
fl.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (fl *FlushCompletedMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (fl *FlushCompletedMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
flushCompletedMsgTask := input.(*FlushCompletedMsg)
|
||||
flushCompletedMsg := &flushCompletedMsgTask.SegmentFlushCompletedMsg
|
||||
mb, err := proto.Marshal(flushCompletedMsg)
|
||||
|
@ -131,9 +146,13 @@ func (fl *FlushCompletedMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (fl *FlushCompletedMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (fl *FlushCompletedMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
flushCompletedMsg := internalpb2.SegmentFlushCompletedMsg{}
|
||||
err := proto.Unmarshal(input, &flushCompletedMsg)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &flushCompletedMsg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -162,7 +181,7 @@ func (fl *FlushMsg) SetMsgContext(ctx context.Context) {
|
|||
fl.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (fl *FlushMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (fl *FlushMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
flushMsgTask := input.(*FlushMsg)
|
||||
flushMsg := &flushMsgTask.FlushMsg
|
||||
mb, err := proto.Marshal(flushMsg)
|
||||
|
@ -172,9 +191,13 @@ func (fl *FlushMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (fl *FlushMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (fl *FlushMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
flushMsg := internalpb2.FlushMsg{}
|
||||
err := proto.Unmarshal(input, &flushMsg)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &flushMsg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -203,7 +226,7 @@ func (dt *DeleteMsg) SetMsgContext(ctx context.Context) {
|
|||
dt.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (dt *DeleteMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (dt *DeleteMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
deleteMsg := input.(*DeleteMsg)
|
||||
deleteRequest := &deleteMsg.DeleteRequest
|
||||
mb, err := proto.Marshal(deleteRequest)
|
||||
|
@ -214,9 +237,13 @@ func (dt *DeleteMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (dt *DeleteMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (dt *DeleteMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
deleteRequest := internalpb2.DeleteRequest{}
|
||||
err := proto.Unmarshal(input, &deleteRequest)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &deleteRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -256,7 +283,7 @@ func (st *SearchMsg) SetMsgContext(ctx context.Context) {
|
|||
st.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (st *SearchMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (st *SearchMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
searchTask := input.(*SearchMsg)
|
||||
searchRequest := &searchTask.SearchRequest
|
||||
mb, err := proto.Marshal(searchRequest)
|
||||
|
@ -266,9 +293,13 @@ func (st *SearchMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (st *SearchMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (st *SearchMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
searchRequest := internalpb2.SearchRequest{}
|
||||
err := proto.Unmarshal(input, &searchRequest)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &searchRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -297,7 +328,7 @@ func (srt *SearchResultMsg) SetMsgContext(ctx context.Context) {
|
|||
srt.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (srt *SearchResultMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (srt *SearchResultMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
searchResultTask := input.(*SearchResultMsg)
|
||||
searchResultRequest := &searchResultTask.SearchResults
|
||||
mb, err := proto.Marshal(searchResultRequest)
|
||||
|
@ -307,9 +338,13 @@ func (srt *SearchResultMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (srt *SearchResultMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (srt *SearchResultMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
searchResultRequest := internalpb2.SearchResults{}
|
||||
err := proto.Unmarshal(input, &searchResultRequest)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &searchResultRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -338,7 +373,7 @@ func (tst *TimeTickMsg) SetMsgContext(ctx context.Context) {
|
|||
tst.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (tst *TimeTickMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (tst *TimeTickMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
timeTickTask := input.(*TimeTickMsg)
|
||||
timeTick := &timeTickTask.TimeTickMsg
|
||||
mb, err := proto.Marshal(timeTick)
|
||||
|
@ -348,9 +383,13 @@ func (tst *TimeTickMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (tst *TimeTickMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (tst *TimeTickMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
timeTickMsg := internalpb2.TimeTickMsg{}
|
||||
err := proto.Unmarshal(input, &timeTickMsg)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &timeTickMsg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -380,7 +419,7 @@ func (qs *QueryNodeStatsMsg) SetMsgContext(ctx context.Context) {
|
|||
qs.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (qs *QueryNodeStatsMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (qs *QueryNodeStatsMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
queryNodeSegStatsTask := input.(*QueryNodeStatsMsg)
|
||||
queryNodeSegStats := &queryNodeSegStatsTask.QueryNodeStats
|
||||
mb, err := proto.Marshal(queryNodeSegStats)
|
||||
|
@ -390,9 +429,13 @@ func (qs *QueryNodeStatsMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (qs *QueryNodeStatsMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (qs *QueryNodeStatsMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
queryNodeSegStats := internalpb2.QueryNodeStats{}
|
||||
err := proto.Unmarshal(input, &queryNodeSegStats)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &queryNodeSegStats)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -419,7 +462,7 @@ func (ss *SegmentStatisticsMsg) SetMsgContext(ctx context.Context) {
|
|||
ss.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (ss *SegmentStatisticsMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (ss *SegmentStatisticsMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
segStatsTask := input.(*SegmentStatisticsMsg)
|
||||
segStats := &segStatsTask.SegmentStatistics
|
||||
mb, err := proto.Marshal(segStats)
|
||||
|
@ -429,9 +472,13 @@ func (ss *SegmentStatisticsMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (ss *SegmentStatisticsMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (ss *SegmentStatisticsMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
segStats := internalpb2.SegmentStatistics{}
|
||||
err := proto.Unmarshal(input, &segStats)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &segStats)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -468,7 +515,7 @@ func (cc *CreateCollectionMsg) SetMsgContext(ctx context.Context) {
|
|||
cc.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (cc *CreateCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (cc *CreateCollectionMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
createCollectionMsg := input.(*CreateCollectionMsg)
|
||||
createCollectionRequest := &createCollectionMsg.CreateCollectionRequest
|
||||
mb, err := proto.Marshal(createCollectionRequest)
|
||||
|
@ -478,9 +525,13 @@ func (cc *CreateCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (cc *CreateCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (cc *CreateCollectionMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
createCollectionRequest := internalpb2.CreateCollectionRequest{}
|
||||
err := proto.Unmarshal(input, &createCollectionRequest)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &createCollectionRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -508,7 +559,7 @@ func (dc *DropCollectionMsg) SetMsgContext(ctx context.Context) {
|
|||
dc.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (dc *DropCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (dc *DropCollectionMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
dropCollectionMsg := input.(*DropCollectionMsg)
|
||||
dropCollectionRequest := &dropCollectionMsg.DropCollectionRequest
|
||||
mb, err := proto.Marshal(dropCollectionRequest)
|
||||
|
@ -518,9 +569,13 @@ func (dc *DropCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (dc *DropCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (dc *DropCollectionMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
dropCollectionRequest := internalpb2.DropCollectionRequest{}
|
||||
err := proto.Unmarshal(input, &dropCollectionRequest)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &dropCollectionRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -549,7 +604,7 @@ func (cc *CreatePartitionMsg) Type() MsgType {
|
|||
return cc.Base.MsgType
|
||||
}
|
||||
|
||||
func (cc *CreatePartitionMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (cc *CreatePartitionMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
createPartitionMsg := input.(*CreatePartitionMsg)
|
||||
createPartitionRequest := &createPartitionMsg.CreatePartitionRequest
|
||||
mb, err := proto.Marshal(createPartitionRequest)
|
||||
|
@ -559,9 +614,13 @@ func (cc *CreatePartitionMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (cc *CreatePartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (cc *CreatePartitionMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
createPartitionRequest := internalpb2.CreatePartitionRequest{}
|
||||
err := proto.Unmarshal(input, &createPartitionRequest)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &createPartitionRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -590,7 +649,7 @@ func (dc *DropPartitionMsg) Type() MsgType {
|
|||
return dc.Base.MsgType
|
||||
}
|
||||
|
||||
func (dc *DropPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (dc *DropPartitionMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
dropPartitionMsg := input.(*DropPartitionMsg)
|
||||
dropPartitionRequest := &dropPartitionMsg.DropPartitionRequest
|
||||
mb, err := proto.Marshal(dropPartitionRequest)
|
||||
|
@ -600,9 +659,13 @@ func (dc *DropPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (dc *DropPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (dc *DropPartitionMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
dropPartitionRequest := internalpb2.DropPartitionRequest{}
|
||||
err := proto.Unmarshal(input, &dropPartitionRequest)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &dropPartitionRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -631,7 +694,7 @@ func (lim *LoadIndexMsg) SetMsgContext(ctx context.Context) {
|
|||
lim.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (lim *LoadIndexMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (lim *LoadIndexMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
loadIndexMsg := input.(*LoadIndexMsg)
|
||||
loadIndexRequest := &loadIndexMsg.LoadIndex
|
||||
mb, err := proto.Marshal(loadIndexRequest)
|
||||
|
@ -641,9 +704,13 @@ func (lim *LoadIndexMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (lim *LoadIndexMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (lim *LoadIndexMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
loadIndexRequest := internalpb2.LoadIndex{}
|
||||
err := proto.Unmarshal(input, &loadIndexRequest)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &loadIndexRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -670,7 +737,7 @@ func (sim *SegmentInfoMsg) SetMsgContext(ctx context.Context) {
|
|||
sim.MsgCtx = ctx
|
||||
}
|
||||
|
||||
func (sim *SegmentInfoMsg) Marshal(input TsMsg) ([]byte, error) {
|
||||
func (sim *SegmentInfoMsg) Marshal(input TsMsg) (MarshalType, error) {
|
||||
segInfoMsg := input.(*SegmentInfoMsg)
|
||||
mb, err := proto.Marshal(&segInfoMsg.SegmentMsg)
|
||||
if err != nil {
|
||||
|
@ -679,9 +746,13 @@ func (sim *SegmentInfoMsg) Marshal(input TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (sim *SegmentInfoMsg) Unmarshal(input []byte) (TsMsg, error) {
|
||||
func (sim *SegmentInfoMsg) Unmarshal(input MarshalType) (TsMsg, error) {
|
||||
segMsg := datapb.SegmentMsg{}
|
||||
err := proto.Unmarshal(input, &segMsg)
|
||||
in, err := ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &segMsg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@@ -23,10 +23,10 @@ type RepackFunc func(msgs []TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, erro
type MsgStream interface {
	Start()
	Close()
	Chan() <-chan *MsgPack

	Produce(*MsgPack) error
	Broadcast(*MsgPack) error
	Consume() *MsgPack
	Chan() <-chan *MsgPack
	Seek(offset *MsgPosition) error
}
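
Note (illustration only, not part of this change set): a minimal sketch of how a caller drives this interface through the pulsarms implementation changed elsewhere in this diff. The channel name, subscription name, and Pulsar address are placeholders; imports of context, log, msgstream, and pulsarms are assumed, and the assumption that Consume blocks and yields nil once the stream is closed is mine, not stated by the diff.

	func consumeDemo(ctx context.Context) {
		factory := msgstream.ProtoUDFactory{}
		// Unmarshal dispatcher is now injected at construction time.
		stream := pulsarms.NewPulsarMsgStream(ctx, 1024, 1024, factory.NewUnmarshalDispatcher())
		stream.SetPulsarClient("pulsar://localhost:6650") // placeholder address
		stream.CreatePulsarConsumers([]string{"demo-insert-channel"}, "demo-subscription")
		stream.Start()
		defer stream.Close()

		for {
			pack := stream.Consume() // assumed to block until a MsgPack is ready
			if pack == nil {
				return // assumed to signal that the stream was closed
			}
			for _, msg := range pack.Msgs {
				log.Printf("msg type=%v beginTs=%d", msg.Type(), msg.BeginTs())
			}
		}
	}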
@ -10,17 +10,18 @@ import (
|
|||
"github.com/golang/protobuf/proto"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
)
|
||||
|
||||
type MarshalType = msgstream.MarshalType
|
||||
|
||||
type InsertTask struct {
|
||||
Tag string
|
||||
msgstream.InsertMsg
|
||||
}
|
||||
|
||||
func (tt *InsertTask) Marshal(input msgstream.TsMsg) ([]byte, error) {
|
||||
func (tt *InsertTask) Marshal(input msgstream.TsMsg) (MarshalType, error) {
|
||||
testMsg := input.(*InsertTask)
|
||||
insertRequest := &testMsg.InsertRequest
|
||||
mb, err := proto.Marshal(insertRequest)
|
||||
|
@ -30,9 +31,13 @@ func (tt *InsertTask) Marshal(input msgstream.TsMsg) ([]byte, error) {
|
|||
return mb, nil
|
||||
}
|
||||
|
||||
func (tt *InsertTask) Unmarshal(input []byte) (msgstream.TsMsg, error) {
|
||||
func (tt *InsertTask) Unmarshal(input MarshalType) (msgstream.TsMsg, error) {
|
||||
insertRequest := internalpb2.InsertRequest{}
|
||||
err := proto.Unmarshal(input, &insertRequest)
|
||||
in, err := msgstream.ConvertToByteArray(input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = proto.Unmarshal(in, &insertRequest)
|
||||
testMsg := &InsertTask{InsertMsg: msgstream.InsertMsg{InsertRequest: insertRequest}}
|
||||
testMsg.Tag = testMsg.InsertRequest.PartitionName
|
||||
if err != nil {
|
||||
|
@ -135,18 +140,19 @@ func TestStream_task_Insert(t *testing.T) {
|
|||
msgPack.Msgs = append(msgPack.Msgs, getInsertTask(1, 1))
|
||||
msgPack.Msgs = append(msgPack.Msgs, getInsertTask(3, 3))
|
||||
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100, 100, factory.NewUnmarshalDispatcher())
|
||||
inputStream.SetPulsarClient(pulsarAddress)
|
||||
inputStream.CreatePulsarProducers(producerChannels)
|
||||
inputStream.SetRepackFunc(newRepackFunc)
|
||||
inputStream.Start()
|
||||
|
||||
outputStream := NewPulsarMsgStream(context.Background(), 100)
|
||||
dispatcher := factory.NewUnmarshalDispatcher()
|
||||
outputStream := NewPulsarMsgStream(context.Background(), 100, 100, dispatcher)
|
||||
outputStream.SetPulsarClient(pulsarAddress)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
testTask := InsertTask{}
|
||||
unmarshalDispatcher.AddMsgTemplate(commonpb.MsgType_kInsert, testTask.Unmarshal)
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName, unmarshalDispatcher, 100)
|
||||
dispatcher.AddMsgTemplate(commonpb.MsgType_kInsert, testTask.Unmarshal)
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName)
|
||||
outputStream.Start()
|
||||
|
||||
err := inputStream.Produce(&msgPack)
|
||||
|
|
|
@ -36,6 +36,7 @@ type QueryNodeStatsMsg = msgstream.QueryNodeStatsMsg
|
|||
type RepackFunc = msgstream.RepackFunc
|
||||
type Consumer = pulsar.Consumer
|
||||
type Producer = pulsar.Producer
|
||||
type UnmarshalDispatcher = msgstream.UnmarshalDispatcher
|
||||
|
||||
type PulsarMsgStream struct {
|
||||
ctx context.Context
|
||||
|
@ -44,13 +45,14 @@ type PulsarMsgStream struct {
|
|||
consumers []Consumer
|
||||
consumerChannels []string
|
||||
repackFunc RepackFunc
|
||||
unmarshal *util.UnmarshalDispatcher
|
||||
unmarshal UnmarshalDispatcher
|
||||
receiveBuf chan *MsgPack
|
||||
wait *sync.WaitGroup
|
||||
streamCancel func()
|
||||
pulsarBufSize int64
|
||||
}
|
||||
|
||||
func NewPulsarMsgStream(ctx context.Context, receiveBufSize int64) *PulsarMsgStream {
|
||||
func NewPulsarMsgStream(ctx context.Context, receiveBufSize int64, pulsarBufSize int64, unmarshal UnmarshalDispatcher) *PulsarMsgStream {
|
||||
streamCtx, streamCancel := context.WithCancel(ctx)
|
||||
producers := make([]Producer, 0)
|
||||
consumers := make([]Consumer, 0)
|
||||
|
@ -61,6 +63,8 @@ func NewPulsarMsgStream(ctx context.Context, receiveBufSize int64) *PulsarMsgStr
|
|||
producers: producers,
|
||||
consumers: consumers,
|
||||
consumerChannels: consumerChannels,
|
||||
unmarshal: unmarshal,
|
||||
pulsarBufSize: pulsarBufSize,
|
||||
}
|
||||
stream.receiveBuf = make(chan *MsgPack, receiveBufSize)
|
||||
return stream
|
||||
|
@ -97,13 +101,10 @@ func (ms *PulsarMsgStream) CreatePulsarProducers(channels []string) {
|
|||
}
|
||||
|
||||
func (ms *PulsarMsgStream) CreatePulsarConsumers(channels []string,
|
||||
subName string,
|
||||
unmarshal *util.UnmarshalDispatcher,
|
||||
pulsarBufSize int64) {
|
||||
ms.unmarshal = unmarshal
|
||||
subName string) {
|
||||
for i := 0; i < len(channels); i++ {
|
||||
fn := func() error {
|
||||
receiveChannel := make(chan pulsar.ConsumerMessage, pulsarBufSize)
|
||||
receiveChannel := make(chan pulsar.ConsumerMessage, ms.pulsarBufSize)
|
||||
pc, err := ms.client.Subscribe(pulsar.ConsumerOptions{
|
||||
Topic: channels[i],
|
||||
SubscriptionName: subName,
|
||||
|
@ -236,7 +237,12 @@ func (ms *PulsarMsgStream) Produce(msgPack *MsgPack) error {
|
|||
return err
|
||||
}
|
||||
|
||||
msg := &pulsar.ProducerMessage{Payload: mb}
|
||||
m, err := msgstream.ConvertToByteArray(mb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msg := &pulsar.ProducerMessage{Payload: m}
|
||||
|
||||
var child opentracing.Span
|
||||
if v.Msgs[i].Type() == commonpb.MsgType_kInsert ||
|
||||
|
@ -293,7 +299,13 @@ func (ms *PulsarMsgStream) Broadcast(msgPack *MsgPack) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msg := &pulsar.ProducerMessage{Payload: mb}
|
||||
|
||||
m, err := msgstream.ConvertToByteArray(mb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msg := &pulsar.ProducerMessage{Payload: m}
|
||||
var child opentracing.Span
|
||||
if v.Type() == commonpb.MsgType_kInsert ||
|
||||
v.Type() == commonpb.MsgType_kSearch ||
|
||||
|
@ -467,11 +479,13 @@ type PulsarTtMsgStream struct {
|
|||
lastTimeStamp Timestamp
|
||||
}
|
||||
|
||||
func NewPulsarTtMsgStream(ctx context.Context, receiveBufSize int64) *PulsarTtMsgStream {
|
||||
func NewPulsarTtMsgStream(ctx context.Context, receiveBufSize int64, pulsarBufSize int64, unmarshal msgstream.UnmarshalDispatcher) *PulsarTtMsgStream {
|
||||
streamCtx, streamCancel := context.WithCancel(ctx)
|
||||
pulsarMsgStream := PulsarMsgStream{
|
||||
ctx: streamCtx,
|
||||
streamCancel: streamCancel,
|
||||
ctx: streamCtx,
|
||||
streamCancel: streamCancel,
|
||||
pulsarBufSize: pulsarBufSize,
|
||||
unmarshal: unmarshal,
|
||||
}
|
||||
pulsarMsgStream.receiveBuf = make(chan *MsgPack, receiveBufSize)
|
||||
return &PulsarTtMsgStream{
|
||||
|
@ -601,11 +615,10 @@ func (ms *PulsarTtMsgStream) findTimeTick(consumer Consumer,
|
|||
if err != nil {
|
||||
log.Printf("Failed to unmarshal, error = %v", err)
|
||||
}
|
||||
unMarshalFunc := (*ms.unmarshal).TempMap[headerMsg.Base.MsgType]
|
||||
if unMarshalFunc == nil {
|
||||
tsMsg, err := ms.unmarshal.Unmarshal(pulsarMsg.Payload(), headerMsg.Base.MsgType)
|
||||
if tsMsg == nil && err != nil {
|
||||
panic("null unMarshalFunc for " + headerMsg.Base.MsgType.String() + " msg type")
|
||||
}
|
||||
tsMsg, err := unMarshalFunc(pulsarMsg.Payload())
|
||||
if err != nil {
|
||||
log.Printf("Failed to unmarshal, error = %v", err)
|
||||
}
|
||||
|
@ -674,11 +687,11 @@ func (ms *PulsarTtMsgStream) Seek(mp *internalpb2.MsgPosition) error {
|
|||
if err != nil {
|
||||
log.Printf("Failed to unmarshal msgHeader, error = %v", err)
|
||||
}
|
||||
unMarshalFunc := (*ms.unmarshal).TempMap[headerMsg.Base.MsgType]
|
||||
if unMarshalFunc == nil {
|
||||
tsMsg, err := ms.unmarshal.Unmarshal(pulsarMsg.Payload(), headerMsg.Base.MsgType)
|
||||
if tsMsg == nil && err != nil {
|
||||
panic("null unMarshalFunc for " + headerMsg.Base.MsgType.String() + " msg type")
|
||||
|
||||
}
|
||||
tsMsg, err := unMarshalFunc(pulsarMsg.Payload())
|
||||
if err != nil {
|
||||
log.Printf("Failed to unmarshal pulsarMsg, error = %v", err)
|
||||
}
|
||||
|
|
|
@ -10,7 +10,6 @@ import (
|
|||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
|
||||
|
@ -174,9 +173,10 @@ func initPulsarStream(pulsarAddress string,
|
|||
consumerChannels []string,
|
||||
consumerSubName string,
|
||||
opts ...RepackFunc) (msgstream.MsgStream, msgstream.MsgStream) {
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
|
||||
// set input stream
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100)
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100, 100, factory.NewUnmarshalDispatcher())
|
||||
inputStream.SetPulsarClient(pulsarAddress)
|
||||
inputStream.CreatePulsarProducers(producerChannels)
|
||||
for _, opt := range opts {
|
||||
|
@ -186,10 +186,9 @@ func initPulsarStream(pulsarAddress string,
|
|||
var input msgstream.MsgStream = inputStream
|
||||
|
||||
// set output stream
|
||||
outputStream := NewPulsarMsgStream(context.Background(), 100)
|
||||
outputStream := NewPulsarMsgStream(context.Background(), 100, 100, factory.NewUnmarshalDispatcher())
|
||||
outputStream.SetPulsarClient(pulsarAddress)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName, unmarshalDispatcher, 100)
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName)
|
||||
outputStream.Start()
|
||||
var output msgstream.MsgStream = outputStream
|
||||
|
||||
|
@ -201,9 +200,10 @@ func initPulsarTtStream(pulsarAddress string,
|
|||
consumerChannels []string,
|
||||
consumerSubName string,
|
||||
opts ...RepackFunc) (msgstream.MsgStream, msgstream.MsgStream) {
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
|
||||
// set input stream
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100)
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100, 100, factory.NewUnmarshalDispatcher())
|
||||
inputStream.SetPulsarClient(pulsarAddress)
|
||||
inputStream.CreatePulsarProducers(producerChannels)
|
||||
for _, opt := range opts {
|
||||
|
@ -213,10 +213,9 @@ func initPulsarTtStream(pulsarAddress string,
|
|||
var input msgstream.MsgStream = inputStream
|
||||
|
||||
// set output stream
|
||||
outputStream := NewPulsarTtMsgStream(context.Background(), 100)
|
||||
outputStream := NewPulsarTtMsgStream(context.Background(), 100, 100, factory.NewUnmarshalDispatcher())
|
||||
outputStream.SetPulsarClient(pulsarAddress)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName, unmarshalDispatcher, 100)
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName)
|
||||
outputStream.Start()
|
||||
var output msgstream.MsgStream = outputStream
|
||||
|
||||
|
@ -417,15 +416,15 @@ func TestStream_PulsarMsgStream_InsertRepackFunc(t *testing.T) {
|
|||
msgPack := msgstream.MsgPack{}
|
||||
msgPack.Msgs = append(msgPack.Msgs, insertMsg)
|
||||
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100, 100, factory.NewUnmarshalDispatcher())
|
||||
inputStream.SetPulsarClient(pulsarAddress)
|
||||
inputStream.CreatePulsarProducers(producerChannels)
|
||||
inputStream.Start()
|
||||
|
||||
outputStream := NewPulsarMsgStream(context.Background(), 100)
|
||||
outputStream := NewPulsarMsgStream(context.Background(), 100, 100, factory.NewUnmarshalDispatcher())
|
||||
outputStream.SetPulsarClient(pulsarAddress)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName, unmarshalDispatcher, 100)
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName)
|
||||
outputStream.Start()
|
||||
var output msgstream.MsgStream = outputStream
|
||||
|
||||
|
@ -470,15 +469,15 @@ func TestStream_PulsarMsgStream_DeleteRepackFunc(t *testing.T) {
|
|||
msgPack := msgstream.MsgPack{}
|
||||
msgPack.Msgs = append(msgPack.Msgs, deleteMsg)
|
||||
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100, 100, factory.NewUnmarshalDispatcher())
|
||||
inputStream.SetPulsarClient(pulsarAddress)
|
||||
inputStream.CreatePulsarProducers(producerChannels)
|
||||
inputStream.Start()
|
||||
|
||||
outputStream := NewPulsarMsgStream(context.Background(), 100)
|
||||
outputStream := NewPulsarMsgStream(context.Background(), 100, 100, factory.NewUnmarshalDispatcher())
|
||||
outputStream.SetPulsarClient(pulsarAddress)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName, unmarshalDispatcher, 100)
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName)
|
||||
outputStream.Start()
|
||||
var output msgstream.MsgStream = outputStream
|
||||
|
||||
|
@ -503,15 +502,15 @@ func TestStream_PulsarMsgStream_DefaultRepackFunc(t *testing.T) {
|
|||
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(commonpb.MsgType_kSearchResult, 3, 3))
|
||||
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(commonpb.MsgType_kQueryNodeStats, 4, 4))
|
||||
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
inputStream := NewPulsarMsgStream(context.Background(), 100, 100, factory.NewUnmarshalDispatcher())
|
||||
inputStream.SetPulsarClient(pulsarAddress)
|
||||
inputStream.CreatePulsarProducers(producerChannels)
|
||||
inputStream.Start()
|
||||
|
||||
outputStream := NewPulsarMsgStream(context.Background(), 100)
|
||||
outputStream := NewPulsarMsgStream(context.Background(), 100, 100, factory.NewUnmarshalDispatcher())
|
||||
outputStream.SetPulsarClient(pulsarAddress)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName, unmarshalDispatcher, 100)
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName)
|
||||
outputStream.Start()
|
||||
var output msgstream.MsgStream = outputStream
|
||||
|
||||
|
|
|
@ -2,103 +2,262 @@ package rmqms
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"log"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/masterservice"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
rocksmq "github.com/zilliztech/milvus-distributed/internal/util/rocksmq"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
)
|
||||
|
||||
type RmqMsgStream struct {
|
||||
isServing int64
|
||||
idAllocator *masterservice.GlobalIDAllocator
|
||||
ctx context.Context
|
||||
serverLoopWg sync.WaitGroup
|
||||
serverLoopCtx context.Context
|
||||
serverLoopCancel func()
|
||||
|
||||
rmq *rocksmq.RocksMQ
|
||||
repackFunc msgstream.RepackFunc
|
||||
consumers []rocksmq.Consumer
|
||||
producers []string
|
||||
|
||||
unmarshal msgstream.UnmarshalDispatcher
|
||||
receiveBuf chan *msgstream.MsgPack
|
||||
wait *sync.WaitGroup
|
||||
// tso ticker
|
||||
tsoTicker *time.Ticker
|
||||
streamCancel func()
|
||||
}
|
||||
|
||||
func NewRmqMsgStream() *RmqMsgStream {
|
||||
//idAllocator := master.NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{""}, "singleNode/rocksmq", "gid"))
|
||||
//if err := idAllocator.Initialize(); err != nil {
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//return &RmqMsgStream{
|
||||
// idAllocator: idAllocator,
|
||||
//}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *RmqMsgStream) startServerLoop(ctx context.Context) error {
|
||||
ms.serverLoopCtx, ms.serverLoopCancel = context.WithCancel(ctx)
|
||||
|
||||
ms.serverLoopWg.Add(1)
|
||||
go ms.tsLoop()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *RmqMsgStream) stopServerLoop() {
|
||||
ms.serverLoopCancel()
|
||||
ms.serverLoopWg.Wait()
|
||||
}
|
||||
|
||||
func (ms *RmqMsgStream) tsLoop() {
|
||||
defer ms.serverLoopWg.Done()
|
||||
|
||||
ms.tsoTicker = time.NewTicker(masterservice.UpdateTimestampStep)
|
||||
defer ms.tsoTicker.Stop()
|
||||
|
||||
ctx, cancel := context.WithCancel(ms.serverLoopCtx)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ms.tsoTicker.C:
|
||||
if err := ms.idAllocator.UpdateID(); err != nil {
|
||||
log.Println("failed to update id", err)
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
// Server is closed and it should return nil.
|
||||
log.Println("tsLoop is closed")
|
||||
return
|
||||
}
|
||||
func NewRmqMsgStream(ctx context.Context, rmq *rocksmq.RocksMQ, receiveBufSize int64) *RmqMsgStream {
|
||||
streamCtx, streamCancel := context.WithCancel(ctx)
|
||||
receiveBuf := make(chan *msgstream.MsgPack, receiveBufSize)
|
||||
stream := &RmqMsgStream{
|
||||
ctx: streamCtx,
|
||||
rmq: rmq,
|
||||
receiveBuf: receiveBuf,
|
||||
streamCancel: streamCancel,
|
||||
}
|
||||
|
||||
return stream
|
||||
}
|
||||
|
||||
func (ms *RmqMsgStream) Start() {
|
||||
if err := ms.startServerLoop(ms.ctx); err != nil {
|
||||
return
|
||||
ms.wait = &sync.WaitGroup{}
|
||||
if ms.consumers != nil {
|
||||
ms.wait.Add(1)
|
||||
go ms.bufMsgPackToChannel()
|
||||
}
|
||||
|
||||
atomic.StoreInt64(&ms.isServing, 1)
|
||||
}
|
||||
|
||||
func (ms *RmqMsgStream) Close() {
|
||||
if !atomic.CompareAndSwapInt64(&ms.isServing, 1, 0) {
|
||||
// server is already closed
|
||||
return
|
||||
}
|
||||
|
||||
func (ms *RmqMsgStream) CreateProducers(channels []string) error {
|
||||
for _, channel := range channels {
|
||||
// TODO(yhz): this may currently allow creating a channel that already exists
|
||||
if err := ms.rmq.CreateChannel(channel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Print("closing server")
|
||||
|
||||
ms.stopServerLoop()
|
||||
func (ms *RmqMsgStream) CreateConsumers(channels []string, groupName string) error {
|
||||
for _, channelName := range channels {
|
||||
if err := ms.rmq.CreateConsumerGroup(groupName, channelName); err != nil {
|
||||
return err
|
||||
}
|
||||
msgNum := make(chan int)
|
||||
ms.consumers = append(ms.consumers, rocksmq.Consumer{GroupName: groupName, ChannelName: channelName, MsgNum: msgNum})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
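Putting the constructor and the two Create* methods above together, a hedged sketch of how this rocksmq-backed stream is meant to be wired up; it uses only methods that appear in this diff and assumes rmq is an initialized *rocksmq.RocksMQ.

// Sketch only: rmq, ctx and the channel/group names are assumptions.
stream := NewRmqMsgStream(ctx, rmq, 100)
if err := stream.CreateProducers([]string{"insert_ch_0"}); err != nil {
	log.Fatal(err)
}
if err := stream.CreateConsumers([]string{"insert_ch_0"}, "sub_group_0"); err != nil {
	log.Fatal(err)
}
stream.Start()
msgPack := stream.Consume() // blocks until a MsgPack arrives or the context is cancelled
_ = msgPack
stream.Close()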
|
||||
func (ms *RmqMsgStream) Produce(pack *msgstream.MsgPack) error {
|
||||
tsMsgs := pack.Msgs
|
||||
if len(tsMsgs) <= 0 {
|
||||
log.Printf("Warning: Receive empty msgPack")
|
||||
return nil
|
||||
}
|
||||
if len(ms.producers) <= 0 {
|
||||
return errors.New("nil producer in msg stream")
|
||||
}
|
||||
reBucketValues := make([][]int32, len(tsMsgs))
|
||||
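// Route each message to a producer bucket: search results reuse their
// ResultChannelID as the bucket, everything else is hashed across producers.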
for channelID, tsMsg := range tsMsgs {
|
||||
hashValues := tsMsg.HashKeys()
|
||||
bucketValues := make([]int32, len(hashValues))
|
||||
for index, hashValue := range hashValues {
|
||||
if tsMsg.Type() == commonpb.MsgType_kSearchResult {
|
||||
searchResult := tsMsg.(*msgstream.SearchResultMsg)
|
||||
channelID := searchResult.ResultChannelID
|
||||
channelIDInt, _ := strconv.ParseInt(channelID, 10, 64)
|
||||
if channelIDInt >= int64(len(ms.producers)) {
|
||||
return errors.New("Failed to produce pulsar msg to unKnow channel")
|
||||
}
|
||||
bucketValues[index] = int32(channelIDInt)
|
||||
continue
|
||||
}
|
||||
bucketValues[index] = int32(hashValue % uint32(len(ms.producers)))
|
||||
}
|
||||
reBucketValues[channelID] = bucketValues
|
||||
}
|
||||
|
||||
var result map[int32]*msgstream.MsgPack
|
||||
var err error
|
||||
if ms.repackFunc != nil {
|
||||
result, err = ms.repackFunc(tsMsgs, reBucketValues)
|
||||
} else {
|
||||
msgType := (tsMsgs[0]).Type()
|
||||
switch msgType {
|
||||
case commonpb.MsgType_kInsert:
|
||||
result, err = util.InsertRepackFunc(tsMsgs, reBucketValues)
|
||||
case commonpb.MsgType_kDelete:
|
||||
result, err = util.DeleteRepackFunc(tsMsgs, reBucketValues)
|
||||
default:
|
||||
result, err = util.DefaultRepackFunc(tsMsgs, reBucketValues)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range result {
|
||||
for i := 0; i < len(v.Msgs); i++ {
|
||||
mb, err := v.Msgs[i].Marshal(v.Msgs[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//
|
||||
//msg := &pulsar.ProducerMessage{Payload: mb}
|
||||
|
||||
//var child opentracing.Span
|
||||
if v.Msgs[i].Type() == commonpb.MsgType_kInsert ||
|
||||
v.Msgs[i].Type() == commonpb.MsgType_kSearch ||
|
||||
v.Msgs[i].Type() == commonpb.MsgType_kSearchResult {
|
||||
//tracer := opentracing.GlobalTracer()
|
||||
//ctx := v.Msgs[i].GetMsgContext()
|
||||
//if ctx == nil {
|
||||
// ctx = context.Background()
|
||||
//}
|
||||
//
|
||||
//if parent := opentracing.SpanFromContext(ctx); parent != nil {
|
||||
// child = tracer.StartSpan("start send pulsar msg",
|
||||
// opentracing.FollowsFrom(parent.Context()))
|
||||
//} else {
|
||||
// child = tracer.StartSpan("start send pulsar msg")
|
||||
//}
|
||||
//child.SetTag("hash keys", v.Msgs[i].HashKeys())
|
||||
//child.SetTag("start time", v.Msgs[i].BeginTs())
|
||||
//child.SetTag("end time", v.Msgs[i].EndTs())
|
||||
//child.SetTag("msg type", v.Msgs[i].Type())
|
||||
//msg.Properties = make(map[string]string)
|
||||
//err = tracer.Inject(child.Context(), opentracing.TextMap, &propertiesReaderWriter{msg.Properties})
|
||||
//if err != nil {
|
||||
// child.LogFields(oplog.Error(err))
|
||||
// child.Finish()
|
||||
// return err
|
||||
//}
|
||||
//child.LogFields(oplog.String("inject success", "inject success"))
|
||||
}
|
||||
|
||||
m, err := msgstream.ConvertToByteArray(mb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
msg := make([]rocksmq.ProducerMessage, 0)
|
||||
msg = append(msg, *rocksmq.NewProducerMessage(m))
|
||||
if err := ms.rmq.Produce(ms.producers[k], msg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *RmqMsgStream) Consume() *msgstream.MsgPack {
|
||||
return nil
|
||||
for {
|
||||
select {
|
||||
case cm, ok := <-ms.receiveBuf:
|
||||
if !ok {
|
||||
log.Println("buf chan closed")
|
||||
return nil
|
||||
}
|
||||
return cm
|
||||
case <-ms.ctx.Done():
|
||||
log.Printf("context closed")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *RmqMsgStream) bufMsgPackToChannel() {
|
||||
defer ms.wait.Done()
|
||||
|
||||
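// Build one reflect.SelectCase per consumer so a single reflect.Select
// below can wait on every consumer's MsgNum channel at once.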
cases := make([]reflect.SelectCase, len(ms.consumers))
|
||||
for i := 0; i < len(ms.consumers); i++ {
|
||||
ch := ms.consumers[i].MsgNum
|
||||
cases[i] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)}
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ms.ctx.Done():
|
||||
log.Println("done")
|
||||
return
|
||||
default:
|
||||
tsMsgList := make([]msgstream.TsMsg, 0)
|
||||
|
||||
for {
|
||||
chosen, value, ok := reflect.Select(cases)
|
||||
if !ok {
|
||||
log.Printf("channel closed")
|
||||
return
|
||||
}
|
||||
|
||||
msgNum := value.Interface().(int)
|
||||
rmqMsg, err := ms.rmq.Consume(ms.consumers[chosen].GroupName, ms.consumers[chosen].ChannelName, msgNum)
|
||||
if err != nil {
|
||||
log.Printf("Failed to consume message in rocksmq, error = %v", err)
|
||||
continue
|
||||
}
|
||||
for j := 0; j < len(rmqMsg); j++ {
|
||||
headerMsg := commonpb.MsgHeader{}
|
||||
err := proto.Unmarshal(rmqMsg[j].Payload, &headerMsg)
|
||||
if err != nil {
|
||||
log.Printf("Failed to unmarshal message header, error = %v", err)
|
||||
continue
|
||||
}
|
||||
tsMsg, err := ms.unmarshal.Unmarshal(rmqMsg[j].Payload, headerMsg.Base.MsgType)
|
||||
if err != nil {
|
||||
log.Printf("Failed to unmarshal tsMsg, error = %v", err)
|
||||
continue
|
||||
}
|
||||
tsMsgList = append(tsMsgList, tsMsg)
|
||||
}
|
||||
noMoreMessage := true
|
||||
for k := 0; k < len(ms.consumers); k++ {
|
||||
if len(ms.consumers[k].MsgNum) > 0 {
|
||||
noMoreMessage = false
|
||||
}
|
||||
}
|
||||
|
||||
if noMoreMessage {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(tsMsgList) > 0 {
|
||||
msgPack := util.MsgPack{Msgs: tsMsgList}
|
||||
ms.receiveBuf <- &msgPack
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *RmqMsgStream) Chan() <-chan *msgstream.MsgPack {
|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
package msgstream
|
||||
|
||||
import (
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
)
|
||||
|
||||
type UnmarshalFunc func(interface{}) (TsMsg, error)
|
||||
|
||||
type UnmarshalDispatcher interface {
|
||||
Unmarshal(input interface{}, msgType commonpb.MsgType) (TsMsg, error)
|
||||
AddMsgTemplate(msgType commonpb.MsgType, unmarshalFunc UnmarshalFunc)
|
||||
}
|
||||
|
||||
type UnmarshalDispatcherFactory interface {
|
||||
NewUnmarshalDispatcher() UnmarshalDispatcher
|
||||
}
|
||||
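Streams now program against this interface instead of the old util struct. As a rough illustration of the contract only, a caller could override a single message type on a dispatcher obtained from a factory such as the one below; the insert-specific function here is hypothetical and relies on the proto and internalpb2 imports that the existing tests in this diff already use.

// Hypothetical example: plug a custom UnmarshalFunc into a dispatcher.
// The type assertion reflects that payloads arrive as []byte in this diff.
dispatcher.AddMsgTemplate(commonpb.MsgType_kInsert, func(input interface{}) (TsMsg, error) {
	payload, ok := input.([]byte)
	if !ok {
		return nil, errors.New("expected []byte payload")
	}
	insertRequest := internalpb2.InsertRequest{}
	if err := proto.Unmarshal(payload, &insertRequest); err != nil {
		return nil, err
	}
	return &InsertMsg{InsertRequest: insertRequest}, nil
})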
|
||||
// ProtoUnmarshalDispatcher and its factory
|
||||
|
||||
type ProtoUnmarshalDispatcher struct {
|
||||
TempMap map[commonpb.MsgType]UnmarshalFunc
|
||||
}
|
||||
|
||||
func (p *ProtoUnmarshalDispatcher) Unmarshal(input interface{}, msgType commonpb.MsgType) (TsMsg, error) {
|
||||
unmarshalFunc, ok := p.TempMap[msgType]
|
||||
if !ok {
|
||||
return nil, errors.New("unmarshalFunc is not set for this message type")
|
||||
}
|
||||
return unmarshalFunc(input)
|
||||
}
|
||||
|
||||
func (p *ProtoUnmarshalDispatcher) AddMsgTemplate(msgType commonpb.MsgType, unmarshalFunc UnmarshalFunc) {
|
||||
p.TempMap[msgType] = unmarshalFunc
|
||||
}
|
||||
|
||||
type ProtoUDFactory struct{}
|
||||
|
||||
func (pudf *ProtoUDFactory) NewUnmarshalDispatcher() *ProtoUnmarshalDispatcher {
|
||||
insertMsg := InsertMsg{}
|
||||
deleteMsg := DeleteMsg{}
|
||||
searchMsg := SearchMsg{}
|
||||
searchResultMsg := SearchResultMsg{}
|
||||
timeTickMsg := TimeTickMsg{}
|
||||
createCollectionMsg := CreateCollectionMsg{}
|
||||
dropCollectionMsg := DropCollectionMsg{}
|
||||
createPartitionMsg := CreatePartitionMsg{}
|
||||
dropPartitionMsg := DropPartitionMsg{}
|
||||
loadIndexMsg := LoadIndexMsg{}
|
||||
flushMsg := FlushMsg{}
|
||||
segmentInfoMsg := SegmentInfoMsg{}
|
||||
flushCompletedMsg := FlushCompletedMsg{}
|
||||
queryNodeSegStatsMsg := QueryNodeStatsMsg{}
|
||||
|
||||
p := &ProtoUnmarshalDispatcher{}
|
||||
p.TempMap = make(map[commonpb.MsgType]UnmarshalFunc)
|
||||
p.TempMap[commonpb.MsgType_kInsert] = insertMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kDelete] = deleteMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kSearch] = searchMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kSearchResult] = searchResultMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kTimeTick] = timeTickMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kQueryNodeStats] = queryNodeSegStatsMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kCreateCollection] = createCollectionMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kDropCollection] = dropCollectionMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kCreatePartition] = createPartitionMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kDropPartition] = dropPartitionMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kLoadIndex] = loadIndexMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kFlush] = flushMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kSegmentInfo] = segmentInfoMsg.Unmarshal
|
||||
p.TempMap[commonpb.MsgType_kSegmentFlushDone] = flushCompletedMsg.Unmarshal
|
||||
|
||||
return p
|
||||
}
|
||||
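The unmarshal test further down in this diff exercises exactly this factory; condensed, and with error handling trimmed, the round trip looks roughly like this (insertMsg stands in for any TsMsg built by the test).

// Sketch condensed from TestStream_unmarshal_Insert below.
factory := &ProtoUDFactory{}
dispatcher := factory.NewUnmarshalDispatcher()

payload, _ := insertMsg.Marshal(insertMsg) // every TsMsg marshals itself
p, _ := ConvertToByteArray(payload)        // normalize the marshal output to []byte
headerMsg := commonpb.MsgHeader{}
_ = proto.Unmarshal(p, &headerMsg)         // peek at the MsgType in the common header
tsMsg, _ := dispatcher.Unmarshal(p, headerMsg.Base.MsgType)
fmt.Println("msg type:", tsMsg.Type())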
|
||||
// MemUnmarshalDispatcher and its factory
|
||||
|
||||
//type MemUDFactory struct {
|
||||
//
|
||||
//}
|
||||
//func (mudf *MemUDFactory) NewUnmarshalDispatcher() *UnmarshalDispatcher {
|
||||
//
|
||||
//}
|
|
@ -1,4 +1,4 @@
|
|||
package util
|
||||
package msgstream
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
@ -6,7 +6,6 @@ import (
|
|||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
|
||||
|
@ -14,12 +13,10 @@ import (
|
|||
|
||||
var Params paramtable.BaseTable
|
||||
|
||||
type Timestamp = msgstream.Timestamp
|
||||
|
||||
func newInsertMsgUnmarshal(input []byte) (msgstream.TsMsg, error) {
|
||||
func newInsertMsgUnmarshal(input []byte) (TsMsg, error) {
|
||||
insertRequest := internalpb2.InsertRequest{}
|
||||
err := proto.Unmarshal(input, &insertRequest)
|
||||
insertMsg := &msgstream.InsertMsg{InsertRequest: insertRequest}
|
||||
insertMsg := &InsertMsg{InsertRequest: insertRequest}
|
||||
fmt.Println("use func newInsertMsgUnmarshal unmarshal")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -29,9 +26,9 @@ func newInsertMsgUnmarshal(input []byte) (msgstream.TsMsg, error) {
|
|||
}
|
||||
|
||||
func TestStream_unmarshal_Insert(t *testing.T) {
|
||||
msgPack := msgstream.MsgPack{}
|
||||
insertMsg := &msgstream.InsertMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
msgPack := MsgPack{}
|
||||
insertMsg := &InsertMsg{
|
||||
BaseMsg: BaseMsg{
|
||||
BeginTimestamp: 0,
|
||||
EndTimestamp: 0,
|
||||
HashValues: []uint32{1},
|
||||
|
@ -54,16 +51,21 @@ func TestStream_unmarshal_Insert(t *testing.T) {
|
|||
}
|
||||
msgPack.Msgs = append(msgPack.Msgs, insertMsg)
|
||||
|
||||
unmarshalDispatcher := NewUnmarshalDispatcher()
|
||||
unmarshalDispatcher.AddMsgTemplate(commonpb.MsgType_kInsert, newInsertMsgUnmarshal)
|
||||
factory := &ProtoUDFactory{}
|
||||
unmarshalDispatcher := factory.NewUnmarshalDispatcher()
|
||||
|
||||
// FIXME(wxyu): Maybe we dont need this interface
|
||||
//unmarshalDispatcher.AddMsgTemplate(commonpb.MsgType_kInsert, newInsertMsgUnmarshal)
|
||||
|
||||
for _, v := range msgPack.Msgs {
|
||||
headerMsg := commonpb.MsgHeader{}
|
||||
payload, err := v.Marshal(v)
|
||||
assert.Nil(t, err)
|
||||
err = proto.Unmarshal(payload, &headerMsg)
|
||||
p, err := ConvertToByteArray(payload)
|
||||
assert.Nil(t, err)
|
||||
msg, err := unmarshalDispatcher.Unmarshal(payload, headerMsg.Base.MsgType)
|
||||
err = proto.Unmarshal(p, &headerMsg)
|
||||
assert.Nil(t, err)
|
||||
msg, err := unmarshalDispatcher.Unmarshal(p, headerMsg.Base.MsgType)
|
||||
assert.Nil(t, err)
|
||||
fmt.Println("msg type: ", msg.Type(), ", msg value: ", msg, "msg tag: ")
|
||||
}
|
|
@ -1,65 +0,0 @@
|
|||
package util
|
||||
|
||||
import (
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
)
|
||||
|
||||
type MarshalFunc func(msgstream.TsMsg) ([]byte, error)
|
||||
type UnmarshalFunc func([]byte) (msgstream.TsMsg, error)
|
||||
|
||||
type UnmarshalDispatcher struct {
|
||||
TempMap map[commonpb.MsgType]UnmarshalFunc
|
||||
}
|
||||
|
||||
func (dispatcher *UnmarshalDispatcher) Unmarshal(input []byte, msgType commonpb.MsgType) (msgstream.TsMsg, error) {
|
||||
unmarshalFunc, ok := dispatcher.TempMap[msgType]
|
||||
if !ok {
|
||||
return nil, errors.New(string("Not set unmarshalFunc for this messageType."))
|
||||
}
|
||||
return unmarshalFunc(input)
|
||||
}
|
||||
|
||||
func (dispatcher *UnmarshalDispatcher) AddMsgTemplate(msgType commonpb.MsgType, unmarshal UnmarshalFunc) {
|
||||
dispatcher.TempMap[msgType] = unmarshal
|
||||
}
|
||||
|
||||
func (dispatcher *UnmarshalDispatcher) addDefaultMsgTemplates() {
|
||||
insertMsg := msgstream.InsertMsg{}
|
||||
deleteMsg := msgstream.DeleteMsg{}
|
||||
searchMsg := msgstream.SearchMsg{}
|
||||
searchResultMsg := msgstream.SearchResultMsg{}
|
||||
timeTickMsg := msgstream.TimeTickMsg{}
|
||||
createCollectionMsg := msgstream.CreateCollectionMsg{}
|
||||
dropCollectionMsg := msgstream.DropCollectionMsg{}
|
||||
createPartitionMsg := msgstream.CreatePartitionMsg{}
|
||||
dropPartitionMsg := msgstream.DropPartitionMsg{}
|
||||
loadIndexMsg := msgstream.LoadIndexMsg{}
|
||||
flushMsg := msgstream.FlushMsg{}
|
||||
segmentInfoMsg := msgstream.SegmentInfoMsg{}
|
||||
flushCompletedMsg := msgstream.FlushCompletedMsg{}
|
||||
|
||||
queryNodeSegStatsMsg := msgstream.QueryNodeStatsMsg{}
|
||||
dispatcher.TempMap = make(map[commonpb.MsgType]UnmarshalFunc)
|
||||
dispatcher.TempMap[commonpb.MsgType_kInsert] = insertMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kDelete] = deleteMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kSearch] = searchMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kSearchResult] = searchResultMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kTimeTick] = timeTickMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kQueryNodeStats] = queryNodeSegStatsMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kCreateCollection] = createCollectionMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kDropCollection] = dropCollectionMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kCreatePartition] = createPartitionMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kDropPartition] = dropPartitionMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kLoadIndex] = loadIndexMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kFlush] = flushMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kSegmentInfo] = segmentInfoMsg.Unmarshal
|
||||
dispatcher.TempMap[commonpb.MsgType_kSegmentFlushDone] = flushCompletedMsg.Unmarshal
|
||||
}
|
||||
|
||||
func NewUnmarshalDispatcher() *UnmarshalDispatcher {
|
||||
unmarshalDispatcher := UnmarshalDispatcher{}
|
||||
unmarshalDispatcher.addDefaultMsgTemplates()
|
||||
return &unmarshalDispatcher
|
||||
}
|
|
@ -263,7 +263,7 @@ message SearchResults {
|
|||
message FlushRequest {
|
||||
common.MsgBase base = 1;
|
||||
string db_name = 2;
|
||||
string collection_name = 3;
|
||||
repeated string collection_names = 3;
|
||||
}
|
||||
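With collection_names now repeated, the regenerated Go struct further down carries a []string, so one flush request can name several collections. A minimal sketch, using the milvuspb and commonpb packages imported elsewhere in this diff; the field values are placeholders.

req := &milvuspb.FlushRequest{
	Base:            &commonpb.MsgBase{MsgType: commonpb.MsgType_kFlush},
	DbName:          "default",
	CollectionNames: []string{"collection_a", "collection_b"},
}
_ = req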
|
||||
service MilvusService {
|
||||
|
|
|
@ -2261,7 +2261,7 @@ func (m *SearchResults) GetHits() [][]byte {
|
|||
type FlushRequest struct {
|
||||
Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
|
||||
DbName string `protobuf:"bytes,2,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"`
|
||||
CollectionName string `protobuf:"bytes,3,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
|
||||
CollectionNames []string `protobuf:"bytes,3,rep,name=collection_names,json=collectionNames,proto3" json:"collection_names,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
|
@ -2306,11 +2306,11 @@ func (m *FlushRequest) GetDbName() string {
|
|||
return ""
|
||||
}
|
||||
|
||||
func (m *FlushRequest) GetCollectionName() string {
|
||||
func (m *FlushRequest) GetCollectionNames() []string {
|
||||
if m != nil {
|
||||
return m.CollectionName
|
||||
return m.CollectionNames
|
||||
}
|
||||
return ""
|
||||
return nil
|
||||
}
|
||||
|
||||
type RegisterLinkResponse struct {
|
||||
|
@ -2408,112 +2408,113 @@ func init() {
|
|||
func init() { proto.RegisterFile("milvus.proto", fileDescriptor_02345ba45cc0e303) }
|
||||
|
||||
var fileDescriptor_02345ba45cc0e303 = []byte{
|
||||
// 1676 bytes of a gzipped FileDescriptorProto (old compressed descriptor bytes elided)
// 1685 bytes of a gzipped FileDescriptorProto (new compressed descriptor bytes elided)
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
type Condition interface {
|
||||
WaitToFinish() error
|
||||
Notify(err error)
|
||||
Ctx() context.Context
|
||||
}
|
||||
|
||||
type TaskCondition struct {
|
||||
|
@ -31,6 +32,10 @@ func (tc *TaskCondition) Notify(err error) {
|
|||
tc.done <- err
|
||||
}
|
||||
|
||||
func (tc *TaskCondition) Ctx() context.Context {
|
||||
return tc.ctx
|
||||
}
|
||||
|
||||
func NewTaskCondition(ctx context.Context) *TaskCondition {
|
||||
return &TaskCondition{
|
||||
done: make(chan error),
|
||||
|
|
|
@ -1,123 +0,0 @@
|
|||
package proxynode
|
||||
|
||||
//
|
||||
//func (node *NodeImpl) DescribePartition(ctx context.Context, in *milvuspb.PartitionName) (*milvuspb.PartitionDescription, error) {
|
||||
// log.Println("describe partition: ", in)
|
||||
//
|
||||
// return &milvuspb.PartitionDescription{
|
||||
// Status: &commonpb.Status{
|
||||
// ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
// Reason: "Deprecated!",
|
||||
// },
|
||||
// Name: in,
|
||||
// Statistics: nil,
|
||||
// }, nil
|
||||
//
|
||||
//}
|
||||
//
|
||||
//func (p *NodeImpl) DescribePartition2(ctx context.Context, in *milvuspb.PartitionName) (*milvuspb.PartitionDescription, error) {
|
||||
// log.Println("describe partition: ", in)
|
||||
// dpt := &DescribePartitionTask{
|
||||
// Condition: NewTaskCondition(ctx),
|
||||
// DescribePartitionRequest: internalpb.DescribePartitionRequest{
|
||||
// MsgType: commonpb.MsgType_kDescribePartition,
|
||||
// ReqID: 0,
|
||||
// Timestamp: 0,
|
||||
// ProxyID: 0,
|
||||
// PartitionName: in,
|
||||
// //TODO, ReqID,Timestamp,ProxyID
|
||||
// },
|
||||
// masterClient: p.masterClient,
|
||||
// result: nil,
|
||||
// ctx: nil,
|
||||
// }
|
||||
//
|
||||
// var cancel func()
|
||||
// dpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
// defer cancel()
|
||||
//
|
||||
// err := func() error {
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// return errors.New("describe partion timeout")
|
||||
// default:
|
||||
// return p.sched.DdQueue.Enqueue(dpt)
|
||||
// }
|
||||
// }()
|
||||
//
|
||||
// if err != nil {
|
||||
// return &milvuspb.PartitionDescription{
|
||||
// Status: &commonpb.Status{
|
||||
// ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
// Reason: err.Error(),
|
||||
// },
|
||||
// Name: in,
|
||||
// Statistics: nil,
|
||||
// }, nil
|
||||
// }
|
||||
//
|
||||
// err = dpt.WaitToFinish()
|
||||
// if err != nil {
|
||||
// return &milvuspb.PartitionDescription{
|
||||
// Status: &commonpb.Status{
|
||||
// ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
// Reason: err.Error(),
|
||||
// },
|
||||
// Name: in,
|
||||
// Statistics: nil,
|
||||
// }, nil
|
||||
// }
|
||||
// return dpt.result, nil
|
||||
//}
|
||||
//
|
||||
//func (node *NodeImpl) DescribeIndexProgress(ctx context.Context, req *milvuspb.DescribeIndexProgressRequest) (*milvuspb.BoolResponse, error) {
|
||||
// log.Println("Describe index progress for: ", req.FieldName)
|
||||
// dipt := &GetIndexStateTask{
|
||||
// Condition: NewTaskCondition(ctx),
|
||||
// IndexStateRequest: milvuspb.IndexStateRequest{
|
||||
// Base: &commonpb.MsgBase{
|
||||
// MsgType: commonpb.MsgType_kGetIndexState,
|
||||
// SourceID: Params.ProxyID(),
|
||||
// },
|
||||
// CollectionName: req.CollectionName,
|
||||
// FieldName: req.FieldName,
|
||||
// },
|
||||
// masterClient: node.masterClient,
|
||||
// }
|
||||
//
|
||||
// var cancel func()
|
||||
// dipt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
// defer cancel()
|
||||
//
|
||||
// fn := func() error {
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// return errors.New("create index timeout")
|
||||
// default:
|
||||
// return node.sched.DdQueue.Enqueue(dipt)
|
||||
// }
|
||||
// }
|
||||
// err := fn()
|
||||
// if err != nil {
|
||||
// return &milvuspb.BoolResponse{
|
||||
// Status: &commonpb.Status{
|
||||
// ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
// Reason: err.Error(),
|
||||
// },
|
||||
// Value: false,
|
||||
// }, nil
|
||||
// }
|
||||
//
|
||||
// err = dipt.WaitToFinish()
|
||||
// if err != nil {
|
||||
// return &milvuspb.BoolResponse{
|
||||
// Status: &commonpb.Status{
|
||||
// ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
// Reason: err.Error(),
|
||||
// },
|
||||
// Value: false,
|
||||
// }, nil
|
||||
// }
|
||||
//
|
||||
// return dipt.result, nil
|
||||
//}
|
|
@@ -6,11 +6,9 @@ import (
	"strconv"
	"time"

	"github.com/opentracing/opentracing-go"
	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"

	"github.com/zilliztech/milvus-distributed/internal/errors"
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
	"github.com/zilliztech/milvus-distributed/internal/proto/proxypb"

@@ -35,26 +33,17 @@ func (node *NodeImpl) InvalidateCollectionMetaCache(ctx context.Context, request

func (node *NodeImpl) CreateCollection(request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
	log.Println("create collection: ", request)
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
	defer cancel()

	cct := &CreateCollectionTask{
		Condition:               NewTaskCondition(ctx),
		CreateCollectionRequest: request,
		masterClient:            node.masterClient,
		dataServiceClient:       node.dataServiceClient,
	}
	var cancel func()
	cct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
	defer cancel()

	fn := func() error {
		select {
		case <-ctx.Done():
			return errors.New("create collection timeout")
		default:
			return node.sched.DdQueue.Enqueue(cct)
		}
	}
	err := fn()
	err := node.sched.DdQueue.Enqueue(cct)
	if err != nil {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
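This hunk (and the matching ones for DropCollection, HasCollection, DescribeCollection and the partition/index handlers further down) drops the hand-rolled select/timeout wrapper around Enqueue; the request deadline now lives in the context the handler attaches to the task up front. The sketch below is a minimal, self-contained illustration of that pattern only — the task, taskQueue, and reqTimeoutInterval names here are stand-ins, not the repository's actual types.

package main

import (
	"context"
	"fmt"
	"time"
)

// task stands in for the proxy's task types: it carries a context whose
// deadline was set by the gRPC handler before Enqueue is called.
type task struct {
	ctx  context.Context
	name string
}

// taskQueue stands in for sched.DdQueue. Enqueue itself refuses tasks whose
// context is already done, so callers no longer need a select wrapper.
type taskQueue struct{ ch chan *task }

func (q *taskQueue) Enqueue(t *task) error {
	select {
	case <-t.ctx.Done():
		return fmt.Errorf("enqueue %s: %w", t.name, t.ctx.Err())
	case q.ch <- t:
		return nil
	}
}

func main() {
	const reqTimeoutInterval = 10 * time.Millisecond // assumed value for the sketch
	q := &taskQueue{ch: make(chan *task, 1)}

	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
	defer cancel()

	if err := q.Enqueue(&task{ctx: ctx, name: "CreateCollection"}); err != nil {
		fmt.Println("enqueue failed:", err)
		return
	}
	fmt.Println("enqueued")
}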
@ -75,25 +64,16 @@ func (node *NodeImpl) CreateCollection(request *milvuspb.CreateCollectionRequest
|
|||
|
||||
func (node *NodeImpl) DropCollection(request *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
|
||||
log.Println("drop collection: ", request)
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
dct := &DropCollectionTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
DropCollectionRequest: request,
|
||||
masterClient: node.masterClient,
|
||||
}
|
||||
var cancel func()
|
||||
dct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
fn := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("create collection timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(dct)
|
||||
}
|
||||
}
|
||||
err := fn()
|
||||
err := node.sched.DdQueue.Enqueue(dct)
|
||||
if err != nil {
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
|
@ -114,25 +94,16 @@ func (node *NodeImpl) DropCollection(request *milvuspb.DropCollectionRequest) (*
|
|||
|
||||
func (node *NodeImpl) HasCollection(request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
|
||||
log.Println("has collection: ", request)
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
hct := &HasCollectionTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
HasCollectionRequest: request,
|
||||
masterClient: node.masterClient,
|
||||
}
|
||||
var cancel func()
|
||||
hct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
fn := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("create collection timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(hct)
|
||||
}
|
||||
}
|
||||
err := fn()
|
||||
err := node.sched.DdQueue.Enqueue(hct)
|
||||
if err != nil {
|
||||
return &milvuspb.BoolResponse{
|
||||
Status: &commonpb.Status{
|
||||
|
@ -165,25 +136,16 @@ func (node *NodeImpl) ReleaseCollection(request *milvuspb.ReleaseCollectionReque
|
|||
|
||||
func (node *NodeImpl) DescribeCollection(request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
|
||||
log.Println("describe collection: ", request)
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
dct := &DescribeCollectionTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
DescribeCollectionRequest: request,
|
||||
masterClient: node.masterClient,
|
||||
}
|
||||
var cancel func()
|
||||
dct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
fn := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("create collection timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(dct)
|
||||
}
|
||||
}
|
||||
err := fn()
|
||||
err := node.sched.DdQueue.Enqueue(dct)
|
||||
if err != nil {
|
||||
return &milvuspb.DescribeCollectionResponse{
|
||||
Status: &commonpb.Status{
|
||||
|
@ -208,25 +170,15 @@ func (node *NodeImpl) DescribeCollection(request *milvuspb.DescribeCollectionReq
|
|||
|
||||
func (node *NodeImpl) GetCollectionStatistics(request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
|
||||
log.Println("get collection statistics")
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
g := &GetCollectionsStatisticsTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
CollectionStatsRequest: request,
|
||||
dataServiceClient: node.dataServiceClient,
|
||||
}
|
||||
var cancel func()
|
||||
g.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
fn := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("create collection timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(g)
|
||||
}
|
||||
}
|
||||
err := fn()
|
||||
err := node.sched.DdQueue.Enqueue(g)
|
||||
if err != nil {
|
||||
return &milvuspb.CollectionStatsResponse{
|
||||
Status: &commonpb.Status{
|
||||
|
@ -251,25 +203,15 @@ func (node *NodeImpl) GetCollectionStatistics(request *milvuspb.CollectionStatsR
|
|||
|
||||
func (node *NodeImpl) ShowCollections(request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
|
||||
log.Println("show collections")
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
sct := &ShowCollectionsTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
ShowCollectionRequest: request,
|
||||
masterClient: node.masterClient,
|
||||
}
|
||||
var cancel func()
|
||||
sct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
fn := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("create collection timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(sct)
|
||||
}
|
||||
}
|
||||
err := fn()
|
||||
err := node.sched.DdQueue.Enqueue(sct)
|
||||
if err != nil {
|
||||
return &milvuspb.ShowCollectionResponse{
|
||||
Status: &commonpb.Status{
|
||||
|
@ -294,27 +236,16 @@ func (node *NodeImpl) ShowCollections(request *milvuspb.ShowCollectionRequest) (
|
|||
|
||||
func (node *NodeImpl) CreatePartition(request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
|
||||
log.Println("create partition", request)
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
cpt := &CreatePartitionTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
CreatePartitionRequest: request,
|
||||
masterClient: node.masterClient,
|
||||
result: nil,
|
||||
ctx: nil,
|
||||
}
|
||||
var cancel func()
|
||||
cpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
err := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("create partition timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(cpt)
|
||||
}
|
||||
}()
|
||||
|
||||
err := node.sched.DdQueue.Enqueue(cpt)
|
||||
if err != nil {
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
|
@ -333,27 +264,16 @@ func (node *NodeImpl) CreatePartition(request *milvuspb.CreatePartitionRequest)
|
|||
|
||||
func (node *NodeImpl) DropPartition(request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
|
||||
log.Println("drop partition: ", request)
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
dpt := &DropPartitionTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
DropPartitionRequest: request,
|
||||
masterClient: node.masterClient,
|
||||
result: nil,
|
||||
ctx: nil,
|
||||
}
|
||||
|
||||
var cancel func()
|
||||
dpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
err := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("drop partition timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(dpt)
|
||||
}
|
||||
}()
|
||||
err := node.sched.DdQueue.Enqueue(dpt)
|
||||
|
||||
if err != nil {
|
||||
return &commonpb.Status{
|
||||
|
@ -373,27 +293,16 @@ func (node *NodeImpl) DropPartition(request *milvuspb.DropPartitionRequest) (*co
|
|||
|
||||
func (node *NodeImpl) HasPartition(request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
|
||||
log.Println("has partition: ", request)
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
hpt := &HasPartitionTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
HasPartitionRequest: request,
|
||||
masterClient: node.masterClient,
|
||||
result: nil,
|
||||
ctx: nil,
|
||||
}
|
||||
|
||||
var cancel func()
|
||||
hpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
err := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("has partition timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(hpt)
|
||||
}
|
||||
}()
|
||||
err := node.sched.DdQueue.Enqueue(hpt)
|
||||
|
||||
if err != nil {
|
||||
return &milvuspb.BoolResponse{
|
||||
|
@ -431,27 +340,16 @@ func (node *NodeImpl) GetPartitionStatistics(request *milvuspb.PartitionStatsReq
|
|||
|
||||
func (node *NodeImpl) ShowPartitions(request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
|
||||
log.Println("show partitions: ", request)
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
spt := &ShowPartitionsTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
ShowPartitionRequest: request,
|
||||
masterClient: node.masterClient,
|
||||
result: nil,
|
||||
ctx: nil,
|
||||
}
|
||||
|
||||
var cancel func()
|
||||
spt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
err := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("show partition timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(spt)
|
||||
}
|
||||
}()
|
||||
err := node.sched.DdQueue.Enqueue(spt)
|
||||
|
||||
if err != nil {
|
||||
return &milvuspb.ShowPartitionResponse{
|
||||
|
@ -476,26 +374,15 @@ func (node *NodeImpl) ShowPartitions(request *milvuspb.ShowPartitionRequest) (*m
|
|||
|
||||
func (node *NodeImpl) CreateIndex(request *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
|
||||
log.Println("create index for: ", request)
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
cit := &CreateIndexTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
CreateIndexRequest: request,
|
||||
masterClient: node.masterClient,
|
||||
}
|
||||
|
||||
var cancel func()
|
||||
cit.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
fn := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("create index timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(cit)
|
||||
}
|
||||
}
|
||||
err := fn()
|
||||
err := node.sched.DdQueue.Enqueue(cit)
|
||||
if err != nil {
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
|
@ -516,26 +403,15 @@ func (node *NodeImpl) CreateIndex(request *milvuspb.CreateIndexRequest) (*common
|
|||
|
||||
func (node *NodeImpl) DescribeIndex(request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
|
||||
log.Println("Describe index for: ", request)
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
dit := &DescribeIndexTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
DescribeIndexRequest: request,
|
||||
masterClient: node.masterClient,
|
||||
}
|
||||
|
||||
var cancel func()
|
||||
dit.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
fn := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("create index timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(dit)
|
||||
}
|
||||
}
|
||||
err := fn()
|
||||
err := node.sched.DdQueue.Enqueue(dit)
|
||||
if err != nil {
|
||||
return &milvuspb.DescribeIndexResponse{
|
||||
Status: &commonpb.Status{
|
||||
|
@ -560,25 +436,14 @@ func (node *NodeImpl) DescribeIndex(request *milvuspb.DescribeIndexRequest) (*mi
|
|||
|
||||
func (node *NodeImpl) GetIndexState(request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error) {
|
||||
// log.Println("Describe index progress for: ", request)
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
dipt := &GetIndexStateTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
IndexStateRequest: request,
|
||||
}
|
||||
|
||||
var cancel func()
|
||||
dipt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
fn := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("create index timeout")
|
||||
default:
|
||||
return node.sched.DdQueue.Enqueue(dipt)
|
||||
}
|
||||
}
|
||||
err := fn()
|
||||
err := node.sched.DdQueue.Enqueue(dipt)
|
||||
if err != nil {
|
||||
return &milvuspb.IndexStateResponse{
|
||||
Status: &commonpb.Status{
|
||||
|
@ -602,14 +467,10 @@ func (node *NodeImpl) GetIndexState(request *milvuspb.IndexStateRequest) (*milvu
|
|||
}
|
||||
|
||||
func (node *NodeImpl) Insert(request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error) {
|
||||
ctx := context.Background()
|
||||
span, ctx := opentracing.StartSpanFromContext(ctx, "insert grpc received")
|
||||
defer span.Finish()
|
||||
span.SetTag("collection name", request.CollectionName)
|
||||
span.SetTag("partition tag", request.PartitionName)
|
||||
log.Println("insert into: ", request.CollectionName)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
it := &InsertTask{
|
||||
ctx: ctx,
|
||||
Condition: NewTaskCondition(ctx),
|
||||
dataServiceClient: node.dataServiceClient,
|
||||
BaseInsertTask: BaseInsertTask{
|
||||
|
@ -632,20 +493,7 @@ func (node *NodeImpl) Insert(request *milvuspb.InsertRequest) (*milvuspb.InsertR
|
|||
it.PartitionName = Params.DefaultPartitionTag
|
||||
}
|
||||
|
||||
var cancel func()
|
||||
it.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
|
||||
defer cancel()
|
||||
|
||||
fn := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("insert timeout")
|
||||
default:
|
||||
return node.sched.DmQueue.Enqueue(it)
|
||||
}
|
||||
}
|
||||
err := fn()
|
||||
err := node.sched.DmQueue.Enqueue(it)
|
||||
|
||||
if err != nil {
|
||||
return &milvuspb.InsertResponse{
|
||||
|
@ -670,15 +518,10 @@ func (node *NodeImpl) Insert(request *milvuspb.InsertRequest) (*milvuspb.InsertR
|
|||
}
|
||||
|
||||
func (node *NodeImpl) Search(request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error) {
|
||||
ctx := context.Background()
|
||||
span, ctx := opentracing.StartSpanFromContext(ctx, "search grpc received")
|
||||
defer span.Finish()
|
||||
span.SetTag("collection name", request.CollectionName)
|
||||
span.SetTag("partition tag", request.PartitionNames)
|
||||
span.SetTag("dsl", request.Dsl)
|
||||
log.Println("search: ", request.CollectionName, request.Dsl)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
qt := &SearchTask{
|
||||
ctx: ctx,
|
||||
Condition: NewTaskCondition(ctx),
|
||||
SearchRequest: internalpb2.SearchRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
|
@ -691,20 +534,8 @@ func (node *NodeImpl) Search(request *milvuspb.SearchRequest) (*milvuspb.SearchR
|
|||
resultBuf: make(chan []*internalpb2.SearchResults),
|
||||
query: request,
|
||||
}
|
||||
var cancel func()
|
||||
qt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
log.Printf("grpc address of query task: %p", qt)
|
||||
defer cancel()
|
||||
|
||||
fn := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("create collection timeout")
|
||||
default:
|
||||
return node.sched.DqQueue.Enqueue(qt)
|
||||
}
|
||||
}
|
||||
err := fn()
|
||||
err := node.sched.DqQueue.Enqueue(qt)
|
||||
if err != nil {
|
||||
return &milvuspb.SearchResults{
|
||||
Status: &commonpb.Status{
|
||||
|
@@ -728,7 +559,32 @@ func (node *NodeImpl) Search(request *milvuspb.SearchRequest) (*milvuspb.SearchR
}

func (node *NodeImpl) Flush(request *milvuspb.FlushRequest) (*commonpb.Status, error) {
	panic("implement me")
	log.Println("Flush collections: ", request.CollectionNames)
	ctx, cancel := context.WithTimeout(context.Background(), reqTimeoutInterval)
	defer cancel()
	ft := &FlushTask{
		Condition:         NewTaskCondition(ctx),
		FlushRequest:      request,
		dataServiceClient: node.dataServiceClient,
	}

	err := node.sched.DdQueue.Enqueue(ft)
	if err != nil {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
			Reason:    err.Error(),
		}, nil
	}

	err = ft.WaitToFinish()
	if err != nil {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
			Reason:    err.Error(),
		}, nil
	}

	return ft.result, nil
}

func (node *NodeImpl) GetDdChannel(request *commonpb.Empty) (*milvuspb.StringResponse, error) {
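Flush now takes a list of collection names rather than a single name. A hedged caller-side sketch follows; it assumes it sits in the same package as the handler above so the existing milvuspb/commonpb/errors imports resolve, and the handler may populate Base itself, in which case setting it here is redundant.

// flushAll is a minimal sketch of a caller, not part of the commit.
func flushAll(node *NodeImpl, names []string) error {
	req := &milvuspb.FlushRequest{
		Base:            &commonpb.MsgBase{MsgType: commonpb.MsgType_kFlush},
		CollectionNames: names, // several collections flushed in one call
	}
	status, err := node.Flush(req)
	if err != nil {
		return err
	}
	if status.ErrorCode != commonpb.ErrorCode_SUCCESS {
		return errors.New(status.Reason)
	}
	return nil
}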
@@ -83,6 +83,7 @@ type InsertChannelsMap struct {
func (m *InsertChannelsMap) createInsertMsgStream(collID UniqueID, channels []string) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	factory := msgstream.ProtoUDFactory{}

	_, ok := m.collectionID2InsertChannels[collID]
	if ok {
@@ -100,7 +101,7 @@ func (m *InsertChannelsMap) createInsertMsgStream(collID UniqueID, channels []st
	}
	m.insertChannels = append(m.insertChannels, channels)
	m.collectionID2InsertChannels[collID] = len(m.insertChannels) - 1
	stream := pulsarms.NewPulsarMsgStream(context.Background(), Params.MsgStreamInsertBufSize)
	stream := pulsarms.NewPulsarMsgStream(context.Background(), Params.MsgStreamInsertBufSize, 1024, factory.NewUnmarshalDispatcher())
	stream.SetPulsarClient(Params.PulsarAddress)
	stream.CreatePulsarProducers(channels)
	repack := func(tsMsgs []msgstream.TsMsg, hashKeys [][]int32) (map[int32]*msgstream.MsgPack, error) {
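Every call site in this commit moves the unmarshal dispatcher (plus a pulsar-side buffer size) into the stream constructor instead of supplying it later to the consumer call. The sketch below mirrors that constructor shape with stand-in types; the real PulsarMsgStream fields and dispatcher implementation are not reproduced here.

package main

import (
	"context"
	"fmt"
)

// UnmarshalDispatcher stands in for the msgstream dispatcher: it turns a raw
// payload plus a message-type key into a typed message.
type UnmarshalDispatcher interface {
	Unmarshal(payload []byte, msgType string) (interface{}, error)
}

type protoDispatcher struct{}

func (protoDispatcher) Unmarshal(payload []byte, msgType string) (interface{}, error) {
	return fmt.Sprintf("%s(%d bytes)", msgType, len(payload)), nil
}

// Stream stands in for PulsarMsgStream: the dispatcher is now a constructor
// argument, so later producer/consumer setup does not need one.
type Stream struct {
	ctx           context.Context
	recvBufSize   int64
	pulsarBufSize int64
	dispatcher    UnmarshalDispatcher
}

func NewStream(ctx context.Context, recvBufSize, pulsarBufSize int64, d UnmarshalDispatcher) *Stream {
	return &Stream{ctx: ctx, recvBufSize: recvBufSize, pulsarBufSize: pulsarBufSize, dispatcher: d}
}

func main() {
	s := NewStream(context.Background(), 1024, 1024, protoDispatcher{})
	msg, _ := s.dispatcher.Unmarshal([]byte{1, 2, 3}, "kInsert")
	fmt.Println(msg)
}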
@@ -106,6 +106,7 @@ func (node *NodeImpl) waitForServiceReady(service Component, serviceName string)
}

func (node *NodeImpl) Init() error {
	factory := msgstream.ProtoUDFactory{}

	// todo wait for proxyservice state changed to Healthy

@@ -196,7 +197,7 @@ func (node *NodeImpl) Init() error {

	pulsarAddress := Params.PulsarAddress

	node.queryMsgStream = pulsarms.NewPulsarMsgStream(node.ctx, Params.MsgStreamSearchBufSize)
	node.queryMsgStream = pulsarms.NewPulsarMsgStream(node.ctx, Params.MsgStreamSearchBufSize, 1024, factory.NewUnmarshalDispatcher())
	node.queryMsgStream.SetPulsarClient(pulsarAddress)
	node.queryMsgStream.CreatePulsarProducers(Params.SearchChannelNames)
	log.Println("create query message stream ...")
@@ -224,7 +225,7 @@ func (node *NodeImpl) Init() error {
	node.segAssigner = segAssigner
	node.segAssigner.PeerID = Params.ProxyID

	node.manipulationMsgStream = pulsarms.NewPulsarMsgStream(node.ctx, Params.MsgStreamInsertBufSize)
	node.manipulationMsgStream = pulsarms.NewPulsarMsgStream(node.ctx, Params.MsgStreamInsertBufSize, 1024, factory.NewUnmarshalDispatcher())
	node.manipulationMsgStream.SetPulsarClient(pulsarAddress)
	node.manipulationMsgStream.CreatePulsarProducers(Params.InsertChannelNames)
	repackFuncImpl := func(tsMsgs []msgstream.TsMsg, hashKeys [][]int32) (map[int32]*msgstream.MsgPack, error) {
@ -3,7 +3,6 @@ package proxynode
|
|||
import (
|
||||
"log"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
|
||||
|
@ -181,6 +180,7 @@ func insertRepackFunc(tsMsgs []msgstream.TsMsg,
|
|||
partitionID := insertRequest.PartitionID
|
||||
partitionName := insertRequest.PartitionName
|
||||
proxyID := insertRequest.Base.SourceID
|
||||
channelNames := channelNamesMap[collectionID]
|
||||
for index, key := range keys {
|
||||
ts := insertRequest.Timestamps[index]
|
||||
rowID := insertRequest.RowIDs[index]
|
||||
|
@ -191,6 +191,7 @@ func insertRepackFunc(tsMsgs []msgstream.TsMsg,
|
|||
result[key] = &msgPack
|
||||
}
|
||||
segmentID := getSegmentID(reqID, key)
|
||||
channelID := channelNames[int(key)%len(channelNames)]
|
||||
sliceRequest := internalpb2.InsertRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kInsert,
|
||||
|
@ -204,7 +205,8 @@ func insertRepackFunc(tsMsgs []msgstream.TsMsg,
|
|||
PartitionName: partitionName,
|
||||
SegmentID: segmentID,
|
||||
// todo rename to ChannelName
|
||||
ChannelID: strconv.FormatInt(int64(key), 10),
|
||||
// ChannelID: strconv.FormatInt(int64(key), 10),
|
||||
ChannelID: channelID,
|
||||
Timestamps: []uint64{ts},
|
||||
RowIDs: []int64{rowID},
|
||||
RowData: []*commonpb.Blob{row},
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package proxynode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"log"
|
||||
"math"
|
||||
|
@ -9,9 +8,6 @@ import (
|
|||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
oplog "github.com/opentracing/opentracing-go/log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/zilliztech/milvus-distributed/internal/allocator"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
|
@ -45,7 +41,6 @@ type InsertTask struct {
|
|||
Condition
|
||||
dataServiceClient DataServiceClient
|
||||
result *milvuspb.InsertResponse
|
||||
ctx context.Context
|
||||
rowIDAllocator *allocator.IDAllocator
|
||||
}
|
||||
|
||||
|
@ -87,21 +82,12 @@ func (it *InsertTask) PreExecute() error {
|
|||
it.Base.MsgType = commonpb.MsgType_kInsert
|
||||
it.Base.SourceID = Params.ProxyID
|
||||
|
||||
span, ctx := opentracing.StartSpanFromContext(it.ctx, "InsertTask preExecute")
|
||||
defer span.Finish()
|
||||
it.ctx = ctx
|
||||
span.SetTag("hash keys", it.Base.MsgID)
|
||||
span.SetTag("start time", it.BeginTs())
|
||||
collectionName := it.BaseInsertTask.CollectionName
|
||||
if err := ValidateCollectionName(collectionName); err != nil {
|
||||
span.LogFields(oplog.Error(err))
|
||||
span.Finish()
|
||||
return err
|
||||
}
|
||||
partitionTag := it.BaseInsertTask.PartitionName
|
||||
if err := ValidatePartitionTag(partitionTag, true); err != nil {
|
||||
span.LogFields(oplog.Error(err))
|
||||
span.Finish()
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -109,13 +95,7 @@ func (it *InsertTask) PreExecute() error {
|
|||
}
|
||||
|
||||
func (it *InsertTask) Execute() error {
|
||||
span, ctx := opentracing.StartSpanFromContext(it.ctx, "InsertTask Execute")
|
||||
defer span.Finish()
|
||||
it.ctx = ctx
|
||||
span.SetTag("hash keys", it.Base.MsgID)
|
||||
span.SetTag("start time", it.BeginTs())
|
||||
collectionName := it.BaseInsertTask.CollectionName
|
||||
span.LogFields(oplog.String("collection_name", collectionName))
|
||||
collSchema, err := globalMetaCache.GetCollectionSchema(collectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@@ -126,19 +106,24 @@ func (it *InsertTask) Execute() error {
		return err
	}
	it.CollectionID = collID
	partitionID, err := globalMetaCache.GetPartitionID(collectionName, it.PartitionName)
	if err != nil {
		return err
	var partitionID UniqueID
	if len(it.PartitionName) > 0 {
		partitionID, err = globalMetaCache.GetPartitionID(collectionName, it.PartitionName)
		if err != nil {
			return err
		}
	} else {
		partitionID, err = globalMetaCache.GetPartitionID(collectionName, Params.DefaultPartitionTag)
		if err != nil {
			return err
		}
	}
	it.PartitionID = partitionID
	span.LogFields(oplog.Bool("auto_id", autoID))
	var rowIDBegin UniqueID
	var rowIDEnd UniqueID
	rowNums := len(it.BaseInsertTask.RowData)
	rowIDBegin, rowIDEnd, _ = it.rowIDAllocator.Alloc(uint32(rowNums))
	span.LogFields(oplog.Int("rowNums", rowNums),
		oplog.Int("rowIDBegin", int(rowIDBegin)),
		oplog.Int("rowIDEnd", int(rowIDEnd)))

	it.BaseInsertTask.RowIDs = make([]UniqueID, rowNums)
	for i := rowIDBegin; i < rowIDEnd; i++ {
		offset := i - rowIDBegin
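The new branch above falls back to the default partition tag when the insert request leaves PartitionName empty. Below is a self-contained sketch of that fallback; the cache type, the "_default" tag constant, and the ID alias are illustrative stand-ins rather than the repository's globalMetaCache.

package main

import (
	"errors"
	"fmt"
)

type UniqueID = int64

// partitionCache stands in for globalMetaCache: it maps
// (collection name, partition tag) pairs to partition IDs.
type partitionCache map[string]map[string]UniqueID

const defaultPartitionTag = "_default" // assumed default tag

func (c partitionCache) GetPartitionID(coll, tag string) (UniqueID, error) {
	if tag == "" {
		tag = defaultPartitionTag // empty request -> default partition
	}
	id, ok := c[coll][tag]
	if !ok {
		return 0, errors.New("partition not found: " + tag)
	}
	return id, nil
}

func main() {
	cache := partitionCache{"docs": {defaultPartitionTag: 1, "hot": 2}}
	id, _ := cache.GetPartitionID("docs", "") // no partition named in the request
	fmt.Println("resolved partition:", id)    // -> 1
}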
@ -161,8 +146,7 @@ func (it *InsertTask) Execute() error {
|
|||
EndTs: it.EndTs(),
|
||||
Msgs: make([]msgstream.TsMsg, 1),
|
||||
}
|
||||
tsMsg.SetMsgContext(ctx)
|
||||
span.LogFields(oplog.String("send msg", "send msg"))
|
||||
tsMsg.SetMsgContext(it.Ctx())
|
||||
|
||||
it.result = &milvuspb.InsertResponse{
|
||||
Status: &commonpb.Status{
|
||||
|
@ -198,7 +182,6 @@ func (it *InsertTask) Execute() error {
|
|||
if err != nil {
|
||||
it.result.Status.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
|
||||
it.result.Status.Reason = err.Error()
|
||||
span.LogFields(oplog.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -206,7 +189,6 @@ func (it *InsertTask) Execute() error {
|
|||
if err != nil {
|
||||
it.result.Status.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
|
||||
it.result.Status.Reason = err.Error()
|
||||
span.LogFields(oplog.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -214,8 +196,6 @@ func (it *InsertTask) Execute() error {
|
|||
}
|
||||
|
||||
func (it *InsertTask) PostExecute() error {
|
||||
span, _ := opentracing.StartSpanFromContext(it.ctx, "InsertTask postExecute")
|
||||
defer span.Finish()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -225,7 +205,6 @@ type CreateCollectionTask struct {
|
|||
masterClient MasterClient
|
||||
dataServiceClient DataServiceClient
|
||||
result *commonpb.Status
|
||||
ctx context.Context
|
||||
schema *schemapb.CollectionSchema
|
||||
}
|
||||
|
||||
|
@ -366,7 +345,6 @@ type DropCollectionTask struct {
|
|||
*milvuspb.DropCollectionRequest
|
||||
masterClient MasterClient
|
||||
result *commonpb.Status
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (dct *DropCollectionTask) OnEnqueue() error {
|
||||
|
@ -435,7 +413,6 @@ type SearchTask struct {
|
|||
queryMsgStream *pulsarms.PulsarMsgStream
|
||||
resultBuf chan []*internalpb2.SearchResults
|
||||
result *milvuspb.SearchResults
|
||||
ctx context.Context
|
||||
query *milvuspb.SearchRequest
|
||||
}
|
||||
|
||||
|
@ -471,52 +448,52 @@ func (st *SearchTask) PreExecute() error {
|
|||
st.Base.MsgType = commonpb.MsgType_kSearch
|
||||
st.Base.SourceID = Params.ProxyID
|
||||
|
||||
span, ctx := opentracing.StartSpanFromContext(st.ctx, "SearchTask preExecute")
|
||||
defer span.Finish()
|
||||
st.ctx = ctx
|
||||
span.SetTag("hash keys", st.Base.MsgID)
|
||||
span.SetTag("start time", st.BeginTs())
|
||||
|
||||
collectionName := st.query.CollectionName
|
||||
_, err := globalMetaCache.GetCollectionID(collectionName)
|
||||
if err != nil { // err is not nil if collection not exists
|
||||
span.LogFields(oplog.Error(err))
|
||||
span.Finish()
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ValidateCollectionName(st.query.CollectionName); err != nil {
|
||||
span.LogFields(oplog.Error(err))
|
||||
span.Finish()
|
||||
return err
|
||||
}
|
||||
|
||||
for _, tag := range st.query.PartitionNames {
|
||||
if err := ValidatePartitionTag(tag, false); err != nil {
|
||||
span.LogFields(oplog.Error(err))
|
||||
span.Finish()
|
||||
return err
|
||||
}
|
||||
}
|
||||
st.Base.MsgType = commonpb.MsgType_kSearch
|
||||
queryBytes, err := proto.Marshal(st.query)
|
||||
if err != nil {
|
||||
span.LogFields(oplog.Error(err))
|
||||
span.Finish()
|
||||
return err
|
||||
}
|
||||
st.Query = &commonpb.Blob{
|
||||
Value: queryBytes,
|
||||
}
|
||||
|
||||
st.ResultChannelID = Params.SearchResultChannelNames[0]
|
||||
st.DbID = 0 // todo
|
||||
collectionID, err := globalMetaCache.GetCollectionID(collectionName)
|
||||
if err != nil { // err is not nil if collection not exists
|
||||
return err
|
||||
}
|
||||
st.CollectionID = collectionID
|
||||
st.PartitionIDs = make([]UniqueID, 0)
|
||||
for _, partitionName := range st.query.PartitionNames {
|
||||
partitionID, err := globalMetaCache.GetPartitionID(collectionName, partitionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
st.PartitionIDs = append(st.PartitionIDs, partitionID)
|
||||
}
|
||||
st.Dsl = st.query.Dsl
|
||||
st.PlaceholderGroup = st.query.PlaceholderGroup
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st *SearchTask) Execute() error {
|
||||
span, ctx := opentracing.StartSpanFromContext(st.ctx, "SearchTask Execute")
|
||||
defer span.Finish()
|
||||
st.ctx = ctx
|
||||
span.SetTag("hash keys", st.Base.MsgID)
|
||||
span.SetTag("start time", st.BeginTs())
|
||||
var tsMsg msgstream.TsMsg = &msgstream.SearchMsg{
|
||||
SearchRequest: st.SearchRequest,
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
|
@ -530,32 +507,24 @@ func (st *SearchTask) Execute() error {
|
|||
EndTs: st.Base.Timestamp,
|
||||
Msgs: make([]msgstream.TsMsg, 1),
|
||||
}
|
||||
tsMsg.SetMsgContext(ctx)
|
||||
tsMsg.SetMsgContext(st.Ctx())
|
||||
msgPack.Msgs[0] = tsMsg
|
||||
err := st.queryMsgStream.Produce(msgPack)
|
||||
log.Printf("[NodeImpl] length of searchMsg: %v", len(msgPack.Msgs))
|
||||
if err != nil {
|
||||
span.LogFields(oplog.Error(err))
|
||||
span.Finish()
|
||||
log.Printf("[NodeImpl] send search request failed: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (st *SearchTask) PostExecute() error {
|
||||
span, _ := opentracing.StartSpanFromContext(st.ctx, "SearchTask postExecute")
|
||||
defer span.Finish()
|
||||
span.SetTag("hash keys", st.Base.MsgID)
|
||||
span.SetTag("start time", st.BeginTs())
|
||||
for {
|
||||
select {
|
||||
case <-st.ctx.Done():
|
||||
case <-st.Ctx().Done():
|
||||
log.Print("SearchTask: wait to finish failed, timeout!, taskID:", st.ID())
|
||||
span.LogFields(oplog.String("wait to finish failed, timeout", "wait to finish failed, timeout"))
|
||||
return errors.New("SearchTask:wait to finish failed, timeout:" + strconv.FormatInt(st.ID(), 10))
|
||||
case searchResults := <-st.resultBuf:
|
||||
// fmt.Println("searchResults: ", searchResults)
|
||||
span.LogFields(oplog.String("receive result", "receive result"))
|
||||
filterSearchResult := make([]*internalpb2.SearchResults, 0)
|
||||
var filterReason string
|
||||
for _, partialSearchResult := range searchResults {
|
||||
|
@ -584,7 +553,6 @@ func (st *SearchTask) PostExecute() error {
|
|||
Reason: filterReason,
|
||||
},
|
||||
}
|
||||
span.LogFields(oplog.Error(errors.New(filterReason)))
|
||||
return errors.New(filterReason)
|
||||
}
|
||||
|
||||
|
@ -693,7 +661,6 @@ func (st *SearchTask) PostExecute() error {
|
|||
reducedHitsBs, err := proto.Marshal(reducedHits)
|
||||
if err != nil {
|
||||
log.Println("marshal error")
|
||||
span.LogFields(oplog.Error(err))
|
||||
return err
|
||||
}
|
||||
st.result.Hits = append(st.result.Hits, reducedHitsBs)
|
||||
|
@ -708,7 +675,6 @@ type HasCollectionTask struct {
|
|||
*milvuspb.HasCollectionRequest
|
||||
masterClient MasterClient
|
||||
result *milvuspb.BoolResponse
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (hct *HasCollectionTask) OnEnqueue() error {
|
||||
|
@ -765,7 +731,6 @@ type DescribeCollectionTask struct {
|
|||
*milvuspb.DescribeCollectionRequest
|
||||
masterClient MasterClient
|
||||
result *milvuspb.DescribeCollectionResponse
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (dct *DescribeCollectionTask) OnEnqueue() error {
|
||||
|
@ -825,7 +790,6 @@ type GetCollectionsStatisticsTask struct {
|
|||
*milvuspb.CollectionStatsRequest
|
||||
dataServiceClient DataServiceClient
|
||||
result *milvuspb.CollectionStatsResponse
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (g *GetCollectionsStatisticsTask) ID() UniqueID {
|
||||
|
@ -901,7 +865,6 @@ type ShowCollectionsTask struct {
|
|||
*milvuspb.ShowCollectionRequest
|
||||
masterClient MasterClient
|
||||
result *milvuspb.ShowCollectionResponse
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (sct *ShowCollectionsTask) OnEnqueue() error {
|
||||
|
@ -955,7 +918,6 @@ type CreatePartitionTask struct {
|
|||
*milvuspb.CreatePartitionRequest
|
||||
masterClient MasterClient
|
||||
result *commonpb.Status
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (cpt *CreatePartitionTask) OnEnqueue() error {
|
||||
|
@ -1018,7 +980,6 @@ type DropPartitionTask struct {
|
|||
*milvuspb.DropPartitionRequest
|
||||
masterClient MasterClient
|
||||
result *commonpb.Status
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (dpt *DropPartitionTask) OnEnqueue() error {
|
||||
|
@ -1081,7 +1042,6 @@ type HasPartitionTask struct {
|
|||
*milvuspb.HasPartitionRequest
|
||||
masterClient MasterClient
|
||||
result *milvuspb.BoolResponse
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (hpt *HasPartitionTask) OnEnqueue() error {
|
||||
|
@ -1138,66 +1098,11 @@ func (hpt *HasPartitionTask) PostExecute() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
//type DescribePartitionTask struct {
|
||||
// Condition
|
||||
// internalpb.DescribePartitionRequest
|
||||
// masterClient masterpb.MasterServiceClient
|
||||
// result *milvuspb.PartitionDescription
|
||||
// ctx context.Context
|
||||
//}
|
||||
//
|
||||
//func (dpt *DescribePartitionTask) ID() UniqueID {
|
||||
// return dpt.ReqID
|
||||
//}
|
||||
//
|
||||
//func (dpt *DescribePartitionTask) SetID(uid UniqueID) {
|
||||
// dpt.ReqID = uid
|
||||
//}
|
||||
//
|
||||
//func (dpt *DescribePartitionTask) Type() commonpb.MsgType {
|
||||
// return dpt.MsgType
|
||||
//}
|
||||
//
|
||||
//func (dpt *DescribePartitionTask) BeginTs() Timestamp {
|
||||
// return dpt.Timestamp
|
||||
//}
|
||||
//
|
||||
//func (dpt *DescribePartitionTask) EndTs() Timestamp {
|
||||
// return dpt.Timestamp
|
||||
//}
|
||||
//
|
||||
//func (dpt *DescribePartitionTask) SetTs(ts Timestamp) {
|
||||
// dpt.Timestamp = ts
|
||||
//}
|
||||
//
|
||||
//func (dpt *DescribePartitionTask) PreExecute() error {
|
||||
// collName, partitionTag := dpt.PartitionName.CollectionName, dpt.PartitionName.Tag
|
||||
//
|
||||
// if err := ValidateCollectionName(collName); err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// if err := ValidatePartitionTag(partitionTag, true); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//func (dpt *DescribePartitionTask) Execute() (err error) {
|
||||
// dpt.result, err = dpt.masterClient.DescribePartition(dpt.ctx, &dpt.DescribePartitionRequest)
|
||||
// return err
|
||||
//}
|
||||
//
|
||||
//func (dpt *DescribePartitionTask) PostExecute() error {
|
||||
// return nil
|
||||
//}
|
||||
|
||||
type ShowPartitionsTask struct {
|
||||
Condition
|
||||
*milvuspb.ShowPartitionRequest
|
||||
masterClient MasterClient
|
||||
result *milvuspb.ShowPartitionResponse
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (spt *ShowPartitionsTask) OnEnqueue() error {
|
||||
|
@ -1257,7 +1162,6 @@ type CreateIndexTask struct {
|
|||
*milvuspb.CreateIndexRequest
|
||||
masterClient MasterClient
|
||||
result *commonpb.Status
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (cit *CreateIndexTask) OnEnqueue() error {
|
||||
|
@ -1320,7 +1224,6 @@ type DescribeIndexTask struct {
|
|||
*milvuspb.DescribeIndexRequest
|
||||
masterClient MasterClient
|
||||
result *milvuspb.DescribeIndexResponse
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (dit *DescribeIndexTask) OnEnqueue() error {
|
||||
|
@ -1384,7 +1287,6 @@ type GetIndexStateTask struct {
|
|||
*milvuspb.IndexStateRequest
|
||||
indexServiceClient IndexServiceClient
|
||||
result *milvuspb.IndexStateResponse
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (dipt *GetIndexStateTask) OnEnqueue() error {
|
||||
|
@ -1457,7 +1359,6 @@ type FlushTask struct {
|
|||
*milvuspb.FlushRequest
|
||||
dataServiceClient DataServiceClient
|
||||
result *commonpb.Status
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (ft *FlushTask) OnEnqueue() error {
|
||||
|
@ -1496,23 +1397,34 @@ func (ft *FlushTask) PreExecute() error {
|
|||
}
|
||||
|
||||
func (ft *FlushTask) Execute() error {
|
||||
var err error
|
||||
collID, err := globalMetaCache.GetCollectionID(ft.CollectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
for _, collName := range ft.CollectionNames {
|
||||
collID, err := globalMetaCache.GetCollectionID(collName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
flushReq := &datapb.FlushRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kFlush,
|
||||
MsgID: ft.Base.MsgID,
|
||||
Timestamp: ft.Base.Timestamp,
|
||||
SourceID: ft.Base.SourceID,
|
||||
},
|
||||
DbID: 0,
|
||||
CollectionID: collID,
|
||||
}
|
||||
var status *commonpb.Status
|
||||
status, err = ft.dataServiceClient.Flush(flushReq)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if status.ErrorCode != commonpb.ErrorCode_SUCCESS {
|
||||
return errors.New(status.Reason)
|
||||
}
|
||||
}
|
||||
flushReq := &datapb.FlushRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kFlush,
|
||||
MsgID: ft.Base.MsgID,
|
||||
Timestamp: ft.Base.Timestamp,
|
||||
SourceID: ft.Base.SourceID,
|
||||
},
|
||||
DbID: 0,
|
||||
CollectionID: collID,
|
||||
ft.result = &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
}
|
||||
ft.result, err = ft.dataServiceClient.Flush(flushReq)
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ft *FlushTask) PostExecute() error {
|
||||
|
|
|
@@ -14,7 +14,6 @@ import (
	"github.com/zilliztech/milvus-distributed/internal/allocator"
	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
	"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
)

type TaskQueue interface {
@@ -372,14 +371,12 @@ func (sched *TaskScheduler) queryLoop() {

func (sched *TaskScheduler) queryResultLoop() {
	defer sched.wg.Done()
	factory := msgstream.ProtoUDFactory{}

	unmarshal := util.NewUnmarshalDispatcher()
	queryResultMsgStream := pulsarms.NewPulsarMsgStream(sched.ctx, Params.MsgStreamSearchResultBufSize)
	queryResultMsgStream := pulsarms.NewPulsarMsgStream(sched.ctx, Params.MsgStreamSearchResultBufSize, 1024, factory.NewUnmarshalDispatcher())
	queryResultMsgStream.SetPulsarClient(Params.PulsarAddress)
	queryResultMsgStream.CreatePulsarConsumers(Params.SearchResultChannelNames,
		Params.ProxySubName,
		unmarshal,
		Params.MsgStreamSearchResultPulsarBufSize)
		Params.ProxySubName)
	queryNodeNum := Params.QueryNodeNum

	queryResultMsgStream.Start()

@@ -51,7 +51,8 @@ func newTimeTick(ctx context.Context,
		checkFunc: checkFunc,
	}

	t.tickMsgStream = pulsarms.NewPulsarMsgStream(t.ctx, Params.MsgStreamTimeTickBufSize)
	factory := msgstream.ProtoUDFactory{}
	t.tickMsgStream = pulsarms.NewPulsarMsgStream(t.ctx, Params.MsgStreamTimeTickBufSize, 1024, factory.NewUnmarshalDispatcher())
	pulsarAddress := Params.PulsarAddress

	t.tickMsgStream.SetPulsarClient(pulsarAddress)
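The scheduler hunk above swaps the package-level util.NewUnmarshalDispatcher() for a dispatcher minted by a ProtoUDFactory and handed to the stream constructor, and CreatePulsarConsumers drops the dispatcher and buffer-size arguments accordingly. Below is a small, self-contained sketch of that factory shape; the message types and registration details are illustrative stand-ins, not the real msgstream package.

package main

import (
	"errors"
	"fmt"
)

// TsMsg stands in for msgstream.TsMsg.
type TsMsg interface{ Type() string }

type insertMsg struct{ payload []byte }

func (insertMsg) Type() string { return "kInsert" }

// UnmarshalFunc decodes one message type.
type UnmarshalFunc func(payload []byte) (TsMsg, error)

// UnmarshalDispatcher routes a payload to the decoder registered for its type.
type UnmarshalDispatcher struct{ funcs map[string]UnmarshalFunc }

func (d *UnmarshalDispatcher) Unmarshal(payload []byte, msgType string) (TsMsg, error) {
	f, ok := d.funcs[msgType]
	if !ok {
		return nil, errors.New("no unmarshal func for " + msgType)
	}
	return f(payload)
}

// ProtoUDFactory mirrors the factory role: each stream receives its own
// pre-registered dispatcher instead of sharing a package-level one.
type ProtoUDFactory struct{}

func (ProtoUDFactory) NewUnmarshalDispatcher() *UnmarshalDispatcher {
	return &UnmarshalDispatcher{funcs: map[string]UnmarshalFunc{
		"kInsert": func(p []byte) (TsMsg, error) { return insertMsg{payload: p}, nil },
	}}
}

func main() {
	factory := ProtoUDFactory{}
	disp := factory.NewUnmarshalDispatcher()
	msg, _ := disp.Unmarshal([]byte{0x1}, "kInsert")
	fmt.Println("decoded:", msg.Type())
}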
@@ -9,8 +9,7 @@ import (
	"runtime"
	"time"

	"github.com/zilliztech/milvus-distributed/internal/msgstream/util"

	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
@@ -96,6 +95,7 @@ func (s *ServiceImpl) fillNodeInitParams() error {
}

func (s *ServiceImpl) Init() error {
	dispatcherFactory := msgstream.ProtoUDFactory{}

	err := s.fillNodeInitParams()
	if err != nil {
@@ -103,17 +103,15 @@ func (s *ServiceImpl) Init() error {
	}
	log.Println("fill node init params ...")

	serviceTimeTickMsgStream := pulsarms.NewPulsarTtMsgStream(s.ctx, 1024)
	serviceTimeTickMsgStream := pulsarms.NewPulsarTtMsgStream(s.ctx, 1024, 1024, dispatcherFactory.NewUnmarshalDispatcher())
	serviceTimeTickMsgStream.SetPulsarClient(Params.PulsarAddress)
	serviceTimeTickMsgStream.CreatePulsarProducers([]string{Params.ServiceTimeTickChannel})
	log.Println("create service time tick producer channel: ", []string{Params.ServiceTimeTickChannel})

	nodeTimeTickMsgStream := pulsarms.NewPulsarMsgStream(s.ctx, 1024)
	nodeTimeTickMsgStream := pulsarms.NewPulsarMsgStream(s.ctx, 1024, 1024, dispatcherFactory.NewUnmarshalDispatcher())
	nodeTimeTickMsgStream.SetPulsarClient(Params.PulsarAddress)
	nodeTimeTickMsgStream.CreatePulsarConsumers(Params.NodeTimeTickChannel,
		"proxyservicesub", // TODO: add config
		util.NewUnmarshalDispatcher(),
		1024)
		"proxyservicesub") // TODO: add config
	log.Println("create node time tick consumer channel: ", Params.NodeTimeTickChannel)

	ttBarrier := newSoftTimeTickBarrier(s.ctx, nodeTimeTickMsgStream, []UniqueID{0}, 10)
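After this change a consumer is wired up with only its channels and a subscription name ("proxyservicesub" above, still hard-coded per the TODO); the dispatcher and pulsar buffer size travel with the stream constructor. The toy below only illustrates why the subscription name matters — subscriptions on the same channel keep independent read positions — and is not the pulsar client API; the second subscriber name is hypothetical.

package main

import "fmt"

// channel is a toy topic: each subscription name tracks its own cursor.
type channel struct {
	msgs    []string
	cursors map[string]int // subscription name -> next index to read
}

func (c *channel) consume(subName string) (string, bool) {
	i := c.cursors[subName]
	if i >= len(c.msgs) {
		return "", false
	}
	c.cursors[subName] = i + 1
	return c.msgs[i], true
}

func main() {
	ch := &channel{msgs: []string{"tt-1", "tt-2"}, cursors: map[string]int{}}
	m1, _ := ch.consume("proxyservicesub")
	m2, _ := ch.consume("querynodesub") // hypothetical second subscriber
	fmt.Println(m1, m2)                 // both read "tt-1": independent cursors
}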
@@ -23,6 +23,10 @@ type Collection struct {
	partitions []*Partition
}

//func (c *Collection) Name() string {
//	return c.schema.Name
//}

func (c *Collection) ID() UniqueID {
	return c.id
}

@@ -43,14 +43,17 @@ type collectionReplica interface {
	getVecFieldsByCollectionID(collectionID UniqueID) ([]int64, error)

	// partition
	// TODO: remove collection ID, add a `map[partitionID]partition` to replica implement
	// Partition tags in different collections are not unique,
	// so partition api should specify the target collection.
	getPartitionNum(collectionID UniqueID) (int, error)
	addPartition2(collectionID UniqueID, partitionTag string) error
	addPartition(collectionID UniqueID, partitionID UniqueID) error
	removePartition(collectionID UniqueID, partitionID UniqueID) error
	addPartitionsByCollectionMeta(colMeta *etcdpb.CollectionInfo) error
	removePartitionsByCollectionMeta(colMeta *etcdpb.CollectionInfo) error
	removePartition(collectionID UniqueID, partitionTag string) error
	addPartitionsByCollectionMeta(colMeta *etcdpb.CollectionMeta) error
	removePartitionsByCollectionMeta(colMeta *etcdpb.CollectionMeta) error
	getPartitionByTag(collectionID UniqueID, partitionTag string) (*Partition, error)
	getPartitionByID(collectionID UniqueID, partitionID UniqueID) (*Partition, error)
	hasPartition(collectionID UniqueID, partitionID UniqueID) bool
	hasPartition(collectionID UniqueID, partitionTag string) bool
	enablePartitionDM(collectionID UniqueID, partitionID UniqueID) error
	disablePartitionDM(collectionID UniqueID, partitionID UniqueID) error
	getEnablePartitionDM(collectionID UniqueID, partitionID UniqueID) (bool, error)
@@ -58,6 +61,7 @@ type collectionReplica interface {
	// segment
	getSegmentNum() int
	getSegmentStatistics() []*internalpb2.SegmentStats
	addSegment2(segmentID UniqueID, partitionTag string, collectionID UniqueID, segType segmentType) error
	addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, segType segmentType) error
	removeSegment(segmentID UniqueID) error
	getSegmentByID(segmentID UniqueID) (*Segment, error)
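The interface above keeps both tag-keyed and ID-keyed partition accessors during the transition, and its comment explains why every partition call still takes a collection ID: tags are only unique within a collection. Below is a self-contained sketch of that keying rule; the replica type and data are illustrative, not the real collectionReplicaImpl.

package main

import (
	"errors"
	"fmt"
)

type UniqueID = int64

// replica stands in for the query node's collection replica: partition tags
// are scoped per collection, so every lookup needs (collectionID, tag).
type replica struct {
	partitions map[UniqueID]map[string]struct{}
}

func (r *replica) addPartition2(collID UniqueID, tag string) {
	if r.partitions[collID] == nil {
		r.partitions[collID] = map[string]struct{}{}
	}
	r.partitions[collID][tag] = struct{}{}
}

func (r *replica) hasPartition(collID UniqueID, tag string) bool {
	_, ok := r.partitions[collID][tag]
	return ok
}

func (r *replica) removePartition(collID UniqueID, tag string) error {
	if !r.hasPartition(collID, tag) {
		return errors.New("cannot find partition, tag = " + tag)
	}
	delete(r.partitions[collID], tag)
	return nil
}

func main() {
	r := &replica{partitions: map[UniqueID]map[string]struct{}{}}
	r.addPartition2(0, "p0")
	r.addPartition2(1, "p0") // same tag in a different collection is allowed
	fmt.Println(r.hasPartition(0, "p0"), r.hasPartition(0, "p1")) // true false
	fmt.Println(r.removePartition(0, "p0"))                       // <nil>
}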
|||
return len(collection.partitions), nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) addPartition2(collectionID UniqueID, partitionTag string) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var newPartition = newPartition2(partitionTag)
|
||||
|
||||
*collection.Partitions() = append(*collection.Partitions(), newPartition)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) addPartition(collectionID UniqueID, partitionID UniqueID) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
@ -208,14 +227,14 @@ func (colReplica *collectionReplicaImpl) addPartition(collectionID UniqueID, par
|
|||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) removePartition(collectionID UniqueID, partitionID UniqueID) error {
|
||||
func (colReplica *collectionReplicaImpl) removePartition(collectionID UniqueID, partitionTag string) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
return colReplica.removePartitionPrivate(collectionID, partitionID)
|
||||
return colReplica.removePartitionPrivate(collectionID, partitionTag)
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) removePartitionPrivate(collectionID UniqueID, partitionID UniqueID) error {
|
||||
func (colReplica *collectionReplicaImpl) removePartitionPrivate(collectionID UniqueID, partitionTag string) error {
|
||||
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -223,7 +242,7 @@ func (colReplica *collectionReplicaImpl) removePartitionPrivate(collectionID Uni
|
|||
|
||||
var tmpPartitions = make([]*Partition, 0)
|
||||
for _, p := range *collection.Partitions() {
|
||||
if p.ID() == partitionID {
|
||||
if p.Tag() == partitionTag {
|
||||
for _, s := range *p.Segments() {
|
||||
deleteSegment(colReplica.segments[s.ID()])
|
||||
delete(colReplica.segments, s.ID())
|
||||
|
@ -238,30 +257,30 @@ func (colReplica *collectionReplicaImpl) removePartitionPrivate(collectionID Uni
|
|||
}
|
||||
|
||||
// deprecated
|
||||
func (colReplica *collectionReplicaImpl) addPartitionsByCollectionMeta(colMeta *etcdpb.CollectionInfo) error {
|
||||
func (colReplica *collectionReplicaImpl) addPartitionsByCollectionMeta(colMeta *etcdpb.CollectionMeta) error {
|
||||
if !colReplica.hasCollection(colMeta.ID) {
|
||||
err := errors.New("Cannot find collection, id = " + strconv.FormatInt(colMeta.ID, 10))
|
||||
return err
|
||||
}
|
||||
pToAdd := make([]UniqueID, 0)
|
||||
for _, partitionID := range colMeta.PartitionIDs {
|
||||
if !colReplica.hasPartition(colMeta.ID, partitionID) {
|
||||
pToAdd = append(pToAdd, partitionID)
|
||||
pToAdd := make([]string, 0)
|
||||
for _, partitionTag := range colMeta.PartitionTags {
|
||||
if !colReplica.hasPartition(colMeta.ID, partitionTag) {
|
||||
pToAdd = append(pToAdd, partitionTag)
|
||||
}
|
||||
}
|
||||
|
||||
for _, id := range pToAdd {
|
||||
err := colReplica.addPartition(colMeta.ID, id)
|
||||
for _, tag := range pToAdd {
|
||||
err := colReplica.addPartition2(colMeta.ID, tag)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
fmt.Println("add partition: ", id)
|
||||
fmt.Println("add partition: ", tag)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) removePartitionsByCollectionMeta(colMeta *etcdpb.CollectionInfo) error {
|
||||
func (colReplica *collectionReplicaImpl) removePartitionsByCollectionMeta(colMeta *etcdpb.CollectionMeta) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
|
@ -270,30 +289,37 @@ func (colReplica *collectionReplicaImpl) removePartitionsByCollectionMeta(colMet
|
|||
return err
|
||||
}
|
||||
|
||||
pToDel := make([]UniqueID, 0)
|
||||
pToDel := make([]string, 0)
|
||||
for _, partition := range col.partitions {
|
||||
hasPartition := false
|
||||
for _, id := range colMeta.PartitionIDs {
|
||||
if partition.ID() == id {
|
||||
for _, tag := range colMeta.PartitionTags {
|
||||
if partition.partitionTag == tag {
|
||||
hasPartition = true
|
||||
}
|
||||
}
|
||||
if !hasPartition {
|
||||
pToDel = append(pToDel, partition.ID())
|
||||
pToDel = append(pToDel, partition.partitionTag)
|
||||
}
|
||||
}
|
||||
|
||||
for _, id := range pToDel {
|
||||
err := colReplica.removePartitionPrivate(col.ID(), id)
|
||||
for _, tag := range pToDel {
|
||||
err := colReplica.removePartitionPrivate(col.ID(), tag)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
fmt.Println("delete partition: ", id)
|
||||
fmt.Println("delete partition: ", tag)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) getPartitionByTag(collectionID UniqueID, partitionTag string) (*Partition, error) {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
||||
return colReplica.getPartitionByTagPrivate(collectionID, partitionTag)
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) getPartitionByID(collectionID UniqueID, partitionID UniqueID) (*Partition, error) {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
@ -301,6 +327,21 @@ func (colReplica *collectionReplicaImpl) getPartitionByID(collectionID UniqueID,
|
|||
return colReplica.getPartitionByIDPrivate(collectionID, partitionID)
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) getPartitionByTagPrivate(collectionID UniqueID, partitionTag string) (*Partition, error) {
|
||||
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, p := range *collection.Partitions() {
|
||||
if p.Tag() == partitionTag {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("cannot find partition, tag = " + partitionTag)
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) getPartitionByIDPrivate(collectionID UniqueID, partitionID UniqueID) (*Partition, error) {
|
||||
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
|
||||
if err != nil {
|
||||
|
@ -316,7 +357,7 @@ func (colReplica *collectionReplicaImpl) getPartitionByIDPrivate(collectionID Un
|
|||
return nil, errors.New("cannot find partition, id = " + strconv.FormatInt(partitionID, 10))
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) hasPartition(collectionID UniqueID, partitionID UniqueID) bool {
|
||||
func (colReplica *collectionReplicaImpl) hasPartition(collectionID UniqueID, partitionTag string) bool {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
||||
|
@ -327,7 +368,7 @@ func (colReplica *collectionReplicaImpl) hasPartition(collectionID UniqueID, par
|
|||
}
|
||||
|
||||
for _, p := range *collection.Partitions() {
|
||||
if p.ID() == partitionID {
|
||||
if p.Tag() == partitionTag {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
@ -405,6 +446,28 @@ func (colReplica *collectionReplicaImpl) getSegmentStatistics() []*internalpb2.S
|
|||
return statisticData
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) addSegment2(segmentID UniqueID, partitionTag string, collectionID UniqueID, segType segmentType) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
collection, err := colReplica.getCollectionByIDPrivate(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
partition, err2 := colReplica.getPartitionByTagPrivate(collectionID, partitionTag)
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
|
||||
var newSegment = newSegment2(collection, segmentID, partitionTag, collectionID, segType)
|
||||
|
||||
colReplica.segments[segmentID] = newSegment
|
||||
*partition.Segments() = append(*partition.Segments(), newSegment)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) addSegment(segmentID UniqueID, partitionID UniqueID, collectionID UniqueID, segType segmentType) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
|
|
@ -61,18 +61,18 @@ func TestCollectionReplica_getPartitionNum(t *testing.T) {
|
|||
collectionID := UniqueID(0)
|
||||
initTestMeta(t, node, collectionID, 0)
|
||||
|
||||
partitionIDs := []UniqueID{1, 2, 3}
|
||||
for _, id := range partitionIDs {
|
||||
err := node.replica.addPartition(collectionID, id)
|
||||
partitionTags := []string{"a", "b", "c"}
|
||||
for _, tag := range partitionTags {
|
||||
err := node.replica.addPartition2(collectionID, tag)
|
||||
assert.NoError(t, err)
|
||||
partition, err := node.replica.getPartitionByID(collectionID, id)
|
||||
partition, err := node.replica.getPartitionByTag(collectionID, tag)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, partition.ID(), id)
|
||||
assert.Equal(t, partition.partitionTag, tag)
|
||||
}
|
||||
|
||||
partitionNum, err := node.replica.getPartitionNum(collectionID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, partitionNum, len(partitionIDs)+1) // _default
|
||||
assert.Equal(t, partitionNum, len(partitionTags)+1) // _default
|
||||
node.Stop()
|
||||
}
|
||||
|
||||
|
@ -81,13 +81,13 @@ func TestCollectionReplica_addPartition(t *testing.T) {
|
|||
collectionID := UniqueID(0)
|
||||
initTestMeta(t, node, collectionID, 0)
|
||||
|
||||
partitionIDs := []UniqueID{1, 2, 3}
|
||||
for _, id := range partitionIDs {
|
||||
err := node.replica.addPartition(collectionID, id)
|
||||
partitionTags := []string{"a", "b", "c"}
|
||||
for _, tag := range partitionTags {
|
||||
err := node.replica.addPartition2(collectionID, tag)
|
||||
assert.NoError(t, err)
|
||||
partition, err := node.replica.getPartitionByID(collectionID, id)
|
||||
partition, err := node.replica.getPartitionByTag(collectionID, tag)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, partition.ID(), id)
|
||||
assert.Equal(t, partition.partitionTag, tag)
|
||||
}
|
||||
node.Stop()
|
||||
}
|
||||
|
@ -97,15 +97,15 @@ func TestCollectionReplica_removePartition(t *testing.T) {
|
|||
collectionID := UniqueID(0)
|
||||
initTestMeta(t, node, collectionID, 0)
|
||||
|
||||
partitionIDs := []UniqueID{1, 2, 3}
|
||||
partitionTags := []string{"a", "b", "c"}
|
||||
|
||||
for _, id := range partitionIDs {
|
||||
err := node.replica.addPartition(collectionID, id)
|
||||
for _, tag := range partitionTags {
|
||||
err := node.replica.addPartition2(collectionID, tag)
|
||||
assert.NoError(t, err)
|
||||
partition, err := node.replica.getPartitionByID(collectionID, id)
|
||||
partition, err := node.replica.getPartitionByTag(collectionID, tag)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, partition.ID(), id)
|
||||
err = node.replica.removePartition(collectionID, id)
|
||||
assert.Equal(t, partition.partitionTag, tag)
|
||||
err = node.replica.removePartition(collectionID, tag)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
node.Stop()
|
||||
|
@ -117,18 +117,18 @@ func TestCollectionReplica_addPartitionsByCollectionMeta(t *testing.T) {
|
|||
initTestMeta(t, node, collectionID, 0)
|
||||
|
||||
collectionMeta := genTestCollectionMeta(collectionID, false)
|
||||
collectionMeta.PartitionIDs = []UniqueID{0, 1, 2}
|
||||
collectionMeta.PartitionTags = []string{"p0", "p1", "p2"}
|
||||
|
||||
err := node.replica.addPartitionsByCollectionMeta(collectionMeta)
|
||||
assert.NoError(t, err)
|
||||
partitionNum, err := node.replica.getPartitionNum(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, partitionNum, len(collectionMeta.PartitionIDs)+1)
|
||||
hasPartition := node.replica.hasPartition(UniqueID(0), UniqueID(0))
|
||||
assert.Equal(t, partitionNum, len(collectionMeta.PartitionTags)+1)
|
||||
hasPartition := node.replica.hasPartition(UniqueID(0), "p0")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), UniqueID(1))
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), UniqueID(2))
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
|
||||
node.Stop()
|
||||
|
@ -140,19 +140,19 @@ func TestCollectionReplica_removePartitionsByCollectionMeta(t *testing.T) {
|
|||
initTestMeta(t, node, collectionID, 0)
|
||||
|
||||
collectionMeta := genTestCollectionMeta(collectionID, false)
|
||||
collectionMeta.PartitionIDs = []UniqueID{0}
|
||||
collectionMeta.PartitionTags = []string{"p0"}
|
||||
|
||||
err := node.replica.addPartitionsByCollectionMeta(collectionMeta)
|
||||
assert.NoError(t, err)
|
||||
partitionNum, err := node.replica.getPartitionNum(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, partitionNum, len(collectionMeta.PartitionIDs)+1)
|
||||
assert.Equal(t, partitionNum, len(collectionMeta.PartitionTags)+1)
|
||||
|
||||
hasPartition := node.replica.hasPartition(UniqueID(0), UniqueID(0))
|
||||
hasPartition := node.replica.hasPartition(UniqueID(0), "p0")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), UniqueID(1))
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
|
||||
assert.Equal(t, hasPartition, false)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), UniqueID(2))
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
|
||||
assert.Equal(t, hasPartition, false)
|
||||
|
||||
node.Stop()
|
||||
|
@ -165,12 +165,12 @@ func TestCollectionReplica_getPartitionByTag(t *testing.T) {
|
|||
|
||||
collectionMeta := genTestCollectionMeta(collectionID, false)
|
||||
|
||||
for _, id := range collectionMeta.PartitionIDs {
|
||||
err := node.replica.addPartition(collectionID, id)
|
||||
for _, tag := range collectionMeta.PartitionTags {
|
||||
err := node.replica.addPartition2(collectionID, tag)
|
||||
assert.NoError(t, err)
|
||||
partition, err := node.replica.getPartitionByID(collectionID, id)
|
||||
partition, err := node.replica.getPartitionByTag(collectionID, tag)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, partition.ID(), id)
|
||||
assert.Equal(t, partition.partitionTag, tag)
|
||||
assert.NotNil(t, partition)
|
||||
}
|
||||
node.Stop()
|
||||
|
@ -182,11 +182,11 @@ func TestCollectionReplica_hasPartition(t *testing.T) {
|
|||
initTestMeta(t, node, collectionID, 0)
|
||||
|
||||
collectionMeta := genTestCollectionMeta(collectionID, false)
|
||||
err := node.replica.addPartition(collectionID, collectionMeta.PartitionIDs[0])
|
||||
err := node.replica.addPartition2(collectionID, collectionMeta.PartitionTags[0])
|
||||
assert.NoError(t, err)
|
||||
hasPartition := node.replica.hasPartition(collectionID, defaultPartitionID)
|
||||
hasPartition := node.replica.hasPartition(collectionID, "default")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(collectionID, defaultPartitionID+1)
|
||||
hasPartition = node.replica.hasPartition(collectionID, "default1")
|
||||
assert.Equal(t, hasPartition, false)
|
||||
node.Stop()
|
||||
}
|
||||
|
@ -198,8 +198,9 @@ func TestCollectionReplica_addSegment(t *testing.T) {
|
|||
initTestMeta(t, node, collectionID, 0)
|
||||
|
||||
const segmentNum = 3
|
||||
tag := "default"
|
||||
for i := 0; i < segmentNum; i++ {
|
||||
err := node.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segTypeGrowing)
|
||||
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
|
||||
assert.NoError(t, err)
|
||||
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
|
||||
assert.NoError(t, err)
|
||||
|
@ -215,9 +216,10 @@ func TestCollectionReplica_removeSegment(t *testing.T) {
|
|||
initTestMeta(t, node, collectionID, 0)
|
||||
|
||||
const segmentNum = 3
|
||||
tag := "default"
|
||||
|
||||
for i := 0; i < segmentNum; i++ {
|
||||
err := node.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segTypeGrowing)
|
||||
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
|
||||
assert.NoError(t, err)
|
||||
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
|
||||
assert.NoError(t, err)
|
||||
|
@ -235,9 +237,10 @@ func TestCollectionReplica_getSegmentByID(t *testing.T) {
|
|||
initTestMeta(t, node, collectionID, 0)
|
||||
|
||||
const segmentNum = 3
|
||||
tag := "default"
|
||||
|
||||
for i := 0; i < segmentNum; i++ {
|
||||
err := node.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segTypeGrowing)
|
||||
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
|
||||
assert.NoError(t, err)
|
||||
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
|
||||
assert.NoError(t, err)
|
||||
|
@ -253,9 +256,10 @@ func TestCollectionReplica_hasSegment(t *testing.T) {
|
|||
initTestMeta(t, node, collectionID, 0)
|
||||
|
||||
const segmentNum = 3
|
||||
tag := "default"
|
||||
|
||||
for i := 0; i < segmentNum; i++ {
|
||||
err := node.replica.addSegment(UniqueID(i), defaultPartitionID, collectionID, segTypeGrowing)
|
||||
err := node.replica.addSegment2(UniqueID(i), tag, collectionID, segTypeGrowing)
|
||||
assert.NoError(t, err)
|
||||
targetSeg, err := node.replica.getSegmentByID(UniqueID(i))
|
||||
assert.NoError(t, err)
|
||||
|
|
|
@ -61,12 +61,12 @@ func TestDataSyncService_Start(t *testing.T) {
|
|||
Timestamp: uint64(i + 1000),
|
||||
SourceID: 0,
|
||||
},
|
||||
CollectionID: UniqueID(0),
|
||||
PartitionID: defaultPartitionID,
|
||||
SegmentID: int64(0),
|
||||
ChannelID: "0",
|
||||
Timestamps: []uint64{uint64(i + 1000), uint64(i + 1000)},
|
||||
RowIDs: []int64{int64(i), int64(i)},
|
||||
CollectionID: UniqueID(0),
|
||||
PartitionName: "default",
|
||||
SegmentID: int64(0),
|
||||
ChannelID: "0",
|
||||
Timestamps: []uint64{uint64(i + 1000), uint64(i + 1000)},
|
||||
RowIDs: []int64{int64(i), int64(i)},
|
||||
RowData: []*commonpb.Blob{
|
||||
{Value: rawData},
|
||||
{Value: rawData},
|
||||
|
@ -109,11 +109,12 @@ func TestDataSyncService_Start(t *testing.T) {
|
|||
ddChannels := Params.DDChannelNames
|
||||
pulsarURL := Params.PulsarAddress
|
||||
|
||||
insertStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
insertStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
|
||||
insertStream.SetPulsarClient(pulsarURL)
|
||||
insertStream.CreatePulsarProducers(insertChannels)
|
||||
|
||||
ddStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
|
||||
ddStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
|
||||
ddStream.SetPulsarClient(pulsarURL)
|
||||
ddStream.CreatePulsarProducers(ddChannels)
|
||||
|
||||
|
|
|
@ -38,7 +38,7 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
|
|||
|
||||
var ddMsg = ddMsg{
|
||||
collectionRecords: make(map[UniqueID][]metaOperateRecord),
|
||||
partitionRecords: make(map[UniqueID][]metaOperateRecord),
|
||||
partitionRecords: make(map[string][]metaOperateRecord),
|
||||
timeRange: TimeRange{
|
||||
timestampMin: msMsg.TimestampMin(),
|
||||
timestampMax: msMsg.TimestampMax(),
|
||||
|
@ -102,8 +102,7 @@ func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
|
|||
}
|
||||
|
||||
// add default partition
|
||||
// TODO: allocate default partition id in master
|
||||
err = ddNode.replica.addPartition(collectionID, UniqueID(2021))
|
||||
err = ddNode.replica.addPartition2(collectionID, Params.DefaultPartitionTag)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
|
@ -119,6 +118,12 @@ func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
|
|||
func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
|
||||
collectionID := msg.CollectionID
|
||||
|
||||
//err := ddNode.replica.removeCollection(collectionID)
|
||||
//if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
//}
|
||||
|
||||
ddNode.ddMsg.collectionRecords[collectionID] = append(ddNode.ddMsg.collectionRecords[collectionID],
|
||||
metaOperateRecord{
|
||||
createOrDrop: false,
|
||||
|
@ -130,15 +135,17 @@ func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
|
|||
|
||||
func (ddNode *ddNode) createPartition(msg *msgstream.CreatePartitionMsg) {
|
||||
collectionID := msg.CollectionID
|
||||
partitionID := msg.PartitionID
|
||||
partitionName := msg.PartitionName
|
||||
|
||||
err := ddNode.replica.addPartition(collectionID, partitionID)
|
||||
err := ddNode.replica.addPartition2(collectionID, partitionName)
|
||||
// TODO:: add partition by partitionID
|
||||
//err := ddNode.replica.addPartition(collectionID, msg.PartitionID)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
ddNode.ddMsg.partitionRecords[partitionID] = append(ddNode.ddMsg.partitionRecords[partitionID],
|
||||
ddNode.ddMsg.partitionRecords[partitionName] = append(ddNode.ddMsg.partitionRecords[partitionName],
|
||||
metaOperateRecord{
|
||||
createOrDrop: true,
|
||||
timestamp: msg.Base.Timestamp,
|
||||
|
@ -147,16 +154,22 @@ func (ddNode *ddNode) createPartition(msg *msgstream.CreatePartitionMsg) {
|
|||
|
||||
func (ddNode *ddNode) dropPartition(msg *msgstream.DropPartitionMsg) {
|
||||
collectionID := msg.CollectionID
|
||||
partitionID := msg.PartitionID
|
||||
partitionName := msg.PartitionName
|
||||
|
||||
ddNode.ddMsg.partitionRecords[partitionID] = append(ddNode.ddMsg.partitionRecords[partitionID],
|
||||
//err := ddNode.replica.removePartition(collectionID, partitionTag)
|
||||
//if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
//}
|
||||
|
||||
ddNode.ddMsg.partitionRecords[partitionName] = append(ddNode.ddMsg.partitionRecords[partitionName],
|
||||
metaOperateRecord{
|
||||
createOrDrop: false,
|
||||
timestamp: msg.Base.Timestamp,
|
||||
})
|
||||
|
||||
ddNode.ddMsg.gcRecord.partitions = append(ddNode.ddMsg.gcRecord.partitions, partitionWithID{
|
||||
partitionID: partitionID,
|
||||
partitionTag: partitionName,
|
||||
collectionID: collectionID,
|
||||
})
|
||||
}
|
||||
|
|
|
@ -37,7 +37,7 @@ func (gcNode *gcNode) Operate(in []*Msg) []*Msg {
|
|||
|
||||
// drop partitions
|
||||
for _, partition := range gcMsg.gcRecord.partitions {
|
||||
err := gcNode.replica.removePartition(partition.collectionID, partition.partitionID)
|
||||
err := gcNode.replica.removePartition(partition.collectionID, partition.partitionTag)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
|
|
|
@ -81,7 +81,7 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
|
|||
|
||||
// check if segment exists, if not, create this segment
|
||||
if !iNode.replica.hasSegment(task.SegmentID) {
|
||||
err := iNode.replica.addSegment(task.SegmentID, task.PartitionID, task.CollectionID, segTypeGrowing)
|
||||
err := iNode.replica.addSegment2(task.SegmentID, task.PartitionName, task.CollectionID, segTypeGrowing)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
continue
|
||||
|
|
|
@ -15,7 +15,7 @@ type key2SegMsg struct {
|
|||
|
||||
type ddMsg struct {
|
||||
collectionRecords map[UniqueID][]metaOperateRecord
|
||||
partitionRecords map[UniqueID][]metaOperateRecord
|
||||
partitionRecords map[string][]metaOperateRecord
|
||||
gcRecord *gcRecord
|
||||
timeRange TimeRange
|
||||
}
|
||||
|
@ -63,16 +63,17 @@ type DeletePreprocessData struct {
|
|||
count int32
|
||||
}
|
||||
|
||||
// TODO: delete collection id
|
||||
// TODO: replace partitionWithID by partition id
|
||||
type partitionWithID struct {
|
||||
partitionID UniqueID
|
||||
partitionTag string
|
||||
collectionID UniqueID
|
||||
}
|
||||
|
||||
type gcRecord struct {
|
||||
// collections and partitions to be dropped
|
||||
collections []UniqueID
|
||||
partitions []partitionWithID
|
||||
// TODO: use partition id
|
||||
partitions []partitionWithID
|
||||
}
|
||||
|
||||
func (ksMsg *key2SegMsg) TimeTick() Timestamp {
|
||||
|
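These hunks key the flowgraph's partition bookkeeping by partition tag (a string) instead of partition ID, and carry the tag through partitionWithID so the gc node can later drop the right partition. A self-contained sketch of the same record-keeping, with simplified stand-in types (metaOperateRecord fields and the droppedPartitions slice here are assumptions, not the repo's exact shapes):

package main

import "fmt"

// metaOperateRecord and partitionWithTag are simplified stand-ins for the
// metaOperateRecord / partitionWithID types shown in this diff.
type metaOperateRecord struct {
	createOrDrop bool // true = create, false = drop
	timestamp    uint64
}

type partitionWithTag struct {
	partitionTag string
	collectionID int64
}

type ddMsg struct {
	partitionRecords  map[string][]metaOperateRecord // keyed by tag, not by ID
	droppedPartitions []partitionWithTag             // consumed later by the gc node
}

func (m *ddMsg) createPartition(collectionID int64, tag string, ts uint64) {
	m.partitionRecords[tag] = append(m.partitionRecords[tag],
		metaOperateRecord{createOrDrop: true, timestamp: ts})
}

func (m *ddMsg) dropPartition(collectionID int64, tag string, ts uint64) {
	m.partitionRecords[tag] = append(m.partitionRecords[tag],
		metaOperateRecord{createOrDrop: false, timestamp: ts})
	m.droppedPartitions = append(m.droppedPartitions,
		partitionWithTag{partitionTag: tag, collectionID: collectionID})
}

func main() {
	msg := &ddMsg{partitionRecords: make(map[string][]metaOperateRecord)}
	msg.createPartition(0, "p0", 1000)
	msg.dropPartition(0, "p0", 2000)
	fmt.Println(len(msg.partitionRecords["p0"]), len(msg.droppedPartitions)) // 2 1
}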
|
|
@ -5,11 +5,11 @@ import (
|
|||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
|
||||
)
|
||||
|
||||
func (dsService *dataSyncService) newDmInputNode(ctx context.Context) *flowgraph.InputNode {
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
receiveBufSize := Params.InsertReceiveBufSize
|
||||
pulsarBufSize := Params.InsertPulsarBufSize
|
||||
|
||||
|
@ -18,10 +18,9 @@ func (dsService *dataSyncService) newDmInputNode(ctx context.Context) *flowgraph
|
|||
consumeChannels := Params.InsertChannelNames
|
||||
consumeSubName := Params.MsgChannelSubName
|
||||
|
||||
insertStream := pulsarms.NewPulsarTtMsgStream(ctx, receiveBufSize)
|
||||
insertStream := pulsarms.NewPulsarTtMsgStream(ctx, receiveBufSize, pulsarBufSize, factory.NewUnmarshalDispatcher())
|
||||
insertStream.SetPulsarClient(msgStreamURL)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
insertStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
|
||||
insertStream.CreatePulsarConsumers(consumeChannels, consumeSubName)
|
||||
|
||||
var stream msgstream.MsgStream = insertStream
|
||||
dsService.dmStream = stream
|
||||
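This is the core of the msgstream interface change: the unmarshal dispatcher is produced once by a ProtoUDFactory and passed to the stream constructor together with the pulsar buffer size, so CreatePulsarConsumers no longer takes a dispatcher or buffer-size argument. A sketch of the new construction pattern, based only on the calls visible in this diff (the helper name, parameter types and buffer-size values are assumptions):

// Assumed imports from this repository:
//   "context"
//   "github.com/zilliztech/milvus-distributed/internal/msgstream"
//   "github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
func newTtInputStream(ctx context.Context, pulsarAddress string,
	channels []string, subName string,
	receiveBufSize int64, pulsarBufSize int64) msgstream.MsgStream {

	factory := msgstream.ProtoUDFactory{}
	// the dispatcher now comes from the factory and is handed to the constructor
	stream := pulsarms.NewPulsarTtMsgStream(ctx, receiveBufSize, pulsarBufSize, factory.NewUnmarshalDispatcher())
	stream.SetPulsarClient(pulsarAddress)
	// consumers no longer receive a dispatcher or a pulsar buffer size
	stream.CreatePulsarConsumers(channels, subName)
	return stream
}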
|
@ -34,6 +33,7 @@ func (dsService *dataSyncService) newDmInputNode(ctx context.Context) *flowgraph
|
|||
}
|
||||
|
||||
func (dsService *dataSyncService) newDDInputNode(ctx context.Context) *flowgraph.InputNode {
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
receiveBufSize := Params.DDReceiveBufSize
|
||||
pulsarBufSize := Params.DDPulsarBufSize
|
||||
|
||||
|
@ -42,10 +42,9 @@ func (dsService *dataSyncService) newDDInputNode(ctx context.Context) *flowgraph
|
|||
consumeChannels := Params.DDChannelNames
|
||||
consumeSubName := Params.MsgChannelSubName
|
||||
|
||||
ddStream := pulsarms.NewPulsarTtMsgStream(ctx, receiveBufSize)
|
||||
ddStream := pulsarms.NewPulsarTtMsgStream(ctx, receiveBufSize, pulsarBufSize, factory.NewUnmarshalDispatcher())
|
||||
ddStream.SetPulsarClient(msgStreamURL)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
ddStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
|
||||
ddStream.CreatePulsarConsumers(consumeChannels, consumeSubName)
|
||||
|
||||
var stream msgstream.MsgStream = ddStream
|
||||
dsService.ddStream = stream
|
||||
|
|
|
@ -78,7 +78,8 @@ func newServiceTimeNode(ctx context.Context, replica collectionReplica) *service
|
|||
baseNode.SetMaxQueueLength(maxQueueLength)
|
||||
baseNode.SetMaxParallelism(maxParallelism)
|
||||
|
||||
timeTimeMsgStream := pulsarms.NewPulsarMsgStream(ctx, Params.SearchReceiveBufSize)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
timeTimeMsgStream := pulsarms.NewPulsarMsgStream(ctx, Params.SearchReceiveBufSize, 1024, factory.NewUnmarshalDispatcher())
|
||||
timeTimeMsgStream.SetPulsarClient(Params.PulsarAddress)
|
||||
timeTimeMsgStream.CreatePulsarProducers([]string{Params.QueryTimeTickChannelName})
|
||||
|
||||
|
|
|
@ -18,9 +18,7 @@ import (
|
|||
minioKV "github.com/zilliztech/milvus-distributed/internal/kv/minio"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
|
@ -91,7 +89,7 @@ import (
|
|||
// SourceID: 0,
|
||||
// },
|
||||
// CollectionID: UniqueID(collectionID),
|
||||
// PartitionID: defaultPartitionID,
|
||||
// PartitionName: "default",
|
||||
// SegmentID: segmentID,
|
||||
// ChannelID: "0",
|
||||
// Timestamps: timestamps,
|
||||
|
@ -175,6 +173,8 @@ import (
|
|||
// log.Print("marshal placeholderGroup failed")
|
||||
// }
|
||||
// query := milvuspb.SearchRequest{
|
||||
// CollectionName: "collection0",
|
||||
// PartitionNames: []string{"default"},
|
||||
// Dsl: dslString,
|
||||
// PlaceholderGroup: placeGroupByte,
|
||||
// }
|
||||
|
@ -425,7 +425,7 @@ import (
|
|||
// SourceID: 0,
|
||||
// },
|
||||
// CollectionID: UniqueID(collectionID),
|
||||
// PartitionID: defaultPartitionID,
|
||||
// PartitionName: "default",
|
||||
// SegmentID: segmentID,
|
||||
// ChannelID: "0",
|
||||
// Timestamps: timestamps,
|
||||
|
@ -498,6 +498,8 @@ import (
|
|||
// log.Print("marshal placeholderGroup failed")
|
||||
// }
|
||||
// query := milvuspb.SearchRequest{
|
||||
// CollectionName: "collection0",
|
||||
// PartitionNames: []string{"default"},
|
||||
// Dsl: dslString,
|
||||
// PlaceholderGroup: placeGroupByte,
|
||||
// }
|
||||
|
@ -672,72 +674,6 @@ import (
|
|||
//}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
func genETCDCollectionMeta(collectionID UniqueID, isBinary bool) *etcdpb.CollectionMeta {
|
||||
var fieldVec schemapb.FieldSchema
|
||||
if isBinary {
|
||||
fieldVec = schemapb.FieldSchema{
|
||||
FieldID: UniqueID(100),
|
||||
Name: "vec",
|
||||
IsPrimaryKey: false,
|
||||
DataType: schemapb.DataType_VECTOR_BINARY,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "128",
|
||||
},
|
||||
},
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "metric_type",
|
||||
Value: "JACCARD",
|
||||
},
|
||||
},
|
||||
}
|
||||
} else {
|
||||
fieldVec = schemapb.FieldSchema{
|
||||
FieldID: UniqueID(100),
|
||||
Name: "vec",
|
||||
IsPrimaryKey: false,
|
||||
DataType: schemapb.DataType_VECTOR_FLOAT,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "16",
|
||||
},
|
||||
},
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "metric_type",
|
||||
Value: "L2",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fieldInt := schemapb.FieldSchema{
|
||||
FieldID: UniqueID(101),
|
||||
Name: "age",
|
||||
IsPrimaryKey: false,
|
||||
DataType: schemapb.DataType_INT32,
|
||||
}
|
||||
|
||||
schema := schemapb.CollectionSchema{
|
||||
AutoID: true,
|
||||
Fields: []*schemapb.FieldSchema{
|
||||
&fieldVec, &fieldInt,
|
||||
},
|
||||
}
|
||||
|
||||
collectionMeta := etcdpb.CollectionMeta{
|
||||
ID: collectionID,
|
||||
Schema: &schema,
|
||||
CreateTime: Timestamp(0),
|
||||
PartitionIDs: []UniqueID{defaultPartitionID},
|
||||
}
|
||||
|
||||
return &collectionMeta
|
||||
}
|
||||
|
||||
func generateInsertBinLog(collectionID UniqueID, partitionID UniqueID, segmentID UniqueID, keyPrefix string) ([]*internalpb2.StringList, []int64, error) {
|
||||
const (
|
||||
msgLength = 1000
|
||||
|
@ -789,7 +725,7 @@ func generateInsertBinLog(collectionID UniqueID, partitionID UniqueID, segmentID
|
|||
}
|
||||
|
||||
// buffer data to binLogs
|
||||
collMeta := genETCDCollectionMeta(collectionID, false)
|
||||
collMeta := genTestCollectionMeta(collectionID, false)
|
||||
collMeta.Schema.Fields = append(collMeta.Schema.Fields, &schemapb.FieldSchema{
|
||||
FieldID: 0,
|
||||
Name: "uid",
|
||||
|
@ -934,7 +870,7 @@ func generateIndex(segmentID UniqueID) ([]string, error) {
|
|||
return indexPaths, nil
|
||||
}
|
||||
|
||||
func doInsert(ctx context.Context, collectionID UniqueID, partitionID UniqueID, segmentID UniqueID) error {
|
||||
func doInsert(ctx context.Context, collectionID UniqueID, partitionTag string, segmentID UniqueID) error {
|
||||
const msgLength = 1000
|
||||
const DIM = 16
|
||||
|
||||
|
@ -970,12 +906,12 @@ func doInsert(ctx context.Context, collectionID UniqueID, partitionID UniqueID,
|
|||
Timestamp: uint64(i + 1000),
|
||||
SourceID: 0,
|
||||
},
|
||||
CollectionID: collectionID,
|
||||
PartitionID: partitionID,
|
||||
SegmentID: segmentID,
|
||||
ChannelID: "0",
|
||||
Timestamps: []uint64{uint64(i + 1000)},
|
||||
RowIDs: []int64{int64(i)},
|
||||
CollectionID: collectionID,
|
||||
PartitionName: partitionTag,
|
||||
SegmentID: segmentID,
|
||||
ChannelID: "0",
|
||||
Timestamps: []uint64{uint64(i + 1000)},
|
||||
RowIDs: []int64{int64(i)},
|
||||
RowData: []*commonpb.Blob{
|
||||
{Value: rawData},
|
||||
},
|
||||
|
@ -1017,13 +953,13 @@ func doInsert(ctx context.Context, collectionID UniqueID, partitionID UniqueID,
|
|||
ddChannels := Params.DDChannelNames
|
||||
pulsarURL := Params.PulsarAddress
|
||||
|
||||
insertStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
insertStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
|
||||
insertStream.SetPulsarClient(pulsarURL)
|
||||
insertStream.CreatePulsarProducers(insertChannels)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
insertStream.CreatePulsarConsumers(insertChannels, Params.MsgChannelSubName, unmarshalDispatcher, receiveBufSize)
|
||||
insertStream.CreatePulsarConsumers(insertChannels, Params.MsgChannelSubName)
|
||||
|
||||
ddStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
ddStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
|
||||
ddStream.SetPulsarClient(pulsarURL)
|
||||
ddStream.CreatePulsarProducers(ddChannels)
|
||||
|
||||
|
@ -1077,13 +1013,13 @@ func sentTimeTick(ctx context.Context) error {
|
|||
ddChannels := Params.DDChannelNames
|
||||
pulsarURL := Params.PulsarAddress
|
||||
|
||||
insertStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
factory := msgstream.ProtoUDFactory{}
|
||||
insertStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
|
||||
insertStream.SetPulsarClient(pulsarURL)
|
||||
insertStream.CreatePulsarProducers(insertChannels)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
insertStream.CreatePulsarConsumers(insertChannels, Params.MsgChannelSubName, unmarshalDispatcher, receiveBufSize)
|
||||
insertStream.CreatePulsarConsumers(insertChannels, Params.MsgChannelSubName)
|
||||
|
||||
ddStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
ddStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
|
||||
ddStream.SetPulsarClient(pulsarURL)
|
||||
ddStream.CreatePulsarProducers(ddChannels)
|
||||
|
||||
|
|
|
@ -6,14 +6,16 @@ import (
|
|||
"log"
|
||||
"path"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
"go.etcd.io/etcd/mvcc/mvccpb"
|
||||
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -89,7 +91,23 @@ func isSegmentObj(key string) bool {
|
|||
return index == 0
|
||||
}
|
||||
|
||||
func printCollectionStruct(obj *etcdpb.CollectionInfo) {
|
||||
func isSegmentChannelRangeInQueryNodeChannelRange(segment *etcdpb.SegmentMeta) bool {
|
||||
if segment.ChannelStart > segment.ChannelEnd {
|
||||
log.Printf("Illegal segment channel range")
|
||||
return false
|
||||
}
|
||||
|
||||
var queryNodeChannelStart = Params.InsertChannelRange[0]
|
||||
var queryNodeChannelEnd = Params.InsertChannelRange[1]
|
||||
|
||||
if segment.ChannelStart >= int32(queryNodeChannelStart) && segment.ChannelEnd <= int32(queryNodeChannelEnd) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func printCollectionStruct(obj *etcdpb.CollectionMeta) {
|
||||
v := reflect.ValueOf(obj)
|
||||
v = reflect.Indirect(v)
|
||||
typeOfS := v.Type()
|
||||
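isSegmentChannelRangeInQueryNodeChannelRange, added above, keeps a query node from loading segments whose insert-channel range lies outside the node's own range (Params.InsertChannelRange). A minimal standalone version of the same inclusive containment check, with placeholder ranges:

package main

import (
	"fmt"
	"log"
)

// channelRangeContains reports whether [segStart, segEnd] lies inside
// [nodeStart, nodeEnd]; it mirrors the check added in this hunk.
func channelRangeContains(segStart, segEnd, nodeStart, nodeEnd int32) bool {
	if segStart > segEnd {
		log.Printf("illegal segment channel range [%d, %d]", segStart, segEnd)
		return false
	}
	return segStart >= nodeStart && segEnd <= nodeEnd
}

func main() {
	// assume this query node owns channels 0..127 (placeholder range)
	fmt.Println(channelRangeContains(0, 1, 0, 127))     // true
	fmt.Println(channelRangeContains(128, 256, 0, 127)) // false
}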
|
@ -102,7 +120,7 @@ func printCollectionStruct(obj *etcdpb.CollectionInfo) {
|
|||
}
|
||||
}
|
||||
|
||||
func printSegmentStruct(obj *datapb.SegmentInfo) {
|
||||
func printSegmentStruct(obj *etcdpb.SegmentMeta) {
|
||||
v := reflect.ValueOf(obj)
|
||||
v = reflect.Indirect(v)
|
||||
typeOfS := v.Type()
|
||||
|
@ -122,8 +140,8 @@ func (mService *metaService) processCollectionCreate(id string, value string) {
|
|||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
for _, partitionID := range col.PartitionIDs {
|
||||
err = mService.replica.addPartition(col.ID, partitionID)
|
||||
for _, partitionTag := range col.PartitionTags {
|
||||
err = mService.replica.addPartition2(col.ID, partitionTag)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
|
@ -135,11 +153,14 @@ func (mService *metaService) processSegmentCreate(id string, value string) {
|
|||
//println("Create Segment: ", id)
|
||||
|
||||
seg := mService.segmentUnmarshal(value)
|
||||
if !isSegmentChannelRangeInQueryNodeChannelRange(seg) {
|
||||
log.Println("Illegal segment channel range")
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: what if seg == nil? We need to notify master and return rpc request failed
|
||||
if seg != nil {
|
||||
// TODO: get partition id from segment meta
|
||||
err := mService.replica.addSegment(seg.SegmentID, seg.PartitionID, seg.CollectionID, segTypeGrowing)
|
||||
err := mService.replica.addSegment2(seg.SegmentID, seg.PartitionTag, seg.CollectionID, segTypeGrowing)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
|
@ -160,6 +181,122 @@ func (mService *metaService) processCreate(key string, msg string) {
|
|||
}
|
||||
}
|
||||
|
||||
func (mService *metaService) processSegmentModify(id string, value string) {
|
||||
seg := mService.segmentUnmarshal(value)
|
||||
|
||||
if !isSegmentChannelRangeInQueryNodeChannelRange(seg) {
|
||||
return
|
||||
}
|
||||
|
||||
if seg != nil {
|
||||
targetSegment, err := mService.replica.getSegmentByID(seg.SegmentID)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: do modify
|
||||
fmt.Println(targetSegment)
|
||||
}
|
||||
}
|
||||
|
||||
func (mService *metaService) processCollectionModify(id string, value string) {
|
||||
//println("Modify Collection: ", id)
|
||||
|
||||
col := mService.collectionUnmarshal(value)
|
||||
if col != nil {
|
||||
err := mService.replica.addPartitionsByCollectionMeta(col)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
err = mService.replica.removePartitionsByCollectionMeta(col)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mService *metaService) processModify(key string, msg string) {
|
||||
if isCollectionObj(key) {
|
||||
objID := GetCollectionObjID(key)
|
||||
mService.processCollectionModify(objID, msg)
|
||||
} else if isSegmentObj(key) {
|
||||
objID := GetSegmentObjID(key)
|
||||
mService.processSegmentModify(objID, msg)
|
||||
} else {
|
||||
println("can not process modify msg:", key)
|
||||
}
|
||||
}
|
||||
|
||||
func (mService *metaService) processSegmentDelete(id string) {
|
||||
//println("Delete segment: ", id)
|
||||
|
||||
var segmentID, err = strconv.ParseInt(id, 10, 64)
|
||||
if err != nil {
|
||||
log.Println("Cannot parse segment id:" + id)
|
||||
}
|
||||
|
||||
err = mService.replica.removeSegment(segmentID)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (mService *metaService) processCollectionDelete(id string) {
|
||||
//println("Delete collection: ", id)
|
||||
|
||||
var collectionID, err = strconv.ParseInt(id, 10, 64)
|
||||
if err != nil {
|
||||
log.Println("Cannot parse collection id:" + id)
|
||||
}
|
||||
|
||||
err = mService.replica.removeCollection(collectionID)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (mService *metaService) processDelete(key string) {
|
||||
//println("process delete")
|
||||
|
||||
if isCollectionObj(key) {
|
||||
objID := GetCollectionObjID(key)
|
||||
mService.processCollectionDelete(objID)
|
||||
} else if isSegmentObj(key) {
|
||||
objID := GetSegmentObjID(key)
|
||||
mService.processSegmentDelete(objID)
|
||||
} else {
|
||||
println("can not process delete msg:", key)
|
||||
}
|
||||
}
|
||||
|
||||
func (mService *metaService) processResp(resp clientv3.WatchResponse) error {
|
||||
err := resp.Err()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, ev := range resp.Events {
|
||||
if ev.IsCreate() {
|
||||
key := string(ev.Kv.Key)
|
||||
msg := string(ev.Kv.Value)
|
||||
mService.processCreate(key, msg)
|
||||
} else if ev.IsModify() {
|
||||
key := string(ev.Kv.Key)
|
||||
msg := string(ev.Kv.Value)
|
||||
mService.processModify(key, msg)
|
||||
} else if ev.Type == mvccpb.DELETE {
|
||||
key := string(ev.Kv.Key)
|
||||
mService.processDelete(key)
|
||||
} else {
|
||||
println("Unrecognized etcd msg!")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mService *metaService) loadCollections() error {
|
||||
keys, values, err := mService.kvBase.LoadWithPrefix(CollectionPrefix)
|
||||
if err != nil {
|
||||
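processResp, together with the new processModify and processDelete paths, is a dispatcher over etcd watch events: creates, modifications and deletions of collection and segment keys are routed to the matching handler. A self-contained sketch of that dispatch loop against go.etcd.io/etcd/clientv3 (the endpoint, key prefix and handler bodies are placeholders):

package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/mvcc/mvccpb"
)

// watchMeta routes each watch event to a create/modify/delete branch,
// mirroring the processResp dispatch in this hunk.
func watchMeta(ctx context.Context, cli *clientv3.Client, prefix string) {
	for resp := range cli.Watch(ctx, prefix, clientv3.WithPrefix()) {
		if err := resp.Err(); err != nil {
			log.Println(err)
			return
		}
		for _, ev := range resp.Events {
			key, val := string(ev.Kv.Key), string(ev.Kv.Value)
			switch {
			case ev.IsCreate():
				log.Println("create:", key, val) // processCreate(key, val)
			case ev.IsModify():
				log.Println("modify:", key, val) // processModify(key, val)
			case ev.Type == mvccpb.DELETE:
				log.Println("delete:", key) // processDelete(key)
			default:
				log.Println("unrecognized etcd event:", key)
			}
		}
	}
}

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()
	watchMeta(context.Background(), cli, "/by-dev/meta/") // placeholder prefix
}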
|
@ -189,8 +326,8 @@ func (mService *metaService) loadSegments() error {
|
|||
}
|
||||
|
||||
//----------------------------------------------------------------------- Unmarshal and Marshal
|
||||
func (mService *metaService) collectionUnmarshal(value string) *etcdpb.CollectionInfo {
|
||||
col := etcdpb.CollectionInfo{}
|
||||
func (mService *metaService) collectionUnmarshal(value string) *etcdpb.CollectionMeta {
|
||||
col := etcdpb.CollectionMeta{}
|
||||
err := proto.UnmarshalText(value, &col)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
|
@ -199,7 +336,7 @@ func (mService *metaService) collectionUnmarshal(value string) *etcdpb.Collectio
|
|||
return &col
|
||||
}
|
||||
|
||||
func (mService *metaService) collectionMarshal(col *etcdpb.CollectionInfo) string {
|
||||
func (mService *metaService) collectionMarshal(col *etcdpb.CollectionMeta) string {
|
||||
value := proto.MarshalTextString(col)
|
||||
if value == "" {
|
||||
log.Println("marshal collection failed")
|
||||
|
@ -208,8 +345,8 @@ func (mService *metaService) collectionMarshal(col *etcdpb.CollectionInfo) strin
|
|||
return value
|
||||
}
|
||||
|
||||
func (mService *metaService) segmentUnmarshal(value string) *datapb.SegmentInfo {
|
||||
seg := datapb.SegmentInfo{}
|
||||
func (mService *metaService) segmentUnmarshal(value string) *etcdpb.SegmentMeta {
|
||||
seg := etcdpb.SegmentMeta{}
|
||||
err := proto.UnmarshalText(value, &seg)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
|
|
|
@ -1,11 +1,12 @@
|
|||
package querynode
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
)
|
||||
|
||||
func TestMetaService_start(t *testing.T) {
|
||||
|
@ -64,6 +65,36 @@ func TestMetaService_isSegmentObj(t *testing.T) {
|
|||
assert.Equal(t, b2, false)
|
||||
}
|
||||
|
||||
func TestMetaService_isSegmentChannelRangeInQueryNodeChannelRange(t *testing.T) {
|
||||
var s = etcdpb.SegmentMeta{
|
||||
SegmentID: UniqueID(0),
|
||||
CollectionID: UniqueID(0),
|
||||
PartitionTag: "partition0",
|
||||
ChannelStart: 0,
|
||||
ChannelEnd: 1,
|
||||
OpenTime: Timestamp(0),
|
||||
CloseTime: Timestamp(math.MaxUint64),
|
||||
NumRows: UniqueID(0),
|
||||
}
|
||||
|
||||
var b = isSegmentChannelRangeInQueryNodeChannelRange(&s)
|
||||
assert.Equal(t, b, true)
|
||||
|
||||
s = etcdpb.SegmentMeta{
|
||||
SegmentID: UniqueID(0),
|
||||
CollectionID: UniqueID(0),
|
||||
PartitionTag: "partition0",
|
||||
ChannelStart: 128,
|
||||
ChannelEnd: 256,
|
||||
OpenTime: Timestamp(0),
|
||||
CloseTime: Timestamp(math.MaxUint64),
|
||||
NumRows: UniqueID(0),
|
||||
}
|
||||
|
||||
b = isSegmentChannelRangeInQueryNodeChannelRange(&s)
|
||||
assert.Equal(t, b, false)
|
||||
}
|
||||
|
||||
func TestMetaService_printCollectionStruct(t *testing.T) {
|
||||
collectionID := UniqueID(0)
|
||||
collectionMeta := genTestCollectionMeta(collectionID, false)
|
||||
|
@ -71,11 +102,14 @@ func TestMetaService_printCollectionStruct(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestMetaService_printSegmentStruct(t *testing.T) {
|
||||
var s = datapb.SegmentInfo{
|
||||
var s = etcdpb.SegmentMeta{
|
||||
SegmentID: UniqueID(0),
|
||||
CollectionID: UniqueID(0),
|
||||
PartitionID: defaultPartitionID,
|
||||
PartitionTag: "partition0",
|
||||
ChannelStart: 128,
|
||||
ChannelEnd: 256,
|
||||
OpenTime: Timestamp(0),
|
||||
CloseTime: Timestamp(math.MaxUint64),
|
||||
NumRows: UniqueID(0),
|
||||
}
|
||||
|
||||
|
@ -112,7 +146,8 @@ func TestMetaService_processCollectionCreate(t *testing.T) {
|
|||
>
|
||||
>
|
||||
>
|
||||
partitionIDs: 2021
|
||||
segmentIDs: 0
|
||||
partition_tags: "default"
|
||||
`
|
||||
|
||||
node.metaService.processCollectionCreate(id, value)
|
||||
|
@ -133,7 +168,10 @@ func TestMetaService_processSegmentCreate(t *testing.T) {
|
|||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
||||
id := "0"
|
||||
value := `partitionID: 2021
|
||||
value := `partition_tag: "default"
|
||||
channel_start: 0
|
||||
channel_end: 1
|
||||
close_time: 18446744073709551615
|
||||
`
|
||||
|
||||
(*node.metaService).processSegmentCreate(id, value)
|
||||
|
@ -174,7 +212,8 @@ func TestMetaService_processCreate(t *testing.T) {
|
|||
>
|
||||
>
|
||||
>
|
||||
partitionIDs: 2021
|
||||
segmentIDs: 0
|
||||
partition_tags: "default"
|
||||
`
|
||||
|
||||
(*node.metaService).processCreate(key1, msg1)
|
||||
|
@ -186,7 +225,10 @@ func TestMetaService_processCreate(t *testing.T) {
|
|||
assert.Equal(t, collection.ID(), UniqueID(0))
|
||||
|
||||
key2 := Params.MetaRootPath + "/segment/0"
|
||||
msg2 := `partitionID: 2021
|
||||
msg2 := `partition_tag: "default"
|
||||
channel_start: 0
|
||||
channel_end: 1
|
||||
close_time: 18446744073709551615
|
||||
`
|
||||
|
||||
(*node.metaService).processCreate(key2, msg2)
|
||||
|
@ -196,6 +238,430 @@ func TestMetaService_processCreate(t *testing.T) {
|
|||
node.Stop()
|
||||
}
|
||||
|
||||
func TestMetaService_processSegmentModify(t *testing.T) {
|
||||
node := newQueryNodeMock()
|
||||
collectionID := UniqueID(0)
|
||||
segmentID := UniqueID(0)
|
||||
initTestMeta(t, node, collectionID, segmentID)
|
||||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
||||
id := "0"
|
||||
value := `partition_tag: "default"
|
||||
channel_start: 0
|
||||
channel_end: 1
|
||||
close_time: 18446744073709551615
|
||||
`
|
||||
|
||||
(*node.metaService).processSegmentCreate(id, value)
|
||||
s, err := node.replica.getSegmentByID(segmentID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, s.segmentID, segmentID)
|
||||
|
||||
newValue := `partition_tag: "default"
|
||||
channel_start: 0
|
||||
channel_end: 1
|
||||
close_time: 18446744073709551615
|
||||
`
|
||||
|
||||
// TODO: modify segment for testing processCollectionModify
|
||||
(*node.metaService).processSegmentModify(id, newValue)
|
||||
seg, err := node.replica.getSegmentByID(segmentID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, seg.segmentID, segmentID)
|
||||
node.Stop()
|
||||
}
|
||||
|
||||
func TestMetaService_processCollectionModify(t *testing.T) {
|
||||
node := newQueryNodeMock()
|
||||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
||||
id := "0"
|
||||
value := `schema: <
|
||||
name: "test"
|
||||
fields: <
|
||||
fieldID:100
|
||||
name: "vec"
|
||||
data_type: VECTOR_FLOAT
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "16"
|
||||
>
|
||||
index_params: <
|
||||
key: "metric_type"
|
||||
value: "L2"
|
||||
>
|
||||
>
|
||||
fields: <
|
||||
fieldID:101
|
||||
name: "age"
|
||||
data_type: INT32
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "1"
|
||||
>
|
||||
>
|
||||
>
|
||||
segmentIDs: 0
|
||||
partition_tags: "p0"
|
||||
partition_tags: "p1"
|
||||
partition_tags: "p2"
|
||||
`
|
||||
|
||||
(*node.metaService).processCollectionCreate(id, value)
|
||||
collectionNum := node.replica.getCollectionNum()
|
||||
assert.Equal(t, collectionNum, 1)
|
||||
|
||||
collection, err := node.replica.getCollectionByID(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, collection.ID(), UniqueID(0))
|
||||
|
||||
partitionNum, err := node.replica.getPartitionNum(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, partitionNum, 3)
|
||||
|
||||
hasPartition := node.replica.hasPartition(UniqueID(0), "p0")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
|
||||
assert.Equal(t, hasPartition, false)
|
||||
|
||||
newValue := `schema: <
|
||||
name: "test"
|
||||
fields: <
|
||||
fieldID:100
|
||||
name: "vec"
|
||||
data_type: VECTOR_FLOAT
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "16"
|
||||
>
|
||||
index_params: <
|
||||
key: "metric_type"
|
||||
value: "L2"
|
||||
>
|
||||
>
|
||||
fields: <
|
||||
fieldID:101
|
||||
name: "age"
|
||||
data_type: INT32
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "1"
|
||||
>
|
||||
>
|
||||
>
|
||||
segmentIDs: 0
|
||||
partition_tags: "p1"
|
||||
partition_tags: "p2"
|
||||
partition_tags: "p3"
|
||||
`
|
||||
|
||||
(*node.metaService).processCollectionModify(id, newValue)
|
||||
collection, err = node.replica.getCollectionByID(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, collection.ID(), UniqueID(0))
|
||||
|
||||
partitionNum, err = node.replica.getPartitionNum(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, partitionNum, 3)
|
||||
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p0")
|
||||
assert.Equal(t, hasPartition, false)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
node.Stop()
|
||||
}
|
||||
|
||||
func TestMetaService_processModify(t *testing.T) {
|
||||
node := newQueryNodeMock()
|
||||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
||||
key1 := Params.MetaRootPath + "/collection/0"
|
||||
msg1 := `schema: <
|
||||
name: "test"
|
||||
fields: <
|
||||
fieldID:100
|
||||
name: "vec"
|
||||
data_type: VECTOR_FLOAT
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "16"
|
||||
>
|
||||
index_params: <
|
||||
key: "metric_type"
|
||||
value: "L2"
|
||||
>
|
||||
>
|
||||
fields: <
|
||||
fieldID:101
|
||||
name: "age"
|
||||
data_type: INT32
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "1"
|
||||
>
|
||||
>
|
||||
>
|
||||
segmentIDs: 0
|
||||
partition_tags: "p0"
|
||||
partition_tags: "p1"
|
||||
partition_tags: "p2"
|
||||
`
|
||||
|
||||
(*node.metaService).processCreate(key1, msg1)
|
||||
collectionNum := node.replica.getCollectionNum()
|
||||
assert.Equal(t, collectionNum, 1)
|
||||
|
||||
collection, err := node.replica.getCollectionByID(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, collection.ID(), UniqueID(0))
|
||||
|
||||
partitionNum, err := node.replica.getPartitionNum(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, partitionNum, 3)
|
||||
|
||||
hasPartition := node.replica.hasPartition(UniqueID(0), "p0")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
|
||||
assert.Equal(t, hasPartition, false)
|
||||
|
||||
key2 := Params.MetaRootPath + "/segment/0"
|
||||
msg2 := `partition_tag: "p1"
|
||||
channel_start: 0
|
||||
channel_end: 1
|
||||
close_time: 18446744073709551615
|
||||
`
|
||||
|
||||
(*node.metaService).processCreate(key2, msg2)
|
||||
s, err := node.replica.getSegmentByID(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, s.segmentID, UniqueID(0))
|
||||
|
||||
// modify
|
||||
// TODO: use different index for testing processCollectionModify
|
||||
msg3 := `schema: <
|
||||
name: "test"
|
||||
fields: <
|
||||
fieldID:100
|
||||
name: "vec"
|
||||
data_type: VECTOR_FLOAT
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "16"
|
||||
>
|
||||
index_params: <
|
||||
key: "metric_type"
|
||||
value: "L2"
|
||||
>
|
||||
>
|
||||
fields: <
|
||||
fieldID:101
|
||||
name: "age"
|
||||
data_type: INT32
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "1"
|
||||
>
|
||||
>
|
||||
>
|
||||
segmentIDs: 0
|
||||
partition_tags: "p1"
|
||||
partition_tags: "p2"
|
||||
partition_tags: "p3"
|
||||
`
|
||||
|
||||
(*node.metaService).processModify(key1, msg3)
|
||||
collection, err = node.replica.getCollectionByID(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, collection.ID(), UniqueID(0))
|
||||
|
||||
partitionNum, err = node.replica.getPartitionNum(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, partitionNum, 3)
|
||||
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p0")
|
||||
assert.Equal(t, hasPartition, false)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p1")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p2")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
hasPartition = node.replica.hasPartition(UniqueID(0), "p3")
|
||||
assert.Equal(t, hasPartition, true)
|
||||
|
||||
msg4 := `partition_tag: "p1"
|
||||
channel_start: 0
|
||||
channel_end: 1
|
||||
close_time: 18446744073709551615
|
||||
`
|
||||
|
||||
(*node.metaService).processModify(key2, msg4)
|
||||
seg, err := node.replica.getSegmentByID(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, seg.segmentID, UniqueID(0))
|
||||
node.Stop()
|
||||
}
|
||||
|
||||
func TestMetaService_processSegmentDelete(t *testing.T) {
|
||||
node := newQueryNodeMock()
|
||||
collectionID := UniqueID(0)
|
||||
initTestMeta(t, node, collectionID, 0)
|
||||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
||||
id := "0"
|
||||
value := `partition_tag: "default"
|
||||
channel_start: 0
|
||||
channel_end: 1
|
||||
close_time: 18446744073709551615
|
||||
`
|
||||
|
||||
(*node.metaService).processSegmentCreate(id, value)
|
||||
seg, err := node.replica.getSegmentByID(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, seg.segmentID, UniqueID(0))
|
||||
|
||||
(*node.metaService).processSegmentDelete("0")
|
||||
mapSize := node.replica.getSegmentNum()
|
||||
assert.Equal(t, mapSize, 0)
|
||||
node.Stop()
|
||||
}
|
||||
|
||||
func TestMetaService_processCollectionDelete(t *testing.T) {
|
||||
node := newQueryNodeMock()
|
||||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
||||
id := "0"
|
||||
value := `schema: <
|
||||
name: "test"
|
||||
fields: <
|
||||
fieldID:100
|
||||
name: "vec"
|
||||
data_type: VECTOR_FLOAT
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "16"
|
||||
>
|
||||
index_params: <
|
||||
key: "metric_type"
|
||||
value: "L2"
|
||||
>
|
||||
>
|
||||
fields: <
|
||||
fieldID:101
|
||||
name: "age"
|
||||
data_type: INT32
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "1"
|
||||
>
|
||||
>
|
||||
>
|
||||
segmentIDs: 0
|
||||
partition_tags: "default"
|
||||
`
|
||||
|
||||
(*node.metaService).processCollectionCreate(id, value)
|
||||
collectionNum := node.replica.getCollectionNum()
|
||||
assert.Equal(t, collectionNum, 1)
|
||||
|
||||
collection, err := node.replica.getCollectionByID(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, collection.ID(), UniqueID(0))
|
||||
|
||||
(*node.metaService).processCollectionDelete(id)
|
||||
collectionNum = node.replica.getCollectionNum()
|
||||
assert.Equal(t, collectionNum, 0)
|
||||
node.Stop()
|
||||
}
|
||||
|
||||
func TestMetaService_processDelete(t *testing.T) {
|
||||
node := newQueryNodeMock()
|
||||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
||||
key1 := Params.MetaRootPath + "/collection/0"
|
||||
msg1 := `schema: <
|
||||
name: "test"
|
||||
fields: <
|
||||
fieldID:100
|
||||
name: "vec"
|
||||
data_type: VECTOR_FLOAT
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "16"
|
||||
>
|
||||
index_params: <
|
||||
key: "metric_type"
|
||||
value: "L2"
|
||||
>
|
||||
>
|
||||
fields: <
|
||||
fieldID:101
|
||||
name: "age"
|
||||
data_type: INT32
|
||||
type_params: <
|
||||
key: "dim"
|
||||
value: "1"
|
||||
>
|
||||
>
|
||||
>
|
||||
segmentIDs: 0
|
||||
partition_tags: "default"
|
||||
`
|
||||
|
||||
(*node.metaService).processCreate(key1, msg1)
|
||||
collectionNum := node.replica.getCollectionNum()
|
||||
assert.Equal(t, collectionNum, 1)
|
||||
|
||||
collection, err := node.replica.getCollectionByID(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, collection.ID(), UniqueID(0))
|
||||
|
||||
key2 := Params.MetaRootPath + "/segment/0"
|
||||
msg2 := `partition_tag: "default"
|
||||
channel_start: 0
|
||||
channel_end: 1
|
||||
close_time: 18446744073709551615
|
||||
`
|
||||
|
||||
(*node.metaService).processCreate(key2, msg2)
|
||||
seg, err := node.replica.getSegmentByID(UniqueID(0))
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, seg.segmentID, UniqueID(0))
|
||||
|
||||
(*node.metaService).processDelete(key1)
|
||||
collectionsSize := node.replica.getCollectionNum()
|
||||
assert.Equal(t, collectionsSize, 0)
|
||||
|
||||
mapSize := node.replica.getSegmentNum()
|
||||
assert.Equal(t, mapSize, 0)
|
||||
node.Stop()
|
||||
}
|
||||
|
||||
func TestMetaService_processResp(t *testing.T) {
|
||||
node := newQueryNodeMock()
|
||||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
||||
metaChan := (*node.metaService).kvBase.WatchWithPrefix("")
|
||||
|
||||
select {
|
||||
case <-node.queryNodeLoopCtx.Done():
|
||||
return
|
||||
case resp := <-metaChan:
|
||||
_ = (*node.metaService).processResp(resp)
|
||||
}
|
||||
node.Stop()
|
||||
}
|
||||
|
||||
func TestMetaService_loadCollections(t *testing.T) {
|
||||
node := newQueryNodeMock()
|
||||
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
|
||||
|
|
|
@ -58,9 +58,10 @@ type ParamTable struct {
|
|||
StatsChannelName string
|
||||
StatsReceiveBufSize int64
|
||||
|
||||
GracefulTime int64
|
||||
MsgChannelSubName string
|
||||
SliceIndex int
|
||||
GracefulTime int64
|
||||
MsgChannelSubName string
|
||||
DefaultPartitionTag string
|
||||
SliceIndex int
|
||||
}
|
||||
|
||||
var Params ParamTable
|
||||
|
@ -132,6 +133,7 @@ func (p *ParamTable) Init() {
|
|||
|
||||
p.initGracefulTime()
|
||||
p.initMsgChannelSubName()
|
||||
p.initDefaultPartitionTag()
|
||||
p.initSliceIndex()
|
||||
|
||||
p.initFlowGraphMaxQueueLength()
|
||||
|
@ -456,6 +458,15 @@ func (p *ParamTable) initDDChannelNames() {
|
|||
p.DDChannelNames = ret
|
||||
}
|
||||
|
||||
func (p *ParamTable) initDefaultPartitionTag() {
|
||||
defaultTag, err := p.Load("common.defaultPartitionTag")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
p.DefaultPartitionTag = defaultTag
|
||||
}
|
||||
|
||||
func (p *ParamTable) initSliceIndex() {
|
||||
queryNodeID := p.QueryNodeID
|
||||
queryNodeIDList := p.QueryNodeIDList()
|
||||
|
|
|
@ -165,3 +165,8 @@ func TestParamTable_ddChannelName(t *testing.T) {
|
|||
contains := strings.Contains(names[0], "data-definition-0")
|
||||
assert.Equal(t, contains, true)
|
||||
}
|
||||
|
||||
func TestParamTable_defaultPartitionTag(t *testing.T) {
|
||||
tag := Params.DefaultPartitionTag
|
||||
assert.Equal(t, tag, "_default")
|
||||
}
|
||||
|
|
|
@ -13,19 +13,33 @@ package querynode
|
|||
import "C"
|
||||
|
||||
type Partition struct {
|
||||
id UniqueID
|
||||
segments []*Segment
|
||||
enableDM bool
|
||||
partitionTag string
|
||||
id UniqueID
|
||||
segments []*Segment
|
||||
enableDM bool
|
||||
}
|
||||
|
||||
func (p *Partition) ID() UniqueID {
|
||||
return p.id
|
||||
}
|
||||
|
||||
func (p *Partition) Tag() string {
|
||||
return (*p).partitionTag
|
||||
}
|
||||
|
||||
func (p *Partition) Segments() *[]*Segment {
|
||||
return &(*p).segments
|
||||
}
|
||||
|
||||
func newPartition2(partitionTag string) *Partition {
|
||||
var newPartition = &Partition{
|
||||
partitionTag: partitionTag,
|
||||
enableDM: false,
|
||||
}
|
||||
|
||||
return newPartition
|
||||
}
|
||||
|
||||
func newPartition(partitionID UniqueID) *Partition {
|
||||
var newPartition = &Partition{
|
||||
id: partitionID,
|
||||
|
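During this migration the Partition struct carries both a numeric id and a partitionTag, with newPartition2 building tag-identified partitions while the id-based constructor remains. A standalone sketch of that dual shape (the constructor names below are illustrative, not the repo's):

package main

import "fmt"

type Segment struct{ id int64 }

// Partition mirrors the shape in this hunk: a numeric id plus a tag,
// while both identification schemes coexist.
type Partition struct {
	id           int64
	partitionTag string
	segments     []*Segment
	enableDM     bool
}

func (p *Partition) ID() int64             { return p.id }
func (p *Partition) Tag() string           { return p.partitionTag }
func (p *Partition) Segments() *[]*Segment { return &p.segments }

// tag-based constructor (newPartition2 in the diff)
func newPartitionByTag(tag string) *Partition { return &Partition{partitionTag: tag} }

// id-based constructor (newPartition in the diff)
func newPartitionByID(id int64) *Partition { return &Partition{id: id} }

func main() {
	p := newPartitionByTag("default")
	*p.Segments() = append(*p.Segments(), &Segment{id: 1})
	fmt.Println(p.Tag(), len(p.segments)) // default 1
}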
|
|
@ -19,7 +19,7 @@ func TestPartition_Segments(t *testing.T) {
|
|||
|
||||
const segmentNum = 3
|
||||
for i := 0; i < segmentNum; i++ {
|
||||
err := node.replica.addSegment(UniqueID(i), targetPartition.ID(), collection.ID(), segTypeGrowing)
|
||||
err := node.replica.addSegment2(UniqueID(i), targetPartition.partitionTag, collection.ID(), segTypeGrowing)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
|
@ -28,7 +28,7 @@ func TestPartition_Segments(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestPartition_newPartition(t *testing.T) {
|
||||
partitionID := defaultPartitionID
|
||||
partition := newPartition(partitionID)
|
||||
assert.Equal(t, partition.ID(), defaultPartitionID)
|
||||
partitionTag := "default"
|
||||
partition := newPartition2(partitionTag)
|
||||
assert.Equal(t, partition.partitionTag, partitionTag)
|
||||
}
|
||||
|
|
|
@ -24,7 +24,6 @@ import (
|
|||
"github.com/uber/jaeger-client-go/config"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
|
@ -332,11 +331,9 @@ func (node *QueryNode) AddQueryChannel(in *queryPb.AddQueryChannelsRequest) (*co
|
|||
}
|
||||
|
||||
// add request channel
|
||||
pulsarBufSize := Params.SearchPulsarBufSize
|
||||
consumeChannels := []string{in.RequestChannelID}
|
||||
consumeSubName := Params.MsgChannelSubName
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
searchStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
|
||||
searchStream.CreatePulsarConsumers(consumeChannels, consumeSubName)
|
||||
|
||||
// add result channel
|
||||
producerChannels := []string{in.ResultChannelID}
|
||||
|
@ -382,12 +379,10 @@ func (node *QueryNode) RemoveQueryChannel(in *queryPb.RemoveQueryChannelsRequest
|
|||
}
|
||||
|
||||
// remove request channel
|
||||
pulsarBufSize := Params.SearchPulsarBufSize
|
||||
consumeChannels := []string{in.RequestChannelID}
|
||||
consumeSubName := Params.MsgChannelSubName
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
// TODO: searchStream.RemovePulsarConsumers(producerChannels)
|
||||
searchStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
|
||||
searchStream.CreatePulsarConsumers(consumeChannels, consumeSubName)
|
||||
|
||||
// remove result channel
|
||||
producerChannels := []string{in.ResultChannelID}
|
||||
|
@ -423,11 +418,9 @@ func (node *QueryNode) WatchDmChannels(in *queryPb.WatchDmChannelsRequest) (*com
|
|||
}
|
||||
|
||||
// add request channel
|
||||
pulsarBufSize := Params.SearchPulsarBufSize
|
||||
consumeChannels := in.ChannelIDs
|
||||
consumeSubName := Params.MsgChannelSubName
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
fgDMMsgStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
|
||||
fgDMMsgStream.CreatePulsarConsumers(consumeChannels, consumeSubName)
|
||||
|
||||
status := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
|
|
|
@ -20,8 +20,6 @@ import (
|
|||
const ctxTimeInMillisecond = 5000
|
||||
const closeWithDeadline = true
|
||||
|
||||
const defaultPartitionID = UniqueID(2021)
|
||||
|
||||
type queryServiceMock struct{}
|
||||
|
||||
func setup() {
|
||||
|
@ -29,7 +27,7 @@ func setup() {
|
|||
Params.MetaRootPath = "/etcd/test/root/querynode"
|
||||
}
|
||||
|
||||
func genTestCollectionMeta(collectionID UniqueID, isBinary bool) *etcdpb.CollectionInfo {
|
||||
func genTestCollectionMeta(collectionID UniqueID, isBinary bool) *etcdpb.CollectionMeta {
|
||||
var fieldVec schemapb.FieldSchema
|
||||
if isBinary {
|
||||
fieldVec = schemapb.FieldSchema{
|
||||
|
@ -78,18 +76,21 @@ func genTestCollectionMeta(collectionID UniqueID, isBinary bool) *etcdpb.Collect
|
|||
DataType: schemapb.DataType_INT32,
|
||||
}
|
||||
|
||||
collectionName := rand.Int63n(1000000)
|
||||
schema := schemapb.CollectionSchema{
|
||||
Name: "collection-" + strconv.FormatInt(collectionName, 10),
|
||||
AutoID: true,
|
||||
Fields: []*schemapb.FieldSchema{
|
||||
&fieldVec, &fieldInt,
|
||||
},
|
||||
}
|
||||
|
||||
collectionMeta := etcdpb.CollectionInfo{
|
||||
ID: collectionID,
|
||||
Schema: &schema,
|
||||
CreateTime: Timestamp(0),
|
||||
PartitionIDs: []UniqueID{defaultPartitionID},
|
||||
collectionMeta := etcdpb.CollectionMeta{
|
||||
ID: collectionID,
|
||||
Schema: &schema,
|
||||
CreateTime: Timestamp(0),
|
||||
SegmentIDs: []UniqueID{0},
|
||||
PartitionTags: []string{"default"},
|
||||
}
|
||||
|
||||
return &collectionMeta
|
||||
|
@ -110,10 +111,10 @@ func initTestMeta(t *testing.T, node *QueryNode, collectionID UniqueID, segmentI
|
|||
assert.Equal(t, collection.ID(), collectionID)
|
||||
assert.Equal(t, node.replica.getCollectionNum(), 1)
|
||||
|
||||
err = node.replica.addPartition(collection.ID(), collectionMeta.PartitionIDs[0])
|
||||
err = node.replica.addPartition2(collection.ID(), collectionMeta.PartitionTags[0])
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = node.replica.addSegment(segmentID, collectionMeta.PartitionIDs[0], collectionID, segTypeGrowing)
|
||||
err = node.replica.addSegment2(segmentID, collectionMeta.PartitionTags[0], collectionID, segTypeGrowing)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ func TestReduce_AllFunc(t *testing.T) {
|
|||
collectionMeta := genTestCollectionMeta(collectionID, false)
|
||||
|
||||
collection := newCollection(collectionMeta.ID, collectionMeta.Schema)
|
||||
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
|
||||
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
|
||||
|
||||
const DIM = 16
|
||||
var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
|
||||
|
|
|
@@ -16,7 +16,6 @@ import (

"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"

@@ -48,16 +47,17 @@ func newSearchService(ctx context.Context, replica collectionReplica) *searchSer

msgStreamURL := Params.PulsarAddress

factory := msgstream.ProtoUDFactory{}

consumeChannels := Params.SearchChannelNames
consumeSubName := Params.MsgChannelSubName
searchStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize)
searchStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize, pulsarBufSize, factory.NewUnmarshalDispatcher())
searchStream.SetPulsarClient(msgStreamURL)
unmarshalDispatcher := util.NewUnmarshalDispatcher()
searchStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
searchStream.CreatePulsarConsumers(consumeChannels, consumeSubName)
var inputStream msgstream.MsgStream = searchStream

producerChannels := Params.SearchResultChannelNames
searchResultStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize)
searchResultStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize, pulsarBufSize, factory.NewUnmarshalDispatcher())
searchResultStream.SetPulsarClient(msgStreamURL)
searchResultStream.CreatePulsarProducers(producerChannels)
var outputStream msgstream.MsgStream = searchResultStream
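For reference, a minimal sketch of how the consumer side is wired up after this interface change, using only calls that appear in the hunk above. The helper name, the int64 buffer-size parameters, and the use of the Params fields inside it are assumptions for illustration, not repository code.

// Sketch: the unmarshal dispatcher now comes from msgstream.ProtoUDFactory and
// is passed to the stream constructor, so CreatePulsarConsumers only needs the
// channel names and the subscription name.
func newSearchStreams(ctx context.Context, receiveBufSize, pulsarBufSize int64) (msgstream.MsgStream, msgstream.MsgStream) {
	factory := msgstream.ProtoUDFactory{}

	in := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize, pulsarBufSize, factory.NewUnmarshalDispatcher())
	in.SetPulsarClient(Params.PulsarAddress)
	in.CreatePulsarConsumers(Params.SearchChannelNames, Params.MsgChannelSubName)

	out := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize, pulsarBufSize, factory.NewUnmarshalDispatcher())
	out.SetPulsarClient(Params.PulsarAddress)
	out.CreatePulsarProducers(Params.SearchResultChannelNames)

	return in, out
}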
@@ -239,6 +239,7 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
return errors.New("unmarshal query failed")
}
collectionID := searchMsg.CollectionID
partitionTagsInQuery := query.PartitionNames
collection, err := ss.replica.getCollectionByID(collectionID)
if err != nil {
span.LogFields(oplog.Error(err))

@@ -262,30 +263,29 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
searchResults := make([]*SearchResult, 0)
matchedSegments := make([]*Segment, 0)

//fmt.Println("search msg's partitionID = ", partitionIDsInQuery)
//fmt.Println("search msg's partitionTag = ", partitionTagsInQuery)

var partitionIDsInCol []UniqueID
var partitionTagsInCol []string
for _, partition := range collection.partitions {
partitionID := partition.ID()
partitionIDsInCol = append(partitionIDsInCol, partitionID)
partitionTag := partition.partitionTag
partitionTagsInCol = append(partitionTagsInCol, partitionTag)
}
var searchPartitionIDs []UniqueID
partitionIDsInQuery := searchMsg.PartitionIDs
if len(partitionIDsInQuery) == 0 {
searchPartitionIDs = partitionIDsInCol
var searchPartitionTag []string
if len(partitionTagsInQuery) == 0 {
searchPartitionTag = partitionTagsInCol
} else {
for _, id := range partitionIDsInCol {
for _, toMatchID := range partitionIDsInQuery {
re := regexp.MustCompile("^" + strconv.FormatInt(toMatchID, 10) + "$")
if re.MatchString(strconv.FormatInt(id, 10)) {
searchPartitionIDs = append(searchPartitionIDs, id)
for _, tag := range partitionTagsInCol {
for _, toMatchTag := range partitionTagsInQuery {
re := regexp.MustCompile("^" + toMatchTag + "$")
if re.MatchString(tag) {
searchPartitionTag = append(searchPartitionTag, tag)
}
}
}
}

for _, partitionID := range searchPartitionIDs {
partition, _ := ss.replica.getPartitionByID(collectionID, partitionID)
for _, partitionTag := range searchPartitionTag {
partition, _ := ss.replica.getPartitionByTag(collectionID, partitionTag)
for _, segment := range partition.segments {
//fmt.Println("dsl = ", dsl)
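The hunk above switches from ID-based partition filtering to tag matching, where each requested partition name is treated as an anchored regular expression against the collection's partition tags. A small, self-contained sketch of just that filtering step; the function and variable names here are illustrative, not repository code.

package main

import (
	"fmt"
	"regexp"
)

// filterPartitionTags mirrors the loop in searchService.search: if the query
// names no partitions, every partition in the collection is searched;
// otherwise each requested name is matched as ^name$ against the collection's
// partition tags.
func filterPartitionTags(tagsInCollection, tagsInQuery []string) []string {
	if len(tagsInQuery) == 0 {
		return tagsInCollection
	}
	var matched []string
	for _, tag := range tagsInCollection {
		for _, toMatch := range tagsInQuery {
			re := regexp.MustCompile("^" + toMatch + "$")
			if re.MatchString(tag) {
				matched = append(matched, tag)
			}
		}
	}
	return matched
}

func main() {
	fmt.Println(filterPartitionTags([]string{"default", "p1"}, []string{"default"}))
	// Output: [default]
}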
@@ -61,6 +61,8 @@ func TestSearch_Search(t *testing.T) {
}

query := milvuspb.SearchRequest{
CollectionName: "collection0",
PartitionNames: []string{"default"},
Dsl: dslString,
PlaceholderGroup: placeGroupByte,
}

@@ -93,7 +95,8 @@ func TestSearch_Search(t *testing.T) {
msgPackSearch := msgstream.MsgPack{}
msgPackSearch.Msgs = append(msgPackSearch.Msgs, searchMsg)

searchStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
factory := msgstream.ProtoUDFactory{}
searchStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
searchStream.SetPulsarClient(pulsarURL)
searchStream.CreatePulsarProducers(searchProducerChannels)
searchStream.Start()

@@ -134,12 +137,12 @@ func TestSearch_Search(t *testing.T) {
Timestamp: uint64(10 + 1000),
SourceID: 0,
},
CollectionID: UniqueID(0),
PartitionID: defaultPartitionID,
SegmentID: int64(0),
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000)},
RowIDs: []int64{int64(i)},
CollectionID: UniqueID(0),
PartitionName: "default",
SegmentID: int64(0),
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000)},
RowIDs: []int64{int64(i)},
RowData: []*commonpb.Blob{
{Value: rawData},
},

@@ -179,11 +182,11 @@ func TestSearch_Search(t *testing.T) {
insertChannels := Params.InsertChannelNames
ddChannels := Params.DDChannelNames

insertStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
insertStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
insertStream.SetPulsarClient(pulsarURL)
insertStream.CreatePulsarProducers(insertChannels)

ddStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
ddStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
ddStream.SetPulsarClient(pulsarURL)
ddStream.CreatePulsarProducers(ddChannels)

@@ -253,6 +256,8 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
}

query := milvuspb.SearchRequest{
CollectionName: "collection0",
PartitionNames: []string{"default"},
Dsl: dslString,
PlaceholderGroup: placeGroupByte,
}

@@ -285,7 +290,8 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
msgPackSearch := msgstream.MsgPack{}
msgPackSearch.Msgs = append(msgPackSearch.Msgs, searchMsg)

searchStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
factory := msgstream.ProtoUDFactory{}
searchStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
searchStream.SetPulsarClient(pulsarURL)
searchStream.CreatePulsarProducers(searchProducerChannels)
searchStream.Start()

@@ -330,12 +336,12 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
Timestamp: uint64(i + 1000),
SourceID: 0,
},
CollectionID: UniqueID(0),
PartitionID: defaultPartitionID,
SegmentID: int64(segmentID),
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000)},
RowIDs: []int64{int64(i)},
CollectionID: UniqueID(0),
PartitionName: "default",
SegmentID: int64(segmentID),
ChannelID: "0",
Timestamps: []uint64{uint64(i + 1000)},
RowIDs: []int64{int64(i)},
RowData: []*commonpb.Blob{
{Value: rawData},
},

@@ -375,11 +381,11 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
insertChannels := Params.InsertChannelNames
ddChannels := Params.DDChannelNames

insertStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
insertStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
insertStream.SetPulsarClient(pulsarURL)
insertStream.CreatePulsarProducers(insertChannels)

ddStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
ddStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
ddStream.SetPulsarClient(pulsarURL)
ddStream.CreatePulsarProducers(ddChannels)
@@ -36,6 +36,7 @@ type Segment struct {
segmentPtr C.CSegmentInterface

segmentID UniqueID
partitionTag string // TODO: use partitionID
partitionID UniqueID
collectionID UniqueID
lastMemSize int64

@@ -80,6 +81,25 @@ func (s *Segment) getType() segmentType {
return s.segmentType
}

func newSegment2(collection *Collection, segmentID int64, partitionTag string, collectionID UniqueID, segType segmentType) *Segment {
/*
CSegmentInterface
NewSegment(CCollection collection, uint64_t segment_id, SegmentType seg_type);
*/
initIndexParam := make(map[int64]indexParam)
segmentPtr := C.NewSegment(collection.collectionPtr, C.ulong(segmentID), segType)
var newSegment = &Segment{
segmentPtr: segmentPtr,
segmentType: segType,
segmentID: segmentID,
partitionTag: partitionTag,
collectionID: collectionID,
indexParam: initIndexParam,
}

return newSegment
}

func newSegment(collection *Collection, segmentID int64, partitionID UniqueID, collectionID UniqueID, segType segmentType) *Segment {
/*
CSegmentInterface
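A minimal usage sketch of the added newSegment2 constructor, modeled directly on the test updates below; the collection setup and Params.DefaultPartitionTag come from those tests, and this is illustrative rather than repository code.

// Sketch: create a growing segment keyed by partition tag, then release it.
collectionMeta := genTestCollectionMeta(collectionID, false)
collection := newCollection(collectionMeta.ID, collectionMeta.Schema)

segment := newSegment2(collection, UniqueID(0), Params.DefaultPartitionTag, collectionID, segTypeGrowing)
// ... insert into / search the segment ...
deleteSegment(segment)
deleteCollection(collection)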
@@ -22,7 +22,7 @@ func TestSegment_newSegment(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)
deleteSegment(segment)
deleteCollection(collection)

@@ -36,7 +36,7 @@ func TestSegment_deleteSegment(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)

deleteSegment(segment)

@@ -52,7 +52,7 @@ func TestSegment_getRowCount(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)

ids := []int64{1, 2, 3}

@@ -99,7 +99,7 @@ func TestSegment_getDeletedCount(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)

ids := []int64{1, 2, 3}

@@ -152,7 +152,7 @@ func TestSegment_getMemSize(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)

ids := []int64{1, 2, 3}

@@ -199,7 +199,7 @@ func TestSegment_segmentInsert(t *testing.T) {
collection := newCollection(collectionMeta.ID, collectionMeta.Schema)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)

ids := []int64{1, 2, 3}

@@ -242,7 +242,7 @@ func TestSegment_segmentDelete(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)

ids := []int64{1, 2, 3}

@@ -291,7 +291,7 @@ func TestSegment_segmentSearch(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)

ids := []int64{1, 2, 3}

@@ -372,7 +372,7 @@ func TestSegment_segmentPreInsert(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)

const DIM = 16

@@ -410,7 +410,7 @@ func TestSegment_segmentPreDelete(t *testing.T) {
assert.Equal(t, collection.ID(), collectionID)

segmentID := UniqueID(0)
segment := newSegment(collection, segmentID, defaultPartitionID, collectionID, segTypeGrowing)
segment := newSegment2(collection, segmentID, Params.DefaultPartitionTag, collectionID, segTypeGrowing)
assert.Equal(t, segmentID, segment.segmentID)

ids := []int64{1, 2, 3}
@@ -42,7 +42,8 @@ func (sService *statsService) start() {
msgStreamURL := Params.PulsarAddress
producerChannels := []string{Params.StatsChannelName}

statsStream := pulsarms.NewPulsarMsgStream(sService.ctx, receiveBufSize)
factory := msgstream.ProtoUDFactory{}
statsStream := pulsarms.NewPulsarMsgStream(sService.ctx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
statsStream.SetPulsarClient(msgStreamURL)
statsStream.CreatePulsarProducers(producerChannels)
@@ -27,7 +27,8 @@ func TestSegmentManagement_sendSegmentStatistic(t *testing.T) {

pulsarURL := Params.PulsarAddress

statsStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
factory := msgstream.ProtoUDFactory{}
statsStream := pulsarms.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize, 1024, factory.NewUnmarshalDispatcher())
statsStream.SetPulsarClient(pulsarURL)
statsStream.CreatePulsarProducers(producerChannels)
@@ -1,13 +1,19 @@
package rocksmq

var rmq *RocksMQ
var Rmq *RocksMQ

type Consumer struct {
GroupName string
ChannelName string
MsgNum chan int
}

func InitRmq(rocksdbName string, idAllocator IDAllocator) error {
var err error
rmq, err = NewRocksMQ(rocksdbName, idAllocator)
Rmq, err = NewRocksMQ(rocksdbName, idAllocator)
return err
}

func GetRmq() *RocksMQ {
return rmq
return Rmq
}
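A rough sketch of how the newly exported pieces are meant to be used from another package; the rocksdb path and the idAllocator value are placeholders, and this is illustrative rather than repository code.

// Sketch: the package-level instance is now exported as Rmq, initialized once
// via InitRmq and reachable either directly or through GetRmq().
if err := rocksmq.InitRmq("/tmp/milvus_rmq", idAllocator); err != nil { // path is a placeholder
	panic(err)
}
server := rocksmq.GetRmq() // same *RocksMQ as rocksmq.Rmq
_ = server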
@@ -55,7 +55,7 @@ type ProducerMessage struct {

type ConsumerMessage struct {
msgID UniqueID
payload []byte
Payload []byte
}

type Channel struct {

@@ -75,6 +75,8 @@ type RocksMQ struct {
idAllocator IDAllocator
produceMu sync.Mutex
consumeMu sync.Mutex

notify map[string][]Consumer
//ctx context.Context
//serverLoopWg sync.WaitGroup
//serverLoopCtx context.Context

@@ -105,9 +107,16 @@ func NewRocksMQ(name string, idAllocator IDAllocator) (*RocksMQ, error) {
idAllocator: idAllocator,
}
rmq.channels = make(map[string]*Channel)
rmq.notify = make(map[string][]Consumer)
return rmq, nil
}

func NewProducerMessage(data []byte) *ProducerMessage {
return &ProducerMessage{
payload: data,
}
}

func (rmq *RocksMQ) checkKeyExist(key string) bool {
val, _ := rmq.kv.Load(key)
return val != ""

@@ -228,7 +237,15 @@ func (rmq *RocksMQ) Produce(channelName string, messages []ProducerMessage) erro
kvChannelEndID := channelName + "/end_id"
kvValues[kvChannelEndID] = strconv.FormatInt(idEnd, 10)

return rmq.kv.MultiSave(kvValues)
err = rmq.kv.MultiSave(kvValues)
if err != nil {
return err
}

for _, consumer := range rmq.notify[channelName] {
consumer.MsgNum <- msgLen
}
return nil
}

func (rmq *RocksMQ) Consume(groupName string, channelName string, n int) ([]ConsumerMessage, error) {
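With this change, Produce persists the batch and then pushes the batch size to every Consumer registered for the channel, so a consumer can block on its MsgNum channel instead of polling. A hedged sketch of that hand-off follows; how a Consumer gets appended to rmq.notify is not shown in this diff, so that registration step is an assumption.

// Sketch: a consumer waits on MsgNum and drains exactly the number of
// messages that Produce reported for its channel.
consumer := rocksmq.Consumer{
	GroupName:   "group0",
	ChannelName: "channel0",
	MsgNum:      make(chan int),
}
// Assumed: the server appends `consumer` to rmq.notify["channel0"].

go func() {
	for n := range consumer.MsgNum { // Produce pushes msgLen for this channel
		msgs, err := rocksmq.Rmq.Consume(consumer.GroupName, consumer.ChannelName, n)
		if err != nil {
			continue
		}
		for _, m := range msgs {
			_ = m.Payload // payload field is now exported
		}
	}
}()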
@@ -274,7 +291,7 @@ func (rmq *RocksMQ) Consume(groupName string, channelName string, n int) ([]Cons
}
msg := ConsumerMessage{
msgID: msgID,
payload: val.Data(),
Payload: val.Data(),
}
consumerMessage = append(consumerMessage, msg)
key.Free()
@@ -66,13 +66,13 @@ func TestRocksMQ(t *testing.T) {
cMsgs, err := rmq.Consume(groupName, channelName, 1)
assert.Nil(t, err)
assert.Equal(t, len(cMsgs), 1)
assert.Equal(t, string(cMsgs[0].payload), "a_message")
assert.Equal(t, string(cMsgs[0].Payload), "a_message")

cMsgs, err = rmq.Consume(groupName, channelName, 2)
assert.Nil(t, err)
assert.Equal(t, len(cMsgs), 2)
assert.Equal(t, string(cMsgs[0].payload), "b_message")
assert.Equal(t, string(cMsgs[1].payload), "c_message")
assert.Equal(t, string(cMsgs[0].Payload), "b_message")
assert.Equal(t, string(cMsgs[1].Payload), "c_message")
}

func TestRocksMQ_Loop(t *testing.T) {

@@ -127,15 +127,15 @@ func TestRocksMQ_Loop(t *testing.T) {
cMsgs, err := rmq.Consume(groupName, channelName, loopNum)
assert.Nil(t, err)
assert.Equal(t, len(cMsgs), loopNum)
assert.Equal(t, string(cMsgs[0].payload), "message_"+strconv.Itoa(0))
assert.Equal(t, string(cMsgs[loopNum-1].payload), "message_"+strconv.Itoa(loopNum-1))
assert.Equal(t, string(cMsgs[0].Payload), "message_"+strconv.Itoa(0))
assert.Equal(t, string(cMsgs[loopNum-1].Payload), "message_"+strconv.Itoa(loopNum-1))

// Consume one message once
for i := 0; i < loopNum; i++ {
oneMsgs, err := rmq.Consume(groupName, channelName, 1)
assert.Nil(t, err)
assert.Equal(t, len(oneMsgs), 1)
assert.Equal(t, string(oneMsgs[0].payload), "message_"+strconv.Itoa(i+loopNum))
assert.Equal(t, string(oneMsgs[0].Payload), "message_"+strconv.Itoa(i+loopNum))
}

cMsgs, err = rmq.Consume(groupName, channelName, 1)
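The test changes above boil down to the following round trip, reading the payload through the now-exported field. In this sketch, rmq stands for the *RocksMQ instance from the test setup, and it assumes the channel and consumer group have already been created as in that setup; the channel and group names are illustrative.

// Sketch: produce three messages, then consume and read Payload.
pMsgs := []rocksmq.ProducerMessage{
	*rocksmq.NewProducerMessage([]byte("a_message")),
	*rocksmq.NewProducerMessage([]byte("b_message")),
	*rocksmq.NewProducerMessage([]byte("c_message")),
}
if err := rmq.Produce("channel0", pMsgs); err != nil {
	panic(err)
}

cMsgs, err := rmq.Consume("group0", "channel0", 3)
if err != nil {
	panic(err)
}
for _, m := range cMsgs {
	_ = string(m.Payload) // "a_message", "b_message", "c_message"
}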