Add ddService, fieldID, and requestID; refactor the params table; fix a bug where search results could contain nil hits

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
pull/4973/head^2
bigsheeper 2020-12-10 16:31:09 +08:00 committed by yefu.chen
parent 9585819154
commit 762b1e1f1f
48 changed files with 1312 additions and 653 deletions

View File

@ -16,7 +16,7 @@ import (
func main() {
querynode.Init()
fmt.Println("QueryNodeID is", querynode.Params.QueryNodeID())
fmt.Println("QueryNodeID is", querynode.Params.QueryNodeID)
// Creates server.
ctx, cancel := context.WithCancel(context.Background())
svr := querynode.NewQueryNode(ctx, 0)

View File

@ -24,6 +24,10 @@ queryNode:
recvBufSize: 1024 # msgPack chan buffer size
pulsarBufSize: 1024 # pulsar chan buffer size
dataDefinition:
recvBufSize: 64 # msgPack chan buffer size
pulsarBufSize: 64 # pulsar chan buffer size
delete:
#streamBufSize: 1024 # msgPack chan buffer size
recvBufSize: 1024 # msgPack chan buffer size

View File

@ -71,6 +71,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_schema_2eproto::offsets[] PROT
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, fieldid_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, name_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, is_primary_key_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, description_),
@ -89,7 +90,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_schema_2eproto::offsets[] PROT
};
static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
{ 0, -1, sizeof(::milvus::proto::schema::FieldSchema)},
{ 11, -1, sizeof(::milvus::proto::schema::CollectionSchema)},
{ 12, -1, sizeof(::milvus::proto::schema::CollectionSchema)},
};
static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
@ -99,21 +100,22 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] =
const char descriptor_table_protodef_schema_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) =
"\n\014schema.proto\022\023milvus.proto.schema\032\014com"
"mon.proto\"\353\001\n\013FieldSchema\022\014\n\004name\030\001 \001(\t\022"
"\026\n\016is_primary_key\030\002 \001(\010\022\023\n\013description\030\003"
" \001(\t\0220\n\tdata_type\030\004 \001(\0162\035.milvus.proto.s"
"chema.DataType\0226\n\013type_params\030\005 \003(\0132!.mi"
"lvus.proto.common.KeyValuePair\0227\n\014index_"
"mon.proto\"\374\001\n\013FieldSchema\022\017\n\007fieldID\030\001 \001"
"(\003\022\014\n\004name\030\002 \001(\t\022\026\n\016is_primary_key\030\003 \001(\010"
"\022\023\n\013description\030\004 \001(\t\0220\n\tdata_type\030\005 \001(\016"
"2\035.milvus.proto.schema.DataType\0226\n\013type_"
"params\030\006 \003(\0132!.milvus.proto.common.KeyVa"
"luePair\"w\n\020CollectionSchema\022\014\n\004name\030\001 \001("
"\t\022\023\n\013description\030\002 \001(\t\022\016\n\006autoID\030\003 \001(\010\0220"
"\n\006fields\030\004 \003(\0132 .milvus.proto.schema.Fie"
"ldSchema*\221\001\n\010DataType\022\010\n\004NONE\020\000\022\010\n\004BOOL\020"
"\001\022\010\n\004INT8\020\002\022\t\n\005INT16\020\003\022\t\n\005INT32\020\004\022\t\n\005INT"
"64\020\005\022\t\n\005FLOAT\020\n\022\n\n\006DOUBLE\020\013\022\n\n\006STRING\020\024\022"
"\021\n\rVECTOR_BINARY\020d\022\020\n\014VECTOR_FLOAT\020eBBZ@"
"github.com/zilliztech/milvus-distributed"
"/internal/proto/schemapbb\006proto3"
"luePair\0227\n\014index_params\030\007 \003(\0132!.milvus.p"
"roto.common.KeyValuePair\"w\n\020CollectionSc"
"hema\022\014\n\004name\030\001 \001(\t\022\023\n\013description\030\002 \001(\t\022"
"\016\n\006autoID\030\003 \001(\010\0220\n\006fields\030\004 \003(\0132 .milvus"
".proto.schema.FieldSchema*\221\001\n\010DataType\022\010"
"\n\004NONE\020\000\022\010\n\004BOOL\020\001\022\010\n\004INT8\020\002\022\t\n\005INT16\020\003\022"
"\t\n\005INT32\020\004\022\t\n\005INT64\020\005\022\t\n\005FLOAT\020\n\022\n\n\006DOUB"
"LE\020\013\022\n\n\006STRING\020\024\022\021\n\rVECTOR_BINARY\020d\022\020\n\014V"
"ECTOR_FLOAT\020eBBZ@github.com/zilliztech/m"
"ilvus-distributed/internal/proto/schemap"
"bb\006proto3"
;
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_schema_2eproto_deps[1] = {
&::descriptor_table_common_2eproto,
@ -125,7 +127,7 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_sch
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_schema_2eproto_once;
static bool descriptor_table_schema_2eproto_initialized = false;
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_schema_2eproto = {
&descriptor_table_schema_2eproto_initialized, descriptor_table_protodef_schema_2eproto, "schema.proto", 632,
&descriptor_table_schema_2eproto_initialized, descriptor_table_protodef_schema_2eproto, "schema.proto", 649,
&descriptor_table_schema_2eproto_once, descriptor_table_schema_2eproto_sccs, descriptor_table_schema_2eproto_deps, 2, 1,
schemas, file_default_instances, TableStruct_schema_2eproto::offsets,
file_level_metadata_schema_2eproto, 2, file_level_enum_descriptors_schema_2eproto, file_level_service_descriptors_schema_2eproto,
@ -193,9 +195,9 @@ FieldSchema::FieldSchema(const FieldSchema& from)
if (!from.description().empty()) {
description_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.description_);
}
::memcpy(&is_primary_key_, &from.is_primary_key_,
::memcpy(&fieldid_, &from.fieldid_,
static_cast<size_t>(reinterpret_cast<char*>(&data_type_) -
reinterpret_cast<char*>(&is_primary_key_)) + sizeof(data_type_));
reinterpret_cast<char*>(&fieldid_)) + sizeof(data_type_));
// @@protoc_insertion_point(copy_constructor:milvus.proto.schema.FieldSchema)
}
@ -203,9 +205,9 @@ void FieldSchema::SharedCtor() {
::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_FieldSchema_schema_2eproto.base);
name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
description_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
::memset(&is_primary_key_, 0, static_cast<size_t>(
::memset(&fieldid_, 0, static_cast<size_t>(
reinterpret_cast<char*>(&data_type_) -
reinterpret_cast<char*>(&is_primary_key_)) + sizeof(data_type_));
reinterpret_cast<char*>(&fieldid_)) + sizeof(data_type_));
}
FieldSchema::~FieldSchema() {
@ -237,9 +239,9 @@ void FieldSchema::Clear() {
index_params_.Clear();
name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
description_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
::memset(&is_primary_key_, 0, static_cast<size_t>(
::memset(&fieldid_, 0, static_cast<size_t>(
reinterpret_cast<char*>(&data_type_) -
reinterpret_cast<char*>(&is_primary_key_)) + sizeof(data_type_));
reinterpret_cast<char*>(&fieldid_)) + sizeof(data_type_));
_internal_metadata_.Clear();
}
@ -251,57 +253,64 @@ const char* FieldSchema::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID
ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
CHK_(ptr);
switch (tag >> 3) {
// string name = 1;
// int64 fieldID = 1;
case 1:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 8)) {
fieldid_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
CHK_(ptr);
} else goto handle_unusual;
continue;
// string name = 2;
case 2:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_name(), ptr, ctx, "milvus.proto.schema.FieldSchema.name");
CHK_(ptr);
} else goto handle_unusual;
continue;
// bool is_primary_key = 2;
case 2:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 16)) {
// bool is_primary_key = 3;
case 3:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 24)) {
is_primary_key_ = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
CHK_(ptr);
} else goto handle_unusual;
continue;
// string description = 3;
case 3:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
// string description = 4;
case 4:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) {
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_description(), ptr, ctx, "milvus.proto.schema.FieldSchema.description");
CHK_(ptr);
} else goto handle_unusual;
continue;
// .milvus.proto.schema.DataType data_type = 4;
case 4:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 32)) {
// .milvus.proto.schema.DataType data_type = 5;
case 5:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 40)) {
::PROTOBUF_NAMESPACE_ID::uint64 val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
CHK_(ptr);
set_data_type(static_cast<::milvus::proto::schema::DataType>(val));
} else goto handle_unusual;
continue;
// repeated .milvus.proto.common.KeyValuePair type_params = 5;
case 5:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 42)) {
// repeated .milvus.proto.common.KeyValuePair type_params = 6;
case 6:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) {
ptr -= 1;
do {
ptr += 1;
ptr = ctx->ParseMessage(add_type_params(), ptr);
CHK_(ptr);
if (!ctx->DataAvailable(ptr)) break;
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 42);
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 50);
} else goto handle_unusual;
continue;
// repeated .milvus.proto.common.KeyValuePair index_params = 6;
case 6:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 50)) {
// repeated .milvus.proto.common.KeyValuePair index_params = 7;
case 7:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 58)) {
ptr -= 1;
do {
ptr += 1;
ptr = ctx->ParseMessage(add_index_params(), ptr);
CHK_(ptr);
if (!ctx->DataAvailable(ptr)) break;
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 50);
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 58);
} else goto handle_unusual;
continue;
default: {
@ -334,9 +343,22 @@ bool FieldSchema::MergePartialFromCodedStream(
tag = p.first;
if (!p.second) goto handle_unusual;
switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
// string name = 1;
// int64 fieldID = 1;
case 1: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (8 & 0xFF)) {
DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
::PROTOBUF_NAMESPACE_ID::int64, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_INT64>(
input, &fieldid_)));
} else {
goto handle_unusual;
}
break;
}
// string name = 2;
case 2: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
input, this->mutable_name()));
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
@ -349,9 +371,9 @@ bool FieldSchema::MergePartialFromCodedStream(
break;
}
// bool is_primary_key = 2;
case 2: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (16 & 0xFF)) {
// bool is_primary_key = 3;
case 3: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (24 & 0xFF)) {
DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
bool, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_BOOL>(
@ -362,9 +384,9 @@ bool FieldSchema::MergePartialFromCodedStream(
break;
}
// string description = 3;
case 3: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
// string description = 4;
case 4: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (34 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
input, this->mutable_description()));
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
@ -377,9 +399,9 @@ bool FieldSchema::MergePartialFromCodedStream(
break;
}
// .milvus.proto.schema.DataType data_type = 4;
case 4: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (32 & 0xFF)) {
// .milvus.proto.schema.DataType data_type = 5;
case 5: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (40 & 0xFF)) {
int value = 0;
DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
int, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_ENUM>(
@ -391,9 +413,9 @@ bool FieldSchema::MergePartialFromCodedStream(
break;
}
// repeated .milvus.proto.common.KeyValuePair type_params = 5;
case 5: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (42 & 0xFF)) {
// repeated .milvus.proto.common.KeyValuePair type_params = 6;
case 6: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (50 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
input, add_type_params()));
} else {
@ -402,9 +424,9 @@ bool FieldSchema::MergePartialFromCodedStream(
break;
}
// repeated .milvus.proto.common.KeyValuePair index_params = 6;
case 6: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (50 & 0xFF)) {
// repeated .milvus.proto.common.KeyValuePair index_params = 7;
case 7: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (58 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
input, add_index_params()));
} else {
@ -440,51 +462,56 @@ void FieldSchema::SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
// string name = 1;
// int64 fieldID = 1;
if (this->fieldid() != 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64(1, this->fieldid(), output);
}
// string name = 2;
if (this->name().size() > 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->name().data(), static_cast<int>(this->name().length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.schema.FieldSchema.name");
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
1, this->name(), output);
2, this->name(), output);
}
// bool is_primary_key = 2;
// bool is_primary_key = 3;
if (this->is_primary_key() != 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(2, this->is_primary_key(), output);
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(3, this->is_primary_key(), output);
}
// string description = 3;
// string description = 4;
if (this->description().size() > 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->description().data(), static_cast<int>(this->description().length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.schema.FieldSchema.description");
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
3, this->description(), output);
4, this->description(), output);
}
// .milvus.proto.schema.DataType data_type = 4;
// .milvus.proto.schema.DataType data_type = 5;
if (this->data_type() != 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnum(
4, this->data_type(), output);
5, this->data_type(), output);
}
// repeated .milvus.proto.common.KeyValuePair type_params = 5;
// repeated .milvus.proto.common.KeyValuePair type_params = 6;
for (unsigned int i = 0,
n = static_cast<unsigned int>(this->type_params_size()); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
5,
6,
this->type_params(static_cast<int>(i)),
output);
}
// repeated .milvus.proto.common.KeyValuePair index_params = 6;
// repeated .milvus.proto.common.KeyValuePair index_params = 7;
for (unsigned int i = 0,
n = static_cast<unsigned int>(this->index_params_size()); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
6,
7,
this->index_params(static_cast<int>(i)),
output);
}
@ -502,7 +529,12 @@ void FieldSchema::SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
// string name = 1;
// int64 fieldID = 1;
if (this->fieldid() != 0) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteInt64ToArray(1, this->fieldid(), target);
}
// string name = 2;
if (this->name().size() > 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->name().data(), static_cast<int>(this->name().length()),
@ -510,15 +542,15 @@ void FieldSchema::SerializeWithCachedSizes(
"milvus.proto.schema.FieldSchema.name");
target =
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
1, this->name(), target);
2, this->name(), target);
}
// bool is_primary_key = 2;
// bool is_primary_key = 3;
if (this->is_primary_key() != 0) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(2, this->is_primary_key(), target);
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(3, this->is_primary_key(), target);
}
// string description = 3;
// string description = 4;
if (this->description().size() > 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->description().data(), static_cast<int>(this->description().length()),
@ -526,29 +558,29 @@ void FieldSchema::SerializeWithCachedSizes(
"milvus.proto.schema.FieldSchema.description");
target =
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
3, this->description(), target);
4, this->description(), target);
}
// .milvus.proto.schema.DataType data_type = 4;
// .milvus.proto.schema.DataType data_type = 5;
if (this->data_type() != 0) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnumToArray(
4, this->data_type(), target);
5, this->data_type(), target);
}
// repeated .milvus.proto.common.KeyValuePair type_params = 5;
// repeated .milvus.proto.common.KeyValuePair type_params = 6;
for (unsigned int i = 0,
n = static_cast<unsigned int>(this->type_params_size()); i < n; i++) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
InternalWriteMessageToArray(
5, this->type_params(static_cast<int>(i)), target);
6, this->type_params(static_cast<int>(i)), target);
}
// repeated .milvus.proto.common.KeyValuePair index_params = 6;
// repeated .milvus.proto.common.KeyValuePair index_params = 7;
for (unsigned int i = 0,
n = static_cast<unsigned int>(this->index_params_size()); i < n; i++) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
InternalWriteMessageToArray(
6, this->index_params(static_cast<int>(i)), target);
7, this->index_params(static_cast<int>(i)), target);
}
if (_internal_metadata_.have_unknown_fields()) {
@ -572,7 +604,7 @@ size_t FieldSchema::ByteSizeLong() const {
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
// repeated .milvus.proto.common.KeyValuePair type_params = 5;
// repeated .milvus.proto.common.KeyValuePair type_params = 6;
{
unsigned int count = static_cast<unsigned int>(this->type_params_size());
total_size += 1UL * count;
@ -583,7 +615,7 @@ size_t FieldSchema::ByteSizeLong() const {
}
}
// repeated .milvus.proto.common.KeyValuePair index_params = 6;
// repeated .milvus.proto.common.KeyValuePair index_params = 7;
{
unsigned int count = static_cast<unsigned int>(this->index_params_size());
total_size += 1UL * count;
@ -594,26 +626,33 @@ size_t FieldSchema::ByteSizeLong() const {
}
}
// string name = 1;
// string name = 2;
if (this->name().size() > 0) {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
this->name());
}
// string description = 3;
// string description = 4;
if (this->description().size() > 0) {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
this->description());
}
// bool is_primary_key = 2;
// int64 fieldID = 1;
if (this->fieldid() != 0) {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::Int64Size(
this->fieldid());
}
// bool is_primary_key = 3;
if (this->is_primary_key() != 0) {
total_size += 1 + 1;
}
// .milvus.proto.schema.DataType data_type = 4;
// .milvus.proto.schema.DataType data_type = 5;
if (this->data_type() != 0) {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::EnumSize(this->data_type());
@ -656,6 +695,9 @@ void FieldSchema::MergeFrom(const FieldSchema& from) {
description_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.description_);
}
if (from.fieldid() != 0) {
set_fieldid(from.fieldid());
}
if (from.is_primary_key() != 0) {
set_is_primary_key(from.is_primary_key());
}
@ -691,6 +733,7 @@ void FieldSchema::InternalSwap(FieldSchema* other) {
GetArenaNoVirtual());
description_.Swap(&other->description_, &::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
GetArenaNoVirtual());
swap(fieldid_, other->fieldid_);
swap(is_primary_key_, other->is_primary_key_);
swap(data_type_, other->data_type_);
}

View File

@ -225,14 +225,15 @@ class FieldSchema :
// accessors -------------------------------------------------------
enum : int {
kTypeParamsFieldNumber = 5,
kIndexParamsFieldNumber = 6,
kNameFieldNumber = 1,
kDescriptionFieldNumber = 3,
kIsPrimaryKeyFieldNumber = 2,
kDataTypeFieldNumber = 4,
kTypeParamsFieldNumber = 6,
kIndexParamsFieldNumber = 7,
kNameFieldNumber = 2,
kDescriptionFieldNumber = 4,
kFieldIDFieldNumber = 1,
kIsPrimaryKeyFieldNumber = 3,
kDataTypeFieldNumber = 5,
};
// repeated .milvus.proto.common.KeyValuePair type_params = 5;
// repeated .milvus.proto.common.KeyValuePair type_params = 6;
int type_params_size() const;
void clear_type_params();
::milvus::proto::common::KeyValuePair* mutable_type_params(int index);
@ -243,7 +244,7 @@ class FieldSchema :
const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::proto::common::KeyValuePair >&
type_params() const;
// repeated .milvus.proto.common.KeyValuePair index_params = 6;
// repeated .milvus.proto.common.KeyValuePair index_params = 7;
int index_params_size() const;
void clear_index_params();
::milvus::proto::common::KeyValuePair* mutable_index_params(int index);
@ -254,7 +255,7 @@ class FieldSchema :
const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::proto::common::KeyValuePair >&
index_params() const;
// string name = 1;
// string name = 2;
void clear_name();
const std::string& name() const;
void set_name(const std::string& value);
@ -265,7 +266,7 @@ class FieldSchema :
std::string* release_name();
void set_allocated_name(std::string* name);
// string description = 3;
// string description = 4;
void clear_description();
const std::string& description() const;
void set_description(const std::string& value);
@ -276,12 +277,17 @@ class FieldSchema :
std::string* release_description();
void set_allocated_description(std::string* description);
// bool is_primary_key = 2;
// int64 fieldID = 1;
void clear_fieldid();
::PROTOBUF_NAMESPACE_ID::int64 fieldid() const;
void set_fieldid(::PROTOBUF_NAMESPACE_ID::int64 value);
// bool is_primary_key = 3;
void clear_is_primary_key();
bool is_primary_key() const;
void set_is_primary_key(bool value);
// .milvus.proto.schema.DataType data_type = 4;
// .milvus.proto.schema.DataType data_type = 5;
void clear_data_type();
::milvus::proto::schema::DataType data_type() const;
void set_data_type(::milvus::proto::schema::DataType value);
@ -295,6 +301,7 @@ class FieldSchema :
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField< ::milvus::proto::common::KeyValuePair > index_params_;
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr name_;
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr description_;
::PROTOBUF_NAMESPACE_ID::int64 fieldid_;
bool is_primary_key_;
int data_type_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
@ -481,7 +488,21 @@ class CollectionSchema :
#endif // __GNUC__
// FieldSchema
// string name = 1;
// int64 fieldID = 1;
inline void FieldSchema::clear_fieldid() {
fieldid_ = PROTOBUF_LONGLONG(0);
}
inline ::PROTOBUF_NAMESPACE_ID::int64 FieldSchema::fieldid() const {
// @@protoc_insertion_point(field_get:milvus.proto.schema.FieldSchema.fieldID)
return fieldid_;
}
inline void FieldSchema::set_fieldid(::PROTOBUF_NAMESPACE_ID::int64 value) {
fieldid_ = value;
// @@protoc_insertion_point(field_set:milvus.proto.schema.FieldSchema.fieldID)
}
// string name = 2;
inline void FieldSchema::clear_name() {
name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
@ -532,7 +553,7 @@ inline void FieldSchema::set_allocated_name(std::string* name) {
// @@protoc_insertion_point(field_set_allocated:milvus.proto.schema.FieldSchema.name)
}
// bool is_primary_key = 2;
// bool is_primary_key = 3;
inline void FieldSchema::clear_is_primary_key() {
is_primary_key_ = false;
}
@ -546,7 +567,7 @@ inline void FieldSchema::set_is_primary_key(bool value) {
// @@protoc_insertion_point(field_set:milvus.proto.schema.FieldSchema.is_primary_key)
}
// string description = 3;
// string description = 4;
inline void FieldSchema::clear_description() {
description_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
}
@ -597,7 +618,7 @@ inline void FieldSchema::set_allocated_description(std::string* description) {
// @@protoc_insertion_point(field_set_allocated:milvus.proto.schema.FieldSchema.description)
}
// .milvus.proto.schema.DataType data_type = 4;
// .milvus.proto.schema.DataType data_type = 5;
inline void FieldSchema::clear_data_type() {
data_type_ = 0;
}
@ -611,7 +632,7 @@ inline void FieldSchema::set_data_type(::milvus::proto::schema::DataType value)
// @@protoc_insertion_point(field_set:milvus.proto.schema.FieldSchema.data_type)
}
// repeated .milvus.proto.common.KeyValuePair type_params = 5;
// repeated .milvus.proto.common.KeyValuePair type_params = 6;
inline int FieldSchema::type_params_size() const {
return type_params_.size();
}
@ -638,7 +659,7 @@ FieldSchema::type_params() const {
return type_params_;
}
// repeated .milvus.proto.common.KeyValuePair index_params = 6;
// repeated .milvus.proto.common.KeyValuePair index_params = 7;
inline int FieldSchema::index_params_size() const {
return index_params_.size();
}

View File

@ -19,7 +19,7 @@
namespace milvus::segcore {
// Constructs a Collection from a serialized CollectionSchema text blob.
// The blob is stored in schema_proto_ and parsed immediately; an empty
// blob makes parse() fall back to a hard-coded default schema.
Collection::Collection(const std::string& collection_proto) : schema_proto_(collection_proto) {
    parse();
    index_ = nullptr;  // no index until one is created explicitly
}
@ -122,25 +122,26 @@ Collection::CreateIndex(std::string& index_config) {
// Parses schema_proto_ (a text-format milvus.proto.schema.CollectionSchema)
// and populates collection_name_ and schema_.
//
// If schema_proto_ is empty, a hard-coded default schema named
// "default-collection" is installed instead (temporary behavior until the
// unit tests provide a real schema).
void
Collection::parse() {
    if (schema_proto_.empty()) {
        // TODO: remove hard code use unittests are ready
        std::cout << "WARN: Use default schema" << std::endl;
        auto schema = std::make_shared<Schema>();
        schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
        schema->AddField("age", DataType::INT32);
        collection_name_ = "default-collection";
        schema_ = schema;
        return;
    }
    milvus::proto::schema::CollectionSchema collection_schema;
    auto suc = google::protobuf::TextFormat::ParseFromString(schema_proto_, &collection_schema);
    if (!suc) {
        // Parse failure is only reported, not fatal: the fields below are
        // then filled from a default-constructed (empty) schema message.
        std::cerr << "unmarshal schema string failed" << std::endl;
    }
    collection_name_ = collection_schema.name();
    schema_ = Schema::ParseFrom(collection_schema);
}
} // namespace milvus::segcore

View File

@ -44,7 +44,7 @@ class Collection {
private:
IndexMetaPtr index_;
std::string collection_name_;
std::string collection_proto_;
std::string schema_proto_;
SchemaPtr schema_;
};

View File

@ -14,8 +14,8 @@
#include "segcore/Collection.h"
CCollection
NewCollection(const char* collection_proto) {
auto proto = std::string(collection_proto);
NewCollection(const char* schema_proto_blob) {
auto proto = std::string(schema_proto_blob);
auto collection = std::make_unique<milvus::segcore::Collection>(proto);
@ -33,3 +33,10 @@ DeleteCollection(CCollection collection) {
std::cout << "delete collection " << col->get_collection_name() << std::endl;
delete col;
}
// Returns a heap-allocated copy of the collection's name.
// Ownership transfers to the caller, who must release the string with free().
const char*
GetCollectionName(CCollection collection) {
    auto col = static_cast<milvus::segcore::Collection*>(collection);
    // c_str() guarantees a NUL-terminated buffer for strdup.
    return strdup(col->get_collection_name().c_str());
}

View File

@ -28,11 +28,14 @@ typedef struct CStatus {
typedef void* CCollection;
CCollection
NewCollection(const char* collection_proto);
NewCollection(const char* schema_proto_blob);
void
DeleteCollection(CCollection collection);
const char*
GetCollectionName(CCollection collection);
#ifdef __cplusplus
}
#endif

View File

@ -26,6 +26,14 @@ TEST(CApiTest, CollectionTest) {
DeleteCollection(collection);
}
TEST(CApiTest, GetCollectionNameTest) {
    // An empty schema blob makes NewCollection fall back to the built-in
    // default schema, whose collection name is "default-collection".
    auto schema_tmp_conf = "";
    auto collection = NewCollection(schema_tmp_conf);
    auto name = GetCollectionName(collection);
    assert(strcmp(name, "default-collection") == 0);
    // GetCollectionName hands back a strdup'd copy owned by the caller;
    // release it to avoid leaking in the test binary.
    free(const_cast<char*>(name));
    DeleteCollection(collection);
}
TEST(CApiTest, SegmentTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);

View File

@ -78,12 +78,10 @@ func (t *createCollectionTask) Execute() error {
}
collection := etcdpb.CollectionMeta{
ID: collectionID,
Schema: &schema,
CreateTime: ts,
// TODO: initial segment?
SegmentIDs: make([]UniqueID, 0),
// TODO: initial partition?
ID: collectionID,
Schema: &schema,
CreateTime: ts,
SegmentIDs: make([]UniqueID, 0),
PartitionTags: make([]string, 0),
}
err = t.mt.AddCollection(&collection)
@ -97,6 +95,8 @@ func (t *createCollectionTask) Execute() error {
EndTimestamp: t.req.Timestamp,
HashValues: []uint32{0},
}
t.req.CollectionID = collectionID
timeTickMsg := &ms.CreateCollectionMsg{
BaseMsg: baseMsg,
CreateCollectionRequest: *t.req,
@ -150,6 +150,8 @@ func (t *dropCollectionTask) Execute() error {
EndTimestamp: ts,
HashValues: []uint32{0},
}
t.req.CollectionID = collectionID
timeTickMsg := &ms.DropCollectionMsg{
BaseMsg: baseMsg,
DropCollectionRequest: *t.req,

View File

@ -84,6 +84,8 @@ func (t *createPartitionTask) Execute() error {
EndTimestamp: ts,
HashValues: []uint32{0},
}
t.req.CollectionID = collectionMeta.ID
timeTickMsg := &ms.CreatePartitionMsg{
BaseMsg: baseMsg,
CreatePartitionRequest: *t.req,
@ -138,6 +140,8 @@ func (t *dropPartitionTask) Execute() error {
EndTimestamp: ts,
HashValues: []uint32{0},
}
t.req.CollectionID = collectionMeta.ID
timeTickMsg := &ms.DropPartitionMsg{
BaseMsg: baseMsg,
DropPartitionRequest: *t.req,

View File

@ -344,22 +344,27 @@ func (ms *PulsarTtMsgStream) bufMsgPackToChannel() {
defer ms.wait.Done()
ms.unsolvedBuf = make([]TsMsg, 0)
ms.inputBuf = make([]TsMsg, 0)
isChannelReady := make([]bool, len(ms.consumers))
eofMsgTimeStamp := make(map[int]Timestamp)
for {
select {
case <-ms.ctx.Done():
return
default:
wg := sync.WaitGroup{}
wg.Add(len(ms.consumers))
eofMsgTimeStamp := make(map[int]Timestamp)
mu := sync.Mutex{}
for i := 0; i < len(ms.consumers); i++ {
if isChannelReady[i] {
continue
}
wg.Add(1)
go ms.findTimeTick(i, eofMsgTimeStamp, &wg, &mu)
}
wg.Wait()
timeStamp, ok := checkTimeTickMsg(eofMsgTimeStamp)
if !ok {
timeStamp, ok := checkTimeTickMsg(eofMsgTimeStamp, isChannelReady)
if !ok || timeStamp <= ms.lastTimeStamp {
log.Printf("All timeTick's timestamps are inconsistent")
continue
}
timeTickBuf := make([]TsMsg, 0)
@ -384,7 +389,6 @@ func (ms *PulsarTtMsgStream) bufMsgPackToChannel() {
ms.lastTimeStamp = timeStamp
}
}
}
func (ms *PulsarTtMsgStream) findTimeTick(channelIndex int,
@ -463,16 +467,29 @@ func (ms *InMemMsgStream) Chan() <- chan *MsgPack {
}
*/
func checkTimeTickMsg(msg map[int]Timestamp) (Timestamp, bool) {
func checkTimeTickMsg(msg map[int]Timestamp, isChannelReady []bool) (Timestamp, bool) {
checkMap := make(map[Timestamp]int)
var maxTime Timestamp = 0
for _, v := range msg {
checkMap[v]++
}
if len(checkMap) <= 1 {
for k := range checkMap {
return k, true
if v > maxTime {
maxTime = v
}
}
if len(checkMap) <= 1 {
for i := range msg {
isChannelReady[i] = false
}
return maxTime, true
}
for i, v := range msg {
if v != maxTime {
isChannelReady[i] = false
} else {
isChannelReady[i] = true
}
}
return 0, false
}

View File

@ -103,19 +103,21 @@ message AssignSegIDResponse {
message CreateCollectionRequest {
  MsgType msg_type = 1;
  // Globally unique ID of the collection being created.
  int64 collectionID = 2;
  int64 reqID = 3;
  uint64 timestamp = 4;
  int64 proxyID = 5;
  // Serialized collection schema.
  common.Blob schema = 6;
}
message DropCollectionRequest {
  MsgType msg_type = 1;
  // Globally unique ID of the collection being dropped.
  int64 collectionID = 2;
  int64 reqID = 3;
  uint64 timestamp = 4;
  int64 proxyID = 5;
  service.CollectionName collection_name = 6;
}
@ -147,19 +149,23 @@ message ShowCollectionRequest {
message CreatePartitionRequest {
  MsgType msg_type = 1;
  // ID of the collection the new partition belongs to.
  int64 collectionID = 2;
  // Globally unique ID assigned to the new partition.
  int64 partitionID = 3;
  int64 reqID = 4;
  uint64 timestamp = 5;
  int64 proxyID = 6;
  service.PartitionName partition_name = 7;
}
message DropPartitionRequest {
  MsgType msg_type = 1;
  // ID of the collection the partition belongs to.
  int64 collectionID = 2;
  // ID of the partition being dropped.
  int64 partitionID = 3;
  int64 reqID = 4;
  uint64 timestamp = 5;
  int64 proxyID = 6;
  service.PartitionName partition_name = 7;
}

View File

@ -605,10 +605,11 @@ func (m *AssignSegIDResponse) GetPerChannelAssignment() []*SegIDAssignment {
type CreateCollectionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqID int64 `protobuf:"varint,2,opt,name=reqID,proto3" json:"reqID,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyID int64 `protobuf:"varint,4,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
Schema *commonpb.Blob `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"`
CollectionID int64 `protobuf:"varint,2,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
ReqID int64 `protobuf:"varint,3,opt,name=reqID,proto3" json:"reqID,omitempty"`
Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyID int64 `protobuf:"varint,5,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
Schema *commonpb.Blob `protobuf:"bytes,6,opt,name=schema,proto3" json:"schema,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -646,6 +647,13 @@ func (m *CreateCollectionRequest) GetMsgType() MsgType {
return MsgType_kNone
}
// GetCollectionID returns the CollectionID field; it is safe to call on a
// nil receiver, in which case it returns 0.
func (m *CreateCollectionRequest) GetCollectionID() int64 {
	if m == nil {
		return 0
	}
	return m.CollectionID
}
func (m *CreateCollectionRequest) GetReqID() int64 {
if m != nil {
return m.ReqID
@ -676,10 +684,11 @@ func (m *CreateCollectionRequest) GetSchema() *commonpb.Blob {
type DropCollectionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqID int64 `protobuf:"varint,2,opt,name=reqID,proto3" json:"reqID,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyID int64 `protobuf:"varint,4,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
CollectionName *servicepb.CollectionName `protobuf:"bytes,5,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
CollectionID int64 `protobuf:"varint,2,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
ReqID int64 `protobuf:"varint,3,opt,name=reqID,proto3" json:"reqID,omitempty"`
Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyID int64 `protobuf:"varint,5,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
CollectionName *servicepb.CollectionName `protobuf:"bytes,6,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -717,6 +726,13 @@ func (m *DropCollectionRequest) GetMsgType() MsgType {
return MsgType_kNone
}
// GetCollectionID returns the CollectionID field; it is safe to call on a
// nil receiver, in which case it returns 0.
func (m *DropCollectionRequest) GetCollectionID() int64 {
	if m == nil {
		return 0
	}
	return m.CollectionID
}
func (m *DropCollectionRequest) GetReqID() int64 {
if m != nil {
return m.ReqID
@ -952,10 +968,12 @@ func (m *ShowCollectionRequest) GetProxyID() int64 {
type CreatePartitionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqID int64 `protobuf:"varint,2,opt,name=reqID,proto3" json:"reqID,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyID int64 `protobuf:"varint,4,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
PartitionName *servicepb.PartitionName `protobuf:"bytes,5,opt,name=partition_name,json=partitionName,proto3" json:"partition_name,omitempty"`
CollectionID int64 `protobuf:"varint,2,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
PartitionID int64 `protobuf:"varint,3,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
ReqID int64 `protobuf:"varint,4,opt,name=reqID,proto3" json:"reqID,omitempty"`
Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyID int64 `protobuf:"varint,6,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
PartitionName *servicepb.PartitionName `protobuf:"bytes,7,opt,name=partition_name,json=partitionName,proto3" json:"partition_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -993,6 +1011,20 @@ func (m *CreatePartitionRequest) GetMsgType() MsgType {
return MsgType_kNone
}
// GetCollectionID returns the CollectionID field; it is safe to call on a
// nil receiver, in which case it returns 0.
func (m *CreatePartitionRequest) GetCollectionID() int64 {
	if m == nil {
		return 0
	}
	return m.CollectionID
}
// GetPartitionID returns the PartitionID field; it is safe to call on a
// nil receiver, in which case it returns 0.
func (m *CreatePartitionRequest) GetPartitionID() int64 {
	if m == nil {
		return 0
	}
	return m.PartitionID
}
func (m *CreatePartitionRequest) GetReqID() int64 {
if m != nil {
return m.ReqID
@ -1023,10 +1055,12 @@ func (m *CreatePartitionRequest) GetPartitionName() *servicepb.PartitionName {
type DropPartitionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqID int64 `protobuf:"varint,2,opt,name=reqID,proto3" json:"reqID,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyID int64 `protobuf:"varint,4,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
PartitionName *servicepb.PartitionName `protobuf:"bytes,5,opt,name=partition_name,json=partitionName,proto3" json:"partition_name,omitempty"`
CollectionID int64 `protobuf:"varint,2,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
PartitionID int64 `protobuf:"varint,3,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
ReqID int64 `protobuf:"varint,4,opt,name=reqID,proto3" json:"reqID,omitempty"`
Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyID int64 `protobuf:"varint,6,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
PartitionName *servicepb.PartitionName `protobuf:"bytes,7,opt,name=partition_name,json=partitionName,proto3" json:"partition_name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -1064,6 +1098,20 @@ func (m *DropPartitionRequest) GetMsgType() MsgType {
return MsgType_kNone
}
// GetCollectionID returns the CollectionID field; it is safe to call on a
// nil receiver, in which case it returns 0.
func (m *DropPartitionRequest) GetCollectionID() int64 {
	if m == nil {
		return 0
	}
	return m.CollectionID
}
// GetPartitionID returns the PartitionID field; it is safe to call on a
// nil receiver, in which case it returns 0.
func (m *DropPartitionRequest) GetPartitionID() int64 {
	if m == nil {
		return 0
	}
	return m.PartitionID
}
func (m *DropPartitionRequest) GetReqID() int64 {
if m != nil {
return m.ReqID
@ -2085,98 +2133,101 @@ func init() {
func init() { proto.RegisterFile("internal_msg.proto", fileDescriptor_7eb37f6b80b23116) }
var fileDescriptor_7eb37f6b80b23116 = []byte{
// 1474 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0x4b, 0x6f, 0x1b, 0x47,
0x12, 0xf6, 0x70, 0x28, 0x3e, 0x8a, 0x14, 0x35, 0x6a, 0x49, 0x36, 0x6d, 0x2f, 0x6c, 0x79, 0xbc,
0x58, 0x6b, 0xbd, 0x58, 0x09, 0x2b, 0xef, 0x61, 0x7d, 0xdb, 0xb5, 0x08, 0xac, 0xb9, 0x86, 0x0c,
0xed, 0x50, 0x48, 0x80, 0xc0, 0xc0, 0x60, 0x44, 0x96, 0x86, 0x83, 0x79, 0xaa, 0x7b, 0x28, 0x99,
0x3a, 0xe4, 0xe4, 0x1f, 0x90, 0x1c, 0x72, 0xc8, 0x21, 0x40, 0x8e, 0x39, 0x19, 0xc9, 0xbf, 0xc8,
0xeb, 0x14, 0x20, 0x7f, 0x22, 0x81, 0x63, 0x20, 0x71, 0xee, 0x41, 0x77, 0xcf, 0x83, 0xa3, 0xa7,
0x01, 0x5b, 0x89, 0x00, 0xdd, 0xba, 0x6a, 0x7a, 0xba, 0xaa, 0xbe, 0xaf, 0xba, 0xba, 0xba, 0x81,
0x38, 0x41, 0x8c, 0x34, 0xb0, 0x3c, 0xd3, 0x67, 0xf6, 0x72, 0x44, 0xc3, 0x38, 0x24, 0x0b, 0xbe,
0xe3, 0xed, 0x8e, 0x98, 0x94, 0x96, 0xd3, 0x09, 0xd7, 0x9a, 0xfd, 0xd0, 0xf7, 0xc3, 0x40, 0xaa,
0xaf, 0xcd, 0x32, 0xa4, 0xbb, 0x4e, 0x1f, 0xf3, 0xff, 0xf4, 0x00, 0xea, 0xdd, 0x8e, 0x81, 0x3b,
0x23, 0x64, 0x31, 0xb9, 0x0c, 0x95, 0x08, 0x91, 0x76, 0x3b, 0x6d, 0x65, 0x51, 0x59, 0x52, 0x8d,
0x44, 0x22, 0xf7, 0xa0, 0x4c, 0x43, 0x0f, 0xdb, 0xa5, 0x45, 0x65, 0xa9, 0xb5, 0x7a, 0x73, 0xf9,
0x48, 0x5b, 0xcb, 0x1b, 0x88, 0xd4, 0x08, 0x3d, 0x34, 0xc4, 0x64, 0x32, 0x0f, 0x53, 0xfd, 0x70,
0x14, 0xc4, 0x6d, 0x75, 0x51, 0x59, 0x9a, 0x36, 0xa4, 0xa0, 0xdb, 0x00, 0xdc, 0x1e, 0x8b, 0xc2,
0x80, 0x21, 0xb9, 0x07, 0x15, 0x16, 0x5b, 0xf1, 0x88, 0x09, 0x83, 0x8d, 0xd5, 0xeb, 0xc5, 0xa5,
0x13, 0xe7, 0x7b, 0x62, 0x8a, 0x91, 0x4c, 0x25, 0x2d, 0x28, 0x75, 0x3b, 0xc2, 0x17, 0xd5, 0x28,
0x75, 0x3b, 0xc7, 0x18, 0x0a, 0x01, 0x36, 0x59, 0xf8, 0x3b, 0x46, 0xb6, 0x0b, 0x0d, 0x61, 0xf0,
0x4d, 0x42, 0xfb, 0x13, 0xd4, 0x63, 0xc7, 0x47, 0x16, 0x5b, 0x7e, 0x24, 0x7c, 0x2a, 0x1b, 0xb9,
0xe2, 0x18, 0xbb, 0xcf, 0x14, 0x68, 0xf6, 0xd0, 0xce, 0x59, 0xcc, 0xa6, 0x29, 0x13, 0xd3, 0xf8,
0xd2, 0xfd, 0xa1, 0x15, 0x04, 0xe8, 0x25, 0xe0, 0x4d, 0x19, 0xb9, 0x82, 0x5c, 0x87, 0x7a, 0x3f,
0xf4, 0x3c, 0x33, 0xb0, 0x7c, 0x14, 0xcb, 0xd7, 0x8d, 0x1a, 0x57, 0x3c, 0xb6, 0x7c, 0x24, 0xb7,
0x61, 0x3a, 0xb2, 0x68, 0xec, 0xc4, 0x4e, 0x18, 0x98, 0xb1, 0x65, 0xb7, 0xcb, 0x62, 0x42, 0x33,
0x53, 0x6e, 0x5a, 0xb6, 0xfe, 0x5c, 0x01, 0xf2, 0x1f, 0xc6, 0x1c, 0x3b, 0x28, 0x38, 0xf3, 0x56,
0x81, 0x7f, 0x04, 0x33, 0x11, 0x52, 0x33, 0x71, 0xdb, 0xa4, 0xb8, 0xd3, 0x56, 0x17, 0xd5, 0xa5,
0xc6, 0xea, 0xed, 0x63, 0xfe, 0x9f, 0x74, 0xc5, 0x98, 0x8e, 0x90, 0xae, 0xc9, 0x5f, 0x0d, 0xdc,
0xd1, 0x3f, 0x51, 0x60, 0x46, 0x7c, 0x97, 0x5e, 0xfb, 0x18, 0x08, 0xe8, 0x18, 0x57, 0x25, 0xce,
0x4a, 0xe1, 0x14, 0xe8, 0x8e, 0x64, 0xa5, 0x08, 0x68, 0xf9, 0x34, 0x40, 0xa7, 0x8e, 0x00, 0xf4,
0xa5, 0x02, 0x73, 0x05, 0x40, 0xcf, 0x2e, 0xb1, 0xee, 0xc0, 0x0c, 0x3e, 0x8d, 0x1c, 0x8a, 0xe6,
0x60, 0x44, 0x2d, 0xee, 0x80, 0x08, 0xa6, 0x6c, 0xb4, 0xa4, 0xba, 0x93, 0x68, 0xc9, 0x13, 0xb8,
0x3c, 0x49, 0x80, 0x95, 0x21, 0xd7, 0x2e, 0x0b, 0x1e, 0xfe, 0x72, 0x12, 0x0f, 0x39, 0xce, 0xc6,
0x7c, 0x4e, 0x45, 0xae, 0xd5, 0xbf, 0x57, 0xe0, 0xca, 0x1a, 0x45, 0x2b, 0xc6, 0xb5, 0xd0, 0xf3,
0xb0, 0xcf, 0x4d, 0xa6, 0x79, 0x74, 0x1f, 0x6a, 0x3e, 0xb3, 0xcd, 0x78, 0x1c, 0xa1, 0x88, 0xbb,
0xb5, 0x7a, 0xe3, 0x18, 0x5b, 0xeb, 0xcc, 0xde, 0x1c, 0x47, 0x68, 0x54, 0x7d, 0x39, 0xe0, 0x04,
0x51, 0xdc, 0xc9, 0x4a, 0x86, 0x14, 0x8a, 0x88, 0xa8, 0x07, 0x11, 0x69, 0x43, 0x35, 0xa2, 0xe1,
0xd3, 0x71, 0xb7, 0x23, 0xc8, 0x53, 0x8d, 0x54, 0x24, 0xff, 0x80, 0x0a, 0xeb, 0x0f, 0xd1, 0xb7,
0x04, 0x69, 0x8d, 0xd5, 0xab, 0x47, 0xc2, 0xff, 0xc0, 0x0b, 0xb7, 0x8c, 0x64, 0x22, 0x67, 0x72,
0xa1, 0x43, 0xc3, 0xe8, 0x1c, 0x47, 0xb5, 0x0e, 0x33, 0xfd, 0xcc, 0x3b, 0x99, 0xb4, 0x32, 0xbc,
0x3f, 0x17, 0xfd, 0x49, 0x0e, 0x90, 0xe5, 0x3c, 0x14, 0x9e, 0xd0, 0x46, 0xab, 0x5f, 0x90, 0xf5,
0x9f, 0x14, 0x98, 0x7f, 0x68, 0xb1, 0x8b, 0x13, 0xf0, 0x2f, 0x0a, 0x5c, 0xed, 0x20, 0xeb, 0x53,
0x67, 0x0b, 0x2f, 0x4e, 0xd4, 0x9f, 0x2a, 0xb0, 0xd0, 0x1b, 0x86, 0x7b, 0xe7, 0x37, 0x62, 0xfd,
0x85, 0x02, 0x97, 0x65, 0x4d, 0xd9, 0x48, 0x8b, 0xeb, 0xb9, 0x63, 0xe5, 0x7f, 0xd0, 0xca, 0x8f,
0x83, 0x09, 0x52, 0x6e, 0x1f, 0x4d, 0x4a, 0x16, 0x88, 0xe0, 0x24, 0x3f, 0x49, 0x04, 0x25, 0x3f,
0x2a, 0x30, 0xcf, 0x6b, 0xcd, 0xc5, 0x88, 0xf6, 0x07, 0x05, 0xe6, 0x1e, 0x5a, 0xec, 0x62, 0x04,
0xfb, 0x52, 0x81, 0x76, 0x5a, 0x63, 0x2e, 0x46, 0xc4, 0xfc, 0x18, 0xe1, 0xf5, 0xe5, 0xfc, 0x46,
0xfb, 0x96, 0x0b, 0xea, 0xcf, 0x25, 0x98, 0xee, 0x06, 0x0c, 0x69, 0x7c, 0x66, 0x91, 0xde, 0x39,
0xec, 0xb1, 0xec, 0xf7, 0x0f, 0xf8, 0xf2, 0x5a, 0x5d, 0x3f, 0xc7, 0x8d, 0xa1, 0xcd, 0xbb, 0xb7,
0x6e, 0x47, 0x44, 0xae, 0x1a, 0xb9, 0xa2, 0xd8, 0x38, 0x57, 0xe4, 0xd7, 0xbc, 0x71, 0x9e, 0x40,
0xb5, 0x5a, 0x44, 0xf5, 0x06, 0x40, 0x06, 0x3e, 0x6b, 0xd7, 0x16, 0xd5, 0xa5, 0xb2, 0x31, 0xa1,
0xe1, 0x97, 0x0a, 0x1a, 0xee, 0x75, 0x3b, 0xac, 0x5d, 0x5f, 0x54, 0xf9, 0xa5, 0x42, 0x4a, 0xe4,
0x9f, 0x50, 0xa3, 0xe1, 0x9e, 0x39, 0xb0, 0x62, 0xab, 0x0d, 0xa2, 0x21, 0x3d, 0xa1, 0x3b, 0xab,
0xd2, 0x70, 0xaf, 0x63, 0xc5, 0x96, 0xfe, 0xac, 0x04, 0xd3, 0x1d, 0xf4, 0x30, 0xc6, 0x3f, 0x1e,
0xf4, 0x02, 0x62, 0xe5, 0x13, 0x10, 0x9b, 0x3a, 0x09, 0xb1, 0xca, 0x21, 0xc4, 0x6e, 0x41, 0x33,
0xa2, 0x8e, 0x6f, 0xd1, 0xb1, 0xe9, 0xe2, 0x98, 0xb5, 0xab, 0x02, 0xb7, 0x46, 0xa2, 0x7b, 0x84,
0x63, 0xa6, 0xbf, 0x52, 0x60, 0xba, 0x87, 0x16, 0xed, 0x0f, 0xcf, 0x0c, 0x86, 0x09, 0xff, 0xd5,
0xa2, 0xff, 0x85, 0xfd, 0x57, 0x3e, 0xb8, 0xff, 0xfe, 0x0a, 0x1a, 0x45, 0x36, 0xf2, 0x62, 0x33,
0x07, 0x47, 0x02, 0x30, 0x23, 0xf5, 0x6b, 0x19, 0x44, 0x2b, 0x30, 0xb5, 0x33, 0x42, 0x3a, 0x16,
0xe9, 0x76, 0x22, 0xff, 0x72, 0x9e, 0xfe, 0x9d, 0x02, 0x5a, 0x6f, 0xcc, 0xd6, 0xc2, 0x60, 0xdb,
0xb1, 0xcf, 0x5d, 0xe4, 0x04, 0xca, 0x82, 0xaf, 0xa9, 0x45, 0x75, 0xa9, 0x6e, 0x88, 0x31, 0xe7,
0xd2, 0xc5, 0xb1, 0x19, 0x51, 0xdc, 0x76, 0x9e, 0xa2, 0x64, 0xbb, 0x6e, 0x34, 0x5c, 0x1c, 0x6f,
0x24, 0x2a, 0xfd, 0x79, 0x09, 0x9a, 0x29, 0x97, 0x1c, 0x9f, 0x37, 0x09, 0x28, 0xbf, 0x6f, 0x96,
0x5e, 0xff, 0xbe, 0x99, 0xa1, 0xa0, 0x1e, 0x83, 0xc2, 0x81, 0x3a, 0x7a, 0x0b, 0x9a, 0x82, 0x0e,
0x33, 0x08, 0x07, 0x98, 0xb1, 0xdb, 0x10, 0xba, 0xc7, 0x42, 0x55, 0x04, 0xaa, 0xf2, 0x3a, 0x29,
0x52, 0x3d, 0x3a, 0x45, 0x08, 0x94, 0x87, 0x4e, 0x2c, 0xeb, 0x4a, 0xd3, 0x10, 0x63, 0xfd, 0x7d,
0x68, 0x6c, 0x3a, 0x3e, 0x6e, 0x3a, 0x7d, 0x77, 0x9d, 0xd9, 0x6f, 0x02, 0x57, 0xfe, 0xe0, 0x51,
0x2a, 0x3c, 0x78, 0x9c, 0x78, 0xc2, 0xe8, 0x1f, 0x2b, 0x50, 0x7d, 0x84, 0xe3, 0xd5, 0x1e, 0xda,
0x02, 0x3b, 0x5e, 0xcf, 0xd2, 0x47, 0x08, 0x21, 0x90, 0x9b, 0xd0, 0x98, 0xd8, 0xc1, 0xc9, 0xe2,
0x90, 0x6f, 0xe0, 0x53, 0x8e, 0xb0, 0xab, 0x50, 0x73, 0x98, 0xb9, 0x6b, 0x79, 0xce, 0x40, 0x60,
0x5f, 0x33, 0xaa, 0x0e, 0x7b, 0x87, 0x8b, 0xbc, 0x76, 0x64, 0x25, 0x5b, 0x66, 0x9a, 0x6a, 0x4c,
0x68, 0xf4, 0x27, 0x00, 0x89, 0x6b, 0x1c, 0x9a, 0x8c, 0x59, 0x65, 0x92, 0xd9, 0x7f, 0x41, 0xd5,
0xc5, 0xf1, 0x2a, 0x43, 0xbb, 0x5d, 0x12, 0x85, 0xf7, 0x38, 0xbc, 0x92, 0x95, 0x8c, 0x74, 0xba,
0xfe, 0x91, 0x7c, 0xbe, 0xe2, 0xc6, 0x78, 0x0e, 0xb1, 0xe2, 0x91, 0xa2, 0x1c, 0x3c, 0x52, 0x6e,
0x42, 0xc3, 0x47, 0x3f, 0xa4, 0x63, 0x93, 0x39, 0xfb, 0x98, 0xc2, 0x20, 0x55, 0x3d, 0x67, 0x1f,
0x79, 0xa0, 0xc1, 0xc8, 0x37, 0x69, 0xb8, 0xc7, 0xd2, 0xad, 0x16, 0x8c, 0x7c, 0x23, 0xdc, 0x63,
0xe4, 0x6f, 0x30, 0x4b, 0xb1, 0x8f, 0x41, 0xec, 0x8d, 0x4d, 0x3f, 0x1c, 0x38, 0xdb, 0x0e, 0xa6,
0x60, 0x68, 0xe9, 0x87, 0xf5, 0x44, 0xaf, 0x7f, 0xa6, 0xc0, 0xec, 0xff, 0xd3, 0xf4, 0xeb, 0xa1,
0x2d, 0x9d, 0x3b, 0x83, 0xc4, 0xf8, 0xb7, 0x88, 0xd7, 0xe4, 0x1b, 0x87, 0x9d, 0xfe, 0x9c, 0x95,
0xe1, 0x64, 0xd4, 0x58, 0xe2, 0xd4, 0xdd, 0x17, 0x25, 0xa8, 0x26, 0xe6, 0x48, 0x1d, 0xa6, 0xdc,
0xc7, 0x61, 0x80, 0xda, 0x25, 0xb2, 0x00, 0xb3, 0xee, 0xc1, 0xe7, 0x14, 0x6d, 0x40, 0xe6, 0x60,
0xc6, 0x2d, 0xbe, 0x46, 0x68, 0x48, 0x08, 0xb4, 0xdc, 0xc2, 0x85, 0x5d, 0xdb, 0x26, 0x57, 0x60,
0xce, 0x3d, 0x7c, 0xa7, 0xd5, 0x78, 0x0a, 0x68, 0x6e, 0xf1, 0xda, 0xc7, 0xb4, 0xa1, 0x58, 0xe2,
0xbf, 0x18, 0x67, 0xb5, 0x94, 0x69, 0x0e, 0x59, 0x00, 0xcd, 0x3d, 0x70, 0xfb, 0xd2, 0xbe, 0x54,
0xc8, 0x1c, 0xb4, 0xdc, 0xc2, 0x25, 0x45, 0xfb, 0x4a, 0x21, 0x04, 0xa6, 0xdd, 0xc9, 0x5e, 0x5e,
0xfb, 0x5a, 0x21, 0x57, 0x80, 0xb8, 0x87, 0x5a, 0x5e, 0xed, 0x1b, 0x85, 0xcc, 0xc3, 0x8c, 0x5b,
0xe8, 0x0c, 0x99, 0xf6, 0xad, 0x42, 0x9a, 0x50, 0x75, 0x65, 0xfb, 0xa4, 0x7d, 0xa0, 0x0a, 0x49,
0x9e, 0xeb, 0xda, 0x87, 0x52, 0x92, 0x25, 0x51, 0x7b, 0xa5, 0x0a, 0x63, 0x93, 0x05, 0x52, 0xfb,
0x55, 0x25, 0x2d, 0xa8, 0xbb, 0x69, 0x11, 0xd0, 0x3e, 0xaf, 0x0b, 0xe3, 0x87, 0x32, 0x40, 0xfb,
0xa2, 0x7e, 0xf7, 0x3e, 0xd4, 0xd2, 0x87, 0x49, 0x02, 0x50, 0x59, 0xb7, 0x58, 0x8c, 0x54, 0xbb,
0xc4, 0xc7, 0x06, 0x5a, 0x03, 0xa4, 0x9a, 0xc2, 0xc7, 0xef, 0x52, 0x87, 0xeb, 0x4b, 0x9c, 0x93,
0x0d, 0x5e, 0xe7, 0x34, 0xf5, 0x41, 0xe7, 0xbd, 0x07, 0xb6, 0x13, 0x0f, 0x47, 0x5b, 0xbc, 0x6e,
0xae, 0xec, 0x3b, 0x9e, 0xe7, 0xec, 0xc7, 0xd8, 0x1f, 0xae, 0x48, 0xc2, 0xff, 0x3e, 0x70, 0x58,
0x4c, 0x9d, 0xad, 0x51, 0x8c, 0x83, 0x95, 0x94, 0xf6, 0x15, 0x91, 0x05, 0x99, 0x18, 0x6d, 0x6d,
0x55, 0x84, 0xe6, 0xde, 0x6f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x9e, 0x9f, 0x3d, 0x09, 0x18,
0x00, 0x00,
// 1530 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0x4b, 0x6f, 0x1b, 0x47,
0x12, 0xf6, 0x70, 0xf8, 0x2c, 0x52, 0xd4, 0xa8, 0x25, 0xd9, 0xb4, 0xbd, 0xb0, 0xe9, 0xf1, 0x62,
0xad, 0xf5, 0x62, 0x25, 0xac, 0xbc, 0x87, 0xf5, 0x6d, 0xd7, 0x22, 0xb0, 0x66, 0x0c, 0x19, 0xca,
0x50, 0x48, 0x80, 0xc0, 0xc0, 0x60, 0x44, 0x96, 0xc8, 0xc1, 0x3c, 0xd5, 0x3d, 0x94, 0x4c, 0x1d,
0x72, 0xf2, 0x0f, 0x48, 0x10, 0xe4, 0x90, 0x43, 0x80, 0x1c, 0x73, 0x32, 0x62, 0xe4, 0x4f, 0xe4,
0x75, 0xca, 0xcf, 0x08, 0x90, 0x18, 0x48, 0x9c, 0x43, 0x6e, 0x41, 0xf7, 0xbc, 0x38, 0x92, 0x48,
0x09, 0x91, 0xe4, 0x38, 0x70, 0x6e, 0x53, 0xc5, 0x66, 0x77, 0x7d, 0xdf, 0x57, 0x5d, 0xdd, 0xd5,
0x40, 0x4c, 0x37, 0x40, 0xea, 0x1a, 0xb6, 0xee, 0xb0, 0xfe, 0xb2, 0x4f, 0xbd, 0xc0, 0x23, 0x8b,
0x8e, 0x69, 0xef, 0x0e, 0x59, 0x68, 0x2d, 0xc7, 0x03, 0xae, 0xd4, 0xba, 0x9e, 0xe3, 0x78, 0x6e,
0xe8, 0xbe, 0x32, 0xc7, 0x90, 0xee, 0x9a, 0x5d, 0x4c, 0xff, 0xa7, 0xba, 0x50, 0x69, 0xb7, 0x34,
0xdc, 0x19, 0x22, 0x0b, 0xc8, 0x45, 0x28, 0xfa, 0x88, 0xb4, 0xdd, 0x6a, 0x48, 0x4d, 0x69, 0x49,
0xd6, 0x22, 0x8b, 0xdc, 0x81, 0x3c, 0xf5, 0x6c, 0x6c, 0xe4, 0x9a, 0xd2, 0x52, 0x7d, 0xf5, 0xfa,
0xf2, 0x91, 0x6b, 0x2d, 0x6f, 0x20, 0x52, 0xcd, 0xb3, 0x51, 0x13, 0x83, 0xc9, 0x02, 0x14, 0xba,
0xde, 0xd0, 0x0d, 0x1a, 0x72, 0x53, 0x5a, 0x9a, 0xd1, 0x42, 0x43, 0xed, 0x03, 0xf0, 0xf5, 0x98,
0xef, 0xb9, 0x0c, 0xc9, 0x1d, 0x28, 0xb2, 0xc0, 0x08, 0x86, 0x4c, 0x2c, 0x58, 0x5d, 0xbd, 0x9a,
0x9d, 0x3a, 0x0a, 0xbe, 0x23, 0x86, 0x68, 0xd1, 0x50, 0x52, 0x87, 0x5c, 0xbb, 0x25, 0x62, 0x91,
0xb5, 0x5c, 0xbb, 0x35, 0x61, 0x21, 0x0f, 0x60, 0x93, 0x79, 0x2f, 0x11, 0xd9, 0x2e, 0x54, 0xc5,
0x82, 0xa7, 0x81, 0xf6, 0x17, 0xa8, 0x04, 0xa6, 0x83, 0x2c, 0x30, 0x1c, 0x5f, 0xc4, 0x94, 0xd7,
0x52, 0xc7, 0x84, 0x75, 0x9f, 0x48, 0x50, 0xeb, 0x60, 0x3f, 0x55, 0x31, 0x19, 0x26, 0x8d, 0x0d,
0xe3, 0x53, 0x77, 0x07, 0x86, 0xeb, 0xa2, 0x1d, 0x91, 0x57, 0xd0, 0x52, 0x07, 0xb9, 0x0a, 0x95,
0xae, 0x67, 0xdb, 0xba, 0x6b, 0x38, 0x28, 0xa6, 0xaf, 0x68, 0x65, 0xee, 0x78, 0x68, 0x38, 0x48,
0x6e, 0xc2, 0x8c, 0x6f, 0xd0, 0xc0, 0x0c, 0x4c, 0xcf, 0xd5, 0x03, 0xa3, 0xdf, 0xc8, 0x8b, 0x01,
0xb5, 0xc4, 0xb9, 0x69, 0xf4, 0xd5, 0xa7, 0x12, 0x90, 0xff, 0x31, 0x66, 0xf6, 0xdd, 0x4c, 0x30,
0x67, 0x4a, 0xfc, 0x03, 0x98, 0xf5, 0x91, 0xea, 0x51, 0xd8, 0x3a, 0xc5, 0x9d, 0x86, 0xdc, 0x94,
0x97, 0xaa, 0xab, 0x37, 0x27, 0xfc, 0x7f, 0x3c, 0x14, 0x6d, 0xc6, 0x47, 0xba, 0x16, 0xfe, 0x55,
0xc3, 0x1d, 0xf5, 0x63, 0x09, 0x66, 0xc5, 0xef, 0x61, 0xd4, 0x0e, 0xba, 0x82, 0x3a, 0xc6, 0x5d,
0x51, 0xb0, 0xa1, 0x71, 0x0c, 0x75, 0x47, 0xaa, 0x92, 0x25, 0x34, 0x7f, 0x1c, 0xa1, 0x85, 0x23,
0x08, 0x7d, 0x2e, 0xc1, 0x7c, 0x86, 0xd0, 0xf3, 0x4b, 0xac, 0x5b, 0x30, 0x8b, 0x8f, 0x7d, 0x93,
0xa2, 0xde, 0x1b, 0x52, 0x83, 0x07, 0x20, 0xc0, 0xe4, 0xb5, 0x7a, 0xe8, 0x6e, 0x45, 0x5e, 0xf2,
0x08, 0x2e, 0x8e, 0x0b, 0x60, 0x24, 0xcc, 0x35, 0xf2, 0x42, 0x87, 0xbf, 0x4d, 0xd3, 0x21, 0xe5,
0x59, 0x5b, 0x48, 0xa5, 0x48, 0xbd, 0xea, 0x2f, 0x12, 0x5c, 0x5a, 0xa3, 0x68, 0x04, 0xb8, 0xe6,
0xd9, 0x36, 0x76, 0xf9, 0x92, 0x71, 0x1e, 0xdd, 0x85, 0xb2, 0xc3, 0xfa, 0x7a, 0x30, 0xf2, 0x51,
0xe0, 0xae, 0xaf, 0x5e, 0x9b, 0xb0, 0xd6, 0x3a, 0xeb, 0x6f, 0x8e, 0x7c, 0xd4, 0x4a, 0x4e, 0xf8,
0x41, 0x54, 0xa8, 0x75, 0x93, 0xf9, 0x92, 0xca, 0x91, 0xf1, 0x71, 0x11, 0x29, 0xee, 0xb4, 0x5b,
0x02, 0xb7, 0xac, 0x85, 0x46, 0x96, 0xb5, 0xfc, 0x41, 0xd6, 0x1a, 0x50, 0xf2, 0xa9, 0xf7, 0x78,
0xd4, 0x6e, 0x09, 0xfd, 0x64, 0x2d, 0x36, 0xc9, 0xbf, 0xa0, 0xc8, 0xba, 0x03, 0x74, 0x8c, 0x46,
0x51, 0x48, 0x74, 0xf9, 0x48, 0x89, 0xee, 0xd9, 0xde, 0x96, 0x16, 0x0d, 0x54, 0x3f, 0xc8, 0xc1,
0x62, 0x8b, 0x7a, 0xfe, 0x1f, 0x1c, 0xf9, 0x3a, 0xcc, 0xa6, 0xb3, 0x87, 0xc9, 0x1f, 0x52, 0xf0,
0xd7, 0x6c, 0xcc, 0xd1, 0x41, 0xb4, 0x9c, 0xc2, 0xe5, 0x1b, 0x43, 0xab, 0x77, 0x33, 0xb6, 0xfa,
0x83, 0x04, 0x0b, 0xf7, 0x0d, 0x76, 0xa6, 0xa4, 0x24, 0x80, 0x73, 0x13, 0x01, 0xcb, 0x53, 0x00,
0xe7, 0x8f, 0x05, 0x5c, 0x38, 0x05, 0xe0, 0x9f, 0x24, 0xb8, 0xdc, 0x42, 0xd6, 0xa5, 0xe6, 0x16,
0xbe, 0x3e, 0xa8, 0x3f, 0x91, 0x60, 0xb1, 0x33, 0xf0, 0xf6, 0x5e, 0x5d, 0xc4, 0xea, 0xe7, 0x39,
0xb8, 0x18, 0xd6, 0xa6, 0x8d, 0xb8, 0x48, 0xbf, 0xa4, 0x0d, 0xda, 0x84, 0x6a, 0x72, 0x2e, 0x24,
0xdb, 0x74, 0xdc, 0x95, 0x22, 0xcd, 0x4f, 0x44, 0x5a, 0x98, 0x82, 0xb4, 0x98, 0xd5, 0xf6, 0x0d,
0xa8, 0xa7, 0x87, 0x93, 0x90, 0xb6, 0x24, 0xa4, 0xbd, 0x79, 0xb4, 0xb4, 0x09, 0x1d, 0x42, 0xd9,
0xf4, 0x5c, 0x13, 0xc2, 0x3e, 0xcb, 0xc1, 0x02, 0xaf, 0x6a, 0x7f, 0x72, 0x76, 0x72, 0xce, 0xbe,
0x93, 0x60, 0xfe, 0xbe, 0xc1, 0xce, 0x92, 0xb2, 0xb3, 0xdd, 0xfc, 0x87, 0xc1, 0x16, 0x7e, 0x33,
0xd8, 0xe7, 0x12, 0x34, 0xe2, 0x7a, 0xf7, 0x7a, 0x20, 0xe6, 0x47, 0x1a, 0xaf, 0x75, 0xaf, 0x2e,
0xda, 0x33, 0x2e, 0xee, 0x3f, 0xe6, 0x60, 0xa6, 0xed, 0x32, 0xa4, 0xc1, 0xb9, 0x21, 0xbd, 0x75,
0x38, 0xe2, 0xb0, 0x87, 0x39, 0x10, 0xcb, 0x89, 0x3a, 0x19, 0xce, 0x1b, 0xc3, 0x3e, 0xbf, 0x91,
0x26, 0xf7, 0x9b, 0xd4, 0x91, 0x6d, 0x06, 0xc2, 0x32, 0x30, 0xd6, 0x0c, 0x8c, 0xb1, 0x5a, 0xca,
0xb2, 0x7a, 0x0d, 0x20, 0x21, 0x9f, 0x35, 0xca, 0x4d, 0x79, 0x29, 0xaf, 0x8d, 0x79, 0x78, 0xa3,
0x44, 0xbd, 0xbd, 0x76, 0x8b, 0x35, 0x2a, 0x4d, 0x99, 0x37, 0x4a, 0xa1, 0x45, 0xfe, 0x0d, 0x65,
0xea, 0xed, 0xe9, 0x3d, 0x23, 0x30, 0x1a, 0x20, 0x2e, 0xd9, 0x53, 0x6e, 0x93, 0x25, 0xea, 0xed,
0xb5, 0x8c, 0xc0, 0x50, 0x9f, 0xe4, 0x60, 0xa6, 0x85, 0x36, 0x06, 0xf8, 0xfb, 0x93, 0x9e, 0x61,
0x2c, 0x3f, 0x85, 0xb1, 0xc2, 0x34, 0xc6, 0x8a, 0x87, 0x18, 0xbb, 0x01, 0x35, 0x9f, 0x9a, 0x8e,
0x41, 0x47, 0xba, 0x85, 0x23, 0xd6, 0x28, 0x09, 0xde, 0xaa, 0x91, 0xef, 0x01, 0x8e, 0x98, 0xfa,
0x42, 0x82, 0x99, 0x0e, 0x1a, 0xb4, 0x3b, 0x38, 0x37, 0x1a, 0xc6, 0xe2, 0x97, 0xb3, 0xf1, 0x4f,
0xbf, 0x43, 0xff, 0x1d, 0x14, 0x8a, 0x6c, 0x68, 0x07, 0x7a, 0x4a, 0x4e, 0x48, 0xc0, 0x6c, 0xe8,
0x5f, 0x4b, 0x28, 0x5a, 0x81, 0xc2, 0xce, 0x10, 0xe9, 0xe8, 0xf8, 0x6e, 0x22, 0x1c, 0xa7, 0x7e,
0x2b, 0x81, 0xd2, 0x19, 0xb1, 0x35, 0xcf, 0xdd, 0x36, 0xfb, 0xaf, 0x1c, 0x72, 0x02, 0x79, 0xa1,
0x57, 0xa1, 0x29, 0x2f, 0x55, 0x34, 0xf1, 0xcd, 0xb5, 0xb4, 0x70, 0xa4, 0xfb, 0x14, 0xb7, 0xcd,
0xc7, 0x18, 0xaa, 0x5d, 0xd1, 0xaa, 0x16, 0x8e, 0x36, 0x22, 0x97, 0xfa, 0x34, 0x07, 0xb5, 0x58,
0x4b, 0xce, 0xcf, 0x69, 0x00, 0xa5, 0x3d, 0x74, 0xee, 0xe4, 0x3d, 0xf4, 0xd1, 0x9d, 0xd2, 0xe4,
0x3a, 0x7a, 0x03, 0x6a, 0x42, 0x0e, 0xdd, 0xf5, 0x7a, 0x98, 0xa8, 0x5b, 0x15, 0xbe, 0x87, 0xc2,
0x95, 0x25, 0xaa, 0x78, 0x92, 0x14, 0x29, 0x1d, 0x9d, 0x22, 0x04, 0xf2, 0x03, 0x33, 0x08, 0xeb,
0x4a, 0x4d, 0x13, 0xdf, 0xea, 0xbb, 0x50, 0xdd, 0x34, 0x1d, 0xdc, 0x34, 0xbb, 0xd6, 0x3a, 0xeb,
0x9f, 0x86, 0xae, 0xf4, 0x11, 0x27, 0x97, 0x79, 0xc4, 0x99, 0x7a, 0xc2, 0xa8, 0x1f, 0x49, 0x50,
0x7a, 0x80, 0xa3, 0xd5, 0x0e, 0xf6, 0x05, 0x77, 0xbc, 0x9e, 0xc5, 0x0f, 0x2b, 0xc2, 0x20, 0xd7,
0xa1, 0x3a, 0xb6, 0x83, 0xa3, 0xc9, 0x21, 0xdd, 0xc0, 0xc7, 0x1c, 0x61, 0x97, 0xa1, 0x6c, 0x32,
0x7d, 0xd7, 0xb0, 0xcd, 0x9e, 0xe0, 0xbe, 0xac, 0x95, 0x4c, 0xf6, 0x16, 0x37, 0x79, 0xed, 0x48,
0x4a, 0x76, 0x98, 0x69, 0xb2, 0x36, 0xe6, 0x51, 0x1f, 0x01, 0x44, 0xa1, 0x71, 0x6a, 0x12, 0x65,
0xa5, 0x71, 0x65, 0xff, 0x03, 0x25, 0x0b, 0x47, 0xab, 0x0c, 0xfb, 0x8d, 0x9c, 0x28, 0xbc, 0x93,
0xf8, 0x8a, 0x66, 0xd2, 0xe2, 0xe1, 0xea, 0x87, 0xe1, 0x93, 0x1c, 0x5f, 0x8c, 0xe7, 0x10, 0xcb,
0x1e, 0x29, 0xd2, 0xc1, 0x23, 0xe5, 0x3a, 0x54, 0x1d, 0x74, 0x3c, 0x3a, 0xd2, 0x99, 0xb9, 0x8f,
0x31, 0x0d, 0xa1, 0xab, 0x63, 0xee, 0x23, 0x07, 0xea, 0x0e, 0x1d, 0x9d, 0x7a, 0x7b, 0x2c, 0xde,
0x6a, 0xee, 0xd0, 0xd1, 0xbc, 0x3d, 0x46, 0xfe, 0x01, 0x73, 0x14, 0xbb, 0xe8, 0x06, 0xf6, 0x48,
0x77, 0xbc, 0x9e, 0xb9, 0x6d, 0x62, 0x4c, 0x86, 0x12, 0xff, 0xb0, 0x1e, 0xf9, 0xd5, 0x4f, 0x25,
0x98, 0x7b, 0x33, 0x4e, 0xbf, 0x0e, 0xf6, 0xc3, 0xe0, 0xce, 0x21, 0x31, 0xfe, 0x2b, 0xf0, 0xea,
0x7c, 0xe3, 0xb0, 0xe3, 0x9f, 0xe8, 0x12, 0x9e, 0xb4, 0x32, 0x8b, 0x82, 0xba, 0xfd, 0x7d, 0x0e,
0x4a, 0xd1, 0x72, 0xa4, 0x02, 0x05, 0xeb, 0xa1, 0xe7, 0xa2, 0x72, 0x81, 0x2c, 0xc2, 0x9c, 0x75,
0xf0, 0x89, 0x48, 0xe9, 0x91, 0x79, 0x98, 0xb5, 0xb2, 0xaf, 0x27, 0x0a, 0x12, 0x02, 0x75, 0x2b,
0xf3, 0x78, 0xa0, 0x6c, 0x93, 0x4b, 0x30, 0x6f, 0x1d, 0xee, 0xaf, 0x15, 0x9e, 0x02, 0x8a, 0x95,
0x6d, 0x41, 0x99, 0x32, 0x10, 0x53, 0xfc, 0x1f, 0x83, 0xa4, 0x96, 0x32, 0xc5, 0x24, 0x8b, 0xa0,
0x58, 0x07, 0x3a, 0x41, 0xe5, 0x0b, 0x89, 0xcc, 0x43, 0xdd, 0xca, 0xb4, 0x3a, 0xca, 0x97, 0x12,
0x21, 0x30, 0x63, 0x8d, 0xdf, 0xe5, 0x95, 0xaf, 0x24, 0x72, 0x09, 0x88, 0x75, 0xe8, 0xca, 0xab,
0x7c, 0x2d, 0x91, 0x05, 0x98, 0xb5, 0x32, 0x37, 0x43, 0xa6, 0x7c, 0x23, 0x91, 0x1a, 0x94, 0xac,
0xf0, 0xfa, 0xa4, 0xbc, 0x27, 0x0b, 0x2b, 0x3c, 0xd7, 0x95, 0xf7, 0x43, 0x2b, 0x2c, 0x89, 0xca,
0x0b, 0x59, 0x2c, 0x36, 0x5e, 0x20, 0x95, 0x9f, 0x65, 0x52, 0x87, 0x8a, 0x15, 0x17, 0x01, 0xe5,
0xb3, 0x8a, 0x58, 0xfc, 0x50, 0x06, 0x28, 0xcf, 0x2a, 0xb7, 0xef, 0x42, 0x39, 0x7e, 0x6c, 0x25,
0x00, 0xc5, 0x75, 0x83, 0x05, 0x48, 0x95, 0x0b, 0xfc, 0x5b, 0x43, 0xa3, 0x87, 0x54, 0x91, 0xf8,
0xf7, 0xdb, 0xd4, 0xe4, 0xfe, 0x1c, 0xd7, 0x64, 0x83, 0xd7, 0x39, 0x45, 0xbe, 0xd7, 0x7a, 0xe7,
0x5e, 0xdf, 0x0c, 0x06, 0xc3, 0x2d, 0x5e, 0x37, 0x57, 0xf6, 0x4d, 0xdb, 0x36, 0xf7, 0x03, 0xec,
0x0e, 0x56, 0x42, 0xc1, 0xff, 0xd9, 0x33, 0x59, 0x40, 0xcd, 0xad, 0x61, 0x80, 0xbd, 0x95, 0x58,
0xf6, 0x15, 0x91, 0x05, 0x89, 0xe9, 0x6f, 0x6d, 0x15, 0x85, 0xe7, 0xce, 0xaf, 0x01, 0x00, 0x00,
0xff, 0xff, 0x9f, 0x3a, 0x8e, 0x13, 0xdd, 0x18, 0x00, 0x00,
}

View File

@ -29,12 +29,13 @@ enum DataType {
* @brief Field schema
*/
message FieldSchema {
  // Unique ID of the field within its collection.
  int64 fieldID = 1;
  string name = 2;
  bool is_primary_key = 3;
  string description = 4;
  DataType data_type = 5;
  repeated common.KeyValuePair type_params = 6;
  repeated common.KeyValuePair index_params = 7;
}
/**

View File

@ -78,12 +78,13 @@ func (DataType) EnumDescriptor() ([]byte, []int) {
//*
// @brief Field schema
type FieldSchema struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
IsPrimaryKey bool `protobuf:"varint,2,opt,name=is_primary_key,json=isPrimaryKey,proto3" json:"is_primary_key,omitempty"`
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
DataType DataType `protobuf:"varint,4,opt,name=data_type,json=dataType,proto3,enum=milvus.proto.schema.DataType" json:"data_type,omitempty"`
TypeParams []*commonpb.KeyValuePair `protobuf:"bytes,5,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
IndexParams []*commonpb.KeyValuePair `protobuf:"bytes,6,rep,name=index_params,json=indexParams,proto3" json:"index_params,omitempty"`
FieldID int64 `protobuf:"varint,1,opt,name=fieldID,proto3" json:"fieldID,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
IsPrimaryKey bool `protobuf:"varint,3,opt,name=is_primary_key,json=isPrimaryKey,proto3" json:"is_primary_key,omitempty"`
Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
DataType DataType `protobuf:"varint,5,opt,name=data_type,json=dataType,proto3,enum=milvus.proto.schema.DataType" json:"data_type,omitempty"`
TypeParams []*commonpb.KeyValuePair `protobuf:"bytes,6,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
IndexParams []*commonpb.KeyValuePair `protobuf:"bytes,7,rep,name=index_params,json=indexParams,proto3" json:"index_params,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -114,6 +115,13 @@ func (m *FieldSchema) XXX_DiscardUnknown() {
var xxx_messageInfo_FieldSchema proto.InternalMessageInfo
// GetFieldID returns the FieldID field; it is safe to call on a nil
// receiver, in which case it returns 0.
func (m *FieldSchema) GetFieldID() int64 {
	if m == nil {
		return 0
	}
	return m.FieldID
}
func (m *FieldSchema) GetName() string {
if m != nil {
return m.Name
@ -230,34 +238,35 @@ func init() {
func init() { proto.RegisterFile("schema.proto", fileDescriptor_1c5fb4d8cc22d66a) }
var fileDescriptor_1c5fb4d8cc22d66a = []byte{
// 451 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xdf, 0x6a, 0xd4, 0x40,
0x18, 0xc5, 0xcd, 0xfe, 0x09, 0xd9, 0x2f, 0xb1, 0x8c, 0xa3, 0x48, 0x10, 0x84, 0x58, 0xbc, 0x58,
0x04, 0x37, 0xb8, 0x95, 0x52, 0xbc, 0xb2, 0xe9, 0x6e, 0x25, 0x74, 0x49, 0x96, 0x69, 0x2c, 0xe8,
0x4d, 0x98, 0x4d, 0x46, 0x77, 0x30, 0xff, 0x48, 0x26, 0x62, 0xfa, 0x16, 0xde, 0xfa, 0x12, 0xbe,
0xa2, 0xcc, 0x24, 0x85, 0xaa, 0x45, 0xbc, 0xfb, 0x7d, 0xdf, 0xcc, 0x39, 0xcc, 0x39, 0x03, 0x56,
0x93, 0xec, 0x59, 0x4e, 0x17, 0x55, 0x5d, 0x8a, 0x12, 0x3f, 0xcc, 0x79, 0xf6, 0xb5, 0x6d, 0xfa,
0x69, 0xd1, 0x1f, 0x3d, 0xb1, 0x92, 0x32, 0xcf, 0xcb, 0xa2, 0x5f, 0x1e, 0xfe, 0x1c, 0x81, 0x79,
0xce, 0x59, 0x96, 0x5e, 0xaa, 0x53, 0x8c, 0x61, 0x52, 0xd0, 0x9c, 0xd9, 0x9a, 0xa3, 0xcd, 0x67,
0x44, 0x31, 0x7e, 0x0e, 0x07, 0xbc, 0x89, 0xab, 0x9a, 0xe7, 0xb4, 0xee, 0xe2, 0x2f, 0xac, 0xb3,
0x47, 0x8e, 0x36, 0x37, 0x88, 0xc5, 0x9b, 0x6d, 0xbf, 0xbc, 0x60, 0x1d, 0x76, 0xc0, 0x4c, 0x59,
0x93, 0xd4, 0xbc, 0x12, 0xbc, 0x2c, 0xec, 0xb1, 0x32, 0xb8, 0xbd, 0xc2, 0x6f, 0x60, 0x96, 0x52,
0x41, 0x63, 0xd1, 0x55, 0xcc, 0x9e, 0x38, 0xda, 0xfc, 0x60, 0xf9, 0x74, 0x71, 0xc7, 0x13, 0x17,
0x2b, 0x2a, 0x68, 0xd4, 0x55, 0x8c, 0x18, 0xe9, 0x40, 0xd8, 0x03, 0x53, 0xca, 0xe2, 0x8a, 0xd6,
0x34, 0x6f, 0xec, 0xa9, 0x33, 0x9e, 0x9b, 0xcb, 0x67, 0xbf, 0xab, 0x87, 0x60, 0x17, 0xac, 0xbb,
0xa2, 0x59, 0xcb, 0xb6, 0x94, 0xd7, 0x04, 0xa4, 0x6a, 0xab, 0x44, 0x78, 0x05, 0x16, 0x2f, 0x52,
0xf6, 0xed, 0xc6, 0x44, 0xff, 0x5f, 0x13, 0x53, 0xc9, 0x7a, 0x97, 0xc3, 0x1f, 0x1a, 0xa0, 0xb3,
0x32, 0xcb, 0x58, 0x22, 0x43, 0xfd, 0xa3, 0xb6, 0x3f, 0x0a, 0x19, 0xfd, 0x5d, 0xc8, 0x63, 0xd0,
0x69, 0x2b, 0x4a, 0x7f, 0xa5, 0xda, 0x32, 0xc8, 0x30, 0xe1, 0x13, 0xd0, 0x3f, 0xc9, 0x3f, 0x69,
0xec, 0x89, 0x7a, 0xa2, 0x73, 0x67, 0x4b, 0xb7, 0xbe, 0x8d, 0x0c, 0xf7, 0x5f, 0x7c, 0xd7, 0xc0,
0xb8, 0x69, 0x0f, 0x1b, 0x30, 0x09, 0xc2, 0x60, 0x8d, 0xee, 0x49, 0xf2, 0xc2, 0x70, 0x83, 0x34,
0x49, 0x7e, 0x10, 0x9d, 0xa0, 0x11, 0x9e, 0xc1, 0xd4, 0x0f, 0xa2, 0x57, 0xc7, 0x68, 0x3c, 0xe0,
0xd1, 0x12, 0x4d, 0x06, 0x3c, 0x7e, 0x8d, 0xa6, 0x12, 0xcf, 0x37, 0xe1, 0x69, 0x84, 0x00, 0x03,
0xe8, 0xab, 0xf0, 0xbd, 0xb7, 0x59, 0x23, 0x53, 0xf2, 0x65, 0x44, 0xfc, 0xe0, 0x1d, 0x7a, 0x84,
0x1f, 0xc0, 0xfd, 0xab, 0xf5, 0x59, 0x14, 0x92, 0xd8, 0xf3, 0x83, 0x53, 0xf2, 0x01, 0xa5, 0x18,
0x81, 0x35, 0xac, 0x7a, 0x31, 0xf3, 0xbc, 0x8f, 0x6f, 0x3f, 0x73, 0xb1, 0x6f, 0x77, 0xb2, 0x5b,
0xf7, 0x9a, 0x67, 0x19, 0xbf, 0x16, 0x2c, 0xd9, 0xbb, 0x7d, 0xa8, 0x97, 0x29, 0x6f, 0x44, 0xcd,
0x77, 0xad, 0x60, 0xa9, 0xcb, 0x0b, 0xc1, 0xea, 0x82, 0x66, 0xae, 0x4a, 0xea, 0xf6, 0x49, 0xab,
0xdd, 0x4e, 0x57, 0xf3, 0xd1, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x14, 0xa6, 0xc9, 0x55, 0xe0,
// 467 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xdf, 0x8a, 0xd3, 0x40,
0x18, 0xc5, 0x4d, 0x9b, 0x66, 0xdb, 0x2f, 0x71, 0x19, 0x47, 0x91, 0x20, 0x08, 0x71, 0xf1, 0x22,
0x08, 0x36, 0xd8, 0x95, 0x65, 0xf1, 0xca, 0xcd, 0xa6, 0x2b, 0x61, 0x4b, 0x52, 0xb2, 0x71, 0x41,
0x6f, 0xc2, 0x34, 0x19, 0xed, 0x60, 0xfe, 0x91, 0x4c, 0xc4, 0xec, 0x5b, 0x78, 0xeb, 0x1b, 0xf9,
0x56, 0x92, 0x64, 0x0a, 0x55, 0x7b, 0xb1, 0x77, 0xe7, 0x7c, 0x33, 0xe7, 0x63, 0xce, 0x6f, 0x40,
0xab, 0xe3, 0x2d, 0xcd, 0xc8, 0xbc, 0xac, 0x0a, 0x5e, 0xe0, 0xc7, 0x19, 0x4b, 0xbf, 0x37, 0xf5,
0xe0, 0xe6, 0xc3, 0xd1, 0x33, 0x2d, 0x2e, 0xb2, 0xac, 0xc8, 0x87, 0xe1, 0xc9, 0xef, 0x11, 0xa8,
0x57, 0x8c, 0xa6, 0xc9, 0x4d, 0x7f, 0x8a, 0x75, 0x38, 0xfa, 0xd2, 0x59, 0xd7, 0xd1, 0x25, 0x43,
0x32, 0xc7, 0xc1, 0xce, 0x62, 0x0c, 0x72, 0x4e, 0x32, 0xaa, 0x8f, 0x0c, 0xc9, 0x9c, 0x05, 0xbd,
0xc6, 0x2f, 0xe1, 0x98, 0xd5, 0x51, 0x59, 0xb1, 0x8c, 0x54, 0x6d, 0xf4, 0x8d, 0xb6, 0xfa, 0xd8,
0x90, 0xcc, 0x69, 0xa0, 0xb1, 0x7a, 0x3d, 0x0c, 0xaf, 0x69, 0x8b, 0x0d, 0x50, 0x13, 0x5a, 0xc7,
0x15, 0x2b, 0x39, 0x2b, 0x72, 0x5d, 0xee, 0x17, 0xec, 0x8f, 0xf0, 0x3b, 0x98, 0x25, 0x84, 0x93,
0x88, 0xb7, 0x25, 0xd5, 0x27, 0x86, 0x64, 0x1e, 0x2f, 0x9e, 0xcf, 0x0f, 0x3c, 0x7e, 0xee, 0x10,
0x4e, 0xc2, 0xb6, 0xa4, 0xc1, 0x34, 0x11, 0x0a, 0xdb, 0xa0, 0x76, 0xb1, 0xa8, 0x24, 0x15, 0xc9,
0x6a, 0x5d, 0x31, 0xc6, 0xa6, 0xba, 0x78, 0xf1, 0x77, 0x5a, 0x54, 0xbe, 0xa6, 0xed, 0x2d, 0x49,
0x1b, 0xba, 0x26, 0xac, 0x0a, 0xa0, 0x4b, 0xad, 0xfb, 0x10, 0x76, 0x40, 0x63, 0x79, 0x42, 0x7f,
0xec, 0x96, 0x1c, 0xdd, 0x77, 0x89, 0xda, 0xc7, 0x86, 0x2d, 0x27, 0xbf, 0x24, 0x40, 0x97, 0x45,
0x9a, 0xd2, 0xb8, 0x2b, 0x25, 0x80, 0xee, 0xb0, 0x49, 0x7b, 0xd8, 0xfe, 0x01, 0x32, 0xfa, 0x1f,
0xc8, 0x53, 0x50, 0x48, 0xc3, 0x0b, 0xd7, 0x11, 0x40, 0x85, 0xc3, 0xe7, 0xa0, 0xf4, 0xff, 0x51,
0xeb, 0x72, 0xff, 0x44, 0xe3, 0x20, 0xa5, 0xbd, 0x0f, 0x0d, 0xc4, 0xfd, 0x57, 0x3f, 0x25, 0x98,
0xee, 0xe8, 0xe1, 0x29, 0xc8, 0x9e, 0xef, 0x2d, 0xd1, 0x83, 0x4e, 0xd9, 0xbe, 0xbf, 0x42, 0x52,
0xa7, 0x5c, 0x2f, 0x3c, 0x47, 0x23, 0x3c, 0x83, 0x89, 0xeb, 0x85, 0x6f, 0xce, 0xd0, 0x58, 0xc8,
0xd3, 0x05, 0x92, 0x85, 0x3c, 0x7b, 0x8b, 0x26, 0x9d, 0xbc, 0x5a, 0xf9, 0x17, 0x21, 0x02, 0x0c,
0xa0, 0x38, 0xfe, 0x47, 0x7b, 0xb5, 0x44, 0x6a, 0xa7, 0x6f, 0xc2, 0xc0, 0xf5, 0x3e, 0xa0, 0x27,
0xf8, 0x11, 0x3c, 0xbc, 0x5d, 0x5e, 0x86, 0x7e, 0x10, 0xd9, 0xae, 0x77, 0x11, 0x7c, 0x42, 0x09,
0x46, 0xa0, 0x89, 0xd1, 0x10, 0xa6, 0xb6, 0xfd, 0xf9, 0xfd, 0x57, 0xc6, 0xb7, 0xcd, 0xa6, 0x63,
0x6b, 0xdd, 0xb1, 0x34, 0x65, 0x77, 0x9c, 0xc6, 0x5b, 0x6b, 0x28, 0xf5, 0x3a, 0x61, 0x35, 0xaf,
0xd8, 0xa6, 0xe1, 0x34, 0xb1, 0x58, 0xce, 0x69, 0x95, 0x93, 0xd4, 0xea, 0x9b, 0x5a, 0x43, 0xd3,
0x72, 0xb3, 0x51, 0x7a, 0x7f, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x8a, 0xaf, 0x4d, 0x07, 0xfa,
0x02, 0x00, 0x00,
}

View File

@ -429,8 +429,8 @@ func (qt *QueryTask) PostExecute() error {
}
}
rlen := len(filterSearchResult) // query node num
if rlen <= 0 {
availableQueryNodeNum := len(filterSearchResult)
if availableQueryNodeNum <= 0 {
qt.result = &servicepb.QueryResult{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
@ -440,26 +440,48 @@ func (qt *QueryTask) PostExecute() error {
return errors.New(filterReason)
}
n := len(filterSearchResult[0].Hits) // n
if n <= 0 {
qt.result = &servicepb.QueryResult{}
return nil
}
hits := make([][]*servicepb.Hits, rlen)
for i, partialSearchResult := range filterSearchResult {
hits[i] = make([]*servicepb.Hits, n)
for j, bs := range partialSearchResult.Hits {
hits[i][j] = &servicepb.Hits{}
err := proto.Unmarshal(bs, hits[i][j])
hits := make([][]*servicepb.Hits, 0)
for _, partialSearchResult := range filterSearchResult {
if len(partialSearchResult.Hits) <= 0 {
filterReason += "nq is zero\n"
continue
}
partialHits := make([]*servicepb.Hits, 0)
for _, bs := range partialSearchResult.Hits {
partialHit := &servicepb.Hits{}
err := proto.Unmarshal(bs, partialHit)
if err != nil {
log.Println("unmarshal error")
return err
}
partialHits = append(partialHits, partialHit)
}
hits = append(hits, partialHits)
}
k := len(hits[0][0].IDs)
availableQueryNodeNum = len(hits)
if availableQueryNodeNum <= 0 {
qt.result = &servicepb.QueryResult{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: filterReason,
},
}
return nil
}
nq := len(hits[0])
if nq <= 0 {
qt.result = &servicepb.QueryResult{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: filterReason,
},
}
return nil
}
topk := len(hits[0][0].IDs)
qt.result = &servicepb.QueryResult{
Status: &commonpb.Status{
ErrorCode: 0,
@ -467,15 +489,15 @@ func (qt *QueryTask) PostExecute() error {
Hits: make([][]byte, 0),
}
for i := 0; i < n; i++ { // n
locs := make([]int, rlen)
for i := 0; i < nq; i++ {
locs := make([]int, availableQueryNodeNum)
reducedHits := &servicepb.Hits{
IDs: make([]int64, 0),
RowData: make([][]byte, 0),
Scores: make([]float32, 0),
}
for j := 0; j < k; j++ { // k
for j := 0; j < topk; j++ {
choice, minDistance := 0, float32(math.MaxFloat32)
for q, loc := range locs { // query num, the number of ways to merge
distance := hits[q][i].Scores[loc]

View File

@ -11,37 +11,47 @@ package querynode
*/
import "C"
import (
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
)
import "unsafe"
type Collection struct {
collectionPtr C.CCollection
meta *etcdpb.CollectionMeta
id UniqueID
name string
partitions []*Partition
}
func (c *Collection) Name() string {
return (*c.meta).Schema.Name
return c.name
}
func (c *Collection) ID() UniqueID {
return (*c.meta).ID
return c.id
}
func (c *Collection) Partitions() *[]*Partition {
return &c.partitions
}
func newCollection(collMeta *etcdpb.CollectionMeta, collMetaBlob string) *Collection {
func newCollection(collectionID UniqueID, schemaBlob string) *Collection {
/*
CCollection
newCollection(const char* schema_conf);
*/
cCollMetaBlob := C.CString(collMetaBlob)
collection := C.NewCollection(cCollMetaBlob)
NewCollection(const char* schema_proto_blob);
var newCollection = &Collection{collectionPtr: collection, meta: collMeta}
const char*
GetCollectionName(CCollection collection);
*/
cSchemaBlob := C.CString(schemaBlob)
collection := C.NewCollection(cSchemaBlob)
name := C.GetCollectionName(collection)
collectionName := C.GoString(name)
defer C.free(unsafe.Pointer(name))
var newCollection = &Collection{
collectionPtr: collection,
id: collectionID,
name: collectionName,
}
return newCollection
}

View File

@ -13,13 +13,13 @@ package querynode
import "C"
import (
"fmt"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"log"
"strconv"
"sync"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
/*
@ -35,7 +35,7 @@ type collectionReplica interface {
// collection
getCollectionNum() int
addCollection(collMeta *etcdpb.CollectionMeta, colMetaBlob string) error
addCollection(collectionID UniqueID, schemaBlob string) error
removeCollection(collectionID UniqueID) error
getCollectionByID(collectionID UniqueID) (*Collection, error)
getCollectionByName(collectionName string) (*Collection, error)
@ -84,11 +84,11 @@ func (colReplica *collectionReplicaImpl) getCollectionNum() int {
return len(colReplica.collections)
}
func (colReplica *collectionReplicaImpl) addCollection(collMeta *etcdpb.CollectionMeta, colMetaBlob string) error {
func (colReplica *collectionReplicaImpl) addCollection(collectionID UniqueID, schemaBlob string) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
var newCollection = newCollection(collMeta, colMetaBlob)
var newCollection = newCollection(collectionID, schemaBlob)
colReplica.collections = append(colReplica.collections, newCollection)
return nil

View File

@ -39,8 +39,8 @@ func TestCollectionReplica_getCollectionByID(t *testing.T) {
targetCollection, err := node.replica.getCollectionByID(collectionID)
assert.NoError(t, err)
assert.NotNil(t, targetCollection)
assert.Equal(t, targetCollection.meta.Schema.Name, collectionName)
assert.Equal(t, targetCollection.meta.ID, collectionID)
assert.Equal(t, targetCollection.Name(), collectionName)
assert.Equal(t, targetCollection.ID(), collectionID)
node.Close()
}
@ -53,8 +53,8 @@ func TestCollectionReplica_getCollectionByName(t *testing.T) {
targetCollection, err := node.replica.getCollectionByName(collectionName)
assert.NoError(t, err)
assert.NotNil(t, targetCollection)
assert.Equal(t, targetCollection.meta.Schema.Name, collectionName)
assert.Equal(t, targetCollection.meta.ID, collectionID)
assert.Equal(t, targetCollection.Name(), collectionName)
assert.Equal(t, targetCollection.ID(), collectionID)
node.Close()
}

View File

@ -25,12 +25,12 @@ func TestCollection_newCollection(t *testing.T) {
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
}
func TestCollection_deleteCollection(t *testing.T) {
@ -38,11 +38,11 @@ func TestCollection_deleteCollection(t *testing.T) {
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
deleteCollection(collection)
}

View File

@ -41,15 +41,24 @@ func (dsService *dataSyncService) initNodes() {
dsService.fg = flowgraph.NewTimeTickedFlowGraph(dsService.ctx)
var dmStreamNode Node = newDmInputNode(dsService.ctx)
var ddStreamNode Node = newDDInputNode(dsService.ctx)
var filterDmNode Node = newFilteredDmNode()
var ddNode Node = newDDNode(dsService.replica)
var insertNode Node = newInsertNode(dsService.replica)
var serviceTimeNode Node = newServiceTimeNode(dsService.replica)
dsService.fg.AddNode(&dmStreamNode)
dsService.fg.AddNode(&ddStreamNode)
dsService.fg.AddNode(&filterDmNode)
dsService.fg.AddNode(&ddNode)
dsService.fg.AddNode(&insertNode)
dsService.fg.AddNode(&serviceTimeNode)
// dmStreamNode
var err = dsService.fg.SetEdges(dmStreamNode.Name(),
[]string{},
[]string{filterDmNode.Name()},
@ -58,14 +67,34 @@ func (dsService *dataSyncService) initNodes() {
log.Fatal("set edges failed in node:", dmStreamNode.Name())
}
// ddStreamNode
err = dsService.fg.SetEdges(ddStreamNode.Name(),
[]string{},
[]string{ddNode.Name()},
)
if err != nil {
log.Fatal("set edges failed in node:", ddStreamNode.Name())
}
// filterDmNode
err = dsService.fg.SetEdges(filterDmNode.Name(),
[]string{dmStreamNode.Name()},
[]string{dmStreamNode.Name(), ddNode.Name()},
[]string{insertNode.Name()},
)
if err != nil {
log.Fatal("set edges failed in node:", filterDmNode.Name())
}
// ddNode
err = dsService.fg.SetEdges(ddNode.Name(),
[]string{ddStreamNode.Name()},
[]string{filterDmNode.Name()},
)
if err != nil {
log.Fatal("set edges failed in node:", ddNode.Name())
}
// insertNode
err = dsService.fg.SetEdges(insertNode.Name(),
[]string{filterDmNode.Name()},
[]string{serviceTimeNode.Name()},
@ -74,6 +103,7 @@ func (dsService *dataSyncService) initNodes() {
log.Fatal("set edges failed in node:", insertNode.Name())
}
// serviceTimeNode
err = dsService.fg.SetEdges(serviceTimeNode.Name(),
[]string{insertNode.Name()},
[]string{},

View File

@ -14,7 +14,6 @@ import (
// NOTE: start pulsar before test
func TestDataSyncService_Start(t *testing.T) {
node := newQueryNode()
initTestMeta(t, node, "collection0", 0, 0)
// test data generate
@ -99,26 +98,36 @@ func TestDataSyncService_Start(t *testing.T) {
// pulsar produce
const receiveBufSize = 1024
producerChannels := Params.insertChannelNames()
pulsarURL, _ := Params.pulsarAddress()
insertChannels := Params.InsertChannelNames
ddChannels := Params.DDChannelNames
pulsarURL := Params.PulsarAddress
insertStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
insertStream.SetPulsarClient(pulsarURL)
insertStream.CreatePulsarProducers(producerChannels)
insertStream.CreatePulsarProducers(insertChannels)
ddStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
ddStream.SetPulsarClient(pulsarURL)
ddStream.CreatePulsarProducers(ddChannels)
var insertMsgStream msgstream.MsgStream = insertStream
insertMsgStream.Start()
var ddMsgStream msgstream.MsgStream = ddStream
ddMsgStream.Start()
err := insertMsgStream.Produce(&msgPack)
assert.NoError(t, err)
err = insertMsgStream.Broadcast(&timeTickMsgPack)
assert.NoError(t, err)
err = ddMsgStream.Broadcast(&timeTickMsgPack)
assert.NoError(t, err)
// dataSync
node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, node.replica)
go node.dataSyncService.start()
<-node.queryNodeLoopCtx.Done()
node.Close()
}

View File

@ -0,0 +1,183 @@
package querynode
import (
"log"
"sort"
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)
type ddNode struct {
BaseNode
ddMsg *ddMsg
replica collectionReplica
}
type metaOperateRecord struct {
createOrDrop bool // create: true, drop: false
timestamp Timestamp
}
func (ddNode *ddNode) Name() string {
return "ddNode"
}
func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
//fmt.Println("Do filterDmNode operation")
if len(in) != 1 {
log.Println("Invalid operate message input in ddNode, input length = ", len(in))
// TODO: add error handling
}
msMsg, ok := (*in[0]).(*MsgStreamMsg)
if !ok {
log.Println("type assertion failed for MsgStreamMsg")
// TODO: add error handling
}
var ddMsg = ddMsg{
collectionRecords: make(map[string][]metaOperateRecord),
partitionRecords: make(map[string][]metaOperateRecord),
timeRange: TimeRange{
timestampMin: msMsg.TimestampMin(),
timestampMax: msMsg.TimestampMax(),
},
}
ddNode.ddMsg = &ddMsg
// sort tsMessages
tsMessages := msMsg.TsMessages()
sort.Slice(tsMessages,
func(i, j int) bool {
return tsMessages[i].BeginTs() < tsMessages[j].BeginTs()
})
// do dd tasks
for _, msg := range tsMessages {
switch msg.Type() {
case internalPb.MsgType_kCreateCollection:
ddNode.createCollection(msg.(*msgstream.CreateCollectionMsg))
case internalPb.MsgType_kDropCollection:
ddNode.dropCollection(msg.(*msgstream.DropCollectionMsg))
case internalPb.MsgType_kCreatePartition:
ddNode.createPartition(msg.(*msgstream.CreatePartitionMsg))
case internalPb.MsgType_kDropPartition:
ddNode.dropPartition(msg.(*msgstream.DropPartitionMsg))
default:
log.Println("Non supporting message type:", msg.Type())
}
}
var res Msg = ddNode.ddMsg
return []*Msg{&res}
}
func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
collectionID := msg.CollectionID
hasCollection := ddNode.replica.hasCollection(collectionID)
if hasCollection {
log.Println("collection already exists, id = ", collectionID)
return
}
var schema schemapb.CollectionSchema
err := proto.Unmarshal((*msg.Schema).Value, &schema)
if err != nil {
log.Println(err)
return
}
schemaBlob := proto.MarshalTextString(&schema)
// add collection
err = ddNode.replica.addCollection(collectionID, schemaBlob)
if err != nil {
log.Println(err)
return
}
// add default partition
err = ddNode.replica.addPartition(collectionID, Params.DefaultPartitionTag)
if err != nil {
log.Println(err)
return
}
collectionName := schema.Name
ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
metaOperateRecord{
createOrDrop: true,
timestamp: msg.Timestamp,
})
}
func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
collectionID := msg.CollectionID
err := ddNode.replica.removeCollection(collectionID)
if err != nil {
log.Println(err)
return
}
collectionName := msg.CollectionName.CollectionName
ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
metaOperateRecord{
createOrDrop: false,
timestamp: msg.Timestamp,
})
}
func (ddNode *ddNode) createPartition(msg *msgstream.CreatePartitionMsg) {
collectionID := msg.CollectionID
partitionTag := msg.PartitionName.Tag
err := ddNode.replica.addPartition(collectionID, partitionTag)
if err != nil {
log.Println(err)
return
}
ddNode.ddMsg.partitionRecords[partitionTag] = append(ddNode.ddMsg.partitionRecords[partitionTag],
metaOperateRecord{
createOrDrop: true,
timestamp: msg.Timestamp,
})
}
func (ddNode *ddNode) dropPartition(msg *msgstream.DropPartitionMsg) {
collectionID := msg.CollectionID
partitionTag := msg.PartitionName.Tag
err := ddNode.replica.removePartition(collectionID, partitionTag)
if err != nil {
log.Println(err)
return
}
ddNode.ddMsg.partitionRecords[partitionTag] = append(ddNode.ddMsg.partitionRecords[partitionTag],
metaOperateRecord{
createOrDrop: false,
timestamp: msg.Timestamp,
})
}
func newDDNode(replica collectionReplica) *ddNode {
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
baseNode := BaseNode{}
baseNode.SetMaxQueueLength(maxQueueLength)
baseNode.SetMaxParallelism(maxParallelism)
return &ddNode{
BaseNode: baseNode,
replica: replica,
}
}

View File

@ -14,8 +14,8 @@ func (dNode *deleteNode) Operate(in []*Msg) []*Msg {
}
func newDeleteNode() *deleteNode {
maxQueueLength := Params.flowGraphMaxQueueLength()
maxParallelism := Params.flowGraphMaxParallelism()
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
baseNode := BaseNode{}
baseNode.SetMaxQueueLength(maxQueueLength)

View File

@ -4,10 +4,12 @@ import (
"log"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
type filterDmNode struct {
ddMsg *ddMsg
BaseNode
}
@ -18,28 +20,38 @@ func (fdmNode *filterDmNode) Name() string {
func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
//fmt.Println("Do filterDmNode operation")
if len(in) != 1 {
if len(in) != 2 {
log.Println("Invalid operate message input in filterDmNode, input length = ", len(in))
// TODO: add error handling
}
msMsg, ok := (*in[0]).(*MsgStreamMsg)
msgStreamMsg, ok := (*in[0]).(*MsgStreamMsg)
if !ok {
log.Println("type assertion failed for MsgStreamMsg")
// TODO: add error handling
}
ddMsg, ok := (*in[1]).(*ddMsg)
if !ok {
log.Println("type assertion failed for ddMsg")
// TODO: add error handling
}
fdmNode.ddMsg = ddMsg
var iMsg = insertMsg{
insertMessages: make([]*msgstream.InsertMsg, 0),
timeRange: TimeRange{
timestampMin: msMsg.TimestampMin(),
timestampMax: msMsg.TimestampMax(),
timestampMin: msgStreamMsg.TimestampMin(),
timestampMax: msgStreamMsg.TimestampMax(),
},
}
for _, msg := range msMsg.TsMessages() {
for _, msg := range msgStreamMsg.TsMessages() {
switch msg.Type() {
case internalPb.MsgType_kInsert:
iMsg.insertMessages = append(iMsg.insertMessages, msg.(*msgstream.InsertMsg))
resMsg := fdmNode.filterInvalidInsertMessage(msg.(*msgstream.InsertMsg))
if resMsg != nil {
iMsg.insertMessages = append(iMsg.insertMessages, resMsg)
}
// case internalPb.MsgType_kDelete:
// dmMsg.deleteMessages = append(dmMsg.deleteMessages, (*msg).(*msgstream.DeleteTask))
default:
@ -51,9 +63,44 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
return []*Msg{&res}
}
func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg) *msgstream.InsertMsg {
// No dd record, do all insert requests.
records, ok := fdmNode.ddMsg.collectionRecords[msg.CollectionName]
if !ok {
return msg
}
// If the last record is drop type, all insert requests are invalid.
if !records[len(records)-1].createOrDrop {
return nil
}
// Filter insert requests before last record.
if len(msg.RowIDs) != len(msg.Timestamps) || len(msg.RowIDs) != len(msg.RowData) {
// TODO: what if the messages are misaligned? Here, we ignore those messages and print error
log.Println("Error, misaligned messages detected")
return nil
}
tmpTimestamps := make([]Timestamp, 0)
tmpRowIDs := make([]int64, 0)
tmpRowData := make([]*commonpb.Blob, 0)
targetTimestamp := records[len(records)-1].timestamp
for i, t := range msg.Timestamps {
if t >= targetTimestamp {
tmpTimestamps = append(tmpTimestamps, t)
tmpRowIDs = append(tmpRowIDs, msg.RowIDs[i])
tmpRowData = append(tmpRowData, msg.RowData[i])
}
}
msg.Timestamps = tmpTimestamps
msg.RowIDs = tmpRowIDs
msg.RowData = tmpRowData
return msg
}
func newFilteredDmNode() *filterDmNode {
maxQueueLength := Params.flowGraphMaxQueueLength()
maxParallelism := Params.flowGraphMaxParallelism()
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
baseNode := BaseNode{}
baseNode.SetMaxQueueLength(maxQueueLength)

View File

@ -47,12 +47,6 @@ func (iNode *insertNode) Operate(in []*Msg) []*Msg {
// 1. hash insertMessages to insertData
for _, task := range iMsg.insertMessages {
if len(task.RowIDs) != len(task.Timestamps) || len(task.RowIDs) != len(task.RowData) {
// TODO: what if the messages are misaligned?
// Here, we ignore those messages and print error
log.Println("Error, misaligned messages detected")
continue
}
insertData.insertIDs[task.SegmentID] = append(insertData.insertIDs[task.SegmentID], task.RowIDs...)
insertData.insertTimestamps[task.SegmentID] = append(insertData.insertTimestamps[task.SegmentID], task.Timestamps...)
insertData.insertRecords[task.SegmentID] = append(insertData.insertRecords[task.SegmentID], task.RowData...)
@ -128,8 +122,8 @@ func (iNode *insertNode) insert(insertData *InsertData, segmentID int64, wg *syn
}
func newInsertNode(replica collectionReplica) *insertNode {
maxQueueLength := Params.flowGraphMaxQueueLength()
maxParallelism := Params.flowGraphMaxParallelism()
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
baseNode := BaseNode{}
baseNode.SetMaxQueueLength(maxQueueLength)

View File

@ -14,8 +14,8 @@ func (ksNode *key2SegNode) Operate(in []*Msg) []*Msg {
}
func newKey2SegNode() *key2SegNode {
maxQueueLength := Params.flowGraphMaxQueueLength()
maxParallelism := Params.flowGraphMaxParallelism()
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
baseNode := BaseNode{}
baseNode.SetMaxQueueLength(maxQueueLength)

View File

@ -13,8 +13,10 @@ type key2SegMsg struct {
timeRange TimeRange
}
type schemaUpdateMsg struct {
timeRange TimeRange
type ddMsg struct {
collectionRecords map[string][]metaOperateRecord
partitionRecords map[string][]metaOperateRecord
timeRange TimeRange
}
type insertMsg struct {
@ -56,11 +58,11 @@ func (ksMsg *key2SegMsg) DownStreamNodeIdx() int {
return 0
}
func (suMsg *schemaUpdateMsg) TimeTick() Timestamp {
func (suMsg *ddMsg) TimeTick() Timestamp {
return suMsg.timeRange.timestampMax
}
func (suMsg *schemaUpdateMsg) DownStreamNodeIdx() int {
func (suMsg *ddMsg) DownStreamNodeIdx() int {
return 0
}

View File

@ -2,23 +2,19 @@ package querynode
import (
"context"
"log"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)
func newDmInputNode(ctx context.Context) *flowgraph.InputNode {
receiveBufSize := Params.insertReceiveBufSize()
pulsarBufSize := Params.insertPulsarBufSize()
receiveBufSize := Params.InsertReceiveBufSize
pulsarBufSize := Params.InsertPulsarBufSize
msgStreamURL, err := Params.pulsarAddress()
if err != nil {
log.Fatal(err)
}
msgStreamURL := Params.PulsarAddress
consumeChannels := Params.insertChannelNames()
consumeSubName := Params.msgChannelSubName()
consumeChannels := Params.InsertChannelNames
consumeSubName := Params.MsgChannelSubName
insertStream := msgstream.NewPulsarTtMsgStream(ctx, receiveBufSize)
insertStream.SetPulsarClient(msgStreamURL)
@ -27,9 +23,32 @@ func newDmInputNode(ctx context.Context) *flowgraph.InputNode {
var stream msgstream.MsgStream = insertStream
maxQueueLength := Params.flowGraphMaxQueueLength()
maxParallelism := Params.flowGraphMaxParallelism()
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
node := flowgraph.NewInputNode(&stream, "dmInputNode", maxQueueLength, maxParallelism)
return node
}
func newDDInputNode(ctx context.Context) *flowgraph.InputNode {
receiveBufSize := Params.DDReceiveBufSize
pulsarBufSize := Params.DDPulsarBufSize
msgStreamURL := Params.PulsarAddress
consumeChannels := Params.DDChannelNames
consumeSubName := Params.MsgChannelSubName
ddStream := msgstream.NewPulsarTtMsgStream(ctx, receiveBufSize)
ddStream.SetPulsarClient(msgStreamURL)
unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
ddStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
var stream msgstream.MsgStream = ddStream
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
node := flowgraph.NewInputNode(&stream, "ddInputNode", maxQueueLength, maxParallelism)
return node
}

View File

@ -1,27 +0,0 @@
package querynode
type schemaUpdateNode struct {
BaseNode
schemaUpdateMsg schemaUpdateMsg
}
func (suNode *schemaUpdateNode) Name() string {
return "suNode"
}
func (suNode *schemaUpdateNode) Operate(in []*Msg) []*Msg {
return in
}
func newSchemaUpdateNode() *schemaUpdateNode {
maxQueueLength := Params.flowGraphMaxQueueLength()
maxParallelism := Params.flowGraphMaxParallelism()
baseNode := BaseNode{}
baseNode.SetMaxQueueLength(maxQueueLength)
baseNode.SetMaxParallelism(maxParallelism)
return &schemaUpdateNode{
BaseNode: baseNode,
}
}

View File

@ -34,8 +34,8 @@ func (stNode *serviceTimeNode) Operate(in []*Msg) []*Msg {
}
func newServiceTimeNode(replica collectionReplica) *serviceTimeNode {
maxQueueLength := Params.flowGraphMaxQueueLength()
maxParallelism := Params.flowGraphMaxParallelism()
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
baseNode := BaseNode{}
baseNode.SetMaxQueueLength(maxQueueLength)

View File

@ -11,11 +11,11 @@ import (
"time"
"github.com/golang/protobuf/proto"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/mvcc/mvccpb"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/mvcc/mvccpb"
)
const (
@ -30,8 +30,8 @@ type metaService struct {
}
func newMetaService(ctx context.Context, replica collectionReplica) *metaService {
ETCDAddr := Params.etcdAddress()
MetaRootPath := Params.metaRootPath()
ETCDAddr := Params.ETCDAddress
MetaRootPath := Params.MetaRootPath
cli, _ := clientv3.New(clientv3.Config{
Endpoints: []string{ETCDAddr},
@ -55,37 +55,24 @@ func (mService *metaService) start() {
if err != nil {
log.Fatal("metaService loadSegments failed")
}
metaChan := mService.kvBase.WatchWithPrefix("")
for {
select {
case <-mService.ctx.Done():
return
case resp := <-metaChan:
err := mService.processResp(resp)
if err != nil {
log.Println(err)
}
}
}
}
func GetCollectionObjID(key string) string {
ETCDRootPath := Params.metaRootPath()
ETCDRootPath := Params.MetaRootPath
prefix := path.Join(ETCDRootPath, CollectionPrefix) + "/"
return strings.TrimPrefix(key, prefix)
}
func GetSegmentObjID(key string) string {
ETCDRootPath := Params.metaRootPath()
ETCDRootPath := Params.MetaRootPath
prefix := path.Join(ETCDRootPath, SegmentPrefix) + "/"
return strings.TrimPrefix(key, prefix)
}
func isCollectionObj(key string) bool {
ETCDRootPath := Params.metaRootPath()
ETCDRootPath := Params.MetaRootPath
prefix := path.Join(ETCDRootPath, CollectionPrefix) + "/"
prefix = strings.TrimSpace(prefix)
@ -95,7 +82,7 @@ func isCollectionObj(key string) bool {
}
func isSegmentObj(key string) bool {
ETCDRootPath := Params.metaRootPath()
ETCDRootPath := Params.MetaRootPath
prefix := path.Join(ETCDRootPath, SegmentPrefix) + "/"
prefix = strings.TrimSpace(prefix)
@ -110,9 +97,8 @@ func isSegmentChannelRangeInQueryNodeChannelRange(segment *etcdpb.SegmentMeta) b
return false
}
Params.Init()
var queryNodeChannelStart = Params.insertChannelRange()[0]
var queryNodeChannelEnd = Params.insertChannelRange()[1]
var queryNodeChannelStart = Params.InsertChannelRange[0]
var queryNodeChannelEnd = Params.InsertChannelRange[1]
if segment.ChannelStart >= int32(queryNodeChannelStart) && segment.ChannelEnd <= int32(queryNodeChannelEnd) {
return true
@ -149,7 +135,9 @@ func (mService *metaService) processCollectionCreate(id string, value string) {
col := mService.collectionUnmarshal(value)
if col != nil {
err := mService.replica.addCollection(col, value)
schema := col.Schema
schemaBlob := proto.MarshalTextString(schema)
err := mService.replica.addCollection(col.ID, schemaBlob)
if err != nil {
log.Println(err)
}

View File

@ -1,32 +1,19 @@
package querynode
import (
"context"
"math"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
)
func TestMetaService_start(t *testing.T) {
var ctx context.Context
node := newQueryNode()
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
if closeWithDeadline {
var cancel context.CancelFunc
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel = context.WithDeadline(context.Background(), d)
defer cancel()
} else {
ctx = context.Background()
}
// init query node
node := NewQueryNode(ctx, 0)
node.metaService = newMetaService(ctx, node.replica)
(*node.metaService).start()
node.metaService.start()
node.Close()
}

View File

@ -10,6 +10,44 @@ import (
type ParamTable struct {
paramtable.BaseTable
PulsarAddress string
ETCDAddress string
MetaRootPath string
QueryNodeID UniqueID
QueryNodeNum int
FlowGraphMaxQueueLength int32
FlowGraphMaxParallelism int32
// dm
InsertChannelNames []string
InsertChannelRange []int
InsertReceiveBufSize int64
InsertPulsarBufSize int64
// dd
DDChannelNames []string
DDReceiveBufSize int64
DDPulsarBufSize int64
// search
SearchChannelNames []string
SearchResultChannelNames []string
SearchReceiveBufSize int64
SearchPulsarBufSize int64
SearchResultReceiveBufSize int64
// stats
StatsPublishInterval int
StatsChannelName string
StatsReceiveBufSize int64
GracefulTime int64
MsgChannelSubName string
DefaultPartitionTag string
SliceIndex int
}
var Params ParamTable
@ -30,18 +68,60 @@ func (p *ParamTable) Init() {
queryNodeIDStr = strconv.Itoa(int(queryNodeIDList[0]))
}
}
p.Save("_queryNodeID", queryNodeIDStr)
err = p.LoadYaml("advanced/common.yaml")
if err != nil {
panic(err)
}
err = p.Save("_queryNodeID", queryNodeIDStr)
if err != nil {
panic(err)
}
p.initPulsarAddress()
p.initETCDAddress()
p.initMetaRootPath()
p.initQueryNodeID()
p.initQueryNodeNum()
p.initGracefulTime()
p.initMsgChannelSubName()
p.initDefaultPartitionTag()
p.initSliceIndex()
p.initFlowGraphMaxQueueLength()
p.initFlowGraphMaxParallelism()
p.initInsertChannelNames()
p.initInsertChannelRange()
p.initInsertReceiveBufSize()
p.initInsertPulsarBufSize()
p.initDDChannelNames()
p.initDDReceiveBufSize()
p.initDDPulsarBufSize()
p.initSearchChannelNames()
p.initSearchResultChannelNames()
p.initSearchReceiveBufSize()
p.initSearchPulsarBufSize()
p.initSearchResultReceiveBufSize()
p.initStatsPublishInterval()
p.initStatsChannelName()
p.initStatsReceiveBufSize()
}
func (p *ParamTable) pulsarAddress() (string, error) {
func (p *ParamTable) initPulsarAddress() {
url, err := p.Load("_PulsarAddress")
if err != nil {
panic(err)
}
return url, nil
p.PulsarAddress = url
}
func (p *ParamTable) QueryNodeID() UniqueID {
func (p *ParamTable) initQueryNodeID() {
queryNodeID, err := p.Load("_queryNodeID")
if err != nil {
panic(err)
@ -50,66 +130,90 @@ func (p *ParamTable) QueryNodeID() UniqueID {
if err != nil {
panic(err)
}
return UniqueID(id)
p.QueryNodeID = UniqueID(id)
}
func (p *ParamTable) insertChannelRange() []int {
func (p *ParamTable) initInsertChannelRange() {
insertChannelRange, err := p.Load("msgChannel.channelRange.insert")
if err != nil {
panic(err)
}
return paramtable.ConvertRangeToIntRange(insertChannelRange, ",")
p.InsertChannelRange = paramtable.ConvertRangeToIntRange(insertChannelRange, ",")
}
// advanced params
// stats
func (p *ParamTable) statsPublishInterval() int {
return p.ParseInt("queryNode.stats.publishInterval")
func (p *ParamTable) initStatsPublishInterval() {
p.StatsPublishInterval = p.ParseInt("queryNode.stats.publishInterval")
}
// dataSync:
func (p *ParamTable) flowGraphMaxQueueLength() int32 {
return p.ParseInt32("queryNode.dataSync.flowGraph.maxQueueLength")
func (p *ParamTable) initFlowGraphMaxQueueLength() {
p.FlowGraphMaxQueueLength = p.ParseInt32("queryNode.dataSync.flowGraph.maxQueueLength")
}
func (p *ParamTable) flowGraphMaxParallelism() int32 {
return p.ParseInt32("queryNode.dataSync.flowGraph.maxParallelism")
func (p *ParamTable) initFlowGraphMaxParallelism() {
p.FlowGraphMaxParallelism = p.ParseInt32("queryNode.dataSync.flowGraph.maxParallelism")
}
// msgStream
func (p *ParamTable) insertReceiveBufSize() int64 {
return p.ParseInt64("queryNode.msgStream.insert.recvBufSize")
func (p *ParamTable) initInsertReceiveBufSize() {
p.InsertReceiveBufSize = p.ParseInt64("queryNode.msgStream.insert.recvBufSize")
}
func (p *ParamTable) insertPulsarBufSize() int64 {
return p.ParseInt64("queryNode.msgStream.insert.pulsarBufSize")
func (p *ParamTable) initInsertPulsarBufSize() {
p.InsertPulsarBufSize = p.ParseInt64("queryNode.msgStream.insert.pulsarBufSize")
}
func (p *ParamTable) searchReceiveBufSize() int64 {
return p.ParseInt64("queryNode.msgStream.search.recvBufSize")
}
func (p *ParamTable) searchPulsarBufSize() int64 {
return p.ParseInt64("queryNode.msgStream.search.pulsarBufSize")
}
func (p *ParamTable) searchResultReceiveBufSize() int64 {
return p.ParseInt64("queryNode.msgStream.searchResult.recvBufSize")
}
func (p *ParamTable) statsReceiveBufSize() int64 {
return p.ParseInt64("queryNode.msgStream.stats.recvBufSize")
}
func (p *ParamTable) etcdAddress() string {
etcdAddress, err := p.Load("_EtcdAddress")
func (p *ParamTable) initDDReceiveBufSize() {
revBufSize, err := p.Load("queryNode.msgStream.dataDefinition.recvBufSize")
if err != nil {
panic(err)
}
return etcdAddress
bufSize, err := strconv.Atoi(revBufSize)
if err != nil {
panic(err)
}
p.DDReceiveBufSize = int64(bufSize)
}
func (p *ParamTable) metaRootPath() string {
func (p *ParamTable) initDDPulsarBufSize() {
pulsarBufSize, err := p.Load("queryNode.msgStream.dataDefinition.pulsarBufSize")
if err != nil {
panic(err)
}
bufSize, err := strconv.Atoi(pulsarBufSize)
if err != nil {
panic(err)
}
p.DDPulsarBufSize = int64(bufSize)
}
func (p *ParamTable) initSearchReceiveBufSize() {
p.SearchReceiveBufSize = p.ParseInt64("queryNode.msgStream.search.recvBufSize")
}
func (p *ParamTable) initSearchPulsarBufSize() {
p.SearchPulsarBufSize = p.ParseInt64("queryNode.msgStream.search.pulsarBufSize")
}
func (p *ParamTable) initSearchResultReceiveBufSize() {
p.SearchResultReceiveBufSize = p.ParseInt64("queryNode.msgStream.searchResult.recvBufSize")
}
func (p *ParamTable) initStatsReceiveBufSize() {
p.StatsReceiveBufSize = p.ParseInt64("queryNode.msgStream.stats.recvBufSize")
}
func (p *ParamTable) initETCDAddress() {
ETCDAddress, err := p.Load("_EtcdAddress")
if err != nil {
panic(err)
}
p.ETCDAddress = ETCDAddress
}
func (p *ParamTable) initMetaRootPath() {
rootPath, err := p.Load("etcd.rootPath")
if err != nil {
panic(err)
@ -118,14 +222,14 @@ func (p *ParamTable) metaRootPath() string {
if err != nil {
panic(err)
}
return rootPath + "/" + subPath
p.MetaRootPath = rootPath + "/" + subPath
}
func (p *ParamTable) gracefulTime() int64 {
return p.ParseInt64("queryNode.gracefulTime")
func (p *ParamTable) initGracefulTime() {
p.GracefulTime = p.ParseInt64("queryNode.gracefulTime")
}
func (p *ParamTable) insertChannelNames() []string {
func (p *ParamTable) initInsertChannelNames() {
prefix, err := p.Load("msgChannel.chanNamePrefix.insert")
if err != nil {
@ -142,16 +246,16 @@ func (p *ParamTable) insertChannelNames() []string {
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
sep := len(channelIDs) / p.queryNodeNum()
index := p.sliceIndex()
sep := len(channelIDs) / p.QueryNodeNum
index := p.SliceIndex
if index == -1 {
panic("queryNodeID not Match with Config")
}
start := index * sep
return ret[start : start+sep]
p.InsertChannelNames = ret[start : start+sep]
}
func (p *ParamTable) searchChannelNames() []string {
func (p *ParamTable) initSearchChannelNames() {
prefix, err := p.Load("msgChannel.chanNamePrefix.search")
if err != nil {
log.Fatal(err)
@ -168,10 +272,10 @@ func (p *ParamTable) searchChannelNames() []string {
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
return ret
p.SearchChannelNames = ret
}
func (p *ParamTable) searchResultChannelNames() []string {
func (p *ParamTable) initSearchResultChannelNames() {
prefix, err := p.Load("msgChannel.chanNamePrefix.searchResult")
if err != nil {
log.Fatal(err)
@ -188,10 +292,10 @@ func (p *ParamTable) searchResultChannelNames() []string {
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
return ret
p.SearchResultChannelNames = ret
}
func (p *ParamTable) msgChannelSubName() string {
func (p *ParamTable) initMsgChannelSubName() {
// TODO: subName = namePrefix + "-" + queryNodeID, queryNodeID is assigned by master
name, err := p.Load("msgChannel.subNamePrefix.queryNodeSubNamePrefix")
if err != nil {
@ -201,28 +305,56 @@ func (p *ParamTable) msgChannelSubName() string {
if err != nil {
panic(err)
}
return name + "-" + queryNodeIDStr
p.MsgChannelSubName = name + "-" + queryNodeIDStr
}
func (p *ParamTable) statsChannelName() string {
func (p *ParamTable) initStatsChannelName() {
channels, err := p.Load("msgChannel.chanNamePrefix.queryNodeStats")
if err != nil {
panic(err)
}
return channels
p.StatsChannelName = channels
}
func (p *ParamTable) sliceIndex() int {
queryNodeID := p.QueryNodeID()
func (p *ParamTable) initDDChannelNames() {
prefix, err := p.Load("msgChannel.chanNamePrefix.dataDefinition")
if err != nil {
panic(err)
}
prefix += "-"
iRangeStr, err := p.Load("msgChannel.channelRange.dataDefinition")
if err != nil {
panic(err)
}
channelIDs := paramtable.ConvertRangeToIntSlice(iRangeStr, ",")
var ret []string
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
p.DDChannelNames = ret
}
func (p *ParamTable) initDefaultPartitionTag() {
defaultTag, err := p.Load("common.defaultPartitionTag")
if err != nil {
panic(err)
}
p.DefaultPartitionTag = defaultTag
}
func (p *ParamTable) initSliceIndex() {
queryNodeID := p.QueryNodeID
queryNodeIDList := p.QueryNodeIDList()
for i := 0; i < len(queryNodeIDList); i++ {
if queryNodeID == queryNodeIDList[i] {
return i
p.SliceIndex = i
return
}
}
return -1
p.SliceIndex = -1
}
func (p *ParamTable) queryNodeNum() int {
return len(p.QueryNodeIDList())
func (p *ParamTable) initQueryNodeNum() {
p.QueryNodeNum = len(p.QueryNodeIDList())
}

View File

@ -9,101 +9,124 @@ import (
)
func TestParamTable_PulsarAddress(t *testing.T) {
address, err := Params.pulsarAddress()
assert.NoError(t, err)
address := Params.PulsarAddress
split := strings.Split(address, ":")
assert.Equal(t, "pulsar", split[0])
assert.Equal(t, "6650", split[len(split)-1])
}
func TestParamTable_QueryNodeID(t *testing.T) {
id := Params.QueryNodeID()
id := Params.QueryNodeID
assert.Contains(t, Params.QueryNodeIDList(), id)
}
func TestParamTable_insertChannelRange(t *testing.T) {
channelRange := Params.insertChannelRange()
channelRange := Params.InsertChannelRange
assert.Equal(t, 2, len(channelRange))
}
func TestParamTable_statsServiceTimeInterval(t *testing.T) {
interval := Params.statsPublishInterval()
interval := Params.StatsPublishInterval
assert.Equal(t, 1000, interval)
}
func TestParamTable_statsMsgStreamReceiveBufSize(t *testing.T) {
bufSize := Params.statsReceiveBufSize()
bufSize := Params.StatsReceiveBufSize
assert.Equal(t, int64(64), bufSize)
}
func TestParamTable_insertMsgStreamReceiveBufSize(t *testing.T) {
bufSize := Params.insertReceiveBufSize()
bufSize := Params.InsertReceiveBufSize
assert.Equal(t, int64(1024), bufSize)
}
func TestParamTable_ddMsgStreamReceiveBufSize(t *testing.T) {
bufSize := Params.DDReceiveBufSize
assert.Equal(t, bufSize, int64(64))
}
func TestParamTable_searchMsgStreamReceiveBufSize(t *testing.T) {
bufSize := Params.searchReceiveBufSize()
bufSize := Params.SearchReceiveBufSize
assert.Equal(t, int64(512), bufSize)
}
func TestParamTable_searchResultMsgStreamReceiveBufSize(t *testing.T) {
bufSize := Params.searchResultReceiveBufSize()
bufSize := Params.SearchResultReceiveBufSize
assert.Equal(t, int64(64), bufSize)
}
func TestParamTable_searchPulsarBufSize(t *testing.T) {
bufSize := Params.searchPulsarBufSize()
bufSize := Params.SearchPulsarBufSize
assert.Equal(t, int64(512), bufSize)
}
func TestParamTable_insertPulsarBufSize(t *testing.T) {
bufSize := Params.insertPulsarBufSize()
bufSize := Params.InsertPulsarBufSize
assert.Equal(t, int64(1024), bufSize)
}
func TestParamTable_ddPulsarBufSize(t *testing.T) {
bufSize := Params.DDPulsarBufSize
assert.Equal(t, bufSize, int64(64))
}
func TestParamTable_flowGraphMaxQueueLength(t *testing.T) {
length := Params.flowGraphMaxQueueLength()
length := Params.FlowGraphMaxQueueLength
assert.Equal(t, int32(1024), length)
}
func TestParamTable_flowGraphMaxParallelism(t *testing.T) {
maxParallelism := Params.flowGraphMaxParallelism()
maxParallelism := Params.FlowGraphMaxParallelism
assert.Equal(t, int32(1024), maxParallelism)
}
func TestParamTable_insertChannelNames(t *testing.T) {
names := Params.insertChannelNames()
channelRange := Params.insertChannelRange()
names := Params.InsertChannelNames
channelRange := Params.InsertChannelRange
num := channelRange[1] - channelRange[0]
num = num / Params.queryNodeNum()
num = num / Params.QueryNodeNum
assert.Equal(t, num, len(names))
start := num * Params.sliceIndex()
assert.Equal(t, fmt.Sprintf("insert-%d", channelRange[start]), names[0])
start := num * Params.SliceIndex
contains := strings.Contains(names[0], fmt.Sprintf("insert-%d", channelRange[start]))
assert.Equal(t, contains, true)
}
func TestParamTable_searchChannelNames(t *testing.T) {
names := Params.searchChannelNames()
names := Params.SearchChannelNames
assert.Equal(t, len(names), 1)
assert.Equal(t, "search-0", names[0])
contains := strings.Contains(names[0], "search-0")
assert.Equal(t, contains, true)
}
func TestParamTable_searchResultChannelNames(t *testing.T) {
names := Params.searchResultChannelNames()
names := Params.SearchResultChannelNames
assert.NotNil(t, names)
}
func TestParamTable_msgChannelSubName(t *testing.T) {
name := Params.msgChannelSubName()
expectName := fmt.Sprintf("queryNode-%d", Params.QueryNodeID())
name := Params.MsgChannelSubName
expectName := fmt.Sprintf("queryNode-%d", Params.QueryNodeID)
assert.Equal(t, expectName, name)
}
func TestParamTable_statsChannelName(t *testing.T) {
name := Params.statsChannelName()
assert.Equal(t, "query-node-stats", name)
name := Params.StatsChannelName
contains := strings.Contains(name, "query-node-stats")
assert.Equal(t, contains, true)
}
func TestParamTable_metaRootPath(t *testing.T) {
path := Params.metaRootPath()
path := Params.MetaRootPath
assert.Equal(t, "by-dev/meta", path)
}
func TestParamTable_ddChannelName(t *testing.T) {
names := Params.DDChannelNames
contains := strings.Contains(names[0], "data-definition-0")
assert.Equal(t, contains, true)
}
func TestParamTable_defaultPartitionTag(t *testing.T) {
tag := Params.DefaultPartitionTag
assert.Equal(t, tag, "_default")
}

View File

@ -14,11 +14,8 @@ func TestPartition_Segments(t *testing.T) {
collection, err := node.replica.getCollectionByName(collectionName)
assert.NoError(t, err)
collectionMeta := collection.meta
partitions := collection.Partitions()
assert.Equal(t, len(collectionMeta.PartitionTags), len(*partitions))
targetPartition := (*partitions)[0]
const segmentNum = 3

View File

@ -15,10 +15,10 @@ func TestPlan_Plan(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
collection := newCollection(collectionMeta.ID, schemaBlob)
dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
@ -35,10 +35,10 @@ func TestPlan_PlaceholderGroup(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
collection := newCollection(collectionMeta.ID, schemaBlob)
dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"

View File

@ -18,7 +18,7 @@ import (
type QueryNode struct {
queryNodeLoopCtx context.Context
queryNodeLoopCancel func()
queryNodeLoopCancel context.CancelFunc
QueryNodeID uint64
@ -75,6 +75,8 @@ func (node *QueryNode) Start() error {
go node.searchService.start()
go node.metaService.start()
go node.statsService.start()
<-node.queryNodeLoopCtx.Done()
return nil
}

View File

@ -2,18 +2,21 @@ package querynode
import (
"context"
"math/rand"
"os"
"strconv"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)
const ctxTimeInMillisecond = 200
const ctxTimeInMillisecond = 2000
const closeWithDeadline = true
func setup() {
@ -66,16 +69,16 @@ func genTestCollectionMeta(collectionName string, collectionID UniqueID) *etcdpb
func initTestMeta(t *testing.T, node *QueryNode, collectionName string, collectionID UniqueID, segmentID UniqueID) {
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
var err = node.replica.addCollection(collectionMeta, collectionMetaBlob)
var err = node.replica.addCollection(collectionMeta.ID, schemaBlob)
assert.NoError(t, err)
collection, err := node.replica.getCollectionByName(collectionName)
assert.NoError(t, err)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
assert.Equal(t, node.replica.getCollectionNum(), 1)
err = node.replica.addPartition(collection.ID(), collectionMeta.PartitionTags[0])
@ -93,7 +96,10 @@ func newQueryNode() *QueryNode {
var cancel context.CancelFunc
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel = context.WithDeadline(context.Background(), d)
defer cancel()
go func() {
<-ctx.Done()
cancel()
}()
} else {
ctx = context.Background()
}
@ -103,8 +109,26 @@ func newQueryNode() *QueryNode {
}
func makeNewChannelNames(names []string, suffix string) []string {
var ret []string
for _, name := range names {
ret = append(ret, name+suffix)
}
return ret
}
func refreshChannelNames() {
suffix := "_test_query_node" + strconv.FormatInt(rand.Int63n(100), 10)
Params.DDChannelNames = makeNewChannelNames(Params.DDChannelNames, suffix)
Params.InsertChannelNames = makeNewChannelNames(Params.InsertChannelNames, suffix)
Params.SearchChannelNames = makeNewChannelNames(Params.SearchChannelNames, suffix)
Params.SearchResultChannelNames = makeNewChannelNames(Params.SearchResultChannelNames, suffix)
Params.StatsChannelName = Params.StatsChannelName + suffix
}
func TestMain(m *testing.M) {
setup()
refreshChannelNames()
exitCode := m.Run()
os.Exit(exitCode)
}

View File

@ -17,10 +17,10 @@ func TestReduce_AllFunc(t *testing.T) {
collectionID := UniqueID(0)
segmentID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
collection := newCollection(collectionMeta.ID, schemaBlob)
segment := newSegment(collection, segmentID)
const DIM = 16

View File

@ -37,23 +37,20 @@ type searchService struct {
type ResultEntityIds []UniqueID
func newSearchService(ctx context.Context, replica collectionReplica) *searchService {
receiveBufSize := Params.searchReceiveBufSize()
pulsarBufSize := Params.searchPulsarBufSize()
receiveBufSize := Params.SearchReceiveBufSize
pulsarBufSize := Params.SearchPulsarBufSize
msgStreamURL, err := Params.pulsarAddress()
if err != nil {
log.Fatal(err)
}
msgStreamURL := Params.PulsarAddress
consumeChannels := Params.searchChannelNames()
consumeSubName := Params.msgChannelSubName()
consumeChannels := Params.SearchChannelNames
consumeSubName := Params.MsgChannelSubName
searchStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
searchStream.SetPulsarClient(msgStreamURL)
unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
searchStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
var inputStream msgstream.MsgStream = searchStream
producerChannels := Params.searchResultChannelNames()
producerChannels := Params.SearchResultChannelNames
searchResultStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
searchResultStream.SetPulsarClient(msgStreamURL)
searchResultStream.CreatePulsarProducers(producerChannels)
@ -74,7 +71,7 @@ func newSearchService(ctx context.Context, replica collectionReplica) *searchSer
searchMsgStream: inputStream,
searchResultMsgStream: outputStream,
queryNodeID: Params.QueryNodeID(),
queryNodeID: Params.QueryNodeID,
}
}

View File

@ -18,17 +18,16 @@ import (
)
func TestSearch_Search(t *testing.T) {
node := NewQueryNode(context.Background(), 0)
node := newQueryNode()
initTestMeta(t, node, "collection0", 0, 0)
pulsarURL, _ := Params.pulsarAddress()
pulsarURL := Params.PulsarAddress
// test data generate
const msgLength = 10
const receiveBufSize = 1024
const DIM = 16
insertProducerChannels := Params.insertChannelNames()
searchProducerChannels := Params.searchChannelNames()
searchProducerChannels := Params.SearchChannelNames
var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
// start search service
@ -170,13 +169,29 @@ func TestSearch_Search(t *testing.T) {
timeTickMsgPack.Msgs = append(timeTickMsgPack.Msgs, timeTickMsg)
// pulsar produce
insertChannels := Params.InsertChannelNames
ddChannels := Params.DDChannelNames
insertStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
insertStream.SetPulsarClient(pulsarURL)
insertStream.CreatePulsarProducers(insertProducerChannels)
insertStream.Start()
err = insertStream.Produce(&msgPack)
insertStream.CreatePulsarProducers(insertChannels)
ddStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
ddStream.SetPulsarClient(pulsarURL)
ddStream.CreatePulsarProducers(ddChannels)
var insertMsgStream msgstream.MsgStream = insertStream
insertMsgStream.Start()
var ddMsgStream msgstream.MsgStream = ddStream
ddMsgStream.Start()
err = insertMsgStream.Produce(&msgPack)
assert.NoError(t, err)
err = insertStream.Broadcast(&timeTickMsgPack)
err = insertMsgStream.Broadcast(&timeTickMsgPack)
assert.NoError(t, err)
err = ddMsgStream.Broadcast(&timeTickMsgPack)
assert.NoError(t, err)
// dataSync
@ -192,14 +207,13 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
node := NewQueryNode(context.Background(), 0)
initTestMeta(t, node, "collection0", 0, 0)
pulsarURL, _ := Params.pulsarAddress()
pulsarURL := Params.PulsarAddress
// test data generate
const msgLength = 1024
const msgLength = 10
const receiveBufSize = 1024
const DIM = 16
insertProducerChannels := Params.insertChannelNames()
searchProducerChannels := Params.searchChannelNames()
searchProducerChannels := Params.SearchChannelNames
var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
// start search service
@ -345,13 +359,29 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
timeTickMsgPack.Msgs = append(timeTickMsgPack.Msgs, timeTickMsg)
// pulsar produce
insertChannels := Params.InsertChannelNames
ddChannels := Params.DDChannelNames
insertStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
insertStream.SetPulsarClient(pulsarURL)
insertStream.CreatePulsarProducers(insertProducerChannels)
insertStream.Start()
err = insertStream.Produce(&msgPack)
insertStream.CreatePulsarProducers(insertChannels)
ddStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
ddStream.SetPulsarClient(pulsarURL)
ddStream.CreatePulsarProducers(ddChannels)
var insertMsgStream msgstream.MsgStream = insertStream
insertMsgStream.Start()
var ddMsgStream msgstream.MsgStream = ddStream
ddMsgStream.Start()
err = insertMsgStream.Produce(&msgPack)
assert.NoError(t, err)
err = insertStream.Broadcast(&timeTickMsgPack)
err = insertMsgStream.Broadcast(&timeTickMsgPack)
assert.NoError(t, err)
err = ddMsgStream.Broadcast(&timeTickMsgPack)
assert.NoError(t, err)
// dataSync

View File

@ -18,12 +18,12 @@ func TestSegment_newSegment(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -36,12 +36,12 @@ func TestSegment_deleteSegment(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -56,12 +56,12 @@ func TestSegment_getRowCount(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -107,12 +107,12 @@ func TestSegment_getDeletedCount(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -164,12 +164,12 @@ func TestSegment_getMemSize(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -216,12 +216,12 @@ func TestSegment_segmentInsert(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
assert.Equal(t, segmentID, segment.segmentID)
@ -262,12 +262,12 @@ func TestSegment_segmentDelete(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -315,12 +315,12 @@ func TestSegment_segmentSearch(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -400,12 +400,12 @@ func TestSegment_segmentPreInsert(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)
@ -442,12 +442,12 @@ func TestSegment_segmentPreDelete(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
collectionMetaBlob := proto.MarshalTextString(collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta, collectionMetaBlob)
assert.Equal(t, collection.meta.Schema.Name, collectionName)
assert.Equal(t, collection.meta.ID, collectionID)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
segmentID := UniqueID(0)
segment := newSegment(collection, segmentID)

View File

@ -8,7 +8,6 @@ import (
"time"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
@ -28,15 +27,12 @@ func newStatsService(ctx context.Context, replica collectionReplica) *statsServi
}
func (sService *statsService) start() {
sleepTimeInterval := Params.statsPublishInterval()
receiveBufSize := Params.statsReceiveBufSize()
sleepTimeInterval := Params.StatsPublishInterval
receiveBufSize := Params.StatsReceiveBufSize
// start pulsar
msgStreamURL, err := Params.pulsarAddress()
if err != nil {
log.Fatal(err)
}
producerChannels := []string{Params.statsChannelName()}
msgStreamURL := Params.PulsarAddress
producerChannels := []string{Params.StatsChannelName}
statsStream := msgstream.NewPulsarMsgStream(sService.ctx, receiveBufSize)
statsStream.SetPulsarClient(msgStreamURL)

View File

@ -16,15 +16,15 @@ func TestStatsService_start(t *testing.T) {
}
//NOTE: start pulsar before test
func TestSegmentManagement_SegmentStatisticService(t *testing.T) {
func TestSegmentManagement_sendSegmentStatistic(t *testing.T) {
node := newQueryNode()
initTestMeta(t, node, "collection0", 0, 0)
const receiveBufSize = 1024
// start pulsar
producerChannels := []string{Params.statsChannelName()}
producerChannels := []string{Params.StatsChannelName}
pulsarURL, _ := Params.pulsarAddress()
pulsarURL := Params.PulsarAddress
statsStream := msgstream.NewPulsarMsgStream(node.queryNodeLoopCtx, receiveBufSize)
statsStream.SetPulsarClient(pulsarURL)

View File

@ -4,7 +4,10 @@ import (
"context"
"fmt"
"log"
"strconv"
"sync"
"github.com/zilliztech/milvus-distributed/internal/errors"
)
type Node interface {
@ -33,7 +36,7 @@ type nodeCtx struct {
func (nodeCtx *nodeCtx) Start(ctx context.Context, wg *sync.WaitGroup) {
if (*nodeCtx.node).IsInputNode() {
fmt.Println("start InputNode.inStream")
// fmt.Println("start InputNode.inStream")
inStream, ok := (*nodeCtx.node).(*InputNode)
if !ok {
log.Fatal("Invalid inputNode")
@ -109,6 +112,20 @@ func (nodeCtx *nodeCtx) collectInputMessages() {
}
nodeCtx.inputMessages[i] = msg
}
// timeTick alignment check
if len(nodeCtx.inputMessages) > 1 {
time := (*nodeCtx.inputMessages[0]).TimeTick()
for i := 1; i < len(nodeCtx.inputMessages); i++ {
if time != (*nodeCtx.inputMessages[i]).TimeTick() {
err := errors.New("Fatal, misaligned time tick," +
"t1=" + strconv.FormatUint(time, 10) +
", t2=" + strconv.FormatUint((*nodeCtx.inputMessages[i]).TimeTick(), 10) +
", please restart pulsar")
panic(err)
}
}
}
}
func (node *BaseNode) MaxQueueLength() int32 {

View File

@ -4,5 +4,5 @@ numpy==1.18.1
pytest==5.3.4
pytest-cov==2.8.1
pytest-timeout==1.3.4
pymilvus-distributed==0.0.3
pymilvus-distributed==0.0.5
sklearn==0.0