mirror of https://github.com/milvus-io/milvus.git
parent aa1c26de77
commit 99984b88e1
@@ -33,6 +33,7 @@ class GenericValueDefaultTypeInternal {
 bool bool_val_;
 ::PROTOBUF_NAMESPACE_ID::int64 int64_val_;
 double float_val_;
+::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr string_val_;
 } _GenericValue_default_instance_;
 class QueryInfoDefaultTypeInternal {
 public:
@@ -269,6 +270,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_plan_2eproto::offsets[] PROTOB
 offsetof(::milvus::proto::plan::GenericValueDefaultTypeInternal, bool_val_),
 offsetof(::milvus::proto::plan::GenericValueDefaultTypeInternal, int64_val_),
 offsetof(::milvus::proto::plan::GenericValueDefaultTypeInternal, float_val_),
+offsetof(::milvus::proto::plan::GenericValueDefaultTypeInternal, string_val_),
 PROTOBUF_FIELD_OFFSET(::milvus::proto::plan::GenericValue, val_),
 ~0u, // no _has_bits_
 PROTOBUF_FIELD_OFFSET(::milvus::proto::plan::QueryInfo, _internal_metadata_),
@@ -370,17 +372,17 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_plan_2eproto::offsets[] PROTOB
 };
 static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
 { 0, -1, sizeof(::milvus::proto::plan::GenericValue)},
-{ 9, -1, sizeof(::milvus::proto::plan::QueryInfo)},
-{ 18, -1, sizeof(::milvus::proto::plan::ColumnInfo)},
-{ 27, -1, sizeof(::milvus::proto::plan::UnaryRangeExpr)},
-{ 35, -1, sizeof(::milvus::proto::plan::BinaryRangeExpr)},
-{ 45, -1, sizeof(::milvus::proto::plan::CompareExpr)},
-{ 53, -1, sizeof(::milvus::proto::plan::TermExpr)},
-{ 60, -1, sizeof(::milvus::proto::plan::UnaryExpr)},
-{ 67, -1, sizeof(::milvus::proto::plan::BinaryExpr)},
-{ 75, -1, sizeof(::milvus::proto::plan::Expr)},
-{ 87, -1, sizeof(::milvus::proto::plan::VectorANNS)},
-{ 97, -1, sizeof(::milvus::proto::plan::PlanNode)},
+{ 10, -1, sizeof(::milvus::proto::plan::QueryInfo)},
+{ 19, -1, sizeof(::milvus::proto::plan::ColumnInfo)},
+{ 28, -1, sizeof(::milvus::proto::plan::UnaryRangeExpr)},
+{ 36, -1, sizeof(::milvus::proto::plan::BinaryRangeExpr)},
+{ 46, -1, sizeof(::milvus::proto::plan::CompareExpr)},
+{ 54, -1, sizeof(::milvus::proto::plan::TermExpr)},
+{ 61, -1, sizeof(::milvus::proto::plan::UnaryExpr)},
+{ 68, -1, sizeof(::milvus::proto::plan::BinaryExpr)},
+{ 76, -1, sizeof(::milvus::proto::plan::Expr)},
+{ 88, -1, sizeof(::milvus::proto::plan::VectorANNS)},
+{ 98, -1, sizeof(::milvus::proto::plan::PlanNode)},
 };
 
 static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
@@ -400,61 +402,61 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] =
 
 const char descriptor_table_protodef_plan_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) =
 "\n\nplan.proto\022\021milvus.proto.plan\032\014schema."
-"proto\"S\n\014GenericValue\022\022\n\010bool_val\030\001 \001(\010H"
+"proto\"i\n\014GenericValue\022\022\n\010bool_val\030\001 \001(\010H"
 "\000\022\023\n\tint64_val\030\002 \001(\003H\000\022\023\n\tfloat_val\030\003 \001("
-"\001H\000B\005\n\003val\"\\\n\tQueryInfo\022\014\n\004topk\030\001 \001(\003\022\023\n"
-"\013metric_type\030\003 \001(\t\022\025\n\rsearch_params\030\004 \001("
-"\t\022\025\n\rround_decimal\030\005 \001(\003\"{\n\nColumnInfo\022\020"
-"\n\010field_id\030\001 \001(\003\0220\n\tdata_type\030\002 \001(\0162\035.mi"
-"lvus.proto.schema.DataType\022\026\n\016is_primary"
-"_key\030\003 \001(\010\022\021\n\tis_autoID\030\004 \001(\010\"\233\001\n\016UnaryR"
-"angeExpr\0222\n\013column_info\030\001 \001(\0132\035.milvus.p"
-"roto.plan.ColumnInfo\022%\n\002op\030\002 \001(\0162\031.milvu"
-"s.proto.plan.OpType\022.\n\005value\030\003 \001(\0132\037.mil"
-"vus.proto.plan.GenericValue\"\343\001\n\017BinaryRa"
-"ngeExpr\0222\n\013column_info\030\001 \001(\0132\035.milvus.pr"
-"oto.plan.ColumnInfo\022\027\n\017lower_inclusive\030\002"
-" \001(\010\022\027\n\017upper_inclusive\030\003 \001(\010\0224\n\013lower_v"
-"alue\030\004 \001(\0132\037.milvus.proto.plan.GenericVa"
-"lue\0224\n\013upper_value\030\005 \001(\0132\037.milvus.proto."
-"plan.GenericValue\"\247\001\n\013CompareExpr\0227\n\020lef"
-"t_column_info\030\001 \001(\0132\035.milvus.proto.plan."
-"ColumnInfo\0228\n\021right_column_info\030\002 \001(\0132\035."
-"milvus.proto.plan.ColumnInfo\022%\n\002op\030\003 \001(\016"
-"2\031.milvus.proto.plan.OpType\"o\n\010TermExpr\022"
-"2\n\013column_info\030\001 \001(\0132\035.milvus.proto.plan"
-".ColumnInfo\022/\n\006values\030\002 \003(\0132\037.milvus.pro"
-"to.plan.GenericValue\"\206\001\n\tUnaryExpr\0220\n\002op"
-"\030\001 \001(\0162$.milvus.proto.plan.UnaryExpr.Una"
-"ryOp\022&\n\005child\030\002 \001(\0132\027.milvus.proto.plan."
-"Expr\"\037\n\007UnaryOp\022\013\n\007Invalid\020\000\022\007\n\003Not\020\001\"\307\001"
-"\n\nBinaryExpr\0222\n\002op\030\001 \001(\0162&.milvus.proto."
-"plan.BinaryExpr.BinaryOp\022%\n\004left\030\002 \001(\0132\027"
-".milvus.proto.plan.Expr\022&\n\005right\030\003 \001(\0132\027"
-".milvus.proto.plan.Expr\"6\n\010BinaryOp\022\013\n\007I"
-"nvalid\020\000\022\016\n\nLogicalAnd\020\001\022\r\n\tLogicalOr\020\002\""
-"\342\002\n\004Expr\0220\n\tterm_expr\030\001 \001(\0132\033.milvus.pro"
-"to.plan.TermExprH\000\0222\n\nunary_expr\030\002 \001(\0132\034"
-".milvus.proto.plan.UnaryExprH\000\0224\n\013binary"
-"_expr\030\003 \001(\0132\035.milvus.proto.plan.BinaryEx"
-"prH\000\0226\n\014compare_expr\030\004 \001(\0132\036.milvus.prot"
-"o.plan.CompareExprH\000\022=\n\020unary_range_expr"
-"\030\005 \001(\0132!.milvus.proto.plan.UnaryRangeExp"
-"rH\000\022\?\n\021binary_range_expr\030\006 \001(\0132\".milvus."
-"proto.plan.BinaryRangeExprH\000B\006\n\004expr\"\251\001\n"
-"\nVectorANNS\022\021\n\tis_binary\030\001 \001(\010\022\020\n\010field_"
-"id\030\002 \001(\003\022+\n\npredicates\030\003 \001(\0132\027.milvus.pr"
-"oto.plan.Expr\0220\n\nquery_info\030\004 \001(\0132\034.milv"
-"us.proto.plan.QueryInfo\022\027\n\017placeholder_t"
-"ag\030\005 \001(\t\"\221\001\n\010PlanNode\0224\n\013vector_anns\030\001 \001"
-"(\0132\035.milvus.proto.plan.VectorANNSH\000\022-\n\np"
-"redicates\030\002 \001(\0132\027.milvus.proto.plan.Expr"
-"H\000\022\030\n\020output_field_ids\030\003 \003(\003B\006\n\004node*n\n\006"
-"OpType\022\013\n\007Invalid\020\000\022\017\n\013GreaterThan\020\001\022\020\n\014"
-"GreaterEqual\020\002\022\014\n\010LessThan\020\003\022\r\n\tLessEqua"
-"l\020\004\022\t\n\005Equal\020\005\022\014\n\010NotEqual\020\006B3Z1github.c"
-"om/milvus-io/milvus/internal/proto/planp"
-"bb\006proto3"
+"\001H\000\022\024\n\nstring_val\030\004 \001(\tH\000B\005\n\003val\"\\\n\tQuer"
+"yInfo\022\014\n\004topk\030\001 \001(\003\022\023\n\013metric_type\030\003 \001(\t"
+"\022\025\n\rsearch_params\030\004 \001(\t\022\025\n\rround_decimal"
+"\030\005 \001(\003\"{\n\nColumnInfo\022\020\n\010field_id\030\001 \001(\003\0220"
+"\n\tdata_type\030\002 \001(\0162\035.milvus.proto.schema."
+"DataType\022\026\n\016is_primary_key\030\003 \001(\010\022\021\n\tis_a"
+"utoID\030\004 \001(\010\"\233\001\n\016UnaryRangeExpr\0222\n\013column"
+"_info\030\001 \001(\0132\035.milvus.proto.plan.ColumnIn"
+"fo\022%\n\002op\030\002 \001(\0162\031.milvus.proto.plan.OpTyp"
+"e\022.\n\005value\030\003 \001(\0132\037.milvus.proto.plan.Gen"
+"ericValue\"\343\001\n\017BinaryRangeExpr\0222\n\013column_"
+"info\030\001 \001(\0132\035.milvus.proto.plan.ColumnInf"
+"o\022\027\n\017lower_inclusive\030\002 \001(\010\022\027\n\017upper_incl"
+"usive\030\003 \001(\010\0224\n\013lower_value\030\004 \001(\0132\037.milvu"
+"s.proto.plan.GenericValue\0224\n\013upper_value"
+"\030\005 \001(\0132\037.milvus.proto.plan.GenericValue\""
+"\247\001\n\013CompareExpr\0227\n\020left_column_info\030\001 \001("
+"\0132\035.milvus.proto.plan.ColumnInfo\0228\n\021righ"
+"t_column_info\030\002 \001(\0132\035.milvus.proto.plan."
+"ColumnInfo\022%\n\002op\030\003 \001(\0162\031.milvus.proto.pl"
+"an.OpType\"o\n\010TermExpr\0222\n\013column_info\030\001 \001"
+"(\0132\035.milvus.proto.plan.ColumnInfo\022/\n\006val"
+"ues\030\002 \003(\0132\037.milvus.proto.plan.GenericVal"
+"ue\"\206\001\n\tUnaryExpr\0220\n\002op\030\001 \001(\0162$.milvus.pr"
+"oto.plan.UnaryExpr.UnaryOp\022&\n\005child\030\002 \001("
+"\0132\027.milvus.proto.plan.Expr\"\037\n\007UnaryOp\022\013\n"
+"\007Invalid\020\000\022\007\n\003Not\020\001\"\307\001\n\nBinaryExpr\0222\n\002op"
+"\030\001 \001(\0162&.milvus.proto.plan.BinaryExpr.Bi"
+"naryOp\022%\n\004left\030\002 \001(\0132\027.milvus.proto.plan"
+".Expr\022&\n\005right\030\003 \001(\0132\027.milvus.proto.plan"
+".Expr\"6\n\010BinaryOp\022\013\n\007Invalid\020\000\022\016\n\nLogica"
+"lAnd\020\001\022\r\n\tLogicalOr\020\002\"\342\002\n\004Expr\0220\n\tterm_e"
+"xpr\030\001 \001(\0132\033.milvus.proto.plan.TermExprH\000"
+"\0222\n\nunary_expr\030\002 \001(\0132\034.milvus.proto.plan"
+".UnaryExprH\000\0224\n\013binary_expr\030\003 \001(\0132\035.milv"
+"us.proto.plan.BinaryExprH\000\0226\n\014compare_ex"
+"pr\030\004 \001(\0132\036.milvus.proto.plan.CompareExpr"
+"H\000\022=\n\020unary_range_expr\030\005 \001(\0132!.milvus.pr"
+"oto.plan.UnaryRangeExprH\000\022\?\n\021binary_rang"
+"e_expr\030\006 \001(\0132\".milvus.proto.plan.BinaryR"
+"angeExprH\000B\006\n\004expr\"\251\001\n\nVectorANNS\022\021\n\tis_"
+"binary\030\001 \001(\010\022\020\n\010field_id\030\002 \001(\003\022+\n\npredic"
+"ates\030\003 \001(\0132\027.milvus.proto.plan.Expr\0220\n\nq"
+"uery_info\030\004 \001(\0132\034.milvus.proto.plan.Quer"
+"yInfo\022\027\n\017placeholder_tag\030\005 \001(\t\"\221\001\n\010PlanN"
+"ode\0224\n\013vector_anns\030\001 \001(\0132\035.milvus.proto."
+"plan.VectorANNSH\000\022-\n\npredicates\030\002 \001(\0132\027."
+"milvus.proto.plan.ExprH\000\022\030\n\020output_field"
+"_ids\030\003 \003(\003B\006\n\004node*n\n\006OpType\022\013\n\007Invalid\020"
+"\000\022\017\n\013GreaterThan\020\001\022\020\n\014GreaterEqual\020\002\022\014\n\010"
+"LessThan\020\003\022\r\n\tLessEqual\020\004\022\t\n\005Equal\020\005\022\014\n\010"
+"NotEqual\020\006B3Z1github.com/milvus-io/milvu"
+"s/internal/proto/planpbb\006proto3"
 ;
 static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_plan_2eproto_deps[1] = {
 &::descriptor_table_schema_2eproto,
@@ -474,7 +476,7 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_pla
 static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_plan_2eproto_once;
 static bool descriptor_table_plan_2eproto_initialized = false;
 const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_plan_2eproto = {
-&descriptor_table_plan_2eproto_initialized, descriptor_table_protodef_plan_2eproto, "plan.proto", 2209,
+&descriptor_table_plan_2eproto_initialized, descriptor_table_protodef_plan_2eproto, "plan.proto", 2231,
 &descriptor_table_plan_2eproto_once, descriptor_table_plan_2eproto_sccs, descriptor_table_plan_2eproto_deps, 10, 1,
 schemas, file_default_instances, TableStruct_plan_2eproto::offsets,
 file_level_metadata_plan_2eproto, 12, file_level_enum_descriptors_plan_2eproto, file_level_service_descriptors_plan_2eproto,
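Everything up to this point is regenerated protobuf C++: plan.proto's GenericValue oneof gains string string_val = 4, so the embedded descriptor grows from 2209 to 2231 bytes — the 22-byte delta is exactly the encoded string_val field entry, which is also why the GenericValue length byte in the descriptor string changes from "S" (0x53, 83) to "i" (0x69, 105). A hedged Go sketch of how a plan could carry a varchar primary-key value through the matching regenerated Go bindings; the planpb wrapper names follow standard protoc-gen-go conventions and are assumed, since only the C++ side appears in this diff:

```go
package main

import (
	"fmt"

	"github.com/milvus-io/milvus/internal/proto/planpb"
)

func main() {
	// Assumed Go binding for the regenerated plan.proto: the oneof "val"
	// gains a GenericValue_StringVal wrapper alongside bool/int64/float.
	v := &planpb.GenericValue{
		Val: &planpb.GenericValue_StringVal{StringVal: "prefix_01"},
	}

	// Type-switch on the oneof, mirroring the C++ ValCase kStringVal added below.
	switch x := v.GetVal().(type) {
	case *planpb.GenericValue_StringVal:
		fmt.Println("string pk value:", x.StringVal)
	default:
		fmt.Println("non-string value")
	}
}
```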
@@ -555,6 +557,8 @@ void GenericValue::InitAsDefaultInstance() {
 ::milvus::proto::plan::_GenericValue_default_instance_.bool_val_ = false;
 ::milvus::proto::plan::_GenericValue_default_instance_.int64_val_ = PROTOBUF_LONGLONG(0);
 ::milvus::proto::plan::_GenericValue_default_instance_.float_val_ = 0;
+::milvus::proto::plan::_GenericValue_default_instance_.string_val_.UnsafeSetDefault(
+&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
 }
 class GenericValue::_Internal {
 public:
@@ -583,6 +587,10 @@ GenericValue::GenericValue(const GenericValue& from)
 set_float_val(from.float_val());
 break;
 }
+case kStringVal: {
+set_string_val(from.string_val());
+break;
+}
 case VAL_NOT_SET: {
 break;
 }
@@ -591,6 +599,7 @@ GenericValue::GenericValue(const GenericValue& from)
 }
 
 void GenericValue::SharedCtor() {
 ::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_GenericValue_plan_2eproto.base);
+clear_has_val();
 }
 
@@ -629,6 +638,10 @@ void GenericValue::clear_val() {
 // No need to clear
 break;
 }
+case kStringVal: {
+val_.string_val_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+break;
+}
 case VAL_NOT_SET: {
 break;
 }
@@ -676,6 +689,13 @@ const char* GenericValue::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_I
 ptr += sizeof(double);
 } else goto handle_unusual;
 continue;
+// string string_val = 4;
+case 4:
+if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 34)) {
+ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(mutable_string_val(), ptr, ctx, "milvus.proto.plan.GenericValue.string_val");
+CHK_(ptr);
+} else goto handle_unusual;
+continue;
 default: {
 handle_unusual:
 if ((tag & 7) == 4 || tag == 0) {
@@ -748,6 +768,21 @@ bool GenericValue::MergePartialFromCodedStream(
 break;
 }
 
+// string string_val = 4;
+case 4: {
+if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (34 & 0xFF)) {
+DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
+input, this->mutable_string_val()));
+DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
+this->string_val().data(), static_cast<int>(this->string_val().length()),
+::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
+"milvus.proto.plan.GenericValue.string_val"));
+} else {
+goto handle_unusual;
+}
+break;
+}
+
 default: {
 handle_unusual:
 if (tag == 0) {
@@ -790,6 +825,16 @@ void GenericValue::SerializeWithCachedSizes(
 ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDouble(3, this->float_val(), output);
 }
 
+// string string_val = 4;
+if (has_string_val()) {
+::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
+this->string_val().data(), static_cast<int>(this->string_val().length()),
+::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
+"milvus.proto.plan.GenericValue.string_val");
+::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringMaybeAliased(
+4, this->string_val(), output);
+}
+
 if (_internal_metadata_.have_unknown_fields()) {
 ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
 _internal_metadata_.unknown_fields(), output);
@@ -818,6 +863,17 @@ void GenericValue::SerializeWithCachedSizes(
 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteDoubleToArray(3, this->float_val(), target);
 }
 
+// string string_val = 4;
+if (has_string_val()) {
+::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
+this->string_val().data(), static_cast<int>(this->string_val().length()),
+::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
+"milvus.proto.plan.GenericValue.string_val");
+target =
+::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteStringToArray(
+4, this->string_val(), target);
+}
+
 if (_internal_metadata_.have_unknown_fields()) {
 target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
 _internal_metadata_.unknown_fields(), target);
@@ -857,6 +913,13 @@ size_t GenericValue::ByteSizeLong() const {
 total_size += 1 + 8;
 break;
 }
+// string string_val = 4;
+case kStringVal: {
+total_size += 1 +
+::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
+this->string_val());
+break;
+}
 case VAL_NOT_SET: {
 break;
 }
@@ -901,6 +964,10 @@ void GenericValue::MergeFrom(const GenericValue& from) {
 set_float_val(from.float_val());
 break;
 }
+case kStringVal: {
+set_string_val(from.string_val());
+break;
+}
 case VAL_NOT_SET: {
 break;
 }
@@ -239,6 +239,7 @@ class GenericValue :
 kBoolVal = 1,
 kInt64Val = 2,
 kFloatVal = 3,
+kStringVal = 4,
 VAL_NOT_SET = 0,
 };
 
@@ -322,6 +323,7 @@ class GenericValue :
 kBoolValFieldNumber = 1,
 kInt64ValFieldNumber = 2,
 kFloatValFieldNumber = 3,
+kStringValFieldNumber = 4,
 };
 // bool bool_val = 1;
 private:
@@ -347,6 +349,20 @@ class GenericValue :
 double float_val() const;
 void set_float_val(double value);
 
+// string string_val = 4;
+private:
+bool has_string_val() const;
+public:
+void clear_string_val();
+const std::string& string_val() const;
+void set_string_val(const std::string& value);
+void set_string_val(std::string&& value);
+void set_string_val(const char* value);
+void set_string_val(const char* value, size_t size);
+std::string* mutable_string_val();
+std::string* release_string_val();
+void set_allocated_string_val(std::string* string_val);
+
 void clear_val();
 ValCase val_case() const;
 // @@protoc_insertion_point(class_scope:milvus.proto.plan.GenericValue)
@@ -355,6 +371,7 @@ class GenericValue :
 void set_has_bool_val();
 void set_has_int64_val();
 void set_has_float_val();
+void set_has_string_val();
 
 inline bool has_val() const;
 inline void clear_has_val();
@@ -365,6 +382,7 @@ class GenericValue :
 bool bool_val_;
 ::PROTOBUF_NAMESPACE_ID::int64 int64_val_;
 double float_val_;
+::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr string_val_;
 } val_;
 mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
 ::PROTOBUF_NAMESPACE_ID::uint32 _oneof_case_[1];
@@ -2313,6 +2331,96 @@ inline void GenericValue::set_float_val(double value) {
 // @@protoc_insertion_point(field_set:milvus.proto.plan.GenericValue.float_val)
 }
 
+// string string_val = 4;
+inline bool GenericValue::has_string_val() const {
+return val_case() == kStringVal;
+}
+inline void GenericValue::set_has_string_val() {
+_oneof_case_[0] = kStringVal;
+}
+inline void GenericValue::clear_string_val() {
+if (has_string_val()) {
+val_.string_val_.DestroyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+clear_has_val();
+}
+}
+inline const std::string& GenericValue::string_val() const {
+// @@protoc_insertion_point(field_get:milvus.proto.plan.GenericValue.string_val)
+if (has_string_val()) {
+return val_.string_val_.GetNoArena();
+}
+return *&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited();
+}
+inline void GenericValue::set_string_val(const std::string& value) {
+// @@protoc_insertion_point(field_set:milvus.proto.plan.GenericValue.string_val)
+if (!has_string_val()) {
+clear_val();
+set_has_string_val();
+val_.string_val_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+}
+val_.string_val_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), value);
+// @@protoc_insertion_point(field_set:milvus.proto.plan.GenericValue.string_val)
+}
+inline void GenericValue::set_string_val(std::string&& value) {
+// @@protoc_insertion_point(field_set:milvus.proto.plan.GenericValue.string_val)
+if (!has_string_val()) {
+clear_val();
+set_has_string_val();
+val_.string_val_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+}
+val_.string_val_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::move(value));
+// @@protoc_insertion_point(field_set_rvalue:milvus.proto.plan.GenericValue.string_val)
+}
+inline void GenericValue::set_string_val(const char* value) {
+GOOGLE_DCHECK(value != nullptr);
+if (!has_string_val()) {
+clear_val();
+set_has_string_val();
+val_.string_val_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+}
+val_.string_val_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(),
+::std::string(value));
+// @@protoc_insertion_point(field_set_char:milvus.proto.plan.GenericValue.string_val)
+}
+inline void GenericValue::set_string_val(const char* value, size_t size) {
+if (!has_string_val()) {
+clear_val();
+set_has_string_val();
+val_.string_val_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+}
+val_.string_val_.SetNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), ::std::string(
+reinterpret_cast<const char*>(value), size));
+// @@protoc_insertion_point(field_set_pointer:milvus.proto.plan.GenericValue.string_val)
+}
+inline std::string* GenericValue::mutable_string_val() {
+if (!has_string_val()) {
+clear_val();
+set_has_string_val();
+val_.string_val_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+}
+// @@protoc_insertion_point(field_mutable:milvus.proto.plan.GenericValue.string_val)
+return val_.string_val_.MutableNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+}
+inline std::string* GenericValue::release_string_val() {
+// @@protoc_insertion_point(field_release:milvus.proto.plan.GenericValue.string_val)
+if (has_string_val()) {
+clear_has_val();
+return val_.string_val_.ReleaseNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
+} else {
+return nullptr;
+}
+}
+inline void GenericValue::set_allocated_string_val(std::string* string_val) {
+if (has_val()) {
+clear_val();
+}
+if (string_val != nullptr) {
+set_has_string_val();
+val_.string_val_.UnsafeSetDefault(string_val);
+}
+// @@protoc_insertion_point(field_set_allocated:milvus.proto.plan.GenericValue.string_val)
+}
+
 inline bool GenericValue::has_val() const {
 return val_case() != VAL_NOT_SET;
 }
|
@ -46,9 +46,10 @@ func TestBinlogIOInterfaceMethods(t *testing.T) {
|
|||
meta := f.GetCollectionMeta(UniqueID(10001), "uploads", schemapb.DataType_Int64)
|
||||
|
||||
iData := genInsertData()
|
||||
pk := newInt64PrimaryKey(888)
|
||||
dData := &DeleteData{
|
||||
RowCount: 1,
|
||||
Pks: []int64{888},
|
||||
Pks: []primaryKey{pk},
|
||||
Tss: []uint64{666666},
|
||||
}
|
||||
|
||||
|
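From here down the diff is Go: DeleteData.Pks switches from []int64 to []primaryKey so that delete paths can carry either int64 or varchar keys. The primaryKey type itself is not part of these hunks; the following is a minimal sketch of the shape its call sites here imply (the names newInt64PrimaryKey, newVarCharPrimaryKey, and EQ come from this diff; everything else is assumed):

```go
package datanode

import "github.com/milvus-io/milvus/internal/proto/schemapb"

// Sketch only: a primaryKey abstracts over int64 and varchar keys so that
// DeleteData, delta logs, and bloom-filter checks can share one code path.
type primaryKey interface {
	EQ(other primaryKey) bool
	Type() schemapb.DataType
}

type int64PrimaryKey struct{ Value int64 }

func newInt64PrimaryKey(v int64) primaryKey { return &int64PrimaryKey{Value: v} }

func (pk *int64PrimaryKey) EQ(other primaryKey) bool {
	o, ok := other.(*int64PrimaryKey)
	return ok && pk.Value == o.Value
}

func (pk *int64PrimaryKey) Type() schemapb.DataType { return schemapb.DataType_Int64 }

type varCharPrimaryKey struct{ Value string }

func newVarCharPrimaryKey(v string) primaryKey { return &varCharPrimaryKey{Value: v} }

func (pk *varCharPrimaryKey) EQ(other primaryKey) bool {
	o, ok := other.(*varCharPrimaryKey)
	return ok && pk.Value == o.Value
}

func (pk *varCharPrimaryKey) Type() schemapb.DataType { return schemapb.DataType_VarChar }
```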
@@ -80,7 +81,7 @@ func TestBinlogIOInterfaceMethods(t *testing.T) {
 f := &MetaFactory{}
 meta := f.GetCollectionMeta(UniqueID(10001), "uploads", schemapb.DataType_Int64)
 dData := &DeleteData{
-Pks: []int64{},
+Pks: []primaryKey{},
 Tss: []uint64{},
 }
 
@@ -100,7 +101,7 @@ func TestBinlogIOInterfaceMethods(t *testing.T) {
 
 iData = genInsertData()
 dData = &DeleteData{
-Pks: []int64{},
+Pks: []primaryKey{},
 Tss: []uint64{1},
 RowCount: 1,
 }
@@ -111,8 +112,9 @@ func TestBinlogIOInterfaceMethods(t *testing.T) {
 mkc := &mockCm{errMultiSave: true}
 bin := &binlogIO{mkc, alloc}
 iData = genInsertData()
+pk := newInt64PrimaryKey(1)
 dData = &DeleteData{
-Pks: []int64{1},
+Pks: []primaryKey{pk},
 Tss: []uint64{1},
 RowCount: 1,
 }
@@ -203,12 +205,12 @@ func TestBinlogIOInnerMethods(t *testing.T) {
 
 tests := []struct {
 isvalid bool
-deletepk int64
+deletepk primaryKey
 ts uint64
 
 description string
 }{
-{true, 1, 1111111, "valid input"},
+{true, newInt64PrimaryKey(1), 1111111, "valid input"},
 }
 
 for _, test := range tests {
@@ -216,7 +218,7 @@ func TestBinlogIOInnerMethods(t *testing.T) {
 if test.isvalid {
 
 k, v, err := b.genDeltaBlobs(&DeleteData{
-Pks: []int64{test.deletepk},
+Pks: []primaryKey{test.deletepk},
 Tss: []uint64{test.ts},
 }, meta.GetID(), 10, 1)
 
@@ -231,7 +233,8 @@ func TestBinlogIOInnerMethods(t *testing.T) {
 })
 
 t.Run("Test genDeltaBlobs error", func(t *testing.T) {
-k, v, err := b.genDeltaBlobs(&DeleteData{Pks: []int64{1}, Tss: []uint64{}}, 1, 1, 1)
+pk := newInt64PrimaryKey(1)
+k, v, err := b.genDeltaBlobs(&DeleteData{Pks: []primaryKey{pk}, Tss: []uint64{}}, 1, 1, 1)
 assert.Error(t, err)
 assert.Empty(t, k)
 assert.Empty(t, v)
@@ -240,7 +243,7 @@ func TestBinlogIOInnerMethods(t *testing.T) {
 errAlloc.isvalid = false
 
 bin := binlogIO{cm, errAlloc}
-k, v, err = bin.genDeltaBlobs(&DeleteData{Pks: []int64{1}, Tss: []uint64{1}}, 1, 1, 1)
+k, v, err = bin.genDeltaBlobs(&DeleteData{Pks: []primaryKey{pk}, Tss: []uint64{1}}, 1, 1, 1)
 assert.Error(t, err)
 assert.Empty(t, k)
 assert.Empty(t, v)
@@ -133,15 +133,15 @@ func (t *compactionTask) getChannelName() string {
 return t.plan.GetChannel()
 }
 
-func (t *compactionTask) mergeDeltalogs(dBlobs map[UniqueID][]*Blob, timetravelTs Timestamp) (map[UniqueID]Timestamp, *DelDataBuf, error) {
+func (t *compactionTask) mergeDeltalogs(dBlobs map[UniqueID][]*Blob, timetravelTs Timestamp) (map[primaryKey]Timestamp, *DelDataBuf, error) {
 mergeStart := time.Now()
 dCodec := storage.NewDeleteCodec()
 
 var (
-pk2ts = make(map[UniqueID]Timestamp)
+pk2ts = make(map[primaryKey]Timestamp)
 dbuff = &DelDataBuf{
 delData: &DeleteData{
-Pks: make([]UniqueID, 0),
+Pks: make([]primaryKey, 0),
 Tss: make([]Timestamp, 0)},
 Binlog: datapb.Binlog{
 TimestampFrom: math.MaxUint64,
@@ -191,7 +191,7 @@ func nano2Milli(nano time.Duration) float64 {
 return float64(nano) / float64(time.Millisecond)
 }
 
-func (t *compactionTask) merge(mergeItr iterator, delta map[UniqueID]Timestamp, schema *schemapb.CollectionSchema, currentTs Timestamp) ([]*InsertData, int64, error) {
+func (t *compactionTask) merge(mergeItr iterator, delta map[primaryKey]Timestamp, schema *schemapb.CollectionSchema, currentTs Timestamp) ([]*InsertData, int64, error) {
 mergeStart := time.Now()
 
 var (
@@ -206,6 +206,16 @@ func (t *compactionTask) merge(mergeItr iterator, delta map[UniqueID]Timestamp,
 fID2Content = make(map[UniqueID][]interface{})
 )
 
+isDeletedValue := func(v *storage.Value) bool {
+for pk, ts := range delta {
+if pk.EQ(v.PK) && uint64(v.Timestamp) <= ts {
+return true
+}
+}
+
+return false
+}
+
 // get dim
 for _, fs := range schema.GetFields() {
 fID2Type[fs.GetFieldID()] = fs.GetDataType()
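The new isDeletedValue closure replaces the old delta[v.PK] map lookup (removed in the next hunk). The reason is that delta is now keyed by the primaryKey interface: two logically equal keys backed by distinct pointers are different map keys, so equality has to go through EQ, at the cost of a linear scan over delta per row. A runnable illustration of that Go map behavior, using a throwaway int64Pk type:

```go
package main

import "fmt"

type primaryKey interface{ EQ(primaryKey) bool }

type int64Pk struct{ v int64 }

func (p *int64Pk) EQ(o primaryKey) bool {
	other, ok := o.(*int64Pk)
	return ok && p.v == other.v
}

func main() {
	// Two logically equal keys, distinct pointers.
	a, b := &int64Pk{1}, &int64Pk{1}

	delta := map[primaryKey]uint64{a: 10000}

	// A map lookup compares the interface values, which for pointer
	// dynamic types means pointer identity, so b misses:
	_, found := delta[b]
	fmt.Println("map lookup:", found) // false

	// which is why merge() scans the map and compares with EQ instead:
	hit := false
	for pk := range delta {
		if pk.EQ(b) {
			hit = true
		}
	}
	fmt.Println("EQ scan:", hit) // true
}
```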
@@ -234,7 +244,7 @@ func (t *compactionTask) merge(mergeItr iterator, delta map[UniqueID]Timestamp,
 return nil, 0, errors.New("unexpected error")
 }
 
-if _, ok := delta[v.PK]; ok {
+if isDeletedValue(v) {
 continue
 }
 
@@ -372,12 +382,14 @@ func (t *compactionTask) compact() error {
 dmu sync.Mutex
 
 PKfieldID UniqueID
+PkType schemapb.DataType
 )
 
 // Get PK fieldID
 for _, fs := range meta.GetSchema().GetFields() {
-if fs.GetFieldID() >= 100 && fs.GetDataType() == schemapb.DataType_Int64 && fs.GetIsPrimaryKey() {
+if fs.GetFieldID() >= 100 && typeutil.IsPrimaryFieldType(fs.GetDataType()) && fs.GetIsPrimaryKey() {
 PKfieldID = fs.GetFieldID()
+PkType = fs.GetDataType()
 break
 }
 }
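typeutil.IsPrimaryFieldType widens the old hard-coded DataType_Int64 check so that VarChar fields can serve as primary keys too. Its body is not part of this diff; presumably it is little more than:

```go
package typeutil

import "github.com/milvus-io/milvus/internal/proto/schemapb"

// Sketch of the assumed helper: a field can serve as a primary key
// if its type is Int64 or VarChar.
func IsPrimaryFieldType(dataType schemapb.DataType) bool {
	return dataType == schemapb.DataType_Int64 || dataType == schemapb.DataType_VarChar
}
```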
@@ -413,7 +425,7 @@ func (t *compactionTask) compact() error {
 return err
 }
 
-itr, err := storage.NewInsertBinlogIterator(bs, PKfieldID)
+itr, err := storage.NewInsertBinlogIterator(bs, PKfieldID, PkType)
 if err != nil {
 log.Warn("new insert binlogs Itr wrong")
 return err
@@ -120,7 +120,7 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
 1: {},
 }
 
-blobs, err := getDeltaBlobs(
+blobs, err := getInt64DeltaBlobs(
 100,
 []UniqueID{
 1,
@@ -165,7 +165,8 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
 assert.Equal(t, 3, len(pk2ts))
 assert.Equal(t, int64(3), db.GetEntriesNum())
 assert.Equal(t, int64(3), db.delData.RowCount)
-assert.ElementsMatch(t, []UniqueID{1, 4, 5}, db.delData.Pks)
+matchedPks := []primaryKey{newInt64PrimaryKey(1), newInt64PrimaryKey(4), newInt64PrimaryKey(5)}
+assert.ElementsMatch(t, matchedPks, db.delData.Pks)
 assert.ElementsMatch(t, []Timestamp{30000, 50000, 50000}, db.delData.Tss)
 
 } else {
@@ -217,17 +218,17 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
 t.Run(test.description, func(t *testing.T) {
 dBlobs := make(map[UniqueID][]*Blob)
 if test.segIDA != UniqueID(0) {
-d, err := getDeltaBlobs(test.segIDA, test.dataApk, test.dataAts)
+d, err := getInt64DeltaBlobs(test.segIDA, test.dataApk, test.dataAts)
 require.NoError(t, err)
 dBlobs[test.segIDA] = d
 }
 if test.segIDB != UniqueID(0) {
-d, err := getDeltaBlobs(test.segIDB, test.dataBpk, test.dataBts)
+d, err := getInt64DeltaBlobs(test.segIDB, test.dataBpk, test.dataBts)
 require.NoError(t, err)
 dBlobs[test.segIDB] = d
 }
 if test.segIDC != UniqueID(0) {
-d, err := getDeltaBlobs(test.segIDC, test.dataCpk, test.dataCts)
+d, err := getInt64DeltaBlobs(test.segIDC, test.dataCpk, test.dataCts)
 require.NoError(t, err)
 dBlobs[test.segIDC] = d
 }
@@ -252,19 +253,20 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
 iblobs, err := getInsertBlobs(100, iData, meta)
 require.NoError(t, err)
 
-iitr, err := storage.NewInsertBinlogIterator(iblobs, 106)
+iitr, err := storage.NewInsertBinlogIterator(iblobs, 106, schemapb.DataType_Int64)
 require.NoError(t, err)
 
 mitr := storage.NewMergeIterator([]iterator{iitr})
 
-dm := map[UniqueID]Timestamp{
-1: 10000,
+pk := newInt64PrimaryKey(1)
+dm := map[primaryKey]Timestamp{
+pk: 10000,
 }
 
 ct := &compactionTask{}
 idata, numOfRow, err := ct.merge(mitr, dm, meta.GetSchema(), ct.GetCurrentTime())
 assert.NoError(t, err)
-assert.Equal(t, int64(1), numOfRow)
+assert.Equal(t, int64(2), numOfRow)
 assert.Equal(t, 1, len(idata))
 assert.NotEmpty(t, idata[0].Data)
 })
@@ -281,12 +283,12 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
 iblobs, err := getInsertBlobs(100, iData, meta)
 require.NoError(t, err)
 
-iitr, err := storage.NewInsertBinlogIterator(iblobs, 106)
+iitr, err := storage.NewInsertBinlogIterator(iblobs, 106, schemapb.DataType_Int64)
 require.NoError(t, err)
 
 mitr := storage.NewMergeIterator([]iterator{iitr})
 
-dm := map[UniqueID]Timestamp{}
+dm := map[primaryKey]Timestamp{}
 
 ct := &compactionTask{}
 idata, numOfRow, err := ct.merge(mitr, dm, meta.GetSchema(), ct.GetCurrentTime())
@@ -304,20 +306,21 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
 iblobs, err := getInsertBlobs(100, iData, meta)
 require.NoError(t, err)
 
-iitr, err := storage.NewInsertBinlogIterator(iblobs, 106)
+iitr, err := storage.NewInsertBinlogIterator(iblobs, 106, schemapb.DataType_Int64)
 require.NoError(t, err)
 
 mitr := storage.NewMergeIterator([]iterator{iitr})
 
-dm := map[UniqueID]Timestamp{
-1: 10000,
+pk := newInt64PrimaryKey(1)
+dm := map[primaryKey]Timestamp{
+pk: 10000,
 }
 
 ct := &compactionTask{}
 idata, numOfRow, err := ct.merge(mitr, dm, meta.GetSchema(), genTimestamp())
 assert.NoError(t, err)
-assert.Equal(t, int64(0), numOfRow)
-assert.Equal(t, 0, len(idata))
+assert.Equal(t, int64(1), numOfRow)
+assert.Equal(t, 1, len(idata))
 })
 })
 
@@ -382,9 +385,13 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
 })
 }
 
-func getDeltaBlobs(segID UniqueID, pks []UniqueID, tss []Timestamp) ([]*Blob, error) {
+func getInt64DeltaBlobs(segID UniqueID, pks []UniqueID, tss []Timestamp) ([]*Blob, error) {
+primaryKeys := make([]primaryKey, len(pks))
+for index, v := range pks {
+primaryKeys[index] = newInt64PrimaryKey(v)
+}
 deltaData := &DeleteData{
-Pks: pks,
+Pks: primaryKeys,
 Tss: tss,
 RowCount: int64(len(pks)),
 }
|
|||
})
|
||||
|
||||
t.Run("Test typeI compact valid", func(t *testing.T) {
|
||||
var collID, partID, segID UniqueID = 1, 10, 100
|
||||
|
||||
alloc := NewAllocatorFactory(1)
|
||||
rc := &RootCoordFactory{
|
||||
pkType: schemapb.DataType_Int64,
|
||||
}
|
||||
dc := &DataCoordFactory{}
|
||||
mockfm := &mockFlushManager{}
|
||||
mockbIO := &binlogIO{cm, alloc}
|
||||
replica, err := newReplica(context.TODO(), rc, cm, collID)
|
||||
require.NoError(t, err)
|
||||
replica.addFlushedSegmentWithPKs(segID, collID, partID, "channelname", 2, &storage.Int64FieldData{Data: []UniqueID{1, 2}})
|
||||
|
||||
iData := genInsertData()
|
||||
meta := NewMetaFactory().GetCollectionMeta(collID, "test_compact_coll_name", schemapb.DataType_Int64)
|
||||
dData := &DeleteData{
|
||||
Pks: []UniqueID{1},
|
||||
Tss: []Timestamp{20000},
|
||||
RowCount: 1,
|
||||
}
|
||||
|
||||
cpaths, err := mockbIO.upload(context.TODO(), segID, partID, []*InsertData{iData}, dData, meta)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 12, len(cpaths.inPaths))
|
||||
segBinlogs := []*datapb.CompactionSegmentBinlogs{
|
||||
type testCase struct {
|
||||
pkType schemapb.DataType
|
||||
fieldData storage.FieldData
|
||||
pk1 primaryKey
|
||||
pk2 primaryKey
|
||||
colID UniqueID
|
||||
parID UniqueID
|
||||
segID UniqueID
|
||||
}
|
||||
cases := []testCase{
|
||||
{
|
||||
SegmentID: segID,
|
||||
FieldBinlogs: cpaths.inPaths,
|
||||
Field2StatslogPaths: cpaths.statsPaths,
|
||||
Deltalogs: cpaths.deltaInfo,
|
||||
}}
|
||||
|
||||
plan := &datapb.CompactionPlan{
|
||||
PlanID: 10080,
|
||||
SegmentBinlogs: segBinlogs,
|
||||
StartTime: 0,
|
||||
TimeoutInSeconds: 1,
|
||||
Type: datapb.CompactionType_InnerCompaction,
|
||||
Timetravel: 30000,
|
||||
Channel: "channelname",
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
cancel()
|
||||
canceledTask := newCompactionTask(ctx, mockbIO, mockbIO, replica, mockfm, alloc, dc, plan)
|
||||
err = canceledTask.compact()
|
||||
assert.Error(t, err)
|
||||
|
||||
task := newCompactionTask(context.TODO(), mockbIO, mockbIO, replica, mockfm, alloc, dc, plan)
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
|
||||
updates, err := replica.getSegmentStatisticsUpdates(segID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(1), updates.GetNumRows())
|
||||
|
||||
id := task.getCollection()
|
||||
assert.Equal(t, UniqueID(1), id)
|
||||
|
||||
planID := task.getPlanID()
|
||||
assert.Equal(t, plan.GetPlanID(), planID)
|
||||
|
||||
// Compact to delete the entire segment
|
||||
deleteAllData := &DeleteData{
|
||||
Pks: []UniqueID{1, 2},
|
||||
Tss: []Timestamp{20000, 20001},
|
||||
RowCount: 2,
|
||||
}
|
||||
|
||||
err = cm.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
cpaths, err = mockbIO.upload(context.TODO(), segID, partID, []*InsertData{iData}, deleteAllData, meta)
|
||||
require.NoError(t, err)
|
||||
plan.PlanID++
|
||||
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
// The segment should be removed
|
||||
assert.False(t, replica.hasSegment(segID, true))
|
||||
|
||||
// re-add the segment
|
||||
replica.addFlushedSegmentWithPKs(segID, collID, partID, "channelname", 2, &storage.Int64FieldData{Data: []UniqueID{1, 2}})
|
||||
|
||||
// Compact empty segment
|
||||
err = cm.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
cpaths, err = mockbIO.upload(context.TODO(), segID, partID, []*InsertData{iData}, dData, meta)
|
||||
require.NoError(t, err)
|
||||
plan.PlanID = 999876
|
||||
segmentBinlogsWithEmptySegment := []*datapb.CompactionSegmentBinlogs{
|
||||
pkType: schemapb.DataType_Int64,
|
||||
fieldData: &storage.Int64FieldData{Data: []UniqueID{1, 2}},
|
||||
pk1: newInt64PrimaryKey(1),
|
||||
pk2: newInt64PrimaryKey(2),
|
||||
colID: 1,
|
||||
parID: 10,
|
||||
segID: 100,
|
||||
},
|
||||
{
|
||||
SegmentID: segID,
|
||||
pkType: schemapb.DataType_VarChar,
|
||||
fieldData: &storage.StringFieldData{Data: []string{"test1", "test2"}},
|
||||
pk1: newVarCharPrimaryKey("test1"),
|
||||
pk2: newVarCharPrimaryKey("test2"),
|
||||
colID: 2,
|
||||
parID: 11,
|
||||
segID: 101,
|
||||
},
|
||||
}
|
||||
plan.SegmentBinlogs = segmentBinlogsWithEmptySegment
|
||||
err = task.compact()
|
||||
assert.Error(t, err)
|
||||
for _, c := range cases {
|
||||
rc := &RootCoordFactory{
|
||||
pkType: c.pkType,
|
||||
}
|
||||
dc := &DataCoordFactory{}
|
||||
mockfm := &mockFlushManager{}
|
||||
mockbIO := &binlogIO{cm, alloc}
|
||||
replica, err := newReplica(context.TODO(), rc, cm, c.colID)
|
||||
require.NoError(t, err)
|
||||
replica.addFlushedSegmentWithPKs(c.segID, c.colID, c.parID, "channelname", 2, c.fieldData)
|
||||
|
||||
plan.SegmentBinlogs = segBinlogs
|
||||
// New test, remove all the binlogs in memkv
|
||||
// Deltas in timetravel range
|
||||
err = cm.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
cpaths, err = mockbIO.upload(context.TODO(), segID, partID, []*InsertData{iData}, dData, meta)
|
||||
require.NoError(t, err)
|
||||
plan.PlanID++
|
||||
meta := NewMetaFactory().GetCollectionMeta(c.colID, "test_compact_coll_name", c.pkType)
|
||||
dData := &DeleteData{
|
||||
Pks: []primaryKey{c.pk1},
|
||||
Tss: []Timestamp{20000},
|
||||
RowCount: 1,
|
||||
}
|
||||
|
||||
plan.Timetravel = Timestamp(10000)
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
cpaths, err := mockbIO.upload(context.TODO(), c.segID, c.parID, []*InsertData{iData}, dData, meta)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 12, len(cpaths.inPaths))
|
||||
segBinlogs := []*datapb.CompactionSegmentBinlogs{
|
||||
{
|
||||
SegmentID: c.segID,
|
||||
FieldBinlogs: cpaths.inPaths,
|
||||
Field2StatslogPaths: cpaths.statsPaths,
|
||||
Deltalogs: cpaths.deltaInfo,
|
||||
}}
|
||||
|
||||
updates, err = replica.getSegmentStatisticsUpdates(segID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(2), updates.GetNumRows())
|
||||
plan := &datapb.CompactionPlan{
|
||||
PlanID: 10080,
|
||||
SegmentBinlogs: segBinlogs,
|
||||
StartTime: 0,
|
||||
TimeoutInSeconds: 1,
|
||||
Type: datapb.CompactionType_InnerCompaction,
|
||||
Timetravel: 30000,
|
||||
Channel: "channelname",
|
||||
}
|
||||
|
||||
// New test, remove all the binlogs in memkv
|
||||
// Timeout
|
||||
err = cm.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
cpaths, err = mockbIO.upload(context.TODO(), segID, partID, []*InsertData{iData}, dData, meta)
|
||||
require.NoError(t, err)
|
||||
plan.PlanID++
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
cancel()
|
||||
canceledTask := newCompactionTask(ctx, mockbIO, mockbIO, replica, mockfm, alloc, dc, plan)
|
||||
err = canceledTask.compact()
|
||||
assert.Error(t, err)
|
||||
|
||||
mockfm.sleepSeconds = plan.TimeoutInSeconds + int32(1)
|
||||
err = task.compact()
|
||||
assert.Error(t, err)
|
||||
task := newCompactionTask(context.TODO(), mockbIO, mockbIO, replica, mockfm, alloc, dc, plan)
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
|
||||
updates, err := replica.getSegmentStatisticsUpdates(c.segID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(1), updates.GetNumRows())
|
||||
|
||||
id := task.getCollection()
|
||||
assert.Equal(t, c.colID, id)
|
||||
|
||||
planID := task.getPlanID()
|
||||
assert.Equal(t, plan.GetPlanID(), planID)
|
||||
|
||||
// Compact to delete the entire segment
|
||||
deleteAllData := &DeleteData{
|
||||
Pks: []primaryKey{c.pk1, c.pk2},
|
||||
Tss: []Timestamp{20000, 20001},
|
||||
RowCount: 2,
|
||||
}
|
||||
|
||||
err = cm.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
cpaths, err = mockbIO.upload(context.TODO(), c.segID, c.parID, []*InsertData{iData}, deleteAllData, meta)
|
||||
require.NoError(t, err)
|
||||
plan.PlanID++
|
||||
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
// The segment should be removed
|
||||
assert.False(t, replica.hasSegment(c.segID, true))
|
||||
|
||||
// re-add the segment
|
||||
replica.addFlushedSegmentWithPKs(c.segID, c.colID, c.parID, "channelname", 2, c.fieldData)
|
||||
|
||||
// Compact empty segment
|
||||
err = cm.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
cpaths, err = mockbIO.upload(context.TODO(), c.segID, c.parID, []*InsertData{iData}, dData, meta)
|
||||
require.NoError(t, err)
|
||||
plan.PlanID = 999876
|
||||
segmentBinlogsWithEmptySegment := []*datapb.CompactionSegmentBinlogs{
|
||||
{
|
||||
SegmentID: c.segID,
|
||||
},
|
||||
}
|
||||
plan.SegmentBinlogs = segmentBinlogsWithEmptySegment
|
||||
err = task.compact()
|
||||
assert.Error(t, err)
|
||||
|
||||
plan.SegmentBinlogs = segBinlogs
|
||||
// New test, remove all the binlogs in memkv
|
||||
// Deltas in timetravel range
|
||||
err = cm.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
cpaths, err = mockbIO.upload(context.TODO(), c.segID, c.parID, []*InsertData{iData}, dData, meta)
|
||||
require.NoError(t, err)
|
||||
plan.PlanID++
|
||||
|
||||
plan.Timetravel = Timestamp(10000)
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
|
||||
updates, err = replica.getSegmentStatisticsUpdates(c.segID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(2), updates.GetNumRows())
|
||||
|
||||
// New test, remove all the binlogs in memkv
|
||||
// Timeout
|
||||
err = cm.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
cpaths, err = mockbIO.upload(context.TODO(), c.segID, c.parID, []*InsertData{iData}, dData, meta)
|
||||
require.NoError(t, err)
|
||||
plan.PlanID++
|
||||
|
||||
mockfm.sleepSeconds = plan.TimeoutInSeconds + int32(1)
|
||||
err = task.compact()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Test typeII compact valid", func(t *testing.T) {
|
||||
var collID, partID, segID1, segID2 UniqueID = 1, 10, 200, 201
|
||||
|
||||
alloc := NewAllocatorFactory(1)
|
||||
rc := &RootCoordFactory{
|
||||
pkType: schemapb.DataType_Int64,
|
||||
type testCase struct {
|
||||
pkType schemapb.DataType
|
||||
iData1 storage.FieldData
|
||||
iData2 storage.FieldData
|
||||
pks1 [2]primaryKey
|
||||
pks2 [2]primaryKey
|
||||
colID UniqueID
|
||||
parID UniqueID
|
||||
segID1 UniqueID
|
||||
segID2 UniqueID
|
||||
}
|
||||
dc := &DataCoordFactory{}
|
||||
mockfm := &mockFlushManager{}
|
||||
mockKv := memkv.NewMemoryKV()
|
||||
mockbIO := &binlogIO{cm, alloc}
|
||||
replica, err := newReplica(context.TODO(), rc, cm, collID)
|
||||
require.NoError(t, err)
|
||||
|
||||
replica.addFlushedSegmentWithPKs(segID1, collID, partID, "channelname", 2, &storage.Int64FieldData{Data: []UniqueID{1}})
|
||||
replica.addFlushedSegmentWithPKs(segID2, collID, partID, "channelname", 2, &storage.Int64FieldData{Data: []UniqueID{9}})
|
||||
require.True(t, replica.hasSegment(segID1, true))
|
||||
require.True(t, replica.hasSegment(segID2, true))
|
||||
|
||||
meta := NewMetaFactory().GetCollectionMeta(collID, "test_compact_coll_name", schemapb.DataType_Int64)
|
||||
iData1 := genInsertDataWithPKs([2]int64{1, 2})
|
||||
dData1 := &DeleteData{
|
||||
Pks: []UniqueID{1},
|
||||
Tss: []Timestamp{20000},
|
||||
RowCount: 1,
|
||||
}
|
||||
iData2 := genInsertDataWithPKs([2]int64{9, 10})
|
||||
dData2 := &DeleteData{
|
||||
Pks: []UniqueID{9},
|
||||
Tss: []Timestamp{30000},
|
||||
RowCount: 1,
|
||||
}
|
||||
|
||||
cpaths1, err := mockbIO.upload(context.TODO(), segID1, partID, []*InsertData{iData1}, dData1, meta)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 12, len(cpaths1.inPaths))
|
||||
|
||||
cpaths2, err := mockbIO.upload(context.TODO(), segID2, partID, []*InsertData{iData2}, dData2, meta)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 12, len(cpaths2.inPaths))
|
||||
|
||||
plan := &datapb.CompactionPlan{
|
||||
PlanID: 10080,
|
||||
SegmentBinlogs: []*datapb.CompactionSegmentBinlogs{
|
||||
{
|
||||
SegmentID: segID1,
|
||||
FieldBinlogs: cpaths1.inPaths,
|
||||
Field2StatslogPaths: cpaths1.statsPaths,
|
||||
Deltalogs: cpaths1.deltaInfo,
|
||||
},
|
||||
{
|
||||
SegmentID: segID2,
|
||||
FieldBinlogs: cpaths2.inPaths,
|
||||
Field2StatslogPaths: cpaths2.statsPaths,
|
||||
Deltalogs: cpaths2.deltaInfo,
|
||||
},
|
||||
cases := []testCase{
|
||||
{
|
||||
pkType: schemapb.DataType_Int64,
|
||||
iData1: &storage.Int64FieldData{Data: []UniqueID{1}},
|
||||
iData2: &storage.Int64FieldData{Data: []UniqueID{9}},
|
||||
pks1: [2]primaryKey{newInt64PrimaryKey(1), newInt64PrimaryKey(2)},
|
||||
pks2: [2]primaryKey{newInt64PrimaryKey(9), newInt64PrimaryKey(10)},
|
||||
colID: 1,
|
||||
parID: 10,
|
||||
segID1: 100,
|
||||
segID2: 101,
|
||||
},
|
||||
{
|
||||
pkType: schemapb.DataType_VarChar,
|
||||
iData1: &storage.StringFieldData{Data: []string{"aaaa"}},
|
||||
iData2: &storage.StringFieldData{Data: []string{"milvus"}},
|
||||
pks1: [2]primaryKey{newVarCharPrimaryKey("aaaa"), newVarCharPrimaryKey("bbbb")},
|
||||
pks2: [2]primaryKey{newVarCharPrimaryKey("milvus"), newVarCharPrimaryKey("mmmm")},
|
||||
colID: 2,
|
||||
parID: 11,
|
||||
segID1: 102,
|
||||
segID2: 103,
|
||||
},
|
||||
StartTime: 0,
|
||||
TimeoutInSeconds: 1,
|
||||
Type: datapb.CompactionType_MergeCompaction,
|
||||
Timetravel: 40000,
|
||||
Channel: "channelname",
|
||||
}
|
||||
|
||||
alloc.random = false // generated ID = 19530
|
||||
task := newCompactionTask(context.TODO(), mockbIO, mockbIO, replica, mockfm, alloc, dc, plan)
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
for _, c := range cases {
|
||||
rc := &RootCoordFactory{
|
||||
pkType: c.pkType,
|
||||
}
|
||||
dc := &DataCoordFactory{}
|
||||
mockfm := &mockFlushManager{}
|
||||
mockKv := memkv.NewMemoryKV()
|
||||
mockbIO := &binlogIO{cm, alloc}
|
||||
replica, err := newReplica(context.TODO(), rc, cm, c.colID)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.False(t, replica.hasSegment(segID1, true))
|
||||
assert.False(t, replica.hasSegment(segID2, true))
|
||||
assert.True(t, replica.hasSegment(19530, true))
|
||||
updates, err := replica.getSegmentStatisticsUpdates(19530)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(2), updates.GetNumRows())
|
||||
replica.addFlushedSegmentWithPKs(c.segID1, c.colID, c.parID, "channelname", 2, c.iData1)
|
||||
replica.addFlushedSegmentWithPKs(c.segID2, c.colID, c.parID, "channelname", 2, c.iData2)
|
||||
require.True(t, replica.hasSegment(c.segID1, true))
|
||||
require.True(t, replica.hasSegment(c.segID2, true))
|
||||
|
||||
// New test, remove all the binlogs in memkv
|
||||
// Deltas in timetravel range
|
||||
err = mockKv.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
plan.PlanID++
|
||||
meta := NewMetaFactory().GetCollectionMeta(c.colID, "test_compact_coll_name", c.pkType)
|
||||
iData1 := genInsertDataWithPKs(c.pks1, c.pkType)
|
||||
dData1 := &DeleteData{
|
||||
Pks: []primaryKey{c.pks1[0]},
|
||||
Tss: []Timestamp{20000},
|
||||
RowCount: 1,
|
||||
}
|
||||
iData2 := genInsertDataWithPKs(c.pks2, c.pkType)
|
||||
dData2 := &DeleteData{
|
||||
Pks: []primaryKey{c.pks2[0]},
|
||||
Tss: []Timestamp{30000},
|
||||
RowCount: 1,
|
||||
}
|
||||
|
||||
plan.Timetravel = Timestamp(25000)
|
||||
replica.addFlushedSegmentWithPKs(segID1, collID, partID, "channelname", 2, &storage.Int64FieldData{Data: []UniqueID{1}})
|
||||
replica.addFlushedSegmentWithPKs(segID2, collID, partID, "channelname", 2, &storage.Int64FieldData{Data: []UniqueID{9}})
|
||||
replica.removeSegments(19530)
|
||||
require.True(t, replica.hasSegment(segID1, true))
|
||||
require.True(t, replica.hasSegment(segID2, true))
|
||||
require.False(t, replica.hasSegment(19530, true))
|
||||
cpaths1, err := mockbIO.upload(context.TODO(), c.segID1, c.parID, []*InsertData{iData1}, dData1, meta)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 12, len(cpaths1.inPaths))
|
||||
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
cpaths2, err := mockbIO.upload(context.TODO(), c.segID2, c.parID, []*InsertData{iData2}, dData2, meta)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 12, len(cpaths2.inPaths))
|
||||
|
||||
assert.False(t, replica.hasSegment(segID1, true))
|
||||
assert.False(t, replica.hasSegment(segID2, true))
|
||||
assert.True(t, replica.hasSegment(19530, true))
|
||||
updates, err = replica.getSegmentStatisticsUpdates(19530)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(3), updates.GetNumRows())
|
||||
plan := &datapb.CompactionPlan{
|
||||
PlanID: 10080,
|
||||
SegmentBinlogs: []*datapb.CompactionSegmentBinlogs{
|
||||
{
|
||||
SegmentID: c.segID1,
|
||||
FieldBinlogs: cpaths1.inPaths,
|
||||
Field2StatslogPaths: cpaths1.statsPaths,
|
||||
Deltalogs: cpaths1.deltaInfo,
|
||||
},
|
||||
{
|
||||
SegmentID: c.segID2,
|
||||
FieldBinlogs: cpaths2.inPaths,
|
||||
Field2StatslogPaths: cpaths2.statsPaths,
|
||||
Deltalogs: cpaths2.deltaInfo,
|
||||
},
|
||||
},
|
||||
StartTime: 0,
|
||||
TimeoutInSeconds: 1,
|
||||
Type: datapb.CompactionType_MergeCompaction,
|
||||
Timetravel: 40000,
|
||||
Channel: "channelname",
|
||||
}
|
||||
|
||||
// New test, remove all the binlogs in memkv
|
||||
// Deltas in timetravel range
|
||||
err = mockKv.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
plan.PlanID++
|
||||
alloc.random = false // generated ID = 19530
|
||||
task := newCompactionTask(context.TODO(), mockbIO, mockbIO, replica, mockfm, alloc, dc, plan)
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
|
||||
plan.Timetravel = Timestamp(10000)
|
||||
replica.addFlushedSegmentWithPKs(segID1, collID, partID, "channelname", 2, &storage.Int64FieldData{Data: []UniqueID{1}})
|
||||
replica.addFlushedSegmentWithPKs(segID2, collID, partID, "channelname", 2, &storage.Int64FieldData{Data: []UniqueID{9}})
|
||||
replica.removeSegments(19530)
|
||||
require.True(t, replica.hasSegment(segID1, true))
|
||||
require.True(t, replica.hasSegment(segID2, true))
|
||||
require.False(t, replica.hasSegment(19530, true))
|
||||
assert.False(t, replica.hasSegment(c.segID1, true))
|
||||
assert.False(t, replica.hasSegment(c.segID2, true))
|
||||
assert.True(t, replica.hasSegment(19530, true))
|
||||
updates, err := replica.getSegmentStatisticsUpdates(19530)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(2), updates.GetNumRows())
|
||||
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
// New test, remove all the binlogs in memkv
|
||||
// Deltas in timetravel range
|
||||
err = mockKv.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
plan.PlanID++
|
||||
|
||||
assert.False(t, replica.hasSegment(segID1, true))
|
||||
assert.False(t, replica.hasSegment(segID2, true))
|
||||
assert.True(t, replica.hasSegment(19530, true))
|
||||
updates, err = replica.getSegmentStatisticsUpdates(19530)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(4), updates.GetNumRows())
|
||||
plan.Timetravel = Timestamp(25000)
|
||||
replica.addFlushedSegmentWithPKs(c.segID1, c.colID, c.parID, "channelname", 2, c.iData1)
|
||||
replica.addFlushedSegmentWithPKs(c.segID2, c.colID, c.parID, "channelname", 2, c.iData2)
|
||||
replica.removeSegments(19530)
|
||||
require.True(t, replica.hasSegment(c.segID1, true))
|
||||
require.True(t, replica.hasSegment(c.segID2, true))
|
||||
require.False(t, replica.hasSegment(19530, true))
|
||||
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.False(t, replica.hasSegment(c.segID1, true))
|
||||
assert.False(t, replica.hasSegment(c.segID2, true))
|
||||
assert.True(t, replica.hasSegment(19530, true))
|
||||
updates, err = replica.getSegmentStatisticsUpdates(19530)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(3), updates.GetNumRows())
|
||||
|
||||
// New test, remove all the binlogs in memkv
|
||||
// Deltas in timetravel range
|
||||
err = mockKv.RemoveWithPrefix("/")
|
||||
require.NoError(t, err)
|
||||
plan.PlanID++
|
||||
|
||||
plan.Timetravel = Timestamp(10000)
|
||||
replica.addFlushedSegmentWithPKs(c.segID1, c.colID, c.parID, "channelname", 2, c.iData1)
|
||||
replica.addFlushedSegmentWithPKs(c.segID2, c.colID, c.parID, "channelname", 2, c.iData2)
|
||||
replica.removeSegments(19530)
|
||||
require.True(t, replica.hasSegment(c.segID1, true))
|
||||
require.True(t, replica.hasSegment(c.segID2, true))
|
||||
require.False(t, replica.hasSegment(19530, true))
|
||||
|
||||
err = task.compact()
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.False(t, replica.hasSegment(c.segID1, true))
|
||||
assert.False(t, replica.hasSegment(c.segID2, true))
|
||||
assert.True(t, replica.hasSegment(19530, true))
|
||||
updates, err = replica.getSegmentStatisticsUpdates(19530)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(4), updates.GetNumRows())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Test typeII compact 2 segments with the same pk", func(t *testing.T) {
|
||||
|
@@ -725,17 +798,19 @@ func TestCompactorInterfaceMethods(t *testing.T) {
 
 meta := NewMetaFactory().GetCollectionMeta(collID, "test_compact_coll_name", schemapb.DataType_Int64)
 // the same pk for segmentI and segmentII
-iData1 := genInsertDataWithPKs([2]int64{1, 2})
-iData2 := genInsertDataWithPKs([2]int64{1, 2})
+pks := [2]primaryKey{newInt64PrimaryKey(1), newInt64PrimaryKey(2)}
+iData1 := genInsertDataWithPKs(pks, schemapb.DataType_Int64)
+iData2 := genInsertDataWithPKs(pks, schemapb.DataType_Int64)
 
+pk1 := newInt64PrimaryKey(1)
 dData1 := &DeleteData{
-Pks: []UniqueID{1},
+Pks: []primaryKey{pk1},
 Tss: []Timestamp{20000},
 RowCount: 1,
 }
 // empty dData2
 dData2 := &DeleteData{
-Pks: []UniqueID{},
+Pks: []primaryKey{},
 Tss: []Timestamp{},
 RowCount: 0,
 }
@@ -148,9 +148,9 @@ func (ddn *ddNode) Operate(in []Msg) []Msg {
 case commonpb.MsgType_Delete:
 dmsg := msg.(*msgstream.DeleteMsg)
 log.Debug("DDNode receive delete messages",
-zap.Int("num", len(dmsg.GetPrimaryKeys())),
+zap.Int64("num", dmsg.NumRows),
 zap.String("vChannelName", ddn.vchannelName))
-for i := 0; i < len(dmsg.PrimaryKeys); i++ {
+for i := int64(0); i < dmsg.NumRows; i++ {
 dmsg.HashValues = append(dmsg.HashValues, uint32(0))
 }
 forwardMsgs = append(forwardMsgs, dmsg)
@@ -29,6 +29,7 @@ import (
	"github.com/milvus-io/milvus/internal/metrics"
	"github.com/milvus-io/milvus/internal/mq/msgstream"
	"github.com/milvus-io/milvus/internal/proto/datapb"
+	"github.com/milvus-io/milvus/internal/proto/schemapb"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/internal/util/trace"
	"github.com/opentracing/opentracing-go"

@@ -121,27 +122,12 @@ func (dn *deleteNode) bufferDeleteMsg(msg *msgstream.DeleteMsg, tr TimeRange) er
		)
	}

-	segIDToPkMap := make(map[UniqueID][]int64)
-	segIDToTsMap := make(map[UniqueID][]uint64)
-
-	m := dn.filterSegmentByPK(msg.PartitionID, msg.PrimaryKeys)
-	for i, pk := range msg.PrimaryKeys {
-		segIDs, ok := m[pk]
-		if !ok {
-			log.Warn("primary key not exist in all segments",
-				zap.Int64("primary key", pk),
-				zap.String("vChannelName", dn.channelName))
-			continue
-		}
-		for _, segID := range segIDs {
-			segIDToPkMap[segID] = append(segIDToPkMap[segID], pk)
-			segIDToTsMap[segID] = append(segIDToTsMap[segID], msg.Timestamps[i])
-		}
-	}
+	primaryKeys := storage.ParseIDs2PrimaryKeys(msg.PrimaryKeys)
+	segIDToPks, segIDToTss := dn.filterSegmentByPK(msg.PartitionID, primaryKeys, msg.Timestamps)

-	for segID, pks := range segIDToPkMap {
+	for segID, pks := range segIDToPks {
		rows := len(pks)
-		tss, ok := segIDToTsMap[segID]
+		tss, ok := segIDToTss[segID]
		if !ok || rows != len(tss) {
			// TODO: what's the expected behavior after this Error?
			log.Error("primary keys and timestamp's element num mis-match")

@@ -161,7 +147,7 @@ func (dn *deleteNode) bufferDeleteMsg(msg *msgstream.DeleteMsg, tr TimeRange) er
			delData.Pks = append(delData.Pks, pks[i])
			delData.Tss = append(delData.Tss, tss[i])
			log.Debug("delete",
-				zap.Int64("primary key", pks[i]),
+				zap.Any("primary key", pks[i]),
				zap.Uint64("ts", tss[i]),
				zap.Int64("segmentID", segID),
				zap.String("vChannelName", dn.channelName))

@@ -191,7 +177,7 @@ func (dn *deleteNode) showDelBuf() {
		length := len(delDataBuf.delData.Pks)
		for i := 0; i < length; i++ {
			log.Debug("del data",
-				zap.Int64("pk", delDataBuf.delData.Pks[i]),
+				zap.Any("pk", delDataBuf.delData.Pks[i]),
				zap.Uint64("ts", delDataBuf.delData.Tss[i]),
				zap.Int64("segmentID", segID),
				zap.String("vchannel", dn.channelName),

@@ -279,20 +265,34 @@ func (dn *deleteNode) Operate(in []Msg) []Msg {
// filterSegmentByPK returns the bloom filter check result.
// If a key may exist in a segment, it is returned in the map.
// If a key does not exist in a segment, the segment is filtered out.
-func (dn *deleteNode) filterSegmentByPK(partID UniqueID, pks []int64) map[int64][]int64 {
-	result := make(map[int64][]int64)
+func (dn *deleteNode) filterSegmentByPK(partID UniqueID, pks []primaryKey, tss []Timestamp) (map[UniqueID][]primaryKey, map[UniqueID][]uint64) {
+	segID2Pks := make(map[UniqueID][]primaryKey)
+	segID2Tss := make(map[UniqueID][]uint64)
	buf := make([]byte, 8)
	segments := dn.replica.filterSegments(dn.channelName, partID)
-	for _, pk := range pks {
+	for index, pk := range pks {
		for _, segment := range segments {
-			common.Endian.PutUint64(buf, uint64(pk))
-			exist := segment.pkFilter.Test(buf)
+			segmentID := segment.segmentID
+			exist := false
+			switch pk.Type() {
+			case schemapb.DataType_Int64:
+				int64Pk := pk.(*int64PrimaryKey)
+				common.Endian.PutUint64(buf, uint64(int64Pk.Value))
+				exist = segment.pkFilter.Test(buf)
+			case schemapb.DataType_VarChar:
+				varCharPk := pk.(*varCharPrimaryKey)
+				exist = segment.pkFilter.TestString(varCharPk.Value)
+			default:
+				//TODO::
+			}
			if exist {
-				result[pk] = append(result[pk], segment.segmentID)
+				segID2Pks[segmentID] = append(segID2Pks[segmentID], pk)
+				segID2Tss[segmentID] = append(segID2Tss[segmentID], tss[index])
			}
		}
	}
-	return result
+
+	return segID2Pks, segID2Tss
}

func newDeleteNode(ctx context.Context, fm flushManager, sig chan<- string, config *nodeConfig) (*deleteNode, error) {

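The per-type switch above is the core of the change: int64 keys are tested against a segment's bloom filter as 8 little-endian bytes, varchar keys as raw strings. A self-contained sketch of that membership check, assuming the github.com/bits-and-blooms/bloom/v3 package (which provides the NewWithEstimates/Add/Test/AddString/TestString API used here) and that common.Endian is little-endian:

	package main

	import (
		"encoding/binary"
		"fmt"

		"github.com/bits-and-blooms/bloom/v3"
	)

	func main() {
		filter := bloom.NewWithEstimates(1000000, 0.01)

		// Index an int64 primary key as 8 little-endian bytes.
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, uint64(425))
		filter.Add(buf)

		// Index a varchar primary key directly as a string.
		filter.AddString("milvus")

		// Bloom filters can report false positives but never false negatives,
		// so a hit only means the segment *may* contain the key.
		fmt.Println(filter.Test(buf))            // true
		fmt.Println(filter.TestString("milvus")) // true

		binary.LittleEndian.PutUint64(buf, uint64(19530))
		fmt.Println(filter.Test(buf)) // false with high probability
	}

A hit routes the (pk, ts) pair into that segment's buckets; a miss filters the segment out for that key.
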
@@ -129,18 +129,28 @@ func TestFlowGraphDeleteNode_newDeleteNode(te *testing.T) {
	}
}

-func genMockReplica(segIDs []int64, pks []int64, chanName string) *mockReplica {
+func genMockReplica(segIDs []int64, pks []primaryKey, chanName string) *mockReplica {
	buf := make([]byte, 8)
	filter0 := bloom.NewWithEstimates(1000000, 0.01)
	for i := 0; i < 3; i++ {
-		common.Endian.PutUint64(buf, uint64(pks[i]))
-		filter0.Add(buf)
+		switch pks[i].Type() {
+		case schemapb.DataType_Int64:
+			common.Endian.PutUint64(buf, uint64(pks[i].(*int64PrimaryKey).Value))
+			filter0.Add(buf)
+		case schemapb.DataType_VarChar:
+			filter0.AddString(pks[i].(*varCharPrimaryKey).Value)
+		}
	}

	filter1 := bloom.NewWithEstimates(1000000, 0.01)
	for i := 3; i < 5; i++ {
-		common.Endian.PutUint64(buf, uint64(pks[i]))
-		filter1.Add(buf)
+		switch pks[i].Type() {
+		case schemapb.DataType_Int64:
+			common.Endian.PutUint64(buf, uint64(pks[i].(*int64PrimaryKey).Value))
+			filter1.Add(buf)
+		case schemapb.DataType_VarChar:
+			filter1.AddString(pks[i].(*varCharPrimaryKey).Value)
+		}
	}

	seg0 := &Segment{

@@ -212,14 +222,29 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
		chanName = "channel-test"
	)
	var (
-		segIDs = []int64{11, 22, 33, 44, 55}
-		pks    = []int64{3, 17, 44, 190, 425}
+		segIDs   = []int64{11, 22, 33, 44, 55}
+		int64Pks = []primaryKey{
+			newInt64PrimaryKey(3),
+			newInt64PrimaryKey(17),
+			newInt64PrimaryKey(44),
+			newInt64PrimaryKey(190),
+			newInt64PrimaryKey(425),
+		}
+		varCharPks = []primaryKey{
+			newVarCharPrimaryKey("ab"),
+			newVarCharPrimaryKey("ac"),
+			newVarCharPrimaryKey("bcd"),
+			newVarCharPrimaryKey("gggg"),
+			newVarCharPrimaryKey("milvus"),
+		}
+		tss = []uint64{1, 1, 1, 1, 1}
	)
-	replica := genMockReplica(segIDs, pks, chanName)
	cm := storage.NewLocalChunkManager(storage.RootPath(deleteNodeTestDir))
	defer cm.RemoveWithPrefix("")
-	fm := NewRendezvousFlushManager(NewAllocatorFactory(), cm, replica, func(*segmentFlushPack) {}, emptyFlushAndDropFunc)
-	t.Run("Test get segment by primary keys", func(te *testing.T) {
+	t.Run("Test get segment by varChar primary keys", func(te *testing.T) {
+		replica := genMockReplica(segIDs, varCharPks, chanName)
+		fm := NewRendezvousFlushManager(NewAllocatorFactory(), cm, replica, func(*segmentFlushPack) {}, emptyFlushAndDropFunc)
		c := &nodeConfig{
			replica:      replica,
			allocator:    &allocator{},

@@ -229,16 +254,49 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
		dn, err := newDeleteNode(context.Background(), fm, make(chan string, 1), c)
		assert.Nil(t, err)

-		results := dn.filterSegmentByPK(0, pks)
-		expected := map[int64][]int64{
-			pks[0]: segIDs[0:3],
-			pks[1]: segIDs[0:3],
-			pks[2]: segIDs[0:3],
-			pks[3]: segIDs[3:5],
-			pks[4]: segIDs[3:5],
+		segID2Pks, _ := dn.filterSegmentByPK(0, varCharPks, tss)
+		expected := map[int64][]primaryKey{
+			segIDs[0]: varCharPks[0:3],
+			segIDs[1]: varCharPks[0:3],
+			segIDs[2]: varCharPks[0:3],
+			segIDs[3]: varCharPks[3:5],
+			segIDs[4]: varCharPks[3:5],
		}
-		for key, value := range expected {
-			assert.ElementsMatch(t, value, results[key])
+		for segmentID, expectedPks := range expected {
+			filterPks := segID2Pks[segmentID]
+			assert.Equal(t, len(expectedPks), len(filterPks))
+			for index, pk := range expectedPks {
+				assert.Equal(t, true, pk.EQ(filterPks[index]))
+			}
		}
	})

+	replica := genMockReplica(segIDs, int64Pks, chanName)
+	fm := NewRendezvousFlushManager(NewAllocatorFactory(), cm, replica, func(*segmentFlushPack) {}, emptyFlushAndDropFunc)
+	t.Run("Test get segment by int64 primary keys", func(te *testing.T) {
+		c := &nodeConfig{
+			replica:      replica,
+			allocator:    &allocator{},
+			vChannelName: chanName,
+		}
+
+		dn, err := newDeleteNode(context.Background(), fm, make(chan string, 1), c)
+		assert.Nil(t, err)
+
+		segID2Pks, _ := dn.filterSegmentByPK(0, int64Pks, tss)
+		expected := map[int64][]primaryKey{
+			segIDs[0]: int64Pks[0:3],
+			segIDs[1]: int64Pks[0:3],
+			segIDs[2]: int64Pks[0:3],
+			segIDs[3]: int64Pks[3:5],
+			segIDs[4]: int64Pks[3:5],
+		}
+		for segmentID, expectedPks := range expected {
+			filterPks := segID2Pks[segmentID]
+			assert.Equal(t, len(expectedPks), len(filterPks))
+			for index, pk := range expectedPks {
+				assert.Equal(t, true, pk.EQ(filterPks[index]))
+			}
+		}
+	})

@@ -260,7 +318,7 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
		delNode, err := newDeleteNode(ctx, fm, make(chan string, 1), c)
		assert.Nil(te, err)

-		msg := genFlowGraphDeleteMsg(pks, chanName)
+		msg := genFlowGraphDeleteMsg(int64Pks, chanName)
		msg.segmentsToFlush = segIDs
		// this will fail since ts = 0 will trigger mocked error
		var fgMsg flowgraph.Msg = &msg

@@ -284,7 +342,7 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
		delNode, err := newDeleteNode(ctx, fm, make(chan string, 1), c)
		assert.Nil(te, err)

-		msg := genFlowGraphDeleteMsg(pks, chanName)
+		msg := genFlowGraphDeleteMsg(int64Pks, chanName)
		msg.segmentsToFlush = segIDs

		msg.endPositions[0].Timestamp = 100 // set to normal timestamp

@@ -314,7 +372,7 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
		delNode, err := newDeleteNode(ctx, fm, sig, c)
		assert.Nil(t, err)

-		msg := genFlowGraphDeleteMsg(pks, chanName)
+		msg := genFlowGraphDeleteMsg(int64Pks, chanName)
		msg.segmentsToFlush = segIDs

		msg.endPositions[0].Timestamp = 100 // set to normal timestamp

@@ -654,6 +654,7 @@ func TestInsertBufferNode_bufferInsertMsg(t *testing.T) {
		require.NoError(t, err)

		msFactory := msgstream.NewPmsFactory()

		err = msFactory.Init(&Params)
		assert.Nil(t, err)

@@ -683,7 +683,7 @@ func (df *DataFactory) GetMsgStreamInsertMsgs(n int) (msgs []*msgstream.InsertMs
	return
}

-func (df *DataFactory) GenMsgStreamDeleteMsg(pks []int64, chanName string) *msgstream.DeleteMsg {
+func (df *DataFactory) GenMsgStreamDeleteMsg(pks []primaryKey, chanName string) *msgstream.DeleteMsg {
	idx := 100
	timestamps := make([]Timestamp, len(pks))
	for i := 0; i < len(pks); i++ {

@@ -703,8 +703,9 @@ func (df *DataFactory) GenMsgStreamDeleteMsg(pks []int64, chanName string) *msgs
			CollectionName: "col1",
			PartitionName:  "default",
			ShardName:      chanName,
-			PrimaryKeys:    pks,
+			PrimaryKeys:    s.ParsePrimaryKeys2IDs(pks),
			Timestamps:     timestamps,
+			NumRows:        int64(len(pks)),
		},
	}
	return msg

@@ -740,7 +741,7 @@ func genFlowGraphInsertMsg(chanName string) flowGraphMsg {
	return *fgMsg
}

-func genFlowGraphDeleteMsg(pks []int64, chanName string) flowGraphMsg {
+func genFlowGraphDeleteMsg(pks []primaryKey, chanName string) flowGraphMsg {
	timeRange := TimeRange{
		timestampMin: 0,
		timestampMax: math.MaxUint64,

@@ -922,10 +923,24 @@ func (f *FailMessageStreamFactory) NewTtMsgStream(ctx context.Context) (msgstrea
	return nil, errors.New("mocked failure")
}

-func genInsertDataWithPKs(PKs [2]int64) *InsertData {
+func genInsertDataWithPKs(PKs [2]primaryKey, dataType schemapb.DataType) *InsertData {
	iD := genInsertData()
-	iD.Data[106].(*s.Int64FieldData).Data = PKs[:]
+
+	switch dataType {
+	case schemapb.DataType_Int64:
+		values := make([]int64, len(PKs))
+		for index, pk := range PKs {
+			values[index] = pk.(*int64PrimaryKey).Value
+		}
+		iD.Data[106].(*s.Int64FieldData).Data = values
+	case schemapb.DataType_VarChar:
+		values := make([]string, len(PKs))
+		for index, pk := range PKs {
+			values[index] = pk.(*varCharPrimaryKey).Value
+		}
+		iD.Data[109].(*s.StringFieldData).Data = values
+	default:
+		//TODO::
+	}
	return iD
}

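A short usage sketch of the factory above (fragment; per the code, field 106 is the int64 PK column and field 109 the varchar PK column of the test schema):

	int64Data := genInsertDataWithPKs(
		[2]primaryKey{newInt64PrimaryKey(1), newInt64PrimaryKey(2)},
		schemapb.DataType_Int64)
	varCharData := genInsertDataWithPKs(
		[2]primaryKey{newVarCharPrimaryKey("ab"), newVarCharPrimaryKey("cd")},
		schemapb.DataType_VarChar)
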
@@ -41,9 +41,12 @@ const (
	maxBloomFalsePositive float64 = 0.005
)

-type PrimaryKey = storage.PrimaryKey
-type Int64PrimaryKey = storage.Int64PrimaryKey
-type StringPrimaryKey = storage.StringPrimaryKey
+type primaryKey = storage.PrimaryKey
+type int64PrimaryKey = storage.Int64PrimaryKey
+type varCharPrimaryKey = storage.VarCharPrimaryKey
+
+var newInt64PrimaryKey = storage.NewInt64PrimaryKey
+var newVarCharPrimaryKey = storage.NewVarCharPrimaryKey

// Replica is DataNode unique replication
type Replica interface {

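The lower-cased aliases funnel everything through storage.PrimaryKey. Judging from the call sites in this diff (Type, EQ, GT, GE, LE and the typed Value fields), the interface can be exercised roughly like this (fragment; the exact method set lives in internal/storage):

	var a primaryKey = newInt64PrimaryKey(3)
	var b primaryKey = newInt64PrimaryKey(17)
	_ = a.Type() // schemapb.DataType_Int64
	_ = a.EQ(b)  // false
	_ = a.LE(b)  // true

	var s primaryKey = newVarCharPrimaryKey("milvus")
	_ = s.Type()                     // schemapb.DataType_VarChar
	_ = s.(*varCharPrimaryKey).Value // "milvus"
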
@@ -90,8 +93,8 @@ type Segment struct {

	pkFilter *bloom.BloomFilter // bloom filter of pk inside a segment
-	// TODO silverxia, needs to change to interface to support `string` type PK
-	minPK PrimaryKey // minimal pk value, shortcut for checking whether a pk is inside this segment
-	maxPK PrimaryKey // maximal pk value, same above
+	minPK primaryKey // minimal pk value, shortcut for checking whether a pk is inside this segment
+	maxPK primaryKey // maximal pk value, same above
}

// SegmentReplica is the data replication of persistent data in datanode.

@@ -110,7 +113,7 @@ type SegmentReplica struct {
	chunkManager storage.ChunkManager
}

-func (s *Segment) updatePk(pk PrimaryKey) error {
+func (s *Segment) updatePk(pk primaryKey) error {
	if s.minPK == nil {
		s.minPK = pk
	} else if s.minPK.GT(pk) {

@@ -131,9 +134,7 @@ func (s *Segment) updatePKRange(ids storage.FieldData) error {
	case *storage.Int64FieldData:
		buf := make([]byte, 8)
		for _, pk := range pks.Data {
-			id := &Int64PrimaryKey{
-				Value: pk,
-			}
+			id := newInt64PrimaryKey(pk)
			err := s.updatePk(id)
			if err != nil {
				return err

@@ -143,14 +144,12 @@ func (s *Segment) updatePKRange(ids storage.FieldData) error {
		}
	case *storage.StringFieldData:
		for _, pk := range pks.Data {
-			id := &StringPrimaryKey{
-				Value: pk,
-			}
+			id := newVarCharPrimaryKey(pk)
			err := s.updatePk(id)
			if err != nil {
				return err
			}
-			s.pkFilter.Add([]byte(pk))
+			s.pkFilter.AddString(pk)
		}
	default:
		//TODO::

@@ -772,9 +772,7 @@ func TestSegmentReplica_UpdatePKRange(t *testing.T) {
			Data: []int64{c},
		})

-		pk := &Int64PrimaryKey{
-			Value: c,
-		}
+		pk := newInt64PrimaryKey(c)

		assert.Equal(t, true, seg.minPK.LE(pk))
		assert.Equal(t, true, seg.maxPK.GE(pk))

@@ -820,9 +818,7 @@ func TestReplica_UpdatePKRange(t *testing.T) {
		replica.updateSegmentPKRange(2, &storage.Int64FieldData{Data: []int64{c}}) // normal segment
		replica.updateSegmentPKRange(3, &storage.Int64FieldData{Data: []int64{c}}) // non-exist segment

-		pk := &Int64PrimaryKey{
-			Value: c,
-		}
+		pk := newInt64PrimaryKey(c)

		assert.Equal(t, true, segNew.minPK.LE(pk))
		assert.Equal(t, true, segNew.maxPK.GE(pk))

@@ -600,10 +600,11 @@ func TestStream_PulsarMsgStream_DeleteRepackFunc(t *testing.T) {
			Timestamp: 1,
			SourceID:  1,
		},
-		CollectionName: "Collection",
-		ShardName:      "chan-1",
-		Timestamps:     []Timestamp{1},
-		PrimaryKeys:    []int64{1},
+		CollectionName:   "Collection",
+		ShardName:        "chan-1",
+		Timestamps:       []Timestamp{1},
+		Int64PrimaryKeys: []int64{1},
+		NumRows:          1,
	}
	deleteMsg := &DeleteMsg{
		BaseMsg: baseMsg,

@@ -1858,10 +1859,11 @@ func getTsMsg(msgType MsgType, reqID UniqueID) TsMsg {
			Timestamp: 11,
			SourceID:  reqID,
		},
-		CollectionName: "Collection",
-		ShardName:      "1",
-		Timestamps:     []Timestamp{time},
-		PrimaryKeys:    []int64{1},
+		CollectionName:   "Collection",
+		ShardName:        "1",
+		Timestamps:       []Timestamp{time},
+		Int64PrimaryKeys: []int64{1},
+		NumRows:          1,
	}
	deleteMsg := &DeleteMsg{
		BaseMsg: baseMsg,

@@ -336,6 +336,19 @@ func (dt *DeleteMsg) Unmarshal(input MarshalType) (TsMsg, error) {
	if err != nil {
		return nil, err
	}

+	// Compatible with primary keys that only support int64 type
+	if deleteRequest.PrimaryKeys == nil {
+		deleteRequest.PrimaryKeys = &schemapb.IDs{
+			IdField: &schemapb.IDs_IntId{
+				IntId: &schemapb.LongArray{
+					Data: deleteRequest.Int64PrimaryKeys,
+				},
+			},
+		}
+		deleteRequest.NumRows = int64(len(deleteRequest.Int64PrimaryKeys))
+	}

	deleteMsg := &DeleteMsg{DeleteRequest: deleteRequest}
	for _, timestamp := range deleteMsg.Timestamps {
		deleteMsg.BeginTimestamp = timestamp

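With this shim, downstream consumers only ever see the new schemapb.IDs form and can convert back to typed keys uniformly; a deleteNode-style consumer looks roughly like this (fragment, mirroring the storage.ParseIDs2PrimaryKeys call used earlier in this diff):

	pks := storage.ParseIDs2PrimaryKeys(deleteMsg.PrimaryKeys)
	for i, pk := range pks {
		_ = pk                      // typed primaryKey, int64 or varchar
		_ = deleteMsg.Timestamps[i] // aligned 1:1 with pks
	}
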
@@ -354,6 +367,21 @@ func (dt *DeleteMsg) Unmarshal(input MarshalType) (TsMsg, error) {
	return deleteMsg, nil
}

+func (dt *DeleteMsg) CheckAligned() error {
+	numRows := dt.GetNumRows()
+
+	if numRows != int64(len(dt.GetTimestamps())) {
+		return fmt.Errorf("the num_rows(%d) of pks is not equal to the num_rows(%d) of timestamps", numRows, len(dt.GetTimestamps()))
+	}
+
+	numPks := int64(typeutil.GetSizeOfIDs(dt.PrimaryKeys))
+	if numRows != numPks {
+		return fmt.Errorf("the num_rows(%d) of pks is not equal to passed NumRows(%d)", numPks, numRows)
+	}
+
+	return nil
+}

/////////////////////////////////////////Search//////////////////////////////////////////

// SearchMsg is a message pack that contains search request

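A usage sketch for the new invariant check (fragment; a caller would typically validate right after unmarshalling, before buffering the delete):

	if err := deleteMsg.CheckAligned(); err != nil {
		// NumRows, len(Timestamps) and the IDs size disagree; reject the message.
		return err
	}
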
@@ -282,10 +282,11 @@ func TestDeleteMsg(t *testing.T) {
			SourceID:  3,
		},

-		CollectionName: "test_collection",
-		ShardName:      "test-channel",
-		Timestamps:     []uint64{2, 1, 3},
-		PrimaryKeys:    []int64{1, 2, 3},
+		CollectionName:   "test_collection",
+		ShardName:        "test-channel",
+		Timestamps:       []uint64{2, 1, 3},
+		Int64PrimaryKeys: []int64{1, 2, 3},
+		NumRows:          3,
		},
	}

@@ -22,7 +22,6 @@ import (
	"strconv"

	"github.com/milvus-io/milvus/internal/proto/commonpb"
	"github.com/milvus-io/milvus/internal/proto/internalpb"
)

// InsertRepackFunc is used to repack messages after hash by primary key

@@ -67,50 +66,22 @@ func DeleteRepackFunc(tsMsgs []TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, e
		deleteRequest := request.(*DeleteMsg)
		keys := hashKeys[i]

-		if len(keys) != 1 {
-			return nil, errors.New("len(msg.hashValue) must equal 1, but it is: " + strconv.Itoa(len(keys)))
-		}
-
		timestampLen := len(deleteRequest.Timestamps)
-		pkLen := len(deleteRequest.PrimaryKeys)
		keysLen := len(keys)

-		if keysLen != timestampLen || keysLen != pkLen {
+		if keysLen != timestampLen || int64(keysLen) != deleteRequest.NumRows {
			return nil, errors.New("the length of hashValue, timestamps, primaryKeys are not equal")
		}

-		for index, key := range keys {
-			_, ok := result[key]
-			if !ok {
-				msgPack := MsgPack{}
-				result[key] = &msgPack
-			}
-
-			sliceRequest := internalpb.DeleteRequest{
-				Base: &commonpb.MsgBase{
-					MsgType:   commonpb.MsgType_Delete,
-					MsgID:     deleteRequest.Base.MsgID,
-					Timestamp: deleteRequest.Timestamps[index],
-					SourceID:  deleteRequest.Base.SourceID,
-				},
-				DbID:           deleteRequest.DbID,
-				CollectionID:   deleteRequest.CollectionID,
-				PartitionID:    deleteRequest.PartitionID,
-				CollectionName: deleteRequest.CollectionName,
-				PartitionName:  deleteRequest.PartitionName,
-				ShardName:      deleteRequest.ShardName,
-				Timestamps:     []uint64{deleteRequest.Timestamps[index]},
-				PrimaryKeys:    []int64{deleteRequest.PrimaryKeys[index]},
-			}
-
-			deleteMsg := &DeleteMsg{
-				BaseMsg: BaseMsg{
-					Ctx: request.TraceCtx(),
-				},
-				DeleteRequest: sliceRequest,
-			}
-			result[key].Msgs = append(result[key].Msgs, deleteMsg)
-		}
+		if keysLen != 1 {
+			return nil, errors.New("len(msg.hashValue) must equal 1, but it is: " + strconv.Itoa(keysLen))
+		}
+
+		key := keys[0]
+		_, ok := result[key]
+		if !ok {
+			result[key] = &MsgPack{}
+		}
+		result[key].Msgs = append(result[key].Msgs, request)
	}
	return result, nil
}

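The rewrite changes the repack contract: rather than re-slicing each row into a fresh internalpb.DeleteRequest, the message is forwarded whole under its single hash key (NumRows plus the IDs field make per-row slicing unnecessary). A sketch of the resulting shape (fragment; deleteMsg stands for any *DeleteMsg whose HashValues holds exactly one entry, here 7):

	packs, err := DeleteRepackFunc([]TsMsg{deleteMsg}, [][]int32{{7}})
	if err == nil {
		_ = packs[7].Msgs[0] // the original deleteMsg, not a re-sliced copy
	}
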
@@ -219,8 +219,10 @@ message DeleteRequest {
  int64 dbID = 6;
  int64 collectionID = 7;
  int64 partitionID = 8;
-  repeated int64 primary_keys = 9;
+  repeated int64 int64_primary_keys = 9; // deprecated
  repeated uint64 timestamps = 10;
+  int64 num_rows = 11;
+  schema.IDs primary_keys = 12;
}

message LoadIndex {

@@ -1762,8 +1762,10 @@ type DeleteRequest struct {
	DbID         int64 `protobuf:"varint,6,opt,name=dbID,proto3" json:"dbID,omitempty"`
	CollectionID int64 `protobuf:"varint,7,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
	PartitionID  int64 `protobuf:"varint,8,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
-	PrimaryKeys []int64 `protobuf:"varint,9,rep,packed,name=primary_keys,json=primaryKeys,proto3" json:"primary_keys,omitempty"`
+	Int64PrimaryKeys []int64 `protobuf:"varint,9,rep,packed,name=int64_primary_keys,json=int64PrimaryKeys,proto3" json:"int64_primary_keys,omitempty"`
	Timestamps []uint64 `protobuf:"varint,10,rep,packed,name=timestamps,proto3" json:"timestamps,omitempty"`
+	NumRows     int64         `protobuf:"varint,11,opt,name=num_rows,json=numRows,proto3" json:"num_rows,omitempty"`
+	PrimaryKeys *schemapb.IDs `protobuf:"bytes,12,opt,name=primary_keys,json=primaryKeys,proto3" json:"primary_keys,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`

@@ -1850,9 +1852,9 @@ func (m *DeleteRequest) GetPartitionID() int64 {
	return 0
}

-func (m *DeleteRequest) GetPrimaryKeys() []int64 {
+func (m *DeleteRequest) GetInt64PrimaryKeys() []int64 {
	if m != nil {
-		return m.PrimaryKeys
+		return m.Int64PrimaryKeys
	}
	return nil
}

@@ -1864,6 +1866,20 @@ func (m *DeleteRequest) GetTimestamps() []uint64 {
	return nil
}

+func (m *DeleteRequest) GetNumRows() int64 {
+	if m != nil {
+		return m.NumRows
+	}
+	return 0
+}
+
+func (m *DeleteRequest) GetPrimaryKeys() *schemapb.IDs {
+	if m != nil {
+		return m.PrimaryKeys
+	}
+	return nil
+}

type LoadIndex struct {
	Base      *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
	SegmentID int64             `protobuf:"varint,2,opt,name=segmentID,proto3" json:"segmentID,omitempty"`

@@ -2328,128 +2344,129 @@ func init() {

func init() { proto.RegisterFile("internal.proto", fileDescriptor_41f4a519b878ee3b) }

var fileDescriptor_41f4a519b878ee3b = []byte{
-	// 1954 bytes of a gzipped FileDescriptorProto
-	// (raw descriptor bytes omitted)
+	// 1979 bytes of a gzipped FileDescriptorProto
+	// (raw descriptor bytes omitted; regenerated from internal.proto)
}

@@ -19,6 +19,7 @@ message GenericValue {
    bool bool_val = 1;
    int64 int64_val = 2;
    double float_val = 3;
+   string string_val = 4;
  };
}

@@ -119,6 +119,7 @@ type GenericValue struct {
	//	*GenericValue_BoolVal
	//	*GenericValue_Int64Val
	//	*GenericValue_FloatVal
+	//	*GenericValue_StringVal
	Val                  isGenericValue_Val `protobuf_oneof:"val"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`

@@ -166,12 +167,18 @@ type GenericValue_FloatVal struct {
	FloatVal float64 `protobuf:"fixed64,3,opt,name=float_val,json=floatVal,proto3,oneof"`
}

+type GenericValue_StringVal struct {
+	StringVal string `protobuf:"bytes,4,opt,name=string_val,json=stringVal,proto3,oneof"`
+}

func (*GenericValue_BoolVal) isGenericValue_Val() {}

func (*GenericValue_Int64Val) isGenericValue_Val() {}

func (*GenericValue_FloatVal) isGenericValue_Val() {}

+func (*GenericValue_StringVal) isGenericValue_Val() {}

func (m *GenericValue) GetVal() isGenericValue_Val {
	if m != nil {
		return m.Val

@@ -200,12 +207,20 @@ func (m *GenericValue) GetFloatVal() float64 {
	return 0
}

+func (m *GenericValue) GetStringVal() string {
+	if x, ok := m.GetVal().(*GenericValue_StringVal); ok {
+		return x.StringVal
+	}
+	return ""
+}

// XXX_OneofWrappers is for the internal use of the proto package.
func (*GenericValue) XXX_OneofWrappers() []interface{} {
	return []interface{}{
		(*GenericValue_BoolVal)(nil),
		(*GenericValue_Int64Val)(nil),
		(*GenericValue_FloatVal)(nil),
+		(*GenericValue_StringVal)(nil),
	}
}

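With the new oneof member, a string literal can ride through a query plan exactly like the bool/int64/double literals. A construction sketch (fragment; planpb is assumed to be the generated Go package for plan.proto):

	v := &planpb.GenericValue{
		Val: &planpb.GenericValue_StringVal{StringVal: "milvus"},
	}
	_ = v.GetStringVal() // "milvus"; returns "" if another member is set
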
@@ -985,74 +1000,75 @@ func init() {

func init() { proto.RegisterFile("plan.proto", fileDescriptor_2d655ab2f7683c23) }

var fileDescriptor_2d655ab2f7683c23 = []byte{
-	// 1091 bytes of a gzipped FileDescriptorProto
-	// (raw descriptor bytes omitted)
+	// 1108 bytes of a gzipped FileDescriptorProto
+	// (raw descriptor bytes omitted; regenerated from plan.proto)
|
||||
0x53, 0xc4, 0xb4, 0x90, 0x14, 0x39, 0x8f, 0xc3, 0x98, 0x46, 0x6c, 0x44, 0x92, 0xb5, 0x2a, 0x5e,
|
||||
0xd1, 0x40, 0xf0, 0xd0, 0x60, 0xfe, 0xaf, 0x16, 0xc0, 0x81, 0x48, 0xf2, 0x11, 0x47, 0x6f, 0xee,
|
||||
0x82, 0x73, 0xc1, 0x68, 0x12, 0x87, 0x2c, 0x2e, 0x3c, 0x5a, 0xc1, 0x7d, 0x2f, 0xf6, 0x9e, 0x81,
|
||||
0x1b, 0x13, 0x45, 0x8c, 0x4b, 0x3a, 0x38, 0xcd, 0x47, 0xf7, 0xb6, 0x16, 0xe2, 0x5f, 0x44, 0xfe,
|
||||
0x90, 0x28, 0xa2, 0xbd, 0x0c, 0x9c, 0xb8, 0x58, 0x79, 0xf7, 0xa1, 0xc9, 0xb2, 0x30, 0x95, 0x6c,
|
||||
0x44, 0xe4, 0x24, 0x7c, 0x49, 0x27, 0xc8, 0xc9, 0x09, 0x1a, 0x2c, 0x3b, 0x35, 0xe0, 0xd7, 0x74,
|
||||
0xe2, 0xad, 0x83, 0xcb, 0xb2, 0x90, 0xe4, 0x4a, 0xf4, 0x0e, 0x91, 0x91, 0x13, 0x38, 0x2c, 0xdb,
|
||||
0xc3, 0xbd, 0xff, 0x87, 0x05, 0xcd, 0xef, 0x39, 0x91, 0x93, 0x80, 0xf0, 0x01, 0x7d, 0xfe, 0x2a,
|
||||
0x95, 0xde, 0x97, 0x50, 0x8f, 0xd0, 0xf5, 0x90, 0xf1, 0x0b, 0x81, 0xfe, 0xd6, 0x5f, 0xf7, 0x09,
|
||||
0x8b, 0x65, 0x46, 0x30, 0x80, 0x68, 0x46, 0xf6, 0x43, 0x28, 0x8b, 0xb4, 0xa0, 0x72, 0x77, 0x89,
|
||||
0xda, 0x49, 0x8a, 0x34, 0xca, 0x22, 0xf5, 0x3e, 0x85, 0xea, 0x58, 0xd7, 0x0f, 0xfa, 0x5d, 0x7f,
|
||||
0xb4, 0xb1, 0x44, 0x7a, 0xbe, 0xcc, 0x02, 0x23, 0xed, 0xff, 0x56, 0x86, 0xd6, 0x3e, 0x7b, 0xbb,
|
||||
0x5e, 0x3f, 0x80, 0x56, 0x22, 0x7e, 0xa4, 0x32, 0x64, 0x3c, 0x4a, 0xf2, 0x8c, 0x8d, 0x4d, 0x36,
|
||||
0x9c, 0xa0, 0x89, 0x70, 0xef, 0x0a, 0xd5, 0x82, 0x79, 0x9a, 0x2e, 0x08, 0x9a, 0xa8, 0x37, 0x11,
|
||||
0x9e, 0x09, 0xee, 0x42, 0xdd, 0x58, 0x34, 0x14, 0x2b, 0xb7, 0xa3, 0x08, 0xa8, 0x63, 0xba, 0x6a,
|
||||
0x17, 0xea, 0xe6, 0x2a, 0x63, 0xa1, 0x7a, 0x4b, 0x0b, 0xa8, 0x83, 0x6b, 0xff, 0x2f, 0x0b, 0xea,
|
||||
0x07, 0x62, 0x94, 0x12, 0x69, 0xa2, 0x74, 0x04, 0xed, 0x84, 0x5e, 0xa8, 0xf0, 0x3f, 0x87, 0xaa,
|
||||
0xa9, 0xd5, 0xe6, 0x2a, 0xba, 0x07, 0xab, 0x92, 0x0d, 0x86, 0x8b, 0x96, 0xca, 0xb7, 0xb1, 0xd4,
|
||||
0x42, 0xbd, 0x83, 0xd7, 0xeb, 0xc5, 0xbe, 0x45, 0xbd, 0xf8, 0x3f, 0x59, 0xe0, 0x9c, 0x51, 0x39,
|
||||
0x7a, 0x2b, 0x19, 0x7f, 0x0a, 0x35, 0x8c, 0x6b, 0xb6, 0x56, 0xde, 0xb4, 0x6f, 0x13, 0xd8, 0x42,
|
||||
0x5c, 0x4f, 0x3f, 0x17, 0x7b, 0x06, 0xdd, 0x78, 0x82, 0xee, 0x5b, 0xe8, 0xfe, 0xfd, 0x25, 0x26,
|
||||
0xa6, 0x92, 0x66, 0x75, 0x92, 0x62, 0xe5, 0x3f, 0x84, 0x6a, 0x34, 0x64, 0x49, 0x5c, 0xc4, 0xec,
|
||||
0x7f, 0x4b, 0x14, 0xb5, 0x4e, 0x60, 0xa4, 0xfc, 0x0d, 0x58, 0x29, 0xb4, 0xbd, 0x3a, 0xac, 0xf4,
|
||||
0xf8, 0x98, 0x24, 0x2c, 0x6e, 0x97, 0xbc, 0x15, 0xb0, 0x8f, 0x85, 0x6a, 0x5b, 0xfe, 0xdf, 0x16,
|
||||
0x80, 0x69, 0x09, 0x74, 0x6a, 0x67, 0xce, 0xa9, 0x0f, 0x96, 0xd8, 0x9e, 0x89, 0x16, 0xcb, 0xc2,
|
||||
0xad, 0x8f, 0xa1, 0xa2, 0x13, 0xfd, 0x26, 0xaf, 0x50, 0x48, 0x73, 0xc0, 0x5c, 0x16, 0xdd, 0x7b,
|
||||
0x3d, 0x07, 0x94, 0xf2, 0x77, 0xc0, 0xb9, 0xba, 0x6b, 0x91, 0x44, 0x13, 0xe0, 0x85, 0x18, 0xb0,
|
||||
0x88, 0x24, 0x7b, 0x3c, 0x6e, 0x5b, 0xde, 0x1d, 0x70, 0x8b, 0xfd, 0x89, 0x6c, 0x97, 0xfd, 0xdf,
|
||||
0x6d, 0xa8, 0x20, 0xa9, 0x67, 0xe0, 0x2a, 0x2a, 0x47, 0x21, 0x7d, 0x95, 0xca, 0x22, 0xdd, 0xeb,
|
||||
0x4b, 0xee, 0xbc, 0x2a, 0x10, 0xfd, 0x8a, 0xa8, 0xab, 0x62, 0xf9, 0x02, 0x20, 0xd7, 0x77, 0x1b,
|
||||
0x65, 0x43, 0xef, 0x9d, 0x9b, 0xb2, 0xa5, 0xdf, 0x98, 0x7c, 0x1a, 0xcf, 0x5d, 0xa8, 0xf7, 0xd9,
|
||||
0x4c, 0xdf, 0xbe, 0xb6, 0xd6, 0x66, 0x81, 0xed, 0x96, 0x02, 0xe8, 0xcf, 0x32, 0x72, 0x00, 0x8d,
|
||||
0xc8, 0x34, 0xa2, 0x31, 0x61, 0xc6, 0xc1, 0xbb, 0x4b, 0xcb, 0x75, 0xda, 0xaf, 0xdd, 0x52, 0x50,
|
||||
0x8f, 0xe6, 0xda, 0xf7, 0x1b, 0x68, 0x1b, 0x16, 0x52, 0xcf, 0x3d, 0x63, 0xc8, 0x4c, 0x85, 0xf7,
|
||||
0xae, 0xe3, 0x32, 0x9d, 0x90, 0xdd, 0x52, 0xd0, 0xcc, 0x17, 0x67, 0xe6, 0x29, 0xac, 0x16, 0xac,
|
||||
0xe6, 0xec, 0xd5, 0xd0, 0x9e, 0x7f, 0x2d, 0xb7, 0x79, 0x83, 0xad, 0xfe, 0x22, 0xb4, 0x5f, 0x83,
|
||||
0x8a, 0x36, 0xe2, 0xff, 0x63, 0x01, 0x9c, 0xd3, 0x48, 0x09, 0xb9, 0x77, 0x7c, 0xfc, 0x5d, 0xf1,
|
||||
0x04, 0x19, 0x61, 0xf3, 0x3f, 0xd0, 0x4f, 0x90, 0xb1, 0xb7, 0xf0, 0x38, 0x96, 0x17, 0x1f, 0xc7,
|
||||
0xa7, 0x00, 0xa9, 0xa4, 0x31, 0x8b, 0x88, 0xa2, 0xd9, 0x9b, 0xca, 0x6c, 0x4e, 0xd4, 0xfb, 0x1c,
|
||||
0xe0, 0x52, 0xff, 0x05, 0xcc, 0x68, 0xa8, 0x5c, 0x9b, 0xee, 0xe9, 0x87, 0x21, 0x70, 0x2f, 0xa7,
|
||||
0x7f, 0x87, 0x07, 0xd0, 0x4a, 0x13, 0x12, 0xd1, 0xa1, 0x48, 0x62, 0x2a, 0x43, 0x45, 0x06, 0x18,
|
||||
0x64, 0x37, 0x68, 0xce, 0xc1, 0x67, 0x64, 0xe0, 0xff, 0x69, 0x81, 0x73, 0x9a, 0x10, 0x7e, 0x2c,
|
||||
0x62, 0x1c, 0xd6, 0x63, 0x64, 0x1c, 0x12, 0xce, 0xb3, 0x1b, 0xc6, 0xd1, 0x2c, 0x2e, 0xba, 0x44,
|
||||
0x8c, 0xce, 0x1e, 0xe7, 0x99, 0xf7, 0xd9, 0x02, 0xdb, 0x9b, 0x5b, 0x50, 0xab, 0xce, 0xf1, 0xed,
|
||||
0x40, 0x5b, 0xe4, 0x2a, 0xcd, 0x55, 0x78, 0x15, 0x4a, 0x1d, 0x2e, 0xbb, 0x63, 0x07, 0x4d, 0x83,
|
||||
0x7f, 0x65, 0x22, 0x9a, 0xe9, 0x0c, 0x71, 0x11, 0xd3, 0x8f, 0x38, 0xd4, 0xcc, 0x60, 0x5d, 0xec,
|
||||
0xc5, 0x16, 0xd4, 0x8f, 0x24, 0x25, 0x8a, 0xca, 0xb3, 0x21, 0xe1, 0x6d, 0xcb, 0x6b, 0x43, 0xa3,
|
||||
0x00, 0x9e, 0x5f, 0xe6, 0x24, 0x69, 0x97, 0xbd, 0x06, 0x38, 0x2f, 0x68, 0x96, 0xe1, 0xb9, 0x8d,
|
||||
0xcd, 0x4a, 0xb3, 0xcc, 0x1c, 0x56, 0x3c, 0x17, 0xaa, 0x66, 0x59, 0xd5, 0x72, 0xc7, 0x42, 0x99,
|
||||
0x5d, 0x6d, 0xff, 0xf1, 0x0f, 0x9f, 0x0c, 0x98, 0x1a, 0xe6, 0xfd, 0xad, 0x48, 0x8c, 0xb6, 0x0d,
|
||||
0xa9, 0x87, 0x4c, 0x14, 0xab, 0x6d, 0xc6, 0x15, 0x95, 0x9c, 0x24, 0xdb, 0xc8, 0x73, 0x5b, 0xf3,
|
||||
0x4c, 0xfb, 0xfd, 0x1a, 0xee, 0x1e, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xeb, 0xf1, 0x90, 0xdc,
|
||||
0x9d, 0x0a, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
@@ -129,3 +129,4 @@ message SearchResultData {
  IDs ids = 5;
  repeated int64 topks = 6;
}
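The new repeated `topks` field lets results for several queries travel in one flattened SearchResultData. A minimal reader sketch, assuming the generated Go bindings live in schemapb with the usual protoc getters (`GetTopks`, `GetIds`) and int64 primary keys:

// splitPerQuery slices the flattened ID list back into per-query chunks,
// using topks[i] as the length of query i's result.
func splitPerQuery(data *schemapb.SearchResultData) [][]int64 {
	var out [][]int64
	offset := 0
	ids := data.GetIds().GetIntId().GetData()
	for _, k := range data.GetTopks() {
		out = append(out, ids[offset:offset+int(k)])
		offset += int(k)
	}
	return out
}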
@@ -2297,17 +2297,10 @@ func (node *Proxy) Delete(ctx context.Context, request *milvuspb.DeleteRequest)
	method := "Delete"
	tr := timerecord.NewTimeRecorder(method)

	deleteReq := &milvuspb.DeleteRequest{
		DbName:         request.DbName,
		CollectionName: request.CollectionName,
		PartitionName:  request.PartitionName,
		Expr:           request.Expr,
	}

	dt := &deleteTask{
		ctx:       ctx,
		Condition: NewTaskCondition(ctx),
		req:       deleteReq,
		ctx:        ctx,
		Condition:  NewTaskCondition(ctx),
		deleteExpr: request.Expr,
		BaseDeleteTask: BaseDeleteTask{
			BaseMsg: msgstream.BaseMsg{
				HashValues: request.HashKeys,
@@ -2317,6 +2310,7 @@ func (node *Proxy) Delete(ctx context.Context, request *milvuspb.DeleteRequest)
				MsgType: commonpb.MsgType_Delete,
				MsgID:   0,
			},
			DbName:         request.DbName,
			CollectionName: request.CollectionName,
			PartitionName:  request.PartitionName,
			// RowData: transfer column based request to this
@@ -458,7 +458,16 @@ func generateFieldData(dataType schemapb.DataType, fieldName string, fieldID int
			},
		}
	case schemapb.DataType_VarChar:
		// TODO:
		fieldData.FieldName = testVarCharField
		fieldData.Field = &schemapb.FieldData_Scalars{
			Scalars: &schemapb.ScalarField{
				Data: &schemapb.ScalarField_StringData{
					StringData: &schemapb.StringArray{
						Data: generateVarCharArray(numRows, maxTestStringLen),
					},
				},
			},
		}
	case schemapb.DataType_FloatVector:
		fieldData.FieldName = testFloatVecField
		fieldData.Field = &schemapb.FieldData_Vectors{
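The hunk above calls generateVarCharArray, whose body sits outside this diff. A plausible stand-in, assuming it just needs numRows random strings no longer than the given bound (the letters alphabet and rand seeding are illustrative, not taken from the source):

// generateVarCharArray (hypothetical): numRows random lowercase strings,
// each between 1 and maxLen characters long.
func generateVarCharArray(numRows int, maxLen int64) []string {
	const letters = "abcdefghijklmnopqrstuvwxyz"
	ret := make([]string, numRows)
	for i := range ret {
		n := rand.Int63n(maxLen) + 1
		buf := make([]byte, n)
		for j := range buf {
			buf[j] = letters[rand.Intn(len(letters))]
		}
		ret[i] = string(buf)
	}
	return ret
}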
@@ -580,6 +580,16 @@ func (pc *parserContext) handleLeafValue(nodeRaw *ant_ast.Node, dataType schemap
		} else {
			return nil, fmt.Errorf("type mismatch")
		}
	case *ant_ast.StringNode:
		if typeutil.IsStringType(dataType) {
			gv = &planpb.GenericValue{
				Val: &planpb.GenericValue_StringVal{
					StringVal: node.Value,
				},
			}
		} else {
			return nil, fmt.Errorf("type mismatch")
		}
	default:
		return nil, fmt.Errorf("unsupported leaf node")
	}
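Concretely, with this case in place a string literal in a filter expression such as `varChar in ["milvus"]` is lowered into the new string arm of the GenericValue oneof, mirroring the existing bool/int64/float arms:

// What the parser now produces for the literal "milvus".
gv := &planpb.GenericValue{
	Val: &planpb.GenericValue_StringVal{
		StringVal: "milvus",
	},
}
_ = gv.GetStringVal() // protoc-generated accessor; returns "milvus"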
@@ -611,7 +621,8 @@ func (pc *parserContext) handleExpr(nodeRaw *ant_ast.Node) (*planpb.Expr, error)
	case *ant_ast.IdentifierNode,
		*ant_ast.FloatNode,
		*ant_ast.IntegerNode,
		*ant_ast.BoolNode:
		*ant_ast.BoolNode,
		*ant_ast.StringNode:
		return nil, fmt.Errorf("scalar expr is not supported yet")
	case *ant_ast.UnaryNode:
		expr, err := pc.handleUnaryExpr(node)
@@ -241,7 +241,7 @@ func (it *insertTask) checkPrimaryFieldData() error {
	// get primaryFieldData whether autoID is true or not
	var primaryFieldData *schemapb.FieldData
	if !primaryFieldSchema.AutoID {
		primaryFieldData, err = getPrimaryFieldData(it.GetFieldsData(), primaryFieldSchema)
		primaryFieldData, err = typeutil.GetPrimaryFieldData(it.GetFieldsData(), primaryFieldSchema)
		if err != nil {
			log.Error("get primary field data failed", zap.String("collection name", it.CollectionName), zap.Error(err))
			return err
@@ -2980,8 +2980,9 @@ type BaseDeleteTask = msgstream.DeleteMsg
type deleteTask struct {
	Condition
	BaseDeleteTask
	ctx context.Context
	req *milvuspb.DeleteRequest
	ctx        context.Context
	deleteExpr string
	//req *milvuspb.DeleteRequest
	result   *milvuspb.MutationResult
	chMgr    channelsMgr
	chTicker channelsTimeTicker
@@ -2989,6 +2990,7 @@ type deleteTask struct {
	pChannels []pChan

	collectionID UniqueID
	schema       *schemapb.CollectionSchema
}

func (dt *deleteTask) TraceCtx() context.Context {
@@ -3065,7 +3067,7 @@ func (dt *deleteTask) getChannels() ([]pChan, error) {
	return channels, err
}

func getPrimaryKeysFromExpr(schema *schemapb.CollectionSchema, expr string) (res []int64, err error) {
func getPrimaryKeysFromExpr(schema *schemapb.CollectionSchema, expr string) (res *schemapb.IDs, rowNum int64, err error) {
	if len(expr) == 0 {
		log.Warn("empty expr")
		return
@@ -3073,20 +3075,43 @@ func getPrimaryKeysFromExpr(schema *schemapb.CollectionSchema, expr string) (res

	plan, err := createExprPlan(schema, expr)
	if err != nil {
		return res, fmt.Errorf("failed to create expr plan, expr = %s", expr)
		return res, 0, fmt.Errorf("failed to create expr plan, expr = %s", expr)
	}

	// delete request only supports expr "id in [a, b]"
	termExpr, ok := plan.Node.(*planpb.PlanNode_Predicates).Predicates.Expr.(*planpb.Expr_TermExpr)
	if !ok {
		return res, fmt.Errorf("invalid plan node type")
		return res, 0, fmt.Errorf("invalid plan node type")
	}

	for _, v := range termExpr.TermExpr.Values {
		res = append(res, v.GetInt64Val())
	res = &schemapb.IDs{}
	rowNum = int64(len(termExpr.TermExpr.Values))
	switch termExpr.TermExpr.ColumnInfo.GetDataType() {
	case schemapb.DataType_Int64:
		ids := make([]int64, 0)
		for _, v := range termExpr.TermExpr.Values {
			ids = append(ids, v.GetInt64Val())
		}
		res.IdField = &schemapb.IDs_IntId{
			IntId: &schemapb.LongArray{
				Data: ids,
			},
		}
	case schemapb.DataType_VarChar:
		ids := make([]string, 0)
		for _, v := range termExpr.TermExpr.Values {
			ids = append(ids, v.GetStringVal())
		}
		res.IdField = &schemapb.IDs_StrId{
			StrId: &schemapb.StringArray{
				Data: ids,
			},
		}
	default:
		return res, 0, fmt.Errorf("invalid field data type specified in delete expr")
	}

	return res, nil
	return res, rowNum, nil
}

func (dt *deleteTask) PreExecute(ctx context.Context) error {
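A caller-side sketch of the new return shape: instead of assuming int64 keys, the consumer switches on the concrete IdField arm of the returned IDs (names besides getPrimaryKeysFromExpr itself are illustrative):

ids, rowNum, err := getPrimaryKeysFromExpr(schema, `varChar in ["a", "b"]`)
if err != nil {
	return err
}
switch f := ids.IdField.(type) {
case *schemapb.IDs_IntId:
	log.Debug("int64 pks", zap.Int64s("data", f.IntId.GetData()), zap.Int64("rows", rowNum))
case *schemapb.IDs_StrId:
	log.Debug("varchar pks", zap.Strings("data", f.StrId.GetData()), zap.Int64("rows", rowNum))
}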
@@ -3103,7 +3128,7 @@ func (dt *deleteTask) PreExecute(ctx context.Context) error {
		Timestamp: dt.BeginTs(),
	}

	collName := dt.req.CollectionName
	collName := dt.CollectionName
	if err := validateCollectionName(collName); err != nil {
		log.Error("Invalid collection name", zap.String("collectionName", collName))
		return err
@@ -3117,8 +3142,8 @@ func (dt *deleteTask) PreExecute(ctx context.Context) error {
	dt.collectionID = collID

	// If partitionName is not empty, partitionID will be set.
	if len(dt.req.PartitionName) > 0 {
		partName := dt.req.PartitionName
	if len(dt.PartitionName) > 0 {
		partName := dt.PartitionName
		if err := validatePartitionTag(partName, true); err != nil {
			log.Error("Invalid partition name", zap.String("partitionName", partName))
			return err
@@ -3133,30 +3158,29 @@ func (dt *deleteTask) PreExecute(ctx context.Context) error {
		dt.DeleteRequest.PartitionID = common.InvalidPartitionID
	}

	schema, err := globalMetaCache.GetCollectionSchema(ctx, dt.req.CollectionName)
	schema, err := globalMetaCache.GetCollectionSchema(ctx, collName)
	if err != nil {
		log.Error("Failed to get collection schema", zap.String("collectionName", dt.req.CollectionName))
		log.Error("Failed to get collection schema", zap.String("collectionName", collName))
		return err
	}
	dt.schema = schema

	primaryKeys, err := getPrimaryKeysFromExpr(schema, dt.req.Expr)
	// get delete.primaryKeys from delete expr
	primaryKeys, numRow, err := getPrimaryKeysFromExpr(schema, dt.deleteExpr)
	if err != nil {
		log.Error("Failed to get primary keys from expr", zap.Error(err))
		return err
	}
	log.Debug("get primary keys from expr", zap.Int("len of primary keys", len(primaryKeys)))

	dt.DeleteRequest.NumRows = numRow
	dt.DeleteRequest.PrimaryKeys = primaryKeys
	log.Debug("get primary keys from expr", zap.Int64("len of primary keys", dt.DeleteRequest.NumRows))

	// set result
	dt.result.IDs.IdField = &schemapb.IDs_IntId{
		IntId: &schemapb.LongArray{
			Data: primaryKeys,
		},
	}
	dt.result.DeleteCnt = int64(len(primaryKeys))
	dt.result.IDs = primaryKeys
	dt.result.DeleteCnt = dt.DeleteRequest.NumRows

	rowNum := len(primaryKeys)
	dt.Timestamps = make([]uint64, rowNum)
	dt.Timestamps = make([]uint64, numRow)
	for index := range dt.Timestamps {
		dt.Timestamps[index] = dt.BeginTs()
	}
@@ -3217,6 +3241,7 @@ func (dt *deleteTask) Execute(ctx context.Context) (err error) {
		PartitionID:    partitionID,
		CollectionName: collectionName,
		PartitionName:  partitionName,
		PrimaryKeys:    &schemapb.IDs{},
	}
	deleteMsg := &msgstream.DeleteMsg{
		BaseMsg: msgstream.BaseMsg{
@@ -3229,7 +3254,8 @@ func (dt *deleteTask) Execute(ctx context.Context) (err error) {
		curMsg := result[key].(*msgstream.DeleteMsg)
		curMsg.HashValues = append(curMsg.HashValues, dt.HashValues[index])
		curMsg.Timestamps = append(curMsg.Timestamps, dt.Timestamps[index])
		curMsg.PrimaryKeys = append(curMsg.PrimaryKeys, dt.PrimaryKeys[index])
		typeutil.AppendIDs(curMsg.PrimaryKeys, dt.PrimaryKeys, index)
		curMsg.NumRows++
	}

	// send delete request to log broker
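typeutil.AppendIDs replaces the plain slice append because per-key repartitioning now has to work for either oneof arm. Its body is outside this diff; a plausible equivalent under that assumption (lower-cased here to make clear it is a sketch, not the library function):

// appendIDs (hypothetical): copy the idx-th key from src into dst,
// preserving whichever IdField arm src carries.
func appendIDs(dst *schemapb.IDs, src *schemapb.IDs, idx int) {
	switch f := src.IdField.(type) {
	case *schemapb.IDs_IntId:
		if dst.IdField == nil {
			dst.IdField = &schemapb.IDs_IntId{IntId: &schemapb.LongArray{}}
		}
		intId := dst.IdField.(*schemapb.IDs_IntId).IntId
		intId.Data = append(intId.Data, f.IntId.Data[idx])
	case *schemapb.IDs_StrId:
		if dst.IdField == nil {
			dst.IdField = &schemapb.IDs_StrId{StrId: &schemapb.StringArray{}}
		}
		strId := dst.IdField.(*schemapb.IDs_StrId).StrId
		strId.Data = append(strId.Data, f.StrId.Data[idx])
	}
}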
@@ -52,7 +52,7 @@ const (
	testInt64Field     = "int64"
	testFloatField     = "float"
	testDoubleField    = "double"
	testStringField    = "stringField"
	testVarCharField   = "varChar"
	testFloatVecField  = "fvec"
	testBinaryVecField = "bvec"
	testVecDim         = 128
@@ -117,6 +117,14 @@ func constructCollectionSchemaByDataType(collectionName string, fieldName2DataTy
			},
		}
	}
	if dataType == schemapb.DataType_VarChar {
		fieldSchema.TypeParams = []*commonpb.KeyValuePair{
			{
				Key:   "max_length_per_row",
				Value: strconv.Itoa(testMaxVarCharLength),
			},
		}
	}
	if fieldName == primaryFieldName {
		fieldSchema.IsPrimaryKey = true
		fieldSchema.AutoID = autoID
@@ -1602,8 +1610,7 @@ func TestShowPartitionsTask(t *testing.T) {
	assert.NotNil(t, err)

}

func TestTask_all(t *testing.T) {
func TestTask_Int64PrimaryKey(t *testing.T) {
	var err error

	Params.Init()
@@ -1625,12 +1632,11 @@ func TestTask_all(t *testing.T) {
	partitionName := prefix + funcutil.GenRandomStr()

	fieldName2Types := map[string]schemapb.DataType{
		testBoolField:   schemapb.DataType_Bool,
		testInt32Field:  schemapb.DataType_Int32,
		testInt64Field:  schemapb.DataType_Int64,
		testFloatField:  schemapb.DataType_Float,
		testDoubleField: schemapb.DataType_Double,
		//testStringField: schemapb.DataType_String,
		testBoolField:     schemapb.DataType_Bool,
		testInt32Field:    schemapb.DataType_Int32,
		testInt64Field:    schemapb.DataType_Int64,
		testFloatField:    schemapb.DataType_Float,
		testDoubleField:   schemapb.DataType_Double,
		testFloatVecField: schemapb.DataType_FloatVector}
	if enableMultipleVectorFields {
		fieldName2Types[testBinaryVecField] = schemapb.DataType_BinaryVector
@@ -1782,19 +1788,228 @@ func TestTask_all(t *testing.T) {
				PartitionName: partitionName,
			},
		},
		req: &milvuspb.DeleteRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_Delete,
				MsgID:     0,
				Timestamp: 0,
				SourceID:  Params.ProxyCfg.ProxyID,
		deleteExpr: "int64 in [0, 1]",
		ctx:        ctx,
		result: &milvuspb.MutationResult{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_Success,
				Reason:    "",
			},
			IDs:          nil,
			SuccIndex:    nil,
			ErrIndex:     nil,
			Acknowledged: false,
			InsertCnt:    0,
			DeleteCnt:    0,
			UpsertCnt:    0,
			Timestamp:    0,
		},
		chMgr:    chMgr,
		chTicker: ticker,
	}

	assert.NoError(t, task.OnEnqueue())
	assert.NotNil(t, task.TraceCtx())

	id := UniqueID(uniquegenerator.GetUniqueIntGeneratorIns().GetInt())
	task.SetID(id)
	assert.Equal(t, id, task.ID())

	task.Base.MsgType = commonpb.MsgType_Delete
	assert.Equal(t, commonpb.MsgType_Delete, task.Type())

	ts := Timestamp(time.Now().UnixNano())
	task.SetTs(ts)
	assert.Equal(t, ts, task.BeginTs())
	assert.Equal(t, ts, task.EndTs())

	assert.NoError(t, task.PreExecute(ctx))
	assert.NoError(t, task.Execute(ctx))
	assert.NoError(t, task.PostExecute(ctx))
	})
}

func TestTask_VarCharPrimaryKey(t *testing.T) {
	var err error

	Params.Init()
	Params.ProxyCfg.RetrieveResultChannelNames = []string{funcutil.GenRandomStr()}

	rc := NewRootCoordMock()
	rc.Start()
	defer rc.Stop()

	ctx := context.Background()

	err = InitMetaCache(rc)
	assert.NoError(t, err)

	shardsNum := int32(2)
	prefix := "TestTask_all"
	dbName := ""
	collectionName := prefix + funcutil.GenRandomStr()
	partitionName := prefix + funcutil.GenRandomStr()

	fieldName2Types := map[string]schemapb.DataType{
		testBoolField:     schemapb.DataType_Bool,
		testInt32Field:    schemapb.DataType_Int32,
		testInt64Field:    schemapb.DataType_Int64,
		testFloatField:    schemapb.DataType_Float,
		testDoubleField:   schemapb.DataType_Double,
		testVarCharField:  schemapb.DataType_VarChar,
		testFloatVecField: schemapb.DataType_FloatVector}
	if enableMultipleVectorFields {
		fieldName2Types[testBinaryVecField] = schemapb.DataType_BinaryVector
	}
	nb := 10

	t.Run("create collection", func(t *testing.T) {
		schema := constructCollectionSchemaByDataType(collectionName, fieldName2Types, testVarCharField, false)
		marshaledSchema, err := proto.Marshal(schema)
		assert.NoError(t, err)

		createColT := &createCollectionTask{
			Condition: NewTaskCondition(ctx),
			CreateCollectionRequest: &milvuspb.CreateCollectionRequest{
				Base:           nil,
				DbName:         dbName,
				CollectionName: collectionName,
				PartitionName:  partitionName,
				Expr:           "int64 in [0, 1]",
				Schema:         marshaledSchema,
				ShardsNum:      shardsNum,
			},
			ctx:       ctx,
			ctx:       ctx,
			rootCoord: rc,
			result:    nil,
			schema:    nil,
		}

		assert.NoError(t, createColT.OnEnqueue())
		assert.NoError(t, createColT.PreExecute(ctx))
		assert.NoError(t, createColT.Execute(ctx))
		assert.NoError(t, createColT.PostExecute(ctx))

		_, _ = rc.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_CreatePartition,
				MsgID:     0,
				Timestamp: 0,
				SourceID:  Params.ProxyCfg.ProxyID,
			},
			DbName:         dbName,
			CollectionName: collectionName,
			PartitionName:  partitionName,
		})
	})

	collectionID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
	assert.NoError(t, err)

	dmlChannelsFunc := getDmlChannelsFunc(ctx, rc)
	query := newMockGetChannelsService()
	factory := newSimpleMockMsgStreamFactory()
	chMgr := newChannelsMgrImpl(dmlChannelsFunc, nil, query.GetChannels, nil, factory)
	defer chMgr.removeAllDMLStream()
	defer chMgr.removeAllDQLStream()

	err = chMgr.createDMLMsgStream(collectionID)
	assert.NoError(t, err)
	pchans, err := chMgr.getChannels(collectionID)
	assert.NoError(t, err)

	interval := time.Millisecond * 10
	tso := newMockTsoAllocator()

	ticker := newChannelsTimeTicker(ctx, interval, []string{}, newGetStatisticsFunc(pchans), tso)
	_ = ticker.start()
	defer ticker.close()

	idAllocator, err := allocator.NewIDAllocator(ctx, rc, Params.ProxyCfg.ProxyID)
	assert.NoError(t, err)
	_ = idAllocator.Start()
	defer idAllocator.Close()

	segAllocator, err := newSegIDAssigner(ctx, &mockDataCoord{expireTime: Timestamp(2500)}, getLastTick1)
	assert.NoError(t, err)
	segAllocator.Init()
	_ = segAllocator.Start()
	defer segAllocator.Close()

	t.Run("insert", func(t *testing.T) {
		hash := generateHashKeys(nb)
		task := &insertTask{
			BaseInsertTask: BaseInsertTask{
				BaseMsg: msgstream.BaseMsg{
					HashValues: hash,
				},
				InsertRequest: internalpb.InsertRequest{
					Base: &commonpb.MsgBase{
						MsgType:  commonpb.MsgType_Insert,
						MsgID:    0,
						SourceID: Params.ProxyCfg.ProxyID,
					},
					DbName:         dbName,
					CollectionName: collectionName,
					PartitionName:  partitionName,
					NumRows:        uint64(nb),
					Version:        internalpb.InsertDataVersion_ColumnBased,
				},
			},

			Condition: NewTaskCondition(ctx),
			ctx:       ctx,
			result: &milvuspb.MutationResult{
				Status: &commonpb.Status{
					ErrorCode: commonpb.ErrorCode_Success,
					Reason:    "",
				},
				IDs:          nil,
				SuccIndex:    nil,
				ErrIndex:     nil,
				Acknowledged: false,
				InsertCnt:    0,
				DeleteCnt:    0,
				UpsertCnt:    0,
				Timestamp:    0,
			},
			rowIDAllocator: idAllocator,
			segIDAssigner:  segAllocator,
			chMgr:          chMgr,
			chTicker:       ticker,
			vChannels:      nil,
			pChannels:      nil,
			schema:         nil,
		}

		fieldID := common.StartOfUserFieldID
		for fieldName, dataType := range fieldName2Types {
			task.FieldsData = append(task.FieldsData, generateFieldData(dataType, fieldName, int64(fieldID), nb))
			fieldID++
		}

		assert.NoError(t, task.OnEnqueue())
		assert.NoError(t, task.PreExecute(ctx))
		assert.NoError(t, task.Execute(ctx))
		assert.NoError(t, task.PostExecute(ctx))
	})

	t.Run("delete", func(t *testing.T) {
		task := &deleteTask{
			Condition: NewTaskCondition(ctx),
			BaseDeleteTask: msgstream.DeleteMsg{
				BaseMsg: msgstream.BaseMsg{},
				DeleteRequest: internalpb.DeleteRequest{
					Base: &commonpb.MsgBase{
						MsgType:   commonpb.MsgType_Delete,
						MsgID:     0,
						Timestamp: 0,
						SourceID:  Params.ProxyCfg.ProxyID,
					},
					CollectionName: collectionName,
					PartitionName:  partitionName,
				},
			},
			deleteExpr: "varChar in [\"milvus\", \"test\"]",
			ctx:        ctx,
			result: &milvuspb.MutationResult{
				Status: &commonpb.Status{
					ErrorCode: commonpb.ErrorCode_Success,
@@ -414,27 +414,6 @@ func validateMultipleVectorFields(schema *schemapb.CollectionSchema) error {
	return nil
}

// getPrimaryFieldData get primary field data from all field data inserted from sdk
func getPrimaryFieldData(datas []*schemapb.FieldData, primaryFieldSchema *schemapb.FieldSchema) (*schemapb.FieldData, error) {
	primaryFieldName := primaryFieldSchema.Name

	var primaryFieldData *schemapb.FieldData
	for _, field := range datas {
		if field.FieldName == primaryFieldName {
			if primaryFieldSchema.AutoID {
				return nil, fmt.Errorf("autoID field %v does not require data", primaryFieldName)
			}
			primaryFieldData = field
		}
	}

	if primaryFieldData == nil {
		return nil, fmt.Errorf("can't find data for primary field %v", primaryFieldName)
	}

	return primaryFieldData, nil
}

// parsePrimaryFieldData2IDs get IDs to fill grpc result, for example insert request, delete request etc.
func parsePrimaryFieldData2IDs(fieldData *schemapb.FieldData) (*schemapb.IDs, error) {
	primaryData := &schemapb.IDs{}
@@ -23,10 +23,18 @@ import (
	"go.uber.org/zap"

	"github.com/milvus-io/milvus/internal/log"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/internal/util/flowgraph"
	"github.com/milvus-io/milvus/internal/util/trace"
)

type primaryKey = storage.PrimaryKey
type int64PrimaryKey = storage.Int64PrimaryKey
type varCharPrimaryKey = storage.VarCharPrimaryKey

var newInt64PrimaryKey = storage.NewInt64PrimaryKey
var newVarCharPrimaryKey = storage.NewVarCharPrimaryKey

// deleteNode is one of the nodes in the delta flow graph
type deleteNode struct {
	baseNode
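A quick illustration of the aliased key types: both concrete kinds satisfy the storage.PrimaryKey interface and report their own data type, so downstream code can branch once per key (that the constructors return pointer values is an assumption consistent with the type assertions later in this diff):

pkA := newInt64PrimaryKey(42)            // int64-backed key
pkB := newVarCharPrimaryKey("milvus-42") // string-backed key
for _, pk := range []primaryKey{pkA, pkB} {
	switch pk.Type() {
	case schemapb.DataType_Int64:
		// handle a numeric key
	case schemapb.DataType_VarChar:
		// handle a string key
	}
}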
@@ -52,7 +60,7 @@ func (dNode *deleteNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
	}

	delData := &deleteData{
		deleteIDs:        map[UniqueID][]int64{},
		deleteIDs:        map[UniqueID][]primaryKey{},
		deleteTimestamps: map[UniqueID][]Timestamp{},
		deleteOffset:     map[UniqueID]int64{},
	}
@@ -77,7 +85,7 @@ func (dNode *deleteNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
		log.Debug("delete in historical replica",
			zap.Any("collectionID", delMsg.CollectionID),
			zap.Any("collectionName", delMsg.CollectionName),
			zap.Int("numPKs", len(delMsg.PrimaryKeys)),
			zap.Int64("numPKs", delMsg.NumRows),
			zap.Any("timestamp", delMsg.Timestamps),
			zap.Any("timestampBegin", delMsg.BeginTs()),
			zap.Any("timestampEnd", delMsg.EndTs()),
@@ -133,7 +141,7 @@ func (dNode *deleteNode) delete(deleteData *deleteData, segmentID UniqueID, wg *
	timestamps := deleteData.deleteTimestamps[segmentID]
	offset := deleteData.deleteOffset[segmentID]

	err = targetSegment.segmentDelete(offset, &ids, &timestamps)
	err = targetSegment.segmentDelete(offset, ids, timestamps)
	if err != nil {
		log.Warn("delete segment data failed", zap.Int64("segmentID", segmentID), zap.Error(err))
		return
@@ -24,6 +24,7 @@ import (

	"github.com/milvus-io/milvus/internal/common"
	"github.com/milvus-io/milvus/internal/mq/msgstream"
	"github.com/milvus-io/milvus/internal/proto/schemapb"
	"github.com/milvus-io/milvus/internal/util/flowgraph"
)

@@ -113,7 +114,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
		true)
	assert.NoError(t, err)

	msgDeleteMsg, err := genSimpleDeleteMsg()
	msgDeleteMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
	assert.NoError(t, err)
	dMsg := deleteMsg{
		deleteMessages: []*msgstream.DeleteMsg{
@@ -123,9 +124,9 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
	msg := []flowgraph.Msg{&dMsg}
	deleteNode.Operate(msg)
	s, err := historical.getSegmentByID(defaultSegmentID)
	pks := make([]int64, defaultMsgLength)
	pks := make([]primaryKey, defaultMsgLength)
	for i := 0; i < defaultMsgLength; i++ {
		pks[i] = int64(i)
		pks[i] = newInt64PrimaryKey(int64(i))
	}
	s.updateBloomFilter(pks)
	assert.Nil(t, err)
@@ -134,7 +135,6 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
		common.Endian.PutUint64(buf, uint64(i))
		assert.True(t, s.pkFilter.Test(buf))
	}

	})

	t.Run("test invalid partitionID", func(t *testing.T) {
@@ -150,7 +150,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
		true)
	assert.NoError(t, err)

	msgDeleteMsg, err := genSimpleDeleteMsg()
	msgDeleteMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
	assert.NoError(t, err)
	msgDeleteMsg.PartitionID = common.InvalidPartitionID
	assert.NoError(t, err)
@@ -176,7 +176,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
		true)
	assert.NoError(t, err)

	msgDeleteMsg, err := genSimpleDeleteMsg()
	msgDeleteMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
	msgDeleteMsg.CollectionID = 9999
	msgDeleteMsg.PartitionID = -1
	assert.NoError(t, err)
@@ -202,7 +202,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
		true)
	assert.NoError(t, err)

	msgDeleteMsg, err := genSimpleDeleteMsg()
	msgDeleteMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
	msgDeleteMsg.PartitionID = 9999
	assert.NoError(t, err)
	dMsg := deleteMsg{
@@ -227,7 +227,7 @@ func TestFlowGraphDeleteNode_operate(t *testing.T) {
		true)
	assert.NoError(t, err)

	msgDeleteMsg, err := genSimpleDeleteMsg()
	msgDeleteMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
	assert.NoError(t, err)
	dMsg := deleteMsg{
		deleteMessages: []*msgstream.DeleteMsg{
@@ -93,6 +93,12 @@ func (fddNode *filterDeleteNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {

// filterInvalidDeleteMessage would filter invalid delete messages
func (fddNode *filterDeleteNode) filterInvalidDeleteMessage(msg *msgstream.DeleteMsg) *msgstream.DeleteMsg {
	if err := msg.CheckAligned(); err != nil {
		// TODO: what if the messages are misaligned? Here, we ignore those messages and print error
		log.Warn("misaligned delete messages detected", zap.Error(err))
		return nil
	}

	sp, ctx := trace.StartSpanFromContext(msg.TraceCtx())
	msg.SetTraceCtx(ctx)
	defer sp.Finish()
@@ -101,11 +107,6 @@ func (fddNode *filterDeleteNode) filterInvalidDeleteMessage(msg *msgstream.Delet
		return nil
	}

	if len(msg.PrimaryKeys) != len(msg.Timestamps) {
		log.Warn("Error, misaligned messages detected")
		return nil
	}

	if len(msg.Timestamps) <= 0 {
		log.Debug("filter invalid delete message, no message",
			zap.Any("collectionID", msg.CollectionID),
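CheckAligned is defined on msgstream.DeleteMsg outside this diff; in spirit it subsumes the hand-rolled length check removed above, now that PrimaryKeys is a *schemapb.IDs rather than a slice. A rough equivalent under that assumption (the size helper is hypothetical):

func checkDeleteAligned(msg *msgstream.DeleteMsg) error {
	numIDs := sizeOfIDs(msg.PrimaryKeys) // hypothetical: counts either oneof arm
	if numIDs != len(msg.Timestamps) || int64(numIDs) != msg.NumRows {
		return fmt.Errorf("misaligned delete message: %d pks, %d timestamps, %d rows",
			numIDs, len(msg.Timestamps), msg.NumRows)
	}
	return nil
}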
@@ -23,6 +23,8 @@ import (
	"github.com/stretchr/testify/assert"

	"github.com/milvus-io/milvus/internal/mq/msgstream"
	"github.com/milvus-io/milvus/internal/proto/schemapb"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/internal/util/flowgraph"
)

@@ -49,7 +51,7 @@ func TestFlowGraphFilterDeleteNode_filterInvalidDeleteMessage(t *testing.T) {
	defer cancel()

	t.Run("delete valid test", func(t *testing.T) {
		msg, err := genSimpleDeleteMsg()
		msg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
		assert.NoError(t, err)
		fg, err := getFilterDeleteNode(ctx)
		assert.NoError(t, err)
@@ -58,7 +60,7 @@ func TestFlowGraphFilterDeleteNode_filterInvalidDeleteMessage(t *testing.T) {
	})

	t.Run("test delete no collection", func(t *testing.T) {
		msg, err := genSimpleDeleteMsg()
		msg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
		assert.NoError(t, err)
		msg.CollectionID = UniqueID(1003)
		fg, err := getFilterDeleteNode(ctx)
@@ -68,7 +70,7 @@ func TestFlowGraphFilterDeleteNode_filterInvalidDeleteMessage(t *testing.T) {
	})

	t.Run("test delete not target collection", func(t *testing.T) {
		msg, err := genSimpleDeleteMsg()
		msg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
		assert.NoError(t, err)
		fg, err := getFilterDeleteNode(ctx)
		assert.NoError(t, err)
@@ -78,14 +80,17 @@ func TestFlowGraphFilterDeleteNode_filterInvalidDeleteMessage(t *testing.T) {
	})

	t.Run("test delete no data", func(t *testing.T) {
		msg, err := genSimpleDeleteMsg()
		msg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
		assert.NoError(t, err)
		fg, err := getFilterDeleteNode(ctx)
		assert.NoError(t, err)
		msg.Timestamps = make([]Timestamp, 0)
		msg.PrimaryKeys = make([]IntPrimaryKey, 0)
		msg.Int64PrimaryKeys = make([]IntPrimaryKey, 0)
		res := fg.filterInvalidDeleteMessage(msg)
		assert.Nil(t, res)
		msg.PrimaryKeys = storage.ParsePrimaryKeys2IDs([]primaryKey{})
		res = fg.filterInvalidDeleteMessage(msg)
		assert.Nil(t, res)
	})
}

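The tests above lean on a pair of storage conversions between key slices and wire-format IDs. A round-trip sketch with the names taken from the calls above (exact signatures assumed from usage):

pks := []primaryKey{newInt64PrimaryKey(7)}
ids := storage.ParsePrimaryKeys2IDs(pks) // []primaryKey -> *schemapb.IDs
back := storage.ParseIDs2PrimaryKeys(ids) // *schemapb.IDs -> []primaryKey
// back[0].Type() == schemapb.DataType_Int64 and carries the value 7 again.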
@@ -94,7 +99,7 @@ func TestFlowGraphFilterDeleteNode_Operate(t *testing.T) {
	defer cancel()

	genFilterDeleteMsg := func() []flowgraph.Msg {
		dMsg, err := genSimpleDeleteMsg()
		dMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
		assert.NoError(t, err)
		msg := flowgraph.GenerateMsgStreamMsg([]msgstream.TsMsg{dMsg}, 0, 1000, nil, nil)
		return []flowgraph.Msg{msg}
@@ -104,6 +104,12 @@ func (fdmNode *filterDmNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {

// filterInvalidDeleteMessage would filter out invalid delete messages
func (fdmNode *filterDmNode) filterInvalidDeleteMessage(msg *msgstream.DeleteMsg) *msgstream.DeleteMsg {
	if err := msg.CheckAligned(); err != nil {
		// TODO: what if the messages are misaligned? Here, we ignore those messages and print error
		log.Warn("misaligned delete messages detected", zap.Error(err))
		return nil
	}

	sp, ctx := trace.StartSpanFromContext(msg.TraceCtx())
	msg.SetTraceCtx(ctx)
	defer sp.Finish()
@@ -129,11 +135,6 @@ func (fdmNode *filterDmNode) filterInvalidDeleteMessage(msg *msgstream.DeleteMsg
		}
	}

	if len(msg.PrimaryKeys) != len(msg.Timestamps) {
		log.Warn("Error, misaligned messages detected")
		return nil
	}

	if len(msg.Timestamps) <= 0 {
		log.Debug("filter invalid delete message, no message",
			zap.Any("collectionID", msg.CollectionID),
@@ -147,7 +148,7 @@ func (fdmNode *filterDmNode) filterInvalidDeleteMessage(msg *msgstream.DeleteMsg
func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg) *msgstream.InsertMsg {
	if err := msg.CheckAligned(); err != nil {
		// TODO: what if the messages are misaligned? Here, we ignore those messages and print error
		log.Warn("Error, misaligned messages detected")
		log.Warn("Error, misaligned insert messages detected")
		return nil
	}

@@ -26,6 +26,8 @@ import (
	"github.com/milvus-io/milvus/internal/proto/commonpb"
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/internal/proto/internalpb"
	"github.com/milvus-io/milvus/internal/proto/schemapb"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/internal/util/flowgraph"
)

@@ -152,7 +154,7 @@ func TestFlowGraphFilterDmNode_filterInvalidDeleteMessage(t *testing.T) {
	defer cancel()

	t.Run("delete valid test", func(t *testing.T) {
		msg, err := genSimpleDeleteMsg()
		msg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
		assert.NoError(t, err)
		fg, err := getFilterDMNode(ctx)
		assert.NoError(t, err)
@@ -161,7 +163,7 @@ func TestFlowGraphFilterDmNode_filterInvalidDeleteMessage(t *testing.T) {
	})

	t.Run("test delete no collection", func(t *testing.T) {
		msg, err := genSimpleDeleteMsg()
		msg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
		assert.NoError(t, err)
		msg.CollectionID = UniqueID(1003)
		fg, err := getFilterDMNode(ctx)
@@ -171,7 +173,7 @@ func TestFlowGraphFilterDmNode_filterInvalidDeleteMessage(t *testing.T) {
	})

	t.Run("test delete no partition", func(t *testing.T) {
		msg, err := genSimpleDeleteMsg()
		msg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
		assert.NoError(t, err)
		msg.PartitionID = UniqueID(1000)
		fg, err := getFilterDMNode(ctx)
@@ -186,7 +188,7 @@ func TestFlowGraphFilterDmNode_filterInvalidDeleteMessage(t *testing.T) {
	})

	t.Run("test delete not target collection", func(t *testing.T) {
		msg, err := genSimpleDeleteMsg()
		msg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
		assert.NoError(t, err)
		fg, err := getFilterDMNode(ctx)
		assert.NoError(t, err)
@@ -196,7 +198,7 @@ func TestFlowGraphFilterDmNode_filterInvalidDeleteMessage(t *testing.T) {
	})

	t.Run("test delete misaligned messages", func(t *testing.T) {
		msg, err := genSimpleDeleteMsg()
		msg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
		assert.NoError(t, err)
		fg, err := getFilterDMNode(ctx)
		assert.NoError(t, err)
@@ -206,14 +208,18 @@ func TestFlowGraphFilterDmNode_filterInvalidDeleteMessage(t *testing.T) {
	})

	t.Run("test delete no data", func(t *testing.T) {
		msg, err := genSimpleDeleteMsg()
		msg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
		assert.NoError(t, err)
		fg, err := getFilterDMNode(ctx)
		assert.NoError(t, err)
		msg.Timestamps = make([]Timestamp, 0)
		msg.PrimaryKeys = make([]IntPrimaryKey, 0)
		msg.NumRows = 0
		msg.Int64PrimaryKeys = make([]IntPrimaryKey, 0)
		res := fg.filterInvalidDeleteMessage(msg)
		assert.Nil(t, res)
		msg.PrimaryKeys = storage.ParsePrimaryKeys2IDs([]primaryKey{})
		res = fg.filterInvalidDeleteMessage(msg)
		assert.Nil(t, res)
	})
}

@@ -25,8 +25,6 @@ import (
	"strconv"
	"sync"

	"github.com/milvus-io/milvus/internal/util/typeutil"

	"github.com/opentracing/opentracing-go"
	"go.uber.org/zap"

@@ -35,8 +33,10 @@ import (
	"github.com/milvus-io/milvus/internal/mq/msgstream"
	"github.com/milvus-io/milvus/internal/proto/commonpb"
	"github.com/milvus-io/milvus/internal/proto/schemapb"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/internal/util/flowgraph"
	"github.com/milvus-io/milvus/internal/util/trace"
	"github.com/milvus-io/milvus/internal/util/typeutil"
)

// insertNode is one of the nodes in query flow graph
@@ -47,16 +47,16 @@ type insertNode struct {

// insertData stores the valid insert data
type insertData struct {
	insertIDs        map[UniqueID][]int64
	insertIDs        map[UniqueID][]int64 // rowIDs
	insertTimestamps map[UniqueID][]Timestamp
	insertRecords    map[UniqueID][]*commonpb.Blob
	insertOffset     map[UniqueID]int64
	insertPKs        map[UniqueID][]int64
	insertPKs        map[UniqueID][]primaryKey // pks
}

// deleteData stores the valid delete data
type deleteData struct {
	deleteIDs        map[UniqueID][]int64
	deleteIDs        map[UniqueID][]primaryKey // pks
	deleteTimestamps map[UniqueID][]Timestamp
	deleteOffset     map[UniqueID]int64
}
@@ -86,7 +86,7 @@ func (iNode *insertNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
		insertTimestamps: make(map[UniqueID][]Timestamp),
		insertRecords:    make(map[UniqueID][]*commonpb.Blob),
		insertOffset:     make(map[UniqueID]int64),
		insertPKs:        make(map[UniqueID][]int64),
		insertPKs:        make(map[UniqueID][]primaryKey),
	}

	if iMsg == nil {
@@ -176,7 +176,7 @@ func (iNode *insertNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
	wg.Wait()

	delData := &deleteData{
		deleteIDs:        make(map[UniqueID][]int64),
		deleteIDs:        make(map[UniqueID][]primaryKey),
		deleteTimestamps: make(map[UniqueID][]Timestamp),
		deleteOffset:     make(map[UniqueID]int64),
	}
@@ -186,7 +186,7 @@ func (iNode *insertNode) Operate(in []flowgraph.Msg) []flowgraph.Msg {
		log.Debug("delete in streaming replica",
			zap.Any("collectionID", delMsg.CollectionID),
			zap.Any("collectionName", delMsg.CollectionName),
			zap.Int("numPKs", len(delMsg.PrimaryKeys)),
			zap.Int64("numPKs", delMsg.NumRows),
			zap.Any("timestamp", delMsg.Timestamps))
		processDeleteMessages(iNode.streamingReplica, delMsg, delData)
	}
@@ -242,44 +242,58 @@ func processDeleteMessages(replica ReplicaInterface, msg *msgstream.DeleteMsg, d
		}
		resultSegmentIDs = append(resultSegmentIDs, segmentIDs...)
	}

	primaryKeys := storage.ParseIDs2PrimaryKeys(msg.PrimaryKeys)
	for _, segmentID := range resultSegmentIDs {
		segment, err := replica.getSegmentByID(segmentID)
		if err != nil {
			log.Warn(err.Error())
			continue
		}
		pks, err := filterSegmentsByPKs(msg.PrimaryKeys, segment)
		pks, tss, err := filterSegmentsByPKs(primaryKeys, msg.Timestamps, segment)
		if err != nil {
			log.Warn(err.Error())
			continue
		}
		if len(pks) > 0 {
			delData.deleteIDs[segmentID] = append(delData.deleteIDs[segmentID], pks...)
			// TODO(yukun) get offset of pks
			delData.deleteTimestamps[segmentID] = append(delData.deleteTimestamps[segmentID], msg.Timestamps[:len(pks)]...)
			delData.deleteTimestamps[segmentID] = append(delData.deleteTimestamps[segmentID], tss...)
		}
	}
}

// filterSegmentsByPKs would filter segments by primary keys
func filterSegmentsByPKs(pks []int64, segment *Segment) ([]int64, error) {
func filterSegmentsByPKs(pks []primaryKey, timestamps []Timestamp, segment *Segment) ([]primaryKey, []Timestamp, error) {
	if pks == nil {
		return nil, fmt.Errorf("pks is nil when getSegmentsByPKs")
		return nil, nil, fmt.Errorf("pks is nil when getSegmentsByPKs")
	}
	if segment == nil {
		return nil, fmt.Errorf("segments is nil when getSegmentsByPKs")
		return nil, nil, fmt.Errorf("segments is nil when getSegmentsByPKs")
	}

	retPks := make([]primaryKey, 0)
	retTss := make([]Timestamp, 0)
	buf := make([]byte, 8)
	res := make([]int64, 0)
	for _, pk := range pks {
		common.Endian.PutUint64(buf, uint64(pk))
		exist := segment.pkFilter.Test(buf)
	for index, pk := range pks {
		exist := false
		switch pk.Type() {
		case schemapb.DataType_Int64:
			int64Pk := pk.(*int64PrimaryKey)
			common.Endian.PutUint64(buf, uint64(int64Pk.Value))
			exist = segment.pkFilter.Test(buf)
		case schemapb.DataType_VarChar:
			varCharPk := pk.(*varCharPrimaryKey)
			exist = segment.pkFilter.TestString(varCharPk.Value)
		default:
			return nil, nil, fmt.Errorf("invalid data type of delete primary keys")
		}
		if exist {
			res = append(res, pk)
			retPks = append(retPks, pk)
			retTss = append(retTss, timestamps[index])
		}
	}
	log.Debug("In filterSegmentsByPKs", zap.Any("pk len", len(res)), zap.Any("segment", segment.segmentID))
	return res, nil
	log.Debug("In filterSegmentsByPKs", zap.Any("pk len", len(retPks)), zap.Any("segment", segment.segmentID))
	return retPks, retTss, nil
}

// insert would execute insert operations for specific growing segment
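A minimal usage sketch for the reworked filter: keys and timestamps stay paired, so a bloom-filter hit at pks[i] keeps timestamps[i] as well (the segment value is assumed to be in scope, as in the caller above):

pks := []primaryKey{newInt64PrimaryKey(1), newVarCharPrimaryKey("doc-1")}
tss := []Timestamp{100, 101}
keptPks, keptTss, err := filterSegmentsByPKs(pks, tss, segment)
if err == nil {
	log.Debug("bloom filter hits", zap.Int("pks", len(keptPks)), zap.Int("tss", len(keptTss)))
}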
@@ -333,7 +347,7 @@ func (iNode *insertNode) delete(deleteData *deleteData, segmentID UniqueID, wg *
	timestamps := deleteData.deleteTimestamps[segmentID]
	offset := deleteData.deleteOffset[segmentID]

	err = targetSegment.segmentDelete(offset, &ids, &timestamps)
	err = targetSegment.segmentDelete(offset, ids, timestamps)
	if err != nil {
		log.Warn("QueryNode: targetSegmentDelete failed", zap.Error(err))
		return
@@ -344,7 +358,7 @@ func (iNode *insertNode) delete(deleteData *deleteData, segmentID UniqueID, wg *

// TODO: remove this function to proper file
// getPrimaryKeys would get primary keys by insert messages
func getPrimaryKeys(msg *msgstream.InsertMsg, streamingReplica ReplicaInterface) ([]int64, error) {
func getPrimaryKeys(msg *msgstream.InsertMsg, streamingReplica ReplicaInterface) ([]primaryKey, error) {
	if err := msg.CheckAligned(); err != nil {
		log.Warn("misaligned messages detected")
		return nil, errors.New("misaligned messages detected")
@@ -360,14 +374,14 @@ func getPrimaryKeys(msg *msgstream.InsertMsg, streamingReplica ReplicaInterface)
	return getPKs(msg, collection.schema)
}

func getPKs(msg *msgstream.InsertMsg, schema *schemapb.CollectionSchema) ([]int64, error) {
func getPKs(msg *msgstream.InsertMsg, schema *schemapb.CollectionSchema) ([]primaryKey, error) {
	if msg.IsRowBased() {
		return getPKsFromRowBasedInsertMsg(msg, schema)
	}
	return getPKsFromColumnBasedInsertMsg(msg, schema)
}

func getPKsFromRowBasedInsertMsg(msg *msgstream.InsertMsg, schema *schemapb.CollectionSchema) ([]int64, error) {
func getPKsFromRowBasedInsertMsg(msg *msgstream.InsertMsg, schema *schemapb.CollectionSchema) ([]primaryKey, error) {
	offset := 0
	for _, field := range schema.Fields {
		if field.IsPrimaryKey {
@@ -419,36 +433,38 @@ func getPKsFromRowBasedInsertMsg(msg *msgstream.InsertMsg, schema *schemapb.Coll
	for i, blob := range msg.RowData {
		blobReaders[i] = bytes.NewReader(blob.GetValue()[offset : offset+8])
	}
	pks := make([]int64, len(blobReaders))
	pks := make([]primaryKey, len(blobReaders))

	for i, reader := range blobReaders {
		err := binary.Read(reader, common.Endian, &pks[i])
		var int64PkValue int64
		err := binary.Read(reader, common.Endian, &int64PkValue)
		if err != nil {
			log.Warn("binary read blob value failed", zap.Error(err))
			return nil, err
		}
		pks[i] = newInt64PrimaryKey(int64PkValue)
	}

	return pks, nil
}

func getPKsFromColumnBasedInsertMsg(msg *msgstream.InsertMsg, schema *schemapb.CollectionSchema) ([]int64, error) {
	loc := -1
	for idx, field := range schema.Fields {
		if field.IsPrimaryKey {
			loc = idx
			break
		}
	}
	if loc == -1 {
		return nil, errors.New("no primary field found")
func getPKsFromColumnBasedInsertMsg(msg *msgstream.InsertMsg, schema *schemapb.CollectionSchema) ([]primaryKey, error) {
	primaryFieldSchema, err := typeutil.GetPrimaryFieldSchema(schema)
	if err != nil {
		return nil, err
	}

	if len(msg.GetFieldsData()) <= loc {
		return nil, errors.New("insert msg mismatch the schema")
	primaryFieldData, err := typeutil.GetPrimaryFieldData(msg.GetFieldsData(), primaryFieldSchema)
	if err != nil {
		return nil, err
	}

	return msg.GetFieldsData()[loc].GetScalars().GetLongData().GetData(), nil
	pks, err := storage.ParseFieldData2PrimaryKeys(primaryFieldData)
	if err != nil {
		return nil, err
	}

	return pks, nil
}

// newInsertNode returns a new insertNode
@@ -17,6 +17,7 @@
package querynode

import (
	"fmt"
	"sync"
	"testing"

@@ -26,6 +27,8 @@ import (
	"github.com/milvus-io/milvus/internal/common"
	"github.com/milvus-io/milvus/internal/mq/msgstream"
	"github.com/milvus-io/milvus/internal/proto/commonpb"
	"github.com/milvus-io/milvus/internal/proto/schemapb"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/internal/util/flowgraph"
)

@@ -53,13 +56,13 @@ func genFlowGraphInsertData() (*insertData, error) {
}

func genFlowGraphDeleteData() (*deleteData, error) {
	deleteMsg, err := genSimpleDeleteMsg()
	deleteMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
	if err != nil {
		return nil, err
	}
	dData := &deleteData{
		deleteIDs: map[UniqueID][]UniqueID{
			defaultSegmentID: deleteMsg.PrimaryKeys,
		deleteIDs: map[UniqueID][]primaryKey{
			defaultSegmentID: storage.ParseIDs2PrimaryKeys(deleteMsg.PrimaryKeys),
		},
		deleteTimestamps: map[UniqueID][]Timestamp{
			defaultSegmentID: deleteMsg.Timestamps,
@@ -237,7 +240,7 @@ func TestFlowGraphInsertNode_operate(t *testing.T) {

	msgInsertMsg, err := genSimpleInsertMsg()
	assert.NoError(t, err)
	msgDeleteMsg, err := genSimpleDeleteMsg()
	msgDeleteMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
	assert.NoError(t, err)
	iMsg := insertMsg{
		insertMessages: []*msgstream.InsertMsg{
@@ -272,7 +275,7 @@ func TestFlowGraphInsertNode_operate(t *testing.T) {
		true)
	assert.NoError(t, err)

	msgDeleteMsg, err := genSimpleDeleteMsg()
	msgDeleteMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
	assert.NoError(t, err)
	msgDeleteMsg.PartitionID = common.InvalidPartitionID
	assert.NoError(t, err)
@@ -298,7 +301,7 @@ func TestFlowGraphInsertNode_operate(t *testing.T) {
		true)
	assert.NoError(t, err)

	msgDeleteMsg, err := genSimpleDeleteMsg()
	msgDeleteMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
	msgDeleteMsg.CollectionID = 9999
	msgDeleteMsg.PartitionID = -1
	assert.NoError(t, err)
@@ -324,7 +327,7 @@ func TestFlowGraphInsertNode_operate(t *testing.T) {
		true)
	assert.NoError(t, err)

	msgDeleteMsg, err := genSimpleDeleteMsg()
	msgDeleteMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
	msgDeleteMsg.PartitionID = 9999
	assert.NoError(t, err)
	iMsg := insertMsg{
@@ -351,7 +354,7 @@ func TestFlowGraphInsertNode_operate(t *testing.T) {

	msgInsertMsg, err := genSimpleInsertMsg()
	assert.NoError(t, err)
	msgDeleteMsg, err := genSimpleDeleteMsg()
	msgDeleteMsg, err := genSimpleDeleteMsg(schemapb.DataType_Int64)
	assert.NoError(t, err)
	iMsg := insertMsg{
		insertMessages: []*msgstream.InsertMsg{
@ -366,26 +369,66 @@ func TestFlowGraphInsertNode_operate(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestGetSegmentsByPKs(t *testing.T) {
|
||||
buf := make([]byte, 8)
|
||||
filter := bloom.NewWithEstimates(1000000, 0.01)
|
||||
for i := 0; i < 3; i++ {
|
||||
common.Endian.PutUint64(buf, uint64(i))
|
||||
filter.Add(buf)
|
||||
}
|
||||
segment := &Segment{
|
||||
segmentID: 1,
|
||||
pkFilter: filter,
|
||||
}
|
||||
pks, err := filterSegmentsByPKs([]int64{0, 1, 2, 3, 4}, segment)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(pks), 3)
|
||||
func TestFilterSegmentsByPKs(t *testing.T) {
|
||||
t.Run("filter int64 pks", func(t *testing.T) {
|
||||
buf := make([]byte, 8)
|
||||
filter := bloom.NewWithEstimates(1000000, 0.01)
|
||||
for i := 0; i < 3; i++ {
|
||||
common.Endian.PutUint64(buf, uint64(i))
|
||||
filter.Add(buf)
|
||||
}
|
||||
segment := &Segment{
|
||||
segmentID: 1,
|
||||
pkFilter: filter,
|
||||
}
|
||||
|
||||
pks, err = filterSegmentsByPKs([]int64{}, segment)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(pks), 0)
|
||||
_, err = filterSegmentsByPKs(nil, segment)
|
||||
assert.NotNil(t, err)
|
||||
_, err = filterSegmentsByPKs([]int64{0, 1, 2, 3, 4}, nil)
|
||||
assert.NotNil(t, err)
|
||||
pk0 := newInt64PrimaryKey(0)
|
||||
pk1 := newInt64PrimaryKey(1)
|
||||
pk2 := newInt64PrimaryKey(2)
|
||||
pk3 := newInt64PrimaryKey(3)
|
||||
pk4 := newInt64PrimaryKey(4)
|
||||
|
||||
timestamps := []uint64{1, 1, 1, 1, 1}
|
||||
pks, _, err := filterSegmentsByPKs([]primaryKey{pk0, pk1, pk2, pk3, pk4}, timestamps, segment)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(pks), 3)
|
||||
|
||||
pks, _, err = filterSegmentsByPKs([]primaryKey{}, timestamps, segment)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(pks), 0)
|
||||
_, _, err = filterSegmentsByPKs(nil, timestamps, segment)
|
||||
assert.NotNil(t, err)
|
||||
_, _, err = filterSegmentsByPKs([]primaryKey{pk0, pk1, pk2, pk3, pk4}, timestamps, nil)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("filter varChar pks", func(t *testing.T) {
|
||||
filter := bloom.NewWithEstimates(1000000, 0.01)
|
||||
for i := 0; i < 3; i++ {
|
||||
filter.AddString(fmt.Sprintf("test%d", i))
|
||||
}
|
||||
segment := &Segment{
|
||||
segmentID: 1,
|
||||
pkFilter: filter,
|
||||
}
|
||||
|
||||
pk0 := newVarCharPrimaryKey("test0")
|
||||
pk1 := newVarCharPrimaryKey("test1")
|
||||
pk2 := newVarCharPrimaryKey("test2")
|
||||
pk3 := newVarCharPrimaryKey("test3")
|
||||
pk4 := newVarCharPrimaryKey("test4")
|
||||
|
||||
timestamps := []uint64{1, 1, 1, 1, 1}
|
||||
pks, _, err := filterSegmentsByPKs([]primaryKey{pk0, pk1, pk2, pk3, pk4}, timestamps, segment)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(pks), 3)
|
||||
|
||||
pks, _, err = filterSegmentsByPKs([]primaryKey{}, timestamps, segment)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(pks), 0)
|
||||
_, _, err = filterSegmentsByPKs(nil, timestamps, segment)
|
||||
assert.NotNil(t, err)
|
||||
_, _, err = filterSegmentsByPKs([]primaryKey{pk0, pk1, pk2, pk3, pk4}, timestamps, nil)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
}
|
||||
|
|
|
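The reworked test above exercises a filterSegmentsByPKs that now takes typed primary keys plus their timestamps and returns only the pairs whose bloom-filter probe hits; the function body itself is not part of this diff. A self-contained sketch of the probing logic the test implies, with simplified stand-in types for primaryKey and Segment (the real ones live in internal/querynode), assuming the bits-and-blooms/bloom/v3 API used above:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"

	"github.com/bits-and-blooms/bloom/v3"
)

type pkKind int

const (
	pkInt64 pkKind = iota
	pkVarChar
)

// pk and segment are simplified stand-ins for the querynode primaryKey
// interface and Segment struct; only what the filter needs is modeled.
type pk struct {
	kind pkKind
	i    int64
	s    string
}

type segment struct {
	pkFilter *bloom.BloomFilter
}

// filterPKs keeps the pks (and their timestamps) whose bloom-filter probe
// hits; bloom filters admit false positives but never false negatives.
func filterPKs(pks []pk, timestamps []uint64, seg *segment) ([]pk, []uint64, error) {
	if pks == nil || seg == nil {
		return nil, nil, errors.New("nil pks or segment")
	}
	buf := make([]byte, 8)
	retPks := make([]pk, 0)
	retTss := make([]uint64, 0)
	for i, p := range pks {
		hit := false
		switch p.kind {
		case pkInt64:
			// Same little-endian byte encoding updateBloomFilter uses for writes.
			binary.LittleEndian.PutUint64(buf, uint64(p.i))
			hit = seg.pkFilter.Test(buf)
		case pkVarChar:
			hit = seg.pkFilter.TestString(p.s)
		}
		if hit {
			retPks = append(retPks, p)
			retTss = append(retTss, timestamps[i])
		}
	}
	return retPks, retTss, nil
}

func main() {
	filter := bloom.NewWithEstimates(1000000, 0.01)
	filter.AddString("test0")
	seg := &segment{pkFilter: filter}
	pks, tss, _ := filterPKs(
		[]pk{{kind: pkVarChar, s: "test0"}, {kind: pkVarChar, s: "miss"}},
		[]uint64{1, 1}, seg)
	fmt.Println(len(pks), len(tss)) // 1 1 (modulo bloom false positives)
}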
@ -978,12 +978,34 @@ func genSimpleRowIDField() []IntPrimaryKey {

return ids
}

func genSimpleDeleteID() []IntPrimaryKey {
ids := make([]IntPrimaryKey, defaultDelLength)
for i := 0; i < defaultDelLength; i++ {
ids[0] = IntPrimaryKey(i)
func genSimpleDeleteID(dataType schemapb.DataType) *schemapb.IDs {
ret := &schemapb.IDs{}
switch dataType {
case schemapb.DataType_Int64:
ids := make([]IntPrimaryKey, defaultDelLength)
for i := 0; i < defaultDelLength; i++ {
ids[i] = IntPrimaryKey(i)
}
ret.IdField = &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: ids,
},
}
case schemapb.DataType_VarChar:
ids := make([]string, defaultDelLength)
for i := 0; i < defaultDelLength; i++ {
ids[i] = funcutil.GenRandomStr()
}
ret.IdField = &schemapb.IDs_StrId{
StrId: &schemapb.StringArray{
Data: ids,
},
}
default:
//TODO
}
return ids

return ret
}

func genMsgStreamBaseMsg() msgstream.BaseMsg {

@ -1022,7 +1044,7 @@ func genSimpleInsertMsg() (*msgstream.InsertMsg, error) {

}, nil
}

func genDeleteMsg(reqID UniqueID, collectionID int64) msgstream.TsMsg {
func genDeleteMsg(reqID UniqueID, collectionID int64, dataType schemapb.DataType) msgstream.TsMsg {
hashValue := uint32(reqID)
baseMsg := msgstream.BaseMsg{
BeginTimestamp: 0,

@ -1047,13 +1069,14 @@ func genDeleteMsg(reqID UniqueID, collectionID int64) msgstream.TsMsg {

PartitionName: defaultPartitionName,
CollectionID: collectionID,
PartitionID: defaultPartitionID,
PrimaryKeys: genSimpleDeleteID(),
PrimaryKeys: genSimpleDeleteID(dataType),
Timestamps: genSimpleTimestampDeletedPK(),
NumRows: defaultDelLength,
},
}
}

func genSimpleDeleteMsg() (*msgstream.DeleteMsg, error) {
func genSimpleDeleteMsg(dataType schemapb.DataType) (*msgstream.DeleteMsg, error) {
return &msgstream.DeleteMsg{
BaseMsg: genMsgStreamBaseMsg(),
DeleteRequest: internalpb.DeleteRequest{

@ -1062,8 +1085,9 @@ func genSimpleDeleteMsg() (*msgstream.DeleteMsg, error) {

PartitionName: defaultPartitionName,
CollectionID: defaultCollectionID,
PartitionID: defaultPartitionID,
PrimaryKeys: genSimpleDeleteID(),
PrimaryKeys: genSimpleDeleteID(dataType),
Timestamps: genSimpleTimestampDeletedPK(),
NumRows: defaultDelLength,
},
}, nil
}
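genSimpleDeleteID now returns the protobuf oneof *schemapb.IDs rather than a bare []IntPrimaryKey, so delete messages carry the pk type alongside the data. A minimal runnable sketch of constructing and reading that oneof, using only the schemapb types already visible in this diff:

package main

import (
	"fmt"

	"github.com/milvus-io/milvus/internal/proto/schemapb"
)

func main() {
	// Constructed the same way genSimpleDeleteID does for Int64 pks.
	ids := &schemapb.IDs{
		IdField: &schemapb.IDs_IntId{
			IntId: &schemapb.LongArray{Data: []int64{0, 1, 2}},
		},
	}

	// Reading the oneof back requires a type switch, mirroring the
	// storage.ParseIDs2PrimaryKeys helper later in this diff.
	switch ids.IdField.(type) {
	case *schemapb.IDs_IntId:
		fmt.Println("int64 pks:", ids.GetIntId().GetData())
	case *schemapb.IDs_StrId:
		fmt.Println("varChar pks:", ids.GetStrId().GetData())
	}
}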
@ -560,11 +560,20 @@ func (s *Segment) fillIndexedFieldsData(collectionID UniqueID,

return nil
}

func (s *Segment) updateBloomFilter(pks []int64) {
func (s *Segment) updateBloomFilter(pks []primaryKey) {
buf := make([]byte, 8)
for _, pk := range pks {
common.Endian.PutUint64(buf, uint64(pk))
s.pkFilter.Add(buf)
switch pk.Type() {
case schemapb.DataType_Int64:
int64Value := pk.(*int64PrimaryKey).Value
common.Endian.PutUint64(buf, uint64(int64Value))
s.pkFilter.Add(buf)
case schemapb.DataType_VarChar:
stringValue := pk.(*varCharPrimaryKey).Value
s.pkFilter.AddString(stringValue)
default:
//TODO::
}
}
}
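Note the encoding contract updateBloomFilter establishes: int64 pks enter the filter as 8 little-endian bytes (common.Endian in Milvus), varChar pks via AddString, so any later membership probe must mirror the same encoding exactly. A tiny runnable sketch of that write/probe symmetry, assuming the bloom/v3 API:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/bits-and-blooms/bloom/v3"
)

func main() {
	filter := bloom.NewWithEstimates(1000000, 0.01)

	// Int64 pk: write and probe through the same byte encoding.
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, uint64(42)) // stand-in for common.Endian
	filter.Add(buf)

	// VarChar pk: write and probe through the string helpers.
	filter.AddString("test0")

	fmt.Println(filter.Test(buf), filter.TestString("test0")) // true true
}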
@ -661,7 +670,7 @@ func (s *Segment) segmentInsert(offset int64, entityIDs *[]UniqueID, timestamps

return nil
}

func (s *Segment) segmentDelete(offset int64, entityIDs *[]UniqueID, timestamps *[]Timestamp) error {
func (s *Segment) segmentDelete(offset int64, entityIDs []primaryKey, timestamps []Timestamp) error {
/*
CStatus
Delete(CSegmentInterface c_segment,

@ -670,24 +679,40 @@ func (s *Segment) segmentDelete(offset int64, entityIDs *[]UniqueID, timestamps

const long* primary_keys,
const unsigned long* timestamps);
*/
if len(entityIDs) <= 0 {
return fmt.Errorf("empty pks to delete")
}

s.segPtrMu.RLock()
defer s.segPtrMu.RUnlock() // thread safe guaranteed by segCore, use RLock
if s.segmentPtr == nil {
return errors.New("null seg core pointer")
}

if len(*entityIDs) != len(*timestamps) {
if len(entityIDs) != len(timestamps) {
return errors.New("length of entityIDs not equal to length of timestamps")
}

var cOffset = C.int64_t(offset)
var cSize = C.int64_t(len(*entityIDs))
var cEntityIdsPtr = (*C.int64_t)(&(*entityIDs)[0])
var cTimestampsPtr = (*C.uint64_t)(&(*timestamps)[0])
var cSize = C.int64_t(len(entityIDs))
var cTimestampsPtr = (*C.uint64_t)(&(timestamps)[0])

status := C.Delete(s.segmentPtr, cOffset, cSize, cEntityIdsPtr, cTimestampsPtr)
if err := HandleCStatus(&status, "Delete failed"); err != nil {
return err
pkType := entityIDs[0].Type()
switch pkType {
case schemapb.DataType_Int64:
int64Pks := make([]int64, len(entityIDs))
for index, entity := range entityIDs {
int64Pks[index] = entity.(*int64PrimaryKey).Value
}
var cEntityIdsPtr = (*C.int64_t)(&int64Pks[0])
status := C.Delete(s.segmentPtr, cOffset, cSize, cEntityIdsPtr, cTimestampsPtr)
if err := HandleCStatus(&status, "Delete failed"); err != nil {
return err
}
case schemapb.DataType_VarChar:
//TODO::
default:
return fmt.Errorf("invalid data type of primary keys")
}

return nil
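segcore's C.Delete still consumes a contiguous long* array, so the new segmentDelete first copies the int64 values out of the []primaryKey interface slice and only then passes &int64Pks[0] across cgo. That conversion step in isolation, runnable with stand-in types (the real assertion target is *int64PrimaryKey as above):

package main

import "fmt"

// primaryKey / int64PrimaryKey mirror the querynode types used above.
type primaryKey interface{ isPK() }

type int64PrimaryKey struct{ Value int64 }

func (*int64PrimaryKey) isPK() {}

func main() {
	entityIDs := []primaryKey{
		&int64PrimaryKey{Value: 1},
		&int64PrimaryKey{Value: 2},
		&int64PrimaryKey{Value: 3},
	}
	// Copy the values into a contiguous []int64; in segmentDelete the
	// address of element 0 is then cast to (*C.int64_t) for C.Delete.
	int64Pks := make([]int64, len(entityIDs))
	for index, entity := range entityIDs {
		int64Pks[index] = entity.(*int64PrimaryKey).Value
	}
	fmt.Println(int64Pks) // [1 2 3]
}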
@ -786,7 +811,7 @@ func (s *Segment) segmentLoadFieldData(fieldID int64, rowCount int, data interfa

return nil
}

func (s *Segment) segmentLoadDeletedRecord(primaryKeys []IntPrimaryKey, timestamps []Timestamp, rowCount int64) error {
func (s *Segment) segmentLoadDeletedRecord(primaryKeys []primaryKey, timestamps []Timestamp, rowCount int64) error {
s.segPtrMu.RLock()
defer s.segPtrMu.RUnlock() // thread safe guaranteed by segCore, use RLock
if s.segmentPtr == nil {

@ -796,18 +821,34 @@ func (s *Segment) segmentLoadDeletedRecord(primaryKeys []IntPrimaryKey, timestam

errMsg := fmt.Sprintln("segmentLoadFieldData failed, illegal segment type ", s.segmentType, "segmentID = ", s.ID())
return errors.New(errMsg)
}
loadInfo := C.CLoadDeletedRecordInfo{
timestamps: unsafe.Pointer(&timestamps[0]),
primary_keys: unsafe.Pointer(&primaryKeys[0]),
row_count: C.int64_t(rowCount),

if len(primaryKeys) <= 0 {
return fmt.Errorf("empty pks to delete")
}
/*
CStatus
LoadDeletedRecord(CSegmentInterface c_segment, CLoadDeletedRecordInfo deleted_record_info)
*/
status := C.LoadDeletedRecord(s.segmentPtr, loadInfo)
if err := HandleCStatus(&status, "LoadDeletedRecord failed"); err != nil {
return err
pkType := primaryKeys[0].Type()
switch pkType {
case schemapb.DataType_Int64:
int64Pks := make([]int64, len(primaryKeys))
for index, pk := range primaryKeys {
int64Pks[index] = pk.(*int64PrimaryKey).Value
}
loadInfo := C.CLoadDeletedRecordInfo{
timestamps: unsafe.Pointer(&timestamps[0]),
primary_keys: unsafe.Pointer(&int64Pks[0]),
row_count: C.int64_t(rowCount),
}
/*
CStatus
LoadDeletedRecord(CSegmentInterface c_segment, CLoadDeletedRecordInfo deleted_record_info)
*/
status := C.LoadDeletedRecord(s.segmentPtr, loadInfo)
if err := HandleCStatus(&status, "LoadDeletedRecord failed"); err != nil {
return err
}
case schemapb.DataType_VarChar:
//TODO::
default:
return fmt.Errorf("invalid data type of primary keys")
}

log.Debug("load deleted record done",
@ -545,7 +545,7 @@ func (loader *segmentLoader) FromDmlCPLoadDelete(ctx context.Context, collection

stream.Start()

delData := &deleteData{
deleteIDs: make(map[UniqueID][]int64),
deleteIDs: make(map[UniqueID][]primaryKey),
deleteTimestamps: make(map[UniqueID][]Timestamp),
deleteOffset: make(map[UniqueID]int64),
}

@ -635,7 +635,7 @@ func deletePk(replica ReplicaInterface, deleteData *deleteData, segmentID Unique

timestamps := deleteData.deleteTimestamps[segmentID]
offset := deleteData.deleteOffset[segmentID]

err = targetSegment.segmentDelete(offset, &ids, &timestamps)
err = targetSegment.segmentDelete(offset, ids, timestamps)
if err != nil {
log.Warn("QueryNode: targetSegmentDelete failed", zap.Error(err))
return

@ -586,8 +586,8 @@ func testConsumingDeltaMsg(ctx context.Context, t *testing.T, position *msgstrea

msgChan := make(chan *msgstream.MsgPack)
go func() {
msgChan <- nil
deleteMsg1 := genDeleteMsg(int64(1), defaultCollectionID+1)
deleteMsg2 := genDeleteMsg(int64(1), defaultCollectionID)
deleteMsg1 := genDeleteMsg(int64(1), defaultCollectionID+1, schemapb.DataType_Int64)
deleteMsg2 := genDeleteMsg(int64(1), defaultCollectionID, schemapb.DataType_Int64)
msgChan <- &msgstream.MsgPack{Msgs: []msgstream.TsMsg{deleteMsg1, deleteMsg2}}
}()
@ -253,6 +253,11 @@ func TestSegment_getDeletedCount(t *testing.T) {

assert.Nil(t, err)

ids := []int64{1, 2, 3}
pks := make([]primaryKey, 0)
for _, id := range ids {
pks = append(pks, newInt64PrimaryKey(id))
}

timestamps := []uint64{0, 0, 0}

const DIM = 16

@ -285,7 +290,7 @@ func TestSegment_getDeletedCount(t *testing.T) {

var offsetDelete = segment.segmentPreDelete(10)
assert.GreaterOrEqual(t, offsetDelete, int64(0))

err = segment.segmentDelete(offsetDelete, &ids, &timestamps)
err = segment.segmentDelete(offsetDelete, pks, timestamps)
assert.NoError(t, err)

var deletedCount = segment.getDeletedCount()

@ -426,6 +431,10 @@ func TestSegment_segmentDelete(t *testing.T) {

assert.Nil(t, err)

ids := []int64{1, 2, 3}
pks := make([]primaryKey, 0)
for _, id := range ids {
pks = append(pks, newInt64PrimaryKey(id))
}
timestamps := []uint64{0, 0, 0}

const DIM = 16

@ -458,7 +467,7 @@ func TestSegment_segmentDelete(t *testing.T) {

var offsetDelete = segment.segmentPreDelete(10)
assert.GreaterOrEqual(t, offsetDelete, int64(0))

err = segment.segmentDelete(offsetDelete, &ids, &timestamps)
err = segment.segmentDelete(offsetDelete, pks, timestamps)
assert.NoError(t, err)

deleteCollection(collection)

@ -682,7 +691,11 @@ func TestSegment_segmentLoadDeletedRecord(t *testing.T) {

segmentTypeSealed,
true)
assert.Nil(t, err)
pks := []IntPrimaryKey{1, 2, 3}
ids := []int64{1, 2, 3}
pks := make([]primaryKey, 0)
for _, id := range ids {
pks = append(pks, newInt64PrimaryKey(id))
}
timestamps := []Timestamp{10, 10, 10}
var rowCount int64 = 3
error := seg.segmentLoadDeletedRecord(pks, timestamps, rowCount)

@ -1531,3 +1544,53 @@ func Test_fillFieldData(t *testing.T) {

assert.Error(t, fillFieldData(m, path, &schemapb.FieldData{Type: schemapb.DataType_None}, index, offset, endian))
}

func TestUpdateBloomFilter(t *testing.T) {
t.Run("test int64 pk", func(t *testing.T) {
historical, err := genSimpleReplica()
assert.NoError(t, err)
err = historical.addSegment(defaultSegmentID,
defaultPartitionID,
defaultCollectionID,
defaultDMLChannel,
segmentTypeSealed,
true)
assert.NoError(t, err)
seg, err := historical.getSegmentByID(defaultSegmentID)
assert.Nil(t, err)
pkValues := []int64{1, 2}
pks := make([]primaryKey, len(pkValues))
for index, v := range pkValues {
pks[index] = newInt64PrimaryKey(v)
}
seg.updateBloomFilter(pks)
buf := make([]byte, 8)
for _, v := range pkValues {
common.Endian.PutUint64(buf, uint64(v))
assert.True(t, seg.pkFilter.Test(buf))
}
})
t.Run("test string pk", func(t *testing.T) {
historical, err := genSimpleReplica()
assert.NoError(t, err)
err = historical.addSegment(defaultSegmentID,
defaultPartitionID,
defaultCollectionID,
defaultDMLChannel,
segmentTypeSealed,
true)
assert.NoError(t, err)
seg, err := historical.getSegmentByID(defaultSegmentID)
assert.Nil(t, err)
pkValues := []string{"test1", "test2"}
pks := make([]primaryKey, len(pkValues))
for index, v := range pkValues {
pks[index] = newVarCharPrimaryKey(v)
}
seg.updateBloomFilter(pks)
for _, v := range pkValues {
assert.True(t, seg.pkFilter.TestString(v))
}
})
}
@ -20,6 +20,7 @@ import (

"errors"
"sync/atomic"

"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/rootcoord"
)

@ -43,7 +44,7 @@ type Iterator interface {

// Value is the return value of Next
type Value struct {
ID int64
PK int64
PK PrimaryKey
Timestamp int64
IsDeleted bool
Value interface{}

@ -54,11 +55,12 @@ type InsertBinlogIterator struct {

dispose int32 // 0: false, 1: true
data *InsertData
PKfieldID int64
PkType schemapb.DataType
pos int
}

// NewInsertBinlogIterator creates a new iterator
func NewInsertBinlogIterator(blobs []*Blob, PKfieldID UniqueID) (*InsertBinlogIterator, error) {
func NewInsertBinlogIterator(blobs []*Blob, PKfieldID UniqueID, pkType schemapb.DataType) (*InsertBinlogIterator, error) {
// TODO: load part of file to read records other than loading all content
reader := NewInsertCodec(nil)

@ -68,7 +70,7 @@ func NewInsertBinlogIterator(blobs []*Blob, PKfieldID UniqueID) (*InsertBinlogIt

return nil, err
}

return &InsertBinlogIterator{data: serData, PKfieldID: PKfieldID}, nil
return &InsertBinlogIterator{data: serData, PKfieldID: PKfieldID, PkType: pkType}, nil
}

// HasNext returns true if the iterator have unread record

@ -90,11 +92,15 @@ func (itr *InsertBinlogIterator) Next() (interface{}, error) {

for fieldID, fieldData := range itr.data.Data {
m[fieldID] = fieldData.GetRow(itr.pos)
}
pk, err := GenPrimaryKeyByRawData(itr.data.Data[itr.PKfieldID].GetRow(itr.pos), itr.PkType)
if err != nil {
return nil, err
}

v := &Value{
ID: itr.data.Data[rootcoord.RowIDField].GetRow(itr.pos).(int64),
Timestamp: itr.data.Data[rootcoord.TimeStampField].GetRow(itr.pos).(int64),
PK: itr.data.Data[itr.PKfieldID].GetRow(itr.pos).(int64),
PK: pk,
IsDeleted: false,
Value: m,
}
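Callers of NewInsertBinlogIterator now supply the primary key's data type so Next can wrap the raw row value into a typed PrimaryKey. A usage sketch in the spirit of the updated tests below; it assumes it sits in the storage test package alongside the generateTestData helper, so it is not runnable standalone:

blobs := generateTestData(t, 3)
itr, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)
defer itr.Dispose()
for itr.HasNext() {
	row, err := itr.Next()
	assert.Nil(t, err)
	v := row.(*Value)
	// v.PK is now a typed PrimaryKey rather than a bare int64.
	assert.Equal(t, schemapb.DataType_Int64, v.PK.Type())
}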
@ -91,7 +91,7 @@ func TestInsertlogIterator(t *testing.T) {

t.Run("test dispose", func(t *testing.T) {
blobs := generateTestData(t, 1)
itr, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField)
itr, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)

itr.Dispose()

@ -102,7 +102,7 @@ func TestInsertlogIterator(t *testing.T) {

t.Run("not empty iterator", func(t *testing.T) {
blobs := generateTestData(t, 3)
itr, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField)
itr, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)

for i := 1; i <= 3; i++ {

@ -116,9 +116,12 @@ func TestInsertlogIterator(t *testing.T) {

f102[j] = float32(i)
}

pk := &Int64PrimaryKey{
Value: int64(i),
}
expected := &Value{
int64(i),
int64(i),
pk,
int64(i),
false,
map[FieldID]interface{}{

@ -154,7 +157,7 @@ func TestMergeIterator(t *testing.T) {

t.Run("empty and non-empty iterators", func(t *testing.T) {
blobs := generateTestData(t, 3)
insertItr, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField)
insertItr, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)
iterators := []Iterator{
&InsertBinlogIterator{data: &InsertData{}},

@ -173,9 +176,12 @@ func TestMergeIterator(t *testing.T) {

f102[j] = float32(i)
}

pk := &Int64PrimaryKey{
Value: int64(i),
}
expected := &Value{
int64(i),
int64(i),
pk,
int64(i),
false,
map[FieldID]interface{}{

@ -195,9 +201,9 @@ func TestMergeIterator(t *testing.T) {

t.Run("non-empty iterators", func(t *testing.T) {
blobs := generateTestData(t, 3)
itr1, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField)
itr1, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)
itr2, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField)
itr2, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)
iterators := []Iterator{itr1, itr2}
itr := NewMergeIterator(iterators)

@ -208,9 +214,12 @@ func TestMergeIterator(t *testing.T) {

f102[j] = float32(i)
}

pk := &Int64PrimaryKey{
Value: int64(i),
}
expected := &Value{
int64(i),
int64(i),
pk,
int64(i),
false,
map[FieldID]interface{}{

@ -237,7 +246,7 @@ func TestMergeIterator(t *testing.T) {

t.Run("test dispose", func(t *testing.T) {
blobs := generateTestData(t, 3)
itr1, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField)
itr1, err := NewInsertBinlogIterator(blobs, rootcoord.RowIDField, schemapb.DataType_Int64)
assert.Nil(t, err)
itr := NewMergeIterator([]Iterator{itr1})
@ -486,14 +486,8 @@ func (insertCodec *InsertCodec) DeserializeAll(blobs []*Blob) (

return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
boolFieldData.Data = append(boolFieldData.Data, singleData...)
length, err := eventReader.GetPayloadLengthFromReader()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
totalLength += length
boolFieldData.NumRows = append(boolFieldData.NumRows, int64(length))
totalLength += len(singleData)
boolFieldData.NumRows = append(boolFieldData.NumRows, int64(len(singleData)))
resultData.Data[fieldID] = boolFieldData
case schemapb.DataType_Int8:
if resultData.Data[fieldID] == nil {

@ -507,14 +501,8 @@ func (insertCodec *InsertCodec) DeserializeAll(blobs []*Blob) (

return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
int8FieldData.Data = append(int8FieldData.Data, singleData...)
length, err := eventReader.GetPayloadLengthFromReader()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
totalLength += length
int8FieldData.NumRows = append(int8FieldData.NumRows, int64(length))
totalLength += len(singleData)
int8FieldData.NumRows = append(int8FieldData.NumRows, int64(len(singleData)))
resultData.Data[fieldID] = int8FieldData
case schemapb.DataType_Int16:
if resultData.Data[fieldID] == nil {

@ -528,14 +516,8 @@ func (insertCodec *InsertCodec) DeserializeAll(blobs []*Blob) (

return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
int16FieldData.Data = append(int16FieldData.Data, singleData...)
length, err := eventReader.GetPayloadLengthFromReader()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
totalLength += length
int16FieldData.NumRows = append(int16FieldData.NumRows, int64(length))
totalLength += len(singleData)
int16FieldData.NumRows = append(int16FieldData.NumRows, int64(len(singleData)))
resultData.Data[fieldID] = int16FieldData
case schemapb.DataType_Int32:
if resultData.Data[fieldID] == nil {

@ -549,14 +531,8 @@ func (insertCodec *InsertCodec) DeserializeAll(blobs []*Blob) (

return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
int32FieldData.Data = append(int32FieldData.Data, singleData...)
length, err := eventReader.GetPayloadLengthFromReader()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
totalLength += length
int32FieldData.NumRows = append(int32FieldData.NumRows, int64(length))
totalLength += len(singleData)
int32FieldData.NumRows = append(int32FieldData.NumRows, int64(len(singleData)))
resultData.Data[fieldID] = int32FieldData
case schemapb.DataType_Int64:
if resultData.Data[fieldID] == nil {

@ -570,14 +546,8 @@ func (insertCodec *InsertCodec) DeserializeAll(blobs []*Blob) (

return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
int64FieldData.Data = append(int64FieldData.Data, singleData...)
length, err := eventReader.GetPayloadLengthFromReader()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
totalLength += length
int64FieldData.NumRows = append(int64FieldData.NumRows, int64(length))
totalLength += len(singleData)
int64FieldData.NumRows = append(int64FieldData.NumRows, int64(len(singleData)))
resultData.Data[fieldID] = int64FieldData
case schemapb.DataType_Float:
if resultData.Data[fieldID] == nil {

@ -591,14 +561,8 @@ func (insertCodec *InsertCodec) DeserializeAll(blobs []*Blob) (

return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
floatFieldData.Data = append(floatFieldData.Data, singleData...)
length, err := eventReader.GetPayloadLengthFromReader()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
totalLength += length
floatFieldData.NumRows = append(floatFieldData.NumRows, int64(length))
totalLength += len(singleData)
floatFieldData.NumRows = append(floatFieldData.NumRows, int64(len(singleData)))
resultData.Data[fieldID] = floatFieldData
case schemapb.DataType_Double:
if resultData.Data[fieldID] == nil {

@ -612,37 +576,24 @@ func (insertCodec *InsertCodec) DeserializeAll(blobs []*Blob) (

return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
doubleFieldData.Data = append(doubleFieldData.Data, singleData...)
length, err := eventReader.GetPayloadLengthFromReader()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
totalLength += length
doubleFieldData.NumRows = append(doubleFieldData.NumRows, int64(length))
totalLength += len(singleData)
doubleFieldData.NumRows = append(doubleFieldData.NumRows, int64(len(singleData)))
resultData.Data[fieldID] = doubleFieldData
case schemapb.DataType_String, schemapb.DataType_VarChar:
if resultData.Data[fieldID] == nil {
resultData.Data[fieldID] = &StringFieldData{}
}
stringFieldData := resultData.Data[fieldID].(*StringFieldData)
length, err := eventReader.GetPayloadLengthFromReader()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
totalLength += length
stringFieldData.NumRows = append(stringFieldData.NumRows, int64(length))
stringPayload, err := eventReader.GetStringFromPayload()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, InvalidUniqueID, nil, err
}
for idx := range stringPayload {
stringFieldData.Data = append(stringFieldData.Data, stringPayload[idx])
}

stringFieldData.Data = append(stringFieldData.Data, stringPayload...)
totalLength += len(stringPayload)
stringFieldData.NumRows = append(stringFieldData.NumRows, int64(len(stringPayload)))
resultData.Data[fieldID] = stringFieldData
case schemapb.DataType_BinaryVector:
if resultData.Data[fieldID] == nil {

@ -716,16 +667,64 @@ func (insertCodec *InsertCodec) Deserialize(blobs []*Blob) (partitionID UniqueID

return partitionID, segmentID, data, err
}
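The DeserializeAll refactor above drops the second GetPayloadLengthFromReader round-trip per event: once a Get*FromPayload call returns the decoded slice, its length already is the row count. The pattern in miniature, runnable standalone:

package main

import "fmt"

type int64FieldData struct {
	Data    []int64
	NumRows []int64
}

func main() {
	fd := &int64FieldData{}
	totalLength := 0
	// singleData stands in for the slice an eventReader payload getter
	// (e.g. the int64 counterpart of GetStringFromPayload) returns.
	singleData := []int64{10, 20, 30}
	fd.Data = append(fd.Data, singleData...)
	// Row count comes straight from the decoded slice; no second reader
	// call, no extra error path to unwind.
	totalLength += len(singleData)
	fd.NumRows = append(fd.NumRows, int64(len(singleData)))
	fmt.Println(totalLength, fd.NumRows) // 3 [3]
}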
type DeleteLog struct {
Pk PrimaryKey `json:"pk"`
Ts uint64 `json:"ts"`
PkType int64 `json:"pkType"`
}

func NewDeleteLog(pk PrimaryKey, ts Timestamp) *DeleteLog {
pkType := pk.Type()

return &DeleteLog{
Pk: pk,
Ts: ts,
PkType: int64(pkType),
}
}

func (dl *DeleteLog) UnmarshalJSON(data []byte) error {
var messageMap map[string]*json.RawMessage
err := json.Unmarshal(data, &messageMap)
if err != nil {
return err
}

err = json.Unmarshal(*messageMap["pkType"], &dl.PkType)
if err != nil {
return err
}

switch schemapb.DataType(dl.PkType) {
case schemapb.DataType_Int64:
dl.Pk = &Int64PrimaryKey{}
case schemapb.DataType_VarChar:
dl.Pk = &VarCharPrimaryKey{}
}

err = json.Unmarshal(*messageMap["pk"], dl.Pk)
if err != nil {
return err
}

err = json.Unmarshal(*messageMap["ts"], &dl.Ts)
if err != nil {
return err
}

return nil
}
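Because the pkType tag travels with every record, UnmarshalJSON can allocate the right concrete PrimaryKey before decoding the pk field. A round-trip sketch using the DeleteLog API defined above; it assumes it sits in the storage package:

dl := NewDeleteLog(NewVarCharPrimaryKey("test1"), 100)
blob, err := json.Marshal(dl)
if err != nil {
	panic(err)
}
// blob looks like {"pk":"test1","ts":100,"pkType":<DataType_VarChar>};
// pkType is decoded first so the right concrete type backs "pk".
decoded := &DeleteLog{}
if err = json.Unmarshal(blob, decoded); err != nil {
	panic(err)
}
fmt.Println(decoded.Pk.EQ(dl.Pk), decoded.Ts) // true 100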
// DeleteData saves each entity delete message represented as <primarykey,timestamp> map.
// timestamp represents the time when this instance was deleted
type DeleteData struct {
Pks []int64 // primary keys
Tss []Timestamp // timestamps
Pks []PrimaryKey // primary keys
Tss []Timestamp // timestamps
RowCount int64
}

// Append append 1 pk&ts pair to DeleteData
func (data *DeleteData) Append(pk UniqueID, ts Timestamp) {
func (data *DeleteData) Append(pk PrimaryKey, ts Timestamp) {
data.Pks = append(data.Pks, pk)
data.Tss = append(data.Tss, ts)
data.RowCount++

@ -754,15 +753,15 @@ func (deleteCodec *DeleteCodec) Serialize(collectionID UniqueID, partitionID Uni

if err != nil {
return nil, err
}
if len(data.Pks) != len(data.Tss) {
length := len(data.Pks)
if length != len(data.Tss) {
return nil, fmt.Errorf("the length of pks, and TimeStamps is not equal")
}
length := len(data.Pks)

sizeTotal := 0
var startTs, endTs Timestamp
startTs, endTs = math.MaxUint64, 0
for i := 0; i < length; i++ {
pk := data.Pks[i]
ts := data.Tss[i]
if ts < startTs {
startTs = ts

@ -770,12 +769,17 @@ func (deleteCodec *DeleteCodec) Serialize(collectionID UniqueID, partitionID Uni

if ts > endTs {
endTs = ts
}
err := eventWriter.AddOneStringToPayload(fmt.Sprintf("%d,%d", pk, ts))

deleteLog := NewDeleteLog(data.Pks[i], ts)
serializedPayload, err := json.Marshal(deleteLog)
if err != nil {
return nil, err
}
sizeTotal += binary.Size(pk)
sizeTotal += binary.Size(ts)
err = eventWriter.AddOneStringToPayload(string(serializedPayload))
if err != nil {
return nil, err
}
sizeTotal += binary.Size(serializedPayload)
}
eventWriter.SetEventTimestamp(startTs, endTs)
binlogWriter.SetEventTimeStamp(startTs, endTs)

@ -798,7 +802,6 @@ func (deleteCodec *DeleteCodec) Serialize(collectionID UniqueID, partitionID Uni

Value: buffer,
}
return blob, nil

}

// Deserialize deserializes the deltalog blobs into DeleteData

@ -822,43 +825,44 @@ func (deleteCodec *DeleteCodec) Deserialize(blobs []*Blob) (partitionID UniqueID

return InvalidUniqueID, InvalidUniqueID, nil, err
}

length, err := eventReader.GetPayloadLengthFromReader()
stringArray, err := eventReader.GetStringFromPayload()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, nil, err
}

stringarray, err := eventReader.GetStringFromPayload()
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, nil, err
}
for i := 0; i < length; i++ {
splits := strings.Split(stringarray[i], ",")
if len(splits) != 2 {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, nil, fmt.Errorf("the format of delta log is incorrect, %v can not be split", stringarray[i])
for i := 0; i < len(stringArray); i++ {
deleteLog := &DeleteLog{}
if err = json.Unmarshal([]byte(stringArray[i]), deleteLog); err != nil {
// compatible with versions that only support int64 type primary keys
// compatible with fmt.Sprintf("%d,%d", pk, ts)
// compatible error info (unmarshal err invalid character ',' after top-level value)
splits := strings.Split(stringArray[i], ",")
if len(splits) != 2 {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, nil, fmt.Errorf("the format of delta log is incorrect, %v can not be split", stringArray[i])
}
pk, err := strconv.ParseInt(splits[0], 10, 64)
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, nil, err
}
deleteLog.Pk = &Int64PrimaryKey{
Value: pk,
}
deleteLog.PkType = int64(schemapb.DataType_Int64)
deleteLog.Ts, err = strconv.ParseUint(splits[1], 10, 64)
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, nil, err
}
}

pk, err := strconv.ParseInt(splits[0], 10, 64)
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, nil, err
}

ts, err := strconv.ParseUint(splits[1], 10, 64)
if err != nil {
eventReader.Close()
binlogReader.Close()
return InvalidUniqueID, InvalidUniqueID, nil, err
}

result.Pks = append(result.Pks, pk)
result.Tss = append(result.Tss, ts)
result.Pks = append(result.Pks, deleteLog.Pk)
result.Tss = append(result.Tss, deleteLog.Ts)
}
eventReader.Close()
binlogReader.Close()
@ -17,6 +17,7 @@

package storage

import (
"encoding/binary"
"fmt"
"testing"

@ -337,22 +338,90 @@ func TestInsertCodec(t *testing.T) {

}

func TestDeleteCodec(t *testing.T) {
deleteCodec := NewDeleteCodec()
deleteData := &DeleteData{
Pks: []int64{1},
Tss: []uint64{43757345},
RowCount: int64(1),
t.Run("int64 pk", func(t *testing.T) {
deleteCodec := NewDeleteCodec()
pk1 := &Int64PrimaryKey{
Value: 1,
}
deleteData := &DeleteData{
Pks: []PrimaryKey{pk1},
Tss: []uint64{43757345},
RowCount: int64(1),
}

pk2 := &Int64PrimaryKey{
Value: 2,
}
deleteData.Append(pk2, 23578294723)
blob, err := deleteCodec.Serialize(CollectionID, 1, 1, deleteData)
assert.Nil(t, err)

pid, sid, data, err := deleteCodec.Deserialize([]*Blob{blob})
assert.Nil(t, err)
assert.Equal(t, pid, int64(1))
assert.Equal(t, sid, int64(1))
assert.Equal(t, data, deleteData)
})

t.Run("string pk", func(t *testing.T) {
deleteCodec := NewDeleteCodec()
pk1 := NewVarCharPrimaryKey("test1")
deleteData := &DeleteData{
Pks: []PrimaryKey{pk1},
Tss: []uint64{43757345},
RowCount: int64(1),
}

pk2 := NewVarCharPrimaryKey("test2")
deleteData.Append(pk2, 23578294723)
blob, err := deleteCodec.Serialize(CollectionID, 1, 1, deleteData)
assert.Nil(t, err)

pid, sid, data, err := deleteCodec.Deserialize([]*Blob{blob})
assert.Nil(t, err)
assert.Equal(t, pid, int64(1))
assert.Equal(t, sid, int64(1))
assert.Equal(t, data, deleteData)
})
}

func TestUpgradeDeleteLog(t *testing.T) {
binlogWriter := NewDeleteBinlogWriter(schemapb.DataType_String, CollectionID, 1, 1)
eventWriter, err := binlogWriter.NextDeleteEventWriter()
assert.Nil(t, err)

dData := &DeleteData{
Pks: []PrimaryKey{&Int64PrimaryKey{Value: 1}, &Int64PrimaryKey{Value: 2}},
Tss: []Timestamp{100, 200},
RowCount: 2,
}

deleteData.Append(2, 23578294723)
blob, err := deleteCodec.Serialize(CollectionID, 1, 1, deleteData)
assert.Nil(t, err)
sizeTotal := 0
for i := int64(0); i < dData.RowCount; i++ {
int64PkValue := dData.Pks[i].(*Int64PrimaryKey).Value
ts := dData.Tss[i]
err = eventWriter.AddOneStringToPayload(fmt.Sprintf("%d,%d", int64PkValue, ts))
assert.Nil(t, err)
sizeTotal += binary.Size(int64PkValue)
sizeTotal += binary.Size(ts)
}
eventWriter.SetEventTimestamp(100, 200)
binlogWriter.SetEventTimeStamp(100, 200)
binlogWriter.AddExtra(originalSizeKey, fmt.Sprintf("%v", sizeTotal))

pid, sid, data, err := deleteCodec.Deserialize([]*Blob{blob})
err = binlogWriter.Finish()
assert.Nil(t, err)
assert.Equal(t, pid, int64(1))
assert.Equal(t, sid, int64(1))
assert.Equal(t, data, deleteData)
buffer, err := binlogWriter.GetBuffer()
assert.Nil(t, err)
blob := &Blob{Value: buffer}

dCodec := NewDeleteCodec()
parID, segID, deleteData, err := dCodec.Deserialize([]*Blob{blob})
assert.Nil(t, err)
assert.Equal(t, int64(1), parID)
assert.Equal(t, int64(1), segID)
assert.ElementsMatch(t, dData.Pks, deleteData.Pks)
assert.ElementsMatch(t, dData.Tss, deleteData.Tss)
}

func TestDDCodec(t *testing.T) {
@ -62,7 +62,7 @@ func (r *PayloadReader) GetDataFromPayload() (interface{}, int, error) {

return r.GetBinaryVectorFromPayload()
case schemapb.DataType_FloatVector:
return r.GetFloatVectorFromPayload()
case schemapb.DataType_String:
case schemapb.DataType_String, schemapb.DataType_VarChar:
val, err := r.GetStringFromPayload()
return val, 0, err
default:
@ -22,6 +22,7 @@ import (

"strings"

"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/schemapb"
)

type PrimaryKey interface {

@ -33,12 +34,19 @@ type PrimaryKey interface {

MarshalJSON() ([]byte, error)
UnmarshalJSON(data []byte) error
SetValue(interface{}) error
Type() schemapb.DataType
}

type Int64PrimaryKey struct {
Value int64 `json:"pkValue"`
}

func NewInt64PrimaryKey(v int64) *Int64PrimaryKey {
return &Int64PrimaryKey{
Value: v,
}
}

func (ip *Int64PrimaryKey) GT(key PrimaryKey) bool {
pk, ok := key.(*Int64PrimaryKey)
if !ok {

@ -135,76 +143,35 @@ func (ip *Int64PrimaryKey) SetValue(data interface{}) error {

return nil
}

type StringPrimaryKey struct {
func (ip *Int64PrimaryKey) Type() schemapb.DataType {
return schemapb.DataType_Int64
}

type BaseStringPrimaryKey struct {
Value string
}

func (sp *StringPrimaryKey) GT(key PrimaryKey) bool {
pk, ok := key.(*StringPrimaryKey)
if !ok {
log.Warn("type of compared pk is not string")
return false
}
if strings.Compare(sp.Value, pk.Value) > 0 {
return true
}

return false
func (sp *BaseStringPrimaryKey) GT(key BaseStringPrimaryKey) bool {
return strings.Compare(sp.Value, key.Value) > 0
}

func (sp *StringPrimaryKey) GE(key PrimaryKey) bool {
pk, ok := key.(*StringPrimaryKey)
if !ok {
log.Warn("type of compared pk is not string")
return false
}
if strings.Compare(sp.Value, pk.Value) >= 0 {
return true
}

return false
func (sp *BaseStringPrimaryKey) GE(key BaseStringPrimaryKey) bool {
return strings.Compare(sp.Value, key.Value) >= 0
}

func (sp *StringPrimaryKey) LT(key PrimaryKey) bool {
pk, ok := key.(*StringPrimaryKey)
if !ok {
log.Warn("type of compared pk is not string")
return false
}
if strings.Compare(sp.Value, pk.Value) < 0 {
return true
}

return false
func (sp *BaseStringPrimaryKey) LT(key BaseStringPrimaryKey) bool {
return strings.Compare(sp.Value, key.Value) < 0
}

func (sp *StringPrimaryKey) LE(key PrimaryKey) bool {
pk, ok := key.(*StringPrimaryKey)
if !ok {
log.Warn("type of compared pk is not string")
return false
}
if strings.Compare(sp.Value, pk.Value) <= 0 {
return true
}

return false
func (sp *BaseStringPrimaryKey) LE(key BaseStringPrimaryKey) bool {
return strings.Compare(sp.Value, key.Value) <= 0
}

func (sp *StringPrimaryKey) EQ(key PrimaryKey) bool {
pk, ok := key.(*StringPrimaryKey)
if !ok {
log.Warn("type of compared pk is not string")
return false
}
if strings.Compare(sp.Value, pk.Value) == 0 {
return true
}

return false
func (sp *BaseStringPrimaryKey) EQ(key BaseStringPrimaryKey) bool {
return strings.Compare(sp.Value, key.Value) == 0
}

func (sp *StringPrimaryKey) MarshalJSON() ([]byte, error) {
func (sp *BaseStringPrimaryKey) MarshalJSON() ([]byte, error) {
ret, err := json.Marshal(sp.Value)
if err != nil {
return nil, err

@ -213,7 +180,7 @@ func (sp *StringPrimaryKey) MarshalJSON() ([]byte, error) {

return ret, nil
}

func (sp *StringPrimaryKey) UnmarshalJSON(data []byte) error {
func (sp *BaseStringPrimaryKey) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, &sp.Value)
if err != nil {
return err

@ -222,7 +189,7 @@ func (sp *StringPrimaryKey) UnmarshalJSON(data []byte) error {

return nil
}

func (sp *StringPrimaryKey) SetValue(data interface{}) error {
func (sp *BaseStringPrimaryKey) SetValue(data interface{}) error {
value, ok := data.(string)
if !ok {
return fmt.Errorf("wrong type value when setValue for StringPrimaryKey")

@ -231,3 +198,172 @@ func (sp *StringPrimaryKey) SetValue(data interface{}) error {

sp.Value = value
return nil
}

type VarCharPrimaryKey struct {
BaseStringPrimaryKey
}

func NewVarCharPrimaryKey(v string) *VarCharPrimaryKey {
return &VarCharPrimaryKey{
BaseStringPrimaryKey: BaseStringPrimaryKey{
Value: v,
},
}
}

func (vcp *VarCharPrimaryKey) GT(key PrimaryKey) bool {
pk, ok := key.(*VarCharPrimaryKey)
if !ok {
log.Warn("type of compared pk is not varChar")
return false
}

return vcp.BaseStringPrimaryKey.GT(pk.BaseStringPrimaryKey)
}

func (vcp *VarCharPrimaryKey) GE(key PrimaryKey) bool {
pk, ok := key.(*VarCharPrimaryKey)
if !ok {
log.Warn("type of compared pk is not varChar")
return false
}

return vcp.BaseStringPrimaryKey.GE(pk.BaseStringPrimaryKey)
}

func (vcp *VarCharPrimaryKey) LT(key PrimaryKey) bool {
pk, ok := key.(*VarCharPrimaryKey)
if !ok {
log.Warn("type of compared pk is not varChar")
return false
}

return vcp.BaseStringPrimaryKey.LT(pk.BaseStringPrimaryKey)
}

func (vcp *VarCharPrimaryKey) LE(key PrimaryKey) bool {
pk, ok := key.(*VarCharPrimaryKey)
if !ok {
log.Warn("type of compared pk is not varChar")
return false
}

return vcp.BaseStringPrimaryKey.LE(pk.BaseStringPrimaryKey)
}

func (vcp *VarCharPrimaryKey) EQ(key PrimaryKey) bool {
pk, ok := key.(*VarCharPrimaryKey)
if !ok {
log.Warn("type of compared pk is not varChar")
return false
}

return vcp.BaseStringPrimaryKey.EQ(pk.BaseStringPrimaryKey)
}

func (vcp *VarCharPrimaryKey) Type() schemapb.DataType {
return schemapb.DataType_VarChar
}

func GenPrimaryKeyByRawData(data interface{}, pkType schemapb.DataType) (PrimaryKey, error) {
var result PrimaryKey
switch pkType {
case schemapb.DataType_Int64:
result = &Int64PrimaryKey{
Value: data.(int64),
}
case schemapb.DataType_VarChar:
result = &VarCharPrimaryKey{
BaseStringPrimaryKey: BaseStringPrimaryKey{
Value: data.(string),
},
}
default:
return nil, fmt.Errorf("not supported primary data type")
}

return result, nil
}
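GenPrimaryKeyByRawData is the single entry point the binlog iterator uses to turn a raw row value into a typed pk. A usage sketch, storage-package scope (note the type assertions inside expect the raw value to already match pkType):

pk, err := GenPrimaryKeyByRawData(int64(7), schemapb.DataType_Int64)
if err != nil {
	panic(err)
}
fmt.Println(pk.Type() == schemapb.DataType_Int64) // true

spk, err := GenPrimaryKeyByRawData("abc", schemapb.DataType_VarChar)
if err != nil {
	panic(err)
}
fmt.Println(spk.EQ(NewVarCharPrimaryKey("abc"))) // true

// Any other data type is rejected with an error.
if _, err = GenPrimaryKeyByRawData(3.14, schemapb.DataType_Double); err != nil {
	fmt.Println("unsupported pk type")
}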
func ParseFieldData2PrimaryKeys(data *schemapb.FieldData) ([]PrimaryKey, error) {
ret := make([]PrimaryKey, 0)
if data == nil {
return ret, fmt.Errorf("failed to parse pks from nil field data")
}
scalarData := data.GetScalars()
if scalarData == nil {
return ret, fmt.Errorf("failed to parse pks from nil scalar data")
}

switch data.Type {
case schemapb.DataType_Int64:
for _, value := range scalarData.GetLongData().GetData() {
pk := NewInt64PrimaryKey(value)
ret = append(ret, pk)
}
case schemapb.DataType_VarChar:
for _, value := range scalarData.GetStringData().GetData() {
pk := NewVarCharPrimaryKey(value)
ret = append(ret, pk)
}
default:
return ret, fmt.Errorf("not supported primary data type")
}

return ret, nil
}

func ParseIDs2PrimaryKeys(ids *schemapb.IDs) []PrimaryKey {
ret := make([]PrimaryKey, 0)
switch ids.IdField.(type) {
case *schemapb.IDs_IntId:
int64Pks := ids.GetIntId().GetData()
for _, v := range int64Pks {
pk := NewInt64PrimaryKey(v)
ret = append(ret, pk)
}
case *schemapb.IDs_StrId:
stringPks := ids.GetStrId().GetData()
for _, v := range stringPks {
pk := NewVarCharPrimaryKey(v)
ret = append(ret, pk)
}
default:
//TODO::
}

return ret
}

func ParsePrimaryKeys2IDs(pks []PrimaryKey) *schemapb.IDs {
ret := &schemapb.IDs{}
if len(pks) == 0 {
return ret
}
switch pks[0].Type() {
case schemapb.DataType_Int64:
int64Pks := make([]int64, 0)
for _, pk := range pks {
int64Pks = append(int64Pks, pk.(*Int64PrimaryKey).Value)
}
ret.IdField = &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: int64Pks,
},
}
case schemapb.DataType_VarChar:
stringPks := make([]string, 0)
for _, pk := range pks {
stringPks = append(stringPks, pk.(*VarCharPrimaryKey).Value)
}
ret.IdField = &schemapb.IDs_StrId{
StrId: &schemapb.StringArray{
Data: stringPks,
},
}
default:
//TODO::
}

return ret
}
@ -5,16 +5,14 @@ import (

"testing"

"github.com/stretchr/testify/assert"

"github.com/milvus-io/milvus/internal/proto/schemapb"
)

func TestStringPrimaryKey(t *testing.T) {
pk := &StringPrimaryKey{
Value: "milvus",
}
func TestVarCharPrimaryKey(t *testing.T) {
pk := NewVarCharPrimaryKey("milvus")

testPk := &StringPrimaryKey{
Value: "milvus",
}
testPk := NewVarCharPrimaryKey("milvus")

// test GE
assert.Equal(t, true, pk.GE(testPk))

@ -37,7 +35,7 @@ func TestStringPrimaryKey(t *testing.T) {

blob, err := json.Marshal(pk)
assert.Nil(t, err)

unmarshalledPk := &StringPrimaryKey{}
unmarshalledPk := &VarCharPrimaryKey{}
err = json.Unmarshal(blob, unmarshalledPk)
assert.Nil(t, err)
assert.Equal(t, pk.Value, unmarshalledPk.Value)

@ -45,14 +43,9 @@ func TestStringPrimaryKey(t *testing.T) {

}

func TestInt64PrimaryKey(t *testing.T) {
pk := &Int64PrimaryKey{
Value: 100,
}

testPk := &Int64PrimaryKey{
Value: 100,
}
pk := NewInt64PrimaryKey(100)

testPk := NewInt64PrimaryKey(100)
// test GE
assert.Equal(t, true, pk.GE(testPk))
// test LE

@ -80,3 +73,107 @@ func TestInt64PrimaryKey(t *testing.T) {

assert.Equal(t, pk.Value, unmarshalledPk.Value)
})
}

func TestParseFieldData2PrimaryKeys(t *testing.T) {
t.Run("int64 pk", func(t *testing.T) {
pkValues := []int64{1, 2}
var fieldData *schemapb.FieldData

// test nil fieldData
_, err := ParseFieldData2PrimaryKeys(fieldData)
assert.Error(t, err)

// test nil scalar data
fieldData = &schemapb.FieldData{
FieldName: "int64Field",
}
_, err = ParseFieldData2PrimaryKeys(fieldData)
assert.Error(t, err)

// test invalid pk type
fieldData.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_LongData{
LongData: &schemapb.LongArray{
Data: pkValues,
},
},
},
}
_, err = ParseFieldData2PrimaryKeys(fieldData)
assert.Error(t, err)

// test parse success
fieldData.Type = schemapb.DataType_Int64
testPks := make([]PrimaryKey, len(pkValues))
for index, v := range pkValues {
testPks[index] = NewInt64PrimaryKey(v)
}

pks, err := ParseFieldData2PrimaryKeys(fieldData)
assert.Nil(t, err)

assert.ElementsMatch(t, pks, testPks)
})

t.Run("varChar pk", func(t *testing.T) {
pkValues := []string{"test1", "test2"}
var fieldData *schemapb.FieldData

// test nil fieldData
_, err := ParseFieldData2PrimaryKeys(fieldData)
assert.Error(t, err)

// test nil scalar data
fieldData = &schemapb.FieldData{
FieldName: "VarCharField",
}
_, err = ParseFieldData2PrimaryKeys(fieldData)
assert.Error(t, err)

// test invalid pk type
fieldData.Field = &schemapb.FieldData_Scalars{
Scalars: &schemapb.ScalarField{
Data: &schemapb.ScalarField_StringData{
StringData: &schemapb.StringArray{
Data: pkValues,
},
},
},
}
_, err = ParseFieldData2PrimaryKeys(fieldData)
assert.Error(t, err)

// test parse success
fieldData.Type = schemapb.DataType_VarChar
testPks := make([]PrimaryKey, len(pkValues))
for index, v := range pkValues {
testPks[index] = NewVarCharPrimaryKey(v)
}

pks, err := ParseFieldData2PrimaryKeys(fieldData)
assert.Nil(t, err)

assert.ElementsMatch(t, pks, testPks)
})
}

func TestParsePrimaryKeysAndIDs(t *testing.T) {
type testCase struct {
pks []PrimaryKey
}
testCases := []testCase{
{
pks: []PrimaryKey{NewInt64PrimaryKey(1), NewInt64PrimaryKey(2)},
},
{
pks: []PrimaryKey{NewVarCharPrimaryKey("test1"), NewVarCharPrimaryKey("test2")},
},
}

for _, c := range testCases {
ids := ParsePrimaryKeys2IDs(c.pks)
testPks := ParseIDs2PrimaryKeys(ids)
assert.ElementsMatch(t, c.pks, testPks)
}
}
@ -20,7 +20,6 @@ import (

"encoding/json"

"github.com/bits-and-blooms/bloom/v3"

"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/proto/schemapb"
)

@ -92,8 +91,8 @@ func (stats *PrimaryKeyStats) UnmarshalJSON(data []byte) error {

return err
}
case schemapb.DataType_VarChar:
stats.MaxPk = &StringPrimaryKey{}
stats.MinPk = &StringPrimaryKey{}
stats.MaxPk = &VarCharPrimaryKey{}
stats.MinPk = &VarCharPrimaryKey{}
}

if maxPkMessage, ok := messageMap["maxPk"]; ok && maxPkMessage != nil {

@ -154,9 +153,9 @@ func (sw *StatsWriter) generatePrimaryKeyStats(fieldID int64, pkType schemapb.Da

}

stats.BF = bloom.NewWithEstimates(bloomFilterSize, maxBloomFalsePositive)
switch fieldData := msgs.(type) {
case *Int64FieldData:
data := fieldData.Data
switch pkType {
case schemapb.DataType_Int64:
data := msgs.(*Int64FieldData).Data
if len(data) < 1 {
// return error: msgs must has one element at least
return nil

@ -164,26 +163,22 @@ func (sw *StatsWriter) generatePrimaryKeyStats(fieldID int64, pkType schemapb.Da

b := make([]byte, 8)
for _, int64Value := range data {
pk := &Int64PrimaryKey{
Value: int64Value,
}
pk := NewInt64PrimaryKey(int64Value)
stats.updatePk(pk)
common.Endian.PutUint64(b, uint64(int64Value))
stats.BF.Add(b)
}
case *StringFieldData:
data := fieldData.Data
case schemapb.DataType_VarChar:
data := msgs.(*StringFieldData).Data
if len(data) < 1 {
// return error: msgs must has one element at least
return nil
}

for _, str := range data {
pk := &StringPrimaryKey{
Value: str,
}
pk := NewVarCharPrimaryKey(str)
stats.updatePk(pk)
stats.BF.Add([]byte(str))
stats.BF.AddString(str)
}
default:
//TODO::
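generatePrimaryKeyStats now branches on the declared pkType and tracks MinPk/MaxPk through updatePk while feeding the bloom filter. The min/max fold reduces to the PrimaryKey comparison operators; a standalone sketch over the same sample strings the test below uses:

package main

import "fmt"

// pk is a reduced stand-in for VarCharPrimaryKey with its GT/LT semantics.
type pk struct{ v string }

func (p pk) GT(o pk) bool { return p.v > o.v }
func (p pk) LT(o pk) bool { return p.v < o.v }

func main() {
	data := []string{"bc", "ac", "abd", "cd", "milvus"}
	minPk, maxPk := pk{data[0]}, pk{data[0]}
	for _, s := range data[1:] {
		p := pk{s}
		// updatePk in the real code performs the same two comparisons.
		if p.GT(maxPk) {
			maxPk = p
		}
		if p.LT(minPk) {
			minPk = p
		}
	}
	fmt.Println(minPk.v, maxPk.v) // abd milvus
}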
@ -62,7 +62,7 @@ func TestStatsWriter_Int64PrimaryKey(t *testing.T) {

assert.Nil(t, err)
}

func TestStatsWriter_StringPrimaryKey(t *testing.T) {
func TestStatsWriter_VarCharPrimaryKey(t *testing.T) {
data := &StringFieldData{
Data: []string{"bc", "ac", "abd", "cd", "milvus"},
}

@ -75,12 +75,8 @@ func TestStatsWriter_StringPrimaryKey(t *testing.T) {

sr.SetBuffer(b)
stats, err := sr.GetPrimaryKeyStats()
assert.Nil(t, err)
maxPk := &StringPrimaryKey{
Value: "milvus",
}
minPk := &StringPrimaryKey{
Value: "abd",
}
maxPk := NewVarCharPrimaryKey("milvus")
minPk := NewVarCharPrimaryKey("abd")
assert.Equal(t, true, stats.MaxPk.EQ(maxPk))
assert.Equal(t, true, stats.MinPk.EQ(minPk))
for _, id := range data.Data {
@ -248,6 +248,16 @@ func IsBoolType(dataType schemapb.DataType) bool {

}
}

// IsStringType returns true if input is a varChar type, otherwise false
func IsStringType(dataType schemapb.DataType) bool {
switch dataType {
case schemapb.DataType_VarChar:
return true
default:
return false
}
}

// AppendFieldData appends fields data of specified index from src to dst
func AppendFieldData(dst []*schemapb.FieldData, src []*schemapb.FieldData, idx int64) {
for i, fieldData := range src {

@ -384,3 +394,77 @@ func GetPrimaryFieldSchema(schema *schemapb.CollectionSchema) (*schemapb.FieldSc

return nil, errors.New("primary field is not found")
}

// GetPrimaryFieldData get primary field data from all field data inserted from sdk
func GetPrimaryFieldData(datas []*schemapb.FieldData, primaryFieldSchema *schemapb.FieldSchema) (*schemapb.FieldData, error) {
primaryFieldName := primaryFieldSchema.Name

var primaryFieldData *schemapb.FieldData
for _, field := range datas {
if field.FieldName == primaryFieldName {
if primaryFieldSchema.AutoID {
return nil, fmt.Errorf("autoID field %v does not require data", primaryFieldName)
}
primaryFieldData = field
}
}

if primaryFieldData == nil {
return nil, fmt.Errorf("can't find data for primary field %v", primaryFieldName)
}

return primaryFieldData, nil
}

func AppendIDs(dst *schemapb.IDs, src *schemapb.IDs, idx int) {
switch src.IdField.(type) {
case *schemapb.IDs_IntId:
if dst.GetIdField() == nil {
dst.IdField = &schemapb.IDs_IntId{
IntId: &schemapb.LongArray{
Data: []int64{src.GetIntId().Data[idx]},
},
}
} else {
dst.GetIntId().Data = append(dst.GetIntId().Data, src.GetIntId().Data[idx])
}
case *schemapb.IDs_StrId:
if dst.GetIdField() == nil {
dst.IdField = &schemapb.IDs_StrId{
StrId: &schemapb.StringArray{
Data: []string{src.GetStrId().Data[idx]},
},
}
} else {
dst.GetStrId().Data = append(dst.GetStrId().Data, src.GetStrId().Data[idx])
}
default:
//TODO
}
}

func GetSizeOfIDs(data *schemapb.IDs) int {
result := 0
if data.IdField == nil {
return result
}

switch data.GetIdField().(type) {
case *schemapb.IDs_IntId:
result = len(data.GetIntId().GetData())
case *schemapb.IDs_StrId:
result = len(data.GetStrId().GetData())
default:
//TODO::
}

return result
}

func IsPrimaryFieldType(dataType schemapb.DataType) bool {
if dataType == schemapb.DataType_Int64 || dataType == schemapb.DataType_VarChar {
return true
}

return false
}
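These helpers all branch on the IDs oneof. A usage sketch, assuming they live in internal/util/typeutil as the neighboring GetPrimaryFieldSchema does:

package main

import (
	"fmt"

	"github.com/milvus-io/milvus/internal/proto/schemapb"
	"github.com/milvus-io/milvus/internal/util/typeutil"
)

func main() {
	src := &schemapb.IDs{
		IdField: &schemapb.IDs_StrId{
			StrId: &schemapb.StringArray{Data: []string{"a", "b", "c"}},
		},
	}
	dst := &schemapb.IDs{}
	typeutil.AppendIDs(dst, src, 1) // copies "b" into dst
	fmt.Println(typeutil.GetSizeOfIDs(dst))                             // 1
	fmt.Println(typeutil.IsPrimaryFieldType(schemapb.DataType_VarChar)) // true
	fmt.Println(typeutil.IsStringType(schemapb.DataType_String))        // false: only VarChar counts
}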