Refactor RootCoord (#18930)

Signed-off-by: longjiquan <jiquan.long@zilliz.com>
Co-authored-by: xaxys <tpnnghd@163.com>

Signed-off-by: longjiquan <jiquan.long@zilliz.com>
Co-authored-by: xaxys <tpnnghd@163.com>
pull/19025/head
Jiquan Long 2022-09-05 13:29:11 +08:00 committed by GitHub
parent 686b0ce796
commit a5e2d6b6fb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
126 changed files with 10348 additions and 8782 deletions

View File

@ -0,0 +1,24 @@
package allocator
// MockGIDAllocator is a test double for GIDAllocator whose behavior is
// injected per-call through the exported function-valued fields.
// A method panics with a nil-function dereference if its corresponding
// field was never assigned.
type MockGIDAllocator struct {
	GIDAllocator

	AllocF    func(count uint32) (UniqueID, UniqueID, error)
	AllocOneF func() (UniqueID, error)
	UpdateIDF func() error
}

// Alloc delegates to the injected AllocF.
func (m MockGIDAllocator) Alloc(count uint32) (UniqueID, UniqueID, error) {
	fn := m.AllocF
	return fn(count)
}

// AllocOne delegates to the injected AllocOneF.
func (m MockGIDAllocator) AllocOne() (UniqueID, error) {
	fn := m.AllocOneF
	return fn()
}

// UpdateID delegates to the injected UpdateIDF.
func (m MockGIDAllocator) UpdateID() error {
	fn := m.UpdateIDF
	return fn()
}

// NewMockGIDAllocator returns a mock with no behavior configured yet;
// callers assign the *F fields before use.
func NewMockGIDAllocator() *MockGIDAllocator {
	return new(MockGIDAllocator)
}

View File

@ -0,0 +1,19 @@
package common
import "reflect"
// ByteSlice is a []byte with value-semantics helpers (deep copy and
// equality).
type ByteSlice []byte

// Clone returns a copy of s that shares no backing storage with s.
// A nil receiver clones to nil so that s.Clone().Equal(s) always holds:
// reflect.DeepEqual distinguishes a nil slice from an empty non-nil one,
// matching the nil-preserving behavior of Str2Str.Clone.
func (s ByteSlice) Clone() ByteSlice {
	if s == nil {
		return nil
	}
	clone := make(ByteSlice, len(s))
	copy(clone, s)
	return clone
}

// Equal reports whether s and other have identical contents and the same
// nil-ness (via reflect.DeepEqual).
func (s ByteSlice) Equal(other ByteSlice) bool {
	return reflect.DeepEqual(s, other)
}

// CloneByteSlice is a free-function convenience around ByteSlice.Clone.
func CloneByteSlice(s ByteSlice) ByteSlice {
	return s.Clone()
}

View File

@ -0,0 +1,43 @@
package common
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestCloneByteSlice verifies that CloneByteSlice produces a slice equal
// to its source for representative byte patterns. The clone call itself
// is exercised (previously the test only compared want against the input,
// leaving Clone untested).
func TestCloneByteSlice(t *testing.T) {
	type args struct {
		s ByteSlice
	}
	tests := []struct {
		name string
		args args
		want ByteSlice
	}{
		{
			args: args{s: []byte{0x0}},
			want: []byte{0x0},
		},
		{
			args: args{s: []byte{0xff}},
			want: []byte{0xff},
		},
		{
			args: args{s: []byte{0x0f}},
			want: []byte{0x0f},
		},
		{
			args: args{s: []byte{0xf0}},
			want: []byte{0xf0},
		}, {
			args: args{s: []byte{0x0, 0xff, 0x0f, 0xf0}},
			want: []byte{0x0, 0xff, 0x0f, 0xf0},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			clone := CloneByteSlice(tt.args.s)
			assert.True(t, clone.Equal(tt.want))
		})
	}
}

View File

@ -0,0 +1,36 @@
package common
import (
"reflect"
"github.com/milvus-io/milvus/internal/proto/commonpb"
)
// KeyDataPairs wraps a list of commonpb.KeyDataPair with helpers for
// deep-copying and order-insensitive comparison.
type KeyDataPairs []*commonpb.KeyDataPair

// Clone returns a deep copy: every pair is re-allocated and each pair's
// Data payload is copied.
func (pairs KeyDataPairs) Clone() KeyDataPairs {
	clone := make(KeyDataPairs, 0, len(pairs))
	for _, p := range pairs {
		pair := &commonpb.KeyDataPair{
			Key:  p.GetKey(),
			Data: CloneByteSlice(p.GetData()),
		}
		clone = append(clone, pair)
	}
	return clone
}

// ToMap flattens the pairs into a key->data map; when a key repeats, the
// last occurrence wins. Data payloads are copied.
func (pairs KeyDataPairs) ToMap() map[string][]byte {
	m := make(map[string][]byte, len(pairs))
	for _, p := range pairs {
		m[p.GetKey()] = CloneByteSlice(p.GetData())
	}
	return m
}

// Equal reports whether both sides flatten to the same key->data map,
// i.e. the comparison ignores pair ordering.
func (pairs KeyDataPairs) Equal(other KeyDataPairs) bool {
	return reflect.DeepEqual(pairs.ToMap(), other.ToMap())
}

// CloneKeyDataPairs is a free-function convenience around Clone.
func CloneKeyDataPairs(pairs KeyDataPairs) KeyDataPairs {
	return pairs.Clone()
}

View File

@ -0,0 +1,41 @@
package common
import (
"testing"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/stretchr/testify/assert"
)
// TestCloneKeyDataPairs verifies that a cloned KeyDataPairs compares
// equal to its source, including the nil case.
func TestCloneKeyDataPairs(t *testing.T) {
	type args struct {
		pairs KeyDataPairs
	}
	cases := []struct {
		name string
		args args
	}{
		{args: args{pairs: nil}},
		{args: args{pairs: []*commonpb.KeyDataPair{
			{Key: "k1", Data: []byte("v1")},
			{Key: "k2", Data: []byte("v2")},
			{Key: "k3", Data: []byte("v3")},
			{Key: "k4", Data: []byte("v4")},
		}}},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := CloneKeyDataPairs(tc.args.pairs)
			assert.True(t, got.Equal(tc.args.pairs))
		})
	}
}

View File

@ -0,0 +1,36 @@
package common
import (
"reflect"
"github.com/milvus-io/milvus/internal/proto/commonpb"
)
// KeyValuePairs wraps a list of commonpb.KeyValuePair with helpers for
// deep-copying and order-insensitive comparison.
type KeyValuePairs []*commonpb.KeyValuePair

// Clone returns a deep copy in which every pair is re-allocated.
func (pairs KeyValuePairs) Clone() KeyValuePairs {
	clone := make(KeyValuePairs, 0, len(pairs))
	for _, p := range pairs {
		pair := &commonpb.KeyValuePair{
			Key:   p.GetKey(),
			Value: p.GetValue(),
		}
		clone = append(clone, pair)
	}
	return clone
}

// ToMap flattens the pairs into a key->value map; when a key repeats,
// the last occurrence wins.
func (pairs KeyValuePairs) ToMap() map[string]string {
	m := make(map[string]string, len(pairs))
	for _, p := range pairs {
		m[p.GetKey()] = p.GetValue()
	}
	return m
}

// Equal reports whether both sides flatten to the same key->value map;
// pair ordering is therefore ignored.
func (pairs KeyValuePairs) Equal(other KeyValuePairs) bool {
	return reflect.DeepEqual(pairs.ToMap(), other.ToMap())
}

// CloneKeyValuePairs is a free-function convenience around Clone.
func CloneKeyValuePairs(pairs KeyValuePairs) KeyValuePairs {
	return pairs.Clone()
}

View File

@ -0,0 +1,42 @@
package common
import (
"testing"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/stretchr/testify/assert"
)
// TestCloneKeyValuePairs verifies that a cloned KeyValuePairs compares
// equal to its source, including the nil case. The table carries only
// inputs: the expectation is always clone.Equal(source), so no separate
// `want` field is needed.
func TestCloneKeyValuePairs(t *testing.T) {
	type args struct {
		pairs KeyValuePairs
	}
	tests := []struct {
		name string
		args args
	}{
		{
			args: args{
				pairs: nil,
			},
		},
		{
			args: args{
				pairs: []*commonpb.KeyValuePair{
					{Key: "k1", Value: "v1"},
					{Key: "k2", Value: "v2"},
					{Key: "k3", Value: "v3"},
					{Key: "k4", Value: "v4"},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			clone := CloneKeyValuePairs(tt.args.pairs)
			assert.True(t, clone.Equal(tt.args.pairs))
		})
	}
}

24
internal/common/map.go Normal file
View File

@ -0,0 +1,24 @@
package common
import "reflect"
// Str2Str is a string-to-string map with copy/compare helpers.
type Str2Str map[string]string

// Clone returns an independent copy of m. A nil map clones to nil, which
// keeps m.Clone().Equal(m) true under reflect.DeepEqual.
func (m Str2Str) Clone() Str2Str {
	if m == nil {
		return nil
	}
	clone := make(Str2Str, len(m))
	for k, v := range m {
		clone[k] = v
	}
	return clone
}

// Equal reports whether m and other hold exactly the same entries and
// share the same nil-ness (via reflect.DeepEqual).
func (m Str2Str) Equal(other Str2Str) bool {
	return reflect.DeepEqual(m, other)
}

// CloneStr2Str is a free-function convenience around Str2Str.Clone.
func CloneStr2Str(m Str2Str) Str2Str {
	return m.Clone()
}

View File

@ -0,0 +1,37 @@
package common
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestCloneStr2Str verifies that a cloned Str2Str compares equal to its
// source, including the nil case.
func TestCloneStr2Str(t *testing.T) {
	type args struct {
		m Str2Str
	}
	cases := []struct {
		name string
		args args
	}{
		{args: args{m: nil}},
		{args: args{m: map[string]string{
			"k1": "v1",
			"k2": "v2",
		}}},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := CloneStr2Str(tc.args.m)
			assert.True(t, got.Equal(tc.args.m))
		})
	}
}

View File

@ -0,0 +1,27 @@
package common
// StringList is a []string with copy/compare helpers.
type StringList []string

// Clone returns a copy of l backed by fresh storage.
func (l StringList) Clone() StringList {
	clone := make(StringList, len(l))
	copy(clone, l)
	return clone
}

// Equal reports whether l and other hold the same strings in the same
// order. Unlike reflect.DeepEqual, nil and empty lists compare equal.
func (l StringList) Equal(other StringList) bool {
	if len(l) != len(other) {
		return false
	}
	for i, s := range l {
		if s != other[i] {
			return false
		}
	}
	return true
}

// CloneStringList is a free-function convenience around StringList.Clone.
func CloneStringList(l StringList) StringList {
	return l.Clone()
}

View File

@ -0,0 +1,39 @@
package common
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestCloneStringList(t *testing.T) {
type args struct {
l StringList
}
tests := []struct {
name string
args args
}{
{
args: args{
l: nil,
},
},
{
args: args{
l: []string{"s1", "s2"},
},
},
{
args: args{
l: []string{"dup", "dup", "dup"},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := CloneStringList(tt.args.l)
assert.True(t, got.Equal(tt.args.l))
})
}
}

View File

@ -316,7 +316,7 @@ static void InitDefaultsscc_info_VectorField_schema_2eproto() {
&scc_info_FloatArray_schema_2eproto.base,}};
static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_schema_2eproto[14];
static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_schema_2eproto[1];
static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_schema_2eproto[2];
static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_schema_2eproto = nullptr;
const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_schema_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
@ -333,6 +333,7 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_schema_2eproto::offsets[] PROT
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, type_params_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, index_params_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, autoid_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::FieldSchema, state_),
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::milvus::proto::schema::CollectionSchema, _internal_metadata_),
~0u, // no _extensions_
@ -439,19 +440,19 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_schema_2eproto::offsets[] PROT
};
static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
{ 0, -1, sizeof(::milvus::proto::schema::FieldSchema)},
{ 13, -1, sizeof(::milvus::proto::schema::CollectionSchema)},
{ 22, -1, sizeof(::milvus::proto::schema::BoolArray)},
{ 28, -1, sizeof(::milvus::proto::schema::IntArray)},
{ 34, -1, sizeof(::milvus::proto::schema::LongArray)},
{ 40, -1, sizeof(::milvus::proto::schema::FloatArray)},
{ 46, -1, sizeof(::milvus::proto::schema::DoubleArray)},
{ 52, -1, sizeof(::milvus::proto::schema::BytesArray)},
{ 58, -1, sizeof(::milvus::proto::schema::StringArray)},
{ 64, -1, sizeof(::milvus::proto::schema::ScalarField)},
{ 77, -1, sizeof(::milvus::proto::schema::VectorField)},
{ 86, -1, sizeof(::milvus::proto::schema::FieldData)},
{ 97, -1, sizeof(::milvus::proto::schema::IDs)},
{ 105, -1, sizeof(::milvus::proto::schema::SearchResultData)},
{ 14, -1, sizeof(::milvus::proto::schema::CollectionSchema)},
{ 23, -1, sizeof(::milvus::proto::schema::BoolArray)},
{ 29, -1, sizeof(::milvus::proto::schema::IntArray)},
{ 35, -1, sizeof(::milvus::proto::schema::LongArray)},
{ 41, -1, sizeof(::milvus::proto::schema::FloatArray)},
{ 47, -1, sizeof(::milvus::proto::schema::DoubleArray)},
{ 53, -1, sizeof(::milvus::proto::schema::BytesArray)},
{ 59, -1, sizeof(::milvus::proto::schema::StringArray)},
{ 65, -1, sizeof(::milvus::proto::schema::ScalarField)},
{ 78, -1, sizeof(::milvus::proto::schema::VectorField)},
{ 87, -1, sizeof(::milvus::proto::schema::FieldData)},
{ 98, -1, sizeof(::milvus::proto::schema::IDs)},
{ 106, -1, sizeof(::milvus::proto::schema::SearchResultData)},
};
static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
@ -473,54 +474,57 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] =
const char descriptor_table_protodef_schema_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) =
"\n\014schema.proto\022\023milvus.proto.schema\032\014com"
"mon.proto\"\214\002\n\013FieldSchema\022\017\n\007fieldID\030\001 \001"
"mon.proto\"\274\002\n\013FieldSchema\022\017\n\007fieldID\030\001 \001"
"(\003\022\014\n\004name\030\002 \001(\t\022\026\n\016is_primary_key\030\003 \001(\010"
"\022\023\n\013description\030\004 \001(\t\0220\n\tdata_type\030\005 \001(\016"
"2\035.milvus.proto.schema.DataType\0226\n\013type_"
"params\030\006 \003(\0132!.milvus.proto.common.KeyVa"
"luePair\0227\n\014index_params\030\007 \003(\0132!.milvus.p"
"roto.common.KeyValuePair\022\016\n\006autoID\030\010 \001(\010"
"\"w\n\020CollectionSchema\022\014\n\004name\030\001 \001(\t\022\023\n\013de"
"scription\030\002 \001(\t\022\016\n\006autoID\030\003 \001(\010\0220\n\006field"
"s\030\004 \003(\0132 .milvus.proto.schema.FieldSchem"
"a\"\031\n\tBoolArray\022\014\n\004data\030\001 \003(\010\"\030\n\010IntArray"
"\022\014\n\004data\030\001 \003(\005\"\031\n\tLongArray\022\014\n\004data\030\001 \003("
"\003\"\032\n\nFloatArray\022\014\n\004data\030\001 \003(\002\"\033\n\013DoubleA"
"rray\022\014\n\004data\030\001 \003(\001\"\032\n\nBytesArray\022\014\n\004data"
"\030\001 \003(\014\"\033\n\013StringArray\022\014\n\004data\030\001 \003(\t\"\222\003\n\013"
"ScalarField\0223\n\tbool_data\030\001 \001(\0132\036.milvus."
"proto.schema.BoolArrayH\000\0221\n\010int_data\030\002 \001"
"(\0132\035.milvus.proto.schema.IntArrayH\000\0223\n\tl"
"ong_data\030\003 \001(\0132\036.milvus.proto.schema.Lon"
"gArrayH\000\0225\n\nfloat_data\030\004 \001(\0132\037.milvus.pr"
"oto.schema.FloatArrayH\000\0227\n\013double_data\030\005"
" \001(\0132 .milvus.proto.schema.DoubleArrayH\000"
"\0227\n\013string_data\030\006 \001(\0132 .milvus.proto.sch"
"ema.StringArrayH\000\0225\n\nbytes_data\030\007 \001(\0132\037."
"milvus.proto.schema.BytesArrayH\000B\006\n\004data"
"\"t\n\013VectorField\022\013\n\003dim\030\001 \001(\003\0227\n\014float_ve"
"ctor\030\002 \001(\0132\037.milvus.proto.schema.FloatAr"
"rayH\000\022\027\n\rbinary_vector\030\003 \001(\014H\000B\006\n\004data\"\321"
"\001\n\tFieldData\022+\n\004type\030\001 \001(\0162\035.milvus.prot"
"o.schema.DataType\022\022\n\nfield_name\030\002 \001(\t\0223\n"
"\007scalars\030\003 \001(\0132 .milvus.proto.schema.Sca"
"larFieldH\000\0223\n\007vectors\030\004 \001(\0132 .milvus.pro"
"to.schema.VectorFieldH\000\022\020\n\010field_id\030\005 \001("
"\003B\007\n\005field\"w\n\003IDs\0220\n\006int_id\030\001 \001(\0132\036.milv"
"us.proto.schema.LongArrayH\000\0222\n\006str_id\030\002 "
"\001(\0132 .milvus.proto.schema.StringArrayH\000B"
"\n\n\010id_field\"\261\001\n\020SearchResultData\022\023\n\013num_"
"queries\030\001 \001(\003\022\r\n\005top_k\030\002 \001(\003\0223\n\013fields_d"
"ata\030\003 \003(\0132\036.milvus.proto.schema.FieldDat"
"a\022\016\n\006scores\030\004 \003(\002\022%\n\003ids\030\005 \001(\0132\030.milvus."
"proto.schema.IDs\022\r\n\005topks\030\006 \003(\003*\234\001\n\010Data"
"Type\022\010\n\004None\020\000\022\010\n\004Bool\020\001\022\010\n\004Int8\020\002\022\t\n\005In"
"t16\020\003\022\t\n\005Int32\020\004\022\t\n\005Int64\020\005\022\t\n\005Float\020\n\022\n"
"\n\006Double\020\013\022\n\n\006String\020\024\022\013\n\007VarChar\020\025\022\020\n\014B"
"inaryVector\020d\022\017\n\013FloatVector\020eBW\n\016io.mil"
"vus.grpcB\013SchemaProtoP\001Z3github.com/milv"
"us-io/milvus/internal/proto/schemapb\240\001\001b"
"\006proto3"
"\022.\n\005state\030\t \001(\0162\037.milvus.proto.schema.Fi"
"eldState\"w\n\020CollectionSchema\022\014\n\004name\030\001 \001"
"(\t\022\023\n\013description\030\002 \001(\t\022\016\n\006autoID\030\003 \001(\010\022"
"0\n\006fields\030\004 \003(\0132 .milvus.proto.schema.Fi"
"eldSchema\"\031\n\tBoolArray\022\014\n\004data\030\001 \003(\010\"\030\n\010"
"IntArray\022\014\n\004data\030\001 \003(\005\"\031\n\tLongArray\022\014\n\004d"
"ata\030\001 \003(\003\"\032\n\nFloatArray\022\014\n\004data\030\001 \003(\002\"\033\n"
"\013DoubleArray\022\014\n\004data\030\001 \003(\001\"\032\n\nBytesArray"
"\022\014\n\004data\030\001 \003(\014\"\033\n\013StringArray\022\014\n\004data\030\001 "
"\003(\t\"\222\003\n\013ScalarField\0223\n\tbool_data\030\001 \001(\0132\036"
".milvus.proto.schema.BoolArrayH\000\0221\n\010int_"
"data\030\002 \001(\0132\035.milvus.proto.schema.IntArra"
"yH\000\0223\n\tlong_data\030\003 \001(\0132\036.milvus.proto.sc"
"hema.LongArrayH\000\0225\n\nfloat_data\030\004 \001(\0132\037.m"
"ilvus.proto.schema.FloatArrayH\000\0227\n\013doubl"
"e_data\030\005 \001(\0132 .milvus.proto.schema.Doubl"
"eArrayH\000\0227\n\013string_data\030\006 \001(\0132 .milvus.p"
"roto.schema.StringArrayH\000\0225\n\nbytes_data\030"
"\007 \001(\0132\037.milvus.proto.schema.BytesArrayH\000"
"B\006\n\004data\"t\n\013VectorField\022\013\n\003dim\030\001 \001(\003\0227\n\014"
"float_vector\030\002 \001(\0132\037.milvus.proto.schema"
".FloatArrayH\000\022\027\n\rbinary_vector\030\003 \001(\014H\000B\006"
"\n\004data\"\321\001\n\tFieldData\022+\n\004type\030\001 \001(\0162\035.mil"
"vus.proto.schema.DataType\022\022\n\nfield_name\030"
"\002 \001(\t\0223\n\007scalars\030\003 \001(\0132 .milvus.proto.sc"
"hema.ScalarFieldH\000\0223\n\007vectors\030\004 \001(\0132 .mi"
"lvus.proto.schema.VectorFieldH\000\022\020\n\010field"
"_id\030\005 \001(\003B\007\n\005field\"w\n\003IDs\0220\n\006int_id\030\001 \001("
"\0132\036.milvus.proto.schema.LongArrayH\000\0222\n\006s"
"tr_id\030\002 \001(\0132 .milvus.proto.schema.String"
"ArrayH\000B\n\n\010id_field\"\261\001\n\020SearchResultData"
"\022\023\n\013num_queries\030\001 \001(\003\022\r\n\005top_k\030\002 \001(\003\0223\n\013"
"fields_data\030\003 \003(\0132\036.milvus.proto.schema."
"FieldData\022\016\n\006scores\030\004 \003(\002\022%\n\003ids\030\005 \001(\0132\030"
".milvus.proto.schema.IDs\022\r\n\005topks\030\006 \003(\003*"
"\234\001\n\010DataType\022\010\n\004None\020\000\022\010\n\004Bool\020\001\022\010\n\004Int8"
"\020\002\022\t\n\005Int16\020\003\022\t\n\005Int32\020\004\022\t\n\005Int64\020\005\022\t\n\005F"
"loat\020\n\022\n\n\006Double\020\013\022\n\n\006String\020\024\022\013\n\007VarCha"
"r\020\025\022\020\n\014BinaryVector\020d\022\017\n\013FloatVector\020e*V"
"\n\nFieldState\022\020\n\014FieldCreated\020\000\022\021\n\rFieldC"
"reating\020\001\022\021\n\rFieldDropping\020\002\022\020\n\014FieldDro"
"pped\020\003BW\n\016io.milvus.grpcB\013SchemaProtoP\001Z"
"3github.com/milvus-io/milvus/internal/pr"
"oto/schemapb\240\001\001b\006proto3"
;
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_schema_2eproto_deps[1] = {
&::descriptor_table_common_2eproto,
@ -544,7 +548,7 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_sch
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_schema_2eproto_once;
static bool descriptor_table_schema_2eproto_initialized = false;
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_schema_2eproto = {
&descriptor_table_schema_2eproto_initialized, descriptor_table_protodef_schema_2eproto, "schema.proto", 1927,
&descriptor_table_schema_2eproto_initialized, descriptor_table_protodef_schema_2eproto, "schema.proto", 2063,
&descriptor_table_schema_2eproto_once, descriptor_table_schema_2eproto_sccs, descriptor_table_schema_2eproto_deps, 14, 1,
schemas, file_default_instances, TableStruct_schema_2eproto::offsets,
file_level_metadata_schema_2eproto, 14, file_level_enum_descriptors_schema_2eproto, file_level_service_descriptors_schema_2eproto,
@ -579,6 +583,22 @@ bool DataType_IsValid(int value) {
}
}
// Returns the reflection descriptor for the FieldState enum, lazily
// running descriptor assignment for schema.proto on first use.
// Generated code (protoc): index 1 is FieldState's slot in
// file_level_enum_descriptors_schema_2eproto (DataType occupies slot 0).
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* FieldState_descriptor() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&descriptor_table_schema_2eproto);
return file_level_enum_descriptors_schema_2eproto[1];
}
// Returns true iff |value| matches a declared FieldState enumerator:
// FieldCreated(0), FieldCreating(1), FieldDropping(2), FieldDropped(3).
bool FieldState_IsValid(int value) {
switch (value) {
case 0:
case 1:
case 2:
case 3:
return true;
default:
return false;
}
}
// ===================================================================
@ -614,8 +634,8 @@ FieldSchema::FieldSchema(const FieldSchema& from)
description_.AssignWithDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited(), from.description_);
}
::memcpy(&fieldid_, &from.fieldid_,
static_cast<size_t>(reinterpret_cast<char*>(&autoid_) -
reinterpret_cast<char*>(&fieldid_)) + sizeof(autoid_));
static_cast<size_t>(reinterpret_cast<char*>(&state_) -
reinterpret_cast<char*>(&fieldid_)) + sizeof(state_));
// @@protoc_insertion_point(copy_constructor:milvus.proto.schema.FieldSchema)
}
@ -624,8 +644,8 @@ void FieldSchema::SharedCtor() {
name_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
description_.UnsafeSetDefault(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
::memset(&fieldid_, 0, static_cast<size_t>(
reinterpret_cast<char*>(&autoid_) -
reinterpret_cast<char*>(&fieldid_)) + sizeof(autoid_));
reinterpret_cast<char*>(&state_) -
reinterpret_cast<char*>(&fieldid_)) + sizeof(state_));
}
FieldSchema::~FieldSchema() {
@ -658,8 +678,8 @@ void FieldSchema::Clear() {
name_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
description_.ClearToEmptyNoArena(&::PROTOBUF_NAMESPACE_ID::internal::GetEmptyStringAlreadyInited());
::memset(&fieldid_, 0, static_cast<size_t>(
reinterpret_cast<char*>(&autoid_) -
reinterpret_cast<char*>(&fieldid_)) + sizeof(autoid_));
reinterpret_cast<char*>(&state_) -
reinterpret_cast<char*>(&fieldid_)) + sizeof(state_));
_internal_metadata_.Clear();
}
@ -738,6 +758,14 @@ const char* FieldSchema::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID
CHK_(ptr);
} else goto handle_unusual;
continue;
// .milvus.proto.schema.FieldState state = 9;
case 9:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 72)) {
::PROTOBUF_NAMESPACE_ID::uint64 val = ::PROTOBUF_NAMESPACE_ID::internal::ReadVarint(&ptr);
CHK_(ptr);
set_state(static_cast<::milvus::proto::schema::FieldState>(val));
} else goto handle_unusual;
continue;
default: {
handle_unusual:
if ((tag & 7) == 4 || tag == 0) {
@ -873,6 +901,20 @@ bool FieldSchema::MergePartialFromCodedStream(
break;
}
// .milvus.proto.schema.FieldState state = 9;
case 9: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (72 & 0xFF)) {
int value = 0;
DO_((::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadPrimitive<
int, ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::TYPE_ENUM>(
input, &value)));
set_state(static_cast< ::milvus::proto::schema::FieldState >(value));
} else {
goto handle_unusual;
}
break;
}
default: {
handle_unusual:
if (tag == 0) {
@ -959,6 +1001,12 @@ void FieldSchema::SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBool(8, this->autoid(), output);
}
// .milvus.proto.schema.FieldState state = 9;
if (this->state() != 0) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnum(
9, this->state(), output);
}
if (_internal_metadata_.have_unknown_fields()) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
_internal_metadata_.unknown_fields(), output);
@ -1031,6 +1079,12 @@ void FieldSchema::SerializeWithCachedSizes(
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteBoolToArray(8, this->autoid(), target);
}
// .milvus.proto.schema.FieldState state = 9;
if (this->state() != 0) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteEnumToArray(
9, this->state(), target);
}
if (_internal_metadata_.have_unknown_fields()) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
_internal_metadata_.unknown_fields(), target);
@ -1111,6 +1165,12 @@ size_t FieldSchema::ByteSizeLong() const {
total_size += 1 + 1;
}
// .milvus.proto.schema.FieldState state = 9;
if (this->state() != 0) {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::EnumSize(this->state());
}
int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
SetCachedSize(cached_size);
return total_size;
@ -1160,6 +1220,9 @@ void FieldSchema::MergeFrom(const FieldSchema& from) {
if (from.autoid() != 0) {
set_autoid(from.autoid());
}
if (from.state() != 0) {
set_state(from.state());
}
}
void FieldSchema::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
@ -1193,6 +1256,7 @@ void FieldSchema::InternalSwap(FieldSchema* other) {
swap(data_type_, other->data_type_);
swap(is_primary_key_, other->is_primary_key_);
swap(autoid_, other->autoid_);
swap(state_, other->state_);
}
::PROTOBUF_NAMESPACE_ID::Metadata FieldSchema::GetMetadata() const {

View File

@ -159,6 +159,33 @@ inline bool DataType_Parse(
return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum<DataType>(
DataType_descriptor(), name, value);
}
// FieldState models the lifecycle of a collection field, carried in
// FieldSchema.state (field number 9 of schema.proto).
enum FieldState : int {
FieldCreated = 0,
FieldCreating = 1,
FieldDropping = 2,
FieldDropped = 3,
// Sentinels forcing the enum to span the full int32 range (proto3 enums
// are open: wire values outside the declared enumerators are preserved).
FieldState_INT_MIN_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::PROTOBUF_NAMESPACE_ID::int32>::min(),
FieldState_INT_MAX_SENTINEL_DO_NOT_USE_ = std::numeric_limits<::PROTOBUF_NAMESPACE_ID::int32>::max()
};
// True iff value names a declared FieldState enumerator.
bool FieldState_IsValid(int value);
constexpr FieldState FieldState_MIN = FieldCreated;
constexpr FieldState FieldState_MAX = FieldDropped;
constexpr int FieldState_ARRAYSIZE = FieldState_MAX + 1;
const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* FieldState_descriptor();
// Returns the proto name of enum_t_value via descriptor reflection.
template<typename T>
inline const std::string& FieldState_Name(T enum_t_value) {
static_assert(::std::is_same<T, FieldState>::value ||
::std::is_integral<T>::value,
"Incorrect type passed to function FieldState_Name.");
return ::PROTOBUF_NAMESPACE_ID::internal::NameOfEnum(
FieldState_descriptor(), enum_t_value);
}
// Parses a proto enum name into *value; returns false on unknown names.
inline bool FieldState_Parse(
const std::string& name, FieldState* value) {
return ::PROTOBUF_NAMESPACE_ID::internal::ParseNamedEnum<FieldState>(
FieldState_descriptor(), name, value);
}
// ===================================================================
class FieldSchema :
@ -282,6 +309,7 @@ class FieldSchema :
kDataTypeFieldNumber = 5,
kIsPrimaryKeyFieldNumber = 3,
kAutoIDFieldNumber = 8,
kStateFieldNumber = 9,
};
// repeated .milvus.proto.common.KeyValuePair type_params = 6;
int type_params_size() const;
@ -347,6 +375,11 @@ class FieldSchema :
bool autoid() const;
void set_autoid(bool value);
// .milvus.proto.schema.FieldState state = 9;
void clear_state();
::milvus::proto::schema::FieldState state() const;
void set_state(::milvus::proto::schema::FieldState value);
// @@protoc_insertion_point(class_scope:milvus.proto.schema.FieldSchema)
private:
class _Internal;
@ -360,6 +393,7 @@ class FieldSchema :
int data_type_;
bool is_primary_key_;
bool autoid_;
int state_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
friend struct ::TableStruct_schema_2eproto;
};
@ -2669,6 +2703,20 @@ inline void FieldSchema::set_autoid(bool value) {
// @@protoc_insertion_point(field_set:milvus.proto.schema.FieldSchema.autoID)
}
// .milvus.proto.schema.FieldState state = 9;
// Resets state to its default value (FieldCreated == 0).
inline void FieldSchema::clear_state() {
state_ = 0;
}
// Returns the stored raw int cast to FieldState. proto3 enums are open,
// so the value may lie outside the declared enumerators.
inline ::milvus::proto::schema::FieldState FieldSchema::state() const {
// @@protoc_insertion_point(field_get:milvus.proto.schema.FieldSchema.state)
return static_cast< ::milvus::proto::schema::FieldState >(state_);
}
// Stores value without validation, as generated proto3 setters do.
inline void FieldSchema::set_state(::milvus::proto::schema::FieldState value) {
state_ = value;
// @@protoc_insertion_point(field_set:milvus.proto.schema.FieldSchema.state)
}
// -------------------------------------------------------------------
// CollectionSchema
@ -4070,6 +4118,11 @@ template <>
inline const EnumDescriptor* GetEnumDescriptor< ::milvus::proto::schema::DataType>() {
return ::milvus::proto::schema::DataType_descriptor();
}
// Registers FieldState with protobuf's enum-reflection traits so that
// templated helpers can resolve its descriptor.
template <> struct is_proto_enum< ::milvus::proto::schema::FieldState> : ::std::true_type {};
template <>
inline const EnumDescriptor* GetEnumDescriptor< ::milvus::proto::schema::FieldState>() {
return ::milvus::proto::schema::FieldState_descriptor();
}
PROTOBUF_NAMESPACE_CLOSE

View File

@ -432,10 +432,6 @@ func (m *mockRootCoordService) UpdateChannelTimeTick(ctx context.Context, req *i
panic("not implemented") // TODO: Implement
}
// ReleaseDQLMessageStream is deliberately unimplemented on this mock;
// reaching it in a test is an error by construction.
func (m *mockRootCoordService) ReleaseDQLMessageStream(ctx context.Context, req *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
panic("not implemented") // TODO: Implement
}
// InvalidateCollectionMetaCache is deliberately unimplemented on this
// mock; reaching it in a test is an error by construction.
func (m *mockRootCoordService) InvalidateCollectionMetaCache(ctx context.Context, req *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
panic("not implemented") // TODO: Implement
}

View File

@ -136,46 +136,6 @@ func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, req *proxypb
return ret.(*commonpb.Status), err
}
// ReleaseDQLMessageStream asks the remote Proxy to release the DQL message
// stream described by req. The call goes through grpcClient.ReCall
// (NOTE(review): presumably re-dials/retries on transient failure —
// confirm against the grpcClient implementation).
func (c *Client) ReleaseDQLMessageStream(ctx context.Context, req *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) {
// Abort early if the caller's context is already canceled or expired.
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
}
return client.(proxypb.ProxyClient).ReleaseDQLMessageStream(ctx, req)
})
// ret may be nil even on nil err; avoid a nil type assertion below.
if err != nil || ret == nil {
return nil, err
}
return ret.(*commonpb.Status), err
}
// SendSearchResult forwards search results to the remote Proxy via the
// retrying gRPC client wrapper.
func (c *Client) SendSearchResult(ctx context.Context, results *internalpb.SearchResults) (*commonpb.Status, error) {
ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) {
// Abort early if the caller's context is already canceled or expired.
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
}
return client.(proxypb.ProxyClient).SendSearchResult(ctx, results)
})
// ret may be nil even on nil err; avoid a nil type assertion below.
if err != nil || ret == nil {
return nil, err
}
return ret.(*commonpb.Status), err
}
// SendRetrieveResult forwards retrieve (query) results to the remote
// Proxy via the retrying gRPC client wrapper.
func (c *Client) SendRetrieveResult(ctx context.Context, results *internalpb.RetrieveResults) (*commonpb.Status, error) {
ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) {
// Abort early if the caller's context is already canceled or expired.
if !funcutil.CheckCtxValid(ctx) {
return nil, ctx.Err()
}
return client.(proxypb.ProxyClient).SendRetrieveResult(ctx, results)
})
// ret may be nil even on nil err; avoid a nil type assertion below.
if err != nil || ret == nil {
return nil, err
}
return ret.(*commonpb.Status), err
}
func (c *Client) InvalidateCredentialCache(ctx context.Context, req *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) {
ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) {
if !funcutil.CheckCtxValid(ctx) {

View File

@ -67,15 +67,6 @@ func Test_NewClient(t *testing.T) {
r3, err := client.InvalidateCollectionMetaCache(ctx, nil)
retCheck(retNotNil, r3, err)
r4, err := client.ReleaseDQLMessageStream(ctx, nil)
retCheck(retNotNil, r4, err)
r5, err := client.SendSearchResult(ctx, nil)
retCheck(retNotNil, r5, err)
r6, err := client.SendRetrieveResult(ctx, nil)
retCheck(retNotNil, r6, err)
r7, err := client.InvalidateCredentialCache(ctx, nil)
retCheck(retNotNil, r7, err)
@ -140,15 +131,6 @@ func Test_NewClient(t *testing.T) {
r3Timeout, err := client.InvalidateCollectionMetaCache(shortCtx, nil)
retCheck(r3Timeout, err)
r4Timeout, err := client.ReleaseDQLMessageStream(shortCtx, nil)
retCheck(r4Timeout, err)
r5Timeout, err := client.SendSearchResult(shortCtx, nil)
retCheck(r5Timeout, err)
r6Timeout, err := client.SendRetrieveResult(shortCtx, nil)
retCheck(r6Timeout, err)
r7Timeout, err := client.InvalidateCredentialCache(shortCtx, nil)
retCheck(r7Timeout, err)

View File

@ -548,11 +548,6 @@ func (s *Server) InvalidateCollectionMetaCache(ctx context.Context, request *pro
return s.proxy.InvalidateCollectionMetaCache(ctx, request)
}
// ReleaseDQLMessageStream notifies Proxy to release and close the search
// message stream of a specific collection; the call is delegated verbatim
// to the wrapped proxy component.
func (s *Server) ReleaseDQLMessageStream(ctx context.Context, request *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
return s.proxy.ReleaseDQLMessageStream(ctx, request)
}
// CreateCollection notifies Proxy to create a collection
func (s *Server) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
return s.proxy.CreateCollection(ctx, request)
@ -743,14 +738,6 @@ func (s *Server) GetFlushState(ctx context.Context, req *milvuspb.GetFlushStateR
return s.proxy.GetFlushState(ctx, req)
}
// SendSearchResult delegates search-result delivery to the wrapped proxy
// component.
func (s *Server) SendSearchResult(ctx context.Context, results *internalpb.SearchResults) (*commonpb.Status, error) {
return s.proxy.SendSearchResult(ctx, results)
}
// SendRetrieveResult delegates retrieve-result delivery to the wrapped
// proxy component.
func (s *Server) SendRetrieveResult(ctx context.Context, results *internalpb.RetrieveResults) (*commonpb.Status, error) {
return s.proxy.SendRetrieveResult(ctx, results)
}
// Import delegates the bulk-import request to the wrapped proxy component.
func (s *Server) Import(ctx context.Context, req *milvuspb.ImportRequest) (*milvuspb.ImportResponse, error) {
return s.proxy.Import(ctx, req)
}

View File

@ -182,10 +182,6 @@ func (m *MockRootCoord) ShowSegments(ctx context.Context, req *milvuspb.ShowSegm
return nil, nil
}
// ReleaseDQLMessageStream is a no-op stub returning zero values
// (nil status, nil error).
func (m *MockRootCoord) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
return nil, nil
}
// InvalidateCollectionMetaCache is a no-op stub returning zero values
// (nil status, nil error).
func (m *MockRootCoord) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return nil, nil
}
@ -597,10 +593,6 @@ func (m *MockProxy) InvalidateCollectionMetaCache(ctx context.Context, request *
return nil, nil
}
// ReleaseDQLMessageStream is a no-op stub returning zero values.
func (m *MockProxy) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
return nil, nil
}
// CreateCollection is a no-op stub returning zero values.
func (m *MockProxy) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
return nil, nil
}
@ -784,14 +776,6 @@ func (m *MockProxy) GetFlushState(ctx context.Context, req *milvuspb.GetFlushSta
return nil, nil
}
// SendSearchResult is a no-op stub returning zero values.
func (m *MockProxy) SendSearchResult(ctx context.Context, req *internalpb.SearchResults) (*commonpb.Status, error) {
return nil, nil
}
// SendRetrieveResult is a no-op stub returning zero values.
func (m *MockProxy) SendRetrieveResult(ctx context.Context, req *internalpb.RetrieveResults) (*commonpb.Status, error) {
return nil, nil
}
// Import is a no-op stub returning zero values.
func (m *MockProxy) Import(ctx context.Context, req *milvuspb.ImportRequest) (*milvuspb.ImportResponse, error) {
return nil, nil
}
@ -1014,11 +998,6 @@ func Test_NewServer(t *testing.T) {
assert.Nil(t, err)
})
t.Run("ReleaseDQLMessageStream", func(t *testing.T) {
_, err := server.ReleaseDQLMessageStream(ctx, nil)
assert.Nil(t, err)
})
t.Run("CreateCollection", func(t *testing.T) {
_, err := server.CreateCollection(ctx, nil)
assert.Nil(t, err)

View File

@ -343,20 +343,6 @@ func (c *Client) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequ
return ret.(*milvuspb.ShowSegmentsResponse), err
}
// ReleaseDQLMessageStream asks RootCoord to release and close the DQL message
// stream of a collection. The call goes through the shared retrying grpc
// client; a nil response with nil error is translated to (nil, nil).
func (c *Client) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
	call := func(client interface{}) (interface{}, error) {
		// Abort immediately if the caller's context is already done.
		if !funcutil.CheckCtxValid(ctx) {
			return nil, ctx.Err()
		}
		return client.(rootcoordpb.RootCoordClient).ReleaseDQLMessageStream(ctx, in)
	}
	resp, err := c.grpcClient.ReCall(ctx, call)
	if err != nil || resp == nil {
		return nil, err
	}
	return resp.(*commonpb.Status), err
}
// InvalidateCollectionMetaCache notifies RootCoord to release the collection cache in Proxies.
func (c *Client) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
ret, err := c.grpcClient.ReCall(ctx, func(client interface{}) (interface{}, error) {

View File

@ -124,10 +124,6 @@ func Test_NewClient(t *testing.T) {
r, err := client.ShowSegments(ctx, nil)
retCheck(retNotNil, r, err)
}
{
r, err := client.ReleaseDQLMessageStream(ctx, nil)
retCheck(retNotNil, r, err)
}
{
r, err := client.GetMetrics(ctx, nil)
retCheck(retNotNil, r, err)
@ -326,10 +322,6 @@ func Test_NewClient(t *testing.T) {
rTimeout, err := client.ShowSegments(shortCtx, nil)
retCheck(rTimeout, err)
}
{
rTimeout, err := client.ReleaseDQLMessageStream(shortCtx, nil)
retCheck(rTimeout, err)
}
{
rTimeout, err := client.GetMetrics(shortCtx, nil)
retCheck(rTimeout, err)

View File

@ -405,11 +405,6 @@ func (s *Server) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentsRequ
return s.rootCoord.ShowSegments(ctx, in)
}
// ReleaseDQLMessageStream notifies RootCoord to release and close the search
// message stream of a specific collection. Pure pass-through to the embedded
// rootCoord component.
func (s *Server) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
	return s.rootCoord.ReleaseDQLMessageStream(ctx, in)
}
// InvalidateCollectionMetaCache notifies RootCoord to release the collection cache in Proxies.
func (s *Server) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return s.rootCoord.InvalidateCollectionMetaCache(ctx, in)

View File

@ -18,32 +18,21 @@ package grpcrootcoord
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"path"
"sync"
"testing"
"time"
"github.com/milvus-io/milvus/internal/proto/indexpb"
clientv3 "go.etcd.io/etcd/client/v3"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
rcc "github.com/milvus-io/milvus/internal/distributed/rootcoord/client"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/rootcoord"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/sessionutil"
@ -59,595 +48,6 @@ func (p *proxyMock) InvalidateCollectionMetaCache(ctx context.Context, request *
return p.invalidateCollectionMetaCache(ctx, request)
}
// ReleaseDQLMessageStream is a test stub that unconditionally reports success.
func (p *proxyMock) ReleaseDQLMessageStream(ctx context.Context, request *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
	status := &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
	return status, nil
}
// TestGrpcService starts a real grpc RootCoord server backed by a
// rootcoord.Core whose outbound dependencies (dd channel sends, DataCoord /
// QueryCoord / IndexCoord calls, proxy invalidation) are replaced with
// recording stubs, then drives it end-to-end through a grpc client:
// component-state RPCs, TSO/ID allocation, collection and partition DDL,
// metadata queries, and teardown. Requires a reachable etcd (Params.EtcdCfg).
func TestGrpcService(t *testing.T) {
	const (
		dbName    = "testDB"
		collName  = "testColl"
		collName2 = "testColl-again"
		partName  = "testPartition"
		fieldName = "vector"
		fieldID   = 100
		segID     = 1001
	)

	// Randomize port, etcd paths and channel names so concurrent test runs
	// cannot collide with each other.
	rand.Seed(time.Now().UnixNano())
	randVal := rand.Int()
	Params.InitOnce(typeutil.RootCoordRole)
	Params.Port = (randVal % 100) + 10000
	t.Log("newParams.Address:", Params.GetAddress())

	ctx := context.Background()
	factory := dependency.NewDefaultFactory(true)
	svr, err := NewServer(ctx, factory)
	assert.Nil(t, err)

	rootcoord.Params.Init()
	rootcoord.Params.EtcdCfg.MetaRootPath = fmt.Sprintf("/%d/test/meta", randVal)
	rootcoord.Params.EtcdCfg.KvRootPath = fmt.Sprintf("/%d/test/kv", randVal)
	rootcoord.Params.CommonCfg.RootCoordSubName = fmt.Sprintf("msgChannel%d", randVal)
	rootcoord.Params.CommonCfg.RootCoordTimeTick = fmt.Sprintf("timeTick%d", randVal)
	rootcoord.Params.CommonCfg.RootCoordStatistics = fmt.Sprintf("stateChannel%d", randVal)
	rootcoord.Params.RootCoordCfg.MaxPartitionNum = 64
	rootcoord.Params.CommonCfg.DefaultPartitionName = "_default"
	rootcoord.Params.CommonCfg.DefaultIndexName = "_default"

	t.Logf("service port = %d", Params.Port)

	core, ok := (svr.rootCoord).(*rootcoord.Core)
	assert.True(t, ok)

	err = svr.startGrpc(Params.Port)
	assert.Nil(t, err)
	svr.rootCoord.UpdateStateCode(internalpb.StateCode_Initializing)

	// Start from a clean session prefix, then register a fake proxy session
	// (ServerID 100) that the core will pick up through NewProxyClient below.
	etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
	assert.Nil(t, err)
	sessKey := path.Join(rootcoord.Params.EtcdCfg.MetaRootPath, sessionutil.DefaultServiceRoot)
	_, err = etcdCli.Delete(ctx, sessKey, clientv3.WithPrefix())
	assert.Nil(t, err)
	pnb, err := json.Marshal(
		&sessionutil.Session{
			ServerID: 100,
		},
	)
	assert.Nil(t, err)
	_, err = etcdCli.Put(ctx, path.Join(sessKey, typeutil.ProxyRole+"-100"), string(pnb))
	assert.Nil(t, err)

	rootcoord.Params.RootCoordCfg.Address = Params.GetAddress()
	core.SetEtcdClient(etcdCli)
	err = core.Init()
	assert.Nil(t, err)

	// Stub every outbound hook on the core; each records its arguments so the
	// subtests below can assert what the server sent out.
	timeTickArray := make([]typeutil.Timestamp, 0, 16)
	timeTickLock := sync.Mutex{}
	core.SendTimeTick = func(ts typeutil.Timestamp, reason string) error {
		timeTickLock.Lock()
		defer timeTickLock.Unlock()
		t.Logf("send time tick %d", ts)
		timeTickArray = append(timeTickArray, ts)
		return nil
	}
	core.SendDdCreateCollectionReq = func(ctx context.Context, req *internalpb.CreateCollectionRequest, channelNames []string) (map[string][]byte, error) {
		return map[string][]byte{}, nil
	}

	dropCollectionArray := make([]*internalpb.DropCollectionRequest, 0, 16)
	core.SendDdDropCollectionReq = func(ctx context.Context, req *internalpb.DropCollectionRequest, channelNames []string) error {
		t.Logf("Drop Collection %s", req.CollectionName)
		dropCollectionArray = append(dropCollectionArray, req)
		return nil
	}

	createPartitionArray := make([]*internalpb.CreatePartitionRequest, 0, 16)
	core.SendDdCreatePartitionReq = func(ctx context.Context, req *internalpb.CreatePartitionRequest, channelNames []string) error {
		t.Logf("Create Partition %s", req.PartitionName)
		createPartitionArray = append(createPartitionArray, req)
		return nil
	}

	dropPartitionArray := make([]*internalpb.DropPartitionRequest, 0, 16)
	core.SendDdDropPartitionReq = func(ctx context.Context, req *internalpb.DropPartitionRequest, channelNames []string) error {
		t.Logf("Drop Partition %s", req.PartitionName)
		dropPartitionArray = append(dropPartitionArray, req)
		return nil
	}

	core.CallGetRecoveryInfoService = func(ctx context.Context, collID, partID rootcoord.UniqueID) ([]*datapb.SegmentBinlogs, error) {
		return []*datapb.SegmentBinlogs{
			{
				SegmentID: segID,
				NumOfRows: rootcoord.Params.RootCoordCfg.MinSegmentSizeToEnableIndex,
				FieldBinlogs: []*datapb.FieldBinlog{
					{
						FieldID: fieldID,
						Binlogs: []*datapb.Binlog{{LogPath: "file1"}, {LogPath: "file2"}, {LogPath: "file3"}},
					},
				},
			},
		}, nil
	}
	core.CallWatchChannels = func(ctx context.Context, collectionID int64, channelNames []string, startPositions []*commonpb.KeyDataPair) error {
		return nil
	}

	// segs is mutated later by the "show segment" subtest, so guard it.
	var segs []typeutil.UniqueID
	segLock := sync.Mutex{}
	core.CallGetFlushedSegmentsService = func(ctx context.Context, collID, partID typeutil.UniqueID) ([]typeutil.UniqueID, error) {
		segLock.Lock()
		defer segLock.Unlock()
		var ret []typeutil.UniqueID
		ret = append(ret, segs...)
		return ret, nil
	}

	// The fake proxy records every collection whose meta cache gets
	// invalidated; DDL subtests assert on this list.
	collectionMetaCache := make([]string, 0, 16)
	pnm := proxyMock{}
	core.NewProxyClient = func(*sessionutil.Session) (types.Proxy, error) {
		return &pnm, nil
	}
	pnm.invalidateCollectionMetaCache = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
		collectionMetaCache = append(collectionMetaCache, request.CollectionName)
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_Success,
		}, nil
	}

	core.CallReleaseCollectionService = func(ctx context.Context, ts typeutil.Timestamp, dbID typeutil.UniqueID, collectionID typeutil.UniqueID) error {
		return nil
	}
	core.CallReleasePartitionService = func(ctx context.Context, ts typeutil.Timestamp, dbID, collectionID typeutil.UniqueID, partitionIDs []typeutil.UniqueID) error {
		return nil
	}
	core.CallImportService = func(ctx context.Context, req *datapb.ImportTaskRequest) *datapb.ImportTaskResponse {
		return nil
	}
	core.CallAddSegRefLock = func(context.Context, int64, []int64) error {
		return nil
	}
	core.CallReleaseSegRefLock = func(context.Context, int64, []int64) error {
		return nil
	}
	core.CallDropCollectionIndexService = func(ctx context.Context, collID rootcoord.UniqueID) error {
		return nil
	}
	core.CallGetSegmentIndexStateService = func(ctx context.Context, collID rootcoord.UniqueID, indexName string, segIDs []rootcoord.UniqueID) ([]*indexpb.SegmentIndexState, error) {
		return nil, nil
	}

	err = svr.start()
	assert.Nil(t, err)

	svr.rootCoord.UpdateStateCode(internalpb.StateCode_Healthy)

	// From here on the test talks to the server through a real grpc client.
	cli, err := rcc.NewClient(context.Background(), rootcoord.Params.EtcdCfg.MetaRootPath, etcdCli)
	assert.Nil(t, err)

	err = cli.Init()
	assert.Nil(t, err)

	err = cli.Start()
	assert.Nil(t, err)

	t.Run("get component states", func(t *testing.T) {
		req := &internalpb.GetComponentStatesRequest{}
		rsp, err := svr.GetComponentStates(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
	})
	t.Run("get time tick channel", func(t *testing.T) {
		req := &internalpb.GetTimeTickChannelRequest{}
		rsp, err := svr.GetTimeTickChannel(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
	})
	t.Run("get statistics channel", func(t *testing.T) {
		req := &internalpb.GetStatisticsChannelRequest{}
		rsp, err := svr.GetStatisticsChannel(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
	})
	t.Run("alloc time stamp", func(t *testing.T) {
		req := &rootcoordpb.AllocTimestampRequest{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_RequestTSO,
			},
			Count: 1,
		}
		rsp, err := svr.AllocTimestamp(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
	})
	t.Run("alloc id", func(t *testing.T) {
		req := &rootcoordpb.AllocIDRequest{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_RequestID,
			},
			Count: 1,
		}
		rsp, err := svr.AllocID(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
	})
	t.Run("update channel timetick", func(t *testing.T) {
		req := &internalpb.ChannelTimeTickMsg{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_TimeTick,
			},
		}
		status, err := svr.UpdateChannelTimeTick(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
	})
	t.Run("release DQL msg stream", func(t *testing.T) {
		req := &proxypb.ReleaseDQLMessageStreamRequest{}
		rsp, err := svr.ReleaseDQLMessageStream(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.ErrorCode)
	})
	t.Run("show configurations", func(t *testing.T) {
		req := &internalpb.ShowConfigurationsRequest{
			Base: &commonpb.MsgBase{
				MsgType: commonpb.MsgType_WatchQueryChannels,
				MsgID:   rand.Int63(),
			},
			Pattern: "",
		}
		rsp, err := svr.ShowConfigurations(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
	})
	t.Run("get metrics", func(t *testing.T) {
		// An empty request (no metric type) is expected to be rejected.
		req := &milvuspb.GetMetricsRequest{}
		rsp, err := svr.GetMetrics(ctx, req)
		assert.Nil(t, err)
		assert.NotEqual(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
	})
	t.Run("create collection", func(t *testing.T) {
		schema := schemapb.CollectionSchema{
			Name:   collName,
			AutoID: true,
			Fields: []*schemapb.FieldSchema{
				{
					FieldID:      fieldID,
					Name:         fieldName,
					IsPrimaryKey: false,
					DataType:     schemapb.DataType_FloatVector,
					TypeParams:   nil,
					IndexParams: []*commonpb.KeyValuePair{
						{
							Key:   "ik1",
							Value: "iv1",
						},
					},
				},
			},
		}
		sbf, err := proto.Marshal(&schema)
		assert.Nil(t, err)
		req := &milvuspb.CreateCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_CreateCollection,
				MsgID:     100,
				Timestamp: 100,
				SourceID:  100,
			},
			DbName:         dbName,
			CollectionName: collName,
			Schema:         sbf,
		}
		status, err := cli.CreateCollection(ctx, req)
		assert.Nil(t, err)
		colls, err := core.MetaTable.ListCollections(0)
		assert.Nil(t, err)
		assert.Equal(t, 1, len(colls))
		assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
		//assert.Equal(t, commonpb.MsgType_CreateCollection, createCollectionArray[0].Base.MsgType)
		_, has := colls[collName]
		assert.True(t, has)

		// Re-sending a request for an existing collection name must fail.
		req.Base.MsgID = 101
		req.Base.Timestamp = 101
		req.Base.SourceID = 101
		status, err = cli.CreateCollection(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)

		// New collection name but the serialized schema still carries the old
		// name — also expected to fail.
		req.Base.MsgID = 102
		req.Base.Timestamp = 102
		req.Base.SourceID = 102
		req.CollectionName = collName2
		status, err = cli.CreateCollection(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)

		// Align the schema name with the request; creation then succeeds.
		schema.Name = req.CollectionName
		sbf, err = proto.Marshal(&schema)
		assert.Nil(t, err)
		req.Schema = sbf
		req.Base.MsgID = 103
		req.Base.Timestamp = 103
		req.Base.SourceID = 103
		status, err = cli.CreateCollection(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
		colls, err = core.MetaTable.ListCollections(0)
		assert.Nil(t, err)
		assert.Equal(t, 2, len(colls))
		_, has = colls[collName2]
		assert.True(t, has)
	})
	t.Run("has collection", func(t *testing.T) {
		req := &milvuspb.HasCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_HasCollection,
				MsgID:     110,
				Timestamp: 110,
				SourceID:  110,
			},
			DbName:         "testDb",
			CollectionName: collName,
		}
		rsp, err := cli.HasCollection(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
		assert.Equal(t, true, rsp.Value)

		req = &milvuspb.HasCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_HasCollection,
				MsgID:     111,
				Timestamp: 111,
				SourceID:  111,
			},
			DbName:         "testDb",
			CollectionName: "testColl2",
		}
		rsp, err = cli.HasCollection(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
		assert.Equal(t, false, rsp.Value)

		// test time stamp go back: a repeated (non-increasing) timestamp must
		// still be answered successfully.
		req = &milvuspb.HasCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_HasCollection,
				MsgID:     111,
				Timestamp: 111,
				SourceID:  111,
			},
			DbName:         "testDb",
			CollectionName: "testColl2",
		}
		rsp, err = cli.HasCollection(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
		assert.Equal(t, false, rsp.Value)
	})
	t.Run("describe collection", func(t *testing.T) {
		collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
		assert.Nil(t, err)
		req := &milvuspb.DescribeCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_DescribeCollection,
				MsgID:     120,
				Timestamp: 120,
				SourceID:  120,
			},
			DbName:         "testDb",
			CollectionName: collName,
		}
		rsp, err := cli.DescribeCollection(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
		assert.Equal(t, collName, rsp.Schema.Name)
		assert.Equal(t, collMeta.CollectionID, rsp.CollectionID)
	})
	t.Run("show collection", func(t *testing.T) {
		req := &milvuspb.ShowCollectionsRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_ShowCollections,
				MsgID:     130,
				Timestamp: 130,
				SourceID:  130,
			},
			DbName: "testDb",
		}
		rsp, err := cli.ShowCollections(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
		assert.ElementsMatch(t, rsp.CollectionNames, []string{collName, collName2})
		assert.Equal(t, 2, len(rsp.CollectionNames))
	})
	t.Run("create partition", func(t *testing.T) {
		req := &milvuspb.CreatePartitionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_CreatePartition,
				MsgID:     140,
				Timestamp: 140,
				SourceID:  140,
			},
			DbName:         dbName,
			CollectionName: collName,
			PartitionName:  partName,
		}
		status, err := cli.CreatePartition(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
		collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
		assert.Nil(t, err)
		assert.Equal(t, 2, len(collMeta.Partitions))
		partName2, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[1].PartitionID, 0)
		assert.Nil(t, err)
		assert.Equal(t, partName, partName2)
		// The DDL must have invalidated the proxy's meta cache exactly once.
		assert.Equal(t, 1, len(collectionMetaCache))
	})
	t.Run("has partition", func(t *testing.T) {
		req := &milvuspb.HasPartitionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_HasPartition,
				MsgID:     150,
				Timestamp: 150,
				SourceID:  150,
			},
			DbName:         dbName,
			CollectionName: collName,
			PartitionName:  partName,
		}
		rsp, err := cli.HasPartition(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
		assert.Equal(t, true, rsp.Value)
	})
	t.Run("show partition", func(t *testing.T) {
		coll, err := core.MetaTable.GetCollectionByName(collName, 0)
		assert.Nil(t, err)
		req := &milvuspb.ShowPartitionsRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_ShowPartitions,
				MsgID:     160,
				Timestamp: 160,
				SourceID:  160,
			},
			DbName:         "testDb",
			CollectionName: collName,
			CollectionID:   coll.CollectionID,
		}
		rsp, err := cli.ShowPartitions(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
		assert.Equal(t, 2, len(rsp.PartitionNames))
		assert.Equal(t, 2, len(rsp.PartitionIDs))
	})
	t.Run("show segment", func(t *testing.T) {
		coll, err := core.MetaTable.GetCollectionByName(collName, 0)
		assert.Nil(t, err)
		partID := coll.Partitions[1].PartitionID
		_, err = core.MetaTable.GetPartitionNameByID(coll.CollectionID, partID, 0)
		assert.Nil(t, err)
		// Make the flushed-segments stub report exactly one segment.
		segLock.Lock()
		segs = []typeutil.UniqueID{1000}
		segLock.Unlock()
		req := &milvuspb.ShowSegmentsRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_ShowSegments,
				MsgID:     170,
				Timestamp: 170,
				SourceID:  170,
			},
			CollectionID: coll.CollectionID,
			PartitionID:  partID,
		}
		rsp, err := cli.ShowSegments(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, rsp.Status.ErrorCode)
		assert.Equal(t, int64(1000), rsp.SegmentIDs[0])
		assert.Equal(t, 1, len(rsp.SegmentIDs))
	})
	t.Run("drop partition", func(t *testing.T) {
		req := &milvuspb.DropPartitionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_DropPartition,
				MsgID:     220,
				Timestamp: 220,
				SourceID:  220,
			},
			DbName:         dbName,
			CollectionName: collName,
			PartitionName:  partName,
		}
		status, err := cli.DropPartition(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
		collMeta, err := core.MetaTable.GetCollectionByName(collName, 0)
		assert.Nil(t, err)
		// Only the default partition should remain.
		assert.Equal(t, 1, len(collMeta.Partitions))
		partName, err := core.MetaTable.GetPartitionNameByID(collMeta.CollectionID, collMeta.Partitions[0].PartitionID, 0)
		assert.Nil(t, err)
		assert.Equal(t, rootcoord.Params.CommonCfg.DefaultPartitionName, partName)
		assert.Equal(t, 2, len(collectionMetaCache))
	})
	t.Run("drop collection", func(t *testing.T) {
		req := &milvuspb.DropCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_DropCollection,
				MsgID:     230,
				Timestamp: 230,
				SourceID:  230,
			},
			DbName:         "testDb",
			CollectionName: collName,
		}
		status, err := cli.DropCollection(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, 1, len(dropCollectionArray))
		assert.Equal(t, commonpb.MsgType_DropCollection, dropCollectionArray[0].Base.MsgType)
		assert.Equal(t, commonpb.ErrorCode_Success, status.ErrorCode)
		assert.Equal(t, collName, dropCollectionArray[0].CollectionName)
		assert.Equal(t, 3, len(collectionMetaCache))

		// Dropping the same collection again must fail without emitting
		// another dd drop request.
		req = &milvuspb.DropCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_DropCollection,
				MsgID:     231,
				Timestamp: 231,
				SourceID:  231,
			},
			DbName:         "testDb",
			CollectionName: collName,
		}
		status, err = cli.DropCollection(ctx, req)
		assert.Nil(t, err)
		assert.Equal(t, 1, len(dropCollectionArray))
		assert.Equal(t, commonpb.ErrorCode_UnexpectedError, status.ErrorCode)
	})

	// Teardown: stop client and server, then remove this run's etcd sessions.
	err = cli.Stop()
	assert.Nil(t, err)
	err = svr.Stop()
	assert.Nil(t, err)
	_, err = etcdCli.Delete(ctx, sessKey, clientv3.WithPrefix())
	assert.Nil(t, err)
}
// mockCore is a test stub for the RootCoord component. The embedded
// types.RootCoordComponent is nil by default, so only methods explicitly
// overridden on mockCore are safe to call.
type mockCore struct {
	types.RootCoordComponent
}

View File

@ -50,6 +50,7 @@ type BaseKV interface {
Close()
}
//go:generate mockery --name=TxnKV
// TxnKV contains extra txn operations of kv. The extra operations is transactional.
type TxnKV interface {
BaseKV
@ -77,6 +78,7 @@ type MetaKv interface {
CompareVersionAndSwap(key string, version int64, target string, opts ...clientv3.OpOption) (bool, error)
}
//go:generate mockery --name=SnapShotKV
// SnapShotKV is TxnKV for snapshot data. It must save timestamp.
type SnapShotKV interface {
Save(key string, value string, ts typeutil.Timestamp) error

View File

@ -0,0 +1,59 @@
package kv
// TxnKVMock is a hand-written test double for the TxnKV interface. Only Save
// and Remove are pluggable (via SaveF / RemoveF); every other method panics so
// that an unexpected call surfaces immediately in tests.
type TxnKVMock struct {
	TxnKV

	// SaveF backs Save and RemoveF backs Remove; assign them before use,
	// otherwise the call panics on a nil function.
	SaveF   func(key, value string) error
	RemoveF func(key string) error
}

// Load is not implemented; calling it panics.
func (m TxnKVMock) Load(key string) (string, error) {
	panic("implement me")
}

// MultiLoad is not implemented; calling it panics.
func (m TxnKVMock) MultiLoad(keys []string) ([]string, error) {
	panic("implement me")
}

// LoadWithPrefix is not implemented; calling it panics.
func (m TxnKVMock) LoadWithPrefix(key string) ([]string, []string, error) {
	panic("implement me")
}

// Save delegates to the user-supplied SaveF hook.
func (m TxnKVMock) Save(key, value string) error {
	return m.SaveF(key, value)
}

// MultiSave is not implemented; calling it panics.
func (m TxnKVMock) MultiSave(kvs map[string]string) error {
	panic("implement me")
}

// Remove delegates to the user-supplied RemoveF hook.
func (m TxnKVMock) Remove(key string) error {
	return m.RemoveF(key)
}

// MultiRemove is not implemented; calling it panics.
func (m TxnKVMock) MultiRemove(keys []string) error {
	panic("implement me")
}

// RemoveWithPrefix is not implemented; calling it panics.
func (m TxnKVMock) RemoveWithPrefix(key string) error {
	panic("implement me")
}

// Close is not implemented; calling it panics.
func (m TxnKVMock) Close() {
	panic("implement me")
}

// MultiSaveAndRemove is not implemented; calling it panics.
func (m TxnKVMock) MultiSaveAndRemove(saves map[string]string, removals []string) error {
	panic("implement me")
}

// MultiRemoveWithPrefix is not implemented; calling it panics.
func (m TxnKVMock) MultiRemoveWithPrefix(keys []string) error {
	panic("implement me")
}

// MultiSaveAndRemoveWithPrefix is not implemented; calling it panics.
func (m TxnKVMock) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string) error {
	panic("implement me")
}

// NewMockTxnKV returns an empty mock; callers assign SaveF/RemoveF as needed.
func NewMockTxnKV() *TxnKVMock {
	return &TxnKVMock{}
}

View File

@ -0,0 +1,120 @@
// Code generated by mockery v2.14.0. DO NOT EDIT.
package mocks
import mock "github.com/stretchr/testify/mock"
// SnapShotKV is an autogenerated mock type for the SnapShotKV type
type SnapShotKV struct {
mock.Mock
}
// Load provides a mock function with given fields: key, ts
func (_m *SnapShotKV) Load(key string, ts uint64) (string, error) {
ret := _m.Called(key, ts)
var r0 string
if rf, ok := ret.Get(0).(func(string, uint64) string); ok {
r0 = rf(key, ts)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(string, uint64) error); ok {
r1 = rf(key, ts)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LoadWithPrefix provides a mock function with given fields: key, ts
func (_m *SnapShotKV) LoadWithPrefix(key string, ts uint64) ([]string, []string, error) {
ret := _m.Called(key, ts)
var r0 []string
if rf, ok := ret.Get(0).(func(string, uint64) []string); ok {
r0 = rf(key, ts)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]string)
}
}
var r1 []string
if rf, ok := ret.Get(1).(func(string, uint64) []string); ok {
r1 = rf(key, ts)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).([]string)
}
}
var r2 error
if rf, ok := ret.Get(2).(func(string, uint64) error); ok {
r2 = rf(key, ts)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// MultiSave provides a mock function with given fields: kvs, ts
func (_m *SnapShotKV) MultiSave(kvs map[string]string, ts uint64) error {
ret := _m.Called(kvs, ts)
var r0 error
if rf, ok := ret.Get(0).(func(map[string]string, uint64) error); ok {
r0 = rf(kvs, ts)
} else {
r0 = ret.Error(0)
}
return r0
}
// MultiSaveAndRemoveWithPrefix provides a mock function with given fields: saves, removals, ts
func (_m *SnapShotKV) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string, ts uint64) error {
ret := _m.Called(saves, removals, ts)
var r0 error
if rf, ok := ret.Get(0).(func(map[string]string, []string, uint64) error); ok {
r0 = rf(saves, removals, ts)
} else {
r0 = ret.Error(0)
}
return r0
}
// Save provides a mock function with given fields: key, value, ts
func (_m *SnapShotKV) Save(key string, value string, ts uint64) error {
ret := _m.Called(key, value, ts)
var r0 error
if rf, ok := ret.Get(0).(func(string, string, uint64) error); ok {
r0 = rf(key, value, ts)
} else {
r0 = ret.Error(0)
}
return r0
}
type mockConstructorTestingTNewSnapShotKV interface {
mock.TestingT
Cleanup(func())
}
// NewSnapShotKV creates a new instance of SnapShotKV. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewSnapShotKV(t mockConstructorTestingTNewSnapShotKV) *SnapShotKV {
mock := &SnapShotKV{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

218
internal/kv/mocks/TxnKV.go Normal file
View File

@ -0,0 +1,218 @@
// Code generated by mockery v2.14.0. DO NOT EDIT.
package mocks
import mock "github.com/stretchr/testify/mock"
// TxnKV is an autogenerated mock type for the TxnKV type
type TxnKV struct {
mock.Mock
}
// Close provides a mock function with given fields:
func (_m *TxnKV) Close() {
_m.Called()
}
// Load provides a mock function with given fields: key
func (_m *TxnKV) Load(key string) (string, error) {
ret := _m.Called(key)
var r0 string
if rf, ok := ret.Get(0).(func(string) string); ok {
r0 = rf(key)
} else {
r0 = ret.Get(0).(string)
}
var r1 error
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(key)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// LoadWithPrefix provides a mock function with given fields: key
func (_m *TxnKV) LoadWithPrefix(key string) ([]string, []string, error) {
ret := _m.Called(key)
var r0 []string
if rf, ok := ret.Get(0).(func(string) []string); ok {
r0 = rf(key)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]string)
}
}
var r1 []string
if rf, ok := ret.Get(1).(func(string) []string); ok {
r1 = rf(key)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).([]string)
}
}
var r2 error
if rf, ok := ret.Get(2).(func(string) error); ok {
r2 = rf(key)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// MultiLoad provides a mock function with given fields: keys
func (_m *TxnKV) MultiLoad(keys []string) ([]string, error) {
ret := _m.Called(keys)
var r0 []string
if rf, ok := ret.Get(0).(func([]string) []string); ok {
r0 = rf(keys)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]string)
}
}
var r1 error
if rf, ok := ret.Get(1).(func([]string) error); ok {
r1 = rf(keys)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// MultiRemove provides a mock function with given fields: keys
func (_m *TxnKV) MultiRemove(keys []string) error {
ret := _m.Called(keys)
var r0 error
if rf, ok := ret.Get(0).(func([]string) error); ok {
r0 = rf(keys)
} else {
r0 = ret.Error(0)
}
return r0
}
// MultiRemoveWithPrefix provides a mock function with given fields: keys
func (_m *TxnKV) MultiRemoveWithPrefix(keys []string) error {
ret := _m.Called(keys)
var r0 error
if rf, ok := ret.Get(0).(func([]string) error); ok {
r0 = rf(keys)
} else {
r0 = ret.Error(0)
}
return r0
}
// MultiSave provides a mock function with given fields: kvs
func (_m *TxnKV) MultiSave(kvs map[string]string) error {
ret := _m.Called(kvs)
var r0 error
if rf, ok := ret.Get(0).(func(map[string]string) error); ok {
r0 = rf(kvs)
} else {
r0 = ret.Error(0)
}
return r0
}
// MultiSaveAndRemove provides a mock function with given fields: saves, removals
func (_m *TxnKV) MultiSaveAndRemove(saves map[string]string, removals []string) error {
ret := _m.Called(saves, removals)
var r0 error
if rf, ok := ret.Get(0).(func(map[string]string, []string) error); ok {
r0 = rf(saves, removals)
} else {
r0 = ret.Error(0)
}
return r0
}
// MultiSaveAndRemoveWithPrefix provides a mock function with given fields: saves, removals
func (_m *TxnKV) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string) error {
ret := _m.Called(saves, removals)
var r0 error
if rf, ok := ret.Get(0).(func(map[string]string, []string) error); ok {
r0 = rf(saves, removals)
} else {
r0 = ret.Error(0)
}
return r0
}
// Remove provides a mock function with given fields: key
func (_m *TxnKV) Remove(key string) error {
ret := _m.Called(key)
var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok {
r0 = rf(key)
} else {
r0 = ret.Error(0)
}
return r0
}
// RemoveWithPrefix provides a mock function with given fields: key
func (_m *TxnKV) RemoveWithPrefix(key string) error {
ret := _m.Called(key)
var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok {
r0 = rf(key)
} else {
r0 = ret.Error(0)
}
return r0
}
// Save provides a mock function with given fields: key, value
func (_m *TxnKV) Save(key string, value string) error {
ret := _m.Called(key, value)
var r0 error
if rf, ok := ret.Get(0).(func(string, string) error); ok {
r0 = rf(key, value)
} else {
r0 = ret.Error(0)
}
return r0
}
type mockConstructorTestingTNewTxnKV interface {
mock.TestingT
Cleanup(func())
}
// NewTxnKV creates a new instance of TxnKV. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewTxnKV(t mockConstructorTestingTNewTxnKV) *TxnKV {
mock := &TxnKV{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -32,8 +32,6 @@ func TestMetaLogger(t *testing.T) {
WithOperation(DropCollection).Info()
ts.assertMessagesContains("CollectionID=0")
ts.assertMessagesContains("CollectionMeta=eyJUZW5hbnRJRCI6IiIsIkNvbGxlY3Rpb25JRCI6MCwiUGFydGl0aW9ucyI6bnVsbCwiTmFtZSI6IiIsIkRlc2NyaXB0aW9uIjoiIiwiQXV0b0lEIjpmYWxzZSwiRmllbGRzIjpudWxsLCJWaXJ0dWFsQ2hhbm5lbE5hbWVzIjpudWxsLCJQaHlzaWNhbENoYW5uZWxOYW1lcyI6bnVsbCwiU2hhcmRzTnVtIjowLCJTdGFydFBvc2l0aW9ucyI6bnVsbCwiQ3JlYXRlVGltZSI6MCwiQ29uc2lzdGVuY3lMZXZlbCI6MCwiQWxpYXNlcyI6bnVsbCwiRXh0cmEiOm51bGx9")
ts.assertMessagesContains("IndexMeta=eyJUZW5hbnRJRCI6IiIsIkNvbGxlY3Rpb25JRCI6MCwiRmllbGRJRCI6MCwiSW5kZXhJRCI6MCwiSW5kZXhOYW1lIjoiIiwiSXNEZWxldGVkIjpmYWxzZSwiQ3JlYXRlVGltZSI6MCwiVHlwZVBhcmFtcyI6bnVsbCwiSW5kZXhQYXJhbXMiOm51bGx9")
ts.assertMessagesContains("CollectionName=coll")
ts.assertMessagesContains("PartitionID=0")
ts.assertMessagesContains("PartitionName=part")

View File

@ -16,9 +16,11 @@ type RootCoordCatalog interface {
ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error)
CollectionExists(ctx context.Context, collectionID typeutil.UniqueID, ts typeutil.Timestamp) bool
DropCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error
AlterCollection(ctx context.Context, oldColl *model.Collection, newColl *model.Collection, alterType AlterType, ts typeutil.Timestamp) error
CreatePartition(ctx context.Context, partition *model.Partition, ts typeutil.Timestamp) error
DropPartition(ctx context.Context, collectionID typeutil.UniqueID, partitionID typeutil.UniqueID, ts typeutil.Timestamp) error
AlterPartition(ctx context.Context, oldPart *model.Partition, newPart *model.Partition, alterType AlterType, ts typeutil.Timestamp) error
CreateAlias(ctx context.Context, alias *model.Alias, ts typeutil.Timestamp) error
DropAlias(ctx context.Context, alias string, ts typeutil.Timestamp) error
@ -53,6 +55,18 @@ const (
MODIFY
)
// String returns the textual name of the AlterType ("ADD", "DELETE" or
// "MODIFY"); any other value maps to the empty string.
func (t AlterType) String() string {
	switch t {
	case ADD:
		return "ADD"
	case DELETE:
		return "DELETE"
	case MODIFY:
		return "MODIFY"
	default:
		return ""
	}
}
type DataCoordCatalog interface {
ListSegments(ctx context.Context) ([]*datapb.SegmentInfo, error)
AddSegment(ctx context.Context, segment *datapb.SegmentInfo) error

View File

@ -0,0 +1,35 @@
package metastore
import "testing"
// TestAlterType_String checks the AlterType-to-string mapping, including the
// fallback to "" for a value outside the declared enum range.
//
// Fix: the original table left every `name` field empty, so all subtests ran
// as indistinguishable unnamed t.Run("") cases; each case is now named so a
// failure identifies the offending value.
func TestAlterType_String(t *testing.T) {
	tests := []struct {
		name string
		t    AlterType
		want string
	}{
		{
			name: "add",
			t:    ADD,
			want: "ADD",
		},
		{
			name: "delete",
			t:    DELETE,
			want: "DELETE",
		},
		{
			name: "modify",
			t:    MODIFY,
			want: "MODIFY",
		},
		{
			name: "unknown value",
			t:    -1,
			want: "",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.t.String(); got != tt.want {
				t.Errorf("String() = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@ -91,3 +91,27 @@ func (s *collectionDb) Insert(in *dbmodel.Collection) error {
return nil
}
// generateCollectionUpdatesWithoutID flattens every mutable column of a
// collection row into a GORM update map. The surrogate primary key (id)
// is deliberately excluded so an Update can never rewrite it.
func generateCollectionUpdatesWithoutID(in *dbmodel.Collection) map[string]interface{} {
	updates := make(map[string]interface{}, 13)
	updates["tenant_id"] = in.TenantID
	updates["collection_id"] = in.CollectionID
	updates["collection_name"] = in.CollectionName
	updates["description"] = in.Description
	updates["auto_id"] = in.AutoID
	updates["shards_num"] = in.ShardsNum
	updates["start_position"] = in.StartPosition
	updates["consistency_level"] = in.ConsistencyLevel
	updates["status"] = in.Status
	updates["ts"] = in.Ts
	updates["is_deleted"] = in.IsDeleted
	updates["created_at"] = in.CreatedAt
	updates["updated_at"] = in.UpdatedAt
	return updates
}
// Update overwrites all non-ID columns of the collections row whose
// primary key equals in.ID with the values carried by in.
func (s *collectionDb) Update(in *dbmodel.Collection) error {
	updates := generateCollectionUpdatesWithoutID(in)
	return s.db.Model(&dbmodel.Collection{}).Where("id = ?", in.ID).Updates(updates).Error
}

View File

@ -358,8 +358,8 @@ func TestCollection_Insert(t *testing.T) {
// expectation
mock.ExpectBegin()
mock.ExpectExec("INSERT INTO `collections` (`tenant_id`,`collection_id`,`collection_name`,`description`,`auto_id`,`shards_num`,`start_position`,`consistency_level`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`").
WithArgs(collection.TenantID, collection.CollectionID, collection.CollectionName, collection.Description, collection.AutoID, collection.ShardsNum, collection.StartPosition, collection.ConsistencyLevel, collection.Ts, collection.IsDeleted, collection.CreatedAt, collection.UpdatedAt).
mock.ExpectExec("INSERT INTO `collections` (`tenant_id`,`collection_id`,`collection_name`,`description`,`auto_id`,`shards_num`,`start_position`,`consistency_level`,`status`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`").
WithArgs(collection.TenantID, collection.CollectionID, collection.CollectionName, collection.Description, collection.AutoID, collection.ShardsNum, collection.StartPosition, collection.ConsistencyLevel, collection.Status, collection.Ts, collection.IsDeleted, collection.CreatedAt, collection.UpdatedAt).
WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectCommit()
@ -386,8 +386,8 @@ func TestCollection_Insert_Error(t *testing.T) {
// expectation
mock.ExpectBegin()
mock.ExpectExec("INSERT INTO `collections` (`tenant_id`,`collection_id`,`collection_name`,`description`,`auto_id`,`shards_num`,`start_position`,`consistency_level`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`").
WithArgs(collection.TenantID, collection.CollectionID, collection.CollectionName, collection.Description, collection.AutoID, collection.ShardsNum, collection.StartPosition, collection.ConsistencyLevel, collection.Ts, collection.IsDeleted, collection.CreatedAt, collection.UpdatedAt).
mock.ExpectExec("INSERT INTO `collections` (`tenant_id`,`collection_id`,`collection_name`,`description`,`auto_id`,`shards_num`,`start_position`,`consistency_level`,`status`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?) ON DUPLICATE KEY UPDATE `id`=`id`").
WithArgs(collection.TenantID, collection.CollectionID, collection.CollectionName, collection.Description, collection.AutoID, collection.ShardsNum, collection.StartPosition, collection.ConsistencyLevel, collection.Status, collection.Ts, collection.IsDeleted, collection.CreatedAt, collection.UpdatedAt).
WillReturnError(errors.New("test error"))
mock.ExpectRollback()
@ -423,3 +423,61 @@ func ErrorExec(f func()) {
f()
mock.ExpectRollback()
}
// Test_collectionDb_Update exercises collectionDb.Update against the shared
// sqlmock connection, pinning the exact UPDATE statement (note the SET
// columns appear in alphabetical order, as emitted for a map of updates)
// and its bound arguments.
func Test_collectionDb_Update(t *testing.T) {
	t.Run("normal case", func(t *testing.T) {
		var collection = &dbmodel.Collection{
			TenantID:         "",
			CollectionID:     collID1,
			CollectionName:   "test_collection_name_1",
			Description:      "",
			AutoID:           false,
			ShardsNum:        int32(2),
			StartPosition:    "",
			ConsistencyLevel: int32(commonpb.ConsistencyLevel_Eventually),
			Ts:               ts,
			IsDeleted:        false,
			CreatedAt:        time.Now(),
			UpdatedAt:        time.Now(),
		}

		// expectation: one UPDATE inside a committed transaction
		mock.ExpectBegin()
		mock.ExpectExec("UPDATE `collections` SET `auto_id`=?,`collection_id`=?,`collection_name`=?,`consistency_level`=?,`created_at`=?,`description`=?,`is_deleted`=?,`shards_num`=?,`start_position`=?,`status`=?,`tenant_id`=?,`ts`=?,`updated_at`=? WHERE id = ?").
			WithArgs(collection.AutoID, collection.CollectionID, collection.CollectionName, collection.ConsistencyLevel, collection.CreatedAt, collection.Description, collection.IsDeleted, collection.ShardsNum, collection.StartPosition, collection.Status, collection.TenantID, collection.Ts, collection.UpdatedAt, collection.ID).
			WillReturnResult(sqlmock.NewResult(1, 1))
		mock.ExpectCommit()

		// actual
		err := collTestDb.Update(collection)
		assert.Nil(t, err)
	})

	t.Run("error", func(t *testing.T) {
		var collection = &dbmodel.Collection{
			TenantID:         "",
			CollectionID:     collID1,
			CollectionName:   "test_collection_name_1",
			Description:      "",
			AutoID:           false,
			ShardsNum:        int32(2),
			StartPosition:    "",
			ConsistencyLevel: int32(commonpb.ConsistencyLevel_Eventually),
			Ts:               ts,
			IsDeleted:        false,
			CreatedAt:        time.Now(),
			UpdatedAt:        time.Now(),
		}

		// expectation: the exec fails and the transaction rolls back
		mock.ExpectBegin()
		mock.ExpectExec("UPDATE `collections` SET `auto_id`=?,`collection_id`=?,`collection_name`=?,`consistency_level`=?,`created_at`=?,`description`=?,`is_deleted`=?,`shards_num`=?,`start_position`=?,`status`=?,`tenant_id`=?,`ts`=?,`updated_at`=? WHERE id = ?").
			WithArgs(collection.AutoID, collection.CollectionID, collection.CollectionName, collection.ConsistencyLevel, collection.CreatedAt, collection.Description, collection.IsDeleted, collection.ShardsNum, collection.StartPosition, collection.Status, collection.TenantID, collection.Ts, collection.UpdatedAt, collection.ID).
			WillReturnError(errors.New("error mock Update"))
		mock.ExpectRollback()

		// actual: the driver error must surface to the caller
		err := collTestDb.Update(collection)
		assert.Error(t, err)
	})
}

View File

@ -33,3 +33,24 @@ func (s *partitionDb) Insert(in []*dbmodel.Partition) error {
return nil
}
// generatePartitionUpdatesWithoutID flattens every mutable column of a
// partition row into a GORM update map; the surrogate primary key (id)
// is deliberately excluded so an Update can never rewrite it.
func generatePartitionUpdatesWithoutID(in *dbmodel.Partition) map[string]interface{} {
	updates := make(map[string]interface{}, 10)
	updates["tenant_id"] = in.TenantID
	updates["partition_id"] = in.PartitionID
	updates["partition_name"] = in.PartitionName
	updates["partition_created_timestamp"] = in.PartitionCreatedTimestamp
	updates["collection_id"] = in.CollectionID
	updates["status"] = in.Status
	updates["ts"] = in.Ts
	updates["is_deleted"] = in.IsDeleted
	updates["created_at"] = in.CreatedAt
	updates["updated_at"] = in.UpdatedAt
	return updates
}
// Update overwrites all non-ID columns of the partitions row whose
// primary key equals in.ID with the values carried by in.
func (s *partitionDb) Update(in *dbmodel.Partition) error {
	updates := generatePartitionUpdatesWithoutID(in)
	return s.db.Model(&dbmodel.Partition{}).Where("id = ?", in.ID).Updates(updates).Error
}

View File

@ -65,8 +65,8 @@ func TestPartition_Insert(t *testing.T) {
// expectation
mock.ExpectBegin()
mock.ExpectExec("INSERT INTO `partitions` (`tenant_id`,`partition_id`,`partition_name`,`partition_created_timestamp`,`collection_id`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?)").
WithArgs(partitions[0].TenantID, partitions[0].PartitionID, partitions[0].PartitionName, partitions[0].PartitionCreatedTimestamp, partitions[0].CollectionID, partitions[0].Ts, partitions[0].IsDeleted, partitions[0].CreatedAt, partitions[0].UpdatedAt).
mock.ExpectExec("INSERT INTO `partitions` (`tenant_id`,`partition_id`,`partition_name`,`partition_created_timestamp`,`collection_id`,`status`,`ts`,`is_deleted`,`created_at`,`updated_at`) VALUES (?,?,?,?,?,?,?,?,?,?)").
WithArgs(partitions[0].TenantID, partitions[0].PartitionID, partitions[0].PartitionName, partitions[0].PartitionCreatedTimestamp, partitions[0].CollectionID, partitions[0].Status, partitions[0].Ts, partitions[0].IsDeleted, partitions[0].CreatedAt, partitions[0].UpdatedAt).
WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectCommit()
@ -101,3 +101,54 @@ func TestPartition_Insert_Error(t *testing.T) {
err := partitionTestDb.Insert(partitions)
assert.Error(t, err)
}
// Test_partitionDb_Update exercises partitionDb.Update against the shared
// sqlmock connection, pinning the exact UPDATE statement (SET columns in
// alphabetical order, as emitted for a map of updates) and its bound
// arguments for both the success and the driver-error paths.
func Test_partitionDb_Update(t *testing.T) {
	t.Run("normal case", func(t *testing.T) {
		partition := &dbmodel.Partition{
			ID:                        100,
			TenantID:                  tenantID,
			PartitionID:               fieldID1,
			PartitionName:             "test_field_1",
			PartitionCreatedTimestamp: typeutil.Timestamp(1000),
			CollectionID:              collID1,
			Ts:                        ts,
			IsDeleted:                 false,
			CreatedAt:                 time.Now(),
			UpdatedAt:                 time.Now(),
		}
		mock.ExpectBegin()
		mock.ExpectExec("UPDATE `partitions` SET `collection_id`=?,`created_at`=?,`is_deleted`=?,`partition_created_timestamp`=?,`partition_id`=?,`partition_name`=?,`status`=?,`tenant_id`=?,`ts`=?,`updated_at`=? WHERE id = ?").
			WithArgs(partition.CollectionID, partition.CreatedAt, partition.IsDeleted, partition.PartitionCreatedTimestamp, partition.PartitionID, partition.PartitionName, partition.Status, partition.TenantID, partition.Ts, partition.UpdatedAt, partition.ID).
			WillReturnResult(sqlmock.NewResult(1, 1))
		mock.ExpectCommit()
		err := partitionTestDb.Update(partition)
		assert.NoError(t, err)
	})

	t.Run("error case", func(t *testing.T) {
		partition := &dbmodel.Partition{
			ID:                        100,
			TenantID:                  tenantID,
			PartitionID:               fieldID1,
			PartitionName:             "test_field_1",
			PartitionCreatedTimestamp: typeutil.Timestamp(1000),
			CollectionID:              collID1,
			Ts:                        ts,
			IsDeleted:                 false,
			CreatedAt:                 time.Now(),
			UpdatedAt:                 time.Now(),
		}
		// expectation: the exec fails, the transaction rolls back, and the
		// error must surface to the caller
		mock.ExpectBegin()
		mock.ExpectExec("UPDATE `partitions` SET `collection_id`=?,`created_at`=?,`is_deleted`=?,`partition_created_timestamp`=?,`partition_id`=?,`partition_name`=?,`status`=?,`tenant_id`=?,`ts`=?,`updated_at`=? WHERE id = ?").
			WithArgs(partition.CollectionID, partition.CreatedAt, partition.IsDeleted, partition.PartitionCreatedTimestamp, partition.PartitionID, partition.PartitionName, partition.Status, partition.TenantID, partition.Ts, partition.UpdatedAt, partition.ID).
			WillReturnError(errors.New("error mock Update Partition"))
		mock.ExpectRollback()
		err := partitionTestDb.Update(partition)
		assert.Error(t, err)
	})
}

View File

@ -21,6 +21,7 @@ type Collection struct {
ShardsNum int32 `gorm:"shards_num"`
StartPosition string `gorm:"start_position"`
ConsistencyLevel int32 `gorm:"consistency_level"`
Status int32 `gorm:"status"`
Ts typeutil.Timestamp `gorm:"ts"`
IsDeleted bool `gorm:"is_deleted"`
CreatedAt time.Time `gorm:"created_at"`
@ -39,6 +40,7 @@ type ICollectionDb interface {
Get(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) (*Collection, error)
GetCollectionIDByName(tenantID string, collectionName string, ts typeutil.Timestamp) (typeutil.UniqueID, error)
Insert(in *Collection) error
Update(in *Collection) error
}
// model <---> db

View File

@ -116,6 +116,20 @@ func (_m *ICollectionDb) ListCollectionIDTs(tenantID string, ts uint64) ([]*dbmo
return r0, r1
}
// Update provides a mock function with given fields: in
// NOTE(review): this method looks mockery-generated — regenerate from the
// ICollectionDb interface instead of hand-editing.
func (_m *ICollectionDb) Update(in *dbmodel.Collection) error {
	ret := _m.Called(in)

	// Prefer a registered function-valued return so tests can derive the
	// result from the argument; otherwise fall back to the fixed error.
	var r0 error
	if rf, ok := ret.Get(0).(func(*dbmodel.Collection) error); ok {
		r0 = rf(in)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}
type mockConstructorTestingTNewICollectionDb interface {
mock.TestingT
Cleanup(func())

View File

@ -49,6 +49,20 @@ func (_m *IPartitionDb) Insert(in []*dbmodel.Partition) error {
return r0
}
// Update provides a mock function with given fields: in
// NOTE(review): this method looks mockery-generated — regenerate from the
// IPartitionDb interface instead of hand-editing.
func (_m *IPartitionDb) Update(in *dbmodel.Partition) error {
	ret := _m.Called(in)

	// Prefer a registered function-valued return so tests can derive the
	// result from the argument; otherwise fall back to the fixed error.
	var r0 error
	if rf, ok := ret.Get(0).(func(*dbmodel.Partition) error); ok {
		r0 = rf(in)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}
type mockConstructorTestingTNewIPartitionDb interface {
mock.TestingT
Cleanup(func())

View File

@ -12,6 +12,29 @@ type ISegmentIndexDb struct {
mock.Mock
}
// Get provides a mock function with given fields: tenantID, collectionID, buildID
// NOTE(review): mockery-generated — regenerate from the interface instead
// of hand-editing.
func (_m *ISegmentIndexDb) Get(tenantID string, collectionID int64, buildID int64) ([]*dbmodel.SegmentIndexResult, error) {
	ret := _m.Called(tenantID, collectionID, buildID)

	var r0 []*dbmodel.SegmentIndexResult
	if rf, ok := ret.Get(0).(func(string, int64, int64) []*dbmodel.SegmentIndexResult); ok {
		r0 = rf(tenantID, collectionID, buildID)
	} else {
		// nil guard: type-asserting a nil stored return would panic
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*dbmodel.SegmentIndexResult)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string, int64, int64) error); ok {
		r1 = rf(tenantID, collectionID, buildID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// Insert provides a mock function with given fields: in
func (_m *ISegmentIndexDb) Insert(in []*dbmodel.SegmentIndex) error {
ret := _m.Called(in)
@ -26,6 +49,29 @@ func (_m *ISegmentIndexDb) Insert(in []*dbmodel.SegmentIndex) error {
return r0
}
// List provides a mock function with given fields: tenantID
// NOTE(review): mockery-generated — regenerate from the interface instead
// of hand-editing.
func (_m *ISegmentIndexDb) List(tenantID string) ([]*dbmodel.SegmentIndexResult, error) {
	ret := _m.Called(tenantID)

	var r0 []*dbmodel.SegmentIndexResult
	if rf, ok := ret.Get(0).(func(string) []*dbmodel.SegmentIndexResult); ok {
		r0 = rf(tenantID)
	} else {
		// nil guard: type-asserting a nil stored return would panic
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*dbmodel.SegmentIndexResult)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string) error); ok {
		r1 = rf(tenantID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}
// MarkDeleted provides a mock function with given fields: tenantID, in
func (_m *ISegmentIndexDb) MarkDeleted(tenantID string, in []*dbmodel.SegmentIndex) error {
ret := _m.Called(tenantID, in)
@ -40,6 +86,20 @@ func (_m *ISegmentIndexDb) MarkDeleted(tenantID string, in []*dbmodel.SegmentInd
return r0
}
// MarkDeletedByBuildID provides a mock function with given fields: tenantID, idxID
// NOTE(review): mockery-generated — regenerate from the interface instead
// of hand-editing.
func (_m *ISegmentIndexDb) MarkDeletedByBuildID(tenantID string, idxID int64) error {
	ret := _m.Called(tenantID, idxID)

	// Prefer a registered function-valued return; otherwise the fixed error.
	var r0 error
	if rf, ok := ret.Get(0).(func(string, int64) error); ok {
		r0 = rf(tenantID, idxID)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}
// MarkDeletedByCollectionID provides a mock function with given fields: tenantID, collID
func (_m *ISegmentIndexDb) MarkDeletedByCollectionID(tenantID string, collID int64) error {
ret := _m.Called(tenantID, collID)
@ -54,26 +114,12 @@ func (_m *ISegmentIndexDb) MarkDeletedByCollectionID(tenantID string, collID int
return r0
}
// MarkDeletedByIndexID provides a mock function with given fields: tenantID, idxID
func (_m *ISegmentIndexDb) MarkDeletedByIndexID(tenantID string, idxID int64) error {
ret := _m.Called(tenantID, idxID)
var r0 error
if rf, ok := ret.Get(0).(func(string, int64) error); ok {
r0 = rf(tenantID, idxID)
} else {
r0 = ret.Error(0)
}
return r0
}
// Upsert provides a mock function with given fields: in
func (_m *ISegmentIndexDb) Upsert(in []*dbmodel.SegmentIndex) error {
// Update provides a mock function with given fields: in
func (_m *ISegmentIndexDb) Update(in *dbmodel.SegmentIndex) error {
ret := _m.Called(in)
var r0 error
if rf, ok := ret.Get(0).(func([]*dbmodel.SegmentIndex) error); ok {
if rf, ok := ret.Get(0).(func(*dbmodel.SegmentIndex) error); ok {
r0 = rf(in)
} else {
r0 = ret.Error(0)

View File

@ -14,6 +14,7 @@ type Partition struct {
PartitionName string `gorm:"partition_name"`
PartitionCreatedTimestamp uint64 `gorm:"partition_created_timestamp"`
CollectionID int64 `gorm:"collection_id"`
Status int32 `gorm:"status"`
Ts typeutil.Timestamp `gorm:"ts"`
IsDeleted bool `gorm:"is_deleted"`
CreatedAt time.Time `gorm:"created_at"`
@ -28,6 +29,7 @@ func (v Partition) TableName() string {
type IPartitionDb interface {
GetByCollectionID(tenantID string, collectionID typeutil.UniqueID, ts typeutil.Timestamp) ([]*Partition, error)
Insert(in []*Partition) error
Update(in *Partition) error
//MarkDeleted(tenantID string, collID typeutil.UniqueID) error
}

View File

@ -5,6 +5,11 @@ import (
"encoding/json"
"fmt"
"runtime"
"time"
"github.com/milvus-io/milvus/internal/util/tsoutil"
"github.com/milvus-io/milvus/internal/metastore"
"github.com/milvus-io/milvus/internal/util"
@ -56,6 +61,7 @@ func (tc *Catalog) CreateCollection(ctx context.Context, collection *model.Colle
ShardsNum: collection.ShardsNum,
StartPosition: startPositionsStr,
ConsistencyLevel: int32(collection.ConsistencyLevel),
Status: int32(collection.State),
Ts: ts,
})
if err != nil {
@ -375,6 +381,47 @@ func (tc *Catalog) DropCollection(ctx context.Context, collection *model.Collect
})
}
// alterModifyCollection rewrites the stored collection row with the values
// carried by newColl; the tenant is taken from the request context and the
// row identity (tenant id / collection id) must not change.
func (tc *Catalog) alterModifyCollection(ctx context.Context, oldColl *model.Collection, newColl *model.Collection, ts typeutil.Timestamp) error {
	if oldColl.TenantID != newColl.TenantID || oldColl.CollectionID != newColl.CollectionID {
		return fmt.Errorf("altering tenant id or collection id is forbidden")
	}

	// Start positions are stored as a JSON-encoded string column.
	startPositions := ""
	if newColl.StartPositions != nil {
		marshaled, err := json.Marshal(newColl.StartPositions)
		if err != nil {
			return fmt.Errorf("failed to marshal start positions: %s", err.Error())
		}
		startPositions = string(marshaled)
	}

	// ParseTS error is intentionally discarded — TODO confirm CreateTime is
	// always a tso-generated timestamp.
	createdAt, _ := tsoutil.ParseTS(newColl.CreateTime)

	updated := &dbmodel.Collection{
		TenantID:         contextutil.TenantID(ctx),
		CollectionID:     newColl.CollectionID,
		CollectionName:   newColl.Name,
		Description:      newColl.Description,
		AutoID:           newColl.AutoID,
		ShardsNum:        newColl.ShardsNum,
		StartPosition:    startPositions,
		ConsistencyLevel: int32(newColl.ConsistencyLevel),
		Status:           int32(newColl.State),
		Ts:               ts,
		CreatedAt:        createdAt,
		UpdatedAt:        time.Now(),
	}
	return tc.metaDomain.CollectionDb(ctx).Update(updated)
}
// AlterCollection applies a collection mutation. Only the MODIFY alter type
// is supported; every other type is rejected with an error.
func (tc *Catalog) AlterCollection(ctx context.Context, oldColl *model.Collection, newColl *model.Collection, alterType metastore.AlterType, ts typeutil.Timestamp) error {
	if alterType != metastore.MODIFY {
		return fmt.Errorf("altering collection doesn't support %s", alterType.String())
	}
	return tc.alterModifyCollection(ctx, oldColl, newColl, ts)
}
func (tc *Catalog) CreatePartition(ctx context.Context, partition *model.Partition, ts typeutil.Timestamp) error {
tenantID := contextutil.TenantID(ctx)
@ -384,6 +431,7 @@ func (tc *Catalog) CreatePartition(ctx context.Context, partition *model.Partiti
PartitionName: partition.PartitionName,
PartitionCreatedTimestamp: partition.PartitionCreatedTimestamp,
CollectionID: partition.CollectionID,
Status: int32(partition.State),
Ts: ts,
}
err := tc.metaDomain.PartitionDb(ctx).Insert([]*dbmodel.Partition{p})
@ -414,6 +462,30 @@ func (tc *Catalog) DropPartition(ctx context.Context, collectionID typeutil.Uniq
return nil
}
// alterModifyPartition rewrites the stored partition row with the values
// carried by newPart; the tenant is taken from the request context.
//
// Fix: validate that the row identity does not change, matching both
// alterModifyCollection in this catalog and the kv catalog's
// alterModifyPartition — previously a mismatched oldPart/newPart pair was
// silently accepted.
func (tc *Catalog) alterModifyPartition(ctx context.Context, oldPart *model.Partition, newPart *model.Partition, ts typeutil.Timestamp) error {
	if oldPart.CollectionID != newPart.CollectionID || oldPart.PartitionID != newPart.PartitionID {
		return fmt.Errorf("altering collection id or partition id is forbidden")
	}
	// ParseTS error is intentionally discarded — TODO confirm the created
	// timestamp is always tso-generated.
	createdAt, _ := tsoutil.ParseTS(newPart.PartitionCreatedTimestamp)
	p := &dbmodel.Partition{
		TenantID:                  contextutil.TenantID(ctx),
		PartitionID:               newPart.PartitionID,
		PartitionName:             newPart.PartitionName,
		PartitionCreatedTimestamp: newPart.PartitionCreatedTimestamp,
		CollectionID:              newPart.CollectionID,
		Status:                    int32(newPart.State),
		Ts:                        ts,
		IsDeleted:                 false, // an alter never tombstones the row
		CreatedAt:                 createdAt,
		UpdatedAt:                 time.Now(),
	}
	return tc.metaDomain.PartitionDb(ctx).Update(p)
}
// AlterPartition applies a partition mutation. Only the MODIFY alter type
// is supported; every other type is rejected with an error.
func (tc *Catalog) AlterPartition(ctx context.Context, oldPart *model.Partition, newPart *model.Partition, alterType metastore.AlterType, ts typeutil.Timestamp) error {
	if alterType != metastore.MODIFY {
		return fmt.Errorf("altering partition doesn't support: %s", alterType.String())
	}
	return tc.alterModifyPartition(ctx, oldPart, newPart, ts)
}
func (tc *Catalog) CreateAlias(ctx context.Context, alias *model.Alias, ts typeutil.Timestamp) error {
tenantID := contextutil.TenantID(ctx)

View File

@ -8,6 +8,10 @@ import (
"testing"
"time"
"github.com/milvus-io/milvus/internal/metastore"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel"
"github.com/milvus-io/milvus/internal/metastore/db/dbmodel/mocks"
@ -765,6 +769,62 @@ func TestTableCatalog_DropCollection_TsNot0_PartitionInsertError(t *testing.T) {
require.Error(t, gotErr)
}
// TestCatalog_AlterCollection covers the happy path: a MODIFY alter on a
// collection delegates to the collection DAO's Update exactly once.
func TestCatalog_AlterCollection(t *testing.T) {
	coll := &model.Collection{
		TenantID:     tenantID,
		CollectionID: collID1,
		Name:         collName1,
		State:        pb.CollectionState_CollectionCreated,
		Aliases:      []string{collAlias1, collAlias2},
	}
	newColl := &model.Collection{
		TenantID:     tenantID,
		CollectionID: collID1,
		Name:         collName1,
		State:        pb.CollectionState_CollectionDropping,
		Aliases:      []string{collAlias1, collAlias2},
	}
	// shared package-level mock; .Once() keeps expectations test-scoped
	collDbMock.On("Update", mock.Anything).Return(nil).Once()
	gotErr := mockCatalog.AlterCollection(ctx, coll, newColl, metastore.MODIFY, ts)
	require.NoError(t, gotErr)
}
// TestTableCatalog_AlterCollection_TsNot0_AlterTypeError verifies that the
// unsupported ADD and DELETE alter types are rejected with an error before
// any DAO call is attempted.
func TestTableCatalog_AlterCollection_TsNot0_AlterTypeError(t *testing.T) {
	coll := &model.Collection{
		TenantID:     tenantID,
		CollectionID: collID1,
		Name:         collName1,
		State:        pb.CollectionState_CollectionCreated,
		Aliases:      []string{collAlias1, collAlias2},
	}
	gotErr := mockCatalog.AlterCollection(ctx, coll, coll, metastore.ADD, ts)
	require.Error(t, gotErr)
	gotErr = mockCatalog.AlterCollection(ctx, coll, coll, metastore.DELETE, ts)
	require.Error(t, gotErr)
}
// TestCatalog_AlterCollection_TsNot0_CollInsertError verifies that a DAO
// Update failure is propagated by AlterCollection.
func TestCatalog_AlterCollection_TsNot0_CollInsertError(t *testing.T) {
	coll := &model.Collection{
		TenantID:     tenantID,
		CollectionID: collID1,
		Name:         collName1,
		State:        pb.CollectionState_CollectionCreated,
		Aliases:      []string{collAlias1, collAlias2},
	}

	// expectation: the DAO reports a failure for this one call
	errTest := errors.New("test error")
	collDbMock.On("Update", mock.Anything).Return(errTest).Once()

	// actual
	gotErr := mockCatalog.AlterCollection(ctx, coll, coll, metastore.MODIFY, ts)
	require.Error(t, gotErr)
}
func TestTableCatalog_CreatePartition(t *testing.T) {
partition := &model.Partition{
PartitionID: partitionID1,
@ -816,6 +876,63 @@ func TestTableCatalog_DropPartition_TsNot0_PartitionInsertError(t *testing.T) {
gotErr := mockCatalog.DropPartition(ctx, collID1, partitionID1, ts)
require.Error(t, gotErr)
}
// TestCatalog_AlterPartition covers the happy path: a MODIFY alter on a
// partition delegates to the partition DAO's Update exactly once.
func TestCatalog_AlterPartition(t *testing.T) {
	partition := &model.Partition{
		PartitionID:               partitionID1,
		PartitionName:             "test_partition_name_1",
		PartitionCreatedTimestamp: 1,
		CollectionID:              collID1,
		State:                     pb.PartitionState_PartitionCreated,
	}
	newPartition := &model.Partition{
		PartitionID:               partitionID1,
		PartitionName:             "test_partition_name_1",
		PartitionCreatedTimestamp: 1,
		CollectionID:              collID1,
		State:                     pb.PartitionState_PartitionDropping,
	}
	// shared package-level mock; .Once() keeps expectations test-scoped
	partitionDbMock.On("Update", mock.Anything).Return(nil).Once()
	gotErr := mockCatalog.AlterPartition(ctx, partition, newPartition, metastore.MODIFY, ts)
	require.NoError(t, gotErr)
}
// TestCatalog_AlterPartition_TsNot0_AlterTypeError verifies that the
// unsupported ADD and DELETE alter types are rejected with an error before
// any DAO call is attempted.
func TestCatalog_AlterPartition_TsNot0_AlterTypeError(t *testing.T) {
	partition := &model.Partition{
		PartitionID:               partitionID1,
		PartitionName:             "test_partition_name_1",
		PartitionCreatedTimestamp: 1,
		CollectionID:              collID1,
		State:                     pb.PartitionState_PartitionCreated,
	}
	gotErr := mockCatalog.AlterPartition(ctx, partition, partition, metastore.ADD, ts)
	require.Error(t, gotErr)
	gotErr = mockCatalog.AlterPartition(ctx, partition, partition, metastore.DELETE, ts)
	require.Error(t, gotErr)
}
// TestCatalog_AlterPartition_TsNot0_PartitionInsertError verifies that a
// DAO Update failure is propagated by AlterPartition.
func TestCatalog_AlterPartition_TsNot0_PartitionInsertError(t *testing.T) {
	partition := &model.Partition{
		PartitionID:               partitionID1,
		PartitionName:             "test_partition_name_1",
		PartitionCreatedTimestamp: 1,
		CollectionID:              collID1,
		State:                     pb.PartitionState_PartitionCreated,
	}

	// expectation: the DAO reports a failure for this one call
	errTest := errors.New("test error")
	partitionDbMock.On("Update", mock.Anything).Return(errTest).Once()

	// actual
	gotErr := mockCatalog.AlterPartition(ctx, partition, partition, metastore.MODIFY, ts)
	require.Error(t, gotErr)
}
func TestTableCatalog_CreateAlias(t *testing.T) {
alias := &model.Alias{
CollectionID: collID1,

View File

@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"github.com/milvus-io/milvus/internal/metastore"
"github.com/milvus-io/milvus/internal/util/crypto"
"github.com/milvus-io/milvus/internal/util"
@ -24,6 +25,10 @@ import (
"github.com/milvus-io/milvus/internal/util/typeutil"
)
const (
maxTxnNum = 64
)
// prefix/collection/collection_id -> CollectionInfo
// prefix/partitions/collection_id/partition_id -> PartitionInfo
// prefix/aliases/alias_name -> AliasInfo
@ -72,7 +77,13 @@ func buildKvs(keys, values []string) (map[string]string, error) {
return ret, nil
}
// TODO: atomicity should be promised outside.
// min returns the smaller of its two int arguments.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
func batchSave(snapshot kv.SnapShotKV, maxTxnNum int, kvs map[string]string, ts typeutil.Timestamp) error {
keys := make([]string, 0, len(kvs))
values := make([]string, 0, len(kvs))
@ -80,19 +91,12 @@ func batchSave(snapshot kv.SnapShotKV, maxTxnNum int, kvs map[string]string, ts
keys = append(keys, k)
values = append(values, v)
}
min := func(a, b int) int {
if a < b {
return a
}
return b
}
for i := 0; i < len(kvs); i = i + maxTxnNum {
end := min(i+maxTxnNum, len(keys))
batch, err := buildKvs(keys[i:end], values[i:end])
if err != nil {
return err
}
// TODO: atomicity is not promised. Garbage will be generated.
if err := snapshot.MultiSave(batch, ts); err != nil {
return err
}
@ -100,16 +104,43 @@ func batchSave(snapshot kv.SnapShotKV, maxTxnNum int, kvs map[string]string, ts
return nil
}
// batchMultiSaveAndRemoveWithPrefix first persists all saves, then removes
// the given prefixes, splitting the removals into chunks of at most
// maxTxnNum to stay under the per-transaction key limit.
func batchMultiSaveAndRemoveWithPrefix(snapshot kv.SnapShotKV, maxTxnNum int, saves map[string]string, removals []string, ts typeutil.Timestamp) error {
	if err := batchSave(snapshot, maxTxnNum, saves, ts); err != nil {
		return err
	}
	for begin := 0; begin < len(removals); begin += maxTxnNum {
		end := min(begin+maxTxnNum, len(removals))
		if err := snapshot.MultiSaveAndRemoveWithPrefix(nil, removals[begin:end], ts); err != nil {
			return err
		}
	}
	return nil
}
func (kc *Catalog) CreateCollection(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error {
if coll.State != pb.CollectionState_CollectionCreating {
return fmt.Errorf("cannot create collection with state: %s, collection: %s", coll.State.String(), coll.Name)
}
k1 := buildCollectionKey(coll.CollectionID)
collInfo := model.MarshalCollectionModel(coll)
v1, err := proto.Marshal(collInfo)
if err != nil {
log.Error("create collection marshal fail", zap.String("key", k1), zap.Error(err))
return fmt.Errorf("failed to marshal collection info: %s", err.Error())
}
// Due to the limit of etcd txn number, we must split these kvs into several batches.
// Save collection key first, and the state of collection is creating.
// If we save collection key with error, then no garbage will be generated and error will be raised.
// If we succeeded to save collection but failed to save other related keys, the garbage meta can be removed
// outside and the collection won't be seen by any others (since it's of creating state).
// However, if we save other keys first, there is no chance to remove the intermediate meta.
if err := kc.Snapshot.Save(k1, string(v1), ts); err != nil {
return err
}
kvs := map[string]string{k1: string(v1)}
kvs := map[string]string{}
// save partition info to newly path.
for _, partition := range coll.Partitions {
@ -135,8 +166,8 @@ func (kc *Catalog) CreateCollection(ctx context.Context, coll *model.Collection,
kvs[k] = string(v)
}
// TODO: atomicity should be promised outside.
maxTxnNum := 64
// Though batchSave is not atomic enough, we can promise the atomicity outside.
// Recovering from failure, if we found collection is creating, we should removing all these related meta.
return batchSave(kc.Snapshot, maxTxnNum, kvs, ts)
}
@ -145,7 +176,7 @@ func (kc *Catalog) loadCollection(ctx context.Context, collectionID typeutil.Uni
collVal, err := kc.Snapshot.Load(collKey, ts)
if err != nil {
log.Error("get collection meta fail", zap.String("key", collKey), zap.Error(err))
return nil, err
return nil, fmt.Errorf("can't find collection: %d", collectionID)
}
collMeta := &pb.CollectionInfo{}
@ -334,39 +365,80 @@ func (kc *Catalog) AlterAlias(ctx context.Context, alias *model.Alias, ts typeut
}
func (kc *Catalog) DropCollection(ctx context.Context, collectionInfo *model.Collection, ts typeutil.Timestamp) error {
delMetakeysSnap := []string{
fmt.Sprintf("%s/%d", CollectionMetaPrefix, collectionInfo.CollectionID),
}
collectionKey := buildCollectionKey(collectionInfo.CollectionID)
var delMetakeysSnap []string
for _, alias := range collectionInfo.Aliases {
delMetakeysSnap = append(delMetakeysSnap,
fmt.Sprintf("%s/%s", CollectionAliasMetaPrefix, alias),
)
}
delMetakeysSnap = append(delMetakeysSnap, buildPartitionPrefix(collectionInfo.CollectionID))
delMetakeysSnap = append(delMetakeysSnap, buildFieldPrefix(collectionInfo.CollectionID))
err := kc.Snapshot.MultiSaveAndRemoveWithPrefix(map[string]string{}, delMetakeysSnap, ts)
if err != nil {
log.Error("drop collection update meta fail", zap.Int64("collectionID", collectionInfo.CollectionID), zap.Error(err))
// Though batchMultiSaveAndRemoveWithPrefix is not atomic enough, we can promise atomicity outside.
// If we found collection under dropping state, we'll know that gc is not completely on this collection.
// However, if we remove collection first, we cannot remove other metas.
if err := batchMultiSaveAndRemoveWithPrefix(kc.Snapshot, maxTxnNum, nil, delMetakeysSnap, ts); err != nil {
return err
}
// Txn operation
kvs := map[string]string{}
for k, v := range collectionInfo.Extra {
kvs[k] = v
// if we found collection dropping, we should try removing related resources.
return kc.Snapshot.MultiSaveAndRemoveWithPrefix(nil, []string{collectionKey}, ts)
}
func (kc *Catalog) alterModifyCollection(oldColl *model.Collection, newColl *model.Collection, ts typeutil.Timestamp) error {
if oldColl.TenantID != newColl.TenantID || oldColl.CollectionID != newColl.CollectionID {
return fmt.Errorf("altering tenant id or collection id is forbidden")
}
//delMetaKeysTxn := []string{
// fmt.Sprintf("%s/%d", SegmentIndexMetaPrefix, collectionInfo.CollectionID),
// fmt.Sprintf("%s/%d", IndexMetaPrefix, collectionInfo.CollectionID),
//}
err = kc.Txn.MultiSave(kvs)
oldCollClone := oldColl.Clone()
oldCollClone.Name = newColl.Name
oldCollClone.Description = newColl.Description
oldCollClone.AutoID = newColl.AutoID
oldCollClone.VirtualChannelNames = newColl.VirtualChannelNames
oldCollClone.PhysicalChannelNames = newColl.PhysicalChannelNames
oldCollClone.StartPositions = newColl.StartPositions
oldCollClone.ShardsNum = newColl.ShardsNum
oldCollClone.CreateTime = newColl.CreateTime
oldCollClone.ConsistencyLevel = newColl.ConsistencyLevel
oldCollClone.State = newColl.State
key := buildCollectionKey(oldColl.CollectionID)
value, err := proto.Marshal(model.MarshalCollectionModel(oldCollClone))
if err != nil {
log.Warn("drop collection update meta fail", zap.Int64("collectionID", collectionInfo.CollectionID), zap.Error(err))
return err
}
return kc.Snapshot.Save(key, string(value), ts)
}
return nil
// AlterCollection applies a collection mutation to the kv catalog. Only
// the MODIFY alter type is supported; every other type yields an error.
func (kc *Catalog) AlterCollection(ctx context.Context, oldColl *model.Collection, newColl *model.Collection, alterType metastore.AlterType, ts typeutil.Timestamp) error {
	if alterType != metastore.MODIFY {
		return fmt.Errorf("altering collection doesn't support %s", alterType.String())
	}
	return kc.alterModifyCollection(oldColl, newColl, ts)
}
// alterModifyPartition merges the mutable fields of newPart onto a clone of
// oldPart and persists the result under the partition's snapshot key.
//
// Fix: the original also cloned newPart, but that clone was only ever read
// and never mutated — a dead defensive copy. Reading the fields from
// newPart directly is behaviorally identical and avoids the allocation.
// oldPart is still cloned because the merged copy is mutated.
func (kc *Catalog) alterModifyPartition(oldPart *model.Partition, newPart *model.Partition, ts typeutil.Timestamp) error {
	if oldPart.CollectionID != newPart.CollectionID || oldPart.PartitionID != newPart.PartitionID {
		return fmt.Errorf("altering collection id or partition id is forbidden")
	}
	// Clone so the caller's oldPart is never mutated; only the fields an
	// alter is allowed to change are overlaid.
	merged := oldPart.Clone()
	merged.PartitionName = newPart.PartitionName
	merged.PartitionCreatedTimestamp = newPart.PartitionCreatedTimestamp
	merged.State = newPart.State
	key := buildPartitionKey(oldPart.CollectionID, oldPart.PartitionID)
	value, err := proto.Marshal(model.MarshalPartitionModel(merged))
	if err != nil {
		return err
	}
	return kc.Snapshot.Save(key, string(value), ts)
}
// AlterPartition applies a partition mutation to the kv catalog. Only the
// MODIFY alter type is supported; every other type yields an error.
func (kc *Catalog) AlterPartition(ctx context.Context, oldPart *model.Partition, newPart *model.Partition, alterType metastore.AlterType, ts typeutil.Timestamp) error {
	if alterType != metastore.MODIFY {
		return fmt.Errorf("altering partition doesn't support %s", alterType.String())
	}
	return kc.alterModifyPartition(oldPart, newPart, ts)
}
func dropPartition(collMeta *pb.CollectionInfo, partitionID typeutil.UniqueID) {

View File

@ -5,6 +5,10 @@ import (
"errors"
"testing"
"github.com/milvus-io/milvus/internal/kv/mocks"
"github.com/milvus-io/milvus/internal/metastore"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/util/typeutil"
@ -758,3 +762,325 @@ func Test_batchSave(t *testing.T) {
assert.Error(t, err)
})
}
// Test_min checks that min returns the smaller operand regardless of
// argument order.
func Test_min(t *testing.T) {
	cases := []struct {
		a, b, want int
	}{
		{a: 1, b: 2, want: 1},
		{a: 4, b: 3, want: 3},
	}
	for _, c := range cases {
		if got := min(c.a, c.b); got != c.want {
			t.Errorf("min(%d, %d) = %d, want %d", c.a, c.b, got, c.want)
		}
	}
}
// Test_batchMultiSaveAndRemoveWithPrefix covers the three outcomes of the
// batched save+remove helper: the save phase failing, the removal phase
// failing after a successful save, and the happy path.
func Test_batchMultiSaveAndRemoveWithPrefix(t *testing.T) {
	t.Run("failed to save", func(t *testing.T) {
		// MultiSave fails; the helper must surface that error.
		snapshot := kv.NewMockSnapshotKV()
		snapshot.MultiSaveFunc = func(kvs map[string]string, ts typeutil.Timestamp) error {
			return errors.New("error mock MultiSave")
		}
		saves := map[string]string{"k": "v"}
		err := batchMultiSaveAndRemoveWithPrefix(snapshot, maxTxnNum, saves, []string{}, 0)
		assert.Error(t, err)
	})
	t.Run("failed to remove", func(t *testing.T) {
		// Saves succeed but the prefix removal fails; the error propagates.
		snapshot := kv.NewMockSnapshotKV()
		snapshot.MultiSaveFunc = func(kvs map[string]string, ts typeutil.Timestamp) error {
			return nil
		}
		snapshot.MultiSaveAndRemoveWithPrefixFunc = func(saves map[string]string, removals []string, ts typeutil.Timestamp) error {
			return errors.New("error mock MultiSaveAndRemoveWithPrefix")
		}
		saves := map[string]string{"k": "v"}
		removals := []string{"prefix1", "prefix2"}
		err := batchMultiSaveAndRemoveWithPrefix(snapshot, maxTxnNum, saves, removals, 0)
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		// Both phases succeed.
		snapshot := kv.NewMockSnapshotKV()
		snapshot.MultiSaveFunc = func(kvs map[string]string, ts typeutil.Timestamp) error {
			return nil
		}
		snapshot.MultiSaveAndRemoveWithPrefixFunc = func(saves map[string]string, removals []string, ts typeutil.Timestamp) error {
			return nil
		}
		saves := map[string]string{"k": "v"}
		removals := []string{"prefix1", "prefix2"}
		err := batchMultiSaveAndRemoveWithPrefix(snapshot, maxTxnNum, saves, removals, 0)
		assert.NoError(t, err)
	})
}
// TestCatalog_AlterCollection covers AlterCollection: unsupported alter
// types (ADD/DELETE), a successful MODIFY that persists the new state,
// and a MODIFY rejected because the tenant ID changed.
func TestCatalog_AlterCollection(t *testing.T) {
	t.Run("add", func(t *testing.T) {
		// ADD is not a supported alter type for collections.
		kc := &Catalog{}
		ctx := context.Background()
		err := kc.AlterCollection(ctx, nil, nil, metastore.ADD, 0)
		assert.Error(t, err)
	})
	t.Run("delete", func(t *testing.T) {
		// DELETE is not a supported alter type for collections.
		kc := &Catalog{}
		ctx := context.Background()
		err := kc.AlterCollection(ctx, nil, nil, metastore.DELETE, 0)
		assert.Error(t, err)
	})
	t.Run("modify", func(t *testing.T) {
		// Capture everything the catalog writes through the snapshot.
		snapshot := kv.NewMockSnapshotKV()
		kvs := map[string]string{}
		snapshot.SaveFunc = func(key string, value string, ts typeutil.Timestamp) error {
			kvs[key] = value
			return nil
		}
		kc := &Catalog{Snapshot: snapshot}
		ctx := context.Background()
		var collectionID int64 = 1
		oldC := &model.Collection{CollectionID: collectionID, State: pb.CollectionState_CollectionCreating}
		newC := &model.Collection{CollectionID: collectionID, State: pb.CollectionState_CollectionCreated}
		err := kc.AlterCollection(ctx, oldC, newC, metastore.MODIFY, 0)
		assert.NoError(t, err)
		// The persisted record must carry the new (Created) state.
		key := buildCollectionKey(collectionID)
		value, ok := kvs[key]
		assert.True(t, ok)
		var collPb pb.CollectionInfo
		err = proto.Unmarshal([]byte(value), &collPb)
		assert.NoError(t, err)
		got := model.UnmarshalCollectionModel(&collPb)
		assert.Equal(t, pb.CollectionState_CollectionCreated, got.State)
	})
	t.Run("modify, tenant id changed", func(t *testing.T) {
		// Changing the tenant ID during MODIFY must be rejected.
		kc := &Catalog{}
		ctx := context.Background()
		var collectionID int64 = 1
		oldC := &model.Collection{TenantID: "1", CollectionID: collectionID, State: pb.CollectionState_CollectionCreating}
		newC := &model.Collection{TenantID: "2", CollectionID: collectionID, State: pb.CollectionState_CollectionCreated}
		err := kc.AlterCollection(ctx, oldC, newC, metastore.MODIFY, 0)
		assert.Error(t, err)
	})
}
// TestCatalog_AlterPartition covers AlterPartition: unsupported alter types
// (ADD/DELETE), a successful MODIFY that persists the new state, and a
// MODIFY rejected because the partition identity changed.
func TestCatalog_AlterPartition(t *testing.T) {
	t.Run("add", func(t *testing.T) {
		// ADD is not a supported alter type for partitions.
		kc := &Catalog{}
		ctx := context.Background()
		err := kc.AlterPartition(ctx, nil, nil, metastore.ADD, 0)
		assert.Error(t, err)
	})
	t.Run("delete", func(t *testing.T) {
		// DELETE is not a supported alter type for partitions.
		kc := &Catalog{}
		ctx := context.Background()
		err := kc.AlterPartition(ctx, nil, nil, metastore.DELETE, 0)
		assert.Error(t, err)
	})
	t.Run("modify", func(t *testing.T) {
		// Capture everything the catalog writes through the snapshot.
		snapshot := kv.NewMockSnapshotKV()
		kvs := map[string]string{}
		snapshot.SaveFunc = func(key string, value string, ts typeutil.Timestamp) error {
			kvs[key] = value
			return nil
		}
		kc := &Catalog{Snapshot: snapshot}
		ctx := context.Background()
		var collectionID int64 = 1
		var partitionID int64 = 2
		oldP := &model.Partition{PartitionID: partitionID, CollectionID: collectionID, State: pb.PartitionState_PartitionCreating}
		newP := &model.Partition{PartitionID: partitionID, CollectionID: collectionID, State: pb.PartitionState_PartitionCreated}
		err := kc.AlterPartition(ctx, oldP, newP, metastore.MODIFY, 0)
		assert.NoError(t, err)
		// The persisted record must carry the new (Created) state.
		key := buildPartitionKey(collectionID, partitionID)
		value, ok := kvs[key]
		assert.True(t, ok)
		var partPb pb.PartitionInfo
		err = proto.Unmarshal([]byte(value), &partPb)
		assert.NoError(t, err)
		got := model.UnmarshalPartitionModel(&partPb)
		assert.Equal(t, pb.PartitionState_PartitionCreated, got.State)
	})
	// Renamed from "modify, tenant id changed": this case actually changes
	// the partition ID (1 vs 2), not the tenant — the old name was a
	// copy-paste from the collection test.
	t.Run("modify, partition id changed", func(t *testing.T) {
		kc := &Catalog{}
		ctx := context.Background()
		var collectionID int64 = 1
		oldP := &model.Partition{PartitionID: 1, CollectionID: collectionID, State: pb.PartitionState_PartitionCreating}
		newP := &model.Partition{PartitionID: 2, CollectionID: collectionID, State: pb.PartitionState_PartitionCreated}
		err := kc.AlterPartition(ctx, oldP, newP, metastore.MODIFY, 0)
		assert.Error(t, err)
	})
}
// mockSnapshotOpt configures expectations on a mocked SnapShotKV before
// it is handed to the test.
type mockSnapshotOpt func(ss *mocks.SnapShotKV)

// newMockSnapshot builds a testify-generated SnapShotKV mock and applies
// the given expectation options in order.
func newMockSnapshot(t *testing.T, opts ...mockSnapshotOpt) *mocks.SnapShotKV {
	ss := mocks.NewSnapShotKV(t)
	for _, opt := range opts {
		opt(ss)
	}
	return ss
}
// withMockSave stubs Snapshot.Save(key, value, ts) to always return saveErr
// (nil for a Save that succeeds).
func withMockSave(saveErr error) mockSnapshotOpt {
	return func(ss *mocks.SnapShotKV) {
		ss.On(
			"Save",
			mock.AnythingOfType("string"),
			mock.AnythingOfType("string"),
			mock.AnythingOfType("uint64")).
			Return(saveErr)
	}
}
// withMockMultiSave stubs Snapshot.MultiSave(kvs, ts) to always return
// multiSaveErr (nil for a MultiSave that succeeds).
func withMockMultiSave(multiSaveErr error) mockSnapshotOpt {
	return func(ss *mocks.SnapShotKV) {
		ss.On(
			"MultiSave",
			mock.AnythingOfType("map[string]string"),
			mock.AnythingOfType("uint64")).
			Return(multiSaveErr)
	}
}
// withMockMultiSaveAndRemoveWithPrefix stubs
// Snapshot.MultiSaveAndRemoveWithPrefix(saves, removals, ts) to always
// return err (nil for a call that succeeds).
func withMockMultiSaveAndRemoveWithPrefix(err error) mockSnapshotOpt {
	return func(ss *mocks.SnapShotKV) {
		ss.On(
			"MultiSaveAndRemoveWithPrefix",
			mock.AnythingOfType("map[string]string"),
			mock.AnythingOfType("[]string"),
			mock.AnythingOfType("uint64")).
			Return(err)
	}
}
// TestCatalog_CreateCollection covers CreateCollection: rejecting
// collections not in the Creating state, failure of the primary Save,
// failure of the follow-up MultiSave for partition keys, and the
// happy path.
func TestCatalog_CreateCollection(t *testing.T) {
	t.Run("collection not creating", func(t *testing.T) {
		// CreateCollection only accepts collections in CollectionCreating state.
		kc := &Catalog{}
		ctx := context.Background()
		coll := &model.Collection{State: pb.CollectionState_CollectionDropping}
		err := kc.CreateCollection(ctx, coll, 100)
		assert.Error(t, err)
	})
	t.Run("failed to save collection", func(t *testing.T) {
		mockSnapshot := newMockSnapshot(t, withMockSave(errors.New("error mock Save")))
		kc := &Catalog{Snapshot: mockSnapshot}
		ctx := context.Background()
		coll := &model.Collection{State: pb.CollectionState_CollectionCreating}
		err := kc.CreateCollection(ctx, coll, 100)
		assert.Error(t, err)
	})
	t.Run("succeed to save collection but failed to save other keys", func(t *testing.T) {
		// Save succeeds, but the MultiSave of the remaining keys
		// (e.g. partitions) fails.
		mockSnapshot := newMockSnapshot(t, withMockSave(nil), withMockMultiSave(errors.New("error mock MultiSave")))
		kc := &Catalog{Snapshot: mockSnapshot}
		ctx := context.Background()
		coll := &model.Collection{
			Partitions: []*model.Partition{
				{PartitionName: "test"},
			},
			State: pb.CollectionState_CollectionCreating,
		}
		err := kc.CreateCollection(ctx, coll, 100)
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		mockSnapshot := newMockSnapshot(t, withMockSave(nil), withMockMultiSave(nil))
		kc := &Catalog{Snapshot: mockSnapshot}
		ctx := context.Background()
		coll := &model.Collection{
			Partitions: []*model.Partition{
				{PartitionName: "test"},
			},
			State: pb.CollectionState_CollectionCreating,
		}
		err := kc.CreateCollection(ctx, coll, 100)
		assert.NoError(t, err)
	})
}
// TestCatalog_DropCollection covers DropCollection: failure of the first
// MultiSaveAndRemoveWithPrefix, failure of the second call after the
// first succeeds (DropCollection issues two removal calls), and the
// happy path.
func TestCatalog_DropCollection(t *testing.T) {
	t.Run("failed to remove", func(t *testing.T) {
		mockSnapshot := newMockSnapshot(t, withMockMultiSaveAndRemoveWithPrefix(errors.New("error mock MultiSaveAndRemoveWithPrefix")))
		kc := &Catalog{Snapshot: mockSnapshot}
		ctx := context.Background()
		coll := &model.Collection{
			Partitions: []*model.Partition{
				{PartitionName: "test"},
			},
			State: pb.CollectionState_CollectionDropping,
		}
		err := kc.DropCollection(ctx, coll, 100)
		assert.Error(t, err)
	})
	t.Run("succeed to remove first, but failed to remove twice", func(t *testing.T) {
		// Two ordered expectations (.Once each): the first removal of
		// auxiliary keys succeeds, the second removal (the collection
		// record itself) fails; both must have been called.
		mockSnapshot := newMockSnapshot(t)
		removeOtherCalled := false
		removeCollectionCalled := false
		mockSnapshot.On(
			"MultiSaveAndRemoveWithPrefix",
			mock.AnythingOfType("map[string]string"),
			mock.AnythingOfType("[]string"),
			mock.AnythingOfType("uint64")).
			Return(func(map[string]string, []string, typeutil.Timestamp) error {
				removeOtherCalled = true
				return nil
			}).Once()
		mockSnapshot.On(
			"MultiSaveAndRemoveWithPrefix",
			mock.AnythingOfType("map[string]string"),
			mock.AnythingOfType("[]string"),
			mock.AnythingOfType("uint64")).
			Return(func(map[string]string, []string, typeutil.Timestamp) error {
				removeCollectionCalled = true
				return errors.New("error mock MultiSaveAndRemoveWithPrefix")
			}).Once()
		kc := &Catalog{Snapshot: mockSnapshot}
		ctx := context.Background()
		coll := &model.Collection{
			Partitions: []*model.Partition{
				{PartitionName: "test"},
			},
			State: pb.CollectionState_CollectionDropping,
		}
		err := kc.DropCollection(ctx, coll, 100)
		assert.Error(t, err)
		assert.True(t, removeOtherCalled)
		assert.True(t, removeCollectionCalled)
	})
	t.Run("normal case", func(t *testing.T) {
		mockSnapshot := newMockSnapshot(t, withMockMultiSaveAndRemoveWithPrefix(nil))
		kc := &Catalog{Snapshot: mockSnapshot}
		ctx := context.Background()
		coll := &model.Collection{
			Partitions: []*model.Partition{
				{PartitionName: "test"},
			},
			State: pb.CollectionState_CollectionDropping,
		}
		err := kc.DropCollection(ctx, coll, 100)
		assert.NoError(t, err)
	})
}

View File

@ -176,7 +176,7 @@ func (ss *SuffixSnapshot) checkKeyTS(key string, ts typeutil.Timestamp) (bool, e
}
latest = ss.lastestTS[key]
}
return latest < ts, nil
return latest <= ts, nil
}
// loadLatestTS load the loatest ts for specified key

View File

@ -6,6 +6,25 @@ type Alias struct {
Name string
CollectionID int64
CreatedTime uint64
State pb.AliasState
}
// Available reports whether the alias is fully created and usable;
// creating/dropping/dropped aliases are not available.
func (a Alias) Available() bool {
	return a.State == pb.AliasState_AliasCreated
}
// Clone returns a copy of the alias. All fields are value types, so a
// field-by-field copy is a full deep copy.
func (a Alias) Clone() *Alias {
	return &Alias{
		Name:         a.Name,
		CollectionID: a.CollectionID,
		CreatedTime:  a.CreatedTime,
		State:        a.State,
	}
}
// Equal compares aliases by name and target collection only;
// CreatedTime and State deliberately do not participate.
func (a Alias) Equal(other Alias) bool {
	return a.Name == other.Name &&
		a.CollectionID == other.CollectionID
}
func MarshalAliasModel(alias *Alias) *pb.AliasInfo {
@ -13,6 +32,7 @@ func MarshalAliasModel(alias *Alias) *pb.AliasInfo {
AliasName: alias.Name,
CollectionId: alias.CollectionID,
CreatedTime: alias.CreatedTime,
State: alias.State,
}
}
@ -21,5 +41,6 @@ func UnmarshalAliasModel(info *pb.AliasInfo) *Alias {
Name: info.GetAliasName(),
CollectionID: info.GetCollectionId(),
CreatedTime: info.GetCreatedTime(),
State: info.GetState(),
}
}

View File

@ -0,0 +1,91 @@
package model
import (
"testing"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/stretchr/testify/assert"
)
// TestAlias_Available verifies that only the AliasCreated state makes an
// alias available; every other lifecycle state does not.
func TestAlias_Available(t *testing.T) {
	type fields struct {
		Name         string
		CollectionID int64
		CreatedTime  uint64
		State        etcdpb.AliasState
	}
	tests := []struct {
		name   string
		fields fields
		want   bool
	}{
		{
			fields: fields{State: etcdpb.AliasState_AliasCreated},
			want:   true,
		},
		{
			fields: fields{State: etcdpb.AliasState_AliasCreating},
			want:   false,
		},
		{
			fields: fields{State: etcdpb.AliasState_AliasDropping},
			want:   false,
		},
		{
			fields: fields{State: etcdpb.AliasState_AliasDropped},
			want:   false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			a := Alias{
				Name:         tt.fields.Name,
				CollectionID: tt.fields.CollectionID,
				CreatedTime:  tt.fields.CreatedTime,
				State:        tt.fields.State,
			}
			assert.Equalf(t, tt.want, a.Available(), "Available()")
		})
	}
}
// TestAlias_Clone verifies that a clone compares Equal to its source
// (Equal covers Name and CollectionID).
func TestAlias_Clone(t *testing.T) {
	type fields struct {
		Name         string
		CollectionID int64
		CreatedTime  uint64
		State        etcdpb.AliasState
	}
	tests := []struct {
		name   string
		fields fields
	}{
		{fields: fields{Name: "alias1", CollectionID: 101}},
		{fields: fields{Name: "alias2", CollectionID: 102}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			a := Alias{
				Name:         tt.fields.Name,
				CollectionID: tt.fields.CollectionID,
				CreatedTime:  tt.fields.CreatedTime,
				State:        tt.fields.State,
			}
			clone := a.Clone()
			assert.True(t, clone.Equal(a))
		})
	}
}
// TestAlias_Codec round-trips an alias through MarshalAliasModel /
// UnmarshalAliasModel and checks the result compares Equal.
func TestAlias_Codec(t *testing.T) {
	alias := &Alias{
		Name:         "alias",
		CollectionID: 101,
		CreatedTime:  10000,
		State:        etcdpb.AliasState_AliasCreated,
	}
	aliasPb := MarshalAliasModel(alias)
	aliasFromPb := UnmarshalAliasModel(aliasPb)
	assert.True(t, aliasFromPb.Equal(*alias))
}

View File

@ -22,7 +22,12 @@ type Collection struct {
CreateTime uint64
ConsistencyLevel commonpb.ConsistencyLevel
Aliases []string // TODO: deprecate this.
Extra map[string]string // extra kvs
Extra map[string]string // deprecated.
State pb.CollectionState
}
// Available reports whether the collection is fully created and usable;
// creating/dropping/dropped collections are not available.
func (c Collection) Available() bool {
	return c.State == pb.CollectionState_CollectionCreated
}
func (c Collection) Clone() *Collection {
@ -32,19 +37,31 @@ func (c Collection) Clone() *Collection {
Name: c.Name,
Description: c.Description,
AutoID: c.AutoID,
Fields: c.Fields,
Partitions: c.Partitions,
VirtualChannelNames: c.VirtualChannelNames,
PhysicalChannelNames: c.PhysicalChannelNames,
Fields: CloneFields(c.Fields),
Partitions: ClonePartitions(c.Partitions),
VirtualChannelNames: common.CloneStringList(c.VirtualChannelNames),
PhysicalChannelNames: common.CloneStringList(c.PhysicalChannelNames),
ShardsNum: c.ShardsNum,
ConsistencyLevel: c.ConsistencyLevel,
CreateTime: c.CreateTime,
StartPositions: c.StartPositions,
Aliases: c.Aliases,
Extra: c.Extra,
StartPositions: common.CloneKeyDataPairs(c.StartPositions),
Aliases: common.CloneStringList(c.Aliases),
Extra: common.CloneStr2Str(c.Extra),
State: c.State,
}
}
// Equal reports whether two collections describe the same logical schema:
// tenant, partitions, name, description, autoID, fields, shard count and
// consistency level. Channel names, timestamps, start positions, aliases
// and State are not compared.
func (c Collection) Equal(other Collection) bool {
	return c.TenantID == other.TenantID &&
		CheckPartitionsEqual(c.Partitions, other.Partitions) &&
		c.Name == other.Name &&
		c.Description == other.Description &&
		c.AutoID == other.AutoID &&
		CheckFieldsEqual(c.Fields, other.Fields) &&
		c.ShardsNum == other.ShardsNum &&
		c.ConsistencyLevel == other.ConsistencyLevel
}
func UnmarshalCollectionModel(coll *pb.CollectionInfo) *Collection {
if coll == nil {
return nil
@ -81,6 +98,7 @@ func UnmarshalCollectionModel(coll *pb.CollectionInfo) *Collection {
ConsistencyLevel: coll.ConsistencyLevel,
CreateTime: coll.CreateTime,
StartPositions: coll.StartPositions,
State: coll.State,
}
}
@ -115,5 +133,6 @@ func MarshalCollectionModel(coll *Collection) *pb.CollectionInfo {
ShardsNum: coll.ShardsNum,
ConsistencyLevel: coll.ConsistencyLevel,
StartPositions: coll.StartPositions,
State: coll.State,
}
}

View File

@ -1,6 +1,8 @@
package model
import (
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
)
@ -14,6 +16,62 @@ type Field struct {
TypeParams []*commonpb.KeyValuePair
IndexParams []*commonpb.KeyValuePair
AutoID bool
State schemapb.FieldState
}
// Available reports whether the field is fully created and usable;
// creating/dropping/dropped fields are not available.
func (f Field) Available() bool {
	return f.State == schemapb.FieldState_FieldCreated
}
// Clone returns a deep copy of the field; the TypeParams and IndexParams
// slices are copied so the clone can be mutated independently.
func (f Field) Clone() *Field {
	return &Field{
		FieldID:      f.FieldID,
		Name:         f.Name,
		IsPrimaryKey: f.IsPrimaryKey,
		Description:  f.Description,
		DataType:     f.DataType,
		TypeParams:   common.CloneKeyValuePairs(f.TypeParams),
		IndexParams:  common.CloneKeyValuePairs(f.IndexParams),
		AutoID:       f.AutoID,
		State:        f.State,
	}
}
// CloneFields returns a new slice containing a deep copy of every field,
// preserving order.
func CloneFields(fields []*Field) []*Field {
	cloned := make([]*Field, len(fields))
	for i, f := range fields {
		cloned[i] = f.Clone()
	}
	return cloned
}
// checkParamsEqual reports whether two key-value parameter lists are
// equal, delegating to common.KeyValuePairs.Equal.
func checkParamsEqual(paramsA, paramsB []*commonpb.KeyValuePair) bool {
	var A common.KeyValuePairs = paramsA
	return A.Equal(paramsB)
}
// Equal reports whether two fields describe the same schema entry:
// ID, name, primary-key flag, description, data type, type/index
// parameters and autoID. State does not participate.
func (f Field) Equal(other Field) bool {
	return f.FieldID == other.FieldID &&
		f.Name == other.Name &&
		f.IsPrimaryKey == other.IsPrimaryKey &&
		f.Description == other.Description &&
		f.DataType == other.DataType &&
		// Bug fix: previously compared f.TypeParams against itself, so
		// differing type params never caused an inequality.
		checkParamsEqual(f.TypeParams, other.TypeParams) &&
		checkParamsEqual(f.IndexParams, other.IndexParams) &&
		f.AutoID == other.AutoID
}
// CheckFieldsEqual reports whether the two field lists match
// element-wise: same length and pairwise Equal at every index.
func CheckFieldsEqual(fieldsA, fieldsB []*Field) bool {
	if len(fieldsA) != len(fieldsB) {
		return false
	}
	for i := range fieldsA {
		if !fieldsA[i].Equal(*fieldsB[i]) {
			return false
		}
	}
	return true
}
func MarshalFieldModel(field *Field) *schemapb.FieldSchema {

View File

@ -3,6 +3,8 @@ package model
import (
"testing"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/stretchr/testify/assert"
)
@ -54,3 +56,85 @@ func TestUnmarshalFieldModels(t *testing.T) {
assert.Equal(t, []*Field{fieldModel}, ret)
assert.Nil(t, UnmarshalFieldModels(nil))
}
// TestCheckFieldsEqual covers length mismatch, element mismatch, and
// matching field lists.
func TestCheckFieldsEqual(t *testing.T) {
	type args struct {
		fieldsA []*Field
		fieldsB []*Field
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			// length not match.
			args: args{
				fieldsA: []*Field{{Name: "f1"}},
				fieldsB: []*Field{},
			},
			want: false,
		},
		{
			// same length, different field name.
			args: args{
				fieldsA: []*Field{{Name: "f1"}},
				fieldsB: []*Field{{Name: "f2"}},
			},
			want: false,
		},
		{
			// identical lists.
			args: args{
				fieldsA: []*Field{{Name: "f1"}, {Name: "f2"}},
				fieldsB: []*Field{{Name: "f1"}, {Name: "f2"}},
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := CheckFieldsEqual(tt.args.fieldsA, tt.args.fieldsB); got != tt.want {
				t.Errorf("CheckFieldsEqual() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestField_Available verifies that only FieldCreated makes a field
// available; every other lifecycle state does not.
func TestField_Available(t *testing.T) {
	type fields struct {
		FieldID      int64
		Name         string
		IsPrimaryKey bool
		Description  string
		DataType     schemapb.DataType
		TypeParams   []*commonpb.KeyValuePair
		IndexParams  []*commonpb.KeyValuePair
		AutoID       bool
		State        schemapb.FieldState
	}
	tests := []struct {
		name   string
		fields fields
		want   bool
	}{
		{fields: fields{State: schemapb.FieldState_FieldCreated}, want: true},
		{fields: fields{State: schemapb.FieldState_FieldCreating}, want: false},
		{fields: fields{State: schemapb.FieldState_FieldDropping}, want: false},
		{fields: fields{State: schemapb.FieldState_FieldDropped}, want: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := Field{
				FieldID:      tt.fields.FieldID,
				Name:         tt.fields.Name,
				IsPrimaryKey: tt.fields.IsPrimaryKey,
				Description:  tt.fields.Description,
				DataType:     tt.fields.DataType,
				TypeParams:   tt.fields.TypeParams,
				IndexParams:  tt.fields.IndexParams,
				AutoID:       tt.fields.AutoID,
				State:        tt.fields.State,
			}
			assert.Equalf(t, tt.want, f.Available(), "Available()")
		})
	}
}

View File

@ -1,13 +1,57 @@
package model
import pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
import (
"github.com/milvus-io/milvus/internal/common"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
)
type Partition struct {
PartitionID int64
PartitionName string
PartitionCreatedTimestamp uint64
Extra map[string]string
Extra map[string]string // deprecated.
CollectionID int64
State pb.PartitionState
}
// Available reports whether the partition is fully created and usable;
// creating/dropping/dropped partitions are not available.
func (p Partition) Available() bool {
	return p.State == pb.PartitionState_PartitionCreated
}
// Clone returns a deep copy of the partition; the (deprecated) Extra map
// is copied so the clone can be mutated independently.
func (p Partition) Clone() *Partition {
	return &Partition{
		PartitionID:               p.PartitionID,
		PartitionName:             p.PartitionName,
		PartitionCreatedTimestamp: p.PartitionCreatedTimestamp,
		Extra:                     common.CloneStr2Str(p.Extra),
		CollectionID:              p.CollectionID,
		State:                     p.State,
	}
}
// ClonePartitions returns a new slice containing a deep copy of every
// partition, preserving order.
func ClonePartitions(partitions []*Partition) []*Partition {
	cloned := make([]*Partition, len(partitions))
	for i, p := range partitions {
		cloned[i] = p.Clone()
	}
	return cloned
}
// Equal compares partitions by name only; IDs, timestamps and state do
// not participate (IDs are assigned per-collection, so name identity is
// what matters for schema comparison — NOTE(review): confirm this is the
// intended contract for all callers).
func (p Partition) Equal(other Partition) bool {
	return p.PartitionName == other.PartitionName
}
// CheckPartitionsEqual reports whether the two partition lists match
// element-wise: same length and pairwise Equal at every index.
func CheckPartitionsEqual(partitionsA, partitionsB []*Partition) bool {
	if len(partitionsA) != len(partitionsB) {
		return false
	}
	for i := range partitionsA {
		if !partitionsA[i].Equal(*partitionsB[i]) {
			return false
		}
	}
	return true
}
func MarshalPartitionModel(partition *Partition) *pb.PartitionInfo {
@ -16,6 +60,7 @@ func MarshalPartitionModel(partition *Partition) *pb.PartitionInfo {
PartitionName: partition.PartitionName,
PartitionCreatedTimestamp: partition.PartitionCreatedTimestamp,
CollectionId: partition.CollectionID,
State: partition.State,
}
}
@ -25,5 +70,6 @@ func UnmarshalPartitionModel(info *pb.PartitionInfo) *Partition {
PartitionName: info.GetPartitionName(),
PartitionCreatedTimestamp: info.GetPartitionCreatedTimestamp(),
CollectionID: info.GetCollectionId(),
State: info.GetState(),
}
}

View File

@ -0,0 +1,47 @@
package model
import (
"testing"
)
// TestCheckPartitionsEqual covers length mismatch, element mismatch, and
// matching partition lists (comparison is by partition name).
func TestCheckPartitionsEqual(t *testing.T) {
	type args struct {
		partitionsA []*Partition
		partitionsB []*Partition
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			// length not match.
			args: args{
				partitionsA: []*Partition{{PartitionName: "_default"}},
				partitionsB: []*Partition{},
			},
			want: false,
		},
		{
			// same length, different partition name.
			args: args{
				partitionsA: []*Partition{{PartitionName: "_default"}},
				partitionsB: []*Partition{{PartitionName: "not_default"}},
			},
			want: false,
		},
		{
			// identical lists.
			args: args{
				partitionsA: []*Partition{{PartitionName: "_default"}, {PartitionName: "not_default"}},
				partitionsB: []*Partition{{PartitionName: "_default"}, {PartitionName: "not_default"}},
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := CheckPartitionsEqual(tt.args.partitionsA, tt.args.partitionsB); got != tt.want {
				t.Errorf("CheckPartitionsEqual() = %v, want %v", got, tt.want)
			}
		})
	}
}

View File

@ -0,0 +1,16 @@
package msgstream
import "context"
// MockMqFactory is a test double for Factory whose NewMsgStream behavior
// is injected per-test via NewMsgStreamFunc.
type MockMqFactory struct {
	Factory
	NewMsgStreamFunc func(ctx context.Context) (MsgStream, error)
}

// NewMockMqFactory returns an empty mock; set NewMsgStreamFunc before use.
func NewMockMqFactory() *MockMqFactory {
	return &MockMqFactory{}
}

// NewMsgStream delegates to the injected NewMsgStreamFunc.
// NOTE(review): panics with a nil dereference if NewMsgStreamFunc is unset.
func (m MockMqFactory) NewMsgStream(ctx context.Context) (MsgStream, error) {
	return m.NewMsgStreamFunc(ctx)
}

View File

@ -0,0 +1,24 @@
package msgstream
// MockMsgStream is a test double for MsgStream; the behavior of each
// overridden method is injected per-test via the corresponding Func field.
type MockMsgStream struct {
	MsgStream
	AsProducerFunc    func(channels []string)
	BroadcastMarkFunc func(*MsgPack) (map[string][]MessageID, error)
	BroadcastFunc     func(*MsgPack) error
}

// NewMockMsgStream returns an empty mock; set the Func fields before use.
func NewMockMsgStream() *MockMsgStream {
	return &MockMsgStream{}
}

// AsProducer delegates to the injected AsProducerFunc.
// NOTE(review): each delegating method panics with a nil dereference if
// its Func field is unset.
func (m MockMsgStream) AsProducer(channels []string) {
	m.AsProducerFunc(channels)
}

// BroadcastMark delegates to the injected BroadcastMarkFunc.
func (m MockMsgStream) BroadcastMark(pack *MsgPack) (map[string][]MessageID, error) {
	return m.BroadcastMarkFunc(pack)
}

// Broadcast delegates to the injected BroadcastFunc.
func (m MockMsgStream) Broadcast(pack *MsgPack) error {
	return m.BroadcastFunc(pack)
}

View File

@ -20,6 +20,27 @@ message FieldIndexInfo{
int64 indexID = 2;
}
// Lifecycle state of a collection. The zero value is `Created` so that
// records written by older versions (which lack this field) decode as
// already created.
enum CollectionState {
    CollectionCreated = 0;
    CollectionCreating = 1;
    CollectionDropping = 2;
    CollectionDropped = 3;
}

// Lifecycle state of a partition; zero value is `Created` for the same
// backward-compatibility reason as CollectionState.
enum PartitionState {
    PartitionCreated = 0;
    PartitionCreating = 1;
    PartitionDropping = 2;
    PartitionDropped = 3;
}

// Lifecycle state of an alias; zero value is `Created` for the same
// backward-compatibility reason as CollectionState.
enum AliasState {
    AliasCreated = 0;
    AliasCreating = 1;
    AliasDropping = 2;
    AliasDropped = 3;
}
message CollectionInfo {
int64 ID = 1;
schema.CollectionSchema schema = 2;
@ -37,6 +58,7 @@ message CollectionInfo {
int32 shards_num = 10;
repeated common.KeyDataPair start_positions = 11;
common.ConsistencyLevel consistency_level = 12;
CollectionState state = 13; // To keep compatible with older version, default state is `Created`.
}
message PartitionInfo {
@ -44,12 +66,14 @@ message PartitionInfo {
string partitionName = 2;
uint64 partition_created_timestamp = 3;
int64 collection_id = 4;
PartitionState state = 5; // To keep compatible with older version, default state is `Created`.
}
message AliasInfo {
string alias_name = 1;
int64 collection_id = 2;
uint64 created_time = 3;
AliasState state = 4; // To keep compatible with older version, default state is `Created`.
}
message SegmentIndexInfo {

View File

@ -22,6 +22,99 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// CollectionState mirrors the etcd_meta.proto CollectionState enum.
// NOTE(review): this file is generated by protoc-gen-go — regenerate from
// the .proto rather than hand-editing.
type CollectionState int32

const (
	CollectionState_CollectionCreated  CollectionState = 0
	CollectionState_CollectionCreating CollectionState = 1
	CollectionState_CollectionDropping CollectionState = 2
	CollectionState_CollectionDropped  CollectionState = 3
)

// CollectionState_name maps enum values to their proto names.
var CollectionState_name = map[int32]string{
	0: "CollectionCreated",
	1: "CollectionCreating",
	2: "CollectionDropping",
	3: "CollectionDropped",
}

// CollectionState_value maps proto names back to enum values.
var CollectionState_value = map[string]int32{
	"CollectionCreated":  0,
	"CollectionCreating": 1,
	"CollectionDropping": 2,
	"CollectionDropped":  3,
}

func (x CollectionState) String() string {
	return proto.EnumName(CollectionState_name, int32(x))
}

func (CollectionState) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_975d306d62b73e88, []int{0}
}
// PartitionState mirrors the etcd_meta.proto PartitionState enum.
// NOTE(review): generated code — regenerate rather than hand-edit.
type PartitionState int32

const (
	PartitionState_PartitionCreated  PartitionState = 0
	PartitionState_PartitionCreating PartitionState = 1
	PartitionState_PartitionDropping PartitionState = 2
	PartitionState_PartitionDropped  PartitionState = 3
)

// PartitionState_name maps enum values to their proto names.
var PartitionState_name = map[int32]string{
	0: "PartitionCreated",
	1: "PartitionCreating",
	2: "PartitionDropping",
	3: "PartitionDropped",
}

// PartitionState_value maps proto names back to enum values.
var PartitionState_value = map[string]int32{
	"PartitionCreated":  0,
	"PartitionCreating": 1,
	"PartitionDropping": 2,
	"PartitionDropped":  3,
}

func (x PartitionState) String() string {
	return proto.EnumName(PartitionState_name, int32(x))
}

func (PartitionState) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_975d306d62b73e88, []int{1}
}
// AliasState mirrors the etcd_meta.proto AliasState enum.
// NOTE(review): generated code — regenerate rather than hand-edit.
type AliasState int32

const (
	AliasState_AliasCreated  AliasState = 0
	AliasState_AliasCreating AliasState = 1
	AliasState_AliasDropping AliasState = 2
	AliasState_AliasDropped  AliasState = 3
)

// AliasState_name maps enum values to their proto names.
var AliasState_name = map[int32]string{
	0: "AliasCreated",
	1: "AliasCreating",
	2: "AliasDropping",
	3: "AliasDropped",
}

// AliasState_value maps proto names back to enum values.
var AliasState_value = map[string]int32{
	"AliasCreated":  0,
	"AliasCreating": 1,
	"AliasDropping": 2,
	"AliasDropped":  3,
}

func (x AliasState) String() string {
	return proto.EnumName(AliasState_name, int32(x))
}

func (AliasState) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_975d306d62b73e88, []int{2}
}
type IndexInfo struct {
IndexName string `protobuf:"bytes,1,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"`
IndexID int64 `protobuf:"varint,2,opt,name=indexID,proto3" json:"indexID,omitempty"`
@ -157,6 +250,7 @@ type CollectionInfo struct {
ShardsNum int32 `protobuf:"varint,10,opt,name=shards_num,json=shardsNum,proto3" json:"shards_num,omitempty"`
StartPositions []*commonpb.KeyDataPair `protobuf:"bytes,11,rep,name=start_positions,json=startPositions,proto3" json:"start_positions,omitempty"`
ConsistencyLevel commonpb.ConsistencyLevel `protobuf:"varint,12,opt,name=consistency_level,json=consistencyLevel,proto3,enum=milvus.proto.common.ConsistencyLevel" json:"consistency_level,omitempty"`
State CollectionState `protobuf:"varint,13,opt,name=state,proto3,enum=milvus.proto.etcd.CollectionState" json:"state,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -271,14 +365,22 @@ func (m *CollectionInfo) GetConsistencyLevel() commonpb.ConsistencyLevel {
return commonpb.ConsistencyLevel_Strong
}
// GetState returns the collection state; per proto3 getter convention it
// is nil-safe and returns the zero value (Created) for a nil receiver.
func (m *CollectionInfo) GetState() CollectionState {
	if m != nil {
		return m.State
	}
	return CollectionState_CollectionCreated
}
type PartitionInfo struct {
PartitionID int64 `protobuf:"varint,1,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
PartitionName string `protobuf:"bytes,2,opt,name=partitionName,proto3" json:"partitionName,omitempty"`
PartitionCreatedTimestamp uint64 `protobuf:"varint,3,opt,name=partition_created_timestamp,json=partitionCreatedTimestamp,proto3" json:"partition_created_timestamp,omitempty"`
CollectionId int64 `protobuf:"varint,4,opt,name=collection_id,json=collectionId,proto3" json:"collection_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
PartitionID int64 `protobuf:"varint,1,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
PartitionName string `protobuf:"bytes,2,opt,name=partitionName,proto3" json:"partitionName,omitempty"`
PartitionCreatedTimestamp uint64 `protobuf:"varint,3,opt,name=partition_created_timestamp,json=partitionCreatedTimestamp,proto3" json:"partition_created_timestamp,omitempty"`
CollectionId int64 `protobuf:"varint,4,opt,name=collection_id,json=collectionId,proto3" json:"collection_id,omitempty"`
State PartitionState `protobuf:"varint,5,opt,name=state,proto3,enum=milvus.proto.etcd.PartitionState" json:"state,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PartitionInfo) Reset() { *m = PartitionInfo{} }
@ -334,13 +436,21 @@ func (m *PartitionInfo) GetCollectionId() int64 {
return 0
}
// GetState returns the partition state; per proto3 getter convention it
// is nil-safe and returns the zero value (Created) for a nil receiver.
func (m *PartitionInfo) GetState() PartitionState {
	if m != nil {
		return m.State
	}
	return PartitionState_PartitionCreated
}
type AliasInfo struct {
AliasName string `protobuf:"bytes,1,opt,name=alias_name,json=aliasName,proto3" json:"alias_name,omitempty"`
CollectionId int64 `protobuf:"varint,2,opt,name=collection_id,json=collectionId,proto3" json:"collection_id,omitempty"`
CreatedTime uint64 `protobuf:"varint,3,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
AliasName string `protobuf:"bytes,1,opt,name=alias_name,json=aliasName,proto3" json:"alias_name,omitempty"`
CollectionId int64 `protobuf:"varint,2,opt,name=collection_id,json=collectionId,proto3" json:"collection_id,omitempty"`
CreatedTime uint64 `protobuf:"varint,3,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"`
State AliasState `protobuf:"varint,4,opt,name=state,proto3,enum=milvus.proto.etcd.AliasState" json:"state,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AliasInfo) Reset() { *m = AliasInfo{} }
@ -389,6 +499,13 @@ func (m *AliasInfo) GetCreatedTime() uint64 {
return 0
}
// GetState returns the alias state; per proto3 getter convention it is
// nil-safe and returns the zero value (Created) for a nil receiver.
func (m *AliasInfo) GetState() AliasState {
	if m != nil {
		return m.State
	}
	return AliasState_AliasCreated
}
type SegmentIndexInfo struct {
CollectionID int64 `protobuf:"varint,1,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
PartitionID int64 `protobuf:"varint,2,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
@ -638,6 +755,9 @@ func (m *CredentialInfo) GetSha256Password() string {
}
func init() {
proto.RegisterEnum("milvus.proto.etcd.CollectionState", CollectionState_name, CollectionState_value)
proto.RegisterEnum("milvus.proto.etcd.PartitionState", PartitionState_name, PartitionState_value)
proto.RegisterEnum("milvus.proto.etcd.AliasState", AliasState_name, AliasState_value)
proto.RegisterType((*IndexInfo)(nil), "milvus.proto.etcd.IndexInfo")
proto.RegisterType((*FieldIndexInfo)(nil), "milvus.proto.etcd.FieldIndexInfo")
proto.RegisterType((*CollectionInfo)(nil), "milvus.proto.etcd.CollectionInfo")
@ -651,59 +771,68 @@ func init() {
func init() { proto.RegisterFile("etcd_meta.proto", fileDescriptor_975d306d62b73e88) }
var fileDescriptor_975d306d62b73e88 = []byte{
// 861 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xdd, 0x6e, 0xe3, 0x44,
0x14, 0x96, 0xe3, 0xfc, 0xf9, 0x24, 0x4d, 0xb7, 0x03, 0xac, 0xbc, 0x65, 0x01, 0x6f, 0x60, 0xc1,
0x37, 0xdb, 0x8a, 0x2e, 0x70, 0x07, 0x02, 0x6a, 0xad, 0x14, 0x01, 0x55, 0x34, 0xad, 0xb8, 0xe0,
0xc6, 0x9a, 0xd8, 0xa7, 0xcd, 0x48, 0xfe, 0x93, 0x67, 0x5c, 0xe8, 0x1b, 0xf0, 0x46, 0xdc, 0x70,
0xcb, 0xd3, 0xf0, 0x0e, 0x08, 0xcd, 0x78, 0xec, 0xd8, 0x09, 0xe5, 0x72, 0xef, 0xf2, 0x7d, 0x33,
0xe7, 0xf8, 0xfc, 0x7c, 0xf3, 0x05, 0x8e, 0x51, 0x46, 0x71, 0x98, 0xa2, 0x64, 0x67, 0x45, 0x99,
0xcb, 0x9c, 0x9c, 0xa4, 0x3c, 0xb9, 0xaf, 0x44, 0x8d, 0xce, 0xd4, 0xe9, 0xe9, 0x3c, 0xca, 0xd3,
0x34, 0xcf, 0x6a, 0xea, 0x74, 0x2e, 0xa2, 0x2d, 0xa6, 0xe6, 0xfa, 0xf2, 0x2f, 0x0b, 0x9c, 0x55,
0x16, 0xe3, 0x6f, 0xab, 0xec, 0x36, 0x27, 0x1f, 0x00, 0x70, 0x05, 0xc2, 0x8c, 0xa5, 0xe8, 0x5a,
0x9e, 0xe5, 0x3b, 0xd4, 0xd1, 0xcc, 0x15, 0x4b, 0x91, 0xb8, 0x30, 0xd1, 0x60, 0x15, 0xb8, 0x03,
0xcf, 0xf2, 0x6d, 0xda, 0x40, 0x12, 0xc0, 0xbc, 0x0e, 0x2c, 0x58, 0xc9, 0x52, 0xe1, 0xda, 0x9e,
0xed, 0xcf, 0x2e, 0x5e, 0x9c, 0xf5, 0x8a, 0x31, 0x65, 0xfc, 0x80, 0x0f, 0x3f, 0xb3, 0xa4, 0xc2,
0x35, 0xe3, 0x25, 0x9d, 0xe9, 0xb0, 0xb5, 0x8e, 0x52, 0xf9, 0x63, 0x4c, 0x50, 0x62, 0xec, 0x0e,
0x3d, 0xcb, 0x9f, 0xd2, 0x06, 0x92, 0x8f, 0x60, 0x16, 0x95, 0xc8, 0x24, 0x86, 0x92, 0xa7, 0xe8,
0x8e, 0x3c, 0xcb, 0x1f, 0x52, 0xa8, 0xa9, 0x1b, 0x9e, 0xe2, 0x32, 0x80, 0xc5, 0x1b, 0x8e, 0x49,
0xbc, 0xeb, 0xc5, 0x85, 0xc9, 0x2d, 0x4f, 0x30, 0x5e, 0x05, 0xba, 0x11, 0x9b, 0x36, 0xf0, 0xf1,
0x36, 0x96, 0xff, 0x0c, 0x61, 0x71, 0x99, 0x27, 0x09, 0x46, 0x92, 0xe7, 0x99, 0x4e, 0xb3, 0x80,
0x41, 0x9b, 0x61, 0xb0, 0x0a, 0xc8, 0xd7, 0x30, 0xae, 0x07, 0xa8, 0x63, 0x67, 0x17, 0x2f, 0xfb,
0x3d, 0x9a, 0xe1, 0xee, 0x92, 0x5c, 0x6b, 0x82, 0x9a, 0xa0, 0xfd, 0x46, 0xec, 0xfd, 0x46, 0xc8,
0x12, 0xe6, 0x05, 0x2b, 0x25, 0xd7, 0x05, 0x04, 0xc2, 0x1d, 0x7a, 0xb6, 0x6f, 0xd3, 0x1e, 0x47,
0x3e, 0x85, 0x45, 0x8b, 0xd5, 0x62, 0x84, 0x3b, 0xf2, 0x6c, 0xdf, 0xa1, 0x7b, 0x2c, 0x79, 0x03,
0x47, 0xb7, 0x6a, 0x28, 0xa1, 0xee, 0x0f, 0x85, 0x3b, 0xfe, 0xaf, 0xb5, 0x28, 0x8d, 0x9c, 0xf5,
0x87, 0x47, 0xe7, 0xb7, 0x2d, 0x46, 0x41, 0x2e, 0xe0, 0xbd, 0x7b, 0x5e, 0xca, 0x8a, 0x25, 0x61,
0xb4, 0x65, 0x59, 0x86, 0x89, 0x16, 0x88, 0x70, 0x27, 0xfa, 0xb3, 0xef, 0x98, 0xc3, 0xcb, 0xfa,
0xac, 0xfe, 0xf6, 0x17, 0xf0, 0xb4, 0xd8, 0x3e, 0x08, 0x1e, 0x1d, 0x04, 0x4d, 0x75, 0xd0, 0xbb,
0xcd, 0x69, 0x2f, 0xea, 0x5b, 0x78, 0xde, 0xf6, 0x10, 0xd6, 0x53, 0x89, 0xf5, 0xa4, 0x84, 0x64,
0x69, 0x21, 0x5c, 0xc7, 0xb3, 0xfd, 0x21, 0x3d, 0x6d, 0xef, 0x5c, 0xd6, 0x57, 0x6e, 0xda, 0x1b,
0x4a, 0xc2, 0x62, 0xcb, 0xca, 0x58, 0x84, 0x59, 0x95, 0xba, 0xe0, 0x59, 0xfe, 0x88, 0x3a, 0x35,
0x73, 0x55, 0xa5, 0x64, 0x05, 0xc7, 0x42, 0xb2, 0x52, 0x86, 0x45, 0x2e, 0x74, 0x06, 0xe1, 0xce,
0xf4, 0x50, 0xbc, 0xc7, 0xb4, 0x1a, 0x30, 0xc9, 0xb4, 0x54, 0x17, 0x3a, 0x70, 0xdd, 0xc4, 0x11,
0x0a, 0x27, 0x51, 0x9e, 0x09, 0x2e, 0x24, 0x66, 0xd1, 0x43, 0x98, 0xe0, 0x3d, 0x26, 0xee, 0xdc,
0xb3, 0xfc, 0xc5, 0xbe, 0x28, 0x4c, 0xb2, 0xcb, 0xdd, 0xed, 0x1f, 0xd5, 0x65, 0xfa, 0x24, 0xda,
0x63, 0x96, 0x7f, 0x5a, 0x70, 0xb4, 0x6e, 0x57, 0xad, 0xf4, 0xe7, 0xc1, 0xac, 0xb3, 0x7b, 0x23,
0xc4, 0x2e, 0x45, 0x3e, 0x81, 0xa3, 0xde, 0xde, 0xb5, 0x30, 0x1d, 0xda, 0x27, 0xc9, 0x37, 0xf0,
0xfe, 0xff, 0x4c, 0xd6, 0x08, 0xf1, 0xd9, 0xa3, 0x83, 0x25, 0x1f, 0xc3, 0x51, 0xd4, 0x8a, 0x3a,
0xe4, 0xf5, 0x0b, 0xb5, 0xe9, 0x7c, 0x47, 0xae, 0xe2, 0x65, 0x09, 0xce, 0x77, 0x09, 0x67, 0xa2,
0x31, 0x13, 0xa6, 0x40, 0xcf, 0x4c, 0x34, 0xa3, 0x0b, 0x3a, 0x48, 0x38, 0x38, 0x4c, 0x48, 0x5e,
0xc0, 0xbc, 0x5b, 0xab, 0x29, 0xd3, 0x3c, 0x21, 0x5d, 0xdd, 0xf2, 0xf7, 0x01, 0x3c, 0xb9, 0xc6,
0xbb, 0x14, 0x33, 0xb9, 0x7b, 0xfc, 0x4b, 0xe8, 0xe6, 0x69, 0xc6, 0xd6, 0xe3, 0xf6, 0x27, 0x3b,
0x38, 0x9c, 0xec, 0x73, 0x70, 0x84, 0xc9, 0x1c, 0xe8, 0x4f, 0xdb, 0x74, 0x47, 0xd4, 0x06, 0xa3,
0x5e, 0x49, 0x60, 0x66, 0xd1, 0xc0, 0xae, 0xc1, 0x8c, 0xfa, 0x3e, 0xe9, 0xc2, 0x64, 0x53, 0x71,
0x1d, 0x33, 0xae, 0x4f, 0x0c, 0x54, 0x9d, 0x62, 0xc6, 0x36, 0x09, 0xd6, 0x8f, 0xd5, 0x9d, 0x68,
0x03, 0x9c, 0xd5, 0x9c, 0x6e, 0x6c, 0xdf, 0x3b, 0xa6, 0x07, 0x26, 0xf8, 0xb7, 0xd5, 0xb5, 0xaf,
0x9f, 0x50, 0xb2, 0xb7, 0x6e, 0x5f, 0x1f, 0x02, 0xb4, 0x13, 0x6a, 0xcc, 0xab, 0xc3, 0x90, 0x97,
0x1d, 0xeb, 0x0a, 0x25, 0xbb, 0x6b, 0xac, 0x6b, 0xa7, 0xd6, 0x1b, 0x76, 0x27, 0x0e, 0x5c, 0x70,
0x7c, 0xe8, 0x82, 0xcb, 0x3f, 0x54, 0xb7, 0x25, 0xc6, 0x98, 0x49, 0xce, 0x12, 0xbd, 0xf6, 0x53,
0x98, 0x56, 0x02, 0xcb, 0x8e, 0xe0, 0x5a, 0x4c, 0x5e, 0x01, 0xc1, 0x2c, 0x2a, 0x1f, 0x0a, 0x25,
0xa6, 0x82, 0x09, 0xf1, 0x6b, 0x5e, 0xc6, 0xe6, 0xad, 0x9c, 0xb4, 0x27, 0x6b, 0x73, 0x40, 0x9e,
0xc2, 0x58, 0x62, 0xc6, 0x32, 0xa9, 0x9b, 0x74, 0xa8, 0x41, 0xe4, 0x19, 0x4c, 0xb9, 0x08, 0x45,
0x55, 0x60, 0xd9, 0xfc, 0x49, 0x71, 0x71, 0xad, 0x20, 0xf9, 0x0c, 0x8e, 0xc5, 0x96, 0x5d, 0x7c,
0xf9, 0xd5, 0x2e, 0xfd, 0x48, 0xc7, 0x2e, 0x6a, 0xba, 0xc9, 0xfd, 0xfd, 0xeb, 0x5f, 0x3e, 0xbf,
0xe3, 0x72, 0x5b, 0x6d, 0x94, 0x33, 0x9c, 0xd7, 0x0b, 0x78, 0xc5, 0x73, 0xf3, 0xeb, 0x9c, 0x67,
0x52, 0xd5, 0x9c, 0x9c, 0xeb, 0x9d, 0x9c, 0x2b, 0x7f, 0x2e, 0x36, 0x9b, 0xb1, 0x46, 0xaf, 0xff,
0x0d, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x28, 0x7e, 0xad, 0xf2, 0x07, 0x00, 0x00,
// 1000 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcb, 0x6e, 0x23, 0x45,
0x14, 0x9d, 0x76, 0xfb, 0xd5, 0xd7, 0x8f, 0xd8, 0xc5, 0x4c, 0xd4, 0x13, 0x66, 0xa0, 0xc7, 0x30,
0xd0, 0x1a, 0x69, 0x12, 0x91, 0xf0, 0xda, 0x80, 0x80, 0xb4, 0x46, 0xb2, 0x80, 0x91, 0xd5, 0x89,
0xb2, 0x60, 0xd3, 0x2a, 0x77, 0x57, 0xec, 0x42, 0xfd, 0x52, 0x57, 0x39, 0x90, 0x3f, 0xe0, 0x4f,
0xf8, 0x04, 0xf8, 0x01, 0xbe, 0x86, 0x35, 0x7b, 0x54, 0x55, 0xfd, 0xb6, 0xc3, 0x92, 0x9d, 0xef,
0xe9, 0xba, 0xb7, 0xee, 0xb9, 0xf7, 0xd4, 0x31, 0x1c, 0x11, 0xee, 0x07, 0x5e, 0x44, 0x38, 0x3e,
0x4d, 0xb3, 0x84, 0x27, 0x68, 0x1e, 0xd1, 0xf0, 0x6e, 0xc7, 0x54, 0x74, 0x2a, 0xbe, 0x9e, 0x8c,
0xfd, 0x24, 0x8a, 0x92, 0x58, 0x41, 0x27, 0x63, 0xe6, 0x6f, 0x49, 0x94, 0x1f, 0x5f, 0xfc, 0xa5,
0x81, 0xb1, 0x8c, 0x03, 0xf2, 0xeb, 0x32, 0xbe, 0x4d, 0xd0, 0x73, 0x00, 0x2a, 0x02, 0x2f, 0xc6,
0x11, 0x31, 0x35, 0x4b, 0xb3, 0x0d, 0xd7, 0x90, 0xc8, 0x5b, 0x1c, 0x11, 0x64, 0xc2, 0x40, 0x06,
0x4b, 0xc7, 0xec, 0x58, 0x9a, 0xad, 0xbb, 0x45, 0x88, 0x1c, 0x18, 0xab, 0xc4, 0x14, 0x67, 0x38,
0x62, 0xa6, 0x6e, 0xe9, 0xf6, 0xe8, 0xfc, 0xc5, 0x69, 0xa3, 0x99, 0xbc, 0x8d, 0xef, 0xc9, 0xfd,
0x0d, 0x0e, 0x77, 0x64, 0x85, 0x69, 0xe6, 0x8e, 0x64, 0xda, 0x4a, 0x66, 0x89, 0xfa, 0x01, 0x09,
0x09, 0x27, 0x81, 0xd9, 0xb5, 0x34, 0x7b, 0xe8, 0x16, 0x21, 0x7a, 0x1f, 0x46, 0x7e, 0x46, 0x30,
0x27, 0x1e, 0xa7, 0x11, 0x31, 0x7b, 0x96, 0x66, 0x77, 0x5d, 0x50, 0xd0, 0x35, 0x8d, 0xc8, 0xc2,
0x81, 0xe9, 0x1b, 0x4a, 0xc2, 0xa0, 0xe2, 0x62, 0xc2, 0xe0, 0x96, 0x86, 0x24, 0x58, 0x3a, 0x92,
0x88, 0xee, 0x16, 0xe1, 0xc3, 0x34, 0x16, 0x7f, 0xf6, 0x60, 0x7a, 0x99, 0x84, 0x21, 0xf1, 0x39,
0x4d, 0x62, 0x59, 0x66, 0x0a, 0x9d, 0xb2, 0x42, 0x67, 0xe9, 0xa0, 0xaf, 0xa0, 0xaf, 0x06, 0x28,
0x73, 0x47, 0xe7, 0x2f, 0x9b, 0x1c, 0xf3, 0xe1, 0x56, 0x45, 0xae, 0x24, 0xe0, 0xe6, 0x49, 0x6d,
0x22, 0x7a, 0x9b, 0x08, 0x5a, 0xc0, 0x38, 0xc5, 0x19, 0xa7, 0xb2, 0x01, 0x87, 0x99, 0x5d, 0x4b,
0xb7, 0x75, 0xb7, 0x81, 0xa1, 0x8f, 0x60, 0x5a, 0xc6, 0x62, 0x31, 0xcc, 0xec, 0x59, 0xba, 0x6d,
0xb8, 0x2d, 0x14, 0xbd, 0x81, 0xc9, 0xad, 0x18, 0x8a, 0x27, 0xf9, 0x11, 0x66, 0xf6, 0x0f, 0xad,
0x45, 0x68, 0xe4, 0xb4, 0x39, 0x3c, 0x77, 0x7c, 0x5b, 0xc6, 0x84, 0xa1, 0x73, 0x78, 0x72, 0x47,
0x33, 0xbe, 0xc3, 0xa1, 0xe7, 0x6f, 0x71, 0x1c, 0x93, 0x50, 0x0a, 0x84, 0x99, 0x03, 0x79, 0xed,
0x3b, 0xf9, 0xc7, 0x4b, 0xf5, 0x4d, 0xdd, 0xfd, 0x29, 0x1c, 0xa7, 0xdb, 0x7b, 0x46, 0xfd, 0xbd,
0xa4, 0xa1, 0x4c, 0x7a, 0x5c, 0x7c, 0x6d, 0x64, 0x7d, 0x03, 0xcf, 0x4a, 0x0e, 0x9e, 0x9a, 0x4a,
0x20, 0x27, 0xc5, 0x38, 0x8e, 0x52, 0x66, 0x1a, 0x96, 0x6e, 0x77, 0xdd, 0x93, 0xf2, 0xcc, 0xa5,
0x3a, 0x72, 0x5d, 0x9e, 0x10, 0x12, 0x66, 0x5b, 0x9c, 0x05, 0xcc, 0x8b, 0x77, 0x91, 0x09, 0x96,
0x66, 0xf7, 0x5c, 0x43, 0x21, 0x6f, 0x77, 0x11, 0x5a, 0xc2, 0x11, 0xe3, 0x38, 0xe3, 0x5e, 0x9a,
0x30, 0x59, 0x81, 0x99, 0x23, 0x39, 0x14, 0xeb, 0x21, 0xad, 0x3a, 0x98, 0x63, 0x29, 0xd5, 0xa9,
0x4c, 0x5c, 0x15, 0x79, 0xc8, 0x85, 0xb9, 0x9f, 0xc4, 0x8c, 0x32, 0x4e, 0x62, 0xff, 0xde, 0x0b,
0xc9, 0x1d, 0x09, 0xcd, 0xb1, 0xa5, 0xd9, 0xd3, 0xb6, 0x28, 0xf2, 0x62, 0x97, 0xd5, 0xe9, 0x1f,
0xc4, 0x61, 0x77, 0xe6, 0xb7, 0x10, 0xf4, 0x25, 0xf4, 0x18, 0xc7, 0x9c, 0x98, 0x13, 0x59, 0x67,
0x71, 0x60, 0x53, 0x35, 0x69, 0x89, 0x93, 0xae, 0x4a, 0x58, 0xfc, 0xa3, 0xc1, 0x64, 0x55, 0x8a,
0x44, 0x28, 0xd7, 0x82, 0x51, 0x4d, 0x35, 0xb9, 0x84, 0xeb, 0x10, 0xfa, 0x10, 0x26, 0x0d, 0xc5,
0x48, 0x49, 0x1b, 0x6e, 0x13, 0x44, 0x5f, 0xc3, 0xbb, 0xff, 0xb1, 0x93, 0x5c, 0xc2, 0x4f, 0x1f,
0x5c, 0x09, 0xfa, 0x00, 0x26, 0x7e, 0xd9, 0xb3, 0x47, 0xd5, 0xdb, 0xd6, 0xdd, 0x71, 0x05, 0x2e,
0x03, 0xf4, 0x45, 0x41, 0xbc, 0x27, 0x89, 0x1f, 0x92, 0x68, 0xc9, 0xae, 0xc1, 0xfb, 0x77, 0x0d,
0x8c, 0x6f, 0x43, 0x8a, 0x59, 0x61, 0x60, 0x58, 0x04, 0x0d, 0x03, 0x93, 0x88, 0xa4, 0xb2, 0xd7,
0x4a, 0xe7, 0x40, 0x2b, 0x2f, 0x60, 0x5c, 0x67, 0x99, 0x13, 0xcc, 0x9f, 0xad, 0xe4, 0x85, 0x2e,
0x8a, 0x6e, 0xbb, 0xb2, 0xdb, 0xe7, 0x07, 0xba, 0x95, 0x3d, 0x35, 0x3a, 0xfd, 0xad, 0x03, 0xb3,
0x2b, 0xb2, 0x89, 0x48, 0xcc, 0x2b, 0x97, 0x5a, 0x40, 0xfd, 0xf2, 0x62, 0x4b, 0x0d, 0xac, 0xbd,
0xc8, 0xce, 0xfe, 0x22, 0x9f, 0x81, 0xc1, 0xf2, 0xca, 0x8e, 0xec, 0x57, 0x77, 0x2b, 0x40, 0x39,
0xa1, 0x78, 0xce, 0x4e, 0x3e, 0xfa, 0x22, 0xac, 0x3b, 0x61, 0xaf, 0x69, 0xe8, 0x26, 0x0c, 0xd6,
0x3b, 0x2a, 0x73, 0xfa, 0xea, 0x4b, 0x1e, 0x8a, 0xf1, 0x90, 0x18, 0xaf, 0x43, 0xa2, 0x5c, 0xc5,
0x1c, 0x48, 0xa7, 0x1e, 0x29, 0x4c, 0x12, 0x6b, 0x9b, 0xdc, 0x70, 0xcf, 0xad, 0xff, 0xd6, 0xea,
0x3e, 0xfb, 0x23, 0xe1, 0xf8, 0x7f, 0xf7, 0xd9, 0xf7, 0x00, 0xca, 0x09, 0x15, 0x2e, 0x5b, 0x43,
0xd0, 0xcb, 0x9a, 0xc7, 0x7a, 0x1c, 0x6f, 0x0a, 0x8f, 0xad, 0x1e, 0xc7, 0x35, 0xde, 0xb0, 0x3d,
0xbb, 0xee, 0xef, 0xdb, 0xf5, 0xe2, 0x0f, 0xc1, 0x36, 0x23, 0x01, 0x89, 0x39, 0xc5, 0xa1, 0x5c,
0xfb, 0x09, 0x0c, 0x77, 0x8c, 0x64, 0x35, 0x95, 0x96, 0x31, 0x7a, 0x0d, 0x88, 0xc4, 0x7e, 0x76,
0x9f, 0x0a, 0x05, 0xa6, 0x98, 0xb1, 0x5f, 0x92, 0x2c, 0xc8, 0x9f, 0xe6, 0xbc, 0xfc, 0xb2, 0xca,
0x3f, 0xa0, 0x63, 0xe8, 0x73, 0x12, 0xe3, 0x98, 0x4b, 0x92, 0x86, 0x9b, 0x47, 0xe8, 0x29, 0x0c,
0x29, 0xf3, 0xd8, 0x2e, 0x25, 0x59, 0xf1, 0x6f, 0x4a, 0xd9, 0x95, 0x08, 0xd1, 0xc7, 0x70, 0xc4,
0xb6, 0xf8, 0xfc, 0xb3, 0xcf, 0xab, 0xf2, 0x3d, 0x99, 0x3b, 0x55, 0x70, 0x51, 0xfb, 0x55, 0x02,
0x47, 0x2d, 0xbb, 0x41, 0x4f, 0x60, 0x5e, 0x41, 0xf9, 0x5b, 0x9f, 0x3d, 0x42, 0xc7, 0x80, 0x5a,
0x30, 0x8d, 0x37, 0x33, 0xad, 0x89, 0x3b, 0x59, 0x92, 0xa6, 0x02, 0xef, 0x34, 0xcb, 0x48, 0x9c,
0x04, 0x33, 0xfd, 0xd5, 0xcf, 0x30, 0x6d, 0x3e, 0x73, 0xf4, 0x18, 0x66, 0xab, 0x96, 0xb5, 0xcc,
0x1e, 0x89, 0xf4, 0x26, 0xaa, 0x6e, 0xab, 0xc3, 0xb5, 0xcb, 0xea, 0x35, 0xaa, 0xbb, 0x6e, 0x00,
0xaa, 0x47, 0x8a, 0x66, 0x30, 0x96, 0x51, 0x75, 0xc7, 0x1c, 0x26, 0x15, 0xa2, 0xea, 0x17, 0x50,
0xad, 0x76, 0x91, 0x57, 0xd6, 0xfd, 0xee, 0xe2, 0xa7, 0x4f, 0x36, 0x94, 0x6f, 0x77, 0x6b, 0xe1,
0xfb, 0x67, 0x4a, 0xb5, 0xaf, 0x69, 0x92, 0xff, 0x3a, 0xa3, 0x31, 0x17, 0x8b, 0x0e, 0xcf, 0xa4,
0x90, 0xcf, 0x84, 0x59, 0xa4, 0xeb, 0x75, 0x5f, 0x46, 0x17, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff,
0x43, 0x11, 0x13, 0x31, 0xd0, 0x09, 0x00, 0x00,
}

View File

@ -14,11 +14,6 @@ service Proxy {
rpc InvalidateCollectionMetaCache(InvalidateCollMetaCacheRequest) returns (common.Status) {}
rpc GetDdChannel(internal.GetDdChannelRequest) returns (milvus.StringResponse) {}
rpc ReleaseDQLMessageStream(ReleaseDQLMessageStreamRequest) returns (common.Status) {}
rpc SendSearchResult(internal.SearchResults) returns (common.Status) {}
rpc SendRetrieveResult(internal.RetrieveResults) returns (common.Status) {}
rpc InvalidateCredentialCache(InvalidateCredCacheRequest) returns (common.Status) {}
rpc UpdateCredentialCache(UpdateCredCacheRequest) returns (common.Status) {}
@ -32,12 +27,6 @@ message InvalidateCollMetaCacheRequest {
int64 collectionID = 4;
}
message ReleaseDQLMessageStreamRequest {
common.MsgBase base = 1;
int64 dbID = 2;
int64 collectionID = 3;
}
message InvalidateCredCacheRequest {
common.MsgBase base = 1;
string username = 2;

View File

@ -90,61 +90,6 @@ func (m *InvalidateCollMetaCacheRequest) GetCollectionID() int64 {
return 0
}
type ReleaseDQLMessageStreamRequest struct {
Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
DbID int64 `protobuf:"varint,2,opt,name=dbID,proto3" json:"dbID,omitempty"`
CollectionID int64 `protobuf:"varint,3,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReleaseDQLMessageStreamRequest) Reset() { *m = ReleaseDQLMessageStreamRequest{} }
func (m *ReleaseDQLMessageStreamRequest) String() string { return proto.CompactTextString(m) }
func (*ReleaseDQLMessageStreamRequest) ProtoMessage() {}
func (*ReleaseDQLMessageStreamRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_700b50b08ed8dbaf, []int{1}
}
func (m *ReleaseDQLMessageStreamRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ReleaseDQLMessageStreamRequest.Unmarshal(m, b)
}
func (m *ReleaseDQLMessageStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ReleaseDQLMessageStreamRequest.Marshal(b, m, deterministic)
}
func (m *ReleaseDQLMessageStreamRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReleaseDQLMessageStreamRequest.Merge(m, src)
}
func (m *ReleaseDQLMessageStreamRequest) XXX_Size() int {
return xxx_messageInfo_ReleaseDQLMessageStreamRequest.Size(m)
}
func (m *ReleaseDQLMessageStreamRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ReleaseDQLMessageStreamRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ReleaseDQLMessageStreamRequest proto.InternalMessageInfo
func (m *ReleaseDQLMessageStreamRequest) GetBase() *commonpb.MsgBase {
if m != nil {
return m.Base
}
return nil
}
func (m *ReleaseDQLMessageStreamRequest) GetDbID() int64 {
if m != nil {
return m.DbID
}
return 0
}
func (m *ReleaseDQLMessageStreamRequest) GetCollectionID() int64 {
if m != nil {
return m.CollectionID
}
return 0
}
type InvalidateCredCacheRequest struct {
Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
@ -157,7 +102,7 @@ func (m *InvalidateCredCacheRequest) Reset() { *m = InvalidateCredCacheR
func (m *InvalidateCredCacheRequest) String() string { return proto.CompactTextString(m) }
func (*InvalidateCredCacheRequest) ProtoMessage() {}
func (*InvalidateCredCacheRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_700b50b08ed8dbaf, []int{2}
return fileDescriptor_700b50b08ed8dbaf, []int{1}
}
func (m *InvalidateCredCacheRequest) XXX_Unmarshal(b []byte) error {
@ -206,7 +151,7 @@ func (m *UpdateCredCacheRequest) Reset() { *m = UpdateCredCacheRequest{}
func (m *UpdateCredCacheRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateCredCacheRequest) ProtoMessage() {}
func (*UpdateCredCacheRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_700b50b08ed8dbaf, []int{3}
return fileDescriptor_700b50b08ed8dbaf, []int{2}
}
func (m *UpdateCredCacheRequest) XXX_Unmarshal(b []byte) error {
@ -261,7 +206,7 @@ func (m *RefreshPolicyInfoCacheRequest) Reset() { *m = RefreshPolicyInfo
func (m *RefreshPolicyInfoCacheRequest) String() string { return proto.CompactTextString(m) }
func (*RefreshPolicyInfoCacheRequest) ProtoMessage() {}
func (*RefreshPolicyInfoCacheRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_700b50b08ed8dbaf, []int{4}
return fileDescriptor_700b50b08ed8dbaf, []int{3}
}
func (m *RefreshPolicyInfoCacheRequest) XXX_Unmarshal(b []byte) error {
@ -305,7 +250,6 @@ func (m *RefreshPolicyInfoCacheRequest) GetOpKey() string {
func init() {
proto.RegisterType((*InvalidateCollMetaCacheRequest)(nil), "milvus.proto.proxy.InvalidateCollMetaCacheRequest")
proto.RegisterType((*ReleaseDQLMessageStreamRequest)(nil), "milvus.proto.proxy.ReleaseDQLMessageStreamRequest")
proto.RegisterType((*InvalidateCredCacheRequest)(nil), "milvus.proto.proxy.InvalidateCredCacheRequest")
proto.RegisterType((*UpdateCredCacheRequest)(nil), "milvus.proto.proxy.UpdateCredCacheRequest")
proto.RegisterType((*RefreshPolicyInfoCacheRequest)(nil), "milvus.proto.proxy.RefreshPolicyInfoCacheRequest")
@ -314,45 +258,39 @@ func init() {
func init() { proto.RegisterFile("proxy.proto", fileDescriptor_700b50b08ed8dbaf) }
var fileDescriptor_700b50b08ed8dbaf = []byte{
// 603 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xdd, 0x6e, 0xd3, 0x30,
0x14, 0x5e, 0xe8, 0x36, 0xc6, 0x59, 0x35, 0x90, 0x35, 0xb6, 0x11, 0xd8, 0x34, 0x05, 0x04, 0xd3,
0x24, 0xda, 0x51, 0x78, 0x82, 0xb5, 0xd2, 0x54, 0xc1, 0xd0, 0x48, 0x41, 0x48, 0x70, 0x81, 0x9c,
0xe4, 0xac, 0xf5, 0xe4, 0xd8, 0x59, 0xec, 0x0c, 0x7a, 0x85, 0xc4, 0x25, 0xaf, 0xc1, 0x4b, 0xf0,
0x78, 0x28, 0x3f, 0x4b, 0x9b, 0x36, 0x69, 0x04, 0x13, 0x77, 0xfe, 0xec, 0xcf, 0xe7, 0xfb, 0x8e,
0x8f, 0xcf, 0x81, 0xf5, 0x20, 0x94, 0xdf, 0xc6, 0xad, 0x20, 0x94, 0x5a, 0x12, 0xe2, 0x33, 0x7e,
0x15, 0xa9, 0x14, 0xb5, 0x92, 0x13, 0xb3, 0xe9, 0x4a, 0xdf, 0x97, 0x22, 0xdd, 0x33, 0x37, 0x98,
0xd0, 0x18, 0x0a, 0xca, 0x33, 0xdc, 0x9c, 0xbe, 0x61, 0xfd, 0x36, 0x60, 0xaf, 0x2f, 0xae, 0x28,
0x67, 0x1e, 0xd5, 0xd8, 0x95, 0x9c, 0x9f, 0xa2, 0xa6, 0x5d, 0xea, 0x8e, 0xd0, 0xc6, 0xcb, 0x08,
0x95, 0x26, 0x47, 0xb0, 0xec, 0x50, 0x85, 0x3b, 0xc6, 0xbe, 0x71, 0xb0, 0xde, 0x79, 0xd4, 0x2a,
0x28, 0x66, 0x52, 0xa7, 0x6a, 0x78, 0x4c, 0x15, 0xda, 0x09, 0x93, 0x6c, 0xc3, 0x6d, 0xcf, 0xf9,
0x22, 0xa8, 0x8f, 0x3b, 0xb7, 0xf6, 0x8d, 0x83, 0x3b, 0xf6, 0xaa, 0xe7, 0xbc, 0xa5, 0x3e, 0x92,
0x67, 0x70, 0xd7, 0x95, 0x9c, 0xa3, 0xab, 0x99, 0x14, 0x29, 0xa1, 0x91, 0x10, 0x36, 0x26, 0xdb,
0x09, 0xd1, 0x82, 0xe6, 0x64, 0xa7, 0xdf, 0xdb, 0x59, 0xde, 0x37, 0x0e, 0x1a, 0x76, 0x61, 0xcf,
0xfa, 0x69, 0xc0, 0x9e, 0x8d, 0x1c, 0xa9, 0xc2, 0xde, 0xbb, 0x37, 0xa7, 0xa8, 0x14, 0x1d, 0xe2,
0x40, 0x87, 0x48, 0xfd, 0x7f, 0xb7, 0x4e, 0x60, 0xd9, 0x73, 0xfa, 0xbd, 0xc4, 0x77, 0xc3, 0x4e,
0xd6, 0x73, 0x66, 0x1a, 0x25, 0x66, 0x2e, 0xc0, 0x9c, 0x7a, 0xc6, 0x10, 0xbd, 0x1b, 0x3e, 0xa1,
0x09, 0x6b, 0x91, 0x8a, 0xcb, 0x96, 0xbf, 0x61, 0x8e, 0xad, 0x1f, 0x06, 0x6c, 0x7d, 0x08, 0xfe,
0xbf, 0x50, 0x7c, 0x16, 0x50, 0xa5, 0xbe, 0xca, 0xd0, 0xcb, 0xea, 0x94, 0x63, 0xeb, 0x3b, 0xec,
0xda, 0x78, 0x1e, 0xa2, 0x1a, 0x9d, 0x49, 0xce, 0xdc, 0x71, 0x5f, 0x9c, 0xcb, 0x1b, 0x5a, 0xd9,
0x82, 0x55, 0x19, 0xbc, 0x1f, 0x07, 0xa9, 0x91, 0x15, 0x3b, 0x43, 0x64, 0x13, 0x56, 0x64, 0xf0,
0x1a, 0xc7, 0x99, 0x87, 0x14, 0x74, 0x7e, 0xad, 0xc1, 0xca, 0x59, 0xfc, 0xdf, 0x49, 0x00, 0xe4,
0x04, 0x75, 0x57, 0xfa, 0x81, 0x14, 0x28, 0xf4, 0x40, 0x53, 0x8d, 0x8a, 0x1c, 0x15, 0x15, 0xf3,
0x2e, 0x98, 0xa7, 0x66, 0x8e, 0xcd, 0xa7, 0x15, 0x37, 0x66, 0xe8, 0xd6, 0x12, 0xb9, 0x84, 0xcd,
0x13, 0x4c, 0x20, 0x53, 0x9a, 0xb9, 0xaa, 0x3b, 0xa2, 0x42, 0x20, 0x27, 0x9d, 0x6a, 0xcd, 0x39,
0xf2, 0xb5, 0xea, 0xe3, 0xe2, 0x9d, 0x0c, 0x0c, 0x74, 0xc8, 0xc4, 0xd0, 0x46, 0x15, 0x48, 0xa1,
0xd0, 0x5a, 0x22, 0x21, 0xec, 0x16, 0xfb, 0x34, 0xfd, 0x7a, 0x79, 0xb7, 0xce, 0x6a, 0xa7, 0x43,
0x62, 0x71, 0x6b, 0x9b, 0x0f, 0x4b, 0xab, 0x12, 0x5b, 0x8d, 0xe2, 0x34, 0x29, 0x34, 0x4f, 0x50,
0xf7, 0xbc, 0xeb, 0xf4, 0x0e, 0xab, 0xd3, 0xcb, 0x49, 0x7f, 0x99, 0x16, 0x87, 0xed, 0x8a, 0x1e,
0x2e, 0x4f, 0x68, 0x71, 0xc3, 0xd7, 0x25, 0xf4, 0x11, 0xee, 0x0d, 0x50, 0x78, 0x03, 0xa4, 0xa1,
0x3b, 0xb2, 0x51, 0x45, 0x5c, 0x93, 0x27, 0x15, 0x49, 0x4d, 0x93, 0x54, 0x5d, 0xe0, 0xcf, 0x40,
0xe2, 0xc0, 0x36, 0xea, 0x90, 0xe1, 0x15, 0x66, 0xa1, 0xab, 0x3e, 0x54, 0x91, 0x56, 0x1b, 0xfc,
0x02, 0x1e, 0x14, 0x67, 0x0b, 0x0a, 0xcd, 0x28, 0x4f, 0xcb, 0xde, 0xaa, 0x29, 0xfb, 0xcc, 0x84,
0xa8, 0xd3, 0x72, 0xe0, 0xfe, 0x64, 0xb4, 0x4c, 0xeb, 0x1c, 0x96, 0xe9, 0x94, 0x4f, 0xa1, 0xfa,
0x7c, 0xb6, 0xca, 0x47, 0x07, 0x79, 0x51, 0x5e, 0xf2, 0x05, 0x63, 0xa6, 0x46, 0xeb, 0xf8, 0xd5,
0xa7, 0xce, 0x90, 0xe9, 0x51, 0xe4, 0xc4, 0x27, 0xed, 0x94, 0xfa, 0x9c, 0xc9, 0x6c, 0xd5, 0xbe,
0x2e, 0x49, 0x3b, 0xb9, 0xdd, 0x4e, 0x04, 0x03, 0xc7, 0x59, 0x4d, 0xe0, 0xcb, 0x3f, 0x01, 0x00,
0x00, 0xff, 0xff, 0x61, 0xb0, 0x0e, 0xba, 0x6b, 0x07, 0x00, 0x00,
// 504 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0x5d, 0x6f, 0xd3, 0x30,
0x14, 0x5d, 0xd8, 0x5a, 0xe0, 0xae, 0x1a, 0x92, 0x35, 0x4a, 0x09, 0x0c, 0x55, 0x41, 0x82, 0x6a,
0x12, 0xed, 0x28, 0xfc, 0x82, 0x75, 0x52, 0x55, 0xa1, 0xa1, 0x29, 0x83, 0x17, 0x5e, 0x90, 0x93,
0xdc, 0xb5, 0xae, 0x1c, 0xdb, 0x8b, 0x9d, 0x41, 0x9f, 0x90, 0xf8, 0x55, 0xfc, 0x3b, 0x50, 0x3e,
0x9a, 0x36, 0x5d, 0xba, 0x0a, 0x55, 0x7b, 0xcb, 0xb1, 0xcf, 0xd5, 0x39, 0x27, 0xbe, 0x07, 0xf6,
0x55, 0x24, 0x7f, 0xce, 0xba, 0x2a, 0x92, 0x46, 0x12, 0x12, 0x32, 0x7e, 0x13, 0xeb, 0x0c, 0x75,
0xd3, 0x1b, 0xbb, 0xe1, 0xcb, 0x30, 0x94, 0x22, 0x3b, 0xb3, 0x0f, 0x98, 0x30, 0x18, 0x09, 0xca,
0x73, 0xdc, 0x58, 0x9e, 0x70, 0xfe, 0x58, 0xf0, 0x6a, 0x24, 0x6e, 0x28, 0x67, 0x01, 0x35, 0x38,
0x90, 0x9c, 0x9f, 0xa3, 0xa1, 0x03, 0xea, 0x4f, 0xd0, 0xc5, 0xeb, 0x18, 0xb5, 0x21, 0x27, 0xb0,
0xe7, 0x51, 0x8d, 0x2d, 0xab, 0x6d, 0x75, 0xf6, 0xfb, 0x2f, 0xbb, 0x25, 0xc5, 0x5c, 0xea, 0x5c,
0x8f, 0x4f, 0xa9, 0x46, 0x37, 0x65, 0x92, 0x67, 0xf0, 0x30, 0xf0, 0xbe, 0x0b, 0x1a, 0x62, 0xeb,
0x41, 0xdb, 0xea, 0x3c, 0x76, 0xeb, 0x81, 0xf7, 0x99, 0x86, 0x48, 0xde, 0xc2, 0x13, 0x5f, 0x72,
0x8e, 0xbe, 0x61, 0x52, 0x64, 0x84, 0xdd, 0x94, 0x70, 0xb0, 0x38, 0x4e, 0x89, 0x0e, 0x34, 0x16,
0x27, 0xa3, 0xb3, 0xd6, 0x5e, 0xdb, 0xea, 0xec, 0xba, 0xa5, 0x33, 0x67, 0x0a, 0xf6, 0x92, 0xf3,
0x08, 0x83, 0x2d, 0x5d, 0xdb, 0xf0, 0x28, 0xd6, 0xc9, 0x9f, 0x2a, 0x6c, 0x17, 0xd8, 0xf9, 0x6d,
0x41, 0xf3, 0xab, 0xba, 0x7f, 0xa1, 0xe4, 0x4e, 0x51, 0xad, 0x7f, 0xc8, 0x28, 0xc8, 0x7f, 0x4d,
0x81, 0x9d, 0x5f, 0x70, 0xe4, 0xe2, 0x55, 0x84, 0x7a, 0x72, 0x21, 0x39, 0xf3, 0x67, 0x23, 0x71,
0x25, 0xb7, 0xb4, 0xd2, 0x84, 0xba, 0x54, 0x5f, 0x66, 0x2a, 0x33, 0x52, 0x73, 0x73, 0x44, 0x0e,
0xa1, 0x26, 0xd5, 0x27, 0x9c, 0xe5, 0x1e, 0x32, 0xd0, 0xff, 0x5b, 0x83, 0xda, 0x45, 0xb2, 0x62,
0x44, 0x01, 0x19, 0xa2, 0x19, 0xc8, 0x50, 0x49, 0x81, 0xc2, 0x5c, 0x1a, 0x6a, 0x50, 0x93, 0x93,
0xb2, 0x62, 0xb1, 0x78, 0xb7, 0xa9, 0xb9, 0x63, 0xfb, 0xcd, 0x9a, 0x89, 0x15, 0xba, 0xb3, 0x43,
0xae, 0xe1, 0x70, 0x88, 0x29, 0x64, 0xda, 0x30, 0x5f, 0x0f, 0x26, 0x54, 0x08, 0xe4, 0xa4, 0xbf,
0x5e, 0xf3, 0x16, 0x79, 0xae, 0xfa, 0xba, 0x3c, 0x93, 0x83, 0x4b, 0x13, 0x31, 0x31, 0x76, 0x51,
0x2b, 0x29, 0x34, 0x3a, 0x3b, 0x24, 0x82, 0xa3, 0x72, 0x35, 0xb2, 0xd5, 0x2b, 0x0a, 0xb2, 0xaa,
0x9d, 0xf5, 0xf2, 0xee, 0x36, 0xd9, 0x2f, 0x2a, 0x5f, 0x25, 0xb1, 0x1a, 0x27, 0x31, 0x29, 0x34,
0x86, 0x68, 0xce, 0x82, 0x79, 0xbc, 0xe3, 0xf5, 0xf1, 0x0a, 0xd2, 0x7f, 0xc6, 0x9a, 0xc2, 0xf3,
0x72, 0x6f, 0x50, 0x18, 0x46, 0x79, 0x16, 0xa9, 0xbb, 0x21, 0xd2, 0xca, 0xf6, 0x6f, 0x8a, 0xe3,
0xc1, 0xd3, 0x45, 0x6d, 0x96, 0x75, 0x8e, 0xab, 0x74, 0xaa, 0x1b, 0xb6, 0x49, 0x63, 0x0a, 0xcd,
0xea, 0x5a, 0x90, 0xf7, 0x55, 0x22, 0x77, 0x56, 0x68, 0x83, 0xd6, 0xe9, 0xc7, 0x6f, 0xfd, 0x31,
0x33, 0x93, 0xd8, 0x4b, 0x6e, 0x7a, 0x19, 0xf5, 0x1d, 0x93, 0xf9, 0x57, 0x6f, 0xfe, 0x3c, 0xbd,
0x74, 0xba, 0x97, 0x0a, 0x2a, 0xcf, 0xab, 0xa7, 0xf0, 0xc3, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff,
0x33, 0xad, 0xd0, 0x9b, 0xba, 0x05, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -371,9 +309,6 @@ type ProxyClient interface {
GetStatisticsChannel(ctx context.Context, in *internalpb.GetStatisticsChannelRequest, opts ...grpc.CallOption) (*milvuspb.StringResponse, error)
InvalidateCollectionMetaCache(ctx context.Context, in *InvalidateCollMetaCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
GetDdChannel(ctx context.Context, in *internalpb.GetDdChannelRequest, opts ...grpc.CallOption) (*milvuspb.StringResponse, error)
ReleaseDQLMessageStream(ctx context.Context, in *ReleaseDQLMessageStreamRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
SendSearchResult(ctx context.Context, in *internalpb.SearchResults, opts ...grpc.CallOption) (*commonpb.Status, error)
SendRetrieveResult(ctx context.Context, in *internalpb.RetrieveResults, opts ...grpc.CallOption) (*commonpb.Status, error)
InvalidateCredentialCache(ctx context.Context, in *InvalidateCredCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
UpdateCredentialCache(ctx context.Context, in *UpdateCredCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
RefreshPolicyInfoCache(ctx context.Context, in *RefreshPolicyInfoCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
@ -423,33 +358,6 @@ func (c *proxyClient) GetDdChannel(ctx context.Context, in *internalpb.GetDdChan
return out, nil
}
func (c *proxyClient) ReleaseDQLMessageStream(ctx context.Context, in *ReleaseDQLMessageStreamRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
out := new(commonpb.Status)
err := c.cc.Invoke(ctx, "/milvus.proto.proxy.Proxy/ReleaseDQLMessageStream", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *proxyClient) SendSearchResult(ctx context.Context, in *internalpb.SearchResults, opts ...grpc.CallOption) (*commonpb.Status, error) {
out := new(commonpb.Status)
err := c.cc.Invoke(ctx, "/milvus.proto.proxy.Proxy/SendSearchResult", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *proxyClient) SendRetrieveResult(ctx context.Context, in *internalpb.RetrieveResults, opts ...grpc.CallOption) (*commonpb.Status, error) {
out := new(commonpb.Status)
err := c.cc.Invoke(ctx, "/milvus.proto.proxy.Proxy/SendRetrieveResult", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *proxyClient) InvalidateCredentialCache(ctx context.Context, in *InvalidateCredCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
out := new(commonpb.Status)
err := c.cc.Invoke(ctx, "/milvus.proto.proxy.Proxy/InvalidateCredentialCache", in, out, opts...)
@ -483,9 +391,6 @@ type ProxyServer interface {
GetStatisticsChannel(context.Context, *internalpb.GetStatisticsChannelRequest) (*milvuspb.StringResponse, error)
InvalidateCollectionMetaCache(context.Context, *InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
GetDdChannel(context.Context, *internalpb.GetDdChannelRequest) (*milvuspb.StringResponse, error)
ReleaseDQLMessageStream(context.Context, *ReleaseDQLMessageStreamRequest) (*commonpb.Status, error)
SendSearchResult(context.Context, *internalpb.SearchResults) (*commonpb.Status, error)
SendRetrieveResult(context.Context, *internalpb.RetrieveResults) (*commonpb.Status, error)
InvalidateCredentialCache(context.Context, *InvalidateCredCacheRequest) (*commonpb.Status, error)
UpdateCredentialCache(context.Context, *UpdateCredCacheRequest) (*commonpb.Status, error)
RefreshPolicyInfoCache(context.Context, *RefreshPolicyInfoCacheRequest) (*commonpb.Status, error)
@ -507,15 +412,6 @@ func (*UnimplementedProxyServer) InvalidateCollectionMetaCache(ctx context.Conte
func (*UnimplementedProxyServer) GetDdChannel(ctx context.Context, req *internalpb.GetDdChannelRequest) (*milvuspb.StringResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetDdChannel not implemented")
}
func (*UnimplementedProxyServer) ReleaseDQLMessageStream(ctx context.Context, req *ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
return nil, status.Errorf(codes.Unimplemented, "method ReleaseDQLMessageStream not implemented")
}
func (*UnimplementedProxyServer) SendSearchResult(ctx context.Context, req *internalpb.SearchResults) (*commonpb.Status, error) {
return nil, status.Errorf(codes.Unimplemented, "method SendSearchResult not implemented")
}
func (*UnimplementedProxyServer) SendRetrieveResult(ctx context.Context, req *internalpb.RetrieveResults) (*commonpb.Status, error) {
return nil, status.Errorf(codes.Unimplemented, "method SendRetrieveResult not implemented")
}
func (*UnimplementedProxyServer) InvalidateCredentialCache(ctx context.Context, req *InvalidateCredCacheRequest) (*commonpb.Status, error) {
return nil, status.Errorf(codes.Unimplemented, "method InvalidateCredentialCache not implemented")
}
@ -602,60 +498,6 @@ func _Proxy_GetDdChannel_Handler(srv interface{}, ctx context.Context, dec func(
return interceptor(ctx, in, info, handler)
}
func _Proxy_ReleaseDQLMessageStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReleaseDQLMessageStreamRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ProxyServer).ReleaseDQLMessageStream(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.proxy.Proxy/ReleaseDQLMessageStream",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ProxyServer).ReleaseDQLMessageStream(ctx, req.(*ReleaseDQLMessageStreamRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Proxy_SendSearchResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(internalpb.SearchResults)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ProxyServer).SendSearchResult(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.proxy.Proxy/SendSearchResult",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ProxyServer).SendSearchResult(ctx, req.(*internalpb.SearchResults))
}
return interceptor(ctx, in, info, handler)
}
func _Proxy_SendRetrieveResult_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(internalpb.RetrieveResults)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ProxyServer).SendRetrieveResult(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.proxy.Proxy/SendRetrieveResult",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ProxyServer).SendRetrieveResult(ctx, req.(*internalpb.RetrieveResults))
}
return interceptor(ctx, in, info, handler)
}
func _Proxy_InvalidateCredentialCache_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InvalidateCredCacheRequest)
if err := dec(in); err != nil {
@ -730,18 +572,6 @@ var _Proxy_serviceDesc = grpc.ServiceDesc{
MethodName: "GetDdChannel",
Handler: _Proxy_GetDdChannel_Handler,
},
{
MethodName: "ReleaseDQLMessageStream",
Handler: _Proxy_ReleaseDQLMessageStream_Handler,
},
{
MethodName: "SendSearchResult",
Handler: _Proxy_SendSearchResult_Handler,
},
{
MethodName: "SendRetrieveResult",
Handler: _Proxy_SendRetrieveResult_Handler,
},
{
MethodName: "InvalidateCredentialCache",
Handler: _Proxy_InvalidateCredentialCache_Handler,

View File

@ -103,7 +103,6 @@ service RootCoord {
rpc AllocTimestamp(AllocTimestampRequest) returns (AllocTimestampResponse) {}
rpc AllocID(AllocIDRequest) returns (AllocIDResponse) {}
rpc UpdateChannelTimeTick(internal.ChannelTimeTickMsg) returns (common.Status) {}
rpc ReleaseDQLMessageStream(proxy.ReleaseDQLMessageStreamRequest) returns (common.Status) {}
rpc InvalidateCollectionMetaCache(proxy.InvalidateCollMetaCacheRequest) returns (common.Status) {}
// rpc SegmentFlushCompleted(data.SegmentFlushCompletedMsg) returns (common.Status) {}
@ -209,4 +208,5 @@ message GetCredentialResponse {
string username = 2;
// password stored in etcd/mysql
string password = 3;
}
}

View File

@ -674,103 +674,101 @@ func init() {
func init() { proto.RegisterFile("root_coord.proto", fileDescriptor_4513485a144f6b06) }
var fileDescriptor_4513485a144f6b06 = []byte{
// 1526 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xed, 0x72, 0xd3, 0x46,
0x17, 0xc6, 0x36, 0x49, 0xec, 0x63, 0xc7, 0x0e, 0x3b, 0x7c, 0xf8, 0x35, 0xbc, 0x2f, 0xc6, 0x2f,
0x05, 0xf3, 0xe5, 0xd0, 0x30, 0x43, 0x29, 0xff, 0x88, 0xcd, 0x04, 0x4f, 0xc9, 0x10, 0x64, 0xe8,
0xd0, 0x0f, 0xc6, 0x5d, 0x4b, 0x07, 0x47, 0x13, 0x59, 0x6b, 0xb4, 0xeb, 0x7c, 0x4c, 0x7f, 0x75,
0xa6, 0xff, 0x7b, 0x27, 0xbd, 0x88, 0xf6, 0x52, 0x7a, 0x23, 0x9d, 0xd5, 0x4a, 0xb2, 0x24, 0x4b,
0x8e, 0x02, 0xfc, 0xf3, 0xae, 0x9e, 0x7d, 0x9e, 0xb3, 0xcf, 0xee, 0x9e, 0xb3, 0x6b, 0xd8, 0x70,
0x18, 0x13, 0x43, 0x9d, 0x31, 0xc7, 0xe8, 0x4c, 0x1d, 0x26, 0x18, 0xb9, 0x3c, 0x31, 0xad, 0xc3,
0x19, 0x57, 0xad, 0x8e, 0xfc, 0xec, 0x7e, 0x6d, 0x54, 0x74, 0x36, 0x99, 0x30, 0x5b, 0xf5, 0x37,
0x2a, 0x61, 0x54, 0xa3, 0x6a, 0xda, 0x02, 0x1d, 0x9b, 0x5a, 0x5e, 0xbb, 0x3c, 0x75, 0xd8, 0xf1,
0x89, 0xd7, 0xa8, 0xa1, 0xd0, 0x8d, 0xe1, 0x04, 0x05, 0x55, 0x1d, 0xad, 0x21, 0x5c, 0x7a, 0x66,
0x59, 0x4c, 0x7f, 0x63, 0x4e, 0x90, 0x0b, 0x3a, 0x99, 0x6a, 0xf8, 0x71, 0x86, 0x5c, 0x90, 0x87,
0x70, 0x7e, 0x44, 0x39, 0xd6, 0x73, 0xcd, 0x5c, 0xbb, 0xbc, 0x75, 0xad, 0x13, 0x89, 0xc4, 0x93,
0xdf, 0xe5, 0xe3, 0x6d, 0xca, 0x51, 0x73, 0x91, 0xe4, 0x22, 0xac, 0xe8, 0x6c, 0x66, 0x8b, 0x7a,
0xa1, 0x99, 0x6b, 0xaf, 0x6b, 0xaa, 0xd1, 0xfa, 0x2d, 0x07, 0x97, 0xe3, 0x0a, 0x7c, 0xca, 0x6c,
0x8e, 0xe4, 0x11, 0xac, 0x72, 0x41, 0xc5, 0x8c, 0x7b, 0x22, 0x57, 0x13, 0x45, 0x06, 0x2e, 0x44,
0xf3, 0xa0, 0xe4, 0x1a, 0x94, 0x84, 0xcf, 0x54, 0xcf, 0x37, 0x73, 0xed, 0xf3, 0xda, 0xbc, 0x23,
0x25, 0x86, 0x77, 0x50, 0x75, 0x43, 0xe8, 0xf7, 0xbe, 0xc0, 0xec, 0xf2, 0x61, 0x66, 0x0b, 0x6a,
0x01, 0xf3, 0xe7, 0xcc, 0xaa, 0x0a, 0xf9, 0x7e, 0xcf, 0xa5, 0x2e, 0x68, 0xf9, 0x7e, 0x2f, 0x65,
0x1e, 0x7f, 0xe5, 0xa1, 0xd2, 0x9f, 0x4c, 0x99, 0x23, 0x34, 0xe4, 0x33, 0x4b, 0x7c, 0x9a, 0xd6,
0x15, 0x58, 0x13, 0x94, 0x1f, 0x0c, 0x4d, 0xc3, 0x13, 0x5c, 0x95, 0xcd, 0xbe, 0x41, 0xae, 0x43,
0xd9, 0xa0, 0x82, 0xda, 0xcc, 0x40, 0xf9, 0xb1, 0xe0, 0x7e, 0x04, 0xbf, 0xab, 0x6f, 0x90, 0xc7,
0xb0, 0x22, 0x39, 0xb0, 0x7e, 0xbe, 0x99, 0x6b, 0x57, 0xb7, 0x9a, 0x89, 0x6a, 0x2a, 0x40, 0xa9,
0x89, 0x9a, 0x82, 0x93, 0x06, 0x14, 0x39, 0x8e, 0x27, 0x68, 0x0b, 0x5e, 0x5f, 0x69, 0x16, 0xda,
0x05, 0x2d, 0x68, 0x93, 0xff, 0x40, 0x91, 0xce, 0x04, 0x1b, 0x9a, 0x06, 0xaf, 0xaf, 0xba, 0xdf,
0xd6, 0x64, 0xbb, 0x6f, 0x70, 0x72, 0x15, 0x4a, 0x0e, 0x3b, 0x1a, 0x2a, 0x23, 0xd6, 0xdc, 0x68,
0x8a, 0x0e, 0x3b, 0xea, 0xca, 0x36, 0xf9, 0x06, 0x56, 0x4c, 0xfb, 0x03, 0xe3, 0xf5, 0x62, 0xb3,
0xd0, 0x2e, 0x6f, 0xdd, 0x48, 0x8c, 0xe5, 0x3b, 0x3c, 0xf9, 0x9e, 0x5a, 0x33, 0xdc, 0xa3, 0xa6,
0xa3, 0x29, 0x7c, 0xeb, 0x8f, 0x1c, 0x5c, 0xe9, 0x21, 0xd7, 0x1d, 0x73, 0x84, 0x03, 0x2f, 0x8a,
0x4f, 0xdf, 0x16, 0x2d, 0xa8, 0xe8, 0xcc, 0xb2, 0x50, 0x17, 0x26, 0xb3, 0x83, 0x25, 0x8c, 0xf4,
0x91, 0xff, 0x01, 0x78, 0xd3, 0xed, 0xf7, 0x78, 0xbd, 0xe0, 0x4e, 0x32, 0xd4, 0xd3, 0x9a, 0x41,
0xcd, 0x0b, 0x44, 0x12, 0xf7, 0xed, 0x0f, 0x6c, 0x81, 0x36, 0x97, 0x40, 0xdb, 0x84, 0xf2, 0x94,
0x3a, 0xc2, 0x8c, 0x28, 0x87, 0xbb, 0xe4, 0x59, 0x09, 0x64, 0xbc, 0xe5, 0x9c, 0x77, 0xb4, 0xfe,
0xc9, 0x43, 0xc5, 0xd3, 0x95, 0x9a, 0x9c, 0xf4, 0xa0, 0x24, 0xe7, 0x34, 0x94, 0x3e, 0x79, 0x16,
0xdc, 0xee, 0x24, 0x67, 0xa0, 0x4e, 0x2c, 0x60, 0xad, 0x38, 0xf2, 0x43, 0xef, 0x41, 0xd9, 0xb4,
0x0d, 0x3c, 0x1e, 0xaa, 0xe5, 0xc9, 0xbb, 0xcb, 0xf3, 0xff, 0x28, 0x8f, 0xcc, 0x42, 0x9d, 0x40,
0xdb, 0xc0, 0x63, 0x97, 0x03, 0x4c, 0xff, 0x27, 0x27, 0x08, 0x17, 0xf0, 0x58, 0x38, 0x74, 0x18,
0xe6, 0x2a, 0xb8, 0x5c, 0xdf, 0x9e, 0x12, 0x93, 0x4b, 0xd0, 0x79, 0x2e, 0x47, 0x07, 0xdc, 0xfc,
0xb9, 0x2d, 0x9c, 0x13, 0xad, 0x86, 0xd1, 0xde, 0xc6, 0x2f, 0x70, 0x31, 0x09, 0x48, 0x36, 0xa0,
0x70, 0x80, 0x27, 0x9e, 0xed, 0xf2, 0x27, 0xd9, 0x82, 0x95, 0x43, 0xb9, 0x95, 0x5c, 0x9f, 0x17,
0xf6, 0x86, 0x3b, 0xa1, 0xf9, 0x4c, 0x14, 0xf4, 0x69, 0xfe, 0x49, 0xae, 0xf5, 0x77, 0x1e, 0xea,
0x8b, 0xdb, 0xed, 0x73, 0x72, 0x45, 0x96, 0x2d, 0x37, 0x86, 0x75, 0x6f, 0xa1, 0x23, 0xd6, 0x6d,
0xa7, 0x59, 0x97, 0x16, 0x61, 0xc4, 0x53, 0xe5, 0x61, 0x85, 0x87, 0xba, 0x1a, 0x08, 0x17, 0x16,
0x20, 0x09, 0xee, 0x3d, 0x8d, 0xba, 0x77, 0x33, 0xcb, 0x12, 0x86, 0x5d, 0x34, 0xe0, 0xe2, 0x0e,
0x8a, 0xae, 0x83, 0x06, 0xda, 0xc2, 0xa4, 0xd6, 0xa7, 0x1f, 0xd8, 0x06, 0x14, 0x67, 0x5c, 0xd6,
0xc7, 0x89, 0x0a, 0xa6, 0xa4, 0x05, 0xed, 0xd6, 0xef, 0x39, 0xb8, 0x14, 0x93, 0xf9, 0x9c, 0x85,
0x5a, 0x22, 0x25, 0xbf, 0x4d, 0x29, 0xe7, 0x47, 0xcc, 0x51, 0x89, 0xb6, 0xa4, 0x05, 0xed, 0xad,
0x3f, 0xaf, 0x43, 0x49, 0x63, 0x4c, 0x74, 0xa5, 0x25, 0x64, 0x0a, 0x44, 0xc6, 0xc4, 0x26, 0x53,
0x66, 0xa3, 0xad, 0x12, 0x2b, 0x27, 0x0f, 0xa3, 0x01, 0x04, 0x35, 0x7f, 0x11, 0xea, 0x59, 0xd5,
0xb8, 0x95, 0x32, 0x22, 0x06, 0x6f, 0x9d, 0x23, 0x13, 0x57, 0x51, 0xd6, 0xeb, 0x37, 0xa6, 0x7e,
0xd0, 0xdd, 0xa7, 0xb6, 0x8d, 0xd6, 0x32, 0xc5, 0x18, 0xd4, 0x57, 0x8c, 0x1d, 0x7a, 0xaf, 0x31,
0x10, 0x8e, 0x69, 0x8f, 0x7d, 0x67, 0x5b, 0xe7, 0xc8, 0x47, 0x77, 0x6d, 0xa5, 0xba, 0xc9, 0x85,
0xa9, 0x73, 0x5f, 0x70, 0x2b, 0x5d, 0x70, 0x01, 0x7c, 0x46, 0xc9, 0x21, 0x6c, 0x74, 0x1d, 0xa4,
0x02, 0xbb, 0xc1, 0xa1, 0x21, 0xf7, 0x13, 0x87, 0xc6, 0x61, 0xbe, 0xd0, 0xb2, 0x0d, 0xd0, 0x3a,
0x47, 0x7e, 0x82, 0x6a, 0xcf, 0x61, 0xd3, 0x10, 0xfd, 0xdd, 0x44, 0xfa, 0x28, 0x28, 0x23, 0xf9,
0x10, 0xd6, 0x5f, 0x50, 0x1e, 0xe2, 0xbe, 0x93, 0xc8, 0x1d, 0xc1, 0xf8, 0xd4, 0x37, 0x12, 0xa1,
0xdb, 0x8c, 0x59, 0x21, 0x7b, 0x8e, 0x80, 0xf8, 0x09, 0x21, 0xa4, 0xd2, 0x49, 0x9e, 0xc1, 0x02,
0xd0, 0x97, 0xda, 0xcc, 0x8c, 0x0f, 0x84, 0xdf, 0x42, 0x59, 0x19, 0xfe, 0xcc, 0x32, 0x29, 0x27,
0xb7, 0x97, 0x2c, 0x89, 0x8b, 0xc8, 0x68, 0xd8, 0x6b, 0x28, 0x49, 0xa3, 0x15, 0xe9, 0x57, 0xa9,
0x0b, 0x71, 0x16, 0xca, 0x01, 0xc0, 0x33, 0x4b, 0xa0, 0xa3, 0x38, 0x6f, 0x25, 0x72, 0xce, 0x01,
0x19, 0x49, 0x6d, 0xa8, 0x0d, 0xf6, 0xe5, 0x05, 0xc7, 0xb7, 0x86, 0x93, 0x7b, 0xc9, 0x1b, 0x3a,
0x8a, 0xf2, 0xe9, 0xef, 0x67, 0x03, 0x07, 0x76, 0xbf, 0x87, 0x9a, 0x32, 0x73, 0xcf, 0xbf, 0x34,
0xa4, 0xe8, 0xc5, 0x50, 0x19, 0xa7, 0xf3, 0x03, 0xac, 0x4b, 0x5b, 0xe7, 0xe4, 0x77, 0x52, 0xad,
0x3f, 0x2b, 0xf5, 0x7b, 0xa8, 0xbc, 0xa0, 0x7c, 0xce, 0xdc, 0x4e, 0x3b, 0x01, 0x0b, 0xc4, 0x99,
0x0e, 0xc0, 0x01, 0x54, 0xa5, 0x6b, 0xc1, 0x60, 0x9e, 0x72, 0x7c, 0xa3, 0x20, 0x5f, 0xe2, 0x5e,
0x26, 0x6c, 0x20, 0x86, 0x50, 0x91, 0xdf, 0xfc, 0xd2, 0x9b, 0x32, 0x97, 0x30, 0xc4, 0x17, 0xba,
0x93, 0x01, 0x19, 0x4a, 0xb3, 0xd5, 0xe8, 0x3b, 0x8c, 0x3c, 0x48, 0xab, 0xc2, 0x89, 0x2f, 0xc2,
0x46, 0x27, 0x2b, 0x3c, 0x90, 0xfc, 0x19, 0xd6, 0xbc, 0xd7, 0x51, 0xfc, 0x84, 0xc4, 0x06, 0x07,
0x0f, 0xb3, 0xc6, 0xed, 0x53, 0x71, 0x01, 0x3b, 0x85, 0x4b, 0x6f, 0xa7, 0x86, 0xcc, 0xce, 0xaa,
0x06, 0xf8, 0x55, 0x28, 0xbe, 0xcd, 0xe6, 0x95, 0x2e, 0x8a, 0xdb, 0xe5, 0xe3, 0xd3, 0xb6, 0x99,
0x05, 0x57, 0x34, 0xb4, 0x90, 0x72, 0xec, 0xbd, 0x7e, 0xb9, 0x8b, 0x9c, 0xd3, 0x31, 0x0e, 0x84,
0x83, 0x74, 0x12, 0xaf, 0x4e, 0xea, 0x91, 0x9d, 0x02, 0xce, 0xb8, 0xa9, 0x1d, 0xf8, 0x6f, 0xdf,
0x3e, 0xa4, 0x96, 0x69, 0x44, 0x4a, 0xce, 0x2e, 0x0a, 0xda, 0xa5, 0xfa, 0x3e, 0x26, 0x6b, 0x46,
0x87, 0x04, 0xe0, 0x8c, 0x9a, 0xbf, 0x02, 0x51, 0xf9, 0xc1, 0xfe, 0x60, 0x8e, 0x67, 0x0e, 0x55,
0xbb, 0x3d, 0xad, 0xd6, 0x2f, 0x42, 0x7d, 0x99, 0xaf, 0xcf, 0x30, 0x22, 0x54, 0x86, 0x61, 0x07,
0xc5, 0x2e, 0x0a, 0xc7, 0xd4, 0xd3, 0x92, 0xe8, 0x1c, 0x90, 0xb2, 0x45, 0x12, 0x70, 0x81, 0xc0,
0x00, 0x56, 0xd5, 0x73, 0x94, 0xb4, 0x12, 0x07, 0xf9, 0x8f, 0xe9, 0x65, 0x97, 0x87, 0xe0, 0xc1,
0x1d, 0x4a, 0x0e, 0x3b, 0x28, 0x42, 0xcf, 0xdc, 0x94, 0xe4, 0x10, 0x05, 0x2d, 0x4f, 0x0e, 0x71,
0x6c, 0x20, 0x66, 0x43, 0xed, 0xa5, 0xc9, 0xbd, 0x8f, 0x6f, 0x28, 0x3f, 0x48, 0x2b, 0x09, 0x31,
0xd4, 0xf2, 0x92, 0xb0, 0x00, 0x0e, 0x39, 0x56, 0xd1, 0x50, 0x7e, 0xf0, 0x7c, 0x4b, 0xbd, 0xa9,
0x87, 0xff, 0x87, 0x38, 0x6d, 0x93, 0xbd, 0x0b, 0xae, 0x5b, 0xc1, 0xcd, 0x3a, 0x5e, 0x86, 0xe7,
0x87, 0x34, 0x80, 0xc8, 0x47, 0x40, 0x06, 0x66, 0x2f, 0x07, 0x7c, 0x69, 0xe6, 0x21, 0x6c, 0xf4,
0xd0, 0xc2, 0x08, 0xf3, 0xfd, 0x94, 0x1b, 0x4d, 0x14, 0x96, 0xf1, 0xe4, 0xed, 0xc3, 0xba, 0x5c,
0x06, 0x39, 0xee, 0x2d, 0x47, 0x87, 0xa7, 0x54, 0xc7, 0x08, 0xc6, 0xa7, 0xbe, 0x9b, 0x05, 0x1a,
0xda, 0x43, 0xeb, 0x91, 0x57, 0x4d, 0x7c, 0x1e, 0xf3, 0x45, 0x4d, 0x7a, 0x63, 0x35, 0x1e, 0x64,
0x44, 0x87, 0xf6, 0x10, 0xa8, 0xe5, 0xd6, 0x98, 0x85, 0x29, 0xc7, 0x7a, 0x0e, 0xc8, 0x68, 0xd7,
0x2b, 0x28, 0xca, 0x8b, 0x82, 0x4b, 0x79, 0x33, 0xf5, 0x1e, 0x71, 0x06, 0xc2, 0xf7, 0x50, 0x7b,
0x35, 0x45, 0x87, 0x0a, 0x94, 0x7e, 0xb9, 0xbc, 0xc9, 0x27, 0x2b, 0x86, 0xca, 0x7c, 0x49, 0x87,
0x01, 0xca, 0x0c, 0xbe, 0xc4, 0x84, 0x39, 0x60, 0x79, 0x6e, 0x0b, 0xe3, 0xc2, 0xc9, 0x53, 0xf5,
0xcb, 0xc0, 0x96, 0x0a, 0xb8, 0x91, 0x67, 0x10, 0x50, 0xb8, 0xf0, 0x23, 0xc9, 0x9b, 0xfa, 0x9e,
0x63, 0x1e, 0x9a, 0x16, 0x8e, 0x31, 0xe5, 0x04, 0xc4, 0x61, 0x19, 0x2d, 0x1a, 0x41, 0x59, 0x09,
0xef, 0x38, 0xd4, 0x16, 0x64, 0x59, 0x68, 0x2e, 0xc2, 0xa7, 0x6d, 0x9f, 0x0e, 0x0c, 0x26, 0xa1,
0x03, 0xc8, 0x63, 0xb1, 0xc7, 0x2c, 0x53, 0x3f, 0x89, 0x5f, 0xad, 0x82, 0xd4, 0x30, 0x87, 0xa4,
0x5c, 0xad, 0x12, 0x91, 0xbe, 0xc8, 0xf6, 0x93, 0x1f, 0x1f, 0x8f, 0x4d, 0xb1, 0x3f, 0x1b, 0xc9,
0x29, 0x6e, 0xaa, 0x81, 0x0f, 0x4c, 0xe6, 0xfd, 0xda, 0xf4, 0x07, 0x6f, 0xba, 0x5c, 0x9b, 0xc1,
0x01, 0x9a, 0x8e, 0x46, 0xab, 0x6e, 0xd7, 0xa3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x68, 0x8b,
0x7b, 0x42, 0xfb, 0x17, 0x00, 0x00,
// 1496 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0x5b, 0x73, 0x13, 0x37,
0x14, 0xc6, 0x36, 0x49, 0xec, 0x63, 0xc7, 0x0e, 0x1a, 0x2e, 0xae, 0xa1, 0xd4, 0xb8, 0x14, 0xcc,
0xcd, 0xa1, 0x61, 0x86, 0x52, 0xde, 0x88, 0xcd, 0x04, 0x4f, 0x9b, 0x21, 0x5d, 0x43, 0x87, 0x5e,
0x18, 0x57, 0xde, 0x15, 0x8e, 0x26, 0xeb, 0x95, 0x59, 0xc9, 0xb9, 0x4c, 0x9f, 0x3a, 0xd3, 0xf7,
0xfe, 0xa5, 0x4e, 0xfb, 0x53, 0xfa, 0x47, 0x3a, 0x5a, 0xed, 0xca, 0xbb, 0xeb, 0x5d, 0x67, 0x03,
0xbc, 0x59, 0xda, 0x4f, 0xdf, 0x77, 0xf4, 0x49, 0x47, 0x47, 0x32, 0x6c, 0xb8, 0x8c, 0x89, 0xa1,
0xc9, 0x98, 0x6b, 0x75, 0xa6, 0x2e, 0x13, 0x0c, 0x5d, 0x9e, 0x50, 0xfb, 0x70, 0xc6, 0x55, 0xab,
0x23, 0x3f, 0x7b, 0x5f, 0x1b, 0x15, 0x93, 0x4d, 0x26, 0xcc, 0x51, 0xfd, 0x8d, 0x4a, 0x18, 0xd5,
0xa8, 0x52, 0x47, 0x10, 0xd7, 0xc1, 0xb6, 0xdf, 0x2e, 0x4f, 0x5d, 0x76, 0x7c, 0xe2, 0x37, 0x6a,
0x44, 0x98, 0xd6, 0x70, 0x42, 0x04, 0x56, 0x1d, 0xad, 0x21, 0x5c, 0x7a, 0x66, 0xdb, 0xcc, 0x7c,
0x45, 0x27, 0x84, 0x0b, 0x3c, 0x99, 0x1a, 0xe4, 0xfd, 0x8c, 0x70, 0x81, 0x1e, 0xc2, 0xf9, 0x11,
0xe6, 0xa4, 0x9e, 0x6b, 0xe6, 0xda, 0xe5, 0xad, 0x6b, 0x9d, 0x48, 0x24, 0xbe, 0xfc, 0x2e, 0x1f,
0x6f, 0x63, 0x4e, 0x0c, 0x0f, 0x89, 0x2e, 0xc2, 0x8a, 0xc9, 0x66, 0x8e, 0xa8, 0x17, 0x9a, 0xb9,
0xf6, 0xba, 0xa1, 0x1a, 0xad, 0x3f, 0x72, 0x70, 0x39, 0xae, 0xc0, 0xa7, 0xcc, 0xe1, 0x04, 0x3d,
0x82, 0x55, 0x2e, 0xb0, 0x98, 0x71, 0x5f, 0xe4, 0x6a, 0xa2, 0xc8, 0xc0, 0x83, 0x18, 0x3e, 0x14,
0x5d, 0x83, 0x92, 0x08, 0x98, 0xea, 0xf9, 0x66, 0xae, 0x7d, 0xde, 0x98, 0x77, 0xa4, 0xc4, 0xf0,
0x06, 0xaa, 0x5e, 0x08, 0xfd, 0xde, 0x27, 0x98, 0x5d, 0x3e, 0xcc, 0x6c, 0x43, 0x4d, 0x33, 0x7f,
0xcc, 0xac, 0xaa, 0x90, 0xef, 0xf7, 0x3c, 0xea, 0x82, 0x91, 0xef, 0xf7, 0x52, 0xe6, 0xf1, 0x4f,
0x1e, 0x2a, 0xfd, 0xc9, 0x94, 0xb9, 0xc2, 0x20, 0x7c, 0x66, 0x8b, 0x0f, 0xd3, 0xba, 0x02, 0x6b,
0x02, 0xf3, 0x83, 0x21, 0xb5, 0x7c, 0xc1, 0x55, 0xd9, 0xec, 0x5b, 0xe8, 0x0b, 0x28, 0x5b, 0x58,
0x60, 0x87, 0x59, 0x44, 0x7e, 0x2c, 0x78, 0x1f, 0x21, 0xe8, 0xea, 0x5b, 0xe8, 0x31, 0xac, 0x48,
0x0e, 0x52, 0x3f, 0xdf, 0xcc, 0xb5, 0xab, 0x5b, 0xcd, 0x44, 0x35, 0x15, 0xa0, 0xd4, 0x24, 0x86,
0x82, 0xa3, 0x06, 0x14, 0x39, 0x19, 0x4f, 0x88, 0x23, 0x78, 0x7d, 0xa5, 0x59, 0x68, 0x17, 0x0c,
0xdd, 0x46, 0x9f, 0x41, 0x11, 0xcf, 0x04, 0x1b, 0x52, 0x8b, 0xd7, 0x57, 0xbd, 0x6f, 0x6b, 0xb2,
0xdd, 0xb7, 0x38, 0xba, 0x0a, 0x25, 0x97, 0x1d, 0x0d, 0x95, 0x11, 0x6b, 0x5e, 0x34, 0x45, 0x97,
0x1d, 0x75, 0x65, 0x1b, 0x7d, 0x03, 0x2b, 0xd4, 0x79, 0xc7, 0x78, 0xbd, 0xd8, 0x2c, 0xb4, 0xcb,
0x5b, 0x37, 0x12, 0x63, 0xf9, 0x8e, 0x9c, 0xfc, 0x88, 0xed, 0x19, 0xd9, 0xc3, 0xd4, 0x35, 0x14,
0xbe, 0xf5, 0x57, 0x0e, 0xae, 0xf4, 0x08, 0x37, 0x5d, 0x3a, 0x22, 0x03, 0x3f, 0x8a, 0x0f, 0xdf,
0x16, 0x2d, 0xa8, 0x98, 0xcc, 0xb6, 0x89, 0x29, 0x28, 0x73, 0xf4, 0x12, 0x46, 0xfa, 0xd0, 0x75,
0x00, 0x7f, 0xba, 0xfd, 0x1e, 0xaf, 0x17, 0xbc, 0x49, 0x86, 0x7a, 0x5a, 0x33, 0xa8, 0xf9, 0x81,
0x48, 0xe2, 0xbe, 0xf3, 0x8e, 0x2d, 0xd0, 0xe6, 0x12, 0x68, 0x9b, 0x50, 0x9e, 0x62, 0x57, 0xd0,
0x88, 0x72, 0xb8, 0x4b, 0xe6, 0x8a, 0x96, 0xf1, 0x97, 0x73, 0xde, 0xd1, 0xfa, 0x2f, 0x0f, 0x15,
0x5f, 0x57, 0x6a, 0x72, 0xd4, 0x83, 0x92, 0x9c, 0xd3, 0x50, 0xfa, 0xe4, 0x5b, 0x70, 0xbb, 0x93,
0x7c, 0x02, 0x75, 0x62, 0x01, 0x1b, 0xc5, 0x51, 0x10, 0x7a, 0x0f, 0xca, 0xd4, 0xb1, 0xc8, 0xf1,
0x50, 0x2d, 0x4f, 0xde, 0x5b, 0x9e, 0x2f, 0xa3, 0x3c, 0xf2, 0x14, 0xea, 0x68, 0x6d, 0x8b, 0x1c,
0x7b, 0x1c, 0x40, 0x83, 0x9f, 0x1c, 0x11, 0xb8, 0x40, 0x8e, 0x85, 0x8b, 0x87, 0x61, 0xae, 0x82,
0xc7, 0xf5, 0xed, 0x29, 0x31, 0x79, 0x04, 0x9d, 0xe7, 0x72, 0xb4, 0xe6, 0xe6, 0xcf, 0x1d, 0xe1,
0x9e, 0x18, 0x35, 0x12, 0xed, 0x6d, 0xfc, 0x06, 0x17, 0x93, 0x80, 0x68, 0x03, 0x0a, 0x07, 0xe4,
0xc4, 0xb7, 0x5d, 0xfe, 0x44, 0x5b, 0xb0, 0x72, 0x28, 0xb7, 0x92, 0xe7, 0xf3, 0xc2, 0xde, 0xf0,
0x26, 0x34, 0x9f, 0x89, 0x82, 0x3e, 0xcd, 0x3f, 0xc9, 0xb5, 0xfe, 0xcd, 0x43, 0x7d, 0x71, 0xbb,
0x7d, 0xcc, 0x59, 0x91, 0x65, 0xcb, 0x8d, 0x61, 0xdd, 0x5f, 0xe8, 0x88, 0x75, 0xdb, 0x69, 0xd6,
0xa5, 0x45, 0x18, 0xf1, 0x54, 0x79, 0x58, 0xe1, 0xa1, 0xae, 0x06, 0x81, 0x0b, 0x0b, 0x90, 0x04,
0xf7, 0x9e, 0x46, 0xdd, 0xbb, 0x99, 0x65, 0x09, 0xc3, 0x2e, 0x5a, 0x70, 0x71, 0x87, 0x88, 0xae,
0x4b, 0x2c, 0xe2, 0x08, 0x8a, 0xed, 0x0f, 0x4f, 0xd8, 0x06, 0x14, 0x67, 0x5c, 0xd6, 0xc7, 0x89,
0x0a, 0xa6, 0x64, 0xe8, 0x76, 0xeb, 0xcf, 0x1c, 0x5c, 0x8a, 0xc9, 0x7c, 0xcc, 0x42, 0x2d, 0x91,
0x92, 0xdf, 0xa6, 0x98, 0xf3, 0x23, 0xe6, 0xaa, 0x83, 0xb6, 0x64, 0xe8, 0xf6, 0xd6, 0xdf, 0xd7,
0xa1, 0x64, 0x30, 0x26, 0xba, 0xd2, 0x12, 0x34, 0x05, 0x24, 0x63, 0x62, 0x93, 0x29, 0x73, 0x88,
0xa3, 0x0e, 0x56, 0x8e, 0x1e, 0x46, 0x03, 0xd0, 0x35, 0x7f, 0x11, 0xea, 0x5b, 0xd5, 0xb8, 0x95,
0x32, 0x22, 0x06, 0x6f, 0x9d, 0x43, 0x13, 0x4f, 0x51, 0xd6, 0xeb, 0x57, 0xd4, 0x3c, 0xe8, 0xee,
0x63, 0xc7, 0x21, 0xf6, 0x32, 0xc5, 0x18, 0x34, 0x50, 0x8c, 0x25, 0xbd, 0xdf, 0x18, 0x08, 0x97,
0x3a, 0xe3, 0xc0, 0xd9, 0xd6, 0x39, 0xf4, 0xde, 0x5b, 0x5b, 0xa9, 0x4e, 0xb9, 0xa0, 0x26, 0x0f,
0x04, 0xb7, 0xd2, 0x05, 0x17, 0xc0, 0x67, 0x94, 0x1c, 0xc2, 0x46, 0xd7, 0x25, 0x58, 0x90, 0xae,
0x4e, 0x1a, 0x74, 0x3f, 0x71, 0x68, 0x1c, 0x16, 0x08, 0x2d, 0xdb, 0x00, 0xad, 0x73, 0xe8, 0x17,
0xa8, 0xf6, 0x5c, 0x36, 0x0d, 0xd1, 0xdf, 0x4d, 0xa4, 0x8f, 0x82, 0x32, 0x92, 0x0f, 0x61, 0xfd,
0x05, 0xe6, 0x21, 0xee, 0x3b, 0x89, 0xdc, 0x11, 0x4c, 0x40, 0x7d, 0x23, 0x11, 0xba, 0xcd, 0x98,
0x1d, 0xb2, 0xe7, 0x08, 0x50, 0x70, 0x20, 0x84, 0x54, 0x3a, 0xc9, 0x33, 0x58, 0x00, 0x06, 0x52,
0x9b, 0x99, 0xf1, 0x5a, 0xf8, 0x35, 0x94, 0x95, 0xe1, 0xcf, 0x6c, 0x8a, 0x39, 0xba, 0xbd, 0x64,
0x49, 0x3c, 0x44, 0x46, 0xc3, 0x7e, 0x80, 0x92, 0x34, 0x5a, 0x91, 0x7e, 0x95, 0xba, 0x10, 0x67,
0xa1, 0x1c, 0x00, 0x3c, 0xb3, 0x05, 0x71, 0x15, 0xe7, 0xad, 0x44, 0xce, 0x39, 0x20, 0x23, 0xa9,
0x03, 0xb5, 0xc1, 0xbe, 0xbc, 0xe0, 0x04, 0xd6, 0x70, 0x74, 0x2f, 0x79, 0x43, 0x47, 0x51, 0x01,
0xfd, 0xfd, 0x6c, 0x60, 0x6d, 0xf7, 0x5b, 0xa8, 0x29, 0x33, 0xf7, 0x82, 0x4b, 0x43, 0x8a, 0x5e,
0x0c, 0x95, 0x71, 0x3a, 0x3f, 0xc1, 0xba, 0xb4, 0x75, 0x4e, 0x7e, 0x27, 0xd5, 0xfa, 0xb3, 0x52,
0xbf, 0x85, 0xca, 0x0b, 0xcc, 0xe7, 0xcc, 0xed, 0xb4, 0x0c, 0x58, 0x20, 0xce, 0x94, 0x00, 0x07,
0x50, 0x95, 0xae, 0xe9, 0xc1, 0x3c, 0x25, 0x7d, 0xa3, 0xa0, 0x40, 0xe2, 0x5e, 0x26, 0xac, 0x16,
0x23, 0x50, 0x91, 0xdf, 0x82, 0xd2, 0x9b, 0x32, 0x97, 0x30, 0x24, 0x10, 0xba, 0x93, 0x01, 0x19,
0x3a, 0x66, 0xab, 0xd1, 0x77, 0x18, 0x7a, 0x90, 0x56, 0x85, 0x13, 0x5f, 0x84, 0x8d, 0x4e, 0x56,
0xb8, 0x96, 0xfc, 0x15, 0xd6, 0xfc, 0xd7, 0x51, 0x3c, 0x43, 0x62, 0x83, 0xf5, 0xc3, 0xac, 0x71,
0xfb, 0x54, 0x9c, 0x66, 0xc7, 0x70, 0xe9, 0xf5, 0xd4, 0x92, 0xa7, 0xb3, 0xaa, 0x01, 0x41, 0x15,
0x8a, 0x6f, 0xb3, 0x79, 0xa5, 0x8b, 0xe2, 0x76, 0xf9, 0xf8, 0xb4, 0x6d, 0xe6, 0xc2, 0xe7, 0x7d,
0xe7, 0x10, 0xdb, 0xd4, 0x8a, 0x14, 0x81, 0x5d, 0x22, 0x70, 0x17, 0x9b, 0xfb, 0x24, 0x5e, 0xa3,
0xd4, 0x53, 0x3b, 0x3a, 0x44, 0x83, 0x33, 0x6e, 0xed, 0xdf, 0x01, 0xa9, 0x8c, 0x75, 0xde, 0xd1,
0xf1, 0xcc, 0xc5, 0x6a, 0xff, 0xa5, 0x55, 0xdf, 0x45, 0x68, 0x20, 0xf3, 0xf5, 0x19, 0x46, 0x84,
0x0a, 0x23, 0xec, 0x10, 0xb1, 0x4b, 0x84, 0x4b, 0xcd, 0xb4, 0x63, 0x6d, 0x0e, 0x48, 0x59, 0xb4,
0x04, 0x9c, 0x16, 0x18, 0xc0, 0xaa, 0x7a, 0x20, 0xa2, 0x56, 0xe2, 0xa0, 0xe0, 0x79, 0xbb, 0xac,
0x9c, 0xeb, 0x27, 0x70, 0x28, 0x5d, 0x77, 0x88, 0x08, 0x3d, 0x3c, 0x53, 0xd2, 0x35, 0x0a, 0x5a,
0x9e, 0xae, 0x71, 0xac, 0x16, 0x73, 0xa0, 0xf6, 0x3d, 0xe5, 0xfe, 0xc7, 0x57, 0x98, 0x1f, 0xa4,
0x1d, 0xd2, 0x31, 0xd4, 0xf2, 0x43, 0x7a, 0x01, 0x1c, 0x72, 0xac, 0x62, 0x10, 0xf9, 0xc1, 0xf7,
0x2d, 0xf5, 0xee, 0x1c, 0xfe, 0x67, 0xe0, 0xb4, 0x4d, 0xf6, 0x46, 0x5f, 0x80, 0xf4, 0x5d, 0x37,
0x5e, 0x18, 0xe7, 0x69, 0xa3, 0x21, 0xf2, 0x5a, 0x9e, 0x81, 0xd9, 0xcf, 0xca, 0x4f, 0xcd, 0x3c,
0x84, 0x8d, 0x1e, 0xb1, 0x49, 0x84, 0xf9, 0x7e, 0xca, 0x1d, 0x23, 0x0a, 0xcb, 0x98, 0x79, 0xfb,
0xb0, 0x2e, 0x97, 0x41, 0x8e, 0x7b, 0xcd, 0x89, 0xcb, 0x53, 0xea, 0x55, 0x04, 0x13, 0x50, 0xdf,
0xcd, 0x02, 0x0d, 0xed, 0xa1, 0xf5, 0xc8, 0x3b, 0x23, 0x3e, 0x8f, 0xf9, 0xa2, 0x26, 0xbd, 0x7a,
0x1a, 0x0f, 0x32, 0xa2, 0x43, 0x7b, 0x08, 0xd4, 0x72, 0x1b, 0xcc, 0x26, 0x29, 0x69, 0x3d, 0x07,
0x64, 0xb4, 0xeb, 0x25, 0x14, 0x65, 0xe9, 0xf6, 0x28, 0x6f, 0xa6, 0x56, 0xf6, 0x33, 0x10, 0xbe,
0x85, 0xda, 0xcb, 0x29, 0x71, 0xb1, 0x20, 0xd2, 0x2f, 0x8f, 0x37, 0x39, 0xb3, 0x62, 0xa8, 0xcc,
0xd7, 0x66, 0x18, 0x10, 0x79, 0x82, 0x2f, 0x31, 0x61, 0x0e, 0x58, 0x7e, 0xb6, 0x85, 0x71, 0xe1,
0xc3, 0x53, 0xf5, 0xcb, 0xc0, 0x96, 0x0a, 0x78, 0x91, 0x67, 0x10, 0x50, 0xb8, 0xf0, 0xb3, 0xc5,
0x9f, 0xfa, 0x9e, 0x4b, 0x0f, 0xa9, 0x4d, 0xc6, 0x24, 0x25, 0x03, 0xe2, 0xb0, 0x8c, 0x16, 0x8d,
0xa0, 0xac, 0x84, 0x77, 0x5c, 0xec, 0x08, 0xb4, 0x2c, 0x34, 0x0f, 0x11, 0xd0, 0xb6, 0x4f, 0x07,
0xea, 0x49, 0x98, 0x00, 0x32, 0x2d, 0xf6, 0x98, 0x4d, 0xcd, 0x93, 0xf8, 0x65, 0x47, 0x1f, 0x0d,
0x73, 0x48, 0xca, 0x65, 0x27, 0x11, 0x19, 0x88, 0x6c, 0x3f, 0xf9, 0xf9, 0xf1, 0x98, 0x8a, 0xfd,
0xd9, 0x48, 0x4e, 0x71, 0x53, 0x0d, 0x7c, 0x40, 0x99, 0xff, 0x6b, 0x33, 0x18, 0xbc, 0xe9, 0x71,
0x6d, 0xea, 0x04, 0x9a, 0x8e, 0x46, 0xab, 0x5e, 0xd7, 0xa3, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff,
0x19, 0x92, 0xda, 0x17, 0x8d, 0x17, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -851,7 +849,6 @@ type RootCoordClient interface {
AllocTimestamp(ctx context.Context, in *AllocTimestampRequest, opts ...grpc.CallOption) (*AllocTimestampResponse, error)
AllocID(ctx context.Context, in *AllocIDRequest, opts ...grpc.CallOption) (*AllocIDResponse, error)
UpdateChannelTimeTick(ctx context.Context, in *internalpb.ChannelTimeTickMsg, opts ...grpc.CallOption) (*commonpb.Status, error)
ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error)
ShowConfigurations(ctx context.Context, in *internalpb.ShowConfigurationsRequest, opts ...grpc.CallOption) (*internalpb.ShowConfigurationsResponse, error)
// https://wiki.lfaidata.foundation/display/MIL/MEP+8+--+Add+metrics+for+proxy
@ -1058,15 +1055,6 @@ func (c *rootCoordClient) UpdateChannelTimeTick(ctx context.Context, in *interna
return out, nil
}
func (c *rootCoordClient) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
out := new(commonpb.Status)
err := c.cc.Invoke(ctx, "/milvus.proto.rootcoord.RootCoord/ReleaseDQLMessageStream", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *rootCoordClient) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest, opts ...grpc.CallOption) (*commonpb.Status, error) {
out := new(commonpb.Status)
err := c.cc.Invoke(ctx, "/milvus.proto.rootcoord.RootCoord/InvalidateCollectionMetaCache", in, out, opts...)
@ -1315,7 +1303,6 @@ type RootCoordServer interface {
AllocTimestamp(context.Context, *AllocTimestampRequest) (*AllocTimestampResponse, error)
AllocID(context.Context, *AllocIDRequest) (*AllocIDResponse, error)
UpdateChannelTimeTick(context.Context, *internalpb.ChannelTimeTickMsg) (*commonpb.Status, error)
ReleaseDQLMessageStream(context.Context, *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error)
InvalidateCollectionMetaCache(context.Context, *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
ShowConfigurations(context.Context, *internalpb.ShowConfigurationsRequest) (*internalpb.ShowConfigurationsResponse, error)
// https://wiki.lfaidata.foundation/display/MIL/MEP+8+--+Add+metrics+for+proxy
@ -1404,9 +1391,6 @@ func (*UnimplementedRootCoordServer) AllocID(ctx context.Context, req *AllocIDRe
func (*UnimplementedRootCoordServer) UpdateChannelTimeTick(ctx context.Context, req *internalpb.ChannelTimeTickMsg) (*commonpb.Status, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateChannelTimeTick not implemented")
}
func (*UnimplementedRootCoordServer) ReleaseDQLMessageStream(ctx context.Context, req *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
return nil, status.Errorf(codes.Unimplemented, "method ReleaseDQLMessageStream not implemented")
}
func (*UnimplementedRootCoordServer) InvalidateCollectionMetaCache(ctx context.Context, req *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return nil, status.Errorf(codes.Unimplemented, "method InvalidateCollectionMetaCache not implemented")
}
@ -1814,24 +1798,6 @@ func _RootCoord_UpdateChannelTimeTick_Handler(srv interface{}, ctx context.Conte
return interceptor(ctx, in, info, handler)
}
func _RootCoord_ReleaseDQLMessageStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(proxypb.ReleaseDQLMessageStreamRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(RootCoordServer).ReleaseDQLMessageStream(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.rootcoord.RootCoord/ReleaseDQLMessageStream",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(RootCoordServer).ReleaseDQLMessageStream(ctx, req.(*proxypb.ReleaseDQLMessageStreamRequest))
}
return interceptor(ctx, in, info, handler)
}
func _RootCoord_InvalidateCollectionMetaCache_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(proxypb.InvalidateCollMetaCacheRequest)
if err := dec(in); err != nil {
@ -2272,10 +2238,6 @@ var _RootCoord_serviceDesc = grpc.ServiceDesc{
MethodName: "UpdateChannelTimeTick",
Handler: _RootCoord_UpdateChannelTimeTick_Handler,
},
{
MethodName: "ReleaseDQLMessageStream",
Handler: _RootCoord_ReleaseDQLMessageStream_Handler,
},
{
MethodName: "InvalidateCollectionMetaCache",
Handler: _RootCoord_InvalidateCollectionMetaCache_Handler,

View File

@ -31,6 +31,13 @@ enum DataType {
FloatVector = 101;
}
enum FieldState {
FieldCreated = 0;
FieldCreating = 1;
FieldDropping = 2;
FieldDropped = 3;
}
/**
* @brief Field schema
*/
@ -43,6 +50,7 @@ message FieldSchema {
repeated common.KeyValuePair type_params = 6;
repeated common.KeyValuePair index_params = 7;
bool autoID = 8;
FieldState state = 9; // To keep compatible with older version, the default state is `Created`.
}
/**

View File

@ -78,6 +78,37 @@ func (DataType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{0}
}
type FieldState int32
const (
FieldState_FieldCreated FieldState = 0
FieldState_FieldCreating FieldState = 1
FieldState_FieldDropping FieldState = 2
FieldState_FieldDropped FieldState = 3
)
var FieldState_name = map[int32]string{
0: "FieldCreated",
1: "FieldCreating",
2: "FieldDropping",
3: "FieldDropped",
}
var FieldState_value = map[string]int32{
"FieldCreated": 0,
"FieldCreating": 1,
"FieldDropping": 2,
"FieldDropped": 3,
}
func (x FieldState) String() string {
return proto.EnumName(FieldState_name, int32(x))
}
func (FieldState) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_1c5fb4d8cc22d66a, []int{1}
}
//*
// @brief Field schema
type FieldSchema struct {
@ -89,6 +120,7 @@ type FieldSchema struct {
TypeParams []*commonpb.KeyValuePair `protobuf:"bytes,6,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
IndexParams []*commonpb.KeyValuePair `protobuf:"bytes,7,rep,name=index_params,json=indexParams,proto3" json:"index_params,omitempty"`
AutoID bool `protobuf:"varint,8,opt,name=autoID,proto3" json:"autoID,omitempty"`
State FieldState `protobuf:"varint,9,opt,name=state,proto3,enum=milvus.proto.schema.FieldState" json:"state,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -175,6 +207,13 @@ func (m *FieldSchema) GetAutoID() bool {
return false
}
func (m *FieldSchema) GetState() FieldState {
if m != nil {
return m.State
}
return FieldState_FieldCreated
}
//*
// @brief Collection schema
type CollectionSchema struct {
@ -1022,6 +1061,7 @@ func (m *SearchResultData) GetTopks() []int64 {
func init() {
proto.RegisterEnum("milvus.proto.schema.DataType", DataType_name, DataType_value)
proto.RegisterEnum("milvus.proto.schema.FieldState", FieldState_name, FieldState_value)
proto.RegisterType((*FieldSchema)(nil), "milvus.proto.schema.FieldSchema")
proto.RegisterType((*CollectionSchema)(nil), "milvus.proto.schema.CollectionSchema")
proto.RegisterType((*BoolArray)(nil), "milvus.proto.schema.BoolArray")
@ -1041,68 +1081,71 @@ func init() {
func init() { proto.RegisterFile("schema.proto", fileDescriptor_1c5fb4d8cc22d66a) }
var fileDescriptor_1c5fb4d8cc22d66a = []byte{
// 995 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x5d, 0x6f, 0xe3, 0x44,
0x17, 0xce, 0xc4, 0xf9, 0xb0, 0x8f, 0xf3, 0xf6, 0xb5, 0x66, 0x17, 0x64, 0x90, 0x76, 0x9b, 0x8d,
0x40, 0x8a, 0x56, 0xa2, 0xd5, 0xb6, 0x68, 0x59, 0x56, 0xac, 0x80, 0x34, 0xaa, 0x12, 0x15, 0xad,
0x82, 0x8b, 0x8a, 0xc4, 0x4d, 0x34, 0x89, 0x67, 0xdb, 0x51, 0x6d, 0x8f, 0x99, 0x99, 0xac, 0xc8,
0x0f, 0xe0, 0x1f, 0x70, 0x85, 0xb8, 0xe0, 0x8f, 0x71, 0x81, 0xf8, 0x1d, 0x48, 0x68, 0x3e, 0x92,
0x18, 0xe2, 0xad, 0x7a, 0x77, 0x66, 0x7c, 0x9e, 0x67, 0xce, 0x79, 0xce, 0x87, 0xa1, 0x27, 0x97,
0x37, 0x34, 0x27, 0x47, 0xa5, 0xe0, 0x8a, 0xe3, 0x07, 0x39, 0xcb, 0xde, 0xae, 0xa4, 0x3d, 0x1d,
0xd9, 0x4f, 0x1f, 0xf6, 0x96, 0x3c, 0xcf, 0x79, 0x61, 0x2f, 0x07, 0x7f, 0x36, 0x21, 0x3c, 0x67,
0x34, 0x4b, 0x2f, 0xcd, 0x57, 0x1c, 0x43, 0xf7, 0x8d, 0x3e, 0x4e, 0xc7, 0x31, 0xea, 0xa3, 0xa1,
0x97, 0x6c, 0x8e, 0x18, 0x43, 0xab, 0x20, 0x39, 0x8d, 0x9b, 0x7d, 0x34, 0x0c, 0x12, 0x63, 0xe3,
0x8f, 0xe0, 0x80, 0xc9, 0x79, 0x29, 0x58, 0x4e, 0xc4, 0x7a, 0x7e, 0x4b, 0xd7, 0xb1, 0xd7, 0x47,
0x43, 0x3f, 0xe9, 0x31, 0x39, 0xb3, 0x97, 0x17, 0x74, 0x8d, 0xfb, 0x10, 0xa6, 0x54, 0x2e, 0x05,
0x2b, 0x15, 0xe3, 0x45, 0xdc, 0x32, 0x04, 0xd5, 0x2b, 0xfc, 0x12, 0x82, 0x94, 0x28, 0x32, 0x57,
0xeb, 0x92, 0xc6, 0xed, 0x3e, 0x1a, 0x1e, 0x9c, 0x3c, 0x3a, 0xaa, 0x09, 0xfe, 0x68, 0x4c, 0x14,
0xf9, 0x6e, 0x5d, 0xd2, 0xc4, 0x4f, 0x9d, 0x85, 0x47, 0x10, 0x6a, 0xd8, 0xbc, 0x24, 0x82, 0xe4,
0x32, 0xee, 0xf4, 0xbd, 0x61, 0x78, 0xf2, 0xe4, 0xdf, 0x68, 0x97, 0xf2, 0x05, 0x5d, 0x5f, 0x91,
0x6c, 0x45, 0x67, 0x84, 0x89, 0x04, 0x34, 0x6a, 0x66, 0x40, 0x78, 0x0c, 0x3d, 0x56, 0xa4, 0xf4,
0xa7, 0x0d, 0x49, 0xf7, 0xbe, 0x24, 0xa1, 0x81, 0x39, 0x96, 0xf7, 0xa1, 0x43, 0x56, 0x8a, 0x4f,
0xc7, 0xb1, 0x6f, 0x54, 0x70, 0xa7, 0xc1, 0xaf, 0x08, 0xa2, 0x33, 0x9e, 0x65, 0x74, 0xa9, 0x93,
0x75, 0x42, 0x6f, 0xe4, 0x44, 0x15, 0x39, 0xff, 0x23, 0x54, 0x73, 0x5f, 0xa8, 0xdd, 0x13, 0x5e,
0xf5, 0x09, 0xfc, 0x02, 0x3a, 0xa6, 0x4e, 0x32, 0x6e, 0x99, 0xd0, 0xfb, 0xb5, 0xea, 0x55, 0x0a,
0x9d, 0x38, 0xff, 0xc1, 0x21, 0x04, 0x23, 0xce, 0xb3, 0xaf, 0x85, 0x20, 0x6b, 0x1d, 0x94, 0xd6,
0x35, 0x46, 0x7d, 0x6f, 0xe8, 0x27, 0xc6, 0x1e, 0x3c, 0x06, 0x7f, 0x5a, 0xa8, 0xfd, 0xef, 0x6d,
0xf7, 0xfd, 0x10, 0x82, 0x6f, 0x78, 0x71, 0xbd, 0xef, 0xe0, 0x39, 0x87, 0x3e, 0xc0, 0x79, 0xc6,
0x49, 0x0d, 0x45, 0xd3, 0x79, 0x3c, 0x81, 0x70, 0xcc, 0x57, 0x8b, 0x8c, 0xee, 0xbb, 0xa0, 0x1d,
0xc9, 0x68, 0xad, 0xa8, 0xdc, 0xf7, 0xe8, 0xed, 0x48, 0x2e, 0x95, 0x60, 0x75, 0x91, 0x04, 0xce,
0xe5, 0x0f, 0x0f, 0xc2, 0xcb, 0x25, 0xc9, 0x88, 0x30, 0x4a, 0xe0, 0x57, 0x10, 0x2c, 0x38, 0xcf,
0xe6, 0xce, 0x11, 0x0d, 0xc3, 0x93, 0xc7, 0xb5, 0xc2, 0x6d, 0x15, 0x9a, 0x34, 0x12, 0x5f, 0x43,
0x74, 0x1f, 0xe2, 0x97, 0xe0, 0xb3, 0x42, 0x59, 0x74, 0xd3, 0xa0, 0xeb, 0x9b, 0x76, 0x23, 0xdf,
0xa4, 0x91, 0x74, 0x59, 0xa1, 0x0c, 0xf6, 0x15, 0x04, 0x19, 0x2f, 0xae, 0x2d, 0xd8, 0xbb, 0xe3,
0xe9, 0xad, 0xb6, 0xfa, 0x69, 0x0d, 0x31, 0xf0, 0xaf, 0x00, 0xde, 0x68, 0x4d, 0x2d, 0xbe, 0x65,
0xf0, 0x87, 0xf5, 0x35, 0xdf, 0x4a, 0x3f, 0x69, 0x24, 0x81, 0x01, 0x19, 0x86, 0x33, 0x08, 0x53,
0xa3, 0xb9, 0xa5, 0x68, 0x1b, 0x8a, 0xfa, 0xb6, 0xa9, 0xd4, 0x66, 0xd2, 0x48, 0xc0, 0xc2, 0x36,
0x24, 0xd2, 0x68, 0x6e, 0x49, 0x3a, 0x77, 0x90, 0x54, 0x6a, 0xa3, 0x49, 0x2c, 0x6c, 0x93, 0xcb,
0x42, 0x97, 0xd6, 0x72, 0x74, 0xef, 0xc8, 0x65, 0xd7, 0x01, 0x3a, 0x17, 0x03, 0xd2, 0x0c, 0xa3,
0x8e, 0xad, 0xf5, 0xe0, 0x17, 0x04, 0xe1, 0x15, 0x5d, 0x2a, 0xee, 0xea, 0x1b, 0x81, 0x97, 0xb2,
0xdc, 0x2d, 0x32, 0x6d, 0xea, 0x41, 0xb7, 0xba, 0xbd, 0x35, 0x6e, 0xae, 0x6c, 0xf7, 0x50, 0x2e,
0x34, 0x30, 0x4b, 0x8e, 0x3f, 0x86, 0xff, 0x2d, 0x58, 0xa1, 0x57, 0x9e, 0xa3, 0xd1, 0x05, 0xec,
0x4d, 0x1a, 0x49, 0xcf, 0x5e, 0x5b, 0xb7, 0x6d, 0x58, 0x7f, 0x23, 0x08, 0x4c, 0x40, 0x26, 0xdd,
0x67, 0xd0, 0x32, 0x6b, 0x0e, 0xdd, 0x67, 0xcd, 0x19, 0x57, 0xfc, 0x08, 0xc0, 0x4c, 0xeb, 0xbc,
0xb2, 0x80, 0x03, 0x73, 0xf3, 0x5a, 0xaf, 0x8d, 0x2f, 0xa0, 0x2b, 0x4d, 0x57, 0x4b, 0xd7, 0x49,
0xef, 0xa8, 0xc0, 0xae, 0xf3, 0x75, 0x27, 0x3a, 0x88, 0x46, 0xdb, 0x2c, 0xa4, 0xeb, 0xa3, 0x7a,
0x74, 0x45, 0x57, 0x8d, 0x76, 0x10, 0xfc, 0x01, 0xf8, 0x36, 0x34, 0x96, 0x9a, 0x1e, 0xda, 0xfe,
0x30, 0xd2, 0x51, 0x17, 0xda, 0xc6, 0x1c, 0xfc, 0x8c, 0xc0, 0x9b, 0x8e, 0x25, 0xfe, 0x0c, 0x3a,
0x7a, 0x5e, 0x58, 0x7a, 0xe7, 0xac, 0x55, 0x1b, 0xbe, 0xcd, 0x0a, 0x35, 0x4d, 0xf1, 0xe7, 0xd0,
0x91, 0x4a, 0x68, 0x60, 0xf3, 0xde, 0x1d, 0xd6, 0x96, 0x4a, 0x4c, 0xd3, 0x11, 0x80, 0xcf, 0xd2,
0xb9, 0x8d, 0xe3, 0x2f, 0x04, 0xd1, 0x25, 0x25, 0x62, 0x79, 0x93, 0x50, 0xb9, 0xca, 0xec, 0x1c,
0x1c, 0x42, 0x58, 0xac, 0xf2, 0xf9, 0x8f, 0x2b, 0x2a, 0x18, 0x95, 0xae, 0x57, 0xa0, 0x58, 0xe5,
0xdf, 0xda, 0x1b, 0xfc, 0x00, 0xda, 0x8a, 0x97, 0xf3, 0x5b, 0xf3, 0xb6, 0x97, 0xb4, 0x14, 0x2f,
0x2f, 0xf0, 0x97, 0x10, 0xda, 0xfd, 0xb9, 0x19, 0x60, 0xef, 0x9d, 0xf9, 0x6c, 0x2b, 0x9f, 0xd8,
0x22, 0x9a, 0x96, 0xd5, 0x8b, 0x5c, 0x2e, 0xb9, 0xa0, 0x76, 0x61, 0x37, 0x13, 0x77, 0xc2, 0x4f,
0xc1, 0x63, 0xa9, 0x74, 0xe3, 0x18, 0xd7, 0xaf, 0x93, 0xb1, 0x4c, 0xb4, 0x13, 0x7e, 0x68, 0x22,
0xbb, 0xb5, 0xff, 0x3c, 0x2f, 0xb1, 0x87, 0xa7, 0xbf, 0x21, 0xf0, 0x37, 0xfd, 0x83, 0x7d, 0x68,
0xbd, 0xe6, 0x05, 0x8d, 0x1a, 0xda, 0xd2, 0x5b, 0x2c, 0x42, 0xda, 0x9a, 0x16, 0xea, 0x45, 0xd4,
0xc4, 0x01, 0xb4, 0xa7, 0x85, 0x7a, 0xf6, 0x3c, 0xf2, 0x9c, 0x79, 0x7a, 0x12, 0xb5, 0x9c, 0xf9,
0xfc, 0xd3, 0xa8, 0xad, 0x4d, 0x33, 0x05, 0x11, 0x60, 0x80, 0x8e, 0xdd, 0x03, 0x51, 0xa8, 0x6d,
0x2b, 0x76, 0xf4, 0x10, 0x87, 0xd0, 0xbd, 0x22, 0xe2, 0xec, 0x86, 0x88, 0xe8, 0x3d, 0x1c, 0x41,
0x6f, 0x54, 0x99, 0x80, 0x28, 0xc5, 0xff, 0x87, 0xf0, 0x7c, 0x37, 0x39, 0x11, 0x1d, 0x7d, 0x0f,
0x07, 0x8c, 0x6f, 0xf2, 0xba, 0x16, 0xe5, 0x72, 0x14, 0xda, 0x3f, 0xd2, 0x4c, 0xe7, 0x38, 0x43,
0x3f, 0x9c, 0x5e, 0x33, 0x75, 0xb3, 0x5a, 0xe8, 0xdf, 0xed, 0xb1, 0x75, 0xfb, 0x84, 0x71, 0x67,
0x1d, 0xb3, 0x42, 0x51, 0x51, 0x90, 0xec, 0xd8, 0x28, 0x72, 0x6c, 0x15, 0x29, 0x17, 0xbf, 0x23,
0xb4, 0xe8, 0x98, 0xab, 0xd3, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd1, 0xcc, 0x21, 0x94, 0x03,
0x09, 0x00, 0x00,
// 1051 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x6f, 0x6f, 0x1b, 0xc5,
0x13, 0xf6, 0xfa, 0xfc, 0xe7, 0x6e, 0xce, 0xed, 0xef, 0x7e, 0xdb, 0x82, 0x0e, 0xa4, 0x36, 0xae,
0x05, 0x92, 0x55, 0x89, 0x44, 0x4d, 0xa0, 0x94, 0x8a, 0x0a, 0x70, 0xac, 0x28, 0x56, 0x50, 0x15,
0x2e, 0x28, 0x48, 0xbc, 0xb1, 0xd6, 0xbe, 0x6d, 0xb2, 0xca, 0xf9, 0xf6, 0xd8, 0x5d, 0x57, 0xf8,
0x03, 0xf0, 0x0d, 0x78, 0x81, 0x10, 0x2f, 0xf8, 0x62, 0xbc, 0xe2, 0x73, 0x20, 0xa1, 0x9d, 0x5d,
0xff, 0x29, 0x71, 0xa2, 0xbc, 0x9b, 0x9d, 0x9b, 0xe7, 0xb9, 0x99, 0x67, 0x66, 0x67, 0xa1, 0xa3,
0xa7, 0x97, 0x7c, 0xc6, 0x76, 0x2b, 0x25, 0x8d, 0xa4, 0x0f, 0x66, 0xa2, 0x78, 0x3b, 0xd7, 0xee,
0xb4, 0xeb, 0x3e, 0x7d, 0xd8, 0x99, 0xca, 0xd9, 0x4c, 0x96, 0xce, 0xd9, 0xfb, 0x2d, 0x80, 0xf8,
0x48, 0xf0, 0x22, 0x3f, 0xc3, 0xaf, 0x34, 0x85, 0xf6, 0x1b, 0x7b, 0x1c, 0x0d, 0x53, 0xd2, 0x25,
0xfd, 0x20, 0x5b, 0x1e, 0x29, 0x85, 0x46, 0xc9, 0x66, 0x3c, 0xad, 0x77, 0x49, 0x3f, 0xca, 0xd0,
0xa6, 0x1f, 0xc1, 0x7d, 0xa1, 0xc7, 0x95, 0x12, 0x33, 0xa6, 0x16, 0xe3, 0x2b, 0xbe, 0x48, 0x83,
0x2e, 0xe9, 0x87, 0x59, 0x47, 0xe8, 0x53, 0xe7, 0x3c, 0xe1, 0x0b, 0xda, 0x85, 0x38, 0xe7, 0x7a,
0xaa, 0x44, 0x65, 0x84, 0x2c, 0xd3, 0x06, 0x12, 0x6c, 0xba, 0xe8, 0x4b, 0x88, 0x72, 0x66, 0xd8,
0xd8, 0x2c, 0x2a, 0x9e, 0x36, 0xbb, 0xa4, 0x7f, 0x7f, 0xff, 0xd1, 0xee, 0x96, 0xe4, 0x77, 0x87,
0xcc, 0xb0, 0xef, 0x17, 0x15, 0xcf, 0xc2, 0xdc, 0x5b, 0x74, 0x00, 0xb1, 0x85, 0x8d, 0x2b, 0xa6,
0xd8, 0x4c, 0xa7, 0xad, 0x6e, 0xd0, 0x8f, 0xf7, 0x9f, 0xbc, 0x8b, 0xf6, 0x25, 0x9f, 0xf0, 0xc5,
0x39, 0x2b, 0xe6, 0xfc, 0x94, 0x09, 0x95, 0x81, 0x45, 0x9d, 0x22, 0x88, 0x0e, 0xa1, 0x23, 0xca,
0x9c, 0xff, 0xbc, 0x24, 0x69, 0xdf, 0x95, 0x24, 0x46, 0x98, 0x67, 0x79, 0x1f, 0x5a, 0x6c, 0x6e,
0xe4, 0x68, 0x98, 0x86, 0xa8, 0x82, 0x3f, 0xd1, 0xcf, 0xa0, 0xa9, 0x0d, 0x33, 0x3c, 0x8d, 0xb0,
0xb2, 0x9d, 0xad, 0x95, 0xb9, 0x26, 0xd8, 0xb0, 0xcc, 0x45, 0xf7, 0x7e, 0x27, 0x90, 0x1c, 0xca,
0xa2, 0xe0, 0x53, 0xab, 0x91, 0xef, 0xcf, 0xb2, 0x0b, 0x64, 0xa3, 0x0b, 0xff, 0xd1, 0xb7, 0x7e,
0x5d, 0xdf, 0x75, 0x66, 0xc1, 0x3b, 0x99, 0xbd, 0x80, 0x16, 0xb6, 0x57, 0xa7, 0x0d, 0xac, 0xb8,
0x7b, 0x4b, 0x6a, 0x68, 0x67, 0x3e, 0xbe, 0xb7, 0x03, 0xd1, 0x40, 0xca, 0xe2, 0x1b, 0xa5, 0xd8,
0xc2, 0x26, 0x65, 0xdb, 0x91, 0x92, 0x6e, 0xd0, 0x0f, 0x33, 0xb4, 0x7b, 0x8f, 0x21, 0x1c, 0x95,
0xe6, 0xfa, 0xf7, 0xa6, 0xff, 0xbe, 0x03, 0xd1, 0xb7, 0xb2, 0xbc, 0xb8, 0x1e, 0x10, 0xf8, 0x80,
0x2e, 0xc0, 0x51, 0x21, 0xd9, 0x16, 0x8a, 0xba, 0x8f, 0x78, 0x02, 0xf1, 0x50, 0xce, 0x27, 0x05,
0xbf, 0x1e, 0x42, 0xd6, 0x24, 0x83, 0x85, 0xe1, 0xfa, 0x7a, 0x44, 0x67, 0x4d, 0x72, 0x66, 0x94,
0xd8, 0x96, 0x49, 0xe4, 0x43, 0xfe, 0x0a, 0x20, 0x3e, 0x9b, 0xb2, 0x82, 0x29, 0x54, 0x82, 0xbe,
0x82, 0x68, 0x22, 0x65, 0x31, 0xf6, 0x81, 0xa4, 0x1f, 0xef, 0x3f, 0xde, 0x2a, 0xdc, 0x4a, 0xa1,
0xe3, 0x5a, 0x16, 0x5a, 0x88, 0x1d, 0x5f, 0xfa, 0x12, 0x42, 0x51, 0x1a, 0x87, 0xae, 0x23, 0x7a,
0xfb, 0xac, 0x2f, 0xe5, 0x3b, 0xae, 0x65, 0x6d, 0x51, 0x1a, 0xc4, 0xbe, 0x82, 0xa8, 0x90, 0xe5,
0x85, 0x03, 0x07, 0xb7, 0xfc, 0x7a, 0xa5, 0xad, 0xfd, 0xb5, 0x85, 0x20, 0xfc, 0x6b, 0x80, 0x37,
0x56, 0x53, 0x87, 0x6f, 0x20, 0xfe, 0x86, 0x71, 0x5c, 0x49, 0x7f, 0x5c, 0xcb, 0x22, 0x04, 0x21,
0xc3, 0x21, 0xc4, 0x39, 0x6a, 0xee, 0x28, 0x9a, 0x48, 0xb1, 0x7d, 0x6c, 0x36, 0x7a, 0x73, 0x5c,
0xcb, 0xc0, 0xc1, 0x96, 0x24, 0x1a, 0x35, 0x77, 0x24, 0xad, 0x5b, 0x48, 0x36, 0x7a, 0x63, 0x49,
0x1c, 0x6c, 0x59, 0xcb, 0xc4, 0xb6, 0xd6, 0x71, 0xb4, 0x6f, 0xa9, 0x65, 0x3d, 0x01, 0xb6, 0x16,
0x04, 0x59, 0x86, 0x41, 0xcb, 0xf5, 0xba, 0xf7, 0x2b, 0x81, 0xf8, 0x9c, 0x4f, 0x8d, 0xf4, 0xfd,
0x4d, 0x20, 0xc8, 0xc5, 0xcc, 0xef, 0x3f, 0x6b, 0xda, 0xfd, 0xe0, 0x74, 0x7b, 0x8b, 0x61, 0xbe,
0x6d, 0x77, 0x50, 0x2e, 0x46, 0x98, 0x23, 0xa7, 0x1f, 0xc3, 0xbd, 0x89, 0x28, 0xed, 0xa6, 0xf4,
0x34, 0xb6, 0x81, 0x9d, 0xe3, 0x5a, 0xd6, 0x71, 0x6e, 0x17, 0xb6, 0x4a, 0xeb, 0x1f, 0x02, 0x11,
0x26, 0x84, 0xe5, 0x3e, 0x83, 0x06, 0x6e, 0x47, 0x72, 0x97, 0xed, 0x88, 0xa1, 0xf4, 0x11, 0x00,
0xde, 0xd6, 0xf1, 0xc6, 0xde, 0x8e, 0xd0, 0xf3, 0xda, 0xae, 0x8d, 0x2f, 0xa1, 0xad, 0x71, 0xaa,
0xb5, 0x9f, 0xa4, 0x1b, 0x3a, 0xb0, 0x9e, 0x7c, 0x3b, 0x89, 0x1e, 0x62, 0xd1, 0xae, 0x0a, 0xed,
0xe7, 0x68, 0x3b, 0x7a, 0x43, 0x57, 0x8b, 0xf6, 0x10, 0xfa, 0x01, 0x84, 0x2e, 0x35, 0x91, 0xe3,
0x0c, 0xad, 0xde, 0x99, 0x7c, 0xd0, 0x86, 0x26, 0x9a, 0xbd, 0x5f, 0x08, 0x04, 0xa3, 0xa1, 0xa6,
0x9f, 0x43, 0xcb, 0xde, 0x17, 0x91, 0xdf, 0x7a, 0xd7, 0x36, 0x07, 0xbe, 0x29, 0x4a, 0x33, 0xca,
0xe9, 0x17, 0xd0, 0xd2, 0x46, 0x59, 0x60, 0xfd, 0xce, 0x13, 0xd6, 0xd4, 0x46, 0x8d, 0xf2, 0x01,
0x40, 0x28, 0xf2, 0xb1, 0xcb, 0xe3, 0x6f, 0x02, 0xc9, 0x19, 0x67, 0x6a, 0x7a, 0x99, 0x71, 0x3d,
0x2f, 0xdc, 0x3d, 0xd8, 0x81, 0xb8, 0x9c, 0xcf, 0xc6, 0x3f, 0xcd, 0xb9, 0x12, 0x5c, 0xfb, 0x59,
0x81, 0x72, 0x3e, 0xfb, 0xce, 0x79, 0xe8, 0x03, 0x68, 0x1a, 0x59, 0x8d, 0xaf, 0xf0, 0xdf, 0x41,
0xd6, 0x30, 0xb2, 0x3a, 0xa1, 0x5f, 0x41, 0xec, 0xf6, 0xe7, 0xf2, 0x02, 0x07, 0x37, 0xd6, 0xb3,
0xea, 0x7c, 0xe6, 0x9a, 0x88, 0x23, 0x6b, 0x17, 0xb9, 0x9e, 0x4a, 0xc5, 0xdd, 0xc2, 0xae, 0x67,
0xfe, 0x44, 0x9f, 0x42, 0x20, 0x72, 0xed, 0xaf, 0x63, 0xba, 0x7d, 0x9d, 0x0c, 0x75, 0x66, 0x83,
0xe8, 0x43, 0xcc, 0xec, 0xca, 0x3d, 0x95, 0x41, 0xe6, 0x0e, 0x4f, 0xff, 0x20, 0x10, 0x2e, 0xe7,
0x87, 0x86, 0xd0, 0x78, 0x2d, 0x4b, 0x9e, 0xd4, 0xac, 0x65, 0xb7, 0x58, 0x42, 0xac, 0x35, 0x2a,
0xcd, 0x8b, 0xa4, 0x4e, 0x23, 0x68, 0x8e, 0x4a, 0xf3, 0xec, 0x79, 0x12, 0x78, 0xf3, 0x60, 0x3f,
0x69, 0x78, 0xf3, 0xf9, 0xa7, 0x49, 0xd3, 0x9a, 0x78, 0x0b, 0x12, 0xa0, 0x00, 0x2d, 0xb7, 0x07,
0x92, 0xd8, 0xda, 0x4e, 0xec, 0xe4, 0x21, 0x8d, 0xa1, 0x7d, 0xce, 0xd4, 0xe1, 0x25, 0x53, 0xc9,
0x7b, 0x34, 0x81, 0xce, 0x60, 0xe3, 0x06, 0x24, 0x39, 0xfd, 0x1f, 0xc4, 0x47, 0xeb, 0x9b, 0x93,
0xf0, 0xa7, 0xe7, 0x00, 0xeb, 0x17, 0xd2, 0x02, 0xf0, 0x74, 0xa8, 0x38, 0x33, 0x3c, 0x4f, 0x6a,
0xf4, 0xff, 0x70, 0x6f, 0xed, 0xb1, 0xbf, 0x20, 0x2b, 0xd7, 0x50, 0xc9, 0xaa, 0xb2, 0xae, 0xfa,
0x0a, 0x87, 0x2e, 0x9e, 0x27, 0xc1, 0xe0, 0x07, 0xb8, 0x2f, 0xe4, 0x52, 0xaf, 0x0b, 0x55, 0x4d,
0x07, 0xb1, 0x7b, 0xe9, 0x4e, 0xad, 0x76, 0xa7, 0xe4, 0xc7, 0x83, 0x0b, 0x61, 0x2e, 0xe7, 0x13,
0xfb, 0xfa, 0xef, 0xb9, 0xb0, 0x4f, 0x84, 0xf4, 0xd6, 0x9e, 0x28, 0x0d, 0x57, 0x25, 0x2b, 0xf6,
0x50, 0xe9, 0x3d, 0xa7, 0x74, 0x35, 0xf9, 0x93, 0x90, 0x49, 0x0b, 0x5d, 0x07, 0xff, 0x06, 0x00,
0x00, 0xff, 0xff, 0x31, 0x59, 0x18, 0x2e, 0x92, 0x09, 0x00, 0x00,
}

View File

@ -129,29 +129,6 @@ func (node *Proxy) InvalidateCollectionMetaCache(ctx context.Context, request *p
}, nil
}
// ReleaseDQLMessageStream release the query message stream of specific collection.
func (node *Proxy) ReleaseDQLMessageStream(ctx context.Context, request *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
ctx = logutil.WithModule(ctx, moduleName)
logutil.Logger(ctx).Debug("received request to release DQL message strem",
zap.Any("role", typeutil.ProxyRole),
zap.Any("db", request.DbID),
zap.Any("collection", request.CollectionID))
if !node.checkHealthy() {
return unhealthyStatus(), nil
}
logutil.Logger(ctx).Debug("complete to release DQL message stream",
zap.Any("role", typeutil.ProxyRole),
zap.Any("db", request.DbID),
zap.Any("collection", request.CollectionID))
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
}, nil
}
// CreateCollection create a collection by the schema.
// TODO(dragondriver): add more detailed ut for ConsistencyLevel, should we support multiple consistency level in Proxy?
func (node *Proxy) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
@ -4021,22 +3998,6 @@ func (node *Proxy) ListCredUsers(ctx context.Context, req *milvuspb.ListCredUser
}, nil
}
// SendSearchResult needs to be removed TODO
func (node *Proxy) SendSearchResult(ctx context.Context, req *internalpb.SearchResults) (*commonpb.Status, error) {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "Not implemented",
}, nil
}
// SendRetrieveResult needs to be removed TODO
func (node *Proxy) SendRetrieveResult(ctx context.Context, req *internalpb.RetrieveResults) (*commonpb.Status, error) {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "Not implemented",
}, nil
}
func (node *Proxy) CreateRole(ctx context.Context, req *milvuspb.CreateRoleRequest) (*commonpb.Status, error) {
logger.Debug("CreateRole", zap.Any("req", req))
if code, ok := node.checkHealthyAndReturnCode(); !ok {

View File

@ -718,12 +718,6 @@ func TestProxy(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
// recreate -> fail
req2 := constructCreateCollectionRequest()
resp, err = proxy.CreateCollection(ctx, req2)
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
reqInvalidField := constructCreateCollectionRequest()
schema := constructCollectionSchema()
schema.Fields = append(schema.Fields, &schemapb.FieldSchema{
@ -764,16 +758,6 @@ func TestProxy(t *testing.T) {
DbName: dbName,
CollectionName: collectionName,
})
sameAliasReq := &milvuspb.CreateAliasRequest{
Base: nil,
CollectionName: collectionName,
Alias: "alias",
}
resp, err = proxy.CreateAlias(ctx, sameAliasReq)
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
wg.Add(1)
@ -833,15 +817,6 @@ func TestProxy(t *testing.T) {
CollectionName: collectionName,
})
sameDropReq := &milvuspb.DropAliasRequest{
Base: nil,
Alias: "alias",
}
// Can't drop non-existing alias
resp, err = proxy.DropAlias(ctx, sameDropReq)
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
wg.Add(1)
@ -950,16 +925,6 @@ func TestProxy(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
// recreate -> fail
resp, err = proxy.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{
Base: nil,
DbName: dbName,
CollectionName: collectionName,
PartitionName: partitionName,
})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
// create partition with non-exist collection -> fail
resp, err = proxy.CreatePartition(ctx, &milvuspb.CreatePartitionRequest{
Base: nil,
@ -1683,7 +1648,7 @@ func TestProxy(t *testing.T) {
wg.Add(1)
t.Run("release collection", func(t *testing.T) {
defer wg.Done()
collectionID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
_, err := globalMetaCache.GetCollectionID(ctx, collectionName)
assert.NoError(t, err)
resp, err := proxy.ReleaseCollection(ctx, &milvuspb.ReleaseCollectionRequest{
@ -1695,16 +1660,6 @@ func TestProxy(t *testing.T) {
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
assert.Equal(t, "", resp.Reason)
// release dql message stream
resp, err = proxy.ReleaseDQLMessageStream(ctx, &proxypb.ReleaseDQLMessageStreamRequest{
Base: nil,
DbID: 0,
CollectionID: collectionID,
})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
assert.Equal(t, "", resp.Reason)
// release collection cache
resp, err = proxy.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{
Base: nil,
@ -1995,24 +1950,6 @@ func TestProxy(t *testing.T) {
// drop non-exist partition -> fail
resp, err = proxy.DropPartition(ctx, &milvuspb.DropPartitionRequest{
Base: nil,
DbName: dbName,
CollectionName: collectionName,
PartitionName: partitionName,
})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
resp, err = proxy.DropPartition(ctx, &milvuspb.DropPartitionRequest{
Base: nil,
DbName: dbName,
CollectionName: collectionName,
PartitionName: otherCollectionName,
})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
resp, err = proxy.DropPartition(ctx, &milvuspb.DropPartitionRequest{
Base: nil,
DbName: dbName,
@ -2087,7 +2024,7 @@ func TestProxy(t *testing.T) {
wg.Add(1)
t.Run("drop collection", func(t *testing.T) {
defer wg.Done()
collectionID, err := globalMetaCache.GetCollectionID(ctx, collectionName)
_, err := globalMetaCache.GetCollectionID(ctx, collectionName)
assert.NoError(t, err)
resp, err := proxy.DropCollection(ctx, &milvuspb.DropCollectionRequest{
@ -2107,15 +2044,6 @@ func TestProxy(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
// release dql stream
resp, err = proxy.ReleaseDQLMessageStream(ctx, &proxypb.ReleaseDQLMessageStreamRequest{
Base: nil,
DbID: 0,
CollectionID: collectionID,
})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.ErrorCode)
// release collection load cache
resp, err = proxy.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{
Base: nil,
@ -2328,14 +2256,6 @@ func TestProxy(t *testing.T) {
proxy.UpdateStateCode(internalpb.StateCode_Abnormal)
wg.Add(1)
t.Run("ReleaseDQLMessageStream fail, unhealthy", func(t *testing.T) {
defer wg.Done()
resp, err := proxy.ReleaseDQLMessageStream(ctx, &proxypb.ReleaseDQLMessageStreamRequest{})
assert.NoError(t, err)
assert.NotEqual(t, commonpb.ErrorCode_Success, resp.ErrorCode)
})
wg.Add(1)
t.Run("CreateCollection fail, unhealthy", func(t *testing.T) {
defer wg.Done()

View File

@ -899,20 +899,6 @@ func (coord *RootCoordMock) DescribeSegments(ctx context.Context, req *rootcoord
panic("implement me")
}
func (coord *RootCoordMock) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
code := coord.state.Load().(internalpb.StateCode)
if code != internalpb.StateCode_Healthy {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: fmt.Sprintf("state code = %s", internalpb.StateCode_name[int32(code)]),
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
Reason: "",
}, nil
}
func (coord *RootCoordMock) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
code := coord.state.Load().(internalpb.StateCode)
if code != internalpb.StateCode_Healthy {

View File

@ -314,7 +314,9 @@ func (dct *dropCollectionTask) PreExecute(ctx context.Context) error {
func (dct *dropCollectionTask) Execute(ctx context.Context) error {
collID, err := globalMetaCache.GetCollectionID(ctx, dct.CollectionName)
if err != nil {
return err
// make dropping collection idempotent.
dct.result = &commonpb.Status{ErrorCode: commonpb.ErrorCode_Success}
return nil
}
dct.result, err = dct.rootCoord.DropCollection(ctx, dct.DropCollectionRequest)

View File

@ -799,7 +799,7 @@ func TestDropCollectionTask(t *testing.T) {
return 0, errors.New("mock")
})
err = task.Execute(ctx)
assert.Error(t, err)
assert.NoError(t, err)
cache.setGetIDFunc(func(ctx context.Context, collectionName string) (typeutil.UniqueID, error) {
return 0, nil
})

View File

@ -224,23 +224,6 @@ func (rc *rootCoordMock) ShowPartitions(ctx context.Context, in *milvuspb.ShowPa
}, nil
}
func (rc *rootCoordMock) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) (*commonpb.Status, error) {
if rc.returnGrpcError {
return nil, errors.New("release DQLMessage stream failed")
}
if rc.returnError {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UnexpectedError,
Reason: "release DQLMessage stream failed",
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_Success,
}, nil
}
func (rc *rootCoordMock) InvalidateCollectionMetaCache(ctx context.Context, in *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
if rc.returnGrpcError {
return nil, errors.New("InvalidateCollectionMetaCache failed")

View File

@ -0,0 +1,28 @@
package rootcoord
import (
"context"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// alterAliasTask redirects an existing alias to a (possibly different) collection.
type alterAliasTask struct {
	baseTaskV2
	Req *milvuspb.AlterAliasRequest // the original AlterAlias request being served
}
// Prepare validates that the request carries the AlterAlias message type.
func (t *alterAliasTask) Prepare(ctx context.Context) error {
	return CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_AlterAlias)
}
// Execute invalidates the proxy-side meta cache for the alias, then rewrites
// the alias mapping in meta storage.
func (t *alterAliasTask) Execute(ctx context.Context) error {
	err := t.core.ExpireMetaCache(ctx, []string{t.Req.GetAlias()}, InvalidCollectionID, t.GetTs())
	if err != nil {
		return err
	}
	// alter alias is atomic enough.
	return t.core.meta.AlterAlias(ctx, t.Req.GetAlias(), t.Req.GetCollectionName(), t.GetTs())
}

View File

@ -0,0 +1,54 @@
package rootcoord
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
func Test_alterAliasTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		// A mismatched base msg type must be rejected by Prepare.
		task := &alterAliasTask{Req: &milvuspb.AlterAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}}}
		assert.Error(t, task.Prepare(context.Background()))
	})

	t.Run("normal case", func(t *testing.T) {
		task := &alterAliasTask{Req: &milvuspb.AlterAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_AlterAlias}}}
		assert.NoError(t, task.Prepare(context.Background()))
	})
}
func Test_alterAliasTask_Execute(t *testing.T) {
t.Run("failed to expire cache", func(t *testing.T) {
core := newTestCore(withInvalidProxyManager())
task := &alterAliasTask{
baseTaskV2: baseTaskV2{core: core},
Req: &milvuspb.AlterAliasRequest{
Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_AlterAlias},
Alias: "test",
},
}
err := task.Execute(context.Background())
assert.Error(t, err)
})
t.Run("failed to alter alias", func(t *testing.T) {
core := newTestCore(withValidProxyManager(), withInvalidMeta())
task := &alterAliasTask{
baseTaskV2: baseTaskV2{core: core},
Req: &milvuspb.AlterAliasRequest{
Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_AlterAlias},
Alias: "test",
},
}
err := task.Execute(context.Background())
assert.Error(t, err)
})
}

View File

@ -0,0 +1,226 @@
package rootcoord
import (
"context"
"errors"
"fmt"
"time"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/util/funcutil"
"go.uber.org/zap"
)
// watchInfo bundles everything needed to (un)watch the DML channels of a
// collection/partition on DataCoord.
type watchInfo struct {
	ts             Timestamp // timestamp carried through watch requests (used for logging here)
	collectionID   UniqueID
	partitionID    UniqueID
	vChannels      []string // virtual channel names to watch
	startPositions []*commonpb.KeyDataPair // start positions for the channels — presumably keyed by channel name; TODO confirm against producers
}
// Broker communicates with other components.
type Broker interface {
	// QueryCoord-facing calls.
	ReleaseCollection(ctx context.Context, collectionID UniqueID) error
	GetQuerySegmentInfo(ctx context.Context, collectionID int64, segIDs []int64) (retResp *querypb.GetSegmentInfoResponse, retErr error)

	// DataCoord-facing calls.
	WatchChannels(ctx context.Context, info *watchInfo) error
	UnwatchChannels(ctx context.Context, info *watchInfo) error
	AddSegRefLock(ctx context.Context, taskID int64, segIDs []int64) error
	ReleaseSegRefLock(ctx context.Context, taskID int64, segIDs []int64) error
	Flush(ctx context.Context, cID int64, segIDs []int64) error
	Import(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error)

	// IndexCoord-facing calls.
	DropCollectionIndex(ctx context.Context, collID UniqueID) error
	GetSegmentIndexState(ctx context.Context, collID UniqueID, indexName string, segIDs []UniqueID) ([]*indexpb.SegmentIndexState, error)
}
// ServerBroker is the Core-side implementation of Broker: it forwards each
// request to the query/data/index coordinator clients held by the Core.
type ServerBroker struct {
	s *Core // owning Core; supplies coordinator clients and the session
}
// newServerBroker creates a ServerBroker backed by the given Core.
func newServerBroker(s *Core) *ServerBroker {
	return &ServerBroker{s: s}
}
// ReleaseCollection asks QueryCoord to release the given collection, waiting
// for QueryCoord to become healthy first.
func (b *ServerBroker) ReleaseCollection(ctx context.Context, collectionID UniqueID) error {
	log.Info("releasing collection", zap.Int64("collection", collectionID))

	err := funcutil.WaitForComponentHealthy(ctx, b.s.queryCoord, "QueryCoord", 100, time.Millisecond*200)
	if err != nil {
		log.Error("failed to release collection, querycoord not healthy", zap.Error(err), zap.Int64("collection", collectionID))
		return err
	}

	req := &querypb.ReleaseCollectionRequest{
		Base:         &commonpb.MsgBase{MsgType: commonpb.MsgType_ReleaseCollection},
		CollectionID: collectionID,
		NodeID:       b.s.session.ServerID,
	}
	status, err := b.s.queryCoord.ReleaseCollection(ctx, req)
	if err != nil {
		return err
	}
	if status.GetErrorCode() != commonpb.ErrorCode_Success {
		return fmt.Errorf("failed to release collection, code: %s, reason: %s", status.GetErrorCode(), status.GetReason())
	}

	log.Info("done to release collection", zap.Int64("collection", collectionID))
	return nil
}
// GetQuerySegmentInfo fetches segment information for the given collection
// from QueryCoord. The response/error pair is returned as-is from the client.
//
// NOTE(review): the request uses MsgType_GetSegmentState for a GetSegmentInfo
// call — looks intentional in the original, but worth confirming.
func (b *ServerBroker) GetQuerySegmentInfo(ctx context.Context, collectionID int64, segIDs []int64) (*querypb.GetSegmentInfoResponse, error) {
	// The original declared named results (retResp, retErr) that were never
	// used; return the coordinator call directly instead.
	return b.s.queryCoord.GetSegmentInfo(ctx, &querypb.GetSegmentInfoRequest{
		Base: &commonpb.MsgBase{
			MsgType:  commonpb.MsgType_GetSegmentState,
			SourceID: b.s.session.ServerID,
		},
		CollectionID: collectionID,
		SegmentIDs:   segIDs,
	})
}
// toKeyDataPairs flattens a map into a list of KeyDataPair entries.
// Map iteration order is unspecified, so the result order is too.
func toKeyDataPairs(m map[string][]byte) []*commonpb.KeyDataPair {
	pairs := make([]*commonpb.KeyDataPair, 0, len(m))
	for key, value := range m {
		pairs = append(pairs, &commonpb.KeyDataPair{
			Key:  key,
			Data: value,
		})
	}
	return pairs
}
// WatchChannels asks DataCoord to watch the virtual channels described by
// info, waiting for DataCoord to become healthy first.
func (b *ServerBroker) WatchChannels(ctx context.Context, info *watchInfo) error {
	log.Info("watching channels", zap.Uint64("ts", info.ts), zap.Int64("collection", info.collectionID), zap.Strings("vChannels", info.vChannels))

	if err := funcutil.WaitForComponentHealthy(ctx, b.s.dataCoord, "DataCoord", 100, time.Millisecond*200); err != nil {
		return err
	}

	req := &datapb.WatchChannelsRequest{
		CollectionID:   info.collectionID,
		ChannelNames:   info.vChannels,
		StartPositions: info.startPositions,
	}
	resp, err := b.s.dataCoord.WatchChannels(ctx, req)
	if err != nil {
		return err
	}
	if code := resp.GetStatus().GetErrorCode(); code != commonpb.ErrorCode_Success {
		return fmt.Errorf("failed to watch channels, code: %s, reason: %s", code, resp.GetStatus().GetReason())
	}

	log.Info("done to watch channels", zap.Uint64("ts", info.ts), zap.Int64("collection", info.collectionID), zap.Strings("vChannels", info.vChannels))
	return nil
}
// UnwatchChannels is currently a no-op placeholder; unwatching (releasing the
// flowgraph on datanodes) is not implemented yet.
func (b *ServerBroker) UnwatchChannels(ctx context.Context, info *watchInfo) error {
	// TODO: release flowgraph on datanodes.
	return nil
}
// AddSegRefLock asks DataCoord to take reference locks on the given segments
// on behalf of the given task.
func (b *ServerBroker) AddSegRefLock(ctx context.Context, taskID int64, segIDs []int64) error {
	log.Info("acquiring seg lock",
		zap.Int64s("segment IDs", segIDs),
		zap.Int64("node ID", b.s.session.ServerID))

	req := &datapb.AcquireSegmentLockRequest{
		SegmentIDs: segIDs,
		NodeID:     b.s.session.ServerID,
		TaskID:     taskID,
	}
	resp, err := b.s.dataCoord.AcquireSegmentLock(ctx, req)
	if err != nil {
		return err
	}
	if resp.GetErrorCode() != commonpb.ErrorCode_Success {
		return fmt.Errorf("failed to acquire segment lock %s", resp.GetReason())
	}

	log.Info("acquire seg lock succeed",
		zap.Int64s("segment IDs", segIDs),
		zap.Int64("node ID", b.s.session.ServerID))
	return nil
}
// ReleaseSegRefLock asks DataCoord to release the reference locks previously
// taken on the given segments for the given task.
func (b *ServerBroker) ReleaseSegRefLock(ctx context.Context, taskID int64, segIDs []int64) error {
	log.Info("releasing seg lock",
		zap.Int64s("segment IDs", segIDs),
		zap.Int64("node ID", b.s.session.ServerID))

	req := &datapb.ReleaseSegmentLockRequest{
		SegmentIDs: segIDs,
		NodeID:     b.s.session.ServerID,
		TaskID:     taskID,
	}
	resp, err := b.s.dataCoord.ReleaseSegmentLock(ctx, req)
	if err != nil {
		return err
	}
	if resp.GetErrorCode() != commonpb.ErrorCode_Success {
		return fmt.Errorf("failed to release segment lock %s", resp.GetReason())
	}

	log.Info("release seg lock succeed",
		zap.Int64s("segment IDs", segIDs),
		zap.Int64("node ID", b.s.session.ServerID))
	return nil
}
// Flush asks DataCoord to flush the given segments of the collection.
// DbID is hard-coded to 0 (databases are not used on this path).
func (b *ServerBroker) Flush(ctx context.Context, cID int64, segIDs []int64) error {
	resp, err := b.s.dataCoord.Flush(ctx, &datapb.FlushRequest{
		Base: &commonpb.MsgBase{
			MsgType:  commonpb.MsgType_Flush,
			SourceID: b.s.session.ServerID,
		},
		DbID:         0,
		SegmentIDs:   segIDs,
		CollectionID: cID,
	})
	if err != nil {
		// Wrap instead of flattening to a string so callers can still use
		// errors.Is/As on the underlying RPC error.
		return fmt.Errorf("failed to call flush to data coordinator: %w", err)
	}
	// Use nil-safe getters: a response with a nil Status must not panic.
	if resp.GetStatus().GetErrorCode() != commonpb.ErrorCode_Success {
		return errors.New(resp.GetStatus().GetReason())
	}
	log.Info("flush on collection succeed", zap.Int64("collection ID", cID))
	return nil
}
// Import forwards a bulk-import task request directly to DataCoord.
func (b *ServerBroker) Import(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
	return b.s.dataCoord.Import(ctx, req)
}
// DropCollectionIndex asks IndexCoord to drop all indexes of the given
// collection (empty IndexName means every index), waiting for IndexCoord to
// become healthy first.
func (b *ServerBroker) DropCollectionIndex(ctx context.Context, collID UniqueID) error {
	if err := funcutil.WaitForComponentHealthy(ctx, b.s.indexCoord, "IndexCoord", 100, time.Millisecond*100); err != nil {
		return err
	}
	rsp, err := b.s.indexCoord.DropIndex(ctx, &indexpb.DropIndexRequest{
		CollectionID: collID,
		IndexName:    "", // empty name drops all indexes of the collection
	})
	if err != nil {
		return err
	}
	if rsp.GetErrorCode() != commonpb.ErrorCode_Success {
		// Was fmt.Errorf(rsp.Reason): a non-constant format string (go vet
		// printf) — any '%' inside the reason would be misread as a verb.
		return errors.New(rsp.GetReason())
	}
	return nil
}
// GetSegmentIndexState queries IndexCoord for the index build state of the
// given segments under the named index.
func (b *ServerBroker) GetSegmentIndexState(ctx context.Context, collID UniqueID, indexName string, segIDs []UniqueID) ([]*indexpb.SegmentIndexState, error) {
	req := &indexpb.GetSegmentIndexStateRequest{
		CollectionID: collID,
		IndexName:    indexName,
		SegmentIDs:   segIDs,
	}
	resp, err := b.s.indexCoord.GetSegmentIndexState(ctx, req)
	if err != nil {
		return nil, err
	}
	if resp.Status.ErrorCode != commonpb.ErrorCode_Success {
		return nil, errors.New(resp.Status.Reason)
	}
	return resp.GetStates(), nil
}

View File

@ -0,0 +1,302 @@
package rootcoord
import (
"context"
"testing"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/stretchr/testify/assert"
)
func TestServerBroker_ReleaseCollection(t *testing.T) {
t.Run("not healthy", func(t *testing.T) {
c := newTestCore(withUnhealthyQueryCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.ReleaseCollection(ctx, 1)
assert.Error(t, err)
})
t.Run("failed to execute", func(t *testing.T) {
c := newTestCore(withInvalidQueryCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.ReleaseCollection(ctx, 1)
assert.Error(t, err)
})
t.Run("non success error code on execute", func(t *testing.T) {
c := newTestCore(withFailedQueryCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.ReleaseCollection(ctx, 1)
assert.Error(t, err)
})
t.Run("success", func(t *testing.T) {
c := newTestCore(withValidQueryCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.ReleaseCollection(ctx, 1)
assert.NoError(t, err)
})
}
func TestServerBroker_GetSegmentInfo(t *testing.T) {
t.Run("failed to execute", func(t *testing.T) {
c := newTestCore(withInvalidQueryCoord())
b := newServerBroker(c)
ctx := context.Background()
_, err := b.GetQuerySegmentInfo(ctx, 1, []int64{1, 2})
assert.Error(t, err)
})
t.Run("non success error code on execute", func(t *testing.T) {
c := newTestCore(withFailedQueryCoord())
b := newServerBroker(c)
ctx := context.Background()
resp, err := b.GetQuerySegmentInfo(ctx, 1, []int64{1, 2})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
})
t.Run("success", func(t *testing.T) {
c := newTestCore(withValidQueryCoord())
b := newServerBroker(c)
ctx := context.Background()
resp, err := b.GetQuerySegmentInfo(ctx, 1, []int64{1, 2})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
}
func TestServerBroker_WatchChannels(t *testing.T) {
t.Run("unhealthy", func(t *testing.T) {
c := newTestCore(withUnhealthyDataCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.WatchChannels(ctx, &watchInfo{})
assert.Error(t, err)
})
t.Run("failed to execute", func(t *testing.T) {
defer cleanTestEnv()
c := newTestCore(withInvalidDataCoord(), withRocksMqTtSynchronizer())
b := newServerBroker(c)
ctx := context.Background()
err := b.WatchChannels(ctx, &watchInfo{})
assert.Error(t, err)
})
t.Run("non success error code on execute", func(t *testing.T) {
defer cleanTestEnv()
c := newTestCore(withFailedDataCoord(), withRocksMqTtSynchronizer())
b := newServerBroker(c)
ctx := context.Background()
err := b.WatchChannels(ctx, &watchInfo{})
assert.Error(t, err)
})
t.Run("success", func(t *testing.T) {
defer cleanTestEnv()
c := newTestCore(withValidDataCoord(), withRocksMqTtSynchronizer())
b := newServerBroker(c)
ctx := context.Background()
err := b.WatchChannels(ctx, &watchInfo{})
assert.NoError(t, err)
})
}
func TestServerBroker_UnwatchChannels(t *testing.T) {
	// TODO: implement
	broker := newServerBroker(newTestCore())
	broker.UnwatchChannels(context.Background(), &watchInfo{})
}
func TestServerBroker_AddSegRefLock(t *testing.T) {
t.Run("failed to execute", func(t *testing.T) {
c := newTestCore(withInvalidDataCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.AddSegRefLock(ctx, 1, []int64{1, 2})
assert.Error(t, err)
})
t.Run("non success error code on execute", func(t *testing.T) {
c := newTestCore(withFailedDataCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.AddSegRefLock(ctx, 1, []int64{1, 2})
assert.Error(t, err)
})
t.Run("success", func(t *testing.T) {
c := newTestCore(withValidDataCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.AddSegRefLock(ctx, 1, []int64{1, 2})
assert.NoError(t, err)
})
}
func TestServerBroker_ReleaseSegRefLock(t *testing.T) {
t.Run("failed to execute", func(t *testing.T) {
c := newTestCore(withInvalidDataCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.ReleaseSegRefLock(ctx, 1, []int64{1, 2})
assert.Error(t, err)
})
t.Run("non success error code on execute", func(t *testing.T) {
c := newTestCore(withFailedDataCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.ReleaseSegRefLock(ctx, 1, []int64{1, 2})
assert.Error(t, err)
})
t.Run("success", func(t *testing.T) {
c := newTestCore(withValidDataCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.ReleaseSegRefLock(ctx, 1, []int64{1, 2})
assert.NoError(t, err)
})
}
func TestServerBroker_Flush(t *testing.T) {
t.Run("failed to execute", func(t *testing.T) {
c := newTestCore(withInvalidDataCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.Flush(ctx, 1, []int64{1, 2})
assert.Error(t, err)
})
t.Run("non success error code on execute", func(t *testing.T) {
c := newTestCore(withFailedDataCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.Flush(ctx, 1, []int64{1, 2})
assert.Error(t, err)
})
t.Run("success", func(t *testing.T) {
c := newTestCore(withValidDataCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.Flush(ctx, 1, []int64{1, 2})
assert.NoError(t, err)
})
}
func TestServerBroker_Import(t *testing.T) {
t.Run("failed to execute", func(t *testing.T) {
c := newTestCore(withInvalidDataCoord())
b := newServerBroker(c)
ctx := context.Background()
resp, err := b.Import(ctx, &datapb.ImportTaskRequest{})
assert.Error(t, err)
assert.Nil(t, resp)
})
t.Run("non success error code on execute", func(t *testing.T) {
c := newTestCore(withFailedDataCoord())
b := newServerBroker(c)
ctx := context.Background()
resp, err := b.Import(ctx, &datapb.ImportTaskRequest{})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
})
t.Run("success", func(t *testing.T) {
c := newTestCore(withValidDataCoord())
b := newServerBroker(c)
ctx := context.Background()
resp, err := b.Import(ctx, &datapb.ImportTaskRequest{})
assert.NoError(t, err)
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
})
}
func TestServerBroker_DropCollectionIndex(t *testing.T) {
t.Run("not healthy", func(t *testing.T) {
c := newTestCore(withUnhealthyIndexCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.DropCollectionIndex(ctx, 1)
assert.Error(t, err)
})
t.Run("failed to execute", func(t *testing.T) {
c := newTestCore(withInvalidIndexCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.DropCollectionIndex(ctx, 1)
assert.Error(t, err)
})
t.Run("non success error code on execute", func(t *testing.T) {
c := newTestCore(withFailedIndexCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.DropCollectionIndex(ctx, 1)
assert.Error(t, err)
})
t.Run("success", func(t *testing.T) {
c := newTestCore(withValidIndexCoord())
b := newServerBroker(c)
ctx := context.Background()
err := b.DropCollectionIndex(ctx, 1)
assert.NoError(t, err)
})
}
// TestServerBroker_GetSegmentIndexState verifies error propagation and the
// pass-through of per-segment index states from IndexCoord.
func TestServerBroker_GetSegmentIndexState(t *testing.T) {
	t.Run("failed to execute", func(t *testing.T) {
		c := newTestCore(withInvalidIndexCoord())
		b := newServerBroker(c)
		ctx := context.Background()
		_, err := b.GetSegmentIndexState(ctx, 1, "index_name", []UniqueID{1, 2})
		assert.Error(t, err)
	})
	t.Run("non success error code on execute", func(t *testing.T) {
		c := newTestCore(withFailedIndexCoord())
		b := newServerBroker(c)
		ctx := context.Background()
		_, err := b.GetSegmentIndexState(ctx, 1, "index_name", []UniqueID{1, 2})
		assert.Error(t, err)
	})
	t.Run("success", func(t *testing.T) {
		c := newTestCore(withValidIndexCoord())
		// Stub the IndexCoord response with one finished segment.
		c.indexCoord.(*mockIndexCoord).GetSegmentIndexStateFunc = func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) {
			return &indexpb.GetSegmentIndexStateResponse{
				Status: succStatus(),
				States: []*indexpb.SegmentIndexState{
					{
						SegmentID:  1,
						State:      commonpb.IndexState_Finished,
						FailReason: "",
					},
				},
			}, nil
		}
		b := newServerBroker(c)
		ctx := context.Background()
		states, err := b.GetSegmentIndexState(ctx, 1, "index_name", []UniqueID{1})
		assert.NoError(t, err)
		assert.Equal(t, 1, len(states))
		assert.Equal(t, commonpb.IndexState_Finished, states[0].GetState())
	})
}

View File

@ -0,0 +1,11 @@
package rootcoord
// Key fragments and sub-paths used for metadata snapshots and the global
// ID/TSO allocators in RootCoord.
const (
	// TODO: better to make them configurable, use default value if no config was set since we never explode these before.

	// snapshotsSep separates an original key from its snapshot timestamp.
	snapshotsSep = "_ts"
	// snapshotPrefix is the key prefix under which snapshots are stored.
	snapshotPrefix = "snapshots"
	// globalIDAllocatorKey is the key of the global ID allocator.
	globalIDAllocatorKey = "idTimestamp"
	// globalIDAllocatorSubPath is the sub-path of the global ID allocator.
	globalIDAllocatorSubPath = "gid"
	// globalTSOAllocatorKey is the key of the global TSO allocator.
	globalTSOAllocatorKey = "timestamp"
	// globalTSOAllocatorSubPath is the sub-path of the global TSO allocator.
	globalTSOAllocatorSubPath = "tso"
)

View File

@ -0,0 +1,26 @@
package rootcoord
import (
"context"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// createAliasTask registers an alias for a collection.
type createAliasTask struct {
	baseTaskV2
	Req *milvuspb.CreateAliasRequest
}

// Prepare validates that the request carries the CreateAlias message type.
func (t *createAliasTask) Prepare(ctx context.Context) error {
	if err := CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_CreateAlias); err != nil {
		return err
	}
	return nil
}

// Execute writes the alias into the meta table at the task's timestamp.
func (t *createAliasTask) Execute(ctx context.Context) error {
	// create alias is atomic enough.
	return t.core.meta.CreateAlias(ctx, t.Req.GetAlias(), t.Req.GetCollectionName(), t.GetTs())
}

View File

@ -0,0 +1,40 @@
package rootcoord
import (
"context"
"testing"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// Test_createAliasTask_Prepare checks that Prepare rejects a wrong message
// type and accepts CreateAlias.
func Test_createAliasTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		task := &createAliasTask{Req: &milvuspb.CreateAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}}}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		task := &createAliasTask{Req: &milvuspb.CreateAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateAlias}}}
		err := task.Prepare(context.Background())
		assert.NoError(t, err)
	})
}

// Test_createAliasTask_Execute checks that a meta-table failure is propagated.
func Test_createAliasTask_Execute(t *testing.T) {
	t.Run("failed to create alias", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &createAliasTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.CreateAliasRequest{
				Base:  &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateAlias},
				Alias: "test",
			},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
}

View File

@ -0,0 +1,294 @@
package rootcoord
import (
"context"
"errors"
"fmt"
ms "github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/proto/commonpb"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/util/typeutil"
"go.uber.org/zap"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// collectionChannels pairs the per-shard virtual channel names with the
// physical channels they map onto.
type collectionChannels struct {
	virtualChannels  []string
	physicalChannels []string
}

// createCollectionTask creates a collection along with its default partition
// and DML channels. The fields below Req are filled in during Prepare.
type createCollectionTask struct {
	baseTaskV2
	Req    *milvuspb.CreateCollectionRequest
	schema *schemapb.CollectionSchema // unmarshaled and augmented with system fields
	collID UniqueID                   // allocated collection ID
	partID UniqueID                   // allocated default-partition ID
	channels collectionChannels       // assigned virtual/physical channels
}
// validate performs basic request sanity checks: the request must be present
// and must carry the CreateCollection message type.
func (t *createCollectionTask) validate() error {
	if t.Req == nil {
		return errors.New("empty requests")
	}
	return CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_CreateCollection)
}
// hasSystemFields reports whether any field of schema uses one of the
// reserved system field names.
func hasSystemFields(schema *schemapb.CollectionSchema, systemFields []string) bool {
	for _, field := range schema.GetFields() {
		for _, reserved := range systemFields {
			if field.GetName() == reserved {
				return true
			}
		}
	}
	return false
}
// validateSchema checks that the schema name matches the request's collection
// name and that no user field collides with a reserved system field name.
func (t *createCollectionTask) validateSchema(schema *schemapb.CollectionSchema) error {
	reqName := t.Req.GetCollectionName()
	if reqName != schema.GetName() {
		return fmt.Errorf("collection name = %s, schema.Name=%s", reqName, schema.Name)
	}
	reserved := []string{RowIDFieldName, TimeStampFieldName}
	if hasSystemFields(schema, reserved) {
		return fmt.Errorf("schema contains system field: %s, %s", RowIDFieldName, TimeStampFieldName)
	}
	return nil
}
// assignFieldID numbers the user fields sequentially starting from
// StartOfUserFieldID (IDs below that are reserved for system fields).
func (t *createCollectionTask) assignFieldID(schema *schemapb.CollectionSchema) {
	// Fields are pointers, so mutating the range value updates the schema.
	for i, field := range schema.GetFields() {
		field.FieldID = int64(i + StartOfUserFieldID)
	}
}
// appendSysFields adds the implicit RowID and Timestamp system fields to the
// end of the schema's field list.
func (t *createCollectionTask) appendSysFields(schema *schemapb.CollectionSchema) {
	sysFields := []*schemapb.FieldSchema{
		{
			FieldID:      int64(RowIDField),
			Name:         RowIDFieldName,
			IsPrimaryKey: false,
			Description:  "row id",
			DataType:     schemapb.DataType_Int64,
		},
		{
			FieldID:      int64(TimeStampField),
			Name:         TimeStampFieldName,
			IsPrimaryKey: false,
			Description:  "time stamp",
			DataType:     schemapb.DataType_Int64,
		},
	}
	schema.Fields = append(schema.Fields, sysFields...)
}
// prepareSchema unmarshals the request's serialized schema, validates it,
// assigns field IDs, appends the system fields, and stores the result on the
// task.
func (t *createCollectionTask) prepareSchema() error {
	schema := &schemapb.CollectionSchema{}
	if err := proto.Unmarshal(t.Req.GetSchema(), schema); err != nil {
		return err
	}
	if err := t.validateSchema(schema); err != nil {
		return err
	}
	t.assignFieldID(schema)
	t.appendSysFields(schema)
	t.schema = schema
	return nil
}
// assignShardsNum falls back to the default shard count (2) when the request
// does not specify a positive value.
func (t *createCollectionTask) assignShardsNum() {
	if t.Req.GetShardsNum() <= 0 {
		t.Req.ShardsNum = 2
	}
}

// assignCollectionID allocates a fresh collection ID from the global allocator.
func (t *createCollectionTask) assignCollectionID() error {
	var err error
	t.collID, err = t.core.idAllocator.AllocOne()
	return err
}

// assignPartitionID allocates the ID for the default partition created along
// with the collection.
func (t *createCollectionTask) assignPartitionID() error {
	var err error
	t.partID, err = t.core.idAllocator.AllocOne()
	return err
}
// assignChannels picks one physical DML channel per shard and derives the
// per-shard virtual channel names ("<pchan>_<collID>v<shard>") from them.
func (t *createCollectionTask) assignChannels() error {
	shardNum := int(t.Req.GetShardsNum())
	// physical channel names
	physicals := t.core.chanTimeTick.getDmlChannelNames(shardNum)
	virtuals := make([]string, shardNum)
	for i := 0; i < shardNum; i++ {
		virtuals[i] = fmt.Sprintf("%s_%dv%d", physicals[i], t.collID, i)
	}
	t.channels = collectionChannels{
		virtualChannels:  virtuals,
		physicalChannels: physicals,
	}
	return nil
}
// Prepare runs the preparation pipeline in order: request validation, schema
// preparation, shard-count defaulting, ID allocation and channel assignment.
// The first failing step aborts the pipeline.
func (t *createCollectionTask) Prepare(ctx context.Context) error {
	steps := []func() error{
		t.validate,
		t.prepareSchema,
		func() error { t.assignShardsNum(); return nil },
		t.assignCollectionID,
		t.assignPartitionID,
		t.assignChannels,
	}
	for _, step := range steps {
		if err := step(); err != nil {
			return err
		}
	}
	return nil
}
// genCreateCollectionMsg builds the msgstream packet that announces the new
// collection on its DML channels; begin/end timestamps both use the task ts.
func (t *createCollectionTask) genCreateCollectionMsg(ctx context.Context) *ms.MsgPack {
	ts := t.GetTs()
	collectionID := t.collID
	partitionID := t.partID
	// error won't happen here.
	marshaledSchema, _ := proto.Marshal(t.schema)
	pChannels := t.channels.physicalChannels
	vChannels := t.channels.virtualChannels
	msgPack := ms.MsgPack{}
	baseMsg := ms.BaseMsg{
		Ctx:            ctx,
		BeginTimestamp: ts,
		EndTimestamp:   ts,
		HashValues:     []uint32{0},
	}
	msg := &ms.CreateCollectionMsg{
		BaseMsg: baseMsg,
		CreateCollectionRequest: internalpb.CreateCollectionRequest{
			Base:                 &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection, Timestamp: ts},
			CollectionID:         collectionID,
			PartitionID:          partitionID,
			Schema:               marshaledSchema,
			VirtualChannelNames:  vChannels,
			PhysicalChannelNames: pChannels,
		},
	}
	msgPack.Msgs = append(msgPack.Msgs, msg)
	return &msgPack
}
// addChannelsAndGetStartPositions registers the collection's physical DML
// channels with the timetick synchronizer, broadcasts the create-collection
// message on them, and returns the resulting per-channel start positions.
func (t *createCollectionTask) addChannelsAndGetStartPositions(ctx context.Context) (map[string][]byte, error) {
	t.core.chanTimeTick.addDmlChannels(t.channels.physicalChannels...)
	msg := t.genCreateCollectionMsg(ctx)
	return t.core.chanTimeTick.broadcastMarkDmlChannels(t.channels.physicalChannels, msg)
}
// Execute creates the collection: it registers DML channels and obtains start
// positions, checks idempotency against any existing collection with the same
// name, then runs an undo-task that adds the collection meta, watches the
// channels, and finally flips the collection state to Created. Each forward
// step is paired with a compensating step that runs if a later step fails.
func (t *createCollectionTask) Execute(ctx context.Context) error {
	collID := t.collID
	partID := t.partID
	ts := t.GetTs()
	vchanNames := t.channels.virtualChannels
	chanNames := t.channels.physicalChannels
	startPositions, err := t.addChannelsAndGetStartPositions(ctx)
	if err != nil {
		// ugly here, since we must get start positions first.
		t.core.chanTimeTick.removeDmlChannels(t.channels.physicalChannels...)
		return err
	}
	// Full collection record, created in the Creating state with the default
	// partition attached.
	collInfo := model.Collection{
		CollectionID:         collID,
		Name:                 t.schema.Name,
		Description:          t.schema.Description,
		AutoID:               t.schema.AutoID,
		Fields:               model.UnmarshalFieldModels(t.schema.Fields),
		VirtualChannelNames:  vchanNames,
		PhysicalChannelNames: chanNames,
		ShardsNum:            t.Req.ShardsNum,
		ConsistencyLevel:     t.Req.ConsistencyLevel,
		StartPositions:       toKeyDataPairs(startPositions),
		CreateTime:           ts,
		State:                pb.CollectionState_CollectionCreating,
		Partitions: []*model.Partition{
			{
				PartitionID:               partID,
				PartitionName:             Params.CommonCfg.DefaultPartitionName,
				PartitionCreatedTimestamp: ts,
				CollectionID:              collID,
				State:                     pb.PartitionState_PartitionCreated,
			},
		},
	}
	// We cannot check the idempotency inside meta table when adding collection, since we'll execute duplicate steps
	// if add collection successfully due to idempotency check. Some steps may be risky to be duplicate executed if they
	// are not promised idempotent.
	//
	// The clone strips allocation-dependent partition data so the equality
	// check below compares only the caller-visible parameters.
	clone := collInfo.Clone()
	clone.Partitions = []*model.Partition{{PartitionName: Params.CommonCfg.DefaultPartitionName}}
	// need double check in meta table if we can't promise the sequence execution.
	existedCollInfo, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), typeutil.MaxTimestamp)
	if err == nil {
		equal := existedCollInfo.Equal(*clone)
		if !equal {
			return fmt.Errorf("create duplicate collection with different parameters, collection: %s", t.Req.GetCollectionName())
		}
		// make creating collection idempotent.
		log.Warn("add duplicate collection", zap.String("collection", t.Req.GetCollectionName()), zap.Uint64("ts", t.GetTs()))
		return nil
	}
	// Forward/compensation pairs; compensations run in reverse on failure.
	undoTask := newBaseUndoTask()
	undoTask.AddStep(&NullStep{}, &RemoveDmlChannelsStep{
		baseStep:  baseStep{core: t.core},
		pchannels: chanNames,
	}) // remove dml channels if any error occurs.
	undoTask.AddStep(&AddCollectionMetaStep{
		baseStep: baseStep{core: t.core},
		coll:     &collInfo,
	}, &DeleteCollectionMetaStep{
		baseStep:     baseStep{core: t.core},
		collectionID: collID,
		ts:           ts,
	})
	undoTask.AddStep(&WatchChannelsStep{
		baseStep: baseStep{core: t.core},
		info: &watchInfo{
			ts:             ts,
			collectionID:   collID,
			vChannels:      t.channels.virtualChannels,
			startPositions: toKeyDataPairs(startPositions),
		},
	}, &UnwatchChannelsStep{
		baseStep:     baseStep{core: t.core},
		collectionID: collID,
		channels:     t.channels,
	})
	undoTask.AddStep(&ChangeCollectionStateStep{
		baseStep:     baseStep{core: t.core},
		collectionID: collID,
		state:        pb.CollectionState_CollectionCreated,
		ts:           ts,
	}, &NullStep{}) // We'll remove the whole collection anyway.
	return undoTask.Execute(ctx)
}

View File

@ -0,0 +1,502 @@
package rootcoord
import (
"context"
"errors"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/stretchr/testify/assert"
)
// Test_createCollectionTask_validate covers nil request, wrong message type,
// and the accepted CreateCollection type.
func Test_createCollectionTask_validate(t *testing.T) {
	t.Run("empty request", func(t *testing.T) {
		task := createCollectionTask{
			Req: nil,
		}
		err := task.validate()
		assert.Error(t, err)
	})
	t.Run("invalid msg type", func(t *testing.T) {
		task := createCollectionTask{
			Req: &milvuspb.CreateCollectionRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
			},
		}
		err := task.validate()
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		task := createCollectionTask{
			Req: &milvuspb.CreateCollectionRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
			},
		}
		err := task.validate()
		assert.NoError(t, err)
	})
}
// Test_createCollectionTask_validateSchema covers name mismatch, a user field
// colliding with a reserved system field name, and a valid schema.
func Test_createCollectionTask_validateSchema(t *testing.T) {
	t.Run("name mismatch", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		otherName := collectionName + "_other"
		task := createCollectionTask{
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
			},
		}
		schema := &schemapb.CollectionSchema{
			Name: otherName,
		}
		err := task.validateSchema(schema)
		assert.Error(t, err)
	})
	t.Run("has system fields", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		task := createCollectionTask{
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
			},
		}
		schema := &schemapb.CollectionSchema{
			Name: collectionName,
			Fields: []*schemapb.FieldSchema{
				{Name: RowIDFieldName},
			},
		}
		err := task.validateSchema(schema)
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		task := createCollectionTask{
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
			},
		}
		schema := &schemapb.CollectionSchema{
			Name:   collectionName,
			Fields: []*schemapb.FieldSchema{},
		}
		err := task.validateSchema(schema)
		assert.NoError(t, err)
	})
}
// Test_createCollectionTask_prepareSchema covers an unparsable schema blob, a
// schema that reuses a system field name, and a valid schema.
func Test_createCollectionTask_prepareSchema(t *testing.T) {
	t.Run("failed to unmarshal", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		task := createCollectionTask{
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
				Schema:         []byte("invalid schema"),
			},
		}
		err := task.prepareSchema()
		assert.Error(t, err)
	})
	t.Run("contain system fields", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		schema := &schemapb.CollectionSchema{
			Name:        collectionName,
			Description: "",
			AutoID:      false,
			Fields: []*schemapb.FieldSchema{
				{Name: TimeStampFieldName},
			},
		}
		marshaledSchema, err := proto.Marshal(schema)
		assert.NoError(t, err)
		task := createCollectionTask{
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
				Schema:         marshaledSchema,
			},
		}
		err = task.prepareSchema()
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		field1 := funcutil.GenRandomStr()
		schema := &schemapb.CollectionSchema{
			Name:        collectionName,
			Description: "",
			AutoID:      false,
			Fields: []*schemapb.FieldSchema{
				{Name: field1},
			},
		}
		marshaledSchema, err := proto.Marshal(schema)
		assert.NoError(t, err)
		task := createCollectionTask{
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
				Schema:         marshaledSchema,
			},
		}
		err = task.prepareSchema()
		assert.NoError(t, err)
	})
}
// Test_createCollectionTask_Prepare walks Prepare's pipeline: message-type
// validation, schema parsing, ID allocation, and the full happy path with a
// rocksmq-backed timetick synchronizer.
func Test_createCollectionTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		task := &createCollectionTask{
			Req: &milvuspb.CreateCollectionRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
			},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})
	t.Run("invalid schema", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		task := &createCollectionTask{
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
				Schema:         []byte("invalid schema"),
			},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})
	t.Run("failed to assign id", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		field1 := funcutil.GenRandomStr()
		schema := &schemapb.CollectionSchema{
			Name:        collectionName,
			Description: "",
			AutoID:      false,
			Fields: []*schemapb.FieldSchema{
				{Name: field1},
			},
		}
		marshaledSchema, err := proto.Marshal(schema)
		assert.NoError(t, err)
		// ID allocator always fails, so Prepare must abort after schema prep.
		core := newTestCore(withInvalidIDAllocator())
		task := createCollectionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
				Schema:         marshaledSchema,
			},
		}
		err = task.Prepare(context.Background())
		assert.Error(t, err)
	})
	t.Run("failed to assign channels", func(t *testing.T) {
		// TODO: error won't happen here.
	})
	t.Run("normal case", func(t *testing.T) {
		defer cleanTestEnv()
		collectionName := funcutil.GenRandomStr()
		field1 := funcutil.GenRandomStr()
		ticker := newRocksMqTtSynchronizer()
		core := newTestCore(withValidIDAllocator(), withTtSynchronizer(ticker))
		schema := &schemapb.CollectionSchema{
			Name:        collectionName,
			Description: "",
			AutoID:      false,
			Fields: []*schemapb.FieldSchema{
				{Name: field1},
			},
		}
		marshaledSchema, err := proto.Marshal(schema)
		assert.NoError(t, err)
		task := createCollectionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
				Schema:         marshaledSchema,
			},
		}
		err = task.Prepare(context.Background())
		assert.NoError(t, err)
	})
}
// Test_createCollectionTask_Execute covers Execute's idempotency check
// (same-name collection with different vs. identical parameters), a broadcast
// failure while fetching start positions, the full success path, and the
// compensation (undo) path when a later step fails.
func Test_createCollectionTask_Execute(t *testing.T) {
	t.Run("add same collection with different parameters", func(t *testing.T) {
		defer cleanTestEnv()
		ticker := newRocksMqTtSynchronizer()
		collectionName := funcutil.GenRandomStr()
		field1 := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName}
		meta := newMockMetaTable()
		// An existing collection with the same name but different parameters
		// must make Execute fail.
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return coll, nil
		}
		core := newTestCore(withMeta(meta), withTtSynchronizer(ticker))
		task := &createCollectionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
			},
			schema: &schemapb.CollectionSchema{Name: collectionName, Fields: []*schemapb.FieldSchema{{Name: field1}}},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
	t.Run("add duplicate collection", func(t *testing.T) {
		defer cleanTestEnv()
		ticker := newRocksMqTtSynchronizer()
		shardNum := 2
		pchans := ticker.getDmlChannelNames(shardNum)
		collectionName := funcutil.GenRandomStr()
		field1 := funcutil.GenRandomStr()
		collID := UniqueID(1)
		schema := &schemapb.CollectionSchema{Name: collectionName, Fields: []*schemapb.FieldSchema{{Name: field1}}}
		channels := collectionChannels{
			virtualChannels:  []string{funcutil.GenRandomStr(), funcutil.GenRandomStr()},
			physicalChannels: pchans,
		}
		// Existing collection matches the request exactly, so Execute must be
		// an idempotent no-op.
		coll := &model.Collection{
			CollectionID:         collID,
			Name:                 schema.Name,
			Description:          schema.Description,
			AutoID:               schema.AutoID,
			Fields:               model.UnmarshalFieldModels(schema.GetFields()),
			VirtualChannelNames:  channels.virtualChannels,
			PhysicalChannelNames: channels.physicalChannels,
			Partitions:           []*model.Partition{{PartitionName: Params.CommonCfg.DefaultPartitionName}},
		}
		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return coll, nil
		}
		core := newTestCore(withMeta(meta), withTtSynchronizer(ticker))
		task := &createCollectionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
			},
			collID:   collID,
			schema:   schema,
			channels: channels,
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
	})
	t.Run("failed to get start positions", func(t *testing.T) {
		// Broadcast on the mock stream fails, so Execute must abort early.
		ticker := newTickerWithMockFailStream()
		shardNum := 2
		pchans := ticker.getDmlChannelNames(shardNum)
		core := newTestCore(withTtSynchronizer(ticker))
		task := &createCollectionTask{
			baseTaskV2: baseTaskV2{core: core},
			channels: collectionChannels{
				physicalChannels: pchans,
				virtualChannels:  []string{funcutil.GenRandomStr(), funcutil.GenRandomStr()},
			},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		defer cleanTestEnv()
		collectionName := funcutil.GenRandomStr()
		field1 := funcutil.GenRandomStr()
		shardNum := 2
		ticker := newRocksMqTtSynchronizer()
		pchans := ticker.getDmlChannelNames(shardNum)
		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return nil, errors.New("error mock GetCollectionByName")
		}
		meta.AddCollectionFunc = func(ctx context.Context, coll *model.Collection) error {
			return nil
		}
		meta.ChangeCollectionStateFunc = func(ctx context.Context, collectionID UniqueID, state etcdpb.CollectionState, ts Timestamp) error {
			return nil
		}
		// Healthy DataCoord so WatchChannels succeeds through the broker.
		dc := newMockDataCoord()
		dc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
			return &internalpb.ComponentStates{
				State: &internalpb.ComponentInfo{
					NodeID:    TestRootCoordID,
					StateCode: internalpb.StateCode_Healthy,
				},
				SubcomponentStates: nil,
				Status:             succStatus(),
			}, nil
		}
		dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) {
			return &datapb.WatchChannelsResponse{Status: succStatus()}, nil
		}
		core := newTestCore(withValidIDAllocator(),
			withMeta(meta),
			withTtSynchronizer(ticker),
			withDataCoord(dc))
		core.broker = newServerBroker(core)
		schema := &schemapb.CollectionSchema{
			Name:        collectionName,
			Description: "",
			AutoID:      false,
			Fields: []*schemapb.FieldSchema{
				{Name: field1},
			},
		}
		marshaledSchema, err := proto.Marshal(schema)
		assert.NoError(t, err)
		task := createCollectionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
				Schema:         marshaledSchema,
				ShardsNum:      int32(shardNum),
			},
			channels: collectionChannels{physicalChannels: pchans},
			schema:   schema,
		}
		err = task.Execute(context.Background())
		assert.NoError(t, err)
	})
	t.Run("partial error, check if undo worked", func(t *testing.T) {
		defer cleanTestEnv()
		collectionName := funcutil.GenRandomStr()
		field1 := funcutil.GenRandomStr()
		shardNum := 2
		ticker := newRocksMqTtSynchronizer()
		pchans := ticker.getDmlChannelNames(shardNum)
		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return nil, errors.New("error mock GetCollectionByName")
		}
		meta.AddCollectionFunc = func(ctx context.Context, coll *model.Collection) error {
			return nil
		}
		// inject error here.
		meta.ChangeCollectionStateFunc = func(ctx context.Context, collectionID UniqueID, state etcdpb.CollectionState, ts Timestamp) error {
			return errors.New("error mock ChangeCollectionState")
		}
		// Channels signal when the compensating steps actually run, since the
		// undo task executes them asynchronously.
		removeCollectionCalled := false
		removeCollectionChan := make(chan struct{}, 1)
		meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error {
			removeCollectionCalled = true
			removeCollectionChan <- struct{}{}
			return nil
		}
		broker := newMockBroker()
		broker.WatchChannelsFunc = func(ctx context.Context, info *watchInfo) error {
			return nil
		}
		unwatchChannelsCalled := false
		unwatchChannelsChan := make(chan struct{}, 1)
		broker.UnwatchChannelsFunc = func(ctx context.Context, info *watchInfo) error {
			unwatchChannelsCalled = true
			unwatchChannelsChan <- struct{}{}
			return nil
		}
		core := newTestCore(withValidIDAllocator(),
			withMeta(meta),
			withTtSynchronizer(ticker),
			withBroker(broker))
		schema := &schemapb.CollectionSchema{
			Name:        collectionName,
			Description: "",
			AutoID:      false,
			Fields: []*schemapb.FieldSchema{
				{Name: field1},
			},
		}
		marshaledSchema, err := proto.Marshal(schema)
		assert.NoError(t, err)
		task := createCollectionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.CreateCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_CreateCollection},
				CollectionName: collectionName,
				Schema:         marshaledSchema,
				ShardsNum:      int32(shardNum),
			},
			channels: collectionChannels{physicalChannels: pchans},
			schema:   schema,
		}
		err = task.Execute(context.Background())
		assert.Error(t, err)
		// check if undo worked.
		// undo watch.
		<-unwatchChannelsChan
		assert.True(t, unwatchChannelsCalled)
		// undo adding collection.
		<-removeCollectionChan
		assert.True(t, removeCollectionCalled)
		time.Sleep(time.Second * 2) // wait for asynchronous step done.
		// undo add channels.
		assert.Zero(t, len(ticker.listDmlChannels()))
	})
}

View File

@ -0,0 +1,69 @@
package rootcoord
import (
"context"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metastore/model"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// createPartitionTask adds a partition to an existing collection.
type createPartitionTask struct {
	baseTaskV2
	Req *milvuspb.CreatePartitionRequest
	// collMeta caches the collection's metadata, resolved during Prepare.
	collMeta *model.Collection
}

// Prepare validates the message type and resolves the target collection's
// metadata at the task timestamp.
func (t *createPartitionTask) Prepare(ctx context.Context) error {
	if err := CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_CreatePartition); err != nil {
		return err
	}
	collMeta, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), t.GetTs())
	if err != nil {
		return err
	}
	t.collMeta = collMeta
	return nil
}
// Execute creates the partition: it is a no-op if a partition with the same
// name already exists (idempotency), otherwise it allocates a partition ID,
// expires proxy caches for the collection, and writes the partition meta via
// an undo-task.
func (t *createPartitionTask) Execute(ctx context.Context) error {
	for _, partition := range t.collMeta.Partitions {
		if partition.PartitionName == t.Req.GetPartitionName() {
			log.Warn("add duplicate partition", zap.String("collection", t.Req.GetCollectionName()), zap.String("partition", t.Req.GetPartitionName()), zap.Uint64("ts", t.GetTs()))
			return nil
		}
	}
	partID, err := t.core.idAllocator.AllocOne()
	if err != nil {
		return err
	}
	partition := &model.Partition{
		PartitionID:               partID,
		PartitionName:             t.Req.GetPartitionName(),
		PartitionCreatedTimestamp: t.GetTs(),
		Extra:                     nil,
		CollectionID:              t.collMeta.CollectionID,
		State:                     pb.PartitionState_PartitionCreated,
	}
	undoTask := newBaseUndoTask()
	// Invalidate proxy caches first so clients re-read the collection meta.
	undoTask.AddStep(&ExpireCacheStep{
		baseStep:        baseStep{core: t.core},
		collectionNames: []string{t.Req.GetCollectionName()},
		collectionID:    t.collMeta.CollectionID,
		ts:              t.GetTs(),
	}, &NullStep{})
	undoTask.AddStep(&AddPartitionMetaStep{
		baseStep:  baseStep{core: t.core},
		partition: partition,
	}, &NullStep{}) // adding partition is atomic enough.
	return undoTask.Execute(ctx)
}

View File

@ -0,0 +1,126 @@
package rootcoord
import (
"context"
"testing"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// Test_createPartitionTask_Prepare covers message-type rejection, a failing
// meta-table lookup, and the happy path where the collection meta is cached
// on the task.
func Test_createPartitionTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		task := &createPartitionTask{
			Req: &milvuspb.CreatePartitionRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})
	t.Run("failed to get collection meta", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &createPartitionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req:        &milvuspb.CreatePartitionRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreatePartition}},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		meta := newMockMetaTable()
		collectionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName}
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return coll.Clone(), nil
		}
		core := newTestCore(withMeta(meta))
		task := &createPartitionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req:        &milvuspb.CreatePartitionRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_CreatePartition}},
		}
		err := task.Prepare(context.Background())
		assert.NoError(t, err)
		// The cached meta must match what the meta table returned.
		assert.True(t, coll.Equal(*task.collMeta))
	})
}
// Test_createPartitionTask_Execute covers the duplicate-partition no-op, ID
// allocation failure, cache-expiry failure, meta-write failure, and success.
func Test_createPartitionTask_Execute(t *testing.T) {
	t.Run("create duplicate partition", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		partitionName := funcutil.GenRandomStr()
		// Partition already present: Execute must succeed without side effects.
		coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{{PartitionName: partitionName}}}
		task := &createPartitionTask{
			collMeta: coll,
			Req:      &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
	})
	t.Run("failed to allocate partition id", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		partitionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}}
		core := newTestCore(withInvalidIDAllocator())
		task := &createPartitionTask{
			baseTaskV2: baseTaskV2{core: core},
			collMeta:   coll,
			Req:        &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
	t.Run("failed to expire cache", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		partitionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}}
		core := newTestCore(withValidIDAllocator(), withInvalidProxyManager())
		task := &createPartitionTask{
			baseTaskV2: baseTaskV2{core: core},
			collMeta:   coll,
			Req:        &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
	t.Run("failed to add partition meta", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		partitionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}}
		core := newTestCore(withValidIDAllocator(), withValidProxyManager(), withInvalidMeta())
		task := &createPartitionTask{
			baseTaskV2: baseTaskV2{core: core},
			collMeta:   coll,
			Req:        &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		partitionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}}
		meta := newMockMetaTable()
		meta.AddPartitionFunc = func(ctx context.Context, partition *model.Partition) error {
			return nil
		}
		core := newTestCore(withValidIDAllocator(), withValidProxyManager(), withMeta(meta))
		task := &createPartitionTask{
			baseTaskV2: baseTaskV2{core: core},
			collMeta:   coll,
			Req:        &milvuspb.CreatePartitionRequest{CollectionName: collectionName, PartitionName: partitionName},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
	})
}

View File

@ -0,0 +1,73 @@
package rootcoord
import (
"context"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/util/tsoutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
)
// describeCollectionTask describe collection request task
type describeCollectionTask struct {
baseTaskV2
Req *milvuspb.DescribeCollectionRequest
Rsp *milvuspb.DescribeCollectionResponse
}
// Prepare validates that the request carries the DescribeCollection message type.
func (t *describeCollectionTask) Prepare(ctx context.Context) error {
	return CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_DescribeCollection)
}
// Execute task execution.
//
// Execute resolves the collection either by name (when CollectionName is set)
// or by id, then fills t.Rsp with the schema and collection metadata. On a
// failed lookup the response status is set to CollectionNotExists and the
// lookup error is returned.
func (t *describeCollectionTask) Execute(ctx context.Context) (err error) {
	var collInfo *model.Collection
	t.Rsp.Status = succStatus()
	if t.Req.GetTimeStamp() == 0 {
		// A zero timestamp means "latest"; normalize for the meta lookup.
		t.Req.TimeStamp = typeutil.MaxTimestamp
	}
	if t.Req.GetCollectionName() != "" {
		collInfo, err = t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), t.Req.GetTimeStamp())
		if err != nil {
			t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error())
			return err
		}
	} else {
		collInfo, err = t.core.meta.GetCollectionByID(ctx, t.Req.GetCollectionID(), t.Req.GetTimeStamp())
		if err != nil {
			t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error())
			return err
		}
	}
	t.Rsp.Schema = &schemapb.CollectionSchema{
		Name:        collInfo.Name,
		Description: collInfo.Description,
		AutoID:      collInfo.AutoID,
		Fields:      model.MarshalFieldModels(collInfo.Fields),
	}
	t.Rsp.CollectionID = collInfo.CollectionID
	t.Rsp.VirtualChannelNames = collInfo.VirtualChannelNames
	t.Rsp.PhysicalChannelNames = collInfo.PhysicalChannelNames
	// Derive the shard number WITHOUT mutating collInfo: the meta table may
	// hand back a shared/cached object, and writing ShardsNum onto it here
	// would leak the patched value to other readers of the same object.
	shardsNum := collInfo.ShardsNum
	if shardsNum == 0 {
		shardsNum = int32(len(collInfo.VirtualChannelNames))
	}
	t.Rsp.ShardsNum = shardsNum
	t.Rsp.ConsistencyLevel = collInfo.ConsistencyLevel
	t.Rsp.CreatedTimestamp = collInfo.CreateTime
	createdPhysicalTime, _ := tsoutil.ParseHybridTs(collInfo.CreateTime)
	t.Rsp.CreatedUtcTimestamp = uint64(createdPhysicalTime)
	t.Rsp.Aliases = t.core.meta.ListAliasesByID(collInfo.CollectionID)
	t.Rsp.StartPositions = collInfo.StartPositions
	t.Rsp.CollectionName = t.Rsp.Schema.Name
	return nil
}

View File

@ -0,0 +1,115 @@
package rootcoord
import (
"context"
"testing"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/stretchr/testify/assert"
)
func Test_describeCollectionTask_Prepare(t *testing.T) {
t.Run("invalid msg type", func(t *testing.T) {
task := &describeCollectionTask{
Req: &milvuspb.DescribeCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DropCollection,
},
},
}
err := task.Prepare(context.Background())
assert.Error(t, err)
})
t.Run("normal case", func(t *testing.T) {
task := &describeCollectionTask{
Req: &milvuspb.DescribeCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_DescribeCollection,
},
},
}
err := task.Prepare(context.Background())
assert.NoError(t, err)
})
}
// Test_describeCollectionTask_Execute covers the by-name failure path, the
// by-id failure path, and a successful describe that returns aliases.
func Test_describeCollectionTask_Execute(t *testing.T) {
	t.Run("failed to get collection by name", func(t *testing.T) {
		// Meta lookups fail, so the response status must be CollectionNotExists.
		core := newTestCore(withInvalidMeta())
		task := &describeCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.DescribeCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_DescribeCollection,
				},
				CollectionName: "test coll",
			},
			Rsp: &milvuspb.DescribeCollectionResponse{},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists)
	})
	t.Run("failed to get collection by id", func(t *testing.T) {
		// No collection name is set, so Execute falls back to lookup by id.
		core := newTestCore(withInvalidMeta())
		task := &describeCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.DescribeCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_DescribeCollection,
				},
				CollectionID: 1,
			},
			Rsp: &milvuspb.DescribeCollectionResponse{},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists)
	})
	t.Run("success", func(t *testing.T) {
		meta := newMockMetaTable()
		meta.GetCollectionByIDFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) (*model.Collection, error) {
			return &model.Collection{
				CollectionID: 1,
				Name:         "test coll",
			}, nil
		}
		alias1, alias2 := funcutil.GenRandomStr(), funcutil.GenRandomStr()
		meta.ListAliasesByIDFunc = func(collID UniqueID) []string {
			return []string{alias1, alias2}
		}
		core := newTestCore(withMeta(meta))
		task := &describeCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.DescribeCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_DescribeCollection,
				},
				CollectionID: 1,
			},
			Rsp: &milvuspb.DescribeCollectionResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
		// Every alias registered for the collection must be echoed back.
		assert.ElementsMatch(t, []string{alias1, alias2}, task.Rsp.GetAliases())
	})
}

View File

@ -0,0 +1,29 @@
package rootcoord
import (
"context"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// dropAliasTask removes a collection alias: it expires the proxy meta cache
// for the alias and deletes it from the meta table.
type dropAliasTask struct {
	baseTaskV2
	Req *milvuspb.DropAliasRequest
}
// Prepare validates that the request carries the DropAlias message type.
func (t *dropAliasTask) Prepare(ctx context.Context) error {
	return CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_DropAlias)
}
// Execute first invalidates the proxy meta cache for the alias, then removes
// the alias from the meta table. Dropping an alias is atomic enough that no
// redo/undo machinery is required here.
func (t *dropAliasTask) Execute(ctx context.Context) error {
	alias := t.Req.GetAlias()
	ts := t.GetTs()
	if err := t.core.ExpireMetaCache(ctx, []string{alias}, InvalidCollectionID, ts); err != nil {
		return err
	}
	return t.core.meta.DropAlias(ctx, alias, ts)
}

View File

@ -0,0 +1,82 @@
package rootcoord
import (
"context"
"testing"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
func Test_dropAliasTask_Prepare(t *testing.T) {
t.Run("invalid msg type", func(t *testing.T) {
task := &dropAliasTask{
Req: &milvuspb.DropAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection}},
}
err := task.Prepare(context.Background())
assert.Error(t, err)
})
t.Run("normal case", func(t *testing.T) {
task := &dropAliasTask{
Req: &milvuspb.DropAliasRequest{Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropAlias}},
}
err := task.Prepare(context.Background())
assert.NoError(t, err)
})
}
// Test_dropAliasTask_Execute covers cache-expiration failure, meta-table
// failure, and the successful drop of an alias.
func Test_dropAliasTask_Execute(t *testing.T) {
	t.Run("failed to expire cache", func(t *testing.T) {
		// The proxy manager errors out, so Execute must fail before touching meta.
		core := newTestCore(withInvalidProxyManager())
		alias := funcutil.GenRandomStr()
		task := &dropAliasTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropAliasRequest{
				Base:  &commonpb.MsgBase{MsgType: commonpb.MsgType_DropAlias},
				Alias: alias,
			},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
	t.Run("failed to drop alias", func(t *testing.T) {
		// Cache expiration succeeds but the meta table rejects the drop.
		core := newTestCore(withValidProxyManager(), withInvalidMeta())
		alias := funcutil.GenRandomStr()
		task := &dropAliasTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropAliasRequest{
				Base:  &commonpb.MsgBase{MsgType: commonpb.MsgType_DropAlias},
				Alias: alias,
			},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		meta := newMockMetaTable()
		meta.DropAliasFunc = func(ctx context.Context, alias string, ts Timestamp) error {
			return nil
		}
		core := newTestCore(withValidProxyManager(), withMeta(meta))
		alias := funcutil.GenRandomStr()
		task := &dropAliasTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropAliasRequest{
				Base:  &commonpb.MsgBase{MsgType: commonpb.MsgType_DropAlias},
				Alias: alias,
			},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
	})
}

View File

@ -0,0 +1,94 @@
package rootcoord
import (
"context"
"fmt"
"github.com/milvus-io/milvus/internal/log"
"go.uber.org/zap"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// dropCollectionTask drops a collection and (asynchronously) releases and
// garbage-collects its resources via a redo task.
type dropCollectionTask struct {
	baseTaskV2
	Req *milvuspb.DropCollectionRequest
}
// validate rejects requests whose message type is not DropCollection and
// requests that address the collection through an alias.
func (t *dropCollectionTask) validate() error {
	if err := CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_DropCollection); err != nil {
		return err
	}
	name := t.Req.GetCollectionName()
	if t.core.meta.IsAlias(name) {
		// Dropping via an alias is forbidden; callers must use the real name.
		return fmt.Errorf("cannot drop the collection via alias = %s", name)
	}
	return nil
}
// Prepare performs request validation before the task is executed.
func (t *dropCollectionTask) Prepare(ctx context.Context) error {
	if err := t.validate(); err != nil {
		return err
	}
	return nil
}
// Execute drops the collection through a redo task: the cache expiration and
// state change run synchronously (a failure aborts the request), while the
// destructive cleanup (release, index drop, data gc, channel removal, meta
// removal) runs asynchronously, in the order the steps are added.
func (t *dropCollectionTask) Execute(ctx context.Context) error {
	// use max ts to check if latest collection exists.
	// we cannot handle case that
	// dropping collection with `ts1` but a collection exists in catalog with newer ts which is bigger than `ts1`.
	// fortunately, if ddls are promised to execute in sequence, then everything is OK. The `ts1` will always be latest.
	collMeta, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), typeutil.MaxTimestamp)
	if err != nil {
		// make dropping collection idempotent.
		log.Warn("drop non-existent collection", zap.String("collection", t.Req.GetCollectionName()))
		return nil
	}
	// meta cache of all aliases should also be cleaned.
	aliases := t.core.meta.ListAliasesByID(collMeta.CollectionID)
	ts := t.GetTs()
	redoTask := newBaseRedoTask()
	// Sync: proxies must stop serving the collection (and its aliases) and the
	// meta state must flip to "dropping" before destructive work begins.
	redoTask.AddSyncStep(&ExpireCacheStep{
		baseStep:        baseStep{core: t.core},
		collectionNames: append(aliases, collMeta.Name),
		collectionID:    collMeta.CollectionID,
		ts:              ts,
	})
	redoTask.AddSyncStep(&ChangeCollectionStateStep{
		baseStep:     baseStep{core: t.core},
		collectionID: collMeta.CollectionID,
		state:        pb.CollectionState_CollectionDropping,
		ts:           ts,
	})
	// Async: actual resource teardown; meta removal is last so a crash leaves
	// the "dropping" record for recovery to finish the job.
	redoTask.AddAsyncStep(&ReleaseCollectionStep{
		baseStep:     baseStep{core: t.core},
		collectionID: collMeta.CollectionID,
	})
	redoTask.AddAsyncStep(&DropIndexStep{
		baseStep: baseStep{core: t.core},
		collID:   collMeta.CollectionID,
	})
	redoTask.AddAsyncStep(&DeleteCollectionDataStep{
		baseStep: baseStep{core: t.core},
		coll:     collMeta,
		ts:       ts,
	})
	redoTask.AddAsyncStep(&RemoveDmlChannelsStep{
		baseStep:  baseStep{core: t.core},
		pchannels: collMeta.PhysicalChannelNames,
	})
	redoTask.AddAsyncStep(&DeleteCollectionMetaStep{
		baseStep:     baseStep{core: t.core},
		collectionID: collMeta.CollectionID,
		ts:           ts,
	})
	return redoTask.Execute(ctx)
}

View File

@ -0,0 +1,216 @@
package rootcoord
import (
"context"
"errors"
"testing"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/stretchr/testify/assert"
)
// Test_dropCollectionTask_Prepare covers msg-type validation, the alias
// guard, and the happy path of Prepare.
func Test_dropCollectionTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		task := &dropCollectionTask{
			Req: &milvuspb.DropCollectionRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DescribeCollection},
			},
		}
		assert.Error(t, task.Prepare(context.Background()))
	})

	// newTaskWithAlias builds a task whose meta table answers IsAlias with the
	// given value.
	newTaskWithAlias := func(isAlias bool) *dropCollectionTask {
		meta := newMockMetaTable()
		meta.IsAliasFunc = func(name string) bool { return isAlias }
		return &dropCollectionTask{
			baseTaskV2: baseTaskV2{core: newTestCore(withMeta(meta))},
			Req: &milvuspb.DropCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
				CollectionName: funcutil.GenRandomStr(),
			},
		}
	}

	t.Run("drop via alias", func(t *testing.T) {
		assert.Error(t, newTaskWithAlias(true).Prepare(context.Background()))
	})

	t.Run("normal case", func(t *testing.T) {
		assert.NoError(t, newTaskWithAlias(false).Prepare(context.Background()))
	})
}
// Test_dropCollectionTask_Execute covers the idempotent non-existent-collection
// path, the two synchronous failure paths (cache expiration, state change),
// and the full redo flow; async steps are verified through signal channels.
func Test_dropCollectionTask_Execute(t *testing.T) {
	t.Run("drop non-existent collection", func(t *testing.T) {
		// Lookup fails -> the drop is treated as a successful no-op.
		collectionName := funcutil.GenRandomStr()
		core := newTestCore(withInvalidMeta())
		task := &dropCollectionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
				CollectionName: collectionName,
			},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
	})
	t.Run("failed to expire cache", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName}
		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return coll.Clone(), nil
		}
		meta.ListAliasesByIDFunc = func(collID UniqueID) []string {
			return []string{}
		}
		core := newTestCore(withInvalidProxyManager(), withMeta(meta))
		task := &dropCollectionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
				CollectionName: collectionName,
			},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
	t.Run("failed to change collection state", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName}
		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return coll.Clone(), nil
		}
		meta.ChangeCollectionStateFunc = func(ctx context.Context, collectionID UniqueID, state etcdpb.CollectionState, ts Timestamp) error {
			return errors.New("error mock ChangeCollectionState")
		}
		meta.ListAliasesByIDFunc = func(collID UniqueID) []string {
			return []string{}
		}
		core := newTestCore(withValidProxyManager(), withMeta(meta))
		task := &dropCollectionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
				CollectionName: collectionName,
			},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
	t.Run("normal case, redo", func(t *testing.T) {
		defer cleanTestEnv()

		collectionName := funcutil.GenRandomStr()
		shardNum := 2

		ticker := newRocksMqTtSynchronizer()
		pchans := ticker.getDmlChannelNames(shardNum)
		ticker.addDmlChannels(pchans...)

		coll := &model.Collection{Name: collectionName, ShardsNum: int32(shardNum), PhysicalChannelNames: pchans}

		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return coll.Clone(), nil
		}
		meta.ChangeCollectionStateFunc = func(ctx context.Context, collectionID UniqueID, state etcdpb.CollectionState, ts Timestamp) error {
			return nil
		}
		meta.ListAliasesByIDFunc = func(collID UniqueID) []string {
			return []string{}
		}
		// Each async step reports completion through its own buffered channel so
		// the test can wait for it deterministically.
		removeCollectionMetaCalled := false
		removeCollectionMetaChan := make(chan struct{}, 1)
		meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error {
			removeCollectionMetaCalled = true
			removeCollectionMetaChan <- struct{}{}
			return nil
		}

		broker := newMockBroker()
		releaseCollectionCalled := false
		releaseCollectionChan := make(chan struct{}, 1)
		broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error {
			releaseCollectionCalled = true
			releaseCollectionChan <- struct{}{}
			return nil
		}
		dropIndexCalled := false
		dropIndexChan := make(chan struct{}, 1)
		broker.DropCollectionIndexFunc = func(ctx context.Context, collID UniqueID) error {
			dropIndexCalled = true
			dropIndexChan <- struct{}{}
			return nil
		}

		gc := newMockGarbageCollector()
		deleteCollectionCalled := false
		deleteCollectionChan := make(chan struct{}, 1)
		gc.GcCollectionDataFunc = func(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error {
			deleteCollectionCalled = true
			deleteCollectionChan <- struct{}{}
			return nil
		}

		core := newTestCore(
			withValidProxyManager(),
			withMeta(meta),
			withBroker(broker),
			withGarbageCollector(gc),
			withTtSynchronizer(ticker))

		task := &dropCollectionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
				CollectionName: collectionName,
			},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)

		// check if redo worked: block on each signal channel in step order.
		<-releaseCollectionChan
		assert.True(t, releaseCollectionCalled)
		<-dropIndexChan
		assert.True(t, dropIndexCalled)
		<-deleteCollectionChan
		assert.True(t, deleteCollectionCalled)
		<-removeCollectionMetaChan
		assert.True(t, removeCollectionMetaCalled)
	})
}

View File

@ -0,0 +1,90 @@
package rootcoord
import (
"context"
"fmt"
"github.com/milvus-io/milvus/internal/log"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/commonpb"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// dropPartitionTask drops a partition from a collection via a redo task.
type dropPartitionTask struct {
	baseTaskV2
	Req      *milvuspb.DropPartitionRequest
	collMeta *model.Collection // collection meta resolved in Prepare and consumed by Execute
}
// Prepare validates the request, refuses to drop the default partition, and
// caches the collection meta that Execute later uses to resolve the partition.
func (t *dropPartitionTask) Prepare(ctx context.Context) error {
	if err := CheckMsgType(t.Req.GetBase().GetMsgType(), commonpb.MsgType_DropPartition); err != nil {
		return err
	}
	// The default partition must always exist; refuse to drop it.
	if t.Req.GetPartitionName() == Params.CommonCfg.DefaultPartitionName {
		return fmt.Errorf("default partition cannot be deleted")
	}
	collMeta, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), t.GetTs())
	if err != nil {
		// Is this idempotent? NOTE(review): erroring here makes dropping a
		// partition of a missing collection fail instead of no-op — confirm
		// this is the intended contract.
		return err
	}
	t.collMeta = collMeta
	return nil
}
// Execute drops the partition through a redo task: cache expiration and the
// partition state change run synchronously; data gc and meta removal run
// asynchronously. Dropping a partition that does not exist is a no-op.
func (t *dropPartitionTask) Execute(ctx context.Context) error {
	// Resolve the partition id from the collection meta cached by Prepare.
	partID := common.InvalidPartitionID
	for _, partition := range t.collMeta.Partitions {
		if partition.PartitionName == t.Req.GetPartitionName() {
			partID = partition.PartitionID
			break
		}
	}
	if partID == common.InvalidPartitionID {
		log.Warn("drop an non-existent partition", zap.String("collection", t.Req.GetCollectionName()), zap.String("partition", t.Req.GetPartitionName()))
		// make dropping partition idempotent.
		return nil
	}

	redoTask := newBaseRedoTask()
	// Sync: proxies stop serving and the partition flips to "dropping" before
	// any destructive async work starts.
	redoTask.AddSyncStep(&ExpireCacheStep{
		baseStep:        baseStep{core: t.core},
		collectionNames: []string{t.Req.GetCollectionName()},
		collectionID:    t.collMeta.CollectionID,
		ts:              t.GetTs(),
	})
	redoTask.AddSyncStep(&ChangePartitionStateStep{
		baseStep:     baseStep{core: t.core},
		collectionID: t.collMeta.CollectionID,
		partitionID:  partID,
		state:        pb.PartitionState_PartitionDropping,
		ts:           t.GetTs(),
	})

	// TODO: release partition when query coord is ready.
	// Async: gc the partition data first, then remove its meta record.
	redoTask.AddAsyncStep(&DeletePartitionDataStep{
		baseStep: baseStep{core: t.core},
		pchans:   t.collMeta.PhysicalChannelNames,
		partition: &model.Partition{
			PartitionID:   partID,
			PartitionName: t.Req.GetPartitionName(),
			CollectionID:  t.collMeta.CollectionID,
		},
		ts: t.GetTs(),
	})
	redoTask.AddAsyncStep(&RemovePartitionMetaStep{
		baseStep:     baseStep{core: t.core},
		collectionID: t.collMeta.CollectionID,
		partitionID:  partID,
		ts:           t.GetTs(),
	})

	return redoTask.Execute(ctx)
}

View File

@ -0,0 +1,174 @@
package rootcoord
import (
"context"
"testing"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// Test_dropPartitionTask_Prepare covers msg-type validation, the default
// partition guard, collection-lookup failure, and the happy path (which must
// cache the collection meta on the task).
func Test_dropPartitionTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		task := &dropPartitionTask{
			Req: &milvuspb.DropPartitionRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropCollection},
			},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})
	t.Run("drop default partition", func(t *testing.T) {
		// The default partition is protected and cannot be dropped.
		task := &dropPartitionTask{
			Req: &milvuspb.DropPartitionRequest{
				Base:          &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
				PartitionName: Params.CommonCfg.DefaultPartitionName,
			},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})
	t.Run("failed to get collection meta", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &dropPartitionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropPartitionRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
			},
		}
		err := task.Prepare(context.Background())
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		Params.InitOnce()

		collectionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName}
		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return coll.Clone(), nil
		}
		core := newTestCore(withMeta(meta))
		task := &dropPartitionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropPartitionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
				CollectionName: collectionName,
			},
		}
		err := task.Prepare(context.Background())
		assert.NoError(t, err)
		// Prepare must stash an equal copy of the collection meta on the task.
		assert.True(t, coll.Equal(*task.collMeta))
	})
}
// Test_dropPartitionTask_Execute covers the idempotent non-existent-partition
// path, the two synchronous failure paths, and the full redo flow; async
// steps are verified through signal channels.
func Test_dropPartitionTask_Execute(t *testing.T) {
	t.Run("drop non-existent partition", func(t *testing.T) {
		// The partition is absent from collMeta -> successful no-op.
		collectionName := funcutil.GenRandomStr()
		partitionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{}}
		task := &dropPartitionTask{
			Req: &milvuspb.DropPartitionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
				CollectionName: collectionName,
				PartitionName:  partitionName,
			},
			collMeta: coll.Clone(),
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
	})
	t.Run("failed to expire cache", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		partitionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{{PartitionName: partitionName}}}
		core := newTestCore(withInvalidProxyManager())
		task := &dropPartitionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropPartitionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
				CollectionName: collectionName,
				PartitionName:  partitionName,
			},
			collMeta: coll.Clone(),
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
	t.Run("failed to change partition state", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		partitionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{{PartitionName: partitionName}}}
		core := newTestCore(withValidProxyManager(), withInvalidMeta())
		task := &dropPartitionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropPartitionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
				CollectionName: collectionName,
				PartitionName:  partitionName,
			},
			collMeta: coll.Clone(),
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
	})
	t.Run("normal case", func(t *testing.T) {
		collectionName := funcutil.GenRandomStr()
		partitionName := funcutil.GenRandomStr()
		coll := &model.Collection{Name: collectionName, Partitions: []*model.Partition{{PartitionName: partitionName}}}

		// Async steps report completion through buffered channels so the test
		// can wait for them deterministically.
		removePartitionMetaCalled := false
		removePartitionMetaChan := make(chan struct{}, 1)
		meta := newMockMetaTable()
		meta.ChangePartitionStateFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, state etcdpb.PartitionState, ts Timestamp) error {
			return nil
		}
		meta.RemovePartitionFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error {
			removePartitionMetaCalled = true
			removePartitionMetaChan <- struct{}{}
			return nil
		}

		gc := newMockGarbageCollector()
		deletePartitionCalled := false
		deletePartitionChan := make(chan struct{}, 1)
		gc.GcPartitionDataFunc = func(ctx context.Context, pChannels []string, coll *model.Partition, ts typeutil.Timestamp) error {
			deletePartitionChan <- struct{}{}
			deletePartitionCalled = true
			return nil
		}

		core := newTestCore(withValidProxyManager(), withMeta(meta), withGarbageCollector(gc))

		task := &dropPartitionTask{
			baseTaskV2: baseTaskV2{core: core},
			Req: &milvuspb.DropPartitionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_DropPartition},
				CollectionName: collectionName,
				PartitionName:  partitionName,
			},
			collMeta: coll.Clone(),
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)

		// check if redo worked.
		<-removePartitionMetaChan
		assert.True(t, removePartitionMetaCalled)
		<-deletePartitionChan
		assert.True(t, deletePartitionCalled)
	})
}

View File

@ -0,0 +1,158 @@
package rootcoord
import (
"context"
"time"
ms "github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metastore/model"
"go.uber.org/zap"
)
// GarbageCollector recycles the data of dropped collections/partitions and
// finishes drop operations that were interrupted (e.g. by a restart).
type GarbageCollector interface {
	// ReDropCollection resumes an interrupted drop-collection: release the
	// collection, drop its index, gc its data, then remove its meta.
	ReDropCollection(collMeta *model.Collection, ts Timestamp)
	// RemoveCreatingCollection cleans up a collection whose creation did not
	// complete: unwatch its channels, then remove its meta.
	RemoveCreatingCollection(collMeta *model.Collection)
	// ReDropPartition resumes an interrupted drop-partition: gc the partition
	// data, then remove its meta.
	ReDropPartition(pChannels []string, partition *model.Partition, ts Timestamp)
	// GcCollectionData notifies datanodes (via the dml channels) to gc the
	// collection's data.
	GcCollectionData(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error
	// GcPartitionData notifies datanodes (via the dml channels) to gc the
	// partition's data.
	GcPartitionData(ctx context.Context, pChannels []string, partition *model.Partition, ts typeutil.Timestamp) error
}
// GarbageCollectorCtx is the default GarbageCollector implementation backed
// by a Core instance.
type GarbageCollectorCtx struct {
	s *Core // owning RootCoord core; provides meta, broker and time-tick access
}
// newGarbageCollectorCtx builds a garbage collector bound to the given Core.
func newGarbageCollectorCtx(s *Core) *GarbageCollectorCtx {
	gc := &GarbageCollectorCtx{}
	gc.s = s
	return gc
}
// ReDropCollection finishes an interrupted drop-collection during recovery.
// The steps run in order — release, drop index, gc data, remove meta — and
// any failure is logged and aborts the sequence so a later recovery retries.
func (c *GarbageCollectorCtx) ReDropCollection(collMeta *model.Collection, ts Timestamp) {
	// TODO: remove this after data gc can be notified by rpc.
	// Re-register the dml channels so the gc broadcast below can be delivered.
	c.s.chanTimeTick.addDmlChannels(collMeta.PhysicalChannelNames...)
	defer c.s.chanTimeTick.removeDmlChannels(collMeta.PhysicalChannelNames...)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()

	if err := c.s.broker.ReleaseCollection(ctx, collMeta.CollectionID); err != nil {
		log.Error("failed to release collection when recovery", zap.Error(err), zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID))
		return
	}

	if err := c.s.broker.DropCollectionIndex(ctx, collMeta.CollectionID); err != nil {
		log.Error("failed to drop collection index when recovery", zap.Error(err), zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID))
		return
	}

	if err := c.GcCollectionData(ctx, collMeta, ts); err != nil {
		log.Error("failed to notify datacoord to gc collection when recovery", zap.Error(err), zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID))
		return
	}

	// Meta removal is last: a crash before this point keeps the record so
	// recovery can run again.
	if err := c.s.meta.RemoveCollection(ctx, collMeta.CollectionID, ts); err != nil {
		log.Error("failed to remove collection when recovery", zap.Error(err), zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID))
	}
}
// RemoveCreatingCollection cleans up a collection whose creation was
// interrupted: unwatch its channels, then remove its meta record (at the
// collection's own CreateTime). Failures are logged; the unwatch failure
// aborts so recovery can retry.
func (c *GarbageCollectorCtx) RemoveCreatingCollection(collMeta *model.Collection) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()

	if err := c.s.broker.UnwatchChannels(ctx, &watchInfo{collectionID: collMeta.CollectionID, vChannels: collMeta.VirtualChannelNames}); err != nil {
		log.Error("failed to unwatch channels when recovery",
			zap.Error(err),
			zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID),
			zap.Strings("vchans", collMeta.VirtualChannelNames), zap.Strings("pchans", collMeta.PhysicalChannelNames))
		return
	}

	if err := c.s.meta.RemoveCollection(ctx, collMeta.CollectionID, collMeta.CreateTime); err != nil {
		log.Error("failed to remove collection when recovery", zap.Error(err), zap.String("collection", collMeta.Name), zap.Int64("collection id", collMeta.CollectionID))
	}
}
// ReDropPartition finishes an interrupted drop-partition during recovery:
// gc the partition data over the given pchannels, then remove the partition
// meta. Failures are logged; a gc failure aborts so recovery can retry.
func (c *GarbageCollectorCtx) ReDropPartition(pChannels []string, partition *model.Partition, ts Timestamp) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()

	// TODO: release partition when query coord is ready.

	// TODO: remove this after data gc can be notified by rpc.
	// Re-register the dml channels so the gc broadcast below can be delivered.
	c.s.chanTimeTick.addDmlChannels(pChannels...)
	defer c.s.chanTimeTick.removeDmlChannels(pChannels...)

	if err := c.GcPartitionData(ctx, pChannels, partition, ts); err != nil {
		log.Error("failed to notify datanodes to gc partition", zap.Error(err))
		return
	}

	if err := c.s.meta.RemovePartition(ctx, partition.CollectionID, partition.PartitionID, ts); err != nil {
		log.Error("failed to remove partition when recovery", zap.Error(err))
	}
}
// GcCollectionData broadcasts a DropCollection message on the collection's
// physical channels so that datanodes can garbage-collect its data, then
// sends a time tick so the message becomes visible to the consumers.
func (c *GarbageCollectorCtx) GcCollectionData(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error {
	msg := &ms.DropCollectionMsg{
		BaseMsg: ms.BaseMsg{
			Ctx:            ctx,
			BeginTimestamp: ts,
			EndTimestamp:   ts,
			HashValues:     []uint32{0},
		},
		DropCollectionRequest: internalpb.DropCollectionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_DropCollection,
				Timestamp: ts,
				SourceID:  c.s.session.ServerID,
			},
			CollectionName: coll.Name,
			CollectionID:   coll.CollectionID,
		},
	}
	pack := ms.MsgPack{}
	pack.Msgs = append(pack.Msgs, msg)
	if err := c.s.chanTimeTick.broadcastDmlChannels(coll.PhysicalChannelNames, &pack); err != nil {
		return err
	}
	// TODO: remove this after gc can be notified by rpc. Without this tt, DropCollectionMsg cannot be seen by
	// datanodes.
	return c.s.chanTimeTick.sendTimeTickToChannel(coll.PhysicalChannelNames, ts)
}
// GcPartitionData broadcasts a DropPartition message on the given physical
// channels so datanodes can garbage-collect the partition's data, then sends
// a time tick so the message becomes visible to the consumers.
func (c *GarbageCollectorCtx) GcPartitionData(ctx context.Context, pChannels []string, partition *model.Partition, ts typeutil.Timestamp) error {
	msgPack := ms.MsgPack{}
	baseMsg := ms.BaseMsg{
		Ctx:            ctx,
		BeginTimestamp: ts,
		EndTimestamp:   ts,
		HashValues:     []uint32{0},
	}
	msg := &ms.DropPartitionMsg{
		BaseMsg: baseMsg,
		DropPartitionRequest: internalpb.DropPartitionRequest{
			Base: &commonpb.MsgBase{
				MsgType:   commonpb.MsgType_DropPartition,
				Timestamp: ts,
				SourceID:  c.s.session.ServerID,
			},
			PartitionName: partition.PartitionName,
			CollectionID:  partition.CollectionID,
			PartitionID:   partition.PartitionID,
		},
	}
	msgPack.Msgs = append(msgPack.Msgs, msg)
	if err := c.s.chanTimeTick.broadcastDmlChannels(pChannels, &msgPack); err != nil {
		return err
	}
	// TODO: remove this after gc can be notified by rpc. Without this tt, DropCollectionMsg cannot be seen by
	// datanodes.
	return c.s.chanTimeTick.sendTimeTickToChannel(pChannels, ts)
}

View File

@ -0,0 +1,208 @@
package rootcoord
import (
"context"
"errors"
"testing"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/metastore/model"
)
// TestGarbageCollectorCtx_ReDropCollection covers the recovery path that
// re-drops a half-dropped collection. The expected step order (as exercised
// below) is: release collection -> drop collection index -> gc collection
// data -> remove collection meta. Each failing subtest breaks one step and
// asserts the earlier steps were still invoked.
func TestGarbageCollectorCtx_ReDropCollection(t *testing.T) {
	t.Run("failed to release collection", func(t *testing.T) {
		broker := newMockBroker()
		broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error {
			return errors.New("error mock ReleaseCollection")
		}
		ticker := newTickerWithMockNormalStream()
		core := newTestCore(withBroker(broker), withTtSynchronizer(ticker))
		gc := newGarbageCollectorCtx(core)
		// ReDropCollection swallows the error; nothing further to assert here.
		gc.ReDropCollection(&model.Collection{}, 1000)
	})

	t.Run("failed to DropCollectionIndex", func(t *testing.T) {
		broker := newMockBroker()
		releaseCollectionCalled := false
		broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error {
			releaseCollectionCalled = true
			return nil
		}
		broker.DropCollectionIndexFunc = func(ctx context.Context, collID UniqueID) error {
			return errors.New("error mock DropCollectionIndex")
		}
		ticker := newTickerWithMockNormalStream()
		core := newTestCore(withBroker(broker), withTtSynchronizer(ticker))
		gc := newGarbageCollectorCtx(core)
		gc.ReDropCollection(&model.Collection{}, 1000)
		// The release step precedes the failing index drop.
		assert.True(t, releaseCollectionCalled)
	})

	t.Run("failed to GcCollectionData", func(t *testing.T) {
		broker := newMockBroker()
		releaseCollectionCalled := false
		broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error {
			releaseCollectionCalled = true
			return nil
		}
		dropCollectionIndexCalled := false
		broker.DropCollectionIndexFunc = func(ctx context.Context, collID UniqueID) error {
			dropCollectionIndexCalled = true
			return nil
		}
		ticker := newTickerWithMockFailStream() // failed to broadcast drop msg.
		core := newTestCore(withBroker(broker), withTtSynchronizer(ticker))
		gc := newGarbageCollectorCtx(core)
		shardsNum := 2
		pchans := ticker.getDmlChannelNames(shardsNum)
		gc.ReDropCollection(&model.Collection{PhysicalChannelNames: pchans}, 1000)
		// Both steps before the failing broadcast were executed.
		assert.True(t, releaseCollectionCalled)
		assert.True(t, dropCollectionIndexCalled)
	})

	t.Run("failed to remove collection", func(t *testing.T) {
		broker := newMockBroker()
		releaseCollectionCalled := false
		broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error {
			releaseCollectionCalled = true
			return nil
		}
		dropCollectionIndexCalled := false
		broker.DropCollectionIndexFunc = func(ctx context.Context, collID UniqueID) error {
			dropCollectionIndexCalled = true
			return nil
		}
		meta := newMockMetaTable()
		meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error {
			return errors.New("error mock RemoveCollection")
		}
		ticker := newTickerWithMockNormalStream()
		core := newTestCore(withBroker(broker),
			withTtSynchronizer(ticker),
			withMeta(meta))
		gc := newGarbageCollectorCtx(core)
		gc.ReDropCollection(&model.Collection{}, 1000)
		assert.True(t, releaseCollectionCalled)
		assert.True(t, dropCollectionIndexCalled)
	})

	t.Run("normal case", func(t *testing.T) {
		broker := newMockBroker()
		releaseCollectionCalled := false
		broker.ReleaseCollectionFunc = func(ctx context.Context, collectionID UniqueID) error {
			releaseCollectionCalled = true
			return nil
		}
		dropCollectionIndexCalled := false
		broker.DropCollectionIndexFunc = func(ctx context.Context, collID UniqueID) error {
			dropCollectionIndexCalled = true
			return nil
		}
		meta := newMockMetaTable()
		removeCollectionCalled := false
		meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error {
			removeCollectionCalled = true
			return nil
		}
		ticker := newTickerWithMockNormalStream()
		core := newTestCore(withBroker(broker),
			withTtSynchronizer(ticker),
			withMeta(meta))
		gc := newGarbageCollectorCtx(core)
		gc.ReDropCollection(&model.Collection{}, 1000)
		// All three steps complete on the happy path.
		assert.True(t, releaseCollectionCalled)
		assert.True(t, dropCollectionIndexCalled)
		assert.True(t, removeCollectionCalled)
	})
}
// TestGarbageCollectorCtx_RemoveCreatingCollection covers cleanup of a
// collection whose creation never completed: unwatch its channels, then
// remove its meta entry. Failing subtests break one step and assert the
// earlier step was still invoked.
func TestGarbageCollectorCtx_RemoveCreatingCollection(t *testing.T) {
	t.Run("failed to UnwatchChannels", func(t *testing.T) {
		broker := newMockBroker()
		broker.UnwatchChannelsFunc = func(ctx context.Context, info *watchInfo) error {
			return errors.New("error mock UnwatchChannels")
		}
		core := newTestCore(withBroker(broker))
		gc := newGarbageCollectorCtx(core)
		// RemoveCreatingCollection swallows the error; nothing to assert here.
		gc.RemoveCreatingCollection(&model.Collection{})
	})

	t.Run("failed to RemoveCollection", func(t *testing.T) {
		broker := newMockBroker()
		unwatchChannelsCalled := false
		broker.UnwatchChannelsFunc = func(ctx context.Context, info *watchInfo) error {
			unwatchChannelsCalled = true
			return nil
		}
		meta := newMockMetaTable()
		meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error {
			return errors.New("error mock RemoveCollection")
		}
		core := newTestCore(withBroker(broker), withMeta(meta))
		gc := newGarbageCollectorCtx(core)
		gc.RemoveCreatingCollection(&model.Collection{})
		// The unwatch step precedes the failing meta removal.
		assert.True(t, unwatchChannelsCalled)
	})

	t.Run("normal case", func(t *testing.T) {
		broker := newMockBroker()
		unwatchChannelsCalled := false
		broker.UnwatchChannelsFunc = func(ctx context.Context, info *watchInfo) error {
			unwatchChannelsCalled = true
			return nil
		}
		meta := newMockMetaTable()
		removeCollectionCalled := false
		meta.RemoveCollectionFunc = func(ctx context.Context, collectionID UniqueID, ts Timestamp) error {
			removeCollectionCalled = true
			return nil
		}
		core := newTestCore(withBroker(broker), withMeta(meta))
		gc := newGarbageCollectorCtx(core)
		gc.RemoveCreatingCollection(&model.Collection{})
		// Both steps complete on the happy path.
		assert.True(t, unwatchChannelsCalled)
		assert.True(t, removeCollectionCalled)
	})
}
// func TestGarbageCollectorCtx_ReDropPartition(t *testing.T) {
// t.Run("failed to GcPartitionData", func(t *testing.T) {
// ticker := newTickerWithMockFailStream() // failed to broadcast drop msg.
// shardsNum := 2
// pchans := ticker.getDmlChannelNames(shardsNum)
// core := newTestCore(withTtSynchronizer(ticker))
// gc := newGarbageCollectorCtx(core)
// gc.ReDropPartition(pchans, &model.Partition{}, 100000)
// })
//
// t.Run("failed to RemovePartition", func(t *testing.T) {
// ticker := newTickerWithMockNormalStream()
// shardsNum := 2
// pchans := ticker.getDmlChannelNames(shardsNum)
// meta := newMockMetaTable()
// meta.RemovePartitionFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error {
// return errors.New("error mock RemovePartition")
// }
// core := newTestCore(withMeta(meta), withTtSynchronizer(ticker))
// gc := newGarbageCollectorCtx(core)
// gc.ReDropPartition(pchans, &model.Partition{}, 100000)
// })
//
// t.Run("normal case", func(t *testing.T) {
// ticker := newTickerWithMockNormalStream()
// shardsNum := 2
// pchans := ticker.getDmlChannelNames(shardsNum)
// meta := newMockMetaTable()
// removePartitionCalled := false
// meta.RemovePartitionFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error {
// removePartitionCalled = true
// return nil
// }
// core := newTestCore(withMeta(meta), withTtSynchronizer(ticker))
// gc := newGarbageCollectorCtx(core)
// gc.ReDropPartition(pchans, &model.Partition{}, 100000)
// assert.True(t, removePartitionCalled)
// })
// }
//

View File

@ -0,0 +1,35 @@
package rootcoord
import (
"context"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// hasCollectionTask handles a HasCollection request: it reports whether a
// collection with the requested name exists at the requested timestamp.
type hasCollectionTask struct {
	baseTaskV2
	Req *milvuspb.HasCollectionRequest // incoming request
	Rsp *milvuspb.BoolResponse         // response; Value reports existence
}
// Prepare validates that the request carries the HasCollection message type.
func (t *hasCollectionTask) Prepare(ctx context.Context) error {
	return CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_HasCollection)
}
// Execute looks up the collection by name at the request timestamp and sets
// Rsp.Value accordingly. A lookup failure is reported as "not found" rather
// than as an error, so the returned error is always nil.
func (t *hasCollectionTask) Execute(ctx context.Context) error {
	t.Rsp.Status = succStatus()
	ts := t.Req.GetTimeStamp()
	if ts == 0 {
		// A zero timestamp means "read the latest view".
		ts = typeutil.MaxTimestamp
		t.Req.TimeStamp = ts
	}
	_, err := t.core.meta.GetCollectionByName(ctx, t.Req.GetCollectionName(), ts)
	t.Rsp.Value = err == nil
	return nil
}

View File

@ -0,0 +1,85 @@
package rootcoord
import (
"context"
"testing"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/stretchr/testify/assert"
)
// Test_hasCollectionTask_Prepare verifies request-type validation in Prepare:
// an undefined message type is rejected, HasCollection is accepted.
func Test_hasCollectionTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		hct := &hasCollectionTask{
			Req: &milvuspb.HasCollectionRequest{
				Base:           &commonpb.MsgBase{MsgType: commonpb.MsgType_Undefined},
				CollectionName: "test coll",
			},
		}
		assert.Error(t, hct.Prepare(context.Background()))
	})

	t.Run("normal case", func(t *testing.T) {
		hct := &hasCollectionTask{
			Req: &milvuspb.HasCollectionRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_HasCollection},
			},
		}
		assert.NoError(t, hct.Prepare(context.Background()))
	})
}
// Test_hasCollectionTask_Execute verifies that Execute reports existence via
// Rsp.Value and always returns a success status, even when the lookup fails.
func Test_hasCollectionTask_Execute(t *testing.T) {
	t.Run("failed", func(t *testing.T) {
		// The invalid meta makes GetCollectionByName fail, which Execute maps
		// to Value=false without surfacing an error.
		core := newTestCore(withInvalidMeta())
		task := &hasCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.HasCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_HasCollection,
				},
			},
			Rsp: &milvuspb.BoolResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
		assert.False(t, task.Rsp.GetValue())
	})

	t.Run("success", func(t *testing.T) {
		meta := newMockMetaTable()
		// A nil-error lookup is enough for Execute to report existence.
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return nil, nil
		}
		core := newTestCore(withMeta(meta))
		task := &hasCollectionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.HasCollectionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_HasCollection,
				},
			},
			Rsp: &milvuspb.BoolResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
		assert.True(t, task.Rsp.GetValue())
	})
}

View File

@ -0,0 +1,43 @@
package rootcoord
import (
"context"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
)
// hasPartitionTask handles a HasPartition request: it reports whether the
// named partition exists within the named collection.
type hasPartitionTask struct {
	baseTaskV2
	Req *milvuspb.HasPartitionRequest // incoming request
	Rsp *milvuspb.BoolResponse        // response; Value reports existence
}
// Prepare validates that the request carries the HasPartition message type.
func (t *hasPartitionTask) Prepare(ctx context.Context) error {
	return CheckMsgType(t.Req.Base.MsgType, commonpb.MsgType_HasPartition)
}
// Execute looks up the collection by name at the latest timestamp and scans
// its partitions for the requested partition name, setting Rsp.Value to the
// result. A missing collection is reported as CollectionNotExists.
func (t *hasPartitionTask) Execute(ctx context.Context) error {
	t.Rsp.Status = succStatus()
	t.Rsp.Value = false
	// TODO: why HasPartitionRequest doesn't contain Timestamp but other requests do.
	coll, err := t.core.meta.GetCollectionByName(ctx, t.Req.CollectionName, typeutil.MaxTimestamp)
	if err != nil {
		t.Rsp.Status = failStatus(commonpb.ErrorCode_CollectionNotExists, err.Error())
		return err
	}
	found := false
	for _, p := range coll.Partitions {
		if p.PartitionName == t.Req.PartitionName {
			found = true
			break
		}
	}
	t.Rsp.Value = found
	return nil
}

View File

@ -0,0 +1,127 @@
package rootcoord
import (
"context"
"testing"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/stretchr/testify/assert"
)
// Test_hasPartitionTask_Prepare verifies request-type validation in Prepare:
// an undefined message type is rejected, HasPartition is accepted.
func Test_hasPartitionTask_Prepare(t *testing.T) {
	t.Run("invalid msg type", func(t *testing.T) {
		hpt := &hasPartitionTask{
			Req: &milvuspb.HasPartitionRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_Undefined},
			},
		}
		assert.Error(t, hpt.Prepare(context.Background()))
	})

	t.Run("normal case", func(t *testing.T) {
		hpt := &hasPartitionTask{
			Req: &milvuspb.HasPartitionRequest{
				Base: &commonpb.MsgBase{MsgType: commonpb.MsgType_HasPartition},
			},
		}
		assert.NoError(t, hpt.Prepare(context.Background()))
	})
}
// Test_hasPartitionTask_Execute verifies that Execute reports partition
// existence via Rsp.Value: a failed collection lookup yields an error with
// CollectionNotExists, a missing partition yields Value=false, and a
// matching partition yields Value=true.
func Test_hasPartitionTask_Execute(t *testing.T) {
	t.Run("fail to get collection", func(t *testing.T) {
		core := newTestCore(withInvalidMeta())
		task := &hasPartitionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.HasPartitionRequest{
				Base: &commonpb.MsgBase{
					MsgType: commonpb.MsgType_HasPartition,
				},
				CollectionName: "test coll",
			},
			Rsp: &milvuspb.BoolResponse{},
		}
		err := task.Execute(context.Background())
		assert.Error(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_CollectionNotExists)
		assert.False(t, task.Rsp.GetValue())
	})

	t.Run("failed", func(t *testing.T) {
		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return &model.Collection{
				Partitions: []*model.Partition{
					{
						PartitionName: "invalid test partition",
					},
				},
			}, nil
		}
		core := newTestCore(withMeta(meta))
		task := &hasPartitionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.HasPartitionRequest{
				Base: &commonpb.MsgBase{
					// Fixed: the fixture previously used MsgType_HasCollection.
					MsgType: commonpb.MsgType_HasPartition,
				},
				CollectionName: "test coll",
				PartitionName:  "test partition",
			},
			Rsp: &milvuspb.BoolResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
		assert.False(t, task.Rsp.GetValue())
	})

	t.Run("success", func(t *testing.T) {
		meta := newMockMetaTable()
		meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
			return &model.Collection{
				Partitions: []*model.Partition{
					{
						PartitionName: "invalid test partition",
					},
					{
						PartitionName: "test partition",
					},
				},
			}, nil
		}
		core := newTestCore(withMeta(meta))
		task := &hasPartitionTask{
			baseTaskV2: baseTaskV2{
				core: core,
				done: make(chan error, 1),
			},
			Req: &milvuspb.HasPartitionRequest{
				Base: &commonpb.MsgBase{
					// Fixed: the fixture previously used MsgType_HasCollection.
					MsgType: commonpb.MsgType_HasPartition,
				},
				CollectionName: "test coll",
				PartitionName:  "test partition",
			},
			Rsp: &milvuspb.BoolResponse{},
		}
		err := task.Execute(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, task.Rsp.GetStatus().GetErrorCode(), commonpb.ErrorCode_Success)
		assert.True(t, task.Rsp.GetValue())
	})
}

View File

@ -0,0 +1,71 @@
package rootcoord
import (
"context"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/proto/datapb"
"go.uber.org/zap"
)
// GetCollectionNameFunc resolves a (collection ID, partition ID) pair to the
// corresponding collection and partition names.
type GetCollectionNameFunc func(collID, partitionID UniqueID) (string, string, error)

// IDAllocator allocates `count` unique IDs, returning the begin and end of
// the allocated range.
type IDAllocator func(count uint32) (UniqueID, UniqueID, error)

// ImportFunc forwards an import task request and returns the response.
type ImportFunc func(ctx context.Context, req *datapb.ImportTaskRequest) *datapb.ImportTaskResponse

// ImportFactory produces the callbacks needed by the import flow.
type ImportFactory interface {
	NewGetCollectionNameFunc() GetCollectionNameFunc
	NewIDAllocator() IDAllocator
	NewImportFunc() ImportFunc
}
// ImportFactoryImpl is the Core-backed implementation of ImportFactory; each
// produced callback delegates to the wrapped Core instance.
type ImportFactoryImpl struct {
	c *Core
}

func (f ImportFactoryImpl) NewGetCollectionNameFunc() GetCollectionNameFunc {
	return GetCollectionNameWithCore(f.c)
}

func (f ImportFactoryImpl) NewIDAllocator() IDAllocator {
	return IDAllocatorWithCore(f.c)
}

func (f ImportFactoryImpl) NewImportFunc() ImportFunc {
	return ImportFuncWithCore(f.c)
}

// NewImportFactory returns an ImportFactory backed by the given Core.
func NewImportFactory(c *Core) ImportFactory {
	return &ImportFactoryImpl{c: c}
}
// GetCollectionNameWithCore builds a GetCollectionNameFunc that resolves
// names through c's meta table. Lookup failures are logged and returned.
func GetCollectionNameWithCore(c *Core) GetCollectionNameFunc {
	return func(collID, partitionID UniqueID) (string, string, error) {
		collectionName, err := c.meta.GetCollectionNameByID(collID)
		if err != nil {
			log.Error("Core failed to get collection name by id", zap.Int64("ID", collID), zap.Error(err))
			return "", "", err
		}
		partitionName, err := c.meta.GetPartitionNameByID(collID, partitionID, 0)
		if err != nil {
			log.Error("Core failed to get partition name by id", zap.Int64("ID", partitionID), zap.Error(err))
			// The collection name resolved fine; return it alongside the error.
			return collectionName, "", err
		}
		return collectionName, partitionName, nil
	}
}
// IDAllocatorWithCore builds an IDAllocator that delegates to c's global ID
// allocator.
func IDAllocatorWithCore(c *Core) IDAllocator {
	return func(count uint32) (UniqueID, UniqueID, error) {
		return c.idAllocator.Alloc(count)
	}
}
// ImportFuncWithCore builds an ImportFunc that forwards import requests to
// the data service through c's broker.
func ImportFuncWithCore(c *Core) ImportFunc {
	return func(ctx context.Context, req *datapb.ImportTaskRequest) *datapb.ImportTaskResponse {
		// TODO: propagate the error to the caller once ImportFunc's signature
		// allows it; for now log it instead of silently discarding it.
		resp, err := c.broker.Import(ctx, req)
		if err != nil {
			log.Error("failed to forward import task to the data service", zap.Error(err))
		}
		return resp
	}
}

File diff suppressed because it is too large Load Diff

View File

@ -17,34 +17,25 @@
package rootcoord
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"strings"
"sync"
"testing"
"time"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/common"
"github.com/milvus-io/milvus/internal/kv"
etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
memkv "github.com/milvus-io/milvus/internal/kv/mem"
"github.com/milvus-io/milvus/internal/metastore"
"github.com/milvus-io/milvus/internal/metastore/kv/rootcoord"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/milvuspb"
"github.com/milvus-io/milvus/internal/proto/schemapb"
"github.com/milvus-io/milvus/internal/util"
"github.com/milvus-io/milvus/internal/util/etcd"
"github.com/milvus-io/milvus/internal/util/funcutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
@ -122,508 +113,23 @@ func (m *mockTestTxnKV) RemoveWithPrefix(key string) error {
func generateMetaTable(t *testing.T) (*MetaTable, *mockTestKV, *mockTestTxnKV, func()) {
rand.Seed(time.Now().UnixNano())
randVal := rand.Int()
Params.Init()
rootPath := fmt.Sprintf("/test/meta/%d", randVal)
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
require.Nil(t, err)
skv, err := rootcoord.NewMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7)
assert.Nil(t, err)
assert.NotNil(t, skv)
txnkv := etcdkv.NewEtcdKV(etcdCli, rootPath)
_, err = NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: txnkv, Snapshot: skv})
assert.Nil(t, err)
mockSnapshotKV := &mockTestKV{
SnapShotKV: skv,
loadWithPrefix: func(key string, ts typeutil.Timestamp) ([]string, []string, error) {
return skv.LoadWithPrefix(key, ts)
},
}
mockTxnKV := &mockTestTxnKV{
TxnKV: txnkv,
loadWithPrefix: func(key string) ([]string, []string, error) { return txnkv.LoadWithPrefix(key) },
save: func(key, value string) error { return txnkv.Save(key, value) },
multiSave: func(kvs map[string]string) error { return txnkv.MultiSave(kvs) },
multiSaveAndRemoveWithPrefix: func(kvs map[string]string, removal []string) error {
return txnkv.MultiSaveAndRemoveWithPrefix(kvs, removal)
},
remove: func(key string) error { return txnkv.Remove(key) },
}
mockMt, err := NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: mockTxnKV, Snapshot: mockSnapshotKV})
assert.Nil(t, err)
return mockMt, mockSnapshotKV, mockTxnKV, func() {
etcdCli.Close()
}
}
func TestMetaTable(t *testing.T) {
const (
collName = "testColl"
collNameInvalid = "testColl_invalid"
aliasName1 = "alias1"
aliasName2 = "alias2"
collID = typeutil.UniqueID(1)
collIDInvalid = typeutil.UniqueID(2)
partIDDefault = typeutil.UniqueID(10)
partID = typeutil.UniqueID(20)
partName = "testPart"
partIDInvalid = typeutil.UniqueID(21)
segID = typeutil.UniqueID(100)
segID2 = typeutil.UniqueID(101)
fieldID = typeutil.UniqueID(110)
fieldID2 = typeutil.UniqueID(111)
indexID = typeutil.UniqueID(10000)
indexID2 = typeutil.UniqueID(10001)
buildID = typeutil.UniqueID(201)
indexName = "testColl_index_110"
)
rand.Seed(time.Now().UnixNano())
randVal := rand.Int()
Params.Init()
rootPath := fmt.Sprintf("/test/meta/%d", randVal)
var vtso typeutil.Timestamp
ftso := func() typeutil.Timestamp {
vtso++
return vtso
}
etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
require.Nil(t, err)
defer etcdCli.Close()
skv, err := rootcoord.NewMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7)
assert.Nil(t, err)
assert.NotNil(t, skv)
txnKV := etcdkv.NewEtcdKV(etcdCli, rootPath)
mt, err := NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: txnKV, Snapshot: skv})
assert.Nil(t, err)
collInfo := &model.Collection{
CollectionID: collID,
Name: collName,
AutoID: false,
Fields: []*model.Field{
{
FieldID: fieldID,
Name: "field110",
IsPrimaryKey: false,
Description: "",
DataType: schemapb.DataType_FloatVector,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "field110-k1",
Value: "field110-v1",
},
{
Key: "field110-k2",
Value: "field110-v2",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "field110-i1",
Value: "field110-v1",
},
{
Key: "field110-i2",
Value: "field110-v2",
},
},
},
},
CreateTime: 0,
Partitions: []*model.Partition{
{
PartitionID: partIDDefault,
PartitionName: Params.CommonCfg.DefaultPartitionName,
PartitionCreatedTimestamp: 0,
},
},
VirtualChannelNames: []string{
fmt.Sprintf("dmChannel_%dv%d", collID, 0),
fmt.Sprintf("dmChannel_%dv%d", collID, 1),
},
PhysicalChannelNames: []string{
funcutil.ToPhysicalChannel(fmt.Sprintf("dmChannel_%dv%d", collID, 0)),
funcutil.ToPhysicalChannel(fmt.Sprintf("dmChannel_%dv%d", collID, 1)),
},
}
var wg sync.WaitGroup
wg.Add(1)
t.Run("add collection", func(t *testing.T) {
defer wg.Done()
ts := ftso()
err = mt.AddCollection(collInfo, ts, "")
assert.Nil(t, err)
assert.Equal(t, uint64(1), ts)
collMeta, err := mt.GetCollectionByName(collName, ts)
assert.Nil(t, err)
assert.Equal(t, collMeta.CreateTime, ts)
assert.Equal(t, collMeta.Partitions[0].PartitionCreatedTimestamp, ts)
assert.Equal(t, partIDDefault, collMeta.Partitions[0].PartitionID)
assert.Equal(t, 1, len(collMeta.Partitions))
assert.True(t, mt.HasCollection(collInfo.CollectionID, 0))
field, err := mt.GetFieldSchema(collName, "field110")
assert.Nil(t, err)
assert.Equal(t, collInfo.Fields[0].FieldID, field.FieldID)
})
wg.Add(1)
t.Run("add alias", func(t *testing.T) {
defer wg.Done()
ts := ftso()
exists := mt.IsAlias(aliasName1)
assert.False(t, exists)
err = mt.AddAlias(aliasName1, collName, ts)
assert.Nil(t, err)
aliases := mt.ListAliases(collID)
assert.Equal(t, aliases, []string{aliasName1})
exists = mt.IsAlias(aliasName1)
assert.True(t, exists)
})
wg.Add(1)
t.Run("alter alias", func(t *testing.T) {
defer wg.Done()
ts := ftso()
err = mt.AlterAlias(aliasName1, collName, ts)
assert.Nil(t, err)
err = mt.AlterAlias(aliasName1, collNameInvalid, ts)
assert.NotNil(t, err)
})
wg.Add(1)
t.Run("delete alias", func(t *testing.T) {
defer wg.Done()
ts := ftso()
err = mt.DropAlias(aliasName1, ts)
assert.Nil(t, err)
})
wg.Add(1)
t.Run("not load alias when load collection meta", func(t *testing.T) {
defer wg.Done()
ts := ftso()
err = mt.AddAlias(aliasName1, collName, ts)
assert.Nil(t, err)
err = mt.reloadFromCatalog()
assert.Nil(t, err)
_, ok := mt.collName2ID[aliasName1]
assert.False(t, ok)
})
wg.Add(1)
t.Run("add partition", func(t *testing.T) {
defer wg.Done()
ts := ftso()
err = mt.AddPartition(collID, partName, partID, ts, "")
assert.Nil(t, err)
//assert.Equal(t, ts, uint64(2))
collMeta, ok := mt.collID2Meta[collID]
assert.True(t, ok)
assert.Equal(t, 2, len(collMeta.Partitions))
assert.Equal(t, collMeta.Partitions[1].PartitionName, partName)
assert.Equal(t, ts, collMeta.Partitions[1].PartitionCreatedTimestamp)
})
wg.Add(1)
t.Run("drop partition", func(t *testing.T) {
defer wg.Done()
ts := ftso()
id, err := mt.DeletePartition(collID, partName, ts, "")
assert.Nil(t, err)
assert.Equal(t, partID, id)
})
wg.Add(1)
t.Run("drop collection", func(t *testing.T) {
defer wg.Done()
ts := ftso()
err = mt.DeleteCollection(collIDInvalid, ts, "")
assert.NotNil(t, err)
ts2 := ftso()
err = mt.AddAlias(aliasName2, collName, ts2)
assert.Nil(t, err)
err = mt.DeleteCollection(collID, ts, "")
assert.Nil(t, err)
ts3 := ftso()
err = mt.DropAlias(aliasName2, ts3)
assert.NotNil(t, err)
})
wg.Add(1)
t.Run("delete credential", func(t *testing.T) {
defer wg.Done()
err = mt.DeleteCredential("")
assert.Nil(t, err)
err = mt.DeleteCredential("abcxyz")
assert.Nil(t, err)
})
/////////////////////////// these tests should run at last, it only used to hit the error lines ////////////////////////
txnkv := etcdkv.NewEtcdKV(etcdCli, rootPath)
mockKV := &mockTestKV{
loadWithPrefix: func(key string, ts typeutil.Timestamp) ([]string, []string, error) {
return nil, nil, nil
},
}
mockTxnKV := &mockTestTxnKV{
TxnKV: txnkv,
loadWithPrefix: func(key string) ([]string, []string, error) { return txnkv.LoadWithPrefix(key) },
save: func(key, value string) error { return txnkv.Save(key, value) },
multiSave: func(kvs map[string]string) error { return txnkv.MultiSave(kvs) },
multiSaveAndRemoveWithPrefix: func(kvs map[string]string, removal []string) error {
return txnkv.MultiSaveAndRemoveWithPrefix(kvs, removal)
},
remove: func(key string) error { return txnkv.Remove(key) },
loadWithPrefix: func(key string) ([]string, []string, error) { return nil, nil, nil },
save: func(key, value string) error { return nil },
multiSave: func(kvs map[string]string) error { return nil },
multiSaveAndRemoveWithPrefix: func(kvs map[string]string, removal []string) error { return nil },
remove: func(key string) error { return nil },
}
mt, err = NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: mockTxnKV, Snapshot: mockKV})
assert.Nil(t, err)
wg.Add(1)
t.Run("add collection failed", func(t *testing.T) {
defer wg.Done()
mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) {
return nil, nil, nil
}
mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error {
return fmt.Errorf("multi save error")
}
collInfo.Partitions = []*model.Partition{}
assert.Error(t, mt.AddCollection(collInfo, 0, ""))
})
wg.Add(1)
t.Run("delete collection failed", func(t *testing.T) {
defer wg.Done()
mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error {
return nil
}
mockKV.multiSaveAndRemoveWithPrefix = func(save map[string]string, keys []string, ts typeutil.Timestamp) error {
return fmt.Errorf("multi save and remove with prefix error")
}
ts := ftso()
assert.Error(t, mt.DeleteCollection(collInfo.CollectionID, ts, ""))
})
wg.Add(1)
t.Run("get collection failed", func(t *testing.T) {
defer wg.Done()
mockKV.save = func(key string, value string, ts typeutil.Timestamp) error {
return nil
}
ts := ftso()
collInfo.Partitions = []*model.Partition{}
err = mt.AddCollection(collInfo, ts, "")
assert.Nil(t, err)
mt.collID2Meta = make(map[int64]model.Collection)
_, err = mt.GetCollectionByName(collInfo.Name, 0)
assert.NotNil(t, err)
assert.EqualError(t, err, fmt.Sprintf("can't find collection %s with id %d", collInfo.Name, collInfo.CollectionID))
})
wg.Add(1)
t.Run("add partition failed", func(t *testing.T) {
defer wg.Done()
mockKV.save = func(key string, value string, ts typeutil.Timestamp) error {
return nil
}
mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) {
return nil, nil, nil
}
err := mt.reloadFromCatalog()
assert.Nil(t, err)
ts := ftso()
collInfo.Partitions = []*model.Partition{}
err = mt.AddCollection(collInfo, ts, "")
assert.Nil(t, err)
ts = ftso()
err = mt.AddPartition(2, "no-part", 22, ts, "")
assert.NotNil(t, err)
assert.EqualError(t, err, "can't find collection. id = 2")
coll := mt.collID2Meta[collInfo.CollectionID]
coll.Partitions = make([]*model.Partition, Params.RootCoordCfg.MaxPartitionNum)
mt.collID2Meta[coll.CollectionID] = coll
err = mt.AddPartition(coll.CollectionID, "no-part", 22, ts, "")
assert.NotNil(t, err)
assert.EqualError(t, err, fmt.Sprintf("maximum partition's number should be limit to %d", Params.RootCoordCfg.MaxPartitionNum))
coll.Partitions = []*model.Partition{{PartitionID: partID, PartitionName: partName, PartitionCreatedTimestamp: ftso()}}
mt.collID2Meta[coll.CollectionID] = coll
mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error {
return fmt.Errorf("multi save error")
}
tmpSaveFunc := mockKV.save
mockKV.save = func(key, value string, ts typeutil.Timestamp) error {
return errors.New("mock")
}
assert.Error(t, mt.AddPartition(coll.CollectionID, "no-part", 22, ts, ""))
mockKV.save = tmpSaveFunc
//err = mt.AddPartition(coll.CollectionID, "no-part", 22, ts, nil)
//assert.NotNil(t, err)
//assert.EqualError(t, err, "multi save error")
mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error {
return nil
}
collInfo.Partitions = []*model.Partition{}
ts = ftso()
err = mt.AddPartition(coll.CollectionID, partName, 22, ts, "")
assert.NotNil(t, err)
assert.EqualError(t, err, fmt.Sprintf("partition name = %s already exists", partName))
err = mt.AddPartition(coll.CollectionID, "no-part", partID, ts, "")
assert.NotNil(t, err)
assert.EqualError(t, err, fmt.Sprintf("partition id = %d already exists", partID))
})
wg.Add(1)
t.Run("has partition failed", func(t *testing.T) {
defer wg.Done()
mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) {
return nil, nil, nil
}
mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error {
return nil
}
err := mt.reloadFromCatalog()
assert.Nil(t, err)
collInfo.Partitions = []*model.Partition{}
ts := ftso()
err = mt.AddCollection(collInfo, ts, "")
assert.Nil(t, err)
assert.False(t, mt.HasPartition(collInfo.CollectionID, "no-partName", 0))
mt.collID2Meta = make(map[int64]model.Collection)
assert.False(t, mt.HasPartition(collInfo.CollectionID, partName, 0))
})
wg.Add(1)
t.Run("delete partition failed", func(t *testing.T) {
defer wg.Done()
mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) {
return nil, nil, nil
}
mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error {
return nil
}
err := mt.reloadFromCatalog()
assert.Nil(t, err)
collInfo.Partitions = []*model.Partition{{PartitionID: partID, PartitionName: partName, PartitionCreatedTimestamp: ftso()}}
ts := ftso()
err = mt.AddCollection(collInfo, ts, "")
assert.Nil(t, err)
ts = ftso()
_, err = mt.DeletePartition(collInfo.CollectionID, Params.CommonCfg.DefaultPartitionName, ts, "")
assert.NotNil(t, err)
assert.EqualError(t, err, "default partition cannot be deleted")
_, err = mt.DeletePartition(collInfo.CollectionID, "abc", ts, "")
assert.NotNil(t, err)
assert.EqualError(t, err, "partition abc does not exist")
mockKV.save = func(key, value string, ts typeutil.Timestamp) error { return errors.New("mocked error") }
_, err = mt.DeletePartition(collInfo.CollectionID, partName, ts, "")
assert.Error(t, err)
mt.collID2Meta = make(map[int64]model.Collection)
_, err = mt.DeletePartition(collInfo.CollectionID, "abc", ts, "")
assert.NotNil(t, err)
assert.EqualError(t, err, fmt.Sprintf("can't find collection id = %d", collInfo.CollectionID))
})
wg.Add(1)
t.Run("get field schema failed", func(t *testing.T) {
defer wg.Done()
mockKV.loadWithPrefix = func(key string, ts typeutil.Timestamp) ([]string, []string, error) {
return nil, nil, nil
}
mockKV.multiSave = func(kvs map[string]string, ts typeutil.Timestamp) error {
return nil
}
mockKV.save = func(key string, value string, ts typeutil.Timestamp) error {
return nil
}
err := mt.reloadFromCatalog()
assert.Nil(t, err)
collInfo.Partitions = []*model.Partition{}
ts := ftso()
err = mt.AddCollection(collInfo, ts, "")
assert.Nil(t, err)
mt.collID2Meta = make(map[int64]model.Collection)
_, err = mt.getFieldSchemaInternal(collInfo.Name, collInfo.Fields[0].Name)
assert.NotNil(t, err)
assert.EqualError(t, err, fmt.Sprintf("collection %s not found", collInfo.Name))
mt.collName2ID = make(map[string]int64)
_, err = mt.getFieldSchemaInternal(collInfo.Name, collInfo.Fields[0].Name)
assert.NotNil(t, err)
assert.EqualError(t, err, fmt.Sprintf("collection %s not found", collInfo.Name))
})
wg.Add(1)
t.Run("add credential failed", func(t *testing.T) {
defer wg.Done()
mockTxnKV.loadWithPrefix = func(key string) ([]string, []string, error) {
return []string{}, []string{}, nil
}
mockTxnKV.load = func(key string) (string, error) {
return "", errors.New("test error")
}
mockTxnKV.save = func(key, value string) error {
return fmt.Errorf("save error")
}
err = mt.AddCredential(&internalpb.CredentialInfo{Username: "x", EncryptedPassword: "a\xc5z"})
assert.Error(t, err)
})
wg.Add(1)
t.Run("alter credential failed", func(t *testing.T) {
defer wg.Done()
mockTxnKV.save = func(key, value string) error {
return fmt.Errorf("save error")
}
err = mt.AlterCredential(&internalpb.CredentialInfo{Username: "", EncryptedPassword: "az"})
assert.Error(t, err)
})
wg.Add(1)
t.Run("delete credential failed", func(t *testing.T) {
defer wg.Done()
mockTxnKV.remove = func(key string) error {
return fmt.Errorf("delete error")
}
err := mt.DeleteCredential("")
assert.Error(t, err)
})
wg.Wait()
mockMt := &MetaTable{catalog: &rootcoord.Catalog{Txn: mockTxnKV, Snapshot: mockSnapshotKV}}
return mockMt, mockSnapshotKV, mockTxnKV, func() {}
}
func TestRbacCreateRole(t *testing.T) {
@ -1170,350 +676,3 @@ func TestRbacListUserRole(t *testing.T) {
assert.Nil(t, err)
assert.Equal(t, 4, len(userRoles))
}
// TestMetaWithTimestamp verifies MetaTable's timestamp (MVCC) semantics:
// a collection/partition created at timestamp t must be visible at t and
// at any later timestamp, invisible at earlier timestamps, and ts == 0
// always reads the latest state.
func TestMetaWithTimestamp(t *testing.T) {
	const (
		collID1 = typeutil.UniqueID(1)
		collID2 = typeutil.UniqueID(2)
		collName1 = "t1"
		collName2 = "t2"
		partID1 = 11
		partID2 = 12
		partName1 = "p1"
		partName2 = "p2"
	)
	rand.Seed(time.Now().UnixNano())
	randVal := rand.Int()
	Params.Init()
	// Randomized root path isolates this run's etcd keys from other runs.
	rootPath := fmt.Sprintf("/test/meta/%d", randVal)
	// Fake TSO: strictly increasing timestamps starting above tsoStart.
	var tsoStart typeutil.Timestamp = 100
	vtso := tsoStart
	ftso := func() typeutil.Timestamp {
		vtso++
		return vtso
	}
	etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
	assert.Nil(t, err)
	defer etcdCli.Close()
	skv, err := rootcoord.NewMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7)
	assert.Nil(t, err)
	assert.NotNil(t, skv)
	txnKV := etcdkv.NewEtcdKV(etcdCli, rootPath)
	mt, err := NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: txnKV, Snapshot: skv})
	assert.Nil(t, err)
	// Create collection 1 at t1, then collection 2 at the later t2.
	collInfo := &model.Collection{
		CollectionID: collID1,
		Name: collName1,
	}
	collInfo.Partitions = []*model.Partition{{PartitionID: partID1, PartitionName: partName1, PartitionCreatedTimestamp: ftso()}}
	t1 := ftso()
	err = mt.AddCollection(collInfo, t1, "")
	assert.Nil(t, err)
	collInfo.CollectionID = collID2
	collInfo.Partitions = []*model.Partition{{PartitionID: partID2, PartitionName: partName2, PartitionCreatedTimestamp: ftso()}}
	collInfo.Name = collName2
	t2 := ftso()
	err = mt.AddCollection(collInfo, t2, "")
	assert.Nil(t, err)
	// HasCollection: both visible at latest (0) and at t2; only coll1 at t1;
	// neither before t1.
	assert.True(t, mt.HasCollection(collID1, 0))
	assert.True(t, mt.HasCollection(collID2, 0))
	assert.True(t, mt.HasCollection(collID1, t2))
	assert.True(t, mt.HasCollection(collID2, t2))
	assert.True(t, mt.HasCollection(collID1, t1))
	assert.False(t, mt.HasCollection(collID2, t1))
	assert.False(t, mt.HasCollection(collID1, tsoStart))
	assert.False(t, mt.HasCollection(collID2, tsoStart))
	// GetCollectionByID follows the same visibility rules.
	c1, err := mt.GetCollectionByID(collID1, 0)
	assert.Nil(t, err)
	c2, err := mt.GetCollectionByID(collID2, 0)
	assert.Nil(t, err)
	assert.Equal(t, collID1, c1.CollectionID)
	assert.Equal(t, collID2, c2.CollectionID)
	c1, err = mt.GetCollectionByID(collID1, t2)
	assert.Nil(t, err)
	c2, err = mt.GetCollectionByID(collID2, t2)
	assert.Nil(t, err)
	assert.Equal(t, collID1, c1.CollectionID)
	assert.Equal(t, collID2, c2.CollectionID)
	c1, err = mt.GetCollectionByID(collID1, t1)
	assert.Nil(t, err)
	c2, err = mt.GetCollectionByID(collID2, t1)
	assert.NotNil(t, err)
	assert.Equal(t, int64(1), c1.CollectionID)
	c1, err = mt.GetCollectionByID(collID1, tsoStart)
	assert.NotNil(t, err)
	c2, err = mt.GetCollectionByID(collID2, tsoStart)
	assert.NotNil(t, err)
	// GetCollectionByName follows the same visibility rules.
	c1, err = mt.GetCollectionByName(collName1, 0)
	assert.Nil(t, err)
	c2, err = mt.GetCollectionByName(collName2, 0)
	assert.Nil(t, err)
	assert.Equal(t, int64(1), c1.CollectionID)
	assert.Equal(t, int64(2), c2.CollectionID)
	c1, err = mt.GetCollectionByName(collName1, t2)
	assert.Nil(t, err)
	c2, err = mt.GetCollectionByName(collName2, t2)
	assert.Nil(t, err)
	assert.Equal(t, int64(1), c1.CollectionID)
	assert.Equal(t, int64(2), c2.CollectionID)
	c1, err = mt.GetCollectionByName(collName1, t1)
	assert.Nil(t, err)
	c2, err = mt.GetCollectionByName(collName2, t1)
	assert.NotNil(t, err)
	assert.Equal(t, int64(1), c1.CollectionID)
	c1, err = mt.GetCollectionByName(collName1, tsoStart)
	assert.NotNil(t, err)
	c2, err = mt.GetCollectionByName(collName2, tsoStart)
	assert.NotNil(t, err)
	// ListCollections returns a name-keyed map; the visible set shrinks as
	// the read timestamp moves back in time.
	getKeys := func(m map[string]*model.Collection) []string {
		keys := make([]string, 0, len(m))
		for key := range m {
			keys = append(keys, key)
		}
		return keys
	}
	s1, err := mt.ListCollections(0)
	assert.Nil(t, err)
	assert.Equal(t, 2, len(s1))
	assert.ElementsMatch(t, getKeys(s1), []string{collName1, collName2})
	s1, err = mt.ListCollections(t2)
	assert.Nil(t, err)
	assert.Equal(t, 2, len(s1))
	assert.ElementsMatch(t, getKeys(s1), []string{collName1, collName2})
	s1, err = mt.ListCollections(t1)
	assert.Nil(t, err)
	assert.Equal(t, 1, len(s1))
	assert.ElementsMatch(t, getKeys(s1), []string{collName1})
	s1, err = mt.ListCollections(tsoStart)
	assert.Nil(t, err)
	assert.Equal(t, 0, len(s1))
	// GetPartitionByName is timestamp-aware as well.
	p1, err := mt.GetPartitionByName(collID1, partName1, 0)
	assert.Nil(t, err)
	p2, err := mt.GetPartitionByName(collID2, partName2, 0)
	assert.Nil(t, err)
	assert.Equal(t, int64(partID1), p1)
	assert.Equal(t, int64(partID2), p2)
	p1, err = mt.GetPartitionByName(collID1, partName1, t2)
	assert.Nil(t, err)
	p2, err = mt.GetPartitionByName(collID2, partName2, t2)
	assert.Nil(t, err)
	assert.Equal(t, int64(11), p1)
	assert.Equal(t, int64(12), p2)
	p1, err = mt.GetPartitionByName(1, partName1, t1)
	assert.Nil(t, err)
	_, err = mt.GetPartitionByName(2, partName2, t1)
	assert.NotNil(t, err)
	assert.Equal(t, int64(11), p1)
	_, err = mt.GetPartitionByName(1, partName1, tsoStart)
	assert.NotNil(t, err)
	_, err = mt.GetPartitionByName(2, partName2, tsoStart)
	assert.NotNil(t, err)
	// Name <-> ID lookups read the latest state only.
	var cID UniqueID
	cID, err = mt.GetCollectionIDByName(collName1)
	assert.NoError(t, err)
	assert.Equal(t, collID1, cID)
	_, err = mt.GetCollectionIDByName("badname")
	assert.Error(t, err)
	name, err := mt.GetCollectionNameByID(collID2)
	assert.Nil(t, err)
	assert.Equal(t, collName2, name)
	_, err = mt.GetCollectionNameByID(int64(999))
	assert.Error(t, err)
}
// TestFixIssue10540 is a regression test for issue #10540: constructing a
// MetaTable must succeed when the txn KV is memory-backed while the snapshot
// KV is etcd-backed.
func TestFixIssue10540(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	randVal := rand.Int()
	Params.Init()
	rootPath := fmt.Sprintf("/test/meta/%d", randVal)
	etcdCli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
	assert.Nil(t, err)
	defer etcdCli.Close()
	skv, err := rootcoord.NewMetaSnapshot(etcdCli, rootPath, TimestampPrefix, 7)
	assert.Nil(t, err)
	assert.NotNil(t, skv)
	// Deliberately mix a memory KV with the etcd snapshot.
	txnKV := memkv.NewMemoryKV()
	_, err = NewMetaTable(context.TODO(), &rootcoord.Catalog{Txn: txnKV, Snapshot: skv})
	assert.Nil(t, err)
}
// TestMetaTable_unlockGetCollectionInfo covers getCollectionInfoInternal:
// lookup succeeds only when both the name->ID map and the ID->meta map
// contain the entry; a hit in either name or alias map without matching
// meta is an error.
func TestMetaTable_unlockGetCollectionInfo(t *testing.T) {
	t.Run("normal case", func(t *testing.T) {
		mt := &MetaTable{
			collName2ID: map[string]typeutil.UniqueID{"test": 100},
			collID2Meta: map[typeutil.UniqueID]model.Collection{
				100: {CollectionID: 100, Name: "test"},
			},
		}
		info, err := mt.getCollectionInfoInternal("test")
		assert.NoError(t, err)
		assert.Equal(t, UniqueID(100), info.CollectionID)
		assert.Equal(t, "test", info.Name)
	})
	t.Run("collection name not found", func(t *testing.T) {
		mt := &MetaTable{collName2ID: nil, collAlias2ID: nil}
		_, err := mt.getCollectionInfoInternal("test")
		assert.Error(t, err)
	})
	t.Run("name found, meta not found", func(t *testing.T) {
		mt := &MetaTable{
			collName2ID: map[string]typeutil.UniqueID{"test": 100},
			collAlias2ID: nil,
			collID2Meta: nil,
		}
		_, err := mt.getCollectionInfoInternal("test")
		assert.Error(t, err)
	})
	t.Run("alias found, meta not found", func(t *testing.T) {
		mt := &MetaTable{
			collName2ID: nil,
			collAlias2ID: map[string]typeutil.UniqueID{"test": 100},
			collID2Meta: nil,
		}
		_, err := mt.getCollectionInfoInternal("test")
		assert.Error(t, err)
	})
}
// MockedCatalog is a testify-based mock of metastore.RootCoordCatalog.
// The optional *Verification callbacks let a test inspect the arguments
// passed to the corresponding catalog method before the canned return
// value (registered via mock.On) is produced.
type MockedCatalog struct {
	mock.Mock
	metastore.RootCoordCatalog
	alterIndexParamsVerification func(ctx context.Context, oldIndex *model.Index, newIndex *model.Index, alterType metastore.AlterType)
	createIndexParamsVerification func(ctx context.Context, col *model.Collection, index *model.Index)
	dropIndexParamsVerification func(ctx context.Context, collectionInfo *model.Collection, dropIdxID typeutil.UniqueID)
}
// ListCollections returns the canned collection map registered via mock.On;
// the error is always nil.
func (mc *MockedCatalog) ListCollections(ctx context.Context, ts typeutil.Timestamp) (map[string]*model.Collection, error) {
	args := mc.Called()
	return args.Get(0).(map[string]*model.Collection), nil
}
// ListIndexes returns the canned index list registered via mock.On;
// the error is always nil.
func (mc *MockedCatalog) ListIndexes(ctx context.Context) ([]*model.Index, error) {
	args := mc.Called()
	return args.Get(0).([]*model.Index), nil
}
// ListAliases returns the canned alias list registered via mock.On;
// the error is always nil.
func (mc *MockedCatalog) ListAliases(ctx context.Context, ts typeutil.Timestamp) ([]*model.Alias, error) {
	args := mc.Called()
	return args.Get(0).([]*model.Alias), nil
}
// AlterIndex invokes the optional verification hook with the real arguments,
// then returns the canned error registered via mock.On.
func (mc *MockedCatalog) AlterIndex(ctx context.Context, oldIndex *model.Index, newIndex *model.Index, alterType metastore.AlterType) error {
	if mc.alterIndexParamsVerification != nil {
		mc.alterIndexParamsVerification(ctx, oldIndex, newIndex, alterType)
	}
	args := mc.Called()
	// args.Error(0) already handles the nil case, replacing the previous
	// hand-rolled nil-check and type assertion.
	return args.Error(0)
}
// TestMetaTable_ReloadFromKV checks that reloadFromCatalog rebuilds the
// in-memory maps (collID2Meta, collName2ID, collAlias2ID) from the catalog's
// ListCollections and ListAliases results.
func TestMetaTable_ReloadFromKV(t *testing.T) {
	mc := &MockedCatalog{}
	collectionName := "cn"
	collInfo := &model.Collection{
		CollectionID: 1,
		Name: collectionName,
		AutoID: false,
		Fields: []*model.Field{
			{
				FieldID: 1,
				Name: "field110",
				IsPrimaryKey: false,
				Description: "",
				DataType: schemapb.DataType_FloatVector,
				TypeParams: []*commonpb.KeyValuePair{
					{
						Key: "field110-k1",
						Value: "field110-v1",
					},
				},
				IndexParams: []*commonpb.KeyValuePair{
					{
						Key: "field110-i1",
						Value: "field110-v1",
					},
				},
			},
		},
		Partitions: []*model.Partition{
			{
				PartitionID: 1,
				PartitionName: Params.CommonCfg.DefaultPartitionName,
				PartitionCreatedTimestamp: 0,
			},
		},
		Aliases: []string{"a", "b"},
	}
	collections := map[string]*model.Collection{collectionName: collInfo}
	mc.On("ListCollections").Return(collections, nil)
	mc.On("ListAliases").Return([]*model.Alias{
		{
			CollectionID: collInfo.CollectionID,
			Name: collInfo.Aliases[0],
		},
		{
			CollectionID: collInfo.CollectionID,
			Name: collInfo.Aliases[1],
		},
	}, nil)
	mt := &MetaTable{}
	mt.catalog = mc
	// The reload error was previously discarded; assert it explicitly so a
	// failing reload is reported here rather than via confusing map checks.
	// (The unused alias1/alias2 copies of collInfo were removed as dead code.)
	err := mt.reloadFromCatalog()
	assert.NoError(t, err)
	assert.True(t, len(mt.collID2Meta) == 1)
	assert.Equal(t, mt.collID2Meta[1], *collInfo)
	assert.True(t, len(mt.collName2ID) == 1)
	assert.True(t, len(mt.collAlias2ID) == 2)
	ret, ok := mt.collAlias2ID[collInfo.Aliases[0]]
	assert.True(t, ok)
	assert.Equal(t, int64(1), ret)
}

View File

@ -0,0 +1,840 @@
package rootcoord
import (
"context"
"errors"
"math/rand"
"os"
"github.com/milvus-io/milvus/internal/mq/msgstream"
"github.com/milvus-io/milvus/internal/proto/indexpb"
"github.com/milvus-io/milvus/internal/allocator"
"github.com/milvus-io/milvus/internal/kv"
"github.com/milvus-io/milvus/internal/tso"
"github.com/milvus-io/milvus/internal/log"
"github.com/milvus-io/milvus/internal/metastore/model"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/milvus-io/milvus/internal/proto/datapb"
pb "github.com/milvus-io/milvus/internal/proto/etcdpb"
"github.com/milvus-io/milvus/internal/proto/internalpb"
"github.com/milvus-io/milvus/internal/proto/proxypb"
"github.com/milvus-io/milvus/internal/proto/querypb"
"github.com/milvus-io/milvus/internal/types"
"github.com/milvus-io/milvus/internal/util/dependency"
"github.com/milvus-io/milvus/internal/util/metricsinfo"
"github.com/milvus-io/milvus/internal/util/retry"
"github.com/milvus-io/milvus/internal/util/sessionutil"
"github.com/milvus-io/milvus/internal/util/typeutil"
"go.uber.org/zap"
)
const (
	// TestProxyID is the server ID assigned to mock proxies in these tests.
	TestProxyID = 100
	// TestRootCoordID is the server ID of the RootCoord under test.
	TestRootCoordID = 200
)
// mockMetaTable is a function-field mock of IMetaTable: each method
// delegates to the corresponding *Func field, so tests inject only the
// behavior they need. Calling an unset method panics (nil func).
type mockMetaTable struct {
	IMetaTable
	ListCollectionsFunc func(ctx context.Context, ts Timestamp) ([]*model.Collection, error)
	AddCollectionFunc func(ctx context.Context, coll *model.Collection) error
	GetCollectionByNameFunc func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error)
	GetCollectionByIDFunc func(ctx context.Context, collectionID UniqueID, ts Timestamp) (*model.Collection, error)
	ChangeCollectionStateFunc func(ctx context.Context, collectionID UniqueID, state pb.CollectionState, ts Timestamp) error
	RemoveCollectionFunc func(ctx context.Context, collectionID UniqueID, ts Timestamp) error
	AddPartitionFunc func(ctx context.Context, partition *model.Partition) error
	ChangePartitionStateFunc func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, state pb.PartitionState, ts Timestamp) error
	RemovePartitionFunc func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error
	CreateAliasFunc func(ctx context.Context, alias string, collectionName string, ts Timestamp) error
	AlterAliasFunc func(ctx context.Context, alias string, collectionName string, ts Timestamp) error
	DropAliasFunc func(ctx context.Context, alias string, ts Timestamp) error
	IsAliasFunc func(name string) bool
	ListAliasesByIDFunc func(collID UniqueID) []string
}
// ListCollections delegates to the injected ListCollectionsFunc.
func (m mockMetaTable) ListCollections(ctx context.Context, ts Timestamp) ([]*model.Collection, error) {
	return m.ListCollectionsFunc(ctx, ts)
}
// AddCollection delegates to the injected AddCollectionFunc.
func (m mockMetaTable) AddCollection(ctx context.Context, coll *model.Collection) error {
	return m.AddCollectionFunc(ctx, coll)
}
// GetCollectionByName delegates to the injected GetCollectionByNameFunc.
func (m mockMetaTable) GetCollectionByName(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
	return m.GetCollectionByNameFunc(ctx, collectionName, ts)
}
// GetCollectionByID delegates to the injected GetCollectionByIDFunc.
func (m mockMetaTable) GetCollectionByID(ctx context.Context, collectionID UniqueID, ts Timestamp) (*model.Collection, error) {
	return m.GetCollectionByIDFunc(ctx, collectionID, ts)
}
// ChangeCollectionState delegates to the injected ChangeCollectionStateFunc.
func (m mockMetaTable) ChangeCollectionState(ctx context.Context, collectionID UniqueID, state pb.CollectionState, ts Timestamp) error {
	return m.ChangeCollectionStateFunc(ctx, collectionID, state, ts)
}
// RemoveCollection delegates to the injected RemoveCollectionFunc.
func (m mockMetaTable) RemoveCollection(ctx context.Context, collectionID UniqueID, ts Timestamp) error {
	return m.RemoveCollectionFunc(ctx, collectionID, ts)
}
// AddPartition delegates to the injected AddPartitionFunc.
func (m mockMetaTable) AddPartition(ctx context.Context, partition *model.Partition) error {
	return m.AddPartitionFunc(ctx, partition)
}
// ChangePartitionState delegates to the injected ChangePartitionStateFunc.
func (m mockMetaTable) ChangePartitionState(ctx context.Context, collectionID UniqueID, partitionID UniqueID, state pb.PartitionState, ts Timestamp) error {
	return m.ChangePartitionStateFunc(ctx, collectionID, partitionID, state, ts)
}
// RemovePartition delegates to the injected RemovePartitionFunc.
func (m mockMetaTable) RemovePartition(ctx context.Context, collectionID UniqueID, partitionID UniqueID, ts Timestamp) error {
	return m.RemovePartitionFunc(ctx, collectionID, partitionID, ts)
}
// CreateAlias delegates to the injected CreateAliasFunc.
func (m mockMetaTable) CreateAlias(ctx context.Context, alias string, collectionName string, ts Timestamp) error {
	return m.CreateAliasFunc(ctx, alias, collectionName, ts)
}
// AlterAlias delegates to the injected AlterAliasFunc.
func (m mockMetaTable) AlterAlias(ctx context.Context, alias string, collectionName string, ts Timestamp) error {
	return m.AlterAliasFunc(ctx, alias, collectionName, ts)
}
// DropAlias delegates to the injected DropAliasFunc.
func (m mockMetaTable) DropAlias(ctx context.Context, alias string, ts Timestamp) error {
	return m.DropAliasFunc(ctx, alias, ts)
}
// IsAlias delegates to the injected IsAliasFunc.
func (m mockMetaTable) IsAlias(name string) bool {
	return m.IsAliasFunc(name)
}
// ListAliasesByID delegates to the injected ListAliasesByIDFunc.
func (m mockMetaTable) ListAliasesByID(collID UniqueID) []string {
	return m.ListAliasesByIDFunc(collID)
}
// newMockMetaTable returns an empty mockMetaTable; callers must set the
// *Func fields they expect to be invoked.
func newMockMetaTable() *mockMetaTable {
	return &mockMetaTable{}
}
// mockIndexCoord is a function-field mock of types.IndexCoord covering the
// three RPCs RootCoord uses in these tests.
type mockIndexCoord struct {
	types.IndexCoord
	GetComponentStatesFunc func(ctx context.Context) (*internalpb.ComponentStates, error)
	GetSegmentIndexStateFunc func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error)
	DropIndexFunc func(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error)
}
// newMockIndexCoord returns an empty mockIndexCoord; callers must set the
// *Func fields they expect to be invoked.
func newMockIndexCoord() *mockIndexCoord {
	return &mockIndexCoord{}
}
// GetComponentStates delegates to the injected GetComponentStatesFunc.
func (m mockIndexCoord) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
	return m.GetComponentStatesFunc(ctx)
}
// GetSegmentIndexState delegates to the injected GetSegmentIndexStateFunc.
func (m mockIndexCoord) GetSegmentIndexState(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) {
	return m.GetSegmentIndexStateFunc(ctx, req)
}
// DropIndex delegates to the injected DropIndexFunc.
func (m mockIndexCoord) DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
	return m.DropIndexFunc(ctx, req)
}
// mockDataCoord is a function-field mock of types.DataCoord covering the
// RPCs RootCoord uses in these tests.
type mockDataCoord struct {
	types.DataCoord
	GetComponentStatesFunc func(ctx context.Context) (*internalpb.ComponentStates, error)
	WatchChannelsFunc func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error)
	AcquireSegmentLockFunc func(ctx context.Context, req *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error)
	ReleaseSegmentLockFunc func(ctx context.Context, req *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error)
	FlushFunc func(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error)
	ImportFunc func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error)
}
// newMockDataCoord returns an empty mockDataCoord; callers must set the
// *Func fields they expect to be invoked.
func newMockDataCoord() *mockDataCoord {
	return &mockDataCoord{}
}
// GetComponentStates delegates to the injected GetComponentStatesFunc.
func (m *mockDataCoord) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
	return m.GetComponentStatesFunc(ctx)
}
// WatchChannels delegates to the injected WatchChannelsFunc.
func (m *mockDataCoord) WatchChannels(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) {
	return m.WatchChannelsFunc(ctx, req)
}
// AcquireSegmentLock delegates to the injected AcquireSegmentLockFunc.
func (m *mockDataCoord) AcquireSegmentLock(ctx context.Context, req *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error) {
	return m.AcquireSegmentLockFunc(ctx, req)
}
// ReleaseSegmentLock delegates to the injected ReleaseSegmentLockFunc.
func (m *mockDataCoord) ReleaseSegmentLock(ctx context.Context, req *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error) {
	return m.ReleaseSegmentLockFunc(ctx, req)
}
// Flush delegates to the injected FlushFunc.
func (m *mockDataCoord) Flush(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) {
	return m.FlushFunc(ctx, req)
}
// Import delegates to the injected ImportFunc.
func (m *mockDataCoord) Import(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
	return m.ImportFunc(ctx, req)
}
// mockQueryCoord is a function-field mock of types.QueryCoord covering the
// RPCs RootCoord uses in these tests.
type mockQueryCoord struct {
	types.QueryCoord
	GetSegmentInfoFunc func(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error)
	GetComponentStatesFunc func(ctx context.Context) (*internalpb.ComponentStates, error)
	ReleaseCollectionFunc func(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
}
// GetSegmentInfo delegates to the injected GetSegmentInfoFunc.
func (m mockQueryCoord) GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
	return m.GetSegmentInfoFunc(ctx, req)
}
// GetComponentStates delegates to the injected GetComponentStatesFunc.
func (m mockQueryCoord) GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error) {
	return m.GetComponentStatesFunc(ctx)
}
// ReleaseCollection delegates to the injected ReleaseCollectionFunc.
func (m mockQueryCoord) ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
	return m.ReleaseCollectionFunc(ctx, req)
}
// newMockQueryCoord returns an empty mockQueryCoord; callers must set the
// *Func fields they expect to be invoked.
func newMockQueryCoord() *mockQueryCoord {
	return &mockQueryCoord{}
}
// newMockIDAllocator builds a GID-allocator mock whose Alloc and AllocOne
// always succeed and hand back zero-valued IDs.
func newMockIDAllocator() *allocator.MockGIDAllocator {
	idAlloc := allocator.NewMockGIDAllocator()
	idAlloc.AllocOneF = func() (allocator.UniqueID, error) {
		return 0, nil
	}
	idAlloc.AllocF = func(count uint32) (allocator.UniqueID, allocator.UniqueID, error) {
		return 0, 0, nil
	}
	return idAlloc
}
// newMockTsoAllocator builds a TSO-allocator mock whose GenerateTSO always
// returns timestamp 0 with no error.
func newMockTsoAllocator() *tso.MockAllocator {
	tsoAlloc := tso.NewMockAllocator()
	tsoAlloc.GenerateTSOF = func(count uint32) (uint64, error) {
		return 0, nil
	}
	return tsoAlloc
}
// newTxnKV builds a TxnKV mock whose Save and Remove are successful no-ops.
func newTxnKV() *kv.TxnKVMock {
	txn := kv.NewMockTxnKV()
	txn.RemoveF = func(key string) error {
		return nil
	}
	txn.SaveF = func(key, value string) error {
		return nil
	}
	return txn
}
// mockProxy is a function-field mock of types.Proxy covering the cache
// invalidation RPCs RootCoord issues to proxies.
type mockProxy struct {
	types.Proxy
	InvalidateCollectionMetaCacheFunc func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
	InvalidateCredentialCacheFunc func(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error)
	RefreshPolicyInfoCacheFunc func(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error)
}
// InvalidateCollectionMetaCache delegates to the injected func field.
func (m mockProxy) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
	return m.InvalidateCollectionMetaCacheFunc(ctx, request)
}
// InvalidateCredentialCache delegates to the injected func field.
func (m mockProxy) InvalidateCredentialCache(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) {
	return m.InvalidateCredentialCacheFunc(ctx, request)
}
// RefreshPolicyInfoCache delegates to the injected func field.
func (m mockProxy) RefreshPolicyInfoCache(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) {
	return m.RefreshPolicyInfoCacheFunc(ctx, request)
}
// newMockProxy builds a proxy mock whose InvalidateCollectionMetaCache
// succeeds by default; other func fields start unset.
func newMockProxy() *mockProxy {
	proxy := &mockProxy{}
	proxy.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
		return succStatus(), nil
	}
	return proxy
}
// newTestCore constructs a Core with the test server ID and applies the
// given functional options in order.
func newTestCore(opts ...Opt) *Core {
	core := &Core{
		session: &sessionutil.Session{ServerID: TestRootCoordID},
	}
	for _, apply := range opts {
		apply(core)
	}
	return core
}
// withValidProxyManager installs a proxyClientManager holding one registered
// mock proxy whose InvalidateCollectionMetaCache always succeeds.
func withValidProxyManager() Opt {
	return func(c *Core) {
		c.proxyClientManager = &proxyClientManager{
			proxyClient: make(map[UniqueID]types.Proxy),
		}
		p := newMockProxy()
		p.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
			return succStatus(), nil
		}
		// Fix: register the mock so invalidation requests actually reach it.
		// The original configured p but never added it to the manager,
		// leaving the "valid" manager empty (compare withInvalidProxyManager).
		c.proxyClientManager.proxyClient[TestProxyID] = p
	}
}
// withInvalidProxyManager installs a proxyClientManager holding one proxy
// whose InvalidateCollectionMetaCache returns an error, so broadcast
// invalidation fails.
func withInvalidProxyManager() Opt {
	return func(c *Core) {
		c.proxyClientManager = &proxyClientManager{
			proxyClient: make(map[UniqueID]types.Proxy),
		}
		p := newMockProxy()
		p.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
			return succStatus(), errors.New("error mock InvalidateCollectionMetaCache")
		}
		c.proxyClientManager.proxyClient[TestProxyID] = p
	}
}
// withMeta sets the Core's meta table.
func withMeta(meta IMetaTable) Opt {
	return func(c *Core) {
		c.meta = meta
	}
}
// withInvalidMeta installs a meta table mock whose configured methods all
// return errors, for exercising failure paths.
func withInvalidMeta() Opt {
	meta := newMockMetaTable()
	meta.ListCollectionsFunc = func(ctx context.Context, ts Timestamp) ([]*model.Collection, error) {
		return nil, errors.New("error mock ListCollections")
	}
	meta.GetCollectionByNameFunc = func(ctx context.Context, collectionName string, ts Timestamp) (*model.Collection, error) {
		return nil, errors.New("error mock GetCollectionByName")
	}
	meta.GetCollectionByIDFunc = func(ctx context.Context, collectionID typeutil.UniqueID, ts Timestamp) (*model.Collection, error) {
		return nil, errors.New("error mock GetCollectionByID")
	}
	meta.AddPartitionFunc = func(ctx context.Context, partition *model.Partition) error {
		return errors.New("error mock AddPartition")
	}
	meta.ChangePartitionStateFunc = func(ctx context.Context, collectionID UniqueID, partitionID UniqueID, state pb.PartitionState, ts Timestamp) error {
		return errors.New("error mock ChangePartitionState")
	}
	meta.CreateAliasFunc = func(ctx context.Context, alias string, collectionName string, ts Timestamp) error {
		return errors.New("error mock CreateAlias")
	}
	meta.AlterAliasFunc = func(ctx context.Context, alias string, collectionName string, ts Timestamp) error {
		return errors.New("error mock AlterAlias")
	}
	meta.DropAliasFunc = func(ctx context.Context, alias string, ts Timestamp) error {
		return errors.New("error mock DropAlias")
	}
	return withMeta(meta)
}
// withIDAllocator sets the Core's ID allocator.
func withIDAllocator(idAllocator allocator.GIDAllocator) Opt {
	return func(c *Core) {
		c.idAllocator = idAllocator
	}
}
// withValidIDAllocator installs an ID allocator whose AllocOne succeeds with
// a random ID.
func withValidIDAllocator() Opt {
	idAllocator := newMockIDAllocator()
	idAllocator.AllocOneF = func() (allocator.UniqueID, error) {
		return rand.Int63(), nil
	}
	return withIDAllocator(idAllocator)
}
// withInvalidIDAllocator installs an ID allocator whose Alloc and AllocOne
// both fail.
func withInvalidIDAllocator() Opt {
	idAllocator := newMockIDAllocator()
	idAllocator.AllocOneF = func() (allocator.UniqueID, error) {
		return -1, errors.New("error mock AllocOne")
	}
	idAllocator.AllocF = func(count uint32) (allocator.UniqueID, allocator.UniqueID, error) {
		return -1, -1, errors.New("error mock Alloc")
	}
	return withIDAllocator(idAllocator)
}
// withQueryCoord sets the Core's QueryCoord client.
func withQueryCoord(qc types.QueryCoord) Opt {
	return func(c *Core) {
		c.queryCoord = qc
	}
}
// withUnhealthyQueryCoord installs a QueryCoord whose GetComponentStates
// reports Abnormal together with an unrecoverable error.
func withUnhealthyQueryCoord() Opt {
	qc := newMockQueryCoord()
	qc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Abnormal},
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "error mock GetComponentStates"),
		}, retry.Unrecoverable(errors.New("error mock GetComponentStates"))
	}
	return withQueryCoord(qc)
}
// withInvalidQueryCoord installs a QueryCoord that reports itself healthy
// but returns transport-level errors from ReleaseCollection and
// GetSegmentInfo.
func withInvalidQueryCoord() Opt {
	qc := newMockQueryCoord()
	qc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy},
			Status: succStatus(),
		}, nil
	}
	qc.ReleaseCollectionFunc = func(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
		return nil, errors.New("error mock ReleaseCollection")
	}
	qc.GetSegmentInfoFunc = func(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
		return nil, errors.New("error mock GetSegmentInfo")
	}
	return withQueryCoord(qc)
}
// withFailedQueryCoord installs a healthy QueryCoord whose RPCs complete
// without transport error but carry a failure Status (application-level
// failure, as opposed to withInvalidQueryCoord's transport errors).
func withFailedQueryCoord() Opt {
	qc := newMockQueryCoord()
	qc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy},
			Status: succStatus(),
		}, nil
	}
	qc.ReleaseCollectionFunc = func(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
		return failStatus(commonpb.ErrorCode_UnexpectedError, "mock release collection error"), nil
	}
	qc.GetSegmentInfoFunc = func(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
		return &querypb.GetSegmentInfoResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock get segment info error"),
		}, nil
	}
	return withQueryCoord(qc)
}
// withValidQueryCoord installs a healthy QueryCoord whose RPCs all succeed.
func withValidQueryCoord() Opt {
	qc := newMockQueryCoord()
	qc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy},
			Status: succStatus(),
		}, nil
	}
	qc.ReleaseCollectionFunc = func(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error) {
		return succStatus(), nil
	}
	qc.GetSegmentInfoFunc = func(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error) {
		return &querypb.GetSegmentInfoResponse{
			Status: succStatus(),
		}, nil
	}
	return withQueryCoord(qc)
}
// withIndexCoord sets the Core's IndexCoord client.
func withIndexCoord(ic types.IndexCoord) Opt {
	return func(c *Core) {
		c.indexCoord = ic
	}
}
// withUnhealthyIndexCoord installs an IndexCoord whose GetComponentStates
// reports Abnormal together with an unrecoverable error.
func withUnhealthyIndexCoord() Opt {
	ic := newMockIndexCoord()
	ic.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Abnormal},
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "error mock GetComponentStates"),
		}, retry.Unrecoverable(errors.New("error mock GetComponentStates"))
	}
	return withIndexCoord(ic)
}
// withInvalidIndexCoord installs a healthy IndexCoord whose
// GetSegmentIndexState and DropIndex return transport-level errors.
func withInvalidIndexCoord() Opt {
	ic := newMockIndexCoord()
	ic.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy},
			Status: succStatus(),
		}, nil
	}
	ic.GetSegmentIndexStateFunc = func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) {
		return &indexpb.GetSegmentIndexStateResponse{}, errors.New("error mock GetSegmentIndexState")
	}
	ic.DropIndexFunc = func(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
		return succStatus(), errors.New("error mock DropIndex")
	}
	return withIndexCoord(ic)
}
// withFailedIndexCoord installs a healthy IndexCoord whose RPCs complete
// without transport error but carry a failure Status.
func withFailedIndexCoord() Opt {
	ic := newMockIndexCoord()
	ic.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy},
			Status: succStatus(),
		}, nil
	}
	ic.GetSegmentIndexStateFunc = func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) {
		return &indexpb.GetSegmentIndexStateResponse{Status: failStatus(commonpb.ErrorCode_UnexpectedError, "reason mock GetSegmentIndexState")}, nil
	}
	ic.DropIndexFunc = func(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
		return failStatus(commonpb.ErrorCode_UnexpectedError, "reason mock DropIndex"), nil
	}
	return withIndexCoord(ic)
}
// withValidIndexCoord installs a healthy IndexCoord whose RPCs all succeed.
func withValidIndexCoord() Opt {
	ic := newMockIndexCoord()
	ic.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy},
			Status: succStatus(),
		}, nil
	}
	ic.GetSegmentIndexStateFunc = func(ctx context.Context, req *indexpb.GetSegmentIndexStateRequest) (*indexpb.GetSegmentIndexStateResponse, error) {
		return &indexpb.GetSegmentIndexStateResponse{Status: succStatus()}, nil
	}
	ic.DropIndexFunc = func(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error) {
		return succStatus(), nil
	}
	return withIndexCoord(ic)
}
// cleanTestEnv removes files generated by tests (e.g. rocksmq data) under
// /tmp/milvus; a removal failure is only logged.
func cleanTestEnv() {
	path := "/tmp/milvus"
	err := os.RemoveAll(path)
	if err != nil {
		log.Warn("failed to clean test directories", zap.Error(err), zap.String("path", path))
	}
	log.Debug("clean test environment", zap.String("path", path))
}
// withTtSynchronizer sets the Core's time-tick synchronizer.
func withTtSynchronizer(ticker *timetickSync) Opt {
	return func(c *Core) {
		c.chanTimeTick = ticker
	}
}
// newRocksMqTtSynchronizer builds a timetickSync backed by the default
// (rocksmq) factory with 4 DML channels and no pre-registered sessions.
func newRocksMqTtSynchronizer() *timetickSync {
	Params.InitOnce()
	Params.RootCoordCfg.DmlChannelNum = 4
	ctx := context.Background()
	factory := dependency.NewDefaultFactory(true)
	chans := map[UniqueID][]string{}
	ticker := newTimeTickSync(ctx, TestRootCoordID, factory, chans)
	return ticker
}
// withRocksMqTtSynchronizer installs a rocksmq-backed time-tick
// synchronizer. cleanTestEnv should be called if tested with this option.
func withRocksMqTtSynchronizer() Opt {
	ticker := newRocksMqTtSynchronizer()
	return withTtSynchronizer(ticker)
}
// withDataCoord sets the Core's DataCoord client.
func withDataCoord(dc types.DataCoord) Opt {
	return func(c *Core) {
		c.dataCoord = dc
	}
}
// withUnhealthyDataCoord installs a DataCoord whose GetComponentStates
// reports Abnormal together with an unrecoverable error.
func withUnhealthyDataCoord() Opt {
	dc := newMockDataCoord()
	dc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Abnormal},
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "error mock GetComponentStates"),
		}, retry.Unrecoverable(errors.New("error mock GetComponentStates"))
	}
	return withDataCoord(dc)
}
// withInvalidDataCoord installs a DataCoord that reports itself healthy but
// returns a transport-level error from every other RPC.
func withInvalidDataCoord() Opt {
	dc := newMockDataCoord()
	dc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy},
			Status: succStatus(),
		}, nil
	}
	// Fix: the original assigned WatchChannelsFunc twice with identical
	// bodies; the redundant duplicate has been removed.
	dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) {
		return nil, errors.New("error mock WatchChannels")
	}
	dc.AcquireSegmentLockFunc = func(ctx context.Context, req *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error) {
		return nil, errors.New("error mock AddSegRefLock")
	}
	dc.ReleaseSegmentLockFunc = func(ctx context.Context, req *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error) {
		return nil, errors.New("error mock ReleaseSegRefLock")
	}
	dc.FlushFunc = func(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) {
		return nil, errors.New("error mock Flush")
	}
	dc.ImportFunc = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		return nil, errors.New("error mock Import")
	}
	return withDataCoord(dc)
}
// withFailedDataCoord installs a healthy DataCoord whose RPCs complete
// without transport error but carry a failure Status.
func withFailedDataCoord() Opt {
	dc := newMockDataCoord()
	dc.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State: &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy},
			Status: succStatus(),
		}, nil
	}
	dc.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) {
		return &datapb.WatchChannelsResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock watch channels error"),
		}, nil
	}
	dc.AcquireSegmentLockFunc = func(ctx context.Context, req *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error) {
		return failStatus(commonpb.ErrorCode_UnexpectedError, "mock add seg ref lock error"), nil
	}
	dc.ReleaseSegmentLockFunc = func(ctx context.Context, req *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error) {
		return failStatus(commonpb.ErrorCode_UnexpectedError, "mock release seg ref lock error"), nil
	}
	dc.FlushFunc = func(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) {
		return &datapb.FlushResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock flush error"),
		}, nil
	}
	dc.ImportFunc = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		return &datapb.ImportTaskResponse{
			Status: failStatus(commonpb.ErrorCode_UnexpectedError, "mock import error"),
		}, nil
	}
	return withDataCoord(dc)
}
// withValidDataCoord returns an Opt whose mock DataCoord is healthy and
// answers every RPC with a success status.
func withValidDataCoord() Opt {
	mock := newMockDataCoord()
	mock.GetComponentStatesFunc = func(ctx context.Context) (*internalpb.ComponentStates, error) {
		return &internalpb.ComponentStates{
			State:  &internalpb.ComponentInfo{StateCode: internalpb.StateCode_Healthy},
			Status: succStatus(),
		}, nil
	}
	mock.WatchChannelsFunc = func(ctx context.Context, req *datapb.WatchChannelsRequest) (*datapb.WatchChannelsResponse, error) {
		return &datapb.WatchChannelsResponse{Status: succStatus()}, nil
	}
	mock.AcquireSegmentLockFunc = func(ctx context.Context, req *datapb.AcquireSegmentLockRequest) (*commonpb.Status, error) {
		return succStatus(), nil
	}
	mock.ReleaseSegmentLockFunc = func(ctx context.Context, req *datapb.ReleaseSegmentLockRequest) (*commonpb.Status, error) {
		return succStatus(), nil
	}
	mock.FlushFunc = func(ctx context.Context, req *datapb.FlushRequest) (*datapb.FlushResponse, error) {
		return &datapb.FlushResponse{Status: succStatus()}, nil
	}
	mock.ImportFunc = func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error) {
		return &datapb.ImportTaskResponse{Status: succStatus()}, nil
	}
	return withDataCoord(mock)
}
// withStateCode returns an Opt that forces the Core into the given state code.
func withStateCode(code internalpb.StateCode) Opt {
	return func(c *Core) { c.UpdateStateCode(code) }
}

// withHealthyCode marks the Core as healthy.
func withHealthyCode() Opt {
	return withStateCode(internalpb.StateCode_Healthy)
}

// withAbnormalCode marks the Core as abnormal.
func withAbnormalCode() Opt {
	return withStateCode(internalpb.StateCode_Abnormal)
}
// mockScheduler is an IScheduler test double; only AddTask is configurable.
type mockScheduler struct {
	IScheduler
	AddTaskFunc func(t taskV2) error
}

func newMockScheduler() *mockScheduler {
	return &mockScheduler{}
}

// AddTask delegates to AddTaskFunc when set and succeeds otherwise.
func (m mockScheduler) AddTask(t taskV2) error {
	if m.AddTaskFunc == nil {
		return nil
	}
	return m.AddTaskFunc(t)
}

// withScheduler returns an Opt that installs the given scheduler on the Core.
func withScheduler(sched IScheduler) Opt {
	return func(c *Core) {
		c.scheduler = sched
	}
}
// withValidScheduler installs a scheduler whose tasks complete immediately
// and successfully.
func withValidScheduler() Opt {
	s := newMockScheduler()
	s.AddTaskFunc = func(t taskV2) error {
		t.NotifyDone(nil)
		return nil
	}
	return withScheduler(s)
}

// withInvalidScheduler installs a scheduler that rejects every task.
func withInvalidScheduler() Opt {
	s := newMockScheduler()
	s.AddTaskFunc = func(t taskV2) error {
		return errors.New("error mock AddTask")
	}
	return withScheduler(s)
}

// withTaskFailScheduler installs a scheduler that accepts tasks but notifies
// each one of a failure.
func withTaskFailScheduler() Opt {
	s := newMockScheduler()
	s.AddTaskFunc = func(t taskV2) error {
		t.NotifyDone(errors.New("error mock task fail"))
		return nil
	}
	return withScheduler(s)
}
// withTsoAllocator returns an Opt that installs the given TSO allocator.
func withTsoAllocator(alloc tso.Allocator) Opt {
	return func(c *Core) {
		c.tsoAllocator = alloc
	}
}

// withInvalidTsoAllocator installs a TSO allocator whose GenerateTSO
// always fails.
func withInvalidTsoAllocator() Opt {
	a := newMockTsoAllocator()
	a.GenerateTSOF = func(count uint32) (uint64, error) {
		return 0, errors.New("error mock GenerateTSO")
	}
	return withTsoAllocator(a)
}

// withMetricsCacheManager equips the Core with a fresh metrics cache manager.
func withMetricsCacheManager() Opt {
	return func(c *Core) {
		c.metricsCacheManager = metricsinfo.NewMetricsCacheManager()
	}
}
// mockBroker is a Broker test double; each RPC can be overridden through the
// matching *Func field. An unset func will panic if its method is invoked.
type mockBroker struct {
	Broker

	ReleaseCollectionFunc   func(ctx context.Context, collectionID UniqueID) error
	GetQuerySegmentInfoFunc func(ctx context.Context, collectionID int64, segIDs []int64) (retResp *querypb.GetSegmentInfoResponse, retErr error)

	WatchChannelsFunc   func(ctx context.Context, info *watchInfo) error
	UnwatchChannelsFunc func(ctx context.Context, info *watchInfo) error

	AddSegRefLockFunc     func(ctx context.Context, taskID int64, segIDs []int64) error
	ReleaseSegRefLockFunc func(ctx context.Context, taskID int64, segIDs []int64) error

	FlushFunc  func(ctx context.Context, cID int64, segIDs []int64) error
	ImportFunc func(ctx context.Context, req *datapb.ImportTaskRequest) (*datapb.ImportTaskResponse, error)

	DropCollectionIndexFunc func(ctx context.Context, collID UniqueID) error
}

func newMockBroker() *mockBroker {
	return &mockBroker{}
}

// WatchChannels delegates to WatchChannelsFunc.
func (m mockBroker) WatchChannels(ctx context.Context, info *watchInfo) error {
	return m.WatchChannelsFunc(ctx, info)
}

// UnwatchChannels delegates to UnwatchChannelsFunc.
func (m mockBroker) UnwatchChannels(ctx context.Context, info *watchInfo) error {
	return m.UnwatchChannelsFunc(ctx, info)
}

// ReleaseCollection delegates to ReleaseCollectionFunc.
func (m mockBroker) ReleaseCollection(ctx context.Context, collectionID UniqueID) error {
	return m.ReleaseCollectionFunc(ctx, collectionID)
}

// DropCollectionIndex delegates to DropCollectionIndexFunc.
func (m mockBroker) DropCollectionIndex(ctx context.Context, collID UniqueID) error {
	return m.DropCollectionIndexFunc(ctx, collID)
}

// withBroker returns an Opt that installs the given broker on the Core.
func withBroker(b Broker) Opt {
	return func(c *Core) {
		c.broker = b
	}
}
// mockGarbageCollector is a GarbageCollector test double with overridable
// collection and partition gc hooks.
type mockGarbageCollector struct {
	GarbageCollector
	GcCollectionDataFunc func(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error
	GcPartitionDataFunc  func(ctx context.Context, pChannels []string, partition *model.Partition, ts typeutil.Timestamp) error
}

func newMockGarbageCollector() *mockGarbageCollector {
	return &mockGarbageCollector{}
}

// GcCollectionData delegates to GcCollectionDataFunc.
func (m mockGarbageCollector) GcCollectionData(ctx context.Context, coll *model.Collection, ts typeutil.Timestamp) error {
	return m.GcCollectionDataFunc(ctx, coll, ts)
}

// GcPartitionData delegates to GcPartitionDataFunc.
func (m mockGarbageCollector) GcPartitionData(ctx context.Context, pChannels []string, partition *model.Partition, ts typeutil.Timestamp) error {
	return m.GcPartitionDataFunc(ctx, pChannels, partition, ts)
}

// withGarbageCollector returns an Opt that installs the given collector.
func withGarbageCollector(gc GarbageCollector) Opt {
	return func(c *Core) {
		c.garbageCollector = gc
	}
}
// newMockFailStream builds a msgstream mock whose broadcast operations
// always fail; AsProducer is a no-op.
func newMockFailStream() *msgstream.MockMsgStream {
	s := msgstream.NewMockMsgStream()
	s.AsProducerFunc = func(channels []string) {}
	s.BroadcastFunc = func(pack *msgstream.MsgPack) error {
		return errors.New("error mock Broadcast")
	}
	s.BroadcastMarkFunc = func(pack *msgstream.MsgPack) (map[string][]msgstream.MessageID, error) {
		return nil, errors.New("error mock BroadcastMark")
	}
	return s
}

// newMockFailStreamFactory wraps newMockFailStream in a mq factory.
func newMockFailStreamFactory() *msgstream.MockMqFactory {
	f := msgstream.NewMockMqFactory()
	f.NewMsgStreamFunc = func(ctx context.Context) (msgstream.MsgStream, error) {
		return newMockFailStream(), nil
	}
	return f
}

// newTickerWithMockFailStream builds a timetickSync whose streams fail.
func newTickerWithMockFailStream() *timetickSync {
	return newTickerWithFactory(newMockFailStreamFactory())
}
// newMockNormalStream builds a msgstream mock whose broadcast-mark succeeds
// with an empty result; AsProducer is a no-op.
func newMockNormalStream() *msgstream.MockMsgStream {
	s := msgstream.NewMockMsgStream()
	s.AsProducerFunc = func(channels []string) {}
	s.BroadcastMarkFunc = func(pack *msgstream.MsgPack) (map[string][]msgstream.MessageID, error) {
		return map[string][]msgstream.MessageID{}, nil
	}
	return s
}

// newMockNormalStreamFactory wraps newMockNormalStream in a mq factory.
func newMockNormalStreamFactory() *msgstream.MockMqFactory {
	f := msgstream.NewMockMqFactory()
	f.NewMsgStreamFunc = func(ctx context.Context) (msgstream.MsgStream, error) {
		return newMockNormalStream(), nil
	}
	return f
}

// newTickerWithMockNormalStream builds a timetickSync whose streams succeed.
func newTickerWithMockNormalStream() *timetickSync {
	return newTickerWithFactory(newMockNormalStreamFactory())
}
// newTickerWithFactory constructs a timetickSync for tests, backed by the
// given msgstream factory and an empty channel map.
// NOTE: it mutates the global Params (DmlChannelNum = 4).
func newTickerWithFactory(factory msgstream.Factory) *timetickSync {
	Params.InitOnce()
	Params.RootCoordCfg.DmlChannelNum = 4
	chans := map[UniqueID][]string{}
	return newTimeTickSync(context.Background(), TestRootCoordID, factory, chans)
}

View File

@ -18,6 +18,7 @@ package rootcoord
import (
"context"
"errors"
"fmt"
"sync"
@ -31,8 +32,10 @@ import (
"github.com/milvus-io/milvus/internal/util/sessionutil"
)
type proxyCreator func(sess *sessionutil.Session) (types.Proxy, error)
type proxyClientManager struct {
core *Core
creator proxyCreator
lock sync.RWMutex
proxyClient map[int64]types.Proxy
helper proxyClientManagerHelper
@ -46,9 +49,9 @@ var defaultClientManagerHelper = proxyClientManagerHelper{
afterConnect: func() {},
}
func newProxyClientManager(c *Core) *proxyClientManager {
func newProxyClientManager(creator proxyCreator) *proxyClientManager {
return &proxyClientManager{
core: c,
creator: creator,
proxyClient: make(map[int64]types.Proxy),
helper: defaultClientManagerHelper,
}
@ -72,7 +75,7 @@ func (p *proxyClientManager) AddProxyClient(session *sessionutil.Session) {
}
func (p *proxyClientManager) connect(session *sessionutil.Session) {
pc, err := p.core.NewProxyClient(session)
pc, err := p.creator(session)
if err != nil {
log.Warn("failed to create proxy client", zap.String("address", session.Address), zap.Int64("serverID", session.ServerID), zap.Error(err))
return
@ -130,32 +133,7 @@ func (p *proxyClientManager) InvalidateCollectionMetaCache(ctx context.Context,
return group.Wait()
}
func (p *proxyClientManager) ReleaseDQLMessageStream(ctx context.Context, in *proxypb.ReleaseDQLMessageStreamRequest) error {
p.lock.Lock()
defer p.lock.Unlock()
if len(p.proxyClient) == 0 {
log.Warn("proxy client is empty, ReleaseDQLMessageStream will not send to any client")
return nil
}
group := &errgroup.Group{}
for k, v := range p.proxyClient {
k, v := k, v
group.Go(func() error {
sta, err := v.ReleaseDQLMessageStream(ctx, in)
if err != nil {
return fmt.Errorf("ReleaseDQLMessageStream failed, proxyID = %d, err = %s", k, err)
}
if sta.ErrorCode != commonpb.ErrorCode_Success {
return fmt.Errorf("ReleaseDQLMessageStream failed, proxyID = %d, err = %s", k, sta.Reason)
}
return nil
})
}
return group.Wait()
}
// InvalidateCredentialCache TODO: too many codes similar to InvalidateCollectionMetaCache.
func (p *proxyClientManager) InvalidateCredentialCache(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) error {
p.lock.Lock()
defer p.lock.Unlock()
@ -182,6 +160,7 @@ func (p *proxyClientManager) InvalidateCredentialCache(ctx context.Context, requ
return group.Wait()
}
// UpdateCredentialCache TODO: too many codes similar to InvalidateCollectionMetaCache.
func (p *proxyClientManager) UpdateCredentialCache(ctx context.Context, request *proxypb.UpdateCredCacheRequest) error {
p.lock.Lock()
defer p.lock.Unlock()
@ -208,6 +187,7 @@ func (p *proxyClientManager) UpdateCredentialCache(ctx context.Context, request
return group.Wait()
}
// RefreshPolicyInfoCache TODO: too many codes similar to InvalidateCollectionMetaCache.
func (p *proxyClientManager) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.RefreshPolicyInfoCacheRequest) error {
p.lock.Lock()
defer p.lock.Unlock()
@ -221,10 +201,13 @@ func (p *proxyClientManager) RefreshPolicyInfoCache(ctx context.Context, req *pr
for k, v := range p.proxyClient {
k, v := k, v
group.Go(func() error {
_, err := v.RefreshPolicyInfoCache(ctx, req)
status, err := v.RefreshPolicyInfoCache(ctx, req)
if err != nil {
return fmt.Errorf("RefreshPolicyInfoCache failed, proxyID = %d, err = %s", k, err)
}
if status.GetErrorCode() != commonpb.ErrorCode_Success {
return errors.New(status.GetReason())
}
return nil
})
}

View File

@ -19,8 +19,12 @@ package rootcoord
import (
"context"
"errors"
"fmt"
"sync"
"testing"
"github.com/milvus-io/milvus/internal/proto/commonpb"
"github.com/stretchr/testify/assert"
"github.com/milvus-io/milvus/internal/proto/proxypb"
@ -29,6 +33,74 @@ import (
"github.com/milvus-io/milvus/internal/util/sessionutil"
)
// proxyMock is a types.Proxy test double. Set returnError to make RPCs answer
// with an UnexpectedError status, or returnGrpcError to make them fail at the
// transport level. It records the collections whose meta cache was invalidated.
type proxyMock struct {
	types.Proxy
	collArray []string   // collection names passed to InvalidateCollectionMetaCache
	collIDs   []UniqueID // collection IDs passed to InvalidateCollectionMetaCache
	mutex     sync.Mutex // guards collArray, collIDs and the error flags

	returnError     bool // respond with an error status
	returnGrpcError bool // respond with a transport-level error
}

func (p *proxyMock) Stop() error {
	return nil
}

// InvalidateCollectionMetaCache records the request's collection name/ID
// unless one of the error flags is set.
func (p *proxyMock) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	if p.returnError {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UnexpectedError,
		}, nil
	}
	if p.returnGrpcError {
		return nil, fmt.Errorf("grpc error")
	}
	p.collArray = append(p.collArray, request.CollectionName)
	p.collIDs = append(p.collIDs, request.CollectionID)
	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_Success,
	}, nil
}

// GetCollArray returns a copy of the recorded collection names.
func (p *proxyMock) GetCollArray() []string {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	ret := make([]string, 0, len(p.collArray))
	ret = append(ret, p.collArray...)
	return ret
}

// GetCollIDs returns a copy of the recorded collection IDs. A copy is
// returned (consistent with GetCollArray) so callers cannot race with
// concurrent appends to the internal slice.
func (p *proxyMock) GetCollIDs() []UniqueID {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	ret := make([]UniqueID, 0, len(p.collIDs))
	ret = append(ret, p.collIDs...)
	return ret
}

// InvalidateCredentialCache honors the same error flags as
// InvalidateCollectionMetaCache. The flags are read under the mutex to stay
// race-free when tests flip them concurrently.
func (p *proxyMock) InvalidateCredentialCache(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) {
	p.mutex.Lock()
	defer p.mutex.Unlock()
	if p.returnError {
		return &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_UnexpectedError,
		}, nil
	}
	if p.returnGrpcError {
		return nil, fmt.Errorf("grpc error")
	}
	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_Success,
		Reason:    "",
	}, nil
}

// RefreshPolicyInfoCache always succeeds; the error flags are intentionally
// not consulted here.
func (p *proxyMock) RefreshPolicyInfoCache(ctx context.Context, req *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) {
	return &commonpb.Status{
		ErrorCode: commonpb.ErrorCode_Success,
	}, nil
}
func TestProxyClientManager_GetProxyClients(t *testing.T) {
Params.Init()
@ -45,7 +117,7 @@ func TestProxyClientManager_GetProxyClients(t *testing.T) {
},
)
pcm := newProxyClientManager(core)
pcm := newProxyClientManager(core.proxyCreator)
session := &sessionutil.Session{
ServerID: 100,
@ -72,7 +144,7 @@ func TestProxyClientManager_AddProxyClient(t *testing.T) {
},
)
pcm := newProxyClientManager(core)
pcm := newProxyClientManager(core.proxyCreator)
session := &sessionutil.Session{
ServerID: 100,
@ -83,197 +155,145 @@ func TestProxyClientManager_AddProxyClient(t *testing.T) {
}
func TestProxyClientManager_InvalidateCollectionMetaCache(t *testing.T) {
Params.Init()
ctx := context.Background()
core, err := NewCore(ctx, nil)
assert.Nil(t, err)
cli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
assert.Nil(t, err)
defer cli.Close()
core.etcdCli = cli
pcm := newProxyClientManager(core)
ch := make(chan struct{})
pcm.helper = proxyClientManagerHelper{
afterConnect: func() { ch <- struct{}{} },
}
err = pcm.InvalidateCollectionMetaCache(ctx, nil)
assert.NoError(t, err)
core.SetNewProxyClient(
func(se *sessionutil.Session) (types.Proxy, error) {
return &proxyMock{}, nil
},
)
session := &sessionutil.Session{
ServerID: 100,
Address: "localhost",
}
pcm.AddProxyClient(session)
<-ch
err = pcm.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{
CollectionName: "collection0",
t.Run("empty proxy list", func(t *testing.T) {
ctx := context.Background()
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{}}
err := pcm.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{})
assert.NoError(t, err)
})
assert.NoError(t, err)
// test releaseDQLMessageStream failed
for _, v := range pcm.proxyClient {
v.(*proxyMock).returnError = true
}
err = pcm.InvalidateCollectionMetaCache(ctx, nil)
assert.Error(t, err)
t.Run("mock rpc error", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return succStatus(), errors.New("error mock InvalidateCollectionMetaCache")
}
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{
TestProxyID: p1,
}}
err := pcm.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{})
assert.Error(t, err)
})
for _, v := range pcm.proxyClient {
v.(*proxyMock).returnGrpcError = true
}
err = pcm.InvalidateCollectionMetaCache(ctx, nil)
assert.Error(t, err)
}
t.Run("mock error code", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return failStatus(commonpb.ErrorCode_UnexpectedError, "error mock error code"), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{
TestProxyID: p1,
}}
err := pcm.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{})
assert.Error(t, err)
})
func TestProxyClientManager_ReleaseDQLMessageStream(t *testing.T) {
Params.Init()
ctx := context.Background()
core, err := NewCore(ctx, nil)
assert.Nil(t, err)
cli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
assert.Nil(t, err)
defer cli.Close()
core.etcdCli = cli
pcm := newProxyClientManager(core)
ch := make(chan struct{})
pcm.helper = proxyClientManagerHelper{
afterConnect: func() { ch <- struct{}{} },
}
err = pcm.ReleaseDQLMessageStream(ctx, nil)
assert.NoError(t, err)
core.SetNewProxyClient(
func(se *sessionutil.Session) (types.Proxy, error) {
return &proxyMock{}, nil
},
)
session := &sessionutil.Session{
ServerID: 100,
Address: "localhost",
}
pcm.AddProxyClient(session)
<-ch
err = pcm.ReleaseDQLMessageStream(ctx, nil)
assert.NoError(t, err)
// test releaseDQLMessageStream failed
for _, v := range pcm.proxyClient {
v.(*proxyMock).returnError = true
}
err = pcm.ReleaseDQLMessageStream(ctx, nil)
assert.Error(t, err)
for _, v := range pcm.proxyClient {
v.(*proxyMock).returnGrpcError = true
}
err = pcm.ReleaseDQLMessageStream(ctx, nil)
assert.Error(t, err)
t.Run("normal case", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.InvalidateCollectionMetaCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
return succStatus(), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{
TestProxyID: p1,
}}
err := pcm.InvalidateCollectionMetaCache(ctx, &proxypb.InvalidateCollMetaCacheRequest{})
assert.NoError(t, err)
})
}
func TestProxyClientManager_InvalidateCredentialCache(t *testing.T) {
Params.Init()
ctx := context.Background()
t.Run("empty proxy list", func(t *testing.T) {
ctx := context.Background()
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{}}
err := pcm.InvalidateCredentialCache(ctx, &proxypb.InvalidateCredCacheRequest{})
assert.NoError(t, err)
})
core, err := NewCore(ctx, nil)
assert.Nil(t, err)
cli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
assert.Nil(t, err)
defer cli.Close()
core.etcdCli = cli
t.Run("mock rpc error", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.InvalidateCredentialCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) {
return succStatus(), errors.New("error mock InvalidateCredentialCache")
}
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{
TestProxyID: p1,
}}
err := pcm.InvalidateCredentialCache(ctx, &proxypb.InvalidateCredCacheRequest{})
assert.Error(t, err)
})
pcm := newProxyClientManager(core)
t.Run("mock error code", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.InvalidateCredentialCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) {
return failStatus(commonpb.ErrorCode_UnexpectedError, "error mock error code"), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{
TestProxyID: p1,
}}
err := pcm.InvalidateCredentialCache(ctx, &proxypb.InvalidateCredCacheRequest{})
assert.Error(t, err)
})
ch := make(chan struct{})
pcm.helper = proxyClientManagerHelper{
afterConnect: func() { ch <- struct{}{} },
}
err = pcm.InvalidateCredentialCache(ctx, nil)
assert.NoError(t, err)
core.SetNewProxyClient(
func(se *sessionutil.Session) (types.Proxy, error) {
return &proxyMock{}, nil
},
)
session := &sessionutil.Session{
ServerID: 100,
Address: "localhost",
}
pcm.AddProxyClient(session)
<-ch
err = pcm.InvalidateCredentialCache(ctx, nil)
assert.NoError(t, err)
// test releaseDQLMessageStream failed
for _, v := range pcm.proxyClient {
v.(*proxyMock).returnError = true
}
err = pcm.InvalidateCredentialCache(ctx, nil)
assert.Error(t, err)
for _, v := range pcm.proxyClient {
v.(*proxyMock).returnGrpcError = true
}
err = pcm.InvalidateCredentialCache(ctx, nil)
assert.Error(t, err)
t.Run("normal case", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.InvalidateCredentialCacheFunc = func(ctx context.Context, request *proxypb.InvalidateCredCacheRequest) (*commonpb.Status, error) {
return succStatus(), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{
TestProxyID: p1,
}}
err := pcm.InvalidateCredentialCache(ctx, &proxypb.InvalidateCredCacheRequest{})
assert.NoError(t, err)
})
}
func TestProxyClientManager_RefreshPolicyInfoCache(t *testing.T) {
Params.Init()
ctx := context.Background()
t.Run("empty proxy list", func(t *testing.T) {
ctx := context.Background()
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{}}
err := pcm.RefreshPolicyInfoCache(ctx, &proxypb.RefreshPolicyInfoCacheRequest{})
assert.NoError(t, err)
})
core, err := NewCore(ctx, nil)
assert.Nil(t, err)
cli, err := etcd.GetEtcdClient(&Params.EtcdCfg)
assert.Nil(t, err)
defer cli.Close()
core.etcdCli = cli
t.Run("mock rpc error", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.RefreshPolicyInfoCacheFunc = func(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) {
return succStatus(), errors.New("error mock RefreshPolicyInfoCache")
}
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{
TestProxyID: p1,
}}
err := pcm.RefreshPolicyInfoCache(ctx, &proxypb.RefreshPolicyInfoCacheRequest{})
assert.Error(t, err)
})
pcm := newProxyClientManager(core)
ch := make(chan struct{})
pcm.helper = proxyClientManagerHelper{
afterConnect: func() { ch <- struct{}{} },
}
err = pcm.RefreshPolicyInfoCache(ctx, nil)
assert.NoError(t, err)
core.SetNewProxyClient(
func(se *sessionutil.Session) (types.Proxy, error) {
return &proxyMock{}, nil
},
)
session := &sessionutil.Session{
ServerID: 100,
Address: "localhost",
}
pcm.AddProxyClient(session)
<-ch
err = pcm.RefreshPolicyInfoCache(ctx, nil)
assert.NoError(t, err)
t.Run("mock error code", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.RefreshPolicyInfoCacheFunc = func(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) {
return failStatus(commonpb.ErrorCode_UnexpectedError, "error mock error code"), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{
TestProxyID: p1,
}}
err := pcm.RefreshPolicyInfoCache(ctx, &proxypb.RefreshPolicyInfoCacheRequest{})
assert.Error(t, err)
})
t.Run("normal case", func(t *testing.T) {
ctx := context.Background()
p1 := newMockProxy()
p1.RefreshPolicyInfoCacheFunc = func(ctx context.Context, request *proxypb.RefreshPolicyInfoCacheRequest) (*commonpb.Status, error) {
return succStatus(), nil
}
pcm := &proxyClientManager{proxyClient: map[int64]types.Proxy{
TestProxyID: p1,
}}
err := pcm.RefreshPolicyInfoCache(ctx, &proxypb.RefreshPolicyInfoCacheRequest{})
assert.NoError(t, err)
})
}

View File

@ -0,0 +1,57 @@
package rootcoord
import (
"context"
"time"
"github.com/milvus-io/milvus/internal/log"
"go.uber.org/zap"
)
// baseRedoTask runs a sequence of synchronous steps and, once they all
// succeed, fires a sequence of asynchronous (background) steps.
type baseRedoTask struct {
	syncTodoStep  []Step // executed inline by Execute; a failure aborts the task
	asyncTodoStep []Step // executed in the background after all sync steps succeed
}

func newBaseRedoTask() *baseRedoTask {
	return &baseRedoTask{
		syncTodoStep:  []Step{},
		asyncTodoStep: []Step{},
	}
}
// AddSyncStep appends a step to be executed synchronously by Execute.
func (b *baseRedoTask) AddSyncStep(step Step) {
	b.syncTodoStep = append(b.syncTodoStep, step)
}

// AddAsyncStep appends a step to be executed in the background after all
// synchronous steps have succeeded.
func (b *baseRedoTask) AddAsyncStep(step Step) {
	b.asyncTodoStep = append(b.asyncTodoStep, step)
}
// redoAsyncSteps runs the async steps in order under a fresh 5-second
// timeout, stopping at the first failure.
func (b *baseRedoTask) redoAsyncSteps() {
	// You cannot just use the ctx of task, since it will be canceled after response is returned.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	for _, todo := range b.asyncTodoStep {
		if err := todo.Execute(ctx); err != nil {
			// Remaining steps are skipped; gc relies on the collection meta.
			// TODO: add ddl logger after other service can be idempotent enough, then you can do separate steps
			// independently.
			log.Error("failed to execute step, garbage may be generated", zap.Error(err))
			return
		}
	}
}
// Execute runs every synchronous step in order, returning the first error.
// Only when all of them succeed are the async steps launched in a goroutine;
// the caller does not wait for them.
func (b *baseRedoTask) Execute(ctx context.Context) error {
	for _, todo := range b.syncTodoStep {
		if err := todo.Execute(ctx); err != nil {
			log.Error("failed to execute step", zap.Error(err))
			return err
		}
	}
	go b.redoAsyncSteps()
	return nil
}

View File

@ -0,0 +1,119 @@
package rootcoord
import (
"context"
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
// mockFailStep is a step that records its invocation and always fails.
type mockFailStep struct {
	calledChan chan struct{} // signaled (buffer 1) on each Execute
	called     bool
}

func newMockFailStep() *mockFailStep {
	return &mockFailStep{
		calledChan: make(chan struct{}, 1),
		called:     false,
	}
}

// Execute marks the step as called, signals calledChan, and returns an error.
func (m *mockFailStep) Execute(ctx context.Context) error {
	m.called = true
	m.calledChan <- struct{}{}
	return errors.New("error mock Execute")
}
// mockNormalStep is a step that records its invocation and always succeeds.
type mockNormalStep struct {
	calledChan chan struct{} // signaled (buffer 1) on each Execute
	called     bool
}

func newMockNormalStep() *mockNormalStep {
	return &mockNormalStep{
		calledChan: make(chan struct{}, 1),
		called:     false,
	}
}

// Execute marks the step as called, signals calledChan, and returns nil.
func (m *mockNormalStep) Execute(ctx context.Context) error {
	m.called = true
	m.calledChan <- struct{}{}
	return nil
}
// Test_baseRedoTask_redoAsyncSteps verifies that async steps run in order and
// that execution stops at the first failing step.
func Test_baseRedoTask_redoAsyncSteps(t *testing.T) {
	t.Run("partial error", func(t *testing.T) {
		redo := newBaseRedoTask()
		steps := []Step{newMockNormalStep(), newMockFailStep(), newMockNormalStep()}
		for _, step := range steps {
			redo.AddAsyncStep(step)
		}
		// redoAsyncSteps is invoked directly (not through Execute), so it runs
		// synchronously here and the called flags can be checked immediately.
		redo.redoAsyncSteps()
		assert.True(t, steps[0].(*mockNormalStep).called)
		// The step after the failing one must not have been executed.
		assert.False(t, steps[2].(*mockNormalStep).called)
	})

	t.Run("normal case", func(t *testing.T) {
		redo := newBaseRedoTask()
		n := 10
		steps := make([]Step, 0, n)
		for i := 0; i < n; i++ {
			steps = append(steps, newMockNormalStep())
		}
		for _, step := range steps {
			redo.AddAsyncStep(step)
		}
		// Synchronous direct call again: all steps succeed, so every one of
		// them must have been executed by the time this returns.
		redo.redoAsyncSteps()
		for _, step := range steps {
			assert.True(t, step.(*mockNormalStep).called)
		}
	})
}
// Test_baseRedoTask_Execute verifies that a failing sync step aborts the task
// before any async step runs, and that a fully successful task eventually
// executes every async step in the background.
func Test_baseRedoTask_Execute(t *testing.T) {
	t.Run("sync not finished, no async task", func(t *testing.T) {
		redo := newBaseRedoTask()
		syncSteps := []Step{newMockFailStep()}
		asyncNum := 10
		asyncSteps := make([]Step, 0, asyncNum)
		for i := 0; i < asyncNum; i++ {
			asyncSteps = append(asyncSteps, newMockNormalStep())
		}
		for _, step := range asyncSteps {
			redo.AddAsyncStep(step)
		}
		for _, step := range syncSteps {
			redo.AddSyncStep(step)
		}
		err := redo.Execute(context.Background())
		assert.Error(t, err)
		// Async steps are only launched after all sync steps succeed, so
		// none of them may have run.
		for _, step := range asyncSteps {
			assert.False(t, step.(*mockNormalStep).called)
		}
	})

	// TODO: sync finished, but some async fail.

	t.Run("normal case", func(t *testing.T) {
		redo := newBaseRedoTask()
		syncNum := 10
		syncSteps := make([]Step, 0, syncNum)
		asyncNum := 10
		asyncSteps := make([]Step, 0, asyncNum)
		for i := 0; i < syncNum; i++ {
			syncSteps = append(syncSteps, newMockNormalStep())
		}
		for i := 0; i < asyncNum; i++ {
			asyncSteps = append(asyncSteps, newMockNormalStep())
		}
		for _, step := range asyncSteps {
			redo.AddAsyncStep(step)
		}
		for _, step := range syncSteps {
			redo.AddSyncStep(step)
		}
		err := redo.Execute(context.Background())
		assert.NoError(t, err)
		// Async steps run in a background goroutine; block on each step's
		// calledChan to synchronize before checking the flags.
		for _, step := range asyncSteps {
			<-step.(*mockNormalStep).calledChan
			assert.True(t, step.(*mockNormalStep).called)
		}
	})
}

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More