Refactor query node and query service

Main changes:
1. Add ddBuffer and save binlog (see the buffering sketch just before the diffs).
2. Convert to insertData.
3. Change dataFields data to Data, dim to Dim.
4. Add float vector and binary vector.
5. Deserialize data and convert to InsertData.
6. Move all data into InsertData.
7. Add insert buffer and hash string.
8. Add MinIO KV in insertBuffer node.
9. Init write node insertBuffer maxSize from writeNode.yaml.
10. Add ddBuffer.
11. Add ddBuffer binlog and MinIO.
12. Add ddNode unit test.
13. Remove a redundant call.
14. Increase test time.
15. Delete the ddl const; use the request's timestamp instead.

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
pull/4973/head^2
bigsheeper 2020-12-24 15:38:29 +08:00 committed by yefu.chen
parent f5198bcaee
commit 4ecdea698f
18 changed files with 1577 additions and 1443 deletions
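
Before the per-file diffs: the pattern behind items 1, 10, and 11 is a per-collection buffer that accumulates serialized DDL requests and, once a configured size is reached, flushes them as binlogs to object storage (the full implementation is in the ddNode diff below). A minimal, runnable sketch of that pattern; everything here is illustrative and simplified, not the actual Milvus types:

// Sketch of the per-collection dd buffer (illustrative, simplified).
package main

import "fmt"

type UniqueID = int64
type Timestamp = uint64

// ddData accumulates serialized DDL requests for one collection.
type ddData struct {
	requests   []string
	timestamps []Timestamp
}

type ddBuffer struct {
	data    map[UniqueID]*ddData
	maxSize int
}

// size counts buffered requests across all collections.
func (d *ddBuffer) size() int {
	n := 0
	for _, data := range d.data {
		n += len(data.requests)
	}
	return n
}

func (d *ddBuffer) full() bool { return d.size() >= d.maxSize }

// appendReq buffers one request; the caller flushes when full() reports true.
func (d *ddBuffer) appendReq(collID UniqueID, req string, ts Timestamp) {
	if _, ok := d.data[collID]; !ok {
		d.data[collID] = &ddData{}
	}
	d.data[collID].requests = append(d.data[collID].requests, req)
	d.data[collID].timestamps = append(d.data[collID].timestamps, ts)
}

func main() {
	buf := &ddBuffer{data: map[UniqueID]*ddData{}, maxSize: 2}
	buf.appendReq(0, "CreateCollectionRequest", 1)
	buf.appendReq(0, "DropCollectionRequest", 2)
	if buf.full() {
		// ddNode serializes each collection's data into ts/ddl binlogs,
		// saves them to MinIO, and then clears the buffer.
		fmt.Println("flushing", buf.size(), "requests to binlog")
		buf.data = map[UniqueID]*ddData{}
	}
}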


@@ -27,7 +27,7 @@ func (m *MockWriteNodeClient) FlushSegment(segmentID UniqueID) error {
func (m *MockWriteNodeClient) DescribeSegment(segmentID UniqueID) (*writerclient.SegmentDescription, error) {
now := time.Now()
if now.Sub(m.flushTime).Seconds() > 3 {
if now.Sub(m.flushTime).Seconds() > 2 {
return &writerclient.SegmentDescription{
SegmentID: segmentID,
IsClosed: true,
@@ -67,7 +67,7 @@ func (m *MockBuildIndexClient) BuildIndexWithoutID(columnDataPaths []string, typ
func (m *MockBuildIndexClient) DescribeIndex(indexID UniqueID) (*buildindexclient.IndexDescription, error) {
now := time.Now()
if now.Sub(m.buildTime).Seconds() > 3 {
if now.Sub(m.buildTime).Seconds() > 2 {
return &buildindexclient.IndexDescription{
ID: 1,
Status: indexbuilderpb.IndexStatus_FINISHED,
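
Both mock clients above gate their "finished" response on the wall-clock time elapsed since the triggering call; this change only tightens the window from 3 s to 2 s. A small sketch of the same gate with an injectable clock (an assumption for illustration, not part of this commit) shows how such a mock can be exercised without real sleeps:

// Sketch of the time-gated mock with an injectable clock (illustrative).
package main

import (
	"fmt"
	"time"
)

type mockWriteNode struct {
	flushTime time.Time
	now       func() time.Time // injectable clock; the real mock uses time.Now
}

// segmentClosed mirrors the gate above: the segment reads as closed only
// after more than two seconds have elapsed since FlushSegment was called.
func (m *mockWriteNode) segmentClosed() bool {
	return m.now().Sub(m.flushTime).Seconds() > 2
}

func main() {
	base := time.Now()
	m := &mockWriteNode{flushTime: base, now: func() time.Time { return base }}
	fmt.Println(m.segmentClosed()) // false: no time has "passed" yet
	m.now = func() time.Time { return base.Add(3 * time.Second) }
	fmt.Println(m.segmentClosed()) // true: the clock advanced past the window
}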


@@ -1,373 +0,0 @@
package master
import (
"context"
"log"
"testing"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
)
func TestMaster_CollectionTask(t *testing.T) {
Init()
refreshMasterAddress()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{Params.EtcdAddress}})
assert.Nil(t, err)
_, err = etcdCli.Delete(ctx, "/test/root", clientv3.WithPrefix())
assert.Nil(t, err)
Params = ParamTable{
Address: Params.Address,
Port: Params.Port,
EtcdAddress: Params.EtcdAddress,
MetaRootPath: "/test/root/meta",
KvRootPath: "/test/root/kv",
PulsarAddress: Params.PulsarAddress,
ProxyIDList: []typeutil.UniqueID{1, 2},
WriteNodeIDList: []typeutil.UniqueID{3, 4},
TopicNum: 5,
QueryNodeNum: 3,
SoftTimeTickBarrierInterval: 300,
// segment
SegmentSize: 536870912 / 1024 / 1024,
SegmentSizeFactor: 0.75,
DefaultRecordSize: 1024,
MinSegIDAssignCnt: 1048576 / 1024,
MaxSegIDAssignCnt: Params.MaxSegIDAssignCnt,
SegIDAssignExpiration: 2000,
// msgChannel
ProxyTimeTickChannelNames: []string{"proxy1", "proxy2"},
WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
DDChannelNames: []string{"dd1", "dd2"},
InsertChannelNames: []string{"dm0", "dm1"},
K2SChannelNames: []string{"k2s0", "k2s1"},
QueryNodeStatsChannelName: "statistic",
MsgChannelSubName: Params.MsgChannelSubName,
}
svr, err := CreateServer(ctx)
assert.Nil(t, err)
err = svr.Run(int64(Params.Port))
assert.Nil(t, err)
conn, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
cli := masterpb.NewMasterClient(conn)
sch := schemapb.CollectionSchema{
Name: "col1",
Description: "test collection",
AutoID: false,
Fields: []*schemapb.FieldSchema{
{
Name: "col1_f1",
Description: "test collection filed 1",
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "col1_f1_tk1",
Value: "col1_f1_tv1",
},
{
Key: "col1_f1_tk2",
Value: "col1_f1_tv2",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "col1_f1_ik1",
Value: "col1_f1_iv1",
},
{
Key: "col1_f1_ik2",
Value: "col1_f1_iv2",
},
},
},
{
Name: "col1_f2",
Description: "test collection filed 2",
DataType: schemapb.DataType_VECTOR_BINARY,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "col1_f2_tk1",
Value: "col1_f2_tv1",
},
{
Key: "col1_f2_tk2",
Value: "col1_f2_tv2",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "col1_f2_ik1",
Value: "col1_f2_iv1",
},
{
Key: "col1_f2_ik2",
Value: "col1_f2_iv2",
},
},
},
},
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
req := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
log.Printf("... [Create] collection col1\n")
st, err := cli.CreateCollection(ctx, &req)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
// HasCollection
reqHasCollection := internalpb.HasCollectionRequest{
MsgType: internalpb.MsgType_kHasCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
CollectionName: &servicepb.CollectionName{
CollectionName: "col1",
},
}
// "col1" is true
log.Printf("... [Has] collection col1\n")
boolResp, err := cli.HasCollection(ctx, &reqHasCollection)
assert.Nil(t, err)
assert.Equal(t, true, boolResp.Value)
assert.Equal(t, boolResp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
// "colNotExist" is false
reqHasCollection.CollectionName.CollectionName = "colNotExist"
boolResp, err = cli.HasCollection(ctx, &reqHasCollection)
assert.Nil(t, err)
assert.Equal(t, boolResp.Value, false)
assert.Equal(t, boolResp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
// error
reqHasCollection.Timestamp = Timestamp(10)
reqHasCollection.CollectionName.CollectionName = "col1"
boolResp, err = cli.HasCollection(ctx, &reqHasCollection)
assert.Nil(t, err)
assert.NotEqual(t, boolResp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
// ShowCollection
reqShowCollection := internalpb.ShowCollectionRequest{
MsgType: internalpb.MsgType_kShowCollections,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
}
listResp, err := cli.ShowCollections(ctx, &reqShowCollection)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_SUCCESS, listResp.Status.ErrorCode)
assert.Equal(t, 1, len(listResp.Values))
assert.Equal(t, "col1", listResp.Values[0])
reqShowCollection.Timestamp = Timestamp(10)
listResp, err = cli.ShowCollections(ctx, &reqShowCollection)
assert.Nil(t, err)
assert.NotEqual(t, commonpb.ErrorCode_SUCCESS, listResp.Status.ErrorCode)
// CreateCollection Test
collMeta, err := svr.metaTable.GetCollectionByName(sch.Name)
assert.Nil(t, err)
t.Logf("collection id = %d", collMeta.ID)
assert.Equal(t, collMeta.CreateTime, uint64(11))
assert.Equal(t, collMeta.Schema.Name, "col1")
assert.Equal(t, collMeta.Schema.AutoID, false)
assert.Equal(t, len(collMeta.Schema.Fields), 2)
assert.Equal(t, collMeta.Schema.Fields[0].Name, "col1_f1")
assert.Equal(t, collMeta.Schema.Fields[1].Name, "col1_f2")
assert.Equal(t, collMeta.Schema.Fields[0].DataType, schemapb.DataType_VECTOR_FLOAT)
assert.Equal(t, collMeta.Schema.Fields[1].DataType, schemapb.DataType_VECTOR_BINARY)
assert.Equal(t, len(collMeta.Schema.Fields[0].TypeParams), 2)
assert.Equal(t, len(collMeta.Schema.Fields[0].IndexParams), 2)
assert.Equal(t, len(collMeta.Schema.Fields[1].TypeParams), 2)
assert.Equal(t, len(collMeta.Schema.Fields[1].IndexParams), 2)
assert.Equal(t, int64(100), collMeta.Schema.Fields[0].FieldID)
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[0].Key, "col1_f1_tk1")
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[1].Key, "col1_f1_tk2")
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[0].Value, "col1_f1_tv1")
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[1].Value, "col1_f1_tv2")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[0].Key, "col1_f1_ik1")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[1].Key, "col1_f1_ik2")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[0].Value, "col1_f1_iv1")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[1].Value, "col1_f1_iv2")
assert.Equal(t, int64(101), collMeta.Schema.Fields[1].FieldID)
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[0].Key, "col1_f2_tk1")
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[1].Key, "col1_f2_tk2")
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[0].Value, "col1_f2_tv1")
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[1].Value, "col1_f2_tv2")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[0].Key, "col1_f2_ik1")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[1].Key, "col1_f2_ik2")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[0].Value, "col1_f2_iv1")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[1].Value, "col1_f2_iv2")
req.Timestamp = Timestamp(10)
st, err = cli.CreateCollection(ctx, &req)
assert.Nil(t, err)
assert.NotEqual(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
// DescribeCollection Test
reqDescribe := &internalpb.DescribeCollectionRequest{
MsgType: internalpb.MsgType_kDescribeCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
CollectionName: &servicepb.CollectionName{
CollectionName: "col1",
},
}
des, err := cli.DescribeCollection(ctx, reqDescribe)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_SUCCESS, des.Status.ErrorCode)
assert.Equal(t, "col1", des.Schema.Name)
assert.Equal(t, false, des.Schema.AutoID)
assert.Equal(t, 2, len(des.Schema.Fields))
assert.Equal(t, "col1_f1", des.Schema.Fields[0].Name)
assert.Equal(t, "col1_f2", des.Schema.Fields[1].Name)
assert.Equal(t, schemapb.DataType_VECTOR_FLOAT, des.Schema.Fields[0].DataType)
assert.Equal(t, schemapb.DataType_VECTOR_BINARY, des.Schema.Fields[1].DataType)
assert.Equal(t, 2, len(des.Schema.Fields[0].TypeParams))
assert.Equal(t, 2, len(des.Schema.Fields[0].IndexParams))
assert.Equal(t, 2, len(des.Schema.Fields[1].TypeParams))
assert.Equal(t, 2, len(des.Schema.Fields[1].IndexParams))
assert.Equal(t, int64(100), des.Schema.Fields[0].FieldID)
assert.Equal(t, "col1_f1_tk1", des.Schema.Fields[0].TypeParams[0].Key)
assert.Equal(t, "col1_f1_tv1", des.Schema.Fields[0].TypeParams[0].Value)
assert.Equal(t, "col1_f1_ik1", des.Schema.Fields[0].IndexParams[0].Key)
assert.Equal(t, "col1_f1_iv1", des.Schema.Fields[0].IndexParams[0].Value)
assert.Equal(t, "col1_f1_tk2", des.Schema.Fields[0].TypeParams[1].Key)
assert.Equal(t, "col1_f1_tv2", des.Schema.Fields[0].TypeParams[1].Value)
assert.Equal(t, "col1_f1_ik2", des.Schema.Fields[0].IndexParams[1].Key)
assert.Equal(t, "col1_f1_iv2", des.Schema.Fields[0].IndexParams[1].Value)
assert.Equal(t, int64(101), des.Schema.Fields[1].FieldID)
assert.Equal(t, "col1_f2_tk1", des.Schema.Fields[1].TypeParams[0].Key)
assert.Equal(t, "col1_f2_tv1", des.Schema.Fields[1].TypeParams[0].Value)
assert.Equal(t, "col1_f2_ik1", des.Schema.Fields[1].IndexParams[0].Key)
assert.Equal(t, "col1_f2_iv1", des.Schema.Fields[1].IndexParams[0].Value)
assert.Equal(t, "col1_f2_tk2", des.Schema.Fields[1].TypeParams[1].Key)
assert.Equal(t, "col1_f2_tv2", des.Schema.Fields[1].TypeParams[1].Value)
assert.Equal(t, "col1_f2_ik2", des.Schema.Fields[1].IndexParams[1].Key)
assert.Equal(t, "col1_f2_iv2", des.Schema.Fields[1].IndexParams[1].Value)
reqDescribe.CollectionName.CollectionName = "colNotExist"
des, err = cli.DescribeCollection(ctx, reqDescribe)
assert.Nil(t, err)
assert.NotEqual(t, commonpb.ErrorCode_SUCCESS, des.Status.ErrorCode)
log.Printf(des.Status.Reason)
reqDescribe.CollectionName.CollectionName = "col1"
reqDescribe.Timestamp = Timestamp(10)
des, err = cli.DescribeCollection(ctx, reqDescribe)
assert.Nil(t, err)
assert.NotEqual(t, commonpb.ErrorCode_SUCCESS, des.Status.ErrorCode)
log.Printf(des.Status.Reason)
// ------------------------------DropCollectionTask---------------------------
log.Printf("... [Drop] collection col1\n")
ser := servicepb.CollectionName{CollectionName: "col1"}
reqDrop := internalpb.DropCollectionRequest{
MsgType: internalpb.MsgType_kDropCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
CollectionName: &ser,
}
// DropCollection
st, err = cli.DropCollection(ctx, &reqDrop)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
collMeta, err = svr.metaTable.GetCollectionByName(sch.Name)
assert.NotNil(t, err)
// HasCollection "col1" is false
reqHasCollection.Timestamp = Timestamp(11)
reqHasCollection.CollectionName.CollectionName = "col1"
boolResp, err = cli.HasCollection(ctx, &reqHasCollection)
assert.Nil(t, err)
assert.Equal(t, false, boolResp.Value)
assert.Equal(t, commonpb.ErrorCode_SUCCESS, boolResp.Status.ErrorCode)
// ShowCollections
reqShowCollection.Timestamp = Timestamp(11)
listResp, err = cli.ShowCollections(ctx, &reqShowCollection)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_SUCCESS, listResp.Status.ErrorCode)
assert.Equal(t, 0, len(listResp.Values))
// Drop again
st, err = cli.DropCollection(ctx, &reqDrop)
assert.Nil(t, err)
assert.NotEqual(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
// Create "col1"
req.Timestamp = Timestamp(11)
st, err = cli.CreateCollection(ctx, &req)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
boolResp, err = cli.HasCollection(ctx, &reqHasCollection)
assert.Nil(t, err)
assert.Equal(t, true, boolResp.Value)
assert.Equal(t, commonpb.ErrorCode_SUCCESS, boolResp.Status.ErrorCode)
// Create "col2"
sch.Name = "col2"
schemaBytes, err = proto.Marshal(&sch)
assert.Nil(t, err)
req = internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
st, err = cli.CreateCollection(ctx, &req)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_SUCCESS, st.ErrorCode)
// Show Collections
listResp, err = cli.ShowCollections(ctx, &reqShowCollection)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_SUCCESS, listResp.Status.ErrorCode)
assert.Equal(t, 2, len(listResp.Values))
assert.ElementsMatch(t, []string{"col1", "col2"}, listResp.Values)
svr.Close()
}


@@ -1,151 +0,0 @@
package master
import (
"context"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
)
func TestMaster_ConfigTask(t *testing.T) {
Init()
refreshMasterAddress()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{Params.EtcdAddress}})
require.Nil(t, err)
_, err = etcdCli.Delete(ctx, "/test/root", clientv3.WithPrefix())
require.Nil(t, err)
Params = ParamTable{
Address: Params.Address,
Port: Params.Port,
EtcdAddress: Params.EtcdAddress,
MetaRootPath: "/test/root",
PulsarAddress: Params.PulsarAddress,
ProxyIDList: []typeutil.UniqueID{1, 2},
WriteNodeIDList: []typeutil.UniqueID{3, 4},
TopicNum: 5,
QueryNodeNum: 3,
SoftTimeTickBarrierInterval: 300,
// segment
SegmentSize: 536870912 / 1024 / 1024,
SegmentSizeFactor: 0.75,
DefaultRecordSize: 1024,
MinSegIDAssignCnt: 1048576 / 1024,
MaxSegIDAssignCnt: Params.MaxSegIDAssignCnt,
SegIDAssignExpiration: 2000,
// msgChannel
ProxyTimeTickChannelNames: []string{"proxy1", "proxy2"},
WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
InsertChannelNames: []string{"dm0", "dm1"},
K2SChannelNames: []string{"k2s0", "k2s1"},
QueryNodeStatsChannelName: "statistic",
MsgChannelSubName: Params.MsgChannelSubName,
}
svr, err := CreateServer(ctx)
require.Nil(t, err)
err = svr.Run(int64(Params.Port))
defer svr.Close()
require.Nil(t, err)
conn, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
require.Nil(t, err)
defer conn.Close()
cli := masterpb.NewMasterClient(conn)
testKeys := []string{
"/etcd/address",
"/master/port",
"/master/proxyidlist",
"/master/segmentthresholdfactor",
"/pulsar/token",
"/reader/stopflag",
"/proxy/timezone",
"/proxy/network/address",
"/proxy/storage/path",
"/storage/accesskey",
}
testVals := []string{
"localhost",
"53100",
"[1 2]",
"0.75",
"eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY",
"-1",
"UTC+8",
"0.0.0.0",
"/var/lib/milvus",
"",
}
sc := SysConfig{kv: svr.kvBase}
sc.InitFromFile(".")
configRequest := &internalpb.SysConfigRequest{
MsgType: internalpb.MsgType_kGetSysConfigs,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Keys: testKeys,
KeyPrefixes: []string{},
}
response, err := cli.GetSysConfigs(ctx, configRequest)
assert.Nil(t, err)
assert.ElementsMatch(t, testKeys, response.Keys)
assert.ElementsMatch(t, testVals, response.Values)
assert.Equal(t, len(response.GetKeys()), len(response.GetValues()))
configRequest = &internalpb.SysConfigRequest{
MsgType: internalpb.MsgType_kGetSysConfigs,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Keys: []string{},
KeyPrefixes: []string{"/master"},
}
response, err = cli.GetSysConfigs(ctx, configRequest)
assert.Nil(t, err)
for i := range response.GetKeys() {
assert.True(t, strings.HasPrefix(response.GetKeys()[i], "/master"))
}
assert.Equal(t, len(response.GetKeys()), len(response.GetValues()))
t.Run("Test duplicate keys and key prefix", func(t *testing.T) {
configRequest.Keys = []string{}
configRequest.KeyPrefixes = []string{"/master"}
resp, err := cli.GetSysConfigs(ctx, configRequest)
require.Nil(t, err)
assert.Equal(t, len(resp.GetKeys()), len(resp.GetValues()))
assert.NotEqual(t, 0, len(resp.GetKeys()))
configRequest.Keys = []string{"/master/port"}
configRequest.KeyPrefixes = []string{"/master"}
respDup, err := cli.GetSysConfigs(ctx, configRequest)
require.Nil(t, err)
assert.Equal(t, len(respDup.GetKeys()), len(respDup.GetValues()))
assert.NotEqual(t, 0, len(respDup.GetKeys()))
assert.Equal(t, len(respDup.GetKeys()), len(resp.GetKeys()))
})
}


@@ -443,8 +443,39 @@ func (s *Master) AssignSegmentID(ctx context.Context, request *internalpb.Assign
}, nil
}
func (s *Master) CreateIndex(context.Context, *internalpb.CreateIndexRequest) (*commonpb.Status, error) {
return nil, nil
func (s *Master) CreateIndex(ctx context.Context, req *internalpb.CreateIndexRequest) (*commonpb.Status, error) {
task := &createIndexTask{
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
req: req,
indexBuildScheduler: s.indexBuildSch,
indexLoadScheduler: s.indexLoadSch,
segManager: s.segmentManager,
}
err := s.scheduler.Enqueue(task)
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "Enqueue failed: " + err.Error(),
}, nil
}
err = task.WaitToFinish(ctx)
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "Create Index error: " + err.Error(),
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
}, nil
}
func (s *Master) DescribeIndex(context.Context, *internalpb.DescribeIndexRequest) (*servicepb.DescribeIndexResponse, error) {
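
The new CreateIndex handler follows the master's task pattern: wrap the request in a createIndexTask, enqueue it on the scheduler, block on the task's completion channel, and map any error to a commonpb.Status. A minimal sketch of that enqueue-and-wait shape; the task and scheduler types below are illustrative stand-ins, not the actual Milvus implementations:

// Sketch of the enqueue-and-wait task pattern used by CreateIndex (illustrative).
package main

import (
	"context"
	"fmt"
)

type task struct {
	cv  chan error   // completion channel, like baseTask.cv
	run func() error // the work, like createIndexTask.Execute
}

type scheduler struct{ queue chan *task }

// Enqueue hands the task to the scheduler's worker goroutine.
func (s *scheduler) Enqueue(t *task) error {
	s.queue <- t
	return nil
}

// WaitToFinish blocks until the task completes or the context is canceled.
func (t *task) WaitToFinish(ctx context.Context) error {
	select {
	case err := <-t.cv:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	s := &scheduler{queue: make(chan *task, 1)}
	go func() { // worker: execute tasks and report completion
		for t := range s.queue {
			t.cv <- t.run()
		}
	}()
	t := &task{cv: make(chan error), run: func() error { return nil }}
	if err := s.Enqueue(t); err != nil {
		fmt.Println("enqueue failed:", err)
		return
	}
	fmt.Println("create index finished, err =", t.WaitToFinish(context.Background()))
}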


@@ -1,184 +0,0 @@
package master
import (
"context"
"testing"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
)
func TestMaster_CreateCollection(t *testing.T) {
Init()
refreshMasterAddress()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
etcdAddr := Params.EtcdAddress
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
_, err = etcdCli.Delete(ctx, "/test/root", clientv3.WithPrefix())
assert.Nil(t, err)
Params = ParamTable{
Address: Params.Address,
Port: Params.Port,
EtcdAddress: Params.EtcdAddress,
MetaRootPath: "/test/root/meta",
KvRootPath: "/test/root/kv",
PulsarAddress: Params.PulsarAddress,
ProxyIDList: []typeutil.UniqueID{1, 2},
WriteNodeIDList: []typeutil.UniqueID{3, 4},
TopicNum: 5,
QueryNodeNum: 3,
SoftTimeTickBarrierInterval: 300,
// segment
SegmentSize: 536870912 / 1024 / 1024,
SegmentSizeFactor: 0.75,
DefaultRecordSize: 1024,
MinSegIDAssignCnt: 1048576 / 1024,
MaxSegIDAssignCnt: Params.MaxSegIDAssignCnt,
SegIDAssignExpiration: 2000,
// msgChannel
ProxyTimeTickChannelNames: []string{"proxy1", "proxy2"},
WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
InsertChannelNames: []string{"dm0", "dm1"},
K2SChannelNames: []string{"k2s0", "k2s1"},
QueryNodeStatsChannelName: "statistic",
MsgChannelSubName: Params.MsgChannelSubName,
}
svr, err := CreateServer(ctx)
assert.Nil(t, err)
err = svr.Run(int64(Params.Port))
assert.Nil(t, err)
conn, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
cli := masterpb.NewMasterClient(conn)
sch := schemapb.CollectionSchema{
Name: "col1",
Description: "test collection",
AutoID: false,
Fields: []*schemapb.FieldSchema{
{
Name: "col1_f1",
Description: "test collection filed 1",
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "col1_f1_tk1",
Value: "col1_f1_tv1",
},
{
Key: "col1_f1_tk2",
Value: "col1_f1_tv2",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "col1_f1_ik1",
Value: "col1_f1_iv1",
},
{
Key: "col1_f1_ik2",
Value: "col1_f1_iv2",
},
},
},
{
Name: "col1_f2",
Description: "test collection filed 2",
DataType: schemapb.DataType_VECTOR_BINARY,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "col1_f2_tk1",
Value: "col1_f2_tv1",
},
{
Key: "col1_f2_tk2",
Value: "col1_f2_tv2",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "col1_f2_ik1",
Value: "col1_f2_iv1",
},
{
Key: "col1_f2_ik2",
Value: "col1_f2_iv2",
},
},
},
},
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
req := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
st, err := cli.CreateCollection(ctx, &req)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
collMeta, err := svr.metaTable.GetCollectionByName(sch.Name)
assert.Nil(t, err)
t.Logf("collection id = %d", collMeta.ID)
assert.Equal(t, collMeta.CreateTime, uint64(11))
assert.Equal(t, collMeta.Schema.Name, "col1")
assert.Equal(t, collMeta.Schema.AutoID, false)
assert.Equal(t, len(collMeta.Schema.Fields), 2)
assert.Equal(t, collMeta.Schema.Fields[0].Name, "col1_f1")
assert.Equal(t, collMeta.Schema.Fields[1].Name, "col1_f2")
assert.Equal(t, collMeta.Schema.Fields[0].DataType, schemapb.DataType_VECTOR_FLOAT)
assert.Equal(t, collMeta.Schema.Fields[1].DataType, schemapb.DataType_VECTOR_BINARY)
assert.Equal(t, len(collMeta.Schema.Fields[0].TypeParams), 2)
assert.Equal(t, len(collMeta.Schema.Fields[0].IndexParams), 2)
assert.Equal(t, len(collMeta.Schema.Fields[1].TypeParams), 2)
assert.Equal(t, len(collMeta.Schema.Fields[1].IndexParams), 2)
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[0].Key, "col1_f1_tk1")
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[1].Key, "col1_f1_tk2")
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[0].Value, "col1_f1_tv1")
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[1].Value, "col1_f1_tv2")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[0].Key, "col1_f1_ik1")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[1].Key, "col1_f1_ik2")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[0].Value, "col1_f1_iv1")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[1].Value, "col1_f1_iv2")
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[0].Key, "col1_f2_tk1")
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[1].Key, "col1_f2_tk2")
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[0].Value, "col1_f2_tv1")
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[1].Value, "col1_f2_tv2")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[0].Key, "col1_f2_ik1")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[1].Key, "col1_f2_ik2")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[0].Value, "col1_f2_iv1")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[1].Value, "col1_f2_iv2")
req.Timestamp = Timestamp(10)
st, err = cli.CreateCollection(ctx, &req)
assert.Nil(t, err)
assert.NotEqual(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
svr.Close()
}


@ -0,0 +1,95 @@
package master
import (
"fmt"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
type createIndexTask struct {
baseTask
req *internalpb.CreateIndexRequest
indexBuildScheduler *IndexBuildScheduler
indexLoadScheduler *IndexLoadScheduler
segManager *SegmentManager
}
func (task *createIndexTask) Type() internalpb.MsgType {
return internalpb.MsgType_kCreateIndex
}
func (task *createIndexTask) Ts() (Timestamp, error) {
return task.req.Timestamp, nil
}
func (task *createIndexTask) Execute() error {
// modify schema
if err := task.mt.UpdateFieldIndexParams(task.req.CollectionName, task.req.FieldName, task.req.ExtraParams); err != nil {
return err
}
// check if closed segment has the same index build history
collMeta, err := task.mt.GetCollectionByName(task.req.CollectionName)
if err != nil {
return err
}
var fieldID int64 = -1
for _, fieldSchema := range collMeta.Schema.Fields {
if fieldSchema.Name == task.req.FieldName {
fieldID = fieldSchema.FieldID
break
}
}
if fieldID == -1 {
return fmt.Errorf("can not find field name %s", task.req.FieldName)
}
for _, segID := range collMeta.SegmentIDs {
segMeta, err := task.mt.GetSegmentByID(segID)
if err != nil {
return err
}
if segMeta.CloseTime == 0 {
continue
}
hasIndexMeta, err := task.mt.HasFieldIndexMeta(segID, fieldID, task.req.ExtraParams)
if err != nil {
return err
}
if hasIndexMeta {
// load index
indexMeta, err := task.mt.GetFieldIndexMeta(segID, fieldID, task.req.ExtraParams)
if err != nil {
return err
}
err = task.indexLoadScheduler.Enqueue(&IndexLoadInfo{
segmentID: segID,
fieldID: fieldID,
fieldName: task.req.FieldName,
indexFilePaths: indexMeta.IndexFilePaths,
})
if err != nil {
return err
}
} else {
// create index
for _, kv := range segMeta.BinlogFilePaths {
if kv.FieldID != fieldID {
continue
}
err := task.indexBuildScheduler.Enqueue(&IndexBuildInfo{
segmentID: segID,
fieldID: fieldID,
binlogFilePath: kv.BinlogFiles,
})
if err != nil {
return err
}
break
}
}
}
// close unfilled segment
return task.segManager.ForceClose(collMeta.ID)
}
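
For each closed segment, Execute either reuses an index already built with identical parameters (scheduling a load) or schedules a fresh build from the segment's binlog paths, then force-closes the collection's open segments. A condensed sketch of the reuse-or-build decision, with illustrative stand-in types:

// Sketch of the reuse-or-build decision in createIndexTask.Execute (illustrative).
package main

import "fmt"

type segment struct {
	id        int64
	closed    bool
	hasIndex  bool     // an index with identical params already exists
	indexFile string   // file path of the existing index, if any
	binlogs   []string // column binlog paths to build from otherwise
}

// scheduleIndex walks segments the way Execute does: skip open segments,
// load an existing index when the parameters match, otherwise build anew.
func scheduleIndex(segs []segment) {
	for _, s := range segs {
		switch {
		case !s.closed:
			continue // only closed segments get indexed here
		case s.hasIndex:
			fmt.Println("load index for segment", s.id, "from", s.indexFile)
		default:
			fmt.Println("build index for segment", s.id, "from", s.binlogs)
		}
	}
}

func main() {
	scheduleIndex([]segment{
		{id: 1, closed: true, hasIndex: true, indexFile: "idx/seg1"},
		{id: 2, closed: true, binlogs: []string{"binlog/seg2/field100"}},
		{id: 3, closed: false}, // still open: left for ForceClose to retire
	})
}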

File diff suppressed because it is too large.


@@ -524,7 +524,7 @@ func (mt *metaTable) saveFieldIndexMetaToEtcd(meta *pb.FieldIndexMeta) error {
return mt.client.Save(key, marshaledMeta)
}
func (mt *metaTable) DeleteFieldIndexMeta(segID UniqueID, fieldID UniqueID, indexType string, indexParams []*commonpb.KeyValuePair) error {
func (mt *metaTable) DeleteFieldIndexMeta(segID UniqueID, fieldID UniqueID, indexParams []*commonpb.KeyValuePair) error {
mt.indexLock.Lock()
defer mt.indexLock.Unlock()
@@ -568,6 +568,22 @@ func (mt *metaTable) HasFieldIndexMeta(segID UniqueID, fieldID UniqueID, indexPa
return false, nil
}
func (mt *metaTable) GetFieldIndexMeta(segID UniqueID, fieldID UniqueID, indexParams []*commonpb.KeyValuePair) (*pb.FieldIndexMeta, error) {
mt.indexLock.RLock()
defer mt.indexLock.RUnlock()
if _, ok := mt.segID2IndexMetas[segID]; !ok {
return nil, fmt.Errorf("can not find segment %d", segID)
}
for _, v := range mt.segID2IndexMetas[segID] {
if v.FieldID == fieldID && typeutil.CompareIndexParams(v.IndexParams, indexParams) {
return &v, nil
}
}
return nil, fmt.Errorf("can not find field %d", fieldID)
}
func (mt *metaTable) UpdateFieldIndexMeta(meta *pb.FieldIndexMeta) error {
mt.indexLock.Lock()
defer mt.indexLock.Unlock()
@@ -635,3 +651,30 @@ func (mt *metaTable) GetFieldIndexParams(collID UniqueID, fieldID UniqueID) ([]*
}
return nil, fmt.Errorf("can not find field %d in collection %d", fieldID, collID)
}
func (mt *metaTable) UpdateFieldIndexParams(collName string, fieldName string, indexParams []*commonpb.KeyValuePair) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
vid, ok := mt.collName2ID[collName]
if !ok {
return errors.Errorf("can't find collection: " + collName)
}
meta, ok := mt.collID2Meta[vid]
if !ok {
return errors.Errorf("can't find collection: " + collName)
}
for _, fieldSchema := range meta.Schema.Fields {
if fieldSchema.Name == fieldName {
fieldSchema.IndexParams = indexParams
if err := mt.saveCollectionMeta(&meta); err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
}
return fmt.Errorf("can not find field with id %s", fieldName)
}
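
UpdateFieldIndexParams mutates the cached schema first and, if persisting to etcd fails, calls reloadFromKV so the in-memory table never drifts from durable state. A tiny sketch of that write-through-with-rollback idea; the kvStore interface and the rollback-in-place are simplifications of the reload-everything approach above:

// Sketch of the write-through cache with rollback on save failure (illustrative).
package main

import (
	"errors"
	"fmt"
)

type kvStore interface {
	Save(key, value string) error
}

type failingKV struct{}

func (failingKV) Save(key, value string) error { return errors.New("etcd unavailable") }

type table struct {
	kv    kvStore
	cache map[string]string
}

// update mutates the cache, then persists; if the save fails it restores the
// old entry. metaTable takes the blunter route and reloads everything from KV.
func (t *table) update(key, value string) error {
	old, existed := t.cache[key]
	t.cache[key] = value
	if err := t.kv.Save(key, value); err != nil {
		if existed {
			t.cache[key] = old
		} else {
			delete(t.cache, key)
		}
		return err
	}
	return nil
}

func main() {
	t := &table{kv: failingKV{}, cache: map[string]string{"col1/f1": "old-params"}}
	err := t.update("col1/f1", "new-params")
	fmt.Println(err, "| cache still holds:", t.cache["col1/f1"])
}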


@@ -497,7 +497,7 @@ func TestMetaTable_IndexMeta(t *testing.T) {
})
assert.Nil(t, err)
assert.EqualValues(t, indexbuilderpb.IndexStatus_FINISHED, meta.segID2IndexMetas[1][0].Status)
err = meta.DeleteFieldIndexMeta(1, 100, "type1", []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}})
err = meta.DeleteFieldIndexMeta(1, 100, []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}})
assert.Nil(t, err)
res, err = meta.HasFieldIndexMeta(1, 100, []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}})
assert.Nil(t, err)


@@ -1,339 +0,0 @@
package master
import (
"context"
"testing"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
)
func TestMaster_Partition(t *testing.T) {
Init()
refreshMasterAddress()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
etcdAddr := Params.EtcdAddress
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
_, err = etcdCli.Delete(ctx, "/test/root", clientv3.WithPrefix())
assert.Nil(t, err)
Params = ParamTable{
Address: Params.Address,
Port: Params.Port,
EtcdAddress: Params.EtcdAddress,
MetaRootPath: "/test/root/meta",
KvRootPath: "/test/root/kv",
PulsarAddress: Params.PulsarAddress,
ProxyIDList: []typeutil.UniqueID{1, 2},
WriteNodeIDList: []typeutil.UniqueID{3, 4},
TopicNum: 5,
QueryNodeNum: 3,
SoftTimeTickBarrierInterval: 300,
// segment
SegmentSize: 536870912 / 1024 / 1024,
SegmentSizeFactor: 0.75,
DefaultRecordSize: 1024,
MinSegIDAssignCnt: 1048576 / 1024,
MaxSegIDAssignCnt: Params.MaxSegIDAssignCnt,
SegIDAssignExpiration: 2000,
// msgChannel
ProxyTimeTickChannelNames: []string{"proxy1", "proxy2"},
WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
InsertChannelNames: []string{"dm0", "dm1"},
K2SChannelNames: []string{"k2s0", "k2s1"},
QueryNodeStatsChannelName: "statistic",
MsgChannelSubName: Params.MsgChannelSubName,
MaxPartitionNum: int64(4096),
DefaultPartitionTag: "_default",
}
svr, err := CreateServer(ctx)
assert.Nil(t, err)
err = svr.Run(int64(Params.Port))
assert.Nil(t, err)
conn, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
cli := masterpb.NewMasterClient(conn)
sch := schemapb.CollectionSchema{
Name: "col1",
Description: "test collection",
AutoID: false,
Fields: []*schemapb.FieldSchema{
{
Name: "col1_f1",
Description: "test collection filed 1",
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "col1_f1_tk1",
Value: "col1_f1_tv1",
},
{
Key: "col1_f1_tk2",
Value: "col1_f1_tv2",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "col1_f1_ik1",
Value: "col1_f1_iv1",
},
{
Key: "col1_f1_ik2",
Value: "col1_f1_iv2",
},
},
},
{
Name: "col1_f2",
Description: "test collection filed 2",
DataType: schemapb.DataType_VECTOR_BINARY,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "col1_f2_tk1",
Value: "col1_f2_tv1",
},
{
Key: "col1_f2_tk2",
Value: "col1_f2_tv2",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "col1_f2_ik1",
Value: "col1_f2_iv1",
},
{
Key: "col1_f2_ik2",
Value: "col1_f2_iv2",
},
},
},
},
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
createCollectionReq := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
ReqID: 1,
Timestamp: 1,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
st, _ := cli.CreateCollection(ctx, &createCollectionReq)
assert.NotNil(t, st)
assert.Equal(t, commonpb.ErrorCode_SUCCESS, st.ErrorCode)
createPartitionReq := internalpb.CreatePartitionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
ReqID: 1,
Timestamp: 2,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition1"},
}
st, _ = cli.CreatePartition(ctx, &createPartitionReq)
assert.NotNil(t, st)
assert.Equal(t, commonpb.ErrorCode_SUCCESS, st.ErrorCode)
createPartitionReq = internalpb.CreatePartitionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
ReqID: 1,
Timestamp: 1,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition1"},
}
st, _ = cli.CreatePartition(ctx, &createPartitionReq)
assert.NotNil(t, st)
assert.Equal(t, commonpb.ErrorCode_UNEXPECTED_ERROR, st.ErrorCode)
createPartitionReq = internalpb.CreatePartitionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
ReqID: 1,
Timestamp: 3,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition2"},
}
st, _ = cli.CreatePartition(ctx, &createPartitionReq)
assert.NotNil(t, st)
assert.Equal(t, commonpb.ErrorCode_SUCCESS, st.ErrorCode)
collMeta, err := svr.metaTable.GetCollectionByName(sch.Name)
assert.Nil(t, err)
t.Logf("collection id = %d", collMeta.ID)
assert.Equal(t, collMeta.CreateTime, uint64(1))
assert.Equal(t, collMeta.Schema.Name, "col1")
assert.Equal(t, collMeta.Schema.AutoID, false)
assert.Equal(t, len(collMeta.Schema.Fields), 2)
assert.Equal(t, collMeta.Schema.Fields[0].Name, "col1_f1")
assert.Equal(t, collMeta.Schema.Fields[1].Name, "col1_f2")
assert.Equal(t, collMeta.Schema.Fields[0].DataType, schemapb.DataType_VECTOR_FLOAT)
assert.Equal(t, collMeta.Schema.Fields[1].DataType, schemapb.DataType_VECTOR_BINARY)
assert.Equal(t, len(collMeta.Schema.Fields[0].TypeParams), 2)
assert.Equal(t, len(collMeta.Schema.Fields[0].IndexParams), 2)
assert.Equal(t, len(collMeta.Schema.Fields[1].TypeParams), 2)
assert.Equal(t, len(collMeta.Schema.Fields[1].IndexParams), 2)
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[0].Key, "col1_f1_tk1")
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[1].Key, "col1_f1_tk2")
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[0].Value, "col1_f1_tv1")
assert.Equal(t, collMeta.Schema.Fields[0].TypeParams[1].Value, "col1_f1_tv2")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[0].Key, "col1_f1_ik1")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[1].Key, "col1_f1_ik2")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[0].Value, "col1_f1_iv1")
assert.Equal(t, collMeta.Schema.Fields[0].IndexParams[1].Value, "col1_f1_iv2")
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[0].Key, "col1_f2_tk1")
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[1].Key, "col1_f2_tk2")
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[0].Value, "col1_f2_tv1")
assert.Equal(t, collMeta.Schema.Fields[1].TypeParams[1].Value, "col1_f2_tv2")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[0].Key, "col1_f2_ik1")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[1].Key, "col1_f2_ik2")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[0].Value, "col1_f2_iv1")
assert.Equal(t, collMeta.Schema.Fields[1].IndexParams[1].Value, "col1_f2_iv2")
//assert.Equal(t, collMeta.PartitionTags[0], "partition1")
//assert.Equal(t, collMeta.PartitionTags[1], "partition2")
assert.ElementsMatch(t, []string{"_default", "partition1", "partition2"}, collMeta.PartitionTags)
showPartitionReq := internalpb.ShowPartitionRequest{
MsgType: internalpb.MsgType_kShowPartitions,
ReqID: 1,
Timestamp: 4,
ProxyID: 1,
CollectionName: &servicepb.CollectionName{CollectionName: "col1"},
}
stringList, err := cli.ShowPartitions(ctx, &showPartitionReq)
assert.Nil(t, err)
assert.ElementsMatch(t, []string{"_default", "partition1", "partition2"}, stringList.Values)
showPartitionReq = internalpb.ShowPartitionRequest{
MsgType: internalpb.MsgType_kShowPartitions,
ReqID: 1,
Timestamp: 3,
ProxyID: 1,
CollectionName: &servicepb.CollectionName{CollectionName: "col1"},
}
stringList, _ = cli.ShowPartitions(ctx, &showPartitionReq)
assert.NotNil(t, stringList)
assert.Equal(t, commonpb.ErrorCode_UNEXPECTED_ERROR, stringList.Status.ErrorCode)
hasPartitionReq := internalpb.HasPartitionRequest{
MsgType: internalpb.MsgType_kHasPartition,
ReqID: 1,
Timestamp: 5,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition1"},
}
hasPartition, err := cli.HasPartition(ctx, &hasPartitionReq)
assert.Nil(t, err)
assert.True(t, hasPartition.Value)
hasPartitionReq = internalpb.HasPartitionRequest{
MsgType: internalpb.MsgType_kHasPartition,
ReqID: 1,
Timestamp: 4,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition1"},
}
hasPartition, _ = cli.HasPartition(ctx, &hasPartitionReq)
assert.NotNil(t, hasPartition)
assert.Equal(t, commonpb.ErrorCode_UNEXPECTED_ERROR, stringList.Status.ErrorCode)
hasPartitionReq = internalpb.HasPartitionRequest{
MsgType: internalpb.MsgType_kHasPartition,
ReqID: 1,
Timestamp: 6,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition3"},
}
hasPartition, err = cli.HasPartition(ctx, &hasPartitionReq)
assert.Nil(t, err)
assert.False(t, hasPartition.Value)
deletePartitionReq := internalpb.DropPartitionRequest{
MsgType: internalpb.MsgType_kDropPartition,
ReqID: 1,
Timestamp: 7,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition2"},
}
st, err = cli.DropPartition(ctx, &deletePartitionReq)
assert.Nil(t, err)
assert.Equal(t, commonpb.ErrorCode_SUCCESS, st.ErrorCode)
deletePartitionReq = internalpb.DropPartitionRequest{
MsgType: internalpb.MsgType_kDropPartition,
ReqID: 1,
Timestamp: 6,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition2"},
}
st, _ = cli.DropPartition(ctx, &deletePartitionReq)
assert.NotNil(t, st)
assert.Equal(t, commonpb.ErrorCode_UNEXPECTED_ERROR, st.ErrorCode)
hasPartitionReq = internalpb.HasPartitionRequest{
MsgType: internalpb.MsgType_kHasPartition,
ReqID: 1,
Timestamp: 8,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition2"},
}
hasPartition, err = cli.HasPartition(ctx, &hasPartitionReq)
assert.Nil(t, err)
assert.False(t, hasPartition.Value)
describePartitionReq := internalpb.DescribePartitionRequest{
MsgType: internalpb.MsgType_kDescribePartition,
ReqID: 1,
Timestamp: 9,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition1"},
}
describePartition, err := cli.DescribePartition(ctx, &describePartitionReq)
assert.Nil(t, err)
assert.Equal(t, &servicepb.PartitionName{CollectionName: "col1", Tag: "partition1"}, describePartition.Name)
describePartitionReq = internalpb.DescribePartitionRequest{
MsgType: internalpb.MsgType_kDescribePartition,
ReqID: 1,
Timestamp: 8,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition1"},
}
describePartition, _ = cli.DescribePartition(ctx, &describePartitionReq)
assert.Equal(t, commonpb.ErrorCode_UNEXPECTED_ERROR, describePartition.Status.ErrorCode)
svr.Close()
}


@@ -358,6 +358,20 @@ func (manager *SegmentManager) initChannelRanges() error {
}
return nil
}
// ForceClose marks all segments of the collection with collID as closable; a segment is closed once its assignments have expired
func (manager *SegmentManager) ForceClose(collID UniqueID) error {
status, ok := manager.collStatus[collID]
if !ok {
return nil
}
for _, segStatus := range status.segments {
segStatus.closable = true
}
return nil
}
func NewSegmentManager(ctx context.Context,
meta *metaTable,
globalIDAllocator func() (UniqueID, error),


@@ -6,20 +6,16 @@ import (
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
)
func TestSegmentManager_AssignSegment(t *testing.T) {
@@ -160,143 +156,3 @@ func TestSegmentManager_AssignSegment(t *testing.T) {
assert.Nil(t, err)
assert.NotEqualValues(t, 0, segMeta.CloseTime)
}
func TestSegmentManager_RPC(t *testing.T) {
Init()
refreshMasterAddress()
etcdAddress := Params.EtcdAddress
rootPath := "/test/root"
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
assert.Nil(t, err)
_, err = cli.Delete(ctx, rootPath, clientv3.WithPrefix())
assert.Nil(t, err)
Params = ParamTable{
Address: Params.Address,
Port: Params.Port,
EtcdAddress: Params.EtcdAddress,
MetaRootPath: "/test/root/meta",
KvRootPath: "/test/root/kv",
PulsarAddress: Params.PulsarAddress,
ProxyIDList: []typeutil.UniqueID{1, 2},
WriteNodeIDList: []typeutil.UniqueID{3, 4},
TopicNum: 5,
QueryNodeNum: 3,
SoftTimeTickBarrierInterval: 300,
// segment
SegmentSize: 536870912 / 1024 / 1024,
SegmentSizeFactor: 0.75,
DefaultRecordSize: 1024,
MinSegIDAssignCnt: 1048576 / 1024,
MaxSegIDAssignCnt: Params.MaxSegIDAssignCnt,
SegIDAssignExpiration: 2000,
// msgChannel
ProxyTimeTickChannelNames: []string{"proxy1", "proxy2"},
WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
InsertChannelNames: []string{"dm0", "dm1"},
K2SChannelNames: []string{"k2s0", "k2s1"},
QueryNodeStatsChannelName: "statistic",
MsgChannelSubName: Params.MsgChannelSubName,
MaxPartitionNum: int64(4096),
DefaultPartitionTag: "_default",
}
collName := "test_coll"
partitionTag := "test_part"
master, err := CreateServer(ctx)
assert.Nil(t, err)
defer master.Close()
err = master.Run(int64(Params.Port))
assert.Nil(t, err)
dialContext, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer dialContext.Close()
client := masterpb.NewMasterClient(dialContext)
schema := &schemapb.CollectionSchema{
Name: collName,
Description: "test coll",
AutoID: false,
Fields: []*schemapb.FieldSchema{
{FieldID: 1, Name: "f1", IsPrimaryKey: false, DataType: schemapb.DataType_INT32},
{FieldID: 1, Name: "f1", IsPrimaryKey: false, DataType: schemapb.DataType_VECTOR_FLOAT, TypeParams: []*commonpb.KeyValuePair{{Key: "dim", Value: "128"}}},
},
}
schemaBytes, err := proto.Marshal(schema)
assert.Nil(t, err)
_, err = client.CreateCollection(ctx, &internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 100,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
})
assert.Nil(t, err)
_, err = client.CreatePartition(ctx, &internalpb.CreatePartitionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
ReqID: 2,
Timestamp: 101,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: collName,
Tag: partitionTag,
},
})
assert.Nil(t, err)
resp, err := client.AssignSegmentID(ctx, &internalpb.AssignSegIDRequest{
PeerID: 1,
Role: internalpb.PeerRole_Proxy,
PerChannelReq: []*internalpb.SegIDRequest{
{Count: 10000, ChannelID: 0, CollName: collName, PartitionTag: partitionTag},
},
})
assert.Nil(t, err)
assignments := resp.GetPerChannelAssignment()
assert.EqualValues(t, 1, len(assignments))
assert.EqualValues(t, commonpb.ErrorCode_SUCCESS, assignments[0].Status.ErrorCode)
assert.EqualValues(t, collName, assignments[0].CollName)
assert.EqualValues(t, partitionTag, assignments[0].PartitionTag)
assert.EqualValues(t, int32(0), assignments[0].ChannelID)
assert.EqualValues(t, uint32(10000), assignments[0].Count)
// test stats
segID := assignments[0].SegID
pulsarAddress := Params.PulsarAddress
ms := msgstream.NewPulsarMsgStream(ctx, 1024)
ms.SetPulsarClient(pulsarAddress)
ms.CreatePulsarProducers([]string{"statistic"})
ms.Start()
defer ms.Close()
err = ms.Produce(&msgstream.MsgPack{
BeginTs: 102,
EndTs: 104,
Msgs: []msgstream.TsMsg{
&msgstream.QueryNodeStatsMsg{
QueryNodeStats: internalpb.QueryNodeStats{
MsgType: internalpb.MsgType_kQueryNodeStats,
PeerID: 1,
SegStats: []*internalpb.SegmentStats{
{SegmentID: segID, MemorySize: 600000000, NumRows: 1000000, RecentlyModified: true},
},
},
BaseMsg: msgstream.BaseMsg{
HashValues: []uint32{0},
},
},
},
})
assert.Nil(t, err)
time.Sleep(500 * time.Millisecond)
segMeta, err := master.metaTable.GetSegmentByID(segID)
assert.Nil(t, err)
assert.EqualValues(t, 1000000, segMeta.GetNumRows())
assert.EqualValues(t, int64(600000000), segMeta.GetMemSize())
}


@@ -13,6 +13,14 @@ type MsgStreamMsg struct {
timestampMax Timestamp
}
func GenerateMsgStreamMsg(tsMessages []msgstream.TsMsg, timestampMin, timestampMax Timestamp) *MsgStreamMsg {
return &MsgStreamMsg{
tsMessages: tsMessages,
timestampMin: timestampMin,
timestampMax: timestampMax,
}
}
func (msMsg *MsgStreamMsg) TimeTick() Timestamp {
return msMsg.timestampMax
}


@@ -39,7 +39,7 @@ func (dsService *dataSyncService) initNodes() {
var dmStreamNode Node = newDmInputNode(dsService.ctx)
var ddStreamNode Node = newDDInputNode(dsService.ctx)
var ddNode Node = newDDNode()
var ddNode Node = newDDNode(dsService.ctx)
var filterDmNode Node = newFilteredDmNode()
var insertBufferNode Node = newInsertBufferNode(dsService.ctx)


@@ -1,47 +0,0 @@
package writenode
import (
"errors"
"strconv"
)
type ddBuffer struct {
collectionBuffer map[UniqueID]interface{}
partitionBuffer map[UniqueID]interface{}
}
func (d *ddBuffer) addCollection(collectionID UniqueID) error {
if _, ok := d.collectionBuffer[collectionID]; !ok {
return errors.New("collection " + strconv.FormatInt(collectionID, 10) + " is already exists")
}
d.collectionBuffer[collectionID] = nil
return nil
}
func (d *ddBuffer) removeCollection(collectionID UniqueID) error {
if _, ok := d.collectionBuffer[collectionID]; !ok {
return errors.New("cannot found collection " + strconv.FormatInt(collectionID, 10))
}
delete(d.collectionBuffer, collectionID)
return nil
}
func (d *ddBuffer) addPartition(partitionID UniqueID) error {
if _, ok := d.partitionBuffer[partitionID]; !ok {
return errors.New("partition " + strconv.FormatInt(partitionID, 10) + " is already exists")
}
d.partitionBuffer[partitionID] = nil
return nil
}
func (d *ddBuffer) removePartition(partitionID UniqueID) error {
if _, ok := d.partitionBuffer[partitionID]; !ok {
return errors.New("cannot found partition " + strconv.FormatInt(partitionID, 10))
}
delete(d.partitionBuffer, partitionID)
return nil
}


@@ -1,20 +1,65 @@
package writenode
import (
"context"
"errors"
"log"
"sort"
"strconv"
"github.com/golang/protobuf/proto"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/zilliztech/milvus-distributed/internal/allocator"
"github.com/zilliztech/milvus-distributed/internal/kv"
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/storage"
)
type ddNode struct {
BaseNode
ddMsg *ddMsg
ddBuffer *ddBuffer
ddMsg *ddMsg
ddRecords *ddRecords
ddBuffer *ddBuffer
idAllocator *allocator.IDAllocator
kv kv.Base
}
type ddData struct {
ddRequestString []string
timestamps []Timestamp
eventTypes []storage.EventTypeCode
}
type ddBuffer struct {
ddData map[UniqueID]*ddData
maxSize int
}
type ddRecords struct {
collectionRecords map[UniqueID]interface{}
partitionRecords map[UniqueID]interface{}
}
func (d *ddBuffer) size() int {
if d.ddData == nil || len(d.ddData) <= 0 {
return 0
}
size := 0
for _, data := range d.ddData {
size += len(data.ddRequestString)
}
return size
}
func (d *ddBuffer) full() bool {
return d.size() >= d.maxSize
}
func (ddNode *ddNode) Name() string {
@@ -68,6 +113,62 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
}
}
// generate binlog
if ddNode.ddBuffer.full() {
ddCodec := &storage.DataDefinitionCodec{}
for collectionID, data := range ddNode.ddBuffer.ddData {
// buffer data to binlog
binLogs, err := ddCodec.Serialize(data.timestamps, data.ddRequestString, data.eventTypes)
if err != nil {
log.Println(err)
continue
}
if len(binLogs) != 2 {
log.Println("illegal binLogs")
continue
}
// binLogs -> minIO/S3
if len(data.ddRequestString) != len(data.timestamps) ||
len(data.timestamps) != len(data.eventTypes) {
log.Println("illegal ddBuffer, failed to save binlog")
continue
} else {
// Blob key example:
// ${tenant}/data_definition_log/${collection_id}/ts/${log_idx}
// ${tenant}/data_definition_log/${collection_id}/ddl/${log_idx}
keyCommon := Params.DdLogRootPath + strconv.FormatInt(collectionID, 10) + "/"
// save ts binlog
timestampLogIdx, err := ddNode.idAllocator.AllocOne()
if err != nil {
log.Println(err)
}
timestampKey := keyCommon + binLogs[0].GetKey() + "/" + strconv.FormatInt(timestampLogIdx, 10)
err = ddNode.kv.Save(timestampKey, string(binLogs[0].GetValue()))
if err != nil {
log.Println(err)
}
log.Println("save ts binlog, key = ", timestampKey)
// save dd binlog
ddLogIdx, err := ddNode.idAllocator.AllocOne()
if err != nil {
log.Println(err)
}
ddKey := keyCommon + binLogs[1].GetKey() + "/" + strconv.FormatInt(ddLogIdx, 10)
err = ddNode.kv.Save(ddKey, string(binLogs[1].GetValue()))
if err != nil {
log.Println(err)
}
log.Println("save dd binlog, key = ", ddKey)
}
}
// clear buffer
ddNode.ddBuffer.ddData = make(map[UniqueID]*ddData)
log.Println("dd buffer flushed")
}
var res Msg = ddNode.ddMsg
return []*Msg{&res}
}
@@ -75,16 +176,18 @@ func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
collectionID := msg.CollectionID
err := ddNode.ddBuffer.addCollection(collectionID)
if err != nil {
// add collection
if _, ok := ddNode.ddRecords.collectionRecords[collectionID]; ok {
err := errors.New("collection " + strconv.FormatInt(collectionID, 10) + " is already exists")
log.Println(err)
return
}
ddNode.ddRecords.collectionRecords[collectionID] = nil
// TODO: add default partition?
var schema schemapb.CollectionSchema
err = proto.Unmarshal((*msg.Schema).Value, &schema)
err := proto.Unmarshal((*msg.Schema).Value, &schema)
if err != nil {
log.Println(err)
return
@@ -96,17 +199,30 @@ func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
timestamp: msg.Timestamp,
})
// TODO: write dd binlog
_, ok := ddNode.ddBuffer.ddData[collectionID]
if !ok {
ddNode.ddBuffer.ddData[collectionID] = &ddData{
ddRequestString: make([]string, 0),
timestamps: make([]Timestamp, 0),
eventTypes: make([]storage.EventTypeCode, 0),
}
}
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.CreateCollectionRequest.String())
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Timestamp)
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.CreateCollectionEventType)
}
func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
collectionID := msg.CollectionID
err := ddNode.ddBuffer.removeCollection(collectionID)
if err != nil {
// remove collection
if _, ok := ddNode.ddRecords.collectionRecords[collectionID]; !ok {
err := errors.New("cannot found collection " + strconv.FormatInt(collectionID, 10))
log.Println(err)
return
}
delete(ddNode.ddRecords.collectionRecords, collectionID)
collectionName := msg.CollectionName.CollectionName
ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
@@ -115,17 +231,31 @@ func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
timestamp: msg.Timestamp,
})
// TODO: write dd binlog
_, ok := ddNode.ddBuffer.ddData[collectionID]
if !ok {
ddNode.ddBuffer.ddData[collectionID] = &ddData{
ddRequestString: make([]string, 0),
timestamps: make([]Timestamp, 0),
eventTypes: make([]storage.EventTypeCode, 0),
}
}
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.DropCollectionRequest.String())
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Timestamp)
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.DropCollectionEventType)
}
func (ddNode *ddNode) createPartition(msg *msgstream.CreatePartitionMsg) {
partitionID := msg.PartitionID
collectionID := msg.CollectionID
err := ddNode.ddBuffer.addPartition(partitionID)
if err != nil {
// add partition
if _, ok := ddNode.ddRecords.partitionRecords[partitionID]; ok {
err := errors.New("partition " + strconv.FormatInt(partitionID, 10) + " is already exists")
log.Println(err)
return
}
ddNode.ddRecords.partitionRecords[partitionID] = nil
partitionTag := msg.PartitionName.Tag
ddNode.ddMsg.partitionRecords[partitionTag] = append(ddNode.ddMsg.partitionRecords[partitionTag],
@@ -134,17 +264,31 @@ func (ddNode *ddNode) createPartition(msg *msgstream.CreatePartitionMsg) {
timestamp: msg.Timestamp,
})
// TODO: write dd binlog
_, ok := ddNode.ddBuffer.ddData[collectionID]
if !ok {
ddNode.ddBuffer.ddData[collectionID] = &ddData{
ddRequestString: make([]string, 0),
timestamps: make([]Timestamp, 0),
eventTypes: make([]storage.EventTypeCode, 0),
}
}
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.CreatePartitionRequest.String())
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Timestamp)
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.CreatePartitionEventType)
}
func (ddNode *ddNode) dropPartition(msg *msgstream.DropPartitionMsg) {
partitionID := msg.PartitionID
collectionID := msg.CollectionID
err := ddNode.ddBuffer.removePartition(partitionID)
if err != nil {
// remove partition
if _, ok := ddNode.ddRecords.partitionRecords[partitionID]; !ok {
err := errors.New("cannot found partition " + strconv.FormatInt(partitionID, 10))
log.Println(err)
return
}
delete(ddNode.ddRecords.partitionRecords, partitionID)
partitionTag := msg.PartitionName.Tag
ddNode.ddMsg.partitionRecords[partitionTag] = append(ddNode.ddMsg.partitionRecords[partitionTag],
@@ -153,10 +297,21 @@ func (ddNode *ddNode) dropPartition(msg *msgstream.DropPartitionMsg) {
timestamp: msg.Timestamp,
})
// TODO: write dd binlog
_, ok := ddNode.ddBuffer.ddData[collectionID]
if !ok {
ddNode.ddBuffer.ddData[collectionID] = &ddData{
ddRequestString: make([]string, 0),
timestamps: make([]Timestamp, 0),
eventTypes: make([]storage.EventTypeCode, 0),
}
}
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.DropPartitionRequest.String())
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Timestamp)
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.DropPartitionEventType)
}
func newDDNode() *ddNode {
func newDDNode(ctx context.Context) *ddNode {
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
@@ -164,13 +319,46 @@ func newDDNode() *ddNode {
baseNode.SetMaxQueueLength(maxQueueLength)
baseNode.SetMaxParallelism(maxParallelism)
ddBuffer := &ddBuffer{
collectionBuffer: make(map[UniqueID]interface{}),
partitionBuffer: make(map[UniqueID]interface{}),
ddRecords := &ddRecords{
collectionRecords: make(map[UniqueID]interface{}),
partitionRecords: make(map[UniqueID]interface{}),
}
minIOEndPoint := Params.MinioAddress
minIOAccessKeyID := Params.MinioAccessKeyID
minIOSecretAccessKey := Params.MinioSecretAccessKey
minIOUseSSL := Params.MinioUseSSL
minIOClient, err := minio.New(minIOEndPoint, &minio.Options{
Creds: credentials.NewStaticV4(minIOAccessKeyID, minIOSecretAccessKey, ""),
Secure: minIOUseSSL,
})
if err != nil {
panic(err)
}
// TODO: load bucket name from yaml?
minioKV, err := miniokv.NewMinIOKV(ctx, minIOClient, "write-node-dd-node")
if err != nil {
panic(err)
}
idAllocator, err := allocator.NewIDAllocator(ctx, Params.MasterAddress)
if err != nil {
panic(err)
}
err = idAllocator.Start()
if err != nil {
panic(err)
}
return &ddNode{
BaseNode: baseNode,
ddBuffer: ddBuffer,
BaseNode: baseNode,
ddRecords: ddRecords,
ddBuffer: &ddBuffer{
ddData: make(map[UniqueID]*ddData),
maxSize: Params.FlushDdBufSize,
},
idAllocator: idAllocator,
kv: minioKV,
}
}
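
The binlog keys saved in Operate above compose a configured root path, the collection ID, a "ts" or "ddl" segment, and an allocated log index, as the Blob-key comment spells out. A small helper showing how those keys assemble; the function name and the example root value are illustrative assumptions:

// Sketch of the dd binlog key layout used above (illustrative helper).
package main

import (
	"fmt"
	"strconv"
)

// ddLogKey builds keys of the form
//   ${root}/${collection_id}/${kind}/${log_idx}
// where kind is "ts" for the timestamp binlog and "ddl" for the request binlog.
func ddLogKey(root string, collectionID int64, kind string, logIdx int64) string {
	return root + strconv.FormatInt(collectionID, 10) + "/" + kind + "/" +
		strconv.FormatInt(logIdx, 10)
}

func main() {
	root := "by-dev/data_definition_log/" // DdLogRootPath is configured; value assumed
	fmt.Println(ddLogKey(root, 0, "ts", 42))  // by-dev/data_definition_log/0/ts/42
	fmt.Println(ddLogKey(root, 0, "ddl", 43)) // by-dev/data_definition_log/0/ddl/43
}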


@@ -0,0 +1,126 @@
package writenode
import (
"context"
"testing"
"time"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)
func TestFlowGraphDDNode_Operate(t *testing.T) {
const ctxTimeInMillisecond = 2000
const closeWithDeadline = false
var ctx context.Context
if closeWithDeadline {
var cancel context.CancelFunc
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel = context.WithDeadline(context.Background(), d)
defer cancel()
} else {
ctx = context.Background()
}
startMaster(ctx)
Params.FlushDdBufSize = 4
ddNode := newDDNode(ctx)
colID := UniqueID(0)
colName := "col-test-0"
// create collection
createColReq := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
CollectionID: colID,
ReqID: 1,
Timestamp: 1,
ProxyID: 1,
Schema: &commonpb.Blob{},
}
createColMsg := msgstream.CreateCollectionMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: Timestamp(1),
EndTimestamp: Timestamp(1),
HashValues: []uint32{uint32(0)},
},
CreateCollectionRequest: createColReq,
}
// drop collection
dropColReq := internalpb.DropCollectionRequest{
MsgType: internalpb.MsgType_kDropCollection,
CollectionID: colID,
ReqID: 2,
Timestamp: 2,
ProxyID: 2,
CollectionName: &servicepb.CollectionName{CollectionName: colName},
}
dropColMsg := msgstream.DropCollectionMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: Timestamp(2),
EndTimestamp: Timestamp(2),
HashValues: []uint32{uint32(0)},
},
DropCollectionRequest: dropColReq,
}
partitionID := UniqueID(100)
partitionTag := "partition-test-0"
// create partition
createPartitionReq := internalpb.CreatePartitionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
CollectionID: colID,
PartitionID: partitionID,
ReqID: 3,
Timestamp: 3,
ProxyID: 3,
PartitionName: &servicepb.PartitionName{
CollectionName: colName,
Tag: partitionTag,
},
}
createPartitionMsg := msgstream.CreatePartitionMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: Timestamp(3),
EndTimestamp: Timestamp(3),
HashValues: []uint32{uint32(0)},
},
CreatePartitionRequest: createPartitionReq,
}
// drop partition
dropPartitionReq := internalpb.DropPartitionRequest{
MsgType: internalpb.MsgType_kDropPartition,
CollectionID: colID,
PartitionID: partitionID,
ReqID: 4,
Timestamp: 4,
ProxyID: 4,
PartitionName: &servicepb.PartitionName{
CollectionName: colName,
Tag: partitionTag,
},
}
dropPartitionMsg := msgstream.DropPartitionMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: Timestamp(4),
EndTimestamp: Timestamp(4),
HashValues: []uint32{uint32(0)},
},
DropPartitionRequest: dropPartitionReq,
}
tsMessages := make([]msgstream.TsMsg, 0)
tsMessages = append(tsMessages, msgstream.TsMsg(&createColMsg))
tsMessages = append(tsMessages, msgstream.TsMsg(&dropColMsg))
tsMessages = append(tsMessages, msgstream.TsMsg(&createPartitionMsg))
tsMessages = append(tsMessages, msgstream.TsMsg(&dropPartitionMsg))
msgStream := flowgraph.GenerateMsgStreamMsg(tsMessages, Timestamp(0), Timestamp(3))
var inMsg Msg = msgStream
ddNode.Operate([]*Msg{&inMsg})
}


@@ -1,11 +1,18 @@
package writenode
import (
"context"
"fmt"
"log"
"math/rand"
"os"
"strconv"
"testing"
"go.etcd.io/etcd/clientv3"
"go.uber.org/zap"
"github.com/zilliztech/milvus-distributed/internal/master"
)
func makeNewChannelNames(names []string, suffix string) []string {
@@ -22,6 +29,34 @@ func refreshChannelNames() {
Params.InsertChannelNames = makeNewChannelNames(Params.InsertChannelNames, suffix)
}
func startMaster(ctx context.Context) {
master.Init()
etcdAddr := master.Params.EtcdAddress
metaRootPath := master.Params.MetaRootPath
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
if err != nil {
panic(err)
}
_, err = etcdCli.Delete(context.TODO(), metaRootPath, clientv3.WithPrefix())
if err != nil {
panic(err)
}
masterPort := 53101
master.Params.Port = masterPort
svr, err := master.CreateServer(ctx)
if err != nil {
log.Print("create server failed", zap.Error(err))
}
if err := svr.Run(int64(master.Params.Port)); err != nil {
log.Fatal("run server failed", zap.Error(err))
}
fmt.Println("Waiting for server!", svr.IsServing())
Params.MasterAddress = master.Params.Address + ":" + strconv.Itoa(masterPort)
}
func TestMain(m *testing.M) {
Params.Init()
refreshChannelNames()