mirror of https://github.com/milvus-io/milvus.git
Refactor param table, and add ddNode
Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
Branch: pull/4973/head^2
parent 8df05714a8
commit 585d3f9831
@@ -20,6 +20,10 @@ writeNode:
   maxParallelism: 1024
 
   msgStream:
+    dataDefinition:
+      recvBufSize: 64 # msgPack chan buffer size
+      pulsarBufSize: 64 # pulsar chan buffer size
+
     insert:
       #streamBufSize: 1024 # msgPack chan buffer size
       recvBufSize: 1024 # msgPack chan buffer size
@@ -29,13 +33,3 @@ writeNode:
       #streamBufSize: 1024 # msgPack chan buffer size
       recvBufSize: 1024 # msgPack chan buffer size
       pulsarBufSize: 1024 # pulsar chan buffer size
-
-    search:
-      recvBufSize: 512
-      pulsarBufSize: 512
-
-    searchResult:
-      recvBufSize: 64
-
-    stats:
-      recvBufSize: 64
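
The two new dataDefinition keys are plain channel-buffer sizes. A minimal sketch of reading them with a viper-style loader; the file path, key paths, and the direct use of viper are assumptions for illustration (the repo's own ParamTable wraps its config loading):

	package main

	import "github.com/spf13/viper"

	// Sketch only: reads the keys added above, with their defaults as fallbacks.
	func loadDDBufSizes() (int, int, error) {
		v := viper.New()
		v.SetConfigFile("config/advanced/write_node.yaml") // assumed path
		if err := v.ReadInConfig(); err != nil {
			return 0, 0, err
		}
		v.SetDefault("writeNode.msgStream.dataDefinition.recvBufSize", 64)
		v.SetDefault("writeNode.msgStream.dataDefinition.pulsarBufSize", 64)
		return v.GetInt("writeNode.msgStream.dataDefinition.recvBufSize"),
			v.GetInt("writeNode.msgStream.dataDefinition.pulsarBufSize"), nil
	}
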
@@ -17,11 +17,6 @@ type ddNode struct {
 	replica collectionReplica
 }
 
-type metaOperateRecord struct {
-	createOrDrop bool // create: true, drop: false
-	timestamp    Timestamp
-}
-
 func (ddNode *ddNode) Name() string {
 	return "ddNode"
 }
@@ -9,8 +9,8 @@ import (
 )
 
 type filterDmNode struct {
-	ddMsg *ddMsg
 	BaseNode
+	ddMsg *ddMsg
 }
 
 func (fdmNode *filterDmNode) Name() string {
@@ -19,6 +19,11 @@ type ddMsg struct {
 	timeRange TimeRange
 }
 
+type metaOperateRecord struct {
+	createOrDrop bool // create: true, drop: false
+	timestamp    Timestamp
+}
+
 type insertMsg struct {
 	insertMessages []*msgstream.InsertMsg
 	timeRange      TimeRange
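
metaOperateRecord now lives next to ddMsg in the query node. A hypothetical fold over one collection's records, showing how createOrDrop and timestamp combine into a latest-wins state (latestState is not part of the commit):

	// latestState reduces a record slice to the newest create/drop decision.
	func latestState(records []metaOperateRecord) (exists bool, ts Timestamp) {
		for _, r := range records {
			if r.timestamp >= ts {
				ts = r.timestamp
				exists = r.createOrDrop // create: true, drop: false
			}
		}
		return exists, ts
	}
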
@@ -118,7 +118,7 @@ func makeNewChannelNames(names []string, suffix string) []string {
 }
 
 func refreshChannelNames() {
-	suffix := "_test_query_node" + strconv.FormatInt(rand.Int63n(100), 10)
+	suffix := "-test-query-node" + strconv.FormatInt(rand.Int63n(100), 10)
 	Params.DDChannelNames = makeNewChannelNames(Params.DDChannelNames, suffix)
 	Params.InsertChannelNames = makeNewChannelNames(Params.InsertChannelNames, suffix)
 	Params.SearchChannelNames = makeNewChannelNames(Params.SearchChannelNames, suffix)
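
makeNewChannelNames itself sits outside this hunk; given how refreshChannelNames calls it, it plausibly appends the random suffix to every name. A hedged sketch (the Sketch suffix marks it as a guess, not the real body):

	func makeNewChannelNamesSketch(names []string, suffix string) []string {
		out := make([]string, 0, len(names))
		for _, name := range names {
			out = append(out, name+suffix) // e.g. "insert" -> "insert-test-query-node42"
		}
		return out
	}
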
@@ -291,9 +291,9 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
 
 	inReduced := make([]bool, len(searchResults))
 	numSegment := int64(len(searchResults))
-	err = reduceSearchResults(searchResults, numSegment, inReduced)
-	if err != nil {
-		return err
+	err2 := reduceSearchResults(searchResults, numSegment, inReduced)
+	if err2 != nil {
+		return err2
 	}
 	err = fillTargetEntry(plan, searchResults, matchedSegments, inReduced)
 	if err != nil {
@@ -11,15 +11,24 @@ import (
 type BinlogReader struct {
 	magicNumber int32
 	descriptorEvent
-	buffer    *bytes.Buffer
-	eventList []*EventReader
-	isClose   bool
+	currentEventReader *EventReader
+	buffer             *bytes.Buffer
+	bufferLength       int
+	currentOffset      int32
+	isClose            bool
 }
 
 func (reader *BinlogReader) NextEventReader() (*EventReader, error) {
 	if reader.isClose {
 		return nil, errors.New("bin log reader is closed")
 	}
+	if reader.currentEventReader != nil {
+		reader.currentOffset = reader.currentEventReader.NextPosition
+		if err := reader.currentEventReader.Close(); err != nil {
+			return nil, err
+		}
+		reader.currentEventReader = nil
+	}
 	if reader.buffer.Len() <= 0 {
 		return nil, nil
 	}
@@ -27,14 +36,15 @@ func (reader *BinlogReader) NextEventReader() (*EventReader, error) {
 	if err != nil {
 		return nil, err
 	}
-	reader.eventList = append(reader.eventList, eventReader)
-	return eventReader, nil
+	reader.currentEventReader = eventReader
+	return reader.currentEventReader, nil
 }
 
 func (reader *BinlogReader) readMagicNumber() (int32, error) {
 	if err := binary.Read(reader.buffer, binary.LittleEndian, &reader.magicNumber); err != nil {
 		return -1, err
 	}
+	reader.currentOffset = 4
 	if reader.magicNumber != MagicNumber {
 		return -1, errors.New("parse magic number failed, expected: " + strconv.Itoa(int(MagicNumber)) +
 			", actual: " + strconv.Itoa(int(reader.magicNumber)))
@@ -45,6 +55,7 @@ func (reader *BinlogReader) readMagicNumber() (int32, error) {
 
 func (reader *BinlogReader) readDescriptorEvent() (*descriptorEvent, error) {
 	event, err := ReadDescriptorEvent(reader.buffer)
+	reader.currentOffset = event.NextPosition
 	if err != nil {
 		return nil, err
 	}
@@ -56,20 +67,20 @@ func (reader *BinlogReader) Close() error {
 	if reader.isClose {
 		return nil
 	}
-	for _, e := range reader.eventList {
-		if err := e.Close(); err != nil {
+	reader.isClose = true
+	if reader.currentEventReader != nil {
+		if err := reader.currentEventReader.Close(); err != nil {
 			return err
 		}
 	}
-	reader.isClose = true
 	return nil
 }
 
 func NewBinlogReader(data []byte) (*BinlogReader, error) {
 	reader := &BinlogReader{
 		buffer:       bytes.NewBuffer(data),
-		eventList:    []*EventReader{},
+		bufferLength: len(data),
 		isClose:      false,
 	}
 
 	if _, err := reader.readMagicNumber(); err != nil {
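
Taken together, these hunks change BinlogReader from accumulate-all (an eventList) to a cursor over one open event: NextEventReader closes the previous event before opening the next, and returns nil when the buffer is exhausted. A minimal consumption loop under the new contract, using only functions shown in this diff:

	func readAllEvents(data []byte) error {
		reader, err := NewBinlogReader(data)
		if err != nil {
			return err
		}
		defer reader.Close() // also closes a still-open event, per the new Close

		for {
			event, err := reader.NextEventReader()
			if err != nil {
				return err
			}
			if event == nil { // buffer exhausted: end of binlog
				return nil
			}
			// consume the event here, e.g. event.GetInt64FromPayload(),
			// before the next NextEventReader call invalidates it
		}
	}

Callers can no longer hold several EventReaders at once, which may be why the multi-event reader tests are removed in the next hunk.
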
@@ -241,812 +241,4 @@ func TestInsertBinlog(t *testing.T) {
 
 	assert.Equal(t, int(e2NxtPos), len(buf))
-
-	//read binlog
-	r, err := NewBinlogReader(buf)
-	assert.Nil(t, err)
-	event1, err := r.NextEventReader()
-	assert.Nil(t, err)
-	assert.NotNil(t, event1)
-	p1, err := event1.GetInt64FromPayload()
-	assert.Equal(t, p1, []int64{1, 2, 3, 4, 5, 6})
-	assert.Nil(t, err)
-	assert.Equal(t, event1.TypeCode, InsertEventType)
-	ed1, ok := (event1.eventData).(*insertEventData)
-	assert.True(t, ok)
-	assert.Equal(t, ed1.StartTimestamp, Timestamp(100))
-	assert.Equal(t, ed1.EndTimestamp, Timestamp(200))
-
-	event2, err := r.NextEventReader()
-	assert.Nil(t, err)
-	assert.NotNil(t, event2)
-	p2, err := event2.GetInt64FromPayload()
-	assert.Nil(t, err)
-	assert.Equal(t, p2, []int64{7, 8, 9, 10, 11, 12})
-	assert.Equal(t, event2.TypeCode, InsertEventType)
-	ed2, ok := (event2.eventData).(*insertEventData)
-	assert.True(t, ok)
-	_, ok = (event2.eventData).(*deleteEventData)
-	assert.False(t, ok)
-	assert.Equal(t, ed2.StartTimestamp, Timestamp(300))
-	assert.Equal(t, ed2.EndTimestamp, Timestamp(400))
-}
-
-func TestDeleteBinlog(t *testing.T) {
-	w, err := NewDeleteBinlogWriter(schemapb.DataType_INT64, 50)
-	assert.Nil(t, err)
-
-	e1, err := w.NextDeleteEventWriter()
-	assert.Nil(t, err)
-	err = e1.AddDataToPayload([]int64{1, 2, 3})
-	assert.Nil(t, err)
-	err = e1.AddDataToPayload([]int32{4, 5, 6})
-	assert.NotNil(t, err)
-	err = e1.AddDataToPayload([]int64{4, 5, 6})
-	assert.Nil(t, err)
-	e1.SetStartTimestamp(100)
-	e1.SetEndTimestamp(200)
-
-	e2, err := w.NextDeleteEventWriter()
-	assert.Nil(t, err)
-	err = e2.AddDataToPayload([]int64{7, 8, 9})
-	assert.Nil(t, err)
-	err = e2.AddDataToPayload([]bool{true, false, true})
-	assert.NotNil(t, err)
-	err = e2.AddDataToPayload([]int64{10, 11, 12})
-	assert.Nil(t, err)
-	e2.SetStartTimestamp(300)
-	e2.SetEndTimestamp(400)
-
-	w.SetStartTimeStamp(1000)
-	w.SetEndTimeStamp(2000)
-
-	_, err = w.GetBuffer()
-	assert.NotNil(t, err)
-	err = w.Close()
-	assert.Nil(t, err)
-	buf, err := w.GetBuffer()
-	assert.Nil(t, err)
-
-	//magic number
-	magicNum := UnsafeReadInt32(buf, 0)
-	assert.Equal(t, magicNum, MagicNumber)
-	pos := int(unsafe.Sizeof(MagicNumber))
-
-	//descriptor header, timestamp
-	ts := UnsafeReadInt64(buf, pos)
-	assert.Greater(t, ts, int64(0))
-	curts := time.Now().UnixNano() / int64(time.Millisecond)
-	curts = int64(tsoutil.ComposeTS(curts, 0))
-	diffts := curts - ts
-	maxdiff := int64(tsoutil.ComposeTS(1000, 0))
-	assert.LessOrEqual(t, diffts, maxdiff)
-	pos += int(unsafe.Sizeof(ts))
-
-	//descriptor header, type code
-	tc := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, EventTypeCode(tc), DescriptorEventType)
-	pos += int(unsafe.Sizeof(tc))
-
-	//descriptor header, server id
-	svrID := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, svrID, int32(ServerID))
-	pos += int(unsafe.Sizeof(svrID))
-
-	//descriptor header, event length
-	descEventLen := UnsafeReadInt32(buf, pos)
-	pos += int(unsafe.Sizeof(descEventLen))
-
-	//descriptor header, next position
-	descNxtPos := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, descEventLen+int32(unsafe.Sizeof(MagicNumber)), descNxtPos)
-	pos += int(unsafe.Sizeof(descNxtPos))
-
-	//descriptor data fix, binlog version
-	binLogVer := UnsafeReadInt16(buf, pos)
-	assert.Equal(t, binLogVer, int16(BinlogVersion))
-	pos += int(unsafe.Sizeof(binLogVer))
-
-	//descriptor data fix, server version
-	svrVer := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, svrVer, int64(ServerVersion))
-	pos += int(unsafe.Sizeof(svrVer))
-
-	//descriptor data fix, commit id
-	cmitID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, cmitID, int64(CommitID))
-	pos += int(unsafe.Sizeof(cmitID))
-
-	//descriptor data fix, header length
-	headLen := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, headLen, int8(binary.Size(eventHeader{})))
-	pos += int(unsafe.Sizeof(headLen))
-
-	//descriptor data fix, collection id
-	collID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, collID, int64(50))
-	pos += int(unsafe.Sizeof(collID))
-
-	//descriptor data fix, partition id
-	partID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, partID, int64(-1))
-	pos += int(unsafe.Sizeof(partID))
-
-	//descriptor data fix, segment id
-	segID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, segID, int64(-1))
-	pos += int(unsafe.Sizeof(segID))
-
-	//descriptor data fix, field id
-	fieldID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, fieldID, int64(-1))
-	pos += int(unsafe.Sizeof(fieldID))
-
-	//descriptor data fix, start time stamp
-	startts := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, startts, int64(1000))
-	pos += int(unsafe.Sizeof(startts))
-
-	//descriptor data fix, end time stamp
-	endts := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, endts, int64(2000))
-	pos += int(unsafe.Sizeof(endts))
-
-	//descriptor data fix, payload type
-	colType := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, schemapb.DataType(colType), schemapb.DataType_INT64)
-	pos += int(unsafe.Sizeof(colType))
-
-	//descriptor data, post header lengths
-	for i := DescriptorEventType; i < EventTypeEnd; i++ {
-		size := getEventFixPartSize(i)
-		assert.Equal(t, uint8(size), buf[pos])
-		pos++
-	}
-
-	//start of e1
-	assert.Equal(t, pos, int(descNxtPos))
-
-	//insert e1 header, Timestamp
-	e1ts := UnsafeReadInt64(buf, pos)
-	diffts = curts - e1ts
-	assert.LessOrEqual(t, diffts, maxdiff)
-	pos += int(unsafe.Sizeof(e1ts))
-
-	//insert e1 header, type code
-	e1tc := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, EventTypeCode(e1tc), DeleteEventType)
-	pos += int(unsafe.Sizeof(e1tc))
-
-	//insert e1 header, Server id
-	e1svrID := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, e1svrID, int32(ServerID))
-	pos += int(unsafe.Sizeof(e1svrID))
-
-	//insert e1 header, event length
-	e1EventLen := UnsafeReadInt32(buf, pos)
-	pos += int(unsafe.Sizeof(e1EventLen))
-
-	//insert e1 header, next position
-	e1NxtPos := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, descNxtPos+e1EventLen, e1NxtPos)
-	pos += int(unsafe.Sizeof(descNxtPos))
-
-	//insert e1 data, start time stamp
-	e1st := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e1st, int64(100))
-	pos += int(unsafe.Sizeof(e1st))
-
-	//insert e1 data, end time stamp
-	e1et := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e1et, int64(200))
-	pos += int(unsafe.Sizeof(e1et))
-
-	//insert e1, payload
-	e1Payload := buf[pos:e1NxtPos]
-	e1r, err := NewPayloadReader(schemapb.DataType_INT64, e1Payload)
-	assert.Nil(t, err)
-	e1a, err := e1r.GetInt64FromPayload()
-	assert.Nil(t, err)
-	assert.Equal(t, e1a, []int64{1, 2, 3, 4, 5, 6})
-	err = e1r.Close()
-	assert.Nil(t, err)
-
-	//start of e2
-	pos = int(e1NxtPos)
-
-	//insert e2 header, Timestamp
-	e2ts := UnsafeReadInt64(buf, pos)
-	diffts = curts - e2ts
-	assert.LessOrEqual(t, diffts, maxdiff)
-	pos += int(unsafe.Sizeof(e2ts))
-
-	//insert e2 header, type code
-	e2tc := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, EventTypeCode(e2tc), DeleteEventType)
-	pos += int(unsafe.Sizeof(e2tc))
-
-	//insert e2 header, Server id
-	e2svrID := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, e2svrID, int32(ServerID))
-	pos += int(unsafe.Sizeof(e2svrID))
-
-	//insert e2 header, event length
-	e2EventLen := UnsafeReadInt32(buf, pos)
-	pos += int(unsafe.Sizeof(e2EventLen))
-
-	//insert e2 header, next position
-	e2NxtPos := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, e1NxtPos+e2EventLen, e2NxtPos)
-	pos += int(unsafe.Sizeof(descNxtPos))
-
-	//insert e2 data, start time stamp
-	e2st := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e2st, int64(300))
-	pos += int(unsafe.Sizeof(e2st))
-
-	//insert e2 data, end time stamp
-	e2et := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e2et, int64(400))
-	pos += int(unsafe.Sizeof(e2et))
-
-	//insert e2, payload
-	e2Payload := buf[pos:]
-	e2r, err := NewPayloadReader(schemapb.DataType_INT64, e2Payload)
-	assert.Nil(t, err)
-	e2a, err := e2r.GetInt64FromPayload()
-	assert.Nil(t, err)
-	assert.Equal(t, e2a, []int64{7, 8, 9, 10, 11, 12})
-	err = e2r.Close()
-	assert.Nil(t, err)
-
-	assert.Equal(t, int(e2NxtPos), len(buf))
-
-	//read binlog
-	r, err := NewBinlogReader(buf)
-	assert.Nil(t, err)
-	event1, err := r.NextEventReader()
-	assert.Nil(t, err)
-	assert.NotNil(t, event1)
-	p1, err := event1.GetInt64FromPayload()
-	assert.Equal(t, p1, []int64{1, 2, 3, 4, 5, 6})
-	assert.Nil(t, err)
-	assert.Equal(t, event1.TypeCode, DeleteEventType)
-	ed1, ok := (event1.eventData).(*deleteEventData)
-	assert.True(t, ok)
-	assert.Equal(t, ed1.StartTimestamp, Timestamp(100))
-	assert.Equal(t, ed1.EndTimestamp, Timestamp(200))
-
-	event2, err := r.NextEventReader()
-	assert.Nil(t, err)
-	assert.NotNil(t, event2)
-	p2, err := event2.GetInt64FromPayload()
-	assert.Nil(t, err)
-	assert.Equal(t, p2, []int64{7, 8, 9, 10, 11, 12})
-	assert.Equal(t, event2.TypeCode, DeleteEventType)
-	ed2, ok := (event2.eventData).(*deleteEventData)
-	assert.True(t, ok)
-	_, ok = (event2.eventData).(*insertEventData)
-	assert.False(t, ok)
-	assert.Equal(t, ed2.StartTimestamp, Timestamp(300))
-	assert.Equal(t, ed2.EndTimestamp, Timestamp(400))
-}
-
-func TestDDLBinlog1(t *testing.T) {
-	w, err := NewDDLBinlogWriter(schemapb.DataType_INT64, 50)
-	assert.Nil(t, err)
-
-	e1, err := w.NextCreateCollectionEventWriter()
-	assert.Nil(t, err)
-	err = e1.AddDataToPayload([]int64{1, 2, 3})
-	assert.Nil(t, err)
-	err = e1.AddDataToPayload([]int32{4, 5, 6})
-	assert.NotNil(t, err)
-	err = e1.AddDataToPayload([]int64{4, 5, 6})
-	assert.Nil(t, err)
-	e1.SetStartTimestamp(100)
-	e1.SetEndTimestamp(200)
-
-	e2, err := w.NextDropCollectionEventWriter()
-	assert.Nil(t, err)
-	err = e2.AddDataToPayload([]int64{7, 8, 9})
-	assert.Nil(t, err)
-	err = e2.AddDataToPayload([]bool{true, false, true})
-	assert.NotNil(t, err)
-	err = e2.AddDataToPayload([]int64{10, 11, 12})
-	assert.Nil(t, err)
-	e2.SetStartTimestamp(300)
-	e2.SetEndTimestamp(400)
-
-	w.SetStartTimeStamp(1000)
-	w.SetEndTimeStamp(2000)
-
-	_, err = w.GetBuffer()
-	assert.NotNil(t, err)
-	err = w.Close()
-	assert.Nil(t, err)
-	buf, err := w.GetBuffer()
-	assert.Nil(t, err)
-
-	//magic number
-	magicNum := UnsafeReadInt32(buf, 0)
-	assert.Equal(t, magicNum, MagicNumber)
-	pos := int(unsafe.Sizeof(MagicNumber))
-
-	//descriptor header, timestamp
-	ts := UnsafeReadInt64(buf, pos)
-	assert.Greater(t, ts, int64(0))
-	curts := time.Now().UnixNano() / int64(time.Millisecond)
-	curts = int64(tsoutil.ComposeTS(curts, 0))
-	diffts := curts - ts
-	maxdiff := int64(tsoutil.ComposeTS(1000, 0))
-	assert.LessOrEqual(t, diffts, maxdiff)
-	pos += int(unsafe.Sizeof(ts))
-
-	//descriptor header, type code
-	tc := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, EventTypeCode(tc), DescriptorEventType)
-	pos += int(unsafe.Sizeof(tc))
-
-	//descriptor header, server id
-	svrID := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, svrID, int32(ServerID))
-	pos += int(unsafe.Sizeof(svrID))
-
-	//descriptor header, event length
-	descEventLen := UnsafeReadInt32(buf, pos)
-	pos += int(unsafe.Sizeof(descEventLen))
-
-	//descriptor header, next position
-	descNxtPos := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, descEventLen+int32(unsafe.Sizeof(MagicNumber)), descNxtPos)
-	pos += int(unsafe.Sizeof(descNxtPos))
-
-	//descriptor data fix, binlog version
-	binLogVer := UnsafeReadInt16(buf, pos)
-	assert.Equal(t, binLogVer, int16(BinlogVersion))
-	pos += int(unsafe.Sizeof(binLogVer))
-
-	//descriptor data fix, server version
-	svrVer := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, svrVer, int64(ServerVersion))
-	pos += int(unsafe.Sizeof(svrVer))
-
-	//descriptor data fix, commit id
-	cmitID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, cmitID, int64(CommitID))
-	pos += int(unsafe.Sizeof(cmitID))
-
-	//descriptor data fix, header length
-	headLen := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, headLen, int8(binary.Size(eventHeader{})))
-	pos += int(unsafe.Sizeof(headLen))
-
-	//descriptor data fix, collection id
-	collID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, collID, int64(50))
-	pos += int(unsafe.Sizeof(collID))
-
-	//descriptor data fix, partition id
-	partID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, partID, int64(-1))
-	pos += int(unsafe.Sizeof(partID))
-
-	//descriptor data fix, segment id
-	segID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, segID, int64(-1))
-	pos += int(unsafe.Sizeof(segID))
-
-	//descriptor data fix, field id
-	fieldID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, fieldID, int64(-1))
-	pos += int(unsafe.Sizeof(fieldID))
-
-	//descriptor data fix, start time stamp
-	startts := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, startts, int64(1000))
-	pos += int(unsafe.Sizeof(startts))
-
-	//descriptor data fix, end time stamp
-	endts := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, endts, int64(2000))
-	pos += int(unsafe.Sizeof(endts))
-
-	//descriptor data fix, payload type
-	colType := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, schemapb.DataType(colType), schemapb.DataType_INT64)
-	pos += int(unsafe.Sizeof(colType))
-
-	//descriptor data, post header lengths
-	for i := DescriptorEventType; i < EventTypeEnd; i++ {
-		size := getEventFixPartSize(i)
-		assert.Equal(t, uint8(size), buf[pos])
-		pos++
-	}
-
-	//start of e1
-	assert.Equal(t, pos, int(descNxtPos))
-
-	//insert e1 header, Timestamp
-	e1ts := UnsafeReadInt64(buf, pos)
-	diffts = curts - e1ts
-	assert.LessOrEqual(t, diffts, maxdiff)
-	pos += int(unsafe.Sizeof(e1ts))
-
-	//insert e1 header, type code
-	e1tc := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, EventTypeCode(e1tc), CreateCollectionEventType)
-	pos += int(unsafe.Sizeof(e1tc))
-
-	//insert e1 header, Server id
-	e1svrID := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, e1svrID, int32(ServerID))
-	pos += int(unsafe.Sizeof(e1svrID))
-
-	//insert e1 header, event length
-	e1EventLen := UnsafeReadInt32(buf, pos)
-	pos += int(unsafe.Sizeof(e1EventLen))
-
-	//insert e1 header, next position
-	e1NxtPos := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, descNxtPos+e1EventLen, e1NxtPos)
-	pos += int(unsafe.Sizeof(descNxtPos))
-
-	//insert e1 data, start time stamp
-	e1st := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e1st, int64(100))
-	pos += int(unsafe.Sizeof(e1st))
-
-	//insert e1 data, end time stamp
-	e1et := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e1et, int64(200))
-	pos += int(unsafe.Sizeof(e1et))
-
-	//insert e1, payload
-	e1Payload := buf[pos:e1NxtPos]
-	e1r, err := NewPayloadReader(schemapb.DataType_INT64, e1Payload)
-	assert.Nil(t, err)
-	e1a, err := e1r.GetInt64FromPayload()
-	assert.Nil(t, err)
-	assert.Equal(t, e1a, []int64{1, 2, 3, 4, 5, 6})
-	err = e1r.Close()
-	assert.Nil(t, err)
-
-	//start of e2
-	pos = int(e1NxtPos)
-
-	//insert e2 header, Timestamp
-	e2ts := UnsafeReadInt64(buf, pos)
-	diffts = curts - e2ts
-	assert.LessOrEqual(t, diffts, maxdiff)
-	pos += int(unsafe.Sizeof(e2ts))
-
-	//insert e2 header, type code
-	e2tc := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, EventTypeCode(e2tc), DropCollectionEventType)
-	pos += int(unsafe.Sizeof(e2tc))
-
-	//insert e2 header, Server id
-	e2svrID := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, e2svrID, int32(ServerID))
-	pos += int(unsafe.Sizeof(e2svrID))
-
-	//insert e2 header, event length
-	e2EventLen := UnsafeReadInt32(buf, pos)
-	pos += int(unsafe.Sizeof(e2EventLen))
-
-	//insert e2 header, next position
-	e2NxtPos := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, e1NxtPos+e2EventLen, e2NxtPos)
-	pos += int(unsafe.Sizeof(descNxtPos))
-
-	//insert e2 data, start time stamp
-	e2st := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e2st, int64(300))
-	pos += int(unsafe.Sizeof(e2st))
-
-	//insert e2 data, end time stamp
-	e2et := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e2et, int64(400))
-	pos += int(unsafe.Sizeof(e2et))
-
-	//insert e2, payload
-	e2Payload := buf[pos:]
-	e2r, err := NewPayloadReader(schemapb.DataType_INT64, e2Payload)
-	assert.Nil(t, err)
-	e2a, err := e2r.GetInt64FromPayload()
-	assert.Nil(t, err)
-	assert.Equal(t, e2a, []int64{7, 8, 9, 10, 11, 12})
-	err = e2r.Close()
-	assert.Nil(t, err)
-
-	assert.Equal(t, int(e2NxtPos), len(buf))
-
-	//read binlog
-	r, err := NewBinlogReader(buf)
-	assert.Nil(t, err)
-	event1, err := r.NextEventReader()
-	assert.Nil(t, err)
-	assert.NotNil(t, event1)
-	p1, err := event1.GetInt64FromPayload()
-	assert.Equal(t, p1, []int64{1, 2, 3, 4, 5, 6})
-	assert.Nil(t, err)
-	assert.Equal(t, event1.TypeCode, CreateCollectionEventType)
-	ed1, ok := (event1.eventData).(*createCollectionEventData)
-	assert.True(t, ok)
-	assert.Equal(t, ed1.StartTimestamp, Timestamp(100))
-	assert.Equal(t, ed1.EndTimestamp, Timestamp(200))
-
-	event2, err := r.NextEventReader()
-	assert.Nil(t, err)
-	assert.NotNil(t, event2)
-	p2, err := event2.GetInt64FromPayload()
-	assert.Nil(t, err)
-	assert.Equal(t, p2, []int64{7, 8, 9, 10, 11, 12})
-	assert.Equal(t, event2.TypeCode, DropCollectionEventType)
-	ed2, ok := (event2.eventData).(*dropCollectionEventData)
-	assert.True(t, ok)
-	_, ok = (event2.eventData).(*insertEventData)
-	assert.False(t, ok)
-	assert.Equal(t, ed2.StartTimestamp, Timestamp(300))
-	assert.Equal(t, ed2.EndTimestamp, Timestamp(400))
-}
-
-func TestDDLBinlog2(t *testing.T) {
-	w, err := NewDDLBinlogWriter(schemapb.DataType_INT64, 50)
-	assert.Nil(t, err)
-
-	e1, err := w.NextCreatePartitionEventWriter()
-	assert.Nil(t, err)
-	err = e1.AddDataToPayload([]int64{1, 2, 3})
-	assert.Nil(t, err)
-	err = e1.AddDataToPayload([]int32{4, 5, 6})
-	assert.NotNil(t, err)
-	err = e1.AddDataToPayload([]int64{4, 5, 6})
-	assert.Nil(t, err)
-	e1.SetStartTimestamp(100)
-	e1.SetEndTimestamp(200)
-
-	e2, err := w.NextDropPartitionEventWriter()
-	assert.Nil(t, err)
-	err = e2.AddDataToPayload([]int64{7, 8, 9})
-	assert.Nil(t, err)
-	err = e2.AddDataToPayload([]bool{true, false, true})
-	assert.NotNil(t, err)
-	err = e2.AddDataToPayload([]int64{10, 11, 12})
-	assert.Nil(t, err)
-	e2.SetStartTimestamp(300)
-	e2.SetEndTimestamp(400)
-
-	w.SetStartTimeStamp(1000)
-	w.SetEndTimeStamp(2000)
-
-	_, err = w.GetBuffer()
-	assert.NotNil(t, err)
-	err = w.Close()
-	assert.Nil(t, err)
-	buf, err := w.GetBuffer()
-	assert.Nil(t, err)
-
-	//magic number
-	magicNum := UnsafeReadInt32(buf, 0)
-	assert.Equal(t, magicNum, MagicNumber)
-	pos := int(unsafe.Sizeof(MagicNumber))
-
-	//descriptor header, timestamp
-	ts := UnsafeReadInt64(buf, pos)
-	assert.Greater(t, ts, int64(0))
-	curts := time.Now().UnixNano() / int64(time.Millisecond)
-	curts = int64(tsoutil.ComposeTS(curts, 0))
-	diffts := curts - ts
-	maxdiff := int64(tsoutil.ComposeTS(1000, 0))
-	assert.LessOrEqual(t, diffts, maxdiff)
-	pos += int(unsafe.Sizeof(ts))
-
-	//descriptor header, type code
-	tc := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, EventTypeCode(tc), DescriptorEventType)
-	pos += int(unsafe.Sizeof(tc))
-
-	//descriptor header, server id
-	svrID := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, svrID, int32(ServerID))
-	pos += int(unsafe.Sizeof(svrID))
-
-	//descriptor header, event length
-	descEventLen := UnsafeReadInt32(buf, pos)
-	pos += int(unsafe.Sizeof(descEventLen))
-
-	//descriptor header, next position
-	descNxtPos := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, descEventLen+int32(unsafe.Sizeof(MagicNumber)), descNxtPos)
-	pos += int(unsafe.Sizeof(descNxtPos))
-
-	//descriptor data fix, binlog version
-	binLogVer := UnsafeReadInt16(buf, pos)
-	assert.Equal(t, binLogVer, int16(BinlogVersion))
-	pos += int(unsafe.Sizeof(binLogVer))
-
-	//descriptor data fix, server version
-	svrVer := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, svrVer, int64(ServerVersion))
-	pos += int(unsafe.Sizeof(svrVer))
-
-	//descriptor data fix, commit id
-	cmitID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, cmitID, int64(CommitID))
-	pos += int(unsafe.Sizeof(cmitID))
-
-	//descriptor data fix, header length
-	headLen := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, headLen, int8(binary.Size(eventHeader{})))
-	pos += int(unsafe.Sizeof(headLen))
-
-	//descriptor data fix, collection id
-	collID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, collID, int64(50))
-	pos += int(unsafe.Sizeof(collID))
-
-	//descriptor data fix, partition id
-	partID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, partID, int64(-1))
-	pos += int(unsafe.Sizeof(partID))
-
-	//descriptor data fix, segment id
-	segID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, segID, int64(-1))
-	pos += int(unsafe.Sizeof(segID))
-
-	//descriptor data fix, field id
-	fieldID := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, fieldID, int64(-1))
-	pos += int(unsafe.Sizeof(fieldID))
-
-	//descriptor data fix, start time stamp
-	startts := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, startts, int64(1000))
-	pos += int(unsafe.Sizeof(startts))
-
-	//descriptor data fix, end time stamp
-	endts := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, endts, int64(2000))
-	pos += int(unsafe.Sizeof(endts))
-
-	//descriptor data fix, payload type
-	colType := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, schemapb.DataType(colType), schemapb.DataType_INT64)
-	pos += int(unsafe.Sizeof(colType))
-
-	//descriptor data, post header lengths
-	for i := DescriptorEventType; i < EventTypeEnd; i++ {
-		size := getEventFixPartSize(i)
-		assert.Equal(t, uint8(size), buf[pos])
-		pos++
-	}
-
-	//start of e1
-	assert.Equal(t, pos, int(descNxtPos))
-
-	//insert e1 header, Timestamp
-	e1ts := UnsafeReadInt64(buf, pos)
-	diffts = curts - e1ts
-	assert.LessOrEqual(t, diffts, maxdiff)
-	pos += int(unsafe.Sizeof(e1ts))
-
-	//insert e1 header, type code
-	e1tc := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, EventTypeCode(e1tc), CreatePartitionEventType)
-	pos += int(unsafe.Sizeof(e1tc))
-
-	//insert e1 header, Server id
-	e1svrID := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, e1svrID, int32(ServerID))
-	pos += int(unsafe.Sizeof(e1svrID))
-
-	//insert e1 header, event length
-	e1EventLen := UnsafeReadInt32(buf, pos)
-	pos += int(unsafe.Sizeof(e1EventLen))
-
-	//insert e1 header, next position
-	e1NxtPos := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, descNxtPos+e1EventLen, e1NxtPos)
-	pos += int(unsafe.Sizeof(descNxtPos))
-
-	//insert e1 data, start time stamp
-	e1st := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e1st, int64(100))
-	pos += int(unsafe.Sizeof(e1st))
-
-	//insert e1 data, end time stamp
-	e1et := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e1et, int64(200))
-	pos += int(unsafe.Sizeof(e1et))
-
-	//insert e1, payload
-	e1Payload := buf[pos:e1NxtPos]
-	e1r, err := NewPayloadReader(schemapb.DataType_INT64, e1Payload)
-	assert.Nil(t, err)
-	e1a, err := e1r.GetInt64FromPayload()
-	assert.Nil(t, err)
-	assert.Equal(t, e1a, []int64{1, 2, 3, 4, 5, 6})
-	err = e1r.Close()
-	assert.Nil(t, err)
-
-	//start of e2
-	pos = int(e1NxtPos)
-
-	//insert e2 header, Timestamp
-	e2ts := UnsafeReadInt64(buf, pos)
-	diffts = curts - e2ts
-	assert.LessOrEqual(t, diffts, maxdiff)
-	pos += int(unsafe.Sizeof(e2ts))
-
-	//insert e2 header, type code
-	e2tc := UnsafeReadInt8(buf, pos)
-	assert.Equal(t, EventTypeCode(e2tc), DropPartitionEventType)
-	pos += int(unsafe.Sizeof(e2tc))
-
-	//insert e2 header, Server id
-	e2svrID := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, e2svrID, int32(ServerID))
-	pos += int(unsafe.Sizeof(e2svrID))
-
-	//insert e2 header, event length
-	e2EventLen := UnsafeReadInt32(buf, pos)
-	pos += int(unsafe.Sizeof(e2EventLen))
-
-	//insert e2 header, next position
-	e2NxtPos := UnsafeReadInt32(buf, pos)
-	assert.Equal(t, e1NxtPos+e2EventLen, e2NxtPos)
-	pos += int(unsafe.Sizeof(descNxtPos))
-
-	//insert e2 data, start time stamp
-	e2st := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e2st, int64(300))
-	pos += int(unsafe.Sizeof(e2st))
-
-	//insert e2 data, end time stamp
-	e2et := UnsafeReadInt64(buf, pos)
-	assert.Equal(t, e2et, int64(400))
-	pos += int(unsafe.Sizeof(e2et))
-
-	//insert e2, payload
-	e2Payload := buf[pos:]
-	e2r, err := NewPayloadReader(schemapb.DataType_INT64, e2Payload)
-	assert.Nil(t, err)
-	e2a, err := e2r.GetInt64FromPayload()
-	assert.Nil(t, err)
-	assert.Equal(t, e2a, []int64{7, 8, 9, 10, 11, 12})
-	err = e2r.Close()
-	assert.Nil(t, err)
-
-	assert.Equal(t, int(e2NxtPos), len(buf))
-
-	//read binlog
-	r, err := NewBinlogReader(buf)
-	assert.Nil(t, err)
-	event1, err := r.NextEventReader()
-	assert.Nil(t, err)
-	assert.NotNil(t, event1)
-	p1, err := event1.GetInt64FromPayload()
-	assert.Equal(t, p1, []int64{1, 2, 3, 4, 5, 6})
-	assert.Nil(t, err)
-	assert.Equal(t, event1.TypeCode, CreatePartitionEventType)
-	ed1, ok := (event1.eventData).(*createPartitionEventData)
-	assert.True(t, ok)
-	assert.Equal(t, ed1.StartTimestamp, Timestamp(100))
-	assert.Equal(t, ed1.EndTimestamp, Timestamp(200))
-
-	event2, err := r.NextEventReader()
-	assert.Nil(t, err)
-	assert.NotNil(t, event2)
-	p2, err := event2.GetInt64FromPayload()
-	assert.Nil(t, err)
-	assert.Equal(t, p2, []int64{7, 8, 9, 10, 11, 12})
-	assert.Equal(t, event2.TypeCode, DropPartitionEventType)
-	ed2, ok := (event2.eventData).(*dropPartitionEventData)
-	assert.True(t, ok)
-	_, ok = (event2.eventData).(*insertEventData)
-	assert.False(t, ok)
-	assert.Equal(t, ed2.StartTimestamp, Timestamp(300))
-	assert.Equal(t, ed2.EndTimestamp, Timestamp(400))
 }
@@ -223,13 +223,12 @@ func NewInsertBinlogWriter(dataType schemapb.DataType, collectionID, partitionID
 		},
 	}, nil
 }
-func NewDeleteBinlogWriter(dataType schemapb.DataType, collectionID int64) (*DeleteBinlogWriter, error) {
+func NewDeleteBinlogWriter(dataType schemapb.DataType) (*DeleteBinlogWriter, error) {
 	descriptorEvent, err := newDescriptorEvent()
 	if err != nil {
 		return nil, err
 	}
 	descriptorEvent.PayloadDataType = dataType
-	descriptorEvent.CollectionID = collectionID
 	return &DeleteBinlogWriter{
 		baseBinlogWriter: baseBinlogWriter{
 			descriptorEvent: *descriptorEvent,
@@ -240,13 +239,12 @@ func NewDeleteBinlogWriter(dataType schemapb.DataType, collectionID int64) (*Del
 		},
 	}, nil
 }
-func NewDDLBinlogWriter(dataType schemapb.DataType, collectionID int64) (*DDLBinlogWriter, error) {
+func NewDDLBinlogWriter(dataType schemapb.DataType) (*DDLBinlogWriter, error) {
 	descriptorEvent, err := newDescriptorEvent()
 	if err != nil {
 		return nil, err
 	}
 	descriptorEvent.PayloadDataType = dataType
-	descriptorEvent.CollectionID = collectionID
 	return &DDLBinlogWriter{
 		baseBinlogWriter: baseBinlogWriter{
 			descriptorEvent: *descriptorEvent,
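
With collectionID dropped from both constructors, callers pass only a payload type, and the descriptor event no longer records a collection for delete and DDL binlogs. A construction sketch under the new signatures (assuming the surrounding storage package's types are in scope):

	func newWritersSketch() error {
		deleteWriter, err := NewDeleteBinlogWriter(schemapb.DataType_INT64)
		if err != nil {
			return err
		}
		ddlWriter, err := NewDDLBinlogWriter(schemapb.DataType_STRING)
		if err != nil {
			return err
		}
		_, _ = deleteWriter, ddlWriter // hand these to event writers as before
		return nil
	}
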
@@ -54,5 +54,6 @@ func TestBinlogWriterReader(t *testing.T) {
 
 	reader, err := binlogReader.NextEventReader()
 	assert.Nil(t, err)
+	fmt.Println("reader offset : " + strconv.Itoa(int(binlogReader.currentOffset)))
 	assert.Nil(t, reader)
 }
@@ -354,7 +354,7 @@ type DataDefinitionCodec struct {
 }
 
 func (dataDefinitionCodec *DataDefinitionCodec) Serialize(ts []Timestamp, ddRequests []string, eventTypes []EventTypeCode) ([]*Blob, error) {
-	writer, err := NewDDLBinlogWriter(schemapb.DataType_STRING, dataDefinitionCodec.Schema.ID)
+	writer, err := NewDDLBinlogWriter(schemapb.DataType_STRING)
 	if err != nil {
 		return nil, err
 	}
@@ -426,7 +426,7 @@ func (dataDefinitionCodec *DataDefinitionCodec) Serialize(ts []Timestamp, ddRequ
 			value: buffer,
 		})
 
-	writer, err = NewDDLBinlogWriter(schemapb.DataType_INT64, dataDefinitionCodec.Schema.ID)
+	writer, err = NewDDLBinlogWriter(schemapb.DataType_INT64)
 	if err != nil {
 		return nil, err
 	}
@@ -37,15 +37,21 @@ func (dsService *dataSyncService) initNodes() {
 	dsService.fg = flowgraph.NewTimeTickedFlowGraph(dsService.ctx)
 
 	var dmStreamNode Node = newDmInputNode(dsService.ctx)
+	var ddStreamNode Node = newDDInputNode(dsService.ctx)
+
+	var ddNode Node = newDDNode()
 	var filterDmNode Node = newFilteredDmNode()
 	var insertBufferNode Node = newInsertBufferNode()
-	var serviceTimeNode Node = newServiceTimeNode()
 
 	dsService.fg.AddNode(&dmStreamNode)
-	dsService.fg.AddNode(&filterDmNode)
-	dsService.fg.AddNode(&insertBufferNode)
-	dsService.fg.AddNode(&serviceTimeNode)
+	dsService.fg.AddNode(&ddStreamNode)
+
+	dsService.fg.AddNode(&filterDmNode)
+	dsService.fg.AddNode(&ddNode)
+
+	dsService.fg.AddNode(&insertBufferNode)
 
+	// dmStreamNode
 	var err = dsService.fg.SetEdges(dmStreamNode.Name(),
 		[]string{},
 		[]string{filterDmNode.Name()},
@@ -54,27 +60,39 @@ func (dsService *dataSyncService) initNodes() {
 		log.Fatal("set edges failed in node:", dmStreamNode.Name())
 	}
 
+	// ddStreamNode
+	err = dsService.fg.SetEdges(ddStreamNode.Name(),
+		[]string{},
+		[]string{ddNode.Name()},
+	)
+	if err != nil {
+		log.Fatal("set edges failed in node:", ddStreamNode.Name())
+	}
+
+	// filterDmNode
 	err = dsService.fg.SetEdges(filterDmNode.Name(),
-		[]string{dmStreamNode.Name()},
+		[]string{dmStreamNode.Name(), ddNode.Name()},
 		[]string{insertBufferNode.Name()},
 	)
 	if err != nil {
 		log.Fatal("set edges failed in node:", filterDmNode.Name())
 	}
 
+	// ddNode
+	err = dsService.fg.SetEdges(ddNode.Name(),
+		[]string{ddStreamNode.Name()},
+		[]string{filterDmNode.Name()},
+	)
+	if err != nil {
+		log.Fatal("set edges failed in node:", ddNode.Name())
+	}
+
+	// insertBufferNode
 	err = dsService.fg.SetEdges(insertBufferNode.Name(),
 		[]string{filterDmNode.Name()},
-		[]string{serviceTimeNode.Name()},
+		[]string{},
 	)
 	if err != nil {
 		log.Fatal("set edges failed in node:", insertBufferNode.Name())
 	}
-
-	err = dsService.fg.SetEdges(serviceTimeNode.Name(),
-		[]string{insertBufferNode.Name()},
-		[]string{},
-	)
-	if err != nil {
-		log.Fatal("set edges failed in node:", serviceTimeNode.Name())
-	}
 }
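
For orientation, the edges above produce this write node topology; serviceTimeNode is gone, and filterDmNode gains a second upstream (a sketch, not from the commit):

	dmStreamNode ---------------------.
	                                   \
	ddStreamNode --> ddNode ------------+--> filterDmNode --> insertBufferNode

filterDmNode therefore receives in[0] from dmStreamNode and in[1] from ddNode, which matches the len(in) != 2 check introduced in filterDmNode.Operate later in this commit.
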
@@ -16,7 +16,6 @@ import (
 
 // NOTE: start pulsar before test
 func TestDataSyncService_Start(t *testing.T) {
-	Params.Init()
 	const ctxTimeInMillisecond = 200
 	const closeWithDeadline = true
 	var ctx context.Context
@@ -31,7 +30,7 @@ func TestDataSyncService_Start(t *testing.T) {
 	}
 
 	// init write node
-	pulsarURL, _ := Params.pulsarAddress()
+	pulsarURL := Params.PulsarAddress
 	node := NewWriteNode(ctx, 0)
 
 	// test data generate
|
@ -116,20 +115,30 @@ func TestDataSyncService_Start(t *testing.T) {
|
||||||
|
|
||||||
// pulsar produce
|
// pulsar produce
|
||||||
const receiveBufSize = 1024
|
const receiveBufSize = 1024
|
||||||
producerChannels := Params.insertChannelNames()
|
insertChannels := Params.InsertChannelNames
|
||||||
|
ddChannels := Params.DDChannelNames
|
||||||
|
|
||||||
insertStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
|
insertStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||||
insertStream.SetPulsarClient(pulsarURL)
|
insertStream.SetPulsarClient(pulsarURL)
|
||||||
insertStream.CreatePulsarProducers(producerChannels)
|
insertStream.CreatePulsarProducers(insertChannels)
|
||||||
|
|
||||||
|
ddStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||||
|
ddStream.SetPulsarClient(pulsarURL)
|
||||||
|
ddStream.CreatePulsarProducers(ddChannels)
|
||||||
|
|
||||||
var insertMsgStream msgstream.MsgStream = insertStream
|
var insertMsgStream msgstream.MsgStream = insertStream
|
||||||
insertMsgStream.Start()
|
insertMsgStream.Start()
|
||||||
|
|
||||||
|
var ddMsgStream msgstream.MsgStream = ddStream
|
||||||
|
ddMsgStream.Start()
|
||||||
|
|
||||||
err := insertMsgStream.Produce(&msgPack)
|
err := insertMsgStream.Produce(&msgPack)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
err = insertMsgStream.Broadcast(&timeTickMsgPack)
|
err = insertMsgStream.Broadcast(&timeTickMsgPack)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
err = ddMsgStream.Broadcast(&timeTickMsgPack)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
// dataSync
|
// dataSync
|
||||||
node.dataSyncService = newDataSyncService(node.ctx)
|
node.dataSyncService = newDataSyncService(node.ctx)
|
||||||
|
|
|
@@ -0,0 +1,47 @@
+package writenode
+
+import (
+	"errors"
+	"strconv"
+)
+
+type ddBuffer struct {
+	collectionBuffer map[UniqueID]interface{}
+	partitionBuffer  map[UniqueID]interface{}
+}
+
+func (d *ddBuffer) addCollection(collectionID UniqueID) error {
+	if _, ok := d.collectionBuffer[collectionID]; !ok {
+		return errors.New("collection " + strconv.FormatInt(collectionID, 10) + " is already exists")
+	}
+
+	d.collectionBuffer[collectionID] = nil
+	return nil
+}
+
+func (d *ddBuffer) removeCollection(collectionID UniqueID) error {
+	if _, ok := d.collectionBuffer[collectionID]; !ok {
+		return errors.New("cannot found collection " + strconv.FormatInt(collectionID, 10))
+	}
+
+	delete(d.collectionBuffer, collectionID)
+	return nil
+}
+
+func (d *ddBuffer) addPartition(partitionID UniqueID) error {
+	if _, ok := d.partitionBuffer[partitionID]; !ok {
+		return errors.New("partition " + strconv.FormatInt(partitionID, 10) + " is already exists")
+	}
+
+	d.partitionBuffer[partitionID] = nil
+	return nil
+}
+
+func (d *ddBuffer) removePartition(partitionID UniqueID) error {
+	if _, ok := d.partitionBuffer[partitionID]; !ok {
+		return errors.New("cannot found partition " + strconv.FormatInt(partitionID, 10))
+	}
+
+	delete(d.partitionBuffer, partitionID)
+	return nil
+}
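
One thing to watch in the new file: the two add methods key their guard on !ok, so addCollection returns its "is already exists" error precisely when the ID is absent, while the remove guards use !ok in the conventional way. Whether that inversion is intended is not stated in the commit; for contrast, the usual insert-if-absent guard reads (illustration only, not part of the commit):

	func (d *ddBuffer) addCollectionChecked(collectionID UniqueID) error {
		if _, ok := d.collectionBuffer[collectionID]; ok { // key already present
			return errors.New("collection " + strconv.FormatInt(collectionID, 10) + " already exists")
		}
		d.collectionBuffer[collectionID] = nil
		return nil
	}
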
@ -0,0 +1,176 @@
|
||||||
|
package writenode
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
|
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||||
|
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
|
||||||
|
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ddNode struct {
|
||||||
|
BaseNode
|
||||||
|
ddMsg *ddMsg
|
||||||
|
ddBuffer *ddBuffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ddNode *ddNode) Name() string {
|
||||||
|
return "ddNode"
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
|
||||||
|
//fmt.Println("Do filterDmNode operation")
|
||||||
|
|
||||||
|
if len(in) != 1 {
|
||||||
|
log.Println("Invalid operate message input in ddNode, input length = ", len(in))
|
||||||
|
// TODO: add error handling
|
||||||
|
}
|
||||||
|
|
||||||
|
msMsg, ok := (*in[0]).(*MsgStreamMsg)
|
||||||
|
if !ok {
|
||||||
|
log.Println("type assertion failed for MsgStreamMsg")
|
||||||
|
// TODO: add error handling
|
||||||
|
}
|
||||||
|
|
||||||
|
var ddMsg = ddMsg{
|
||||||
|
collectionRecords: make(map[string][]metaOperateRecord),
|
||||||
|
partitionRecords: make(map[string][]metaOperateRecord),
|
||||||
|
timeRange: TimeRange{
|
||||||
|
timestampMin: msMsg.TimestampMin(),
|
||||||
|
timestampMax: msMsg.TimestampMax(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
ddNode.ddMsg = &ddMsg
|
||||||
|
|
||||||
|
// sort tsMessages
|
||||||
|
tsMessages := msMsg.TsMessages()
|
||||||
|
sort.Slice(tsMessages,
|
||||||
|
+		func(i, j int) bool {
+			return tsMessages[i].BeginTs() < tsMessages[j].BeginTs()
+		})
+
+	// do dd tasks
+	for _, msg := range tsMessages {
+		switch msg.Type() {
+		case internalPb.MsgType_kCreateCollection:
+			ddNode.createCollection(msg.(*msgstream.CreateCollectionMsg))
+		case internalPb.MsgType_kDropCollection:
+			ddNode.dropCollection(msg.(*msgstream.DropCollectionMsg))
+		case internalPb.MsgType_kCreatePartition:
+			ddNode.createPartition(msg.(*msgstream.CreatePartitionMsg))
+		case internalPb.MsgType_kDropPartition:
+			ddNode.dropPartition(msg.(*msgstream.DropPartitionMsg))
+		default:
+			log.Println("Non supporting message type:", msg.Type())
+		}
+	}
+
+	var res Msg = ddNode.ddMsg
+	return []*Msg{&res}
+}
+
+func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
+	collectionID := msg.CollectionID
+
+	err := ddNode.ddBuffer.addCollection(collectionID)
+	if err != nil {
+		log.Println(err)
+		return
+	}
+
+	// TODO: add default partition?
+
+	var schema schemapb.CollectionSchema
+	err = proto.Unmarshal((*msg.Schema).Value, &schema)
+	if err != nil {
+		log.Println(err)
+		return
+	}
+	collectionName := schema.Name
+	ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
+		metaOperateRecord{
+			createOrDrop: true,
+			timestamp:    msg.Timestamp,
+		})
+
+	// TODO: write dd binlog
+}
+
+func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
+	collectionID := msg.CollectionID
+
+	err := ddNode.ddBuffer.removeCollection(collectionID)
+	if err != nil {
+		log.Println(err)
+		return
+	}
+
+	collectionName := msg.CollectionName.CollectionName
+	ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
+		metaOperateRecord{
+			createOrDrop: false,
+			timestamp:    msg.Timestamp,
+		})
+
+	// TODO: write dd binlog
+}
+
+func (ddNode *ddNode) createPartition(msg *msgstream.CreatePartitionMsg) {
+	partitionID := msg.PartitionID
+
+	err := ddNode.ddBuffer.addPartition(partitionID)
+	if err != nil {
+		log.Println(err)
+		return
+	}
+
+	partitionTag := msg.PartitionName.Tag
+	ddNode.ddMsg.partitionRecords[partitionTag] = append(ddNode.ddMsg.partitionRecords[partitionTag],
+		metaOperateRecord{
+			createOrDrop: true,
+			timestamp:    msg.Timestamp,
+		})
+
+	// TODO: write dd binlog
+}
+
+func (ddNode *ddNode) dropPartition(msg *msgstream.DropPartitionMsg) {
+	partitionID := msg.PartitionID
+
+	err := ddNode.ddBuffer.removePartition(partitionID)
+	if err != nil {
+		log.Println(err)
+		return
+	}
+
+	partitionTag := msg.PartitionName.Tag
+	ddNode.ddMsg.partitionRecords[partitionTag] = append(ddNode.ddMsg.partitionRecords[partitionTag],
+		metaOperateRecord{
+			createOrDrop: false,
+			timestamp:    msg.Timestamp,
+		})
+
+	// TODO: write dd binlog
+}
+
+func newDDNode() *ddNode {
+	maxQueueLength := Params.FlowGraphMaxQueueLength
+	maxParallelism := Params.FlowGraphMaxParallelism
+
+	baseNode := BaseNode{}
+	baseNode.SetMaxQueueLength(maxQueueLength)
+	baseNode.SetMaxParallelism(maxParallelism)
+
+	ddBuffer := &ddBuffer{
+		collectionBuffer: make(map[UniqueID]interface{}),
+		partitionBuffer:  make(map[UniqueID]interface{}),
+	}
+
+	return &ddNode{
+		BaseNode: baseNode,
+		ddBuffer: ddBuffer,
+	}
+}
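Note: the ddBuffer that newDDNode fills in is referenced above but not shown in this diff. A standalone sketch of what its add/remove methods could look like, assuming the two maps act as plain membership sets; the method bodies and error texts here are illustrative, not the committed implementation:

// Standalone sketch, not the committed code: a ddBuffer whose maps act
// as membership sets, consistent with how newDDNode initializes them and
// how create*/drop* above log an error and bail out.
package main

import (
	"errors"
	"fmt"
)

type UniqueID = int64 // mirrors the package's int64 ID alias

type ddBuffer struct {
	collectionBuffer map[UniqueID]interface{}
	partitionBuffer  map[UniqueID]interface{}
}

func (b *ddBuffer) addCollection(id UniqueID) error {
	if _, ok := b.collectionBuffer[id]; ok {
		return errors.New("collection already exists in ddBuffer")
	}
	b.collectionBuffer[id] = nil
	return nil
}

func (b *ddBuffer) removeCollection(id UniqueID) error {
	if _, ok := b.collectionBuffer[id]; !ok {
		return errors.New("cannot find collection in ddBuffer")
	}
	delete(b.collectionBuffer, id)
	return nil
}

func main() {
	b := &ddBuffer{
		collectionBuffer: make(map[UniqueID]interface{}),
		partitionBuffer:  make(map[UniqueID]interface{}),
	}
	fmt.Println(b.addCollection(1))    // <nil>
	fmt.Println(b.addCollection(1))    // duplicate -> error
	fmt.Println(b.removeCollection(1)) // <nil>
}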
@@ -4,11 +4,13 @@ import (
 	"log"
 
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
+	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
 )
 
 type filterDmNode struct {
 	BaseNode
+	ddMsg *ddMsg
 }
 
 func (fdmNode *filterDmNode) Name() string {
@@ -16,29 +18,40 @@ func (fdmNode *filterDmNode) Name() string {
 }
 
 func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
-	if len(in) != 1 {
+	//fmt.Println("Do filterDmNode operation")
+
+	if len(in) != 2 {
 		log.Println("Invalid operate message input in filterDmNode, input length = ", len(in))
 		// TODO: add error handling
 	}
 
-	msMsg, ok := (*in[0]).(*MsgStreamMsg)
+	msgStreamMsg, ok := (*in[0]).(*MsgStreamMsg)
 	if !ok {
 		log.Println("type assertion failed for MsgStreamMsg")
 		// TODO: add error handling
 	}
 
+	ddMsg, ok := (*in[1]).(*ddMsg)
+	if !ok {
+		log.Println("type assertion failed for ddMsg")
+		// TODO: add error handling
+	}
+	fdmNode.ddMsg = ddMsg
+
 	var iMsg = insertMsg{
 		insertMessages: make([]*msgstream.InsertMsg, 0),
 		timeRange: TimeRange{
-			timestampMin: msMsg.TimestampMin(),
-			timestampMax: msMsg.TimestampMax(),
+			timestampMin: msgStreamMsg.TimestampMin(),
+			timestampMax: msgStreamMsg.TimestampMax(),
 		},
 	}
-	for _, msg := range msMsg.TsMessages() {
+	for _, msg := range msgStreamMsg.TsMessages() {
 		switch msg.Type() {
 		case internalPb.MsgType_kInsert:
-			iMsg.insertMessages = append(iMsg.insertMessages, msg.(*msgstream.InsertMsg))
+			resMsg := fdmNode.filterInvalidInsertMessage(msg.(*msgstream.InsertMsg))
+			if resMsg != nil {
+				iMsg.insertMessages = append(iMsg.insertMessages, resMsg)
+			}
 		case internalPb.MsgType_kFlush:
 			iMsg.insertMessages = append(iMsg.insertMessages, msg.(*msgstream.InsertMsg))
 		// case internalPb.MsgType_kDelete:
@@ -52,9 +65,44 @@ func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
 	return []*Msg{&res}
 }
 
+func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg) *msgstream.InsertMsg {
+	// No dd record, do all insert requests.
+	records, ok := fdmNode.ddMsg.collectionRecords[msg.CollectionName]
+	if !ok {
+		return msg
+	}
+
+	// If the last record is drop type, all insert requests are invalid.
+	if !records[len(records)-1].createOrDrop {
+		return nil
+	}
+
+	// Filter insert requests before last record.
+	if len(msg.RowIDs) != len(msg.Timestamps) || len(msg.RowIDs) != len(msg.RowData) {
+		// TODO: what if the messages are misaligned? Here, we ignore those messages and print error
+		log.Println("Error, misaligned messages detected")
+		return nil
+	}
+	tmpTimestamps := make([]Timestamp, 0)
+	tmpRowIDs := make([]int64, 0)
+	tmpRowData := make([]*commonpb.Blob, 0)
+	targetTimestamp := records[len(records)-1].timestamp
+	for i, t := range msg.Timestamps {
+		if t >= targetTimestamp {
+			tmpTimestamps = append(tmpTimestamps, t)
+			tmpRowIDs = append(tmpRowIDs, msg.RowIDs[i])
+			tmpRowData = append(tmpRowData, msg.RowData[i])
+		}
+	}
+	msg.Timestamps = tmpTimestamps
+	msg.RowIDs = tmpRowIDs
+	msg.RowData = tmpRowData
+	return msg
+}
+
 func newFilteredDmNode() *filterDmNode {
-	maxQueueLength := Params.flowGraphMaxQueueLength()
-	maxParallelism := Params.flowGraphMaxParallelism()
+	maxQueueLength := Params.FlowGraphMaxQueueLength
+	maxParallelism := Params.FlowGraphMaxParallelism
 
 	baseNode := BaseNode{}
 	baseNode.SetMaxQueueLength(maxQueueLength)
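The filtering rule in filterInvalidInsertMessage is easiest to see on a timeline. The runnable sketch below reduces the per-row decision to a single predicate; the helper name keepRow is ours, not the commit's, and "no records" stands in for the map lookup missing a collection entirely:

// Sketch of filterInvalidInsertMessage's row-level rule: keep a row only
// if the collection's latest dd record is a create and the row is not
// older than that create.
package main

import "fmt"

type Timestamp = uint64

type metaOperateRecord struct {
	createOrDrop bool // create: true, drop: false
	timestamp    Timestamp
}

func keepRow(records []metaOperateRecord, t Timestamp) bool {
	if len(records) == 0 {
		return true // no dd record: accept everything
	}
	last := records[len(records)-1]
	return last.createOrDrop && t >= last.timestamp
}

func main() {
	// A collection created at ts 10, dropped at 20, re-created at 30.
	records := []metaOperateRecord{
		{createOrDrop: true, timestamp: 10},
		{createOrDrop: false, timestamp: 20},
		{createOrDrop: true, timestamp: 30},
	}
	for _, t := range []Timestamp{5, 25, 35} {
		fmt.Printf("row@%d kept: %v\n", t, keepRow(records, t))
	}
	// Only the row at ts 35 survives the re-create at ts 30.
}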
@@ -38,7 +38,7 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
 		// TODO: add error handling
 	}
 
-	iMsg, ok := (*in[0]).(*insertMsg)
+	_, ok := (*in[0]).(*insertMsg)
 	if !ok {
 		log.Println("type assertion failed for insertMsg")
 		// TODO: add error handling
@@ -64,18 +64,13 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
 	// 	log.Printf("t(%d) : %v ", task.Timestamps[0], task.RowData[0])
 	// }
 
-	var res Msg = &serviceTimeMsg{
-		timeRange: iMsg.timeRange,
-	}
 	// TODO
-	return []*Msg{&res}
+	return nil
 }
 
 func newInsertBufferNode() *insertBufferNode {
-	maxQueueLength := Params.flowGraphMaxQueueLength()
-	maxParallelism := Params.flowGraphMaxParallelism()
+	maxQueueLength := Params.FlowGraphMaxQueueLength
+	maxParallelism := Params.FlowGraphMaxParallelism
 
 	baseNode := BaseNode{}
 	baseNode.SetMaxQueueLength(maxQueueLength)
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
|
||||||
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
|
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -18,8 +17,17 @@ type (
|
||||||
timeRange TimeRange
|
timeRange TimeRange
|
||||||
}
|
}
|
||||||
|
|
||||||
schemaUpdateMsg struct {
|
ddMsg struct {
|
||||||
timeRange TimeRange
|
// TODO: use collection id
|
||||||
|
collectionRecords map[string][]metaOperateRecord
|
||||||
|
// TODO: use partition id
|
||||||
|
partitionRecords map[string][]metaOperateRecord
|
||||||
|
timeRange TimeRange
|
||||||
|
}
|
||||||
|
|
||||||
|
metaOperateRecord struct {
|
||||||
|
createOrDrop bool // create: true, drop: false
|
||||||
|
timestamp Timestamp
|
||||||
}
|
}
|
||||||
|
|
||||||
insertMsg struct {
|
insertMsg struct {
|
||||||
|
@ -32,34 +40,6 @@ type (
|
||||||
deleteMessages []*msgstream.DeleteMsg
|
deleteMessages []*msgstream.DeleteMsg
|
||||||
timeRange TimeRange
|
timeRange TimeRange
|
||||||
}
|
}
|
||||||
|
|
||||||
serviceTimeMsg struct {
|
|
||||||
timeRange TimeRange
|
|
||||||
}
|
|
||||||
|
|
||||||
InsertData struct {
|
|
||||||
insertIDs map[SegmentID][]UniqueID
|
|
||||||
insertTimestamps map[SegmentID][]Timestamp
|
|
||||||
insertRecords map[SegmentID][]*commonpb.Blob
|
|
||||||
insertOffset map[SegmentID]int64
|
|
||||||
}
|
|
||||||
|
|
||||||
DeleteData struct {
|
|
||||||
deleteIDs map[SegmentID][]UniqueID
|
|
||||||
deleteTimestamps map[SegmentID][]Timestamp
|
|
||||||
deleteOffset map[SegmentID]int64
|
|
||||||
}
|
|
||||||
|
|
||||||
DeleteRecord struct {
|
|
||||||
entityID UniqueID
|
|
||||||
timestamp Timestamp
|
|
||||||
segmentID UniqueID
|
|
||||||
}
|
|
||||||
|
|
||||||
DeletePreprocessData struct {
|
|
||||||
deleteRecords []*DeleteRecord
|
|
||||||
count int32
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func (ksMsg *key2SegMsg) TimeTick() Timestamp {
|
func (ksMsg *key2SegMsg) TimeTick() Timestamp {
|
||||||
|
@ -70,11 +50,11 @@ func (ksMsg *key2SegMsg) DownStreamNodeIdx() int {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (suMsg *schemaUpdateMsg) TimeTick() Timestamp {
|
func (suMsg *ddMsg) TimeTick() Timestamp {
|
||||||
return suMsg.timeRange.timestampMax
|
return suMsg.timeRange.timestampMax
|
||||||
}
|
}
|
||||||
|
|
||||||
func (suMsg *schemaUpdateMsg) DownStreamNodeIdx() int {
|
func (suMsg *ddMsg) DownStreamNodeIdx() int {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -93,11 +73,3 @@ func (dMsg *deleteMsg) TimeTick() Timestamp {
|
||||||
func (dMsg *deleteMsg) DownStreamNodeIdx() int {
|
func (dMsg *deleteMsg) DownStreamNodeIdx() int {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func (stMsg *serviceTimeMsg) TimeTick() Timestamp {
|
|
||||||
return stMsg.timeRange.timestampMax
|
|
||||||
}
|
|
||||||
|
|
||||||
func (stMsg *serviceTimeMsg) DownStreamNodeIdx() int {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
|
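Every message struct in this file satisfies the flow graph's message interface through the same TimeTick/DownStreamNodeIdx pair. A tiny self-contained sketch; the Msg interface shape here is inferred from the methods above, not copied from the flowgraph package:

package main

import "fmt"

type Timestamp = uint64

// Msg is inferred from the method set used in this file; the real
// interface lives in internal/util/flowgraph.
type Msg interface {
	TimeTick() Timestamp
	DownStreamNodeIdx() int
}

type TimeRange struct {
	timestampMin Timestamp
	timestampMax Timestamp
}

type ddMsg struct {
	timeRange TimeRange
}

func (m *ddMsg) TimeTick() Timestamp    { return m.timeRange.timestampMax }
func (m *ddMsg) DownStreamNodeIdx() int { return 0 }

var _ Msg = (*ddMsg)(nil) // compile-time interface check

func main() {
	m := &ddMsg{timeRange: TimeRange{timestampMin: 1, timestampMax: 9}}
	fmt.Println(m.TimeTick()) // 9: a message ticks at its max timestamp
}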
@@ -2,23 +2,19 @@ package writenode
 
 import (
 	"context"
-	"log"
 
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
 	"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
 )
 
 func newDmInputNode(ctx context.Context) *flowgraph.InputNode {
-	receiveBufSize := Params.insertReceiveBufSize()
-	pulsarBufSize := Params.insertPulsarBufSize()
+	receiveBufSize := Params.InsertReceiveBufSize
+	pulsarBufSize := Params.InsertPulsarBufSize
 
-	msgStreamURL, err := Params.pulsarAddress()
-	if err != nil {
-		log.Fatal(err)
-	}
+	msgStreamURL := Params.PulsarAddress
 
-	consumeChannels := Params.insertChannelNames()
-	consumeSubName := Params.msgChannelSubName()
+	consumeChannels := Params.InsertChannelNames
+	consumeSubName := Params.MsgChannelSubName
 
 	insertStream := msgstream.NewPulsarTtMsgStream(ctx, receiveBufSize)
@@ -31,9 +27,32 @@ func newDmInputNode(ctx context.Context) *flowgraph.InputNode {
 	var stream msgstream.MsgStream = insertStream
 
-	maxQueueLength := Params.flowGraphMaxQueueLength()
-	maxParallelism := Params.flowGraphMaxParallelism()
+	maxQueueLength := Params.FlowGraphMaxQueueLength
+	maxParallelism := Params.FlowGraphMaxParallelism
 
 	node := flowgraph.NewInputNode(&stream, "dmInputNode", maxQueueLength, maxParallelism)
 	return node
 }
+
+func newDDInputNode(ctx context.Context) *flowgraph.InputNode {
+	receiveBufSize := Params.DDReceiveBufSize
+	pulsarBufSize := Params.DDPulsarBufSize
+
+	msgStreamURL := Params.PulsarAddress
+
+	consumeChannels := Params.DDChannelNames
+	consumeSubName := Params.MsgChannelSubName
+
+	ddStream := msgstream.NewPulsarTtMsgStream(ctx, receiveBufSize)
+	ddStream.SetPulsarClient(msgStreamURL)
+	unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
+	ddStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
+
+	var stream msgstream.MsgStream = ddStream
+
+	maxQueueLength := Params.FlowGraphMaxQueueLength
+	maxParallelism := Params.FlowGraphMaxParallelism
+
+	node := flowgraph.NewInputNode(&stream, "ddInputNode", maxQueueLength, maxParallelism)
+	return node
+}
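A hedged sketch of how the two input nodes could feed the new ddNode and filterDmNode. The constructors are the ones added in this commit; Node, NewTimeTickedFlowGraph, AddNode, and SetEdges reflect our assumptions about the internal flowgraph package's API, and the actual wiring (in data_sync_service.go) is not part of this hunk:

// Illustrative wiring only, under the assumptions stated above.
func newDataSyncFlowGraphSketch(ctx context.Context) *flowgraph.TimeTickedFlowGraph {
	fg := flowgraph.NewTimeTickedFlowGraph(ctx)

	var dmInput Node = newDmInputNode(ctx) // consumes insert channels
	var ddInput Node = newDDInputNode(ctx) // consumes dd channels
	var dd Node = newDDNode()              // folds dd msgs into a ddMsg
	var filter Node = newFilteredDmNode()  // expects [MsgStreamMsg, ddMsg]

	fg.AddNode(&dmInput)
	fg.AddNode(&ddInput)
	fg.AddNode(&dd)
	fg.AddNode(&filter)

	// filterDmNode.Operate checks len(in) == 2: in[0] is the dm stream
	// message, in[1] is the ddMsg that ddNode produced.
	_ = fg.SetEdges(ddInput.Name(), []string{}, []string{dd.Name()})
	_ = fg.SetEdges(dmInput.Name(), []string{}, []string{filter.Name()})
	_ = fg.SetEdges(dd.Name(), []string{ddInput.Name()}, []string{filter.Name()})

	return fg
}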
@@ -1,46 +0,0 @@
-package writenode
-
-import (
-	"log"
-)
-
-type serviceTimeNode struct {
-	BaseNode
-}
-
-func (stNode *serviceTimeNode) Name() string {
-	return "stNode"
-}
-
-func (stNode *serviceTimeNode) Operate(in []*Msg) []*Msg {
-
-	if len(in) != 1 {
-		log.Println("Invalid operate message input in serviceTimeNode, input length = ", len(in))
-		// TODO: add error handling
-	}
-
-	// serviceTimeMsg, ok := (*in[0]).(*serviceTimeMsg)
-	_, ok := (*in[0]).(*serviceTimeMsg)
-	if !ok {
-		log.Println("type assertion failed for serviceTimeMsg")
-		// TODO: add error handling
-	}
-
-	// update service time
-	// (*(*stNode.replica).getTSafe()).set(serviceTimeMsg.timeRange.timestampMax)
-	// fmt.Println("update tSafe to:", getPhysicalTime(serviceTimeMsg.timeRange.timestampMax))
-	return nil
-}
-
-func newServiceTimeNode() *serviceTimeNode {
-	maxQueueLength := Params.flowGraphMaxQueueLength()
-	maxParallelism := Params.flowGraphMaxParallelism()
-
-	baseNode := BaseNode{}
-	baseNode.SetMaxQueueLength(maxQueueLength)
-	baseNode.SetMaxParallelism(maxParallelism)
-
-	return &serviceTimeNode{
-		BaseNode: baseNode,
-	}
-}
@@ -10,6 +10,30 @@ import (
 
 type ParamTable struct {
 	paramtable.BaseTable
+
+	PulsarAddress string
+
+	WriteNodeID                  UniqueID
+	WriteNodeNum                 int
+	WriteNodeTimeTickChannelName string
+
+	FlowGraphMaxQueueLength int32
+	FlowGraphMaxParallelism int32
+
+	// dm
+	InsertChannelNames   []string
+	InsertChannelRange   []int
+	InsertReceiveBufSize int64
+	InsertPulsarBufSize  int64
+
+	// dd
+	DDChannelNames   []string
+	DDReceiveBufSize int64
+	DDPulsarBufSize  int64
+
+	MsgChannelSubName   string
+	DefaultPartitionTag string
+	SliceIndex          int
 }
 
 var Params ParamTable
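The shape of the refactor in the hunks that follow: every former accessor method becomes an init*() call run once by Init(), and callers read plain struct fields afterwards. An illustrative usage sketch (not part of the commit; the example values in comments come from the test config below):

package writenode

import "fmt"

// Illustrative only: after Init(), configuration is plain field access
// instead of per-call parsing.
func ExampleParamsUsage() {
	Params.Init()

	fmt.Println(Params.PulsarAddress)           // e.g. "pulsar://...:6650"
	fmt.Println(Params.MsgChannelSubName)       // e.g. "writeNode-3"
	fmt.Println(Params.InsertChannelNames)      // this node's slice of insert channels
	fmt.Println(Params.FlowGraphMaxQueueLength) // parsed once as int32
}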
@@ -30,18 +54,35 @@ func (p *ParamTable) Init() {
 			writeNodeIDStr = strconv.Itoa(int(writeNodeIDList[0]))
 		}
 	}
-	p.Save("_writeNodeID", writeNodeIDStr)
-}
-
-func (p *ParamTable) pulsarAddress() (string, error) {
-	url, err := p.Load("_PulsarAddress")
+	err = p.Save("_writeNodeID", writeNodeIDStr)
 	if err != nil {
 		panic(err)
 	}
-	return url, nil
+
+	p.initPulsarAddress()
+
+	p.initWriteNodeID()
+	p.initWriteNodeNum()
+	p.initWriteNodeTimeTickChannelName()
+
+	p.initMsgChannelSubName()
+	p.initDefaultPartitionTag()
+	p.initSliceIndex()
+
+	p.initFlowGraphMaxQueueLength()
+	p.initFlowGraphMaxParallelism()
+
+	p.initInsertChannelNames()
+	p.initInsertChannelRange()
+	p.initInsertReceiveBufSize()
+	p.initInsertPulsarBufSize()
+
+	p.initDDChannelNames()
+	p.initDDReceiveBufSize()
+	p.initDDPulsarBufSize()
 }
 
-func (p *ParamTable) WriteNodeID() UniqueID {
+func (p *ParamTable) initWriteNodeID() {
 	writeNodeID, err := p.Load("_writeNodeID")
 	if err != nil {
 		panic(err)
@@ -50,187 +91,153 @@ func (p *ParamTable) WriteNodeID() UniqueID {
 	if err != nil {
 		panic(err)
 	}
-	return UniqueID(id)
+	p.WriteNodeID = UniqueID(id)
 }
 
-func (p *ParamTable) insertChannelRange() []int {
+func (p *ParamTable) initPulsarAddress() {
+	url, err := p.Load("_PulsarAddress")
+	if err != nil {
+		panic(err)
+	}
+	p.PulsarAddress = url
+}
+
+func (p *ParamTable) initInsertChannelRange() {
 	insertChannelRange, err := p.Load("msgChannel.channelRange.insert")
 	if err != nil {
 		panic(err)
 	}
-	return paramtable.ConvertRangeToIntRange(insertChannelRange, ",")
+	p.InsertChannelRange = paramtable.ConvertRangeToIntRange(insertChannelRange, ",")
 }
 
 // advanced params
-// stats
-func (p *ParamTable) statsPublishInterval() int {
-	return p.ParseInt("writeNode.stats.publishInterval")
-}
-
 // dataSync:
-func (p *ParamTable) flowGraphMaxQueueLength() int32 {
-	return p.ParseInt32("writeNode.dataSync.flowGraph.maxQueueLength")
+func (p *ParamTable) initFlowGraphMaxQueueLength() {
+	p.FlowGraphMaxQueueLength = p.ParseInt32("writeNode.dataSync.flowGraph.maxQueueLength")
 }
 
-func (p *ParamTable) flowGraphMaxParallelism() int32 {
-	return p.ParseInt32("writeNode.dataSync.flowGraph.maxParallelism")
+func (p *ParamTable) initFlowGraphMaxParallelism() {
+	p.FlowGraphMaxParallelism = p.ParseInt32("writeNode.dataSync.flowGraph.maxParallelism")
 }
 
 // msgStream
-func (p *ParamTable) insertReceiveBufSize() int64 {
-	return p.ParseInt64("writeNode.msgStream.insert.recvBufSize")
+func (p *ParamTable) initInsertReceiveBufSize() {
+	p.InsertReceiveBufSize = p.ParseInt64("writeNode.msgStream.insert.recvBufSize")
 }
 
-func (p *ParamTable) insertPulsarBufSize() int64 {
-	return p.ParseInt64("writeNode.msgStream.insert.pulsarBufSize")
+func (p *ParamTable) initInsertPulsarBufSize() {
+	p.InsertPulsarBufSize = p.ParseInt64("writeNode.msgStream.insert.pulsarBufSize")
 }
 
-func (p *ParamTable) searchReceiveBufSize() int64 {
-	return p.ParseInt64("writeNode.msgStream.search.recvBufSize")
-}
-
-func (p *ParamTable) searchPulsarBufSize() int64 {
-	return p.ParseInt64("writeNode.msgStream.search.pulsarBufSize")
-}
-
-func (p *ParamTable) searchResultReceiveBufSize() int64 {
-	return p.ParseInt64("writeNode.msgStream.searchResult.recvBufSize")
-}
-
-func (p *ParamTable) statsReceiveBufSize() int64 {
-	return p.ParseInt64("writeNode.msgStream.stats.recvBufSize")
-}
-
-func (p *ParamTable) etcdAddress() string {
-	etcdAddress, err := p.Load("_EtcdAddress")
+func (p *ParamTable) initDDReceiveBufSize() {
+	revBufSize, err := p.Load("writeNode.msgStream.dataDefinition.recvBufSize")
 	if err != nil {
 		panic(err)
 	}
-	return etcdAddress
+	bufSize, err := strconv.Atoi(revBufSize)
+	if err != nil {
+		panic(err)
+	}
+	p.DDReceiveBufSize = int64(bufSize)
 }
 
-func (p *ParamTable) metaRootPath() string {
-	rootPath, err := p.Load("etcd.rootPath")
+func (p *ParamTable) initDDPulsarBufSize() {
+	pulsarBufSize, err := p.Load("writeNode.msgStream.dataDefinition.pulsarBufSize")
 	if err != nil {
 		panic(err)
 	}
-	subPath, err := p.Load("etcd.metaSubPath")
+	bufSize, err := strconv.Atoi(pulsarBufSize)
 	if err != nil {
 		panic(err)
 	}
-	return rootPath + "/" + subPath
+	p.DDPulsarBufSize = int64(bufSize)
 }
 
-func (p *ParamTable) gracefulTime() int64 {
-	gracefulTime, err := p.Load("writeNode.gracefulTime")
-	if err != nil {
-		panic(err)
-	}
-	time, err := strconv.Atoi(gracefulTime)
-	if err != nil {
-		panic(err)
-	}
-	return int64(time)
-}
-
-func (p *ParamTable) insertChannelNames() []string {
+func (p *ParamTable) initInsertChannelNames() {
 	prefix, err := p.Load("msgChannel.chanNamePrefix.insert")
 	if err != nil {
 		log.Fatal(err)
 	}
+	prefix += "-"
 	channelRange, err := p.Load("msgChannel.channelRange.insert")
 	if err != nil {
 		panic(err)
 	}
 
 	channelIDs := paramtable.ConvertRangeToIntSlice(channelRange, ",")
 
 	var ret []string
 	for _, ID := range channelIDs {
 		ret = append(ret, prefix+strconv.Itoa(ID))
 	}
-	sep := len(channelIDs) / p.writeNodeNum()
-	index := p.sliceIndex()
+	sep := len(channelIDs) / p.WriteNodeNum
+	index := p.SliceIndex
 	if index == -1 {
 		panic("writeNodeID not Match with Config")
 	}
 	start := index * sep
-	return ret[start : start+sep]
+	p.InsertChannelNames = ret[start : start+sep]
 }
 
-func (p *ParamTable) searchChannelNames() []string {
-	prefix, err := p.Load("msgChannel.chanNamePrefix.search")
-	if err != nil {
-		log.Fatal(err)
-	}
-	channelRange, err := p.Load("msgChannel.channelRange.search")
-	if err != nil {
-		panic(err)
-	}
-
-	channelIDs := paramtable.ConvertRangeToIntSlice(channelRange, ",")
-
-	var ret []string
-	for _, ID := range channelIDs {
-		ret = append(ret, prefix+strconv.Itoa(ID))
-	}
-	return ret
-}
-
-func (p *ParamTable) searchResultChannelNames() []string {
-	prefix, err := p.Load("msgChannel.chanNamePrefix.searchResult")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	prefix += "-"
-	channelRange, err := p.Load("msgChannel.channelRange.searchResult")
-	if err != nil {
-		panic(err)
-	}
-
-	channelIDs := paramtable.ConvertRangeToIntSlice(channelRange, ",")
-
-	var ret []string
-	for _, ID := range channelIDs {
-		ret = append(ret, prefix+strconv.Itoa(ID))
-	}
-	return ret
-}
-
-func (p *ParamTable) msgChannelSubName() string {
+func (p *ParamTable) initMsgChannelSubName() {
 	// TODO: subName = namePrefix + "-" + writeNodeID, writeNodeID is assigned by master
 	name, err := p.Load("msgChannel.subNamePrefix.writeNodeSubNamePrefix")
 	if err != nil {
 		log.Panic(err)
 	}
-	writeNodeIDStr, err := p.Load("_WriteNodeID")
+	writeNodeIDStr, err := p.Load("_writeNodeID")
 	if err != nil {
 		panic(err)
 	}
-	return name + "-" + writeNodeIDStr
+	p.MsgChannelSubName = name + "-" + writeNodeIDStr
 }
 
-func (p *ParamTable) writeNodeTimeTickChannelName() string {
+func (p *ParamTable) initDDChannelNames() {
+	prefix, err := p.Load("msgChannel.chanNamePrefix.dataDefinition")
+	if err != nil {
+		panic(err)
+	}
+	prefix += "-"
+	iRangeStr, err := p.Load("msgChannel.channelRange.dataDefinition")
+	if err != nil {
+		panic(err)
+	}
+	channelIDs := paramtable.ConvertRangeToIntSlice(iRangeStr, ",")
+	var ret []string
+	for _, ID := range channelIDs {
+		ret = append(ret, prefix+strconv.Itoa(ID))
+	}
+	p.DDChannelNames = ret
+}
+
+func (p *ParamTable) initDefaultPartitionTag() {
+	defaultTag, err := p.Load("common.defaultPartitionTag")
+	if err != nil {
+		panic(err)
+	}
+
+	p.DefaultPartitionTag = defaultTag
+}
+
+func (p *ParamTable) initWriteNodeTimeTickChannelName() {
 	channels, err := p.Load("msgChannel.chanNamePrefix.writeNodeTimeTick")
 	if err != nil {
 		panic(err)
 	}
-	return channels
+	p.WriteNodeTimeTickChannelName = channels
 }
 
-func (p *ParamTable) sliceIndex() int {
-	writeNodeID := p.WriteNodeID()
+func (p *ParamTable) initSliceIndex() {
+	writeNodeID := p.WriteNodeID
 	writeNodeIDList := p.WriteNodeIDList()
 	for i := 0; i < len(writeNodeIDList); i++ {
 		if writeNodeID == writeNodeIDList[i] {
-			return i
+			p.SliceIndex = i
+			return
 		}
 	}
-	return -1
+	p.SliceIndex = -1
}
 
-func (p *ParamTable) writeNodeNum() int {
-	return len(p.WriteNodeIDList())
+func (p *ParamTable) initWriteNodeNum() {
+	p.WriteNodeNum = len(p.WriteNodeIDList())
 }
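A worked example of the channel-assignment arithmetic in initInsertChannelNames: every write node takes an equal, contiguous slice of the insert channels. The concrete numbers below mirror the test config (channel range [0,2) and a single write node); they are illustrative, not a hard-coded production layout:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	channelIDs := []int{0, 1} // from msgChannel.channelRange.insert = "0,2"
	writeNodeNum := 1         // len(writeNodeIDList)
	sliceIndex := 0           // this node's position in writeNodeIDList

	var names []string
	for _, id := range channelIDs {
		names = append(names, "insert-"+strconv.Itoa(id))
	}

	sep := len(channelIDs) / writeNodeNum // channels per node
	start := sliceIndex * sep
	fmt.Println(names[start : start+sep]) // [insert-0 insert-1]
}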
@@ -12,101 +12,58 @@ func TestParamTable_WriteNode(t *testing.T) {
 	Params.Init()
 
 	t.Run("Test PulsarAddress", func(t *testing.T) {
-		address, err := Params.pulsarAddress()
-		assert.NoError(t, err)
+		address := Params.PulsarAddress
 		split := strings.Split(address, ":")
 		assert.Equal(t, split[0], "pulsar")
 		assert.Equal(t, split[len(split)-1], "6650")
 	})
 
 	t.Run("Test WriteNodeID", func(t *testing.T) {
-		id := Params.WriteNodeID()
+		id := Params.WriteNodeID
 		assert.Equal(t, id, UniqueID(3))
 	})
 
 	t.Run("Test insertChannelRange", func(t *testing.T) {
-		channelRange := Params.insertChannelRange()
+		channelRange := Params.InsertChannelRange
 		assert.Equal(t, len(channelRange), 2)
 		assert.Equal(t, channelRange[0], 0)
 		assert.Equal(t, channelRange[1], 2)
 	})
 
-	t.Run("Test statsServiceTimeInterval", func(t *testing.T) {
-		interval := Params.statsPublishInterval()
-		assert.Equal(t, interval, 1000)
-	})
-
-	t.Run("Test statsMsgStreamReceiveBufSize", func(t *testing.T) {
-		bufSize := Params.statsReceiveBufSize()
-		assert.Equal(t, bufSize, int64(64))
-	})
-
 	t.Run("Test insertMsgStreamReceiveBufSize", func(t *testing.T) {
-		bufSize := Params.insertReceiveBufSize()
+		bufSize := Params.InsertReceiveBufSize
 		assert.Equal(t, bufSize, int64(1024))
 	})
 
-	t.Run("Test searchMsgStreamReceiveBufSize", func(t *testing.T) {
-		bufSize := Params.searchReceiveBufSize()
-		assert.Equal(t, bufSize, int64(512))
-	})
-
-	t.Run("Test searchResultMsgStreamReceiveBufSize", func(t *testing.T) {
-		bufSize := Params.searchResultReceiveBufSize()
-		assert.Equal(t, bufSize, int64(64))
-	})
-
-	t.Run("Test searchPulsarBufSize", func(t *testing.T) {
-		bufSize := Params.searchPulsarBufSize()
-		assert.Equal(t, bufSize, int64(512))
-	})
-
 	t.Run("Test insertPulsarBufSize", func(t *testing.T) {
-		bufSize := Params.insertPulsarBufSize()
+		bufSize := Params.InsertPulsarBufSize
 		assert.Equal(t, bufSize, int64(1024))
 	})
 
 	t.Run("Test flowGraphMaxQueueLength", func(t *testing.T) {
-		length := Params.flowGraphMaxQueueLength()
+		length := Params.FlowGraphMaxQueueLength
 		assert.Equal(t, length, int32(1024))
 	})
 
 	t.Run("Test flowGraphMaxParallelism", func(t *testing.T) {
-		maxParallelism := Params.flowGraphMaxParallelism()
+		maxParallelism := Params.FlowGraphMaxParallelism
 		assert.Equal(t, maxParallelism, int32(1024))
 	})
 
 	t.Run("Test insertChannelNames", func(t *testing.T) {
-		names := Params.insertChannelNames()
+		names := Params.InsertChannelNames
 		assert.Equal(t, len(names), 2)
-		assert.Equal(t, names[0], "insert0")
-		assert.Equal(t, names[1], "insert1")
-	})
-
-	t.Run("Test searchChannelNames", func(t *testing.T) {
-		names := Params.searchChannelNames()
-		assert.Equal(t, len(names), 1)
-		assert.Equal(t, names[0], "search0")
-	})
-
-	t.Run("Test searchResultChannelName", func(t *testing.T) {
-		names := Params.searchResultChannelNames()
-		assert.Equal(t, len(names), 1)
-		assert.Equal(t, names[0], "searchResult-0")
+		assert.Equal(t, names[0], "insert-0")
+		assert.Equal(t, names[1], "insert-1")
 	})
 
 	t.Run("Test msgChannelSubName", func(t *testing.T) {
-		name := Params.msgChannelSubName()
+		name := Params.MsgChannelSubName
 		assert.Equal(t, name, "writeNode-3")
 	})
 
 	t.Run("Test timeTickChannelName", func(t *testing.T) {
-		name := Params.writeNodeTimeTickChannelName()
+		name := Params.WriteNodeTimeTickChannelName
 		assert.Equal(t, name, "writeNodeTimeTick")
 	})
-
-	t.Run("Test metaRootPath", func(t *testing.T) {
-		path := Params.metaRootPath()
-		assert.Equal(t, path, "by-dev/meta")
-	})
 }
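Note the expected channel names change from "insert0"/"insert1" to "insert-0"/"insert-1": initInsertChannelNames now inserts a dash between the prefix and the channel ID (the prefix += "-" line in the param table hunk above), so the test expectations follow.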
@@ -0,0 +1,32 @@
+package writenode
+
+import (
+	"fmt"
+	"math/rand"
+	"os"
+	"strconv"
+	"testing"
+)
+
+func makeNewChannelNames(names []string, suffix string) []string {
+	var ret []string
+	for _, name := range names {
+		ret = append(ret, name+suffix)
+	}
+	return ret
+}
+
+func refreshChannelNames() {
+	suffix := "-test-write-node" + strconv.FormatInt(rand.Int63n(100), 10)
+	Params.DDChannelNames = makeNewChannelNames(Params.DDChannelNames, suffix)
+	Params.InsertChannelNames = makeNewChannelNames(Params.InsertChannelNames, suffix)
+}
+
+func TestMain(m *testing.M) {
+	Params.Init()
+	refreshChannelNames()
+	p := Params
+	fmt.Println(p)
+	exitCode := m.Run()
+	os.Exit(exitCode)
+}
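Each test run appends a random "-test-write-node<N>" suffix to the dd and insert channel names before the tests start, presumably so that repeated or concurrent test runs against a shared Pulsar broker do not consume one another's topics.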