package dataservice

import (
	"log"

	"golang.org/x/net/context"

	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
)
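
// proxyTimeTickWatcher and dataNodeTimeTickWatcher buffer incoming time tick
// messages on a channel and react to them in a background loop: the proxy
// watcher only expires stale segment allocations, while the data node watcher
// additionally seals expired segments and triggers flushes on the data node
// cluster.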
type (
	proxyTimeTickWatcher struct {
		allocator segmentAllocator
		msgQueue  chan *msgstream.TimeTickMsg
	}

	dataNodeTimeTickWatcher struct {
		meta      *meta
		cluster   *dataNodeCluster
		allocator segmentAllocator
		msgQueue  chan *msgstream.TimeTickMsg
	}
)
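
// newProxyTimeTickWatcher creates a proxy time tick watcher with a queue of
// capacity 1, so Watch blocks while a previous tick is still being handled.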
func newProxyTimeTickWatcher(allocator segmentAllocator) *proxyTimeTickWatcher {
	return &proxyTimeTickWatcher{
		allocator: allocator,
		msgQueue:  make(chan *msgstream.TimeTickMsg, 1),
	}
}
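
// StartBackgroundLoop consumes queued time ticks until ctx is canceled,
// expiring segment allocations against each tick's timestamp.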
func (watcher *proxyTimeTickWatcher) StartBackgroundLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			log.Println("proxy time tick watcher closed")
			return
		case msg := <-watcher.msgQueue:
			if err := watcher.allocator.ExpireAllocations(msg.Base.Timestamp); err != nil {
				log.Printf("expire allocations error: %s", err.Error())
			}
		}
	}
}
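
// Watch enqueues a time tick message for the background loop to process.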
func (watcher *proxyTimeTickWatcher) Watch(msg *msgstream.TimeTickMsg) {
	watcher.msgQueue <- msg
}
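
// newDataNodeTimeTickWatcher creates a data node time tick watcher that
// tracks segment meta, the segment allocator, and the data node cluster.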
func newDataNodeTimeTickWatcher(meta *meta, allocator segmentAllocator, cluster *dataNodeCluster) *dataNodeTimeTickWatcher {
	return &dataNodeTimeTickWatcher{
		meta:      meta,
		allocator: allocator,
		cluster:   cluster,
		msgQueue:  make(chan *msgstream.TimeTickMsg, 1),
	}
}
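
// Watch enqueues a time tick message for the background loop to process.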
func (watcher *dataNodeTimeTickWatcher) Watch(msg *msgstream.TimeTickMsg) {
	watcher.msgQueue <- msg
}
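
// StartBackgroundLoop consumes queued time ticks until ctx is canceled,
// handling each tick via handleTimeTickMsg.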
func (watcher *dataNodeTimeTickWatcher) StartBackgroundLoop(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			log.Println("data node time tick watcher closed")
			return
		case msg := <-watcher.msgQueue:
			if err := watcher.handleTimeTickMsg(msg); err != nil {
				log.Println(err.Error())
				continue
			}
		}
	}
}
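
// handleTimeTickMsg checks every segment the allocator reports as sealed;
// once all of a segment's allocations have expired at the tick's timestamp,
// it persists the sealed state in meta, asks the data node cluster to flush
// the segment, and drops it from the allocator. Per-segment errors are logged
// and skipped so one bad segment does not stall the rest.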
func (watcher *dataNodeTimeTickWatcher) handleTimeTickMsg(msg *msgstream.TimeTickMsg) error {
	segments, err := watcher.allocator.GetSealedSegments()
	if err != nil {
		return err
	}
	for _, id := range segments {
		expired, err := watcher.allocator.IsAllocationsExpired(id, msg.Base.Timestamp)
		if err != nil {
			log.Printf("check allocations expired error: %s", err.Error())
			continue
		}
		if expired {
			segmentInfo, err := watcher.meta.GetSegment(id)
			if err != nil {
				log.Println(err.Error())
				continue
			}
			// Mark the segment sealed in meta before triggering the flush.
			if err = watcher.meta.SetSegmentState(id, commonpb.SegmentState_SegmentSealed); err != nil {
				log.Println(err.Error())
				continue
			}
			watcher.cluster.FlushSegment(&datapb.FlushSegRequest{
				Base: &commonpb.MsgBase{
					MsgType:   commonpb.MsgType_kFlush,
					MsgID:     -1, // TODO: add msg id
					Timestamp: 0,  // TODO
					SourceID:  Params.NodeID,
				},
				CollectionID: segmentInfo.CollectionID,
				SegmentIDs:   []int64{segmentInfo.SegmentID},
			})
			// Stop tracking the flushed segment's allocations.
			watcher.allocator.DropSegment(id)
		}
	}
	return nil
}
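
// Usage sketch (illustrative only; the server code that owns the actual
// wiring is not part of this file, and the time tick stream API is assumed):
//
//	watcher := newDataNodeTimeTickWatcher(meta, allocator, cluster)
//	go watcher.StartBackgroundLoop(ctx)
//	// ...then, for every *msgstream.TimeTickMsg received from the
//	// time tick stream:
//	watcher.Watch(ttMsg)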