mirror of https://github.com/milvus-io/milvus.git
Add judgement when executing clang-format
Signed-off-by: cai.zhang <cai.zhang@zilliz.com>
pull/4973/head^2
parent e36b565ec0
commit 8ff82c2fd5
@@ -34,7 +34,7 @@ func main() {
 		cancel()
 	}()
 
-	if err := svr.Start(); err != nil {
+	if err := svr.Run(); err != nil {
 		log.Fatal("run server failed", zap.Error(err))
 	}
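For context, a minimal, self-contained sketch of the pattern this hunk touches: a server driven by Run() while a helper goroutine cancels the shared context. The server type and signal handling below are stand-ins, not the actual Milvus code.

    // Stand-in sketch: a server driven by Run() plus a goroutine that cancels
    // the shared context. None of these names are the real Milvus types.
    package main

    import (
        "context"
        "log"
        "os"
        "os/signal"
        "syscall"
    )

    type server struct{ ctx context.Context }

    // Run blocks until the server's context is cancelled.
    func (s *server) Run() error {
        <-s.ctx.Done()
        return nil
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())

        // Cancel the context on SIGINT/SIGTERM, mirroring the cancel() call
        // in the goroutine shown in the hunk above.
        sc := make(chan os.Signal, 1)
        signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM)
        go func() {
            <-sc
            cancel()
        }()

        svr := &server{ctx: ctx}
        if err := svr.Run(); err != nil {
            log.Fatal("run server failed: ", err)
        }
    }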
@@ -48,7 +48,7 @@ func (p *Proxy) Insert(ctx context.Context, in *servicepb.RowBatch) (*servicepb.
 		case <-ctx.Done():
 			return errors.New("insert timeout")
 		default:
-			return p.sched.DmQueue.Enqueue(it)
+			return p.taskSch.DmQueue.Enqueue(it)
 		}
 	}
 	err := fn()
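Each handler hunk below makes the same one-line change: the scheduler field is now called taskSch instead of sched. The surrounding closure is the proxy's enqueue-with-timeout pattern; here is a standalone sketch of it, with simplified stand-in queue and task types rather than the real Milvus definitions:

    // Stand-in sketch of the enqueue-with-timeout closure; taskQueue and task
    // are simplified types, not the Milvus definitions.
    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    type task struct{ id int64 }

    type taskQueue struct{ ch chan task }

    // Enqueue blocks until there is room in the queue.
    func (q *taskQueue) Enqueue(t task) error {
        q.ch <- t
        return nil
    }

    // enqueueWithTimeout mirrors the fn() closure in the handlers: if the
    // request context is already done, report a timeout instead of enqueueing.
    func enqueueWithTimeout(ctx context.Context, q *taskQueue, t task) error {
        select {
        case <-ctx.Done():
            return errors.New("insert timeout")
        default:
            return q.Enqueue(t)
        }
    }

    func main() {
        q := &taskQueue{ch: make(chan task, 1)}
        ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
        defer cancel()
        fmt.Println(enqueueWithTimeout(ctx, q, task{id: 1}))
    }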
@@ -96,7 +96,7 @@ func (p *Proxy) CreateCollection(ctx context.Context, req *schemapb.CollectionSc
 		case <-ctx.Done():
 			return errors.New("create collection timeout")
 		default:
-			return p.sched.DdQueue.Enqueue(cct)
+			return p.taskSch.DdQueue.Enqueue(cct)
 		}
 	}
 	err := fn()
@@ -144,7 +144,7 @@ func (p *Proxy) Search(ctx context.Context, req *servicepb.Query) (*servicepb.Qu
 		case <-ctx.Done():
 			return errors.New("create collection timeout")
 		default:
-			return p.sched.DqQueue.Enqueue(qt)
+			return p.taskSch.DqQueue.Enqueue(qt)
 		}
 	}
 	err := fn()
@@ -189,7 +189,7 @@ func (p *Proxy) DropCollection(ctx context.Context, req *servicepb.CollectionNam
 		case <-ctx.Done():
 			return errors.New("create collection timeout")
 		default:
-			return p.sched.DdQueue.Enqueue(dct)
+			return p.taskSch.DdQueue.Enqueue(dct)
 		}
 	}
 	err := fn()
@@ -230,7 +230,7 @@ func (p *Proxy) HasCollection(ctx context.Context, req *servicepb.CollectionName
 		case <-ctx.Done():
 			return errors.New("create collection timeout")
 		default:
-			return p.sched.DdQueue.Enqueue(hct)
+			return p.taskSch.DdQueue.Enqueue(hct)
 		}
 	}
 	err := fn()
@@ -275,7 +275,7 @@ func (p *Proxy) DescribeCollection(ctx context.Context, req *servicepb.Collectio
 		case <-ctx.Done():
 			return errors.New("create collection timeout")
 		default:
-			return p.sched.DdQueue.Enqueue(dct)
+			return p.taskSch.DdQueue.Enqueue(dct)
 		}
 	}
 	err := fn()
@@ -319,7 +319,7 @@ func (p *Proxy) ShowCollections(ctx context.Context, req *commonpb.Empty) (*serv
 		case <-ctx.Done():
 			return errors.New("create collection timeout")
 		default:
-			return p.sched.DdQueue.Enqueue(sct)
+			return p.taskSch.DdQueue.Enqueue(sct)
 		}
 	}
 	err := fn()
@@ -369,7 +369,7 @@ func (p *Proxy) CreatePartition(ctx context.Context, in *servicepb.PartitionName
 		case <-ctx.Done():
 			return errors.New("create partition timeout")
 		default:
-			return p.sched.DdQueue.Enqueue(cpt)
+			return p.taskSch.DdQueue.Enqueue(cpt)
 		}
 	}()
@@ -415,7 +415,7 @@ func (p *Proxy) DropPartition(ctx context.Context, in *servicepb.PartitionName)
 		case <-ctx.Done():
 			return errors.New("drop partition timeout")
 		default:
-			return p.sched.DdQueue.Enqueue(dpt)
+			return p.taskSch.DdQueue.Enqueue(dpt)
 		}
 	}()
@@ -461,7 +461,7 @@ func (p *Proxy) HasPartition(ctx context.Context, in *servicepb.PartitionName) (
 		case <-ctx.Done():
 			return errors.New("has partition timeout")
 		default:
-			return p.sched.DdQueue.Enqueue(hpt)
+			return p.taskSch.DdQueue.Enqueue(hpt)
 		}
 	}()
@@ -513,7 +513,7 @@ func (p *Proxy) DescribePartition(ctx context.Context, in *servicepb.PartitionNa
 		case <-ctx.Done():
 			return errors.New("describe partion timeout")
 		default:
-			return p.sched.DdQueue.Enqueue(dpt)
+			return p.taskSch.DdQueue.Enqueue(dpt)
 		}
 	}()
@@ -566,7 +566,7 @@ func (p *Proxy) ShowPartitions(ctx context.Context, req *servicepb.CollectionNam
 		case <-ctx.Done():
 			return errors.New("show partition timeout")
 		default:
-			return p.sched.DdQueue.Enqueue(spt)
+			return p.taskSch.DdQueue.Enqueue(spt)
 		}
 	}()
@@ -12,6 +12,7 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/allocator"
 	"github.com/zilliztech/milvus-distributed/internal/msgstream"
+	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
 	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
@@ -28,7 +29,7 @@ type Proxy struct {
 	grpcServer *grpc.Server
 	masterConn *grpc.ClientConn
 	masterClient masterpb.MasterClient
-	sched *TaskScheduler
+	taskSch *TaskScheduler
 	tick *timeTick
 
 	idAllocator *allocator.IDAllocator
@@ -37,6 +38,7 @@ type Proxy struct {
 	manipulationMsgStream *msgstream.PulsarMsgStream
 	queryMsgStream *msgstream.PulsarMsgStream
+	queryResultMsgStream *msgstream.PulsarMsgStream
 
 	// Add callback functions at different stages
 	startCallbacks []func()
@@ -60,6 +62,9 @@ func CreateProxy(ctx context.Context) (*Proxy, error) {
 	bufSize := int64(1000)
 	manipulationChannels := []string{"manipulation"}
 	queryChannels := []string{"query"}
+	queryResultChannels := []string{"QueryResult"}
+	queryResultSubName := "QueryResultSubject"
+	unmarshal := msgstream.NewUnmarshalDispatcher()
 
 	p.manipulationMsgStream = msgstream.NewPulsarMsgStream(p.proxyLoopCtx, bufSize)
 	p.manipulationMsgStream.SetPulsarClient(pulsarAddress)
@@ -69,6 +74,13 @@ func CreateProxy(ctx context.Context) (*Proxy, error) {
 	p.queryMsgStream.SetPulsarClient(pulsarAddress)
 	p.queryMsgStream.CreatePulsarProducers(queryChannels)
 
+	p.queryResultMsgStream = msgstream.NewPulsarMsgStream(p.proxyLoopCtx, bufSize)
+	p.queryResultMsgStream.SetPulsarClient(pulsarAddress)
+	p.queryResultMsgStream.CreatePulsarConsumers(queryResultChannels,
+		queryResultSubName,
+		unmarshal,
+		bufSize)
+
 	masterAddr := Params.MasterAddress()
 	idAllocator, err := allocator.NewIDAllocator(p.proxyLoopCtx, masterAddr)
@@ -89,7 +101,7 @@ func CreateProxy(ctx context.Context) (*Proxy, error) {
 	}
 	p.segAssigner = segAssigner
 
-	p.sched, err = NewTaskScheduler(p.proxyLoopCtx, p.idAllocator, p.tsoAllocator)
+	p.taskSch, err = NewTaskScheduler(p.proxyLoopCtx, p.idAllocator, p.tsoAllocator)
 	if err != nil {
 		return nil, err
 	}
@@ -110,19 +122,17 @@ func (p *Proxy) startProxy() error {
 	initGlobalMetaCache(p.proxyLoopCtx, p.masterClient, p.idAllocator, p.tsoAllocator)
 	p.manipulationMsgStream.Start()
 	p.queryMsgStream.Start()
-	p.sched.Start()
+	p.queryResultMsgStream.Start()
+	p.taskSch.Start()
 	p.idAllocator.Start()
 	p.tsoAllocator.Start()
 	p.segAssigner.Start()
 
-	// Start callbacks
+	// Run callbacks
 	for _, cb := range p.startCallbacks {
 		cb()
 	}
 
-	p.proxyLoopWg.Add(1)
-	go p.grpcLoop()
-
 	return nil
 }
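startProxy now starts the query-result stream and the renamed taskSch, runs the registered start callbacks, and no longer launches the gRPC loop itself (that moves to startProxyLoop in the next hunk). Below is a compact sketch of the start-callback hook, using hypothetical names rather than the real Proxy API:

    // Stand-in sketch of the start-callback hook; proxyLike and its methods
    // are illustrative names, not the actual Proxy API.
    package main

    import "fmt"

    type proxyLike struct {
        startCallbacks []func()
    }

    // addStartCallback registers a hook to run once the components are up.
    func (p *proxyLike) addStartCallback(cb ...func()) {
        p.startCallbacks = append(p.startCallbacks, cb...)
    }

    func (p *proxyLike) startProxy() error {
        // ... message streams, task scheduler, and allocators would start here ...

        // Run callbacks
        for _, cb := range p.startCallbacks {
            cb()
        }
        return nil
    }

    func main() {
        p := &proxyLike{}
        p.addStartCallback(func() { fmt.Println("proxy components are up") })
        if err := p.startProxy(); err != nil {
            fmt.Println(err)
        }
    }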
@@ -163,8 +173,65 @@ func (p *Proxy) connectMaster() error {
 	return nil
 }
 
-func (p *Proxy) Start() error {
-	return p.startProxy()
+func (p *Proxy) queryResultLoop() {
+	defer p.proxyLoopWg.Done()
+	defer p.proxyLoopCancel()
+
+	queryResultBuf := make(map[UniqueID][]*internalpb.SearchResult)
+
+	for {
+		select {
+		case msgPack, ok := <-p.queryResultMsgStream.Chan():
+			if !ok {
+				log.Print("buf chan closed")
+				return
+			}
+			if msgPack == nil {
+				continue
+			}
+			for _, tsMsg := range msgPack.Msgs {
+				searchResultMsg, _ := tsMsg.(*msgstream.SearchResultMsg)
+				reqID := searchResultMsg.GetReqID()
+				_, ok = queryResultBuf[reqID]
+				if !ok {
+					queryResultBuf[reqID] = make([]*internalpb.SearchResult, 0)
+				}
+				queryResultBuf[reqID] = append(queryResultBuf[reqID], &searchResultMsg.SearchResult)
+				if len(queryResultBuf[reqID]) == 4 {
+					// TODO: use the number of query node instead
+					t := p.taskSch.getTaskByReqID(reqID)
+					if t != nil {
+						qt, ok := t.(*QueryTask)
+						if ok {
+							log.Printf("address of query task: %p", qt)
+							qt.resultBuf <- queryResultBuf[reqID]
+							delete(queryResultBuf, reqID)
+						}
+					} else {
+						log.Printf("task with reqID %v is nil", reqID)
+					}
+				}
+			}
+		case <-p.proxyLoopCtx.Done():
+			log.Print("proxy server is closed ...")
+			return
+		}
+	}
+}
+
+func (p *Proxy) startProxyLoop() {
+	p.proxyLoopWg.Add(2)
+	go p.grpcLoop()
+	go p.queryResultLoop()
+
+}
+
+func (p *Proxy) Run() error {
+	if err := p.startProxy(); err != nil {
+		return err
+	}
+	p.startProxyLoop()
+	return nil
 }
 
 func (p *Proxy) stopProxyLoop() {
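The heart of the new queryResultLoop is per-request aggregation: partial search results are buffered by request ID and handed to the waiting query task once the expected number of partials arrives (hard-coded to 4, one per query node, per the TODO). A self-contained sketch of just that step, with simplified stand-in types:

    // Stand-in sketch of the per-request aggregation in queryResultLoop;
    // searchResult and aggregate are simplified illustrations.
    package main

    import "fmt"

    type searchResult struct {
        reqID int64
        hits  string
    }

    // aggregate buffers partial results by request ID and delivers the batch
    // once the expected number of partials has arrived.
    func aggregate(results <-chan searchResult, expected int, deliver func(int64, []searchResult)) {
        buf := make(map[int64][]searchResult)
        for r := range results {
            buf[r.reqID] = append(buf[r.reqID], r)
            if len(buf[r.reqID]) == expected {
                deliver(r.reqID, buf[r.reqID])
                delete(buf, r.reqID)
            }
        }
    }

    func main() {
        results := make(chan searchResult, 8)
        for i := 0; i < 4; i++ {
            results <- searchResult{reqID: 7, hits: fmt.Sprintf("partial-%d", i)}
        }
        close(results)
        aggregate(results, 4, func(id int64, rs []searchResult) {
            fmt.Printf("reqID %d: %d partial results collected\n", id, len(rs))
        })
    }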
@@ -179,12 +246,14 @@ func (p *Proxy) stopProxyLoop() {
 
 	p.segAssigner.Close()
 
-	p.sched.Close()
+	p.taskSch.Close()
 
 	p.manipulationMsgStream.Close()
 
 	p.queryMsgStream.Close()
 
+	p.queryResultMsgStream.Close()
+
 	p.proxyLoopWg.Wait()
 }
@@ -97,7 +97,7 @@ func startProxy(ctx context.Context) {
 	}
 
 	// TODO: change to wait until master is ready
-	if err := svr.Start(); err != nil {
+	if err := svr.Run(); err != nil {
 		log.Fatal("run proxy failed", zap.Error(err))
 	}
 }
@@ -8,8 +8,6 @@ import (
 	"sync"
 
 	"github.com/zilliztech/milvus-distributed/internal/allocator"
-	"github.com/zilliztech/milvus-distributed/internal/msgstream"
-	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
 )
 
 type TaskQueue interface {
@@ -142,7 +140,7 @@ func (queue *BaseTaskQueue) TaskDoneTest(ts Timestamp) bool {
 	queue.utLock.Lock()
 	defer queue.utLock.Unlock()
 	for e := queue.unissuedTasks.Front(); e != nil; e = e.Next() {
-		if e.Value.(task).EndTs() < ts {
+		if e.Value.(task).EndTs() >= ts {
 			return false
 		}
 	}
@@ -150,7 +148,7 @@ func (queue *BaseTaskQueue) TaskDoneTest(ts Timestamp) bool {
 	queue.atLock.Lock()
 	defer queue.atLock.Unlock()
 	for ats := range queue.activeTasks {
-		if ats < ts {
+		if ats >= ts {
 			return false
 		}
 	}
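Both TaskDoneTest hunks flip the comparison: the test now returns false as soon as any unissued or active task carries a timestamp at or after ts, instead of before it. A standalone sketch of the corrected predicate, with stand-in types:

    // Stand-in sketch of the corrected check; timestamp and taskDoneTest are
    // simplified illustrations of the queue fields.
    package main

    import "fmt"

    type timestamp = uint64

    // taskDoneTest returns false as soon as any pending task carries a
    // timestamp at or after ts, mirroring the new >= comparisons.
    func taskDoneTest(ts timestamp, unissuedEndTs []timestamp, activeTs map[timestamp]struct{}) bool {
        for _, endTs := range unissuedEndTs {
            if endTs >= ts {
                return false
            }
        }
        for ats := range activeTs {
            if ats >= ts {
                return false
            }
        }
        return true
    }

    func main() {
        active := map[timestamp]struct{}{5: {}}
        fmt.Println(taskDoneTest(10, []timestamp{3, 4}, active)) // true
        fmt.Println(taskDoneTest(4, []timestamp{3, 4}, active))  // false: 4 >= 4
    }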
@@ -359,68 +357,6 @@ func (sched *TaskScheduler) queryLoop() {
 	}
 }
 
-func (sched *TaskScheduler) queryResultLoop() {
-	defer sched.wg.Done()
-
-	// TODO: use config instead
-	pulsarAddress := "pulsar://localhost:6650"
-	bufSize := int64(1000)
-	queryResultChannels := []string{"QueryResult"}
-	queryResultSubName := "QueryResultSubject"
-	unmarshal := msgstream.NewUnmarshalDispatcher()
-
-	queryResultMsgStream := msgstream.NewPulsarMsgStream(sched.ctx, bufSize)
-	queryResultMsgStream.SetPulsarClient(pulsarAddress)
-	queryResultMsgStream.CreatePulsarConsumers(queryResultChannels,
-		queryResultSubName,
-		unmarshal,
-		bufSize)
-
-	queryResultMsgStream.Start()
-	defer queryResultMsgStream.Close()
-
-	queryResultBuf := make(map[UniqueID][]*internalpb.SearchResult)
-
-	for {
-		select {
-		case msgPack, ok := <-queryResultMsgStream.Chan():
-			if !ok {
-				log.Print("buf chan closed")
-				return
-			}
-			if msgPack == nil {
-				continue
-			}
-			for _, tsMsg := range msgPack.Msgs {
-				searchResultMsg, _ := tsMsg.(*msgstream.SearchResultMsg)
-				reqID := searchResultMsg.GetReqID()
-				_, ok = queryResultBuf[reqID]
-				if !ok {
-					queryResultBuf[reqID] = make([]*internalpb.SearchResult, 0)
-				}
-				queryResultBuf[reqID] = append(queryResultBuf[reqID], &searchResultMsg.SearchResult)
-				if len(queryResultBuf[reqID]) == 4 {
-					// TODO: use the number of query node instead
-					t := sched.getTaskByReqID(reqID)
-					if t != nil {
-						qt, ok := t.(*QueryTask)
-						if ok {
-							log.Printf("address of query task: %p", qt)
-							qt.resultBuf <- queryResultBuf[reqID]
-							delete(queryResultBuf, reqID)
-						}
-					} else {
-						log.Printf("task with reqID %v is nil", reqID)
-					}
-				}
-			}
-		case <-sched.ctx.Done():
-			log.Print("proxy server is closed ...")
-			return
-		}
-	}
-}
-
 func (sched *TaskScheduler) Start() error {
 	sched.wg.Add(1)
 	go sched.definitionLoop()
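This removal mirrors the queryResultLoop added to Proxy above: the task scheduler no longer owns a Pulsar consumer of its own; the Proxy holds queryResultMsgStream and routes results back to waiting tasks through taskSch.getTaskByReqID.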
@@ -431,9 +367,6 @@ func (sched *TaskScheduler) Start() error {
 	sched.wg.Add(1)
 	go sched.queryLoop()
 
-	sched.wg.Add(1)
-	go sched.queryResultLoop()
-
 	return nil
 }
@@ -11,3 +11,9 @@ formatThis() {
 formatThis "${CorePath}/src"
 formatThis "${CorePath}/unittest"
 
+if test -z "$(git status | grep -E "*\.c|*\.h")"; then
+    exit 0
+else
+    echo "Please format your code by clang-format!"
+    exit 1
+fi
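This shell hunk is the change the commit title refers to: after formatThis runs clang-format over src and unittest, the script checks git status for modified .c/.h files. A clean tree exits 0; leftover diffs print a reminder and exit 1, presumably so that a build or CI step invoking the script fails until the code is formatted.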