mirror of https://github.com/milvus-io/milvus.git

Add Meta table implementation

Signed-off-by: neza2017 <yefu.chen@zilliz.com>

branch pull/4973/head^2
parent bade037fb2
commit d18c17e786
@@ -41,6 +41,7 @@ sdk/cmake_build_release
 # Compiled source
 bin/
 lib/
 cmake_build/
 *.a
 *.so
 *.so.*
@@ -15,7 +15,7 @@ linters-settings:
     locale: US

 linters:
-  disable-all: false
+  disable-all: true
   enable:
     - typecheck
     - goimports
@@ -24,8 +24,6 @@ linters:
    - golint
    - ineffassign
    - gosimple
    - deadcode
    - structcheck

 service:
   golangci-lint-version: 1.27.0 # use the fixed version to not introduce new linters unexpectedly
Makefile (6 lines changed)
@@ -36,7 +36,9 @@ fmt:
 lint:
    @echo "Running $@ check"
    @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
-   @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=1m --config ./.golangci.yml || true
+   @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=1m --config ./.golangci.yml ./internal/... || true
+   @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=1m --config ./.golangci.yml ./cmd/... || true
+   @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=1m --config ./.golangci.yml ./test/... || true

 ruleguard:
    @echo "Running $@ check"

@@ -45,7 +47,7 @@ ruleguard:
 verifiers: get-check-deps fmt lint ruleguard

 # Builds various components locally.
-build-go: verifiers
+build-go:
    @echo "Building each component's binary to './'"
    @echo "Building reader ..."
    @mkdir -p $(INSTALL_PATH) && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/reader $(PWD)/cmd/reader/reader.go 1>/dev/null
@@ -3,13 +3,14 @@ package main
 import (
     "context"
     "flag"
-    "github.com/zilliztech/milvus-distributed/internal/conf"
-    "github.com/zilliztech/milvus-distributed/internal/master"
-    "go.uber.org/zap"
     "log"
     "os"
     "os/signal"
     "syscall"
+
+    "github.com/zilliztech/milvus-distributed/internal/conf"
+    "github.com/zilliztech/milvus-distributed/internal/master"
+    "go.uber.org/zap"
 )

 func main() {
@@ -1,8 +1,9 @@
 package main

 import (
-    "github.com/zilliztech/milvus-distributed/internal/proxy"
     "log"
+
+    "github.com/zilliztech/milvus-distributed/internal/proxy"
 )

 func main() {
@@ -4,9 +4,10 @@ import (
     "context"
     "flag"
     "fmt"
+    "strconv"
+
     "github.com/zilliztech/milvus-distributed/internal/conf"
     "github.com/zilliztech/milvus-distributed/internal/reader"
-    "strconv"
 )

 func main() {
@@ -5,15 +5,16 @@ import (
     "crypto/md5"
     "flag"
     "fmt"
-    "github.com/pivotal-golang/bytefmt"
-    "github.com/zilliztech/milvus-distributed/internal/storage"
-    "github.com/zilliztech/milvus-distributed/internal/storage/type"
     "log"
     "math/rand"
     "os"
     "sync"
     "sync/atomic"
     "time"
+
+    "github.com/pivotal-golang/bytefmt"
+    "github.com/zilliztech/milvus-distributed/internal/storage"
+    storagetype "github.com/zilliztech/milvus-distributed/internal/storage/type"
 )

 // Global variables
@@ -4,12 +4,13 @@ import (
     "context"
     "flag"
     "fmt"
+    "log"
+    "strconv"
+
     "github.com/zilliztech/milvus-distributed/internal/conf"
     "github.com/zilliztech/milvus-distributed/internal/msgclient"
     "github.com/zilliztech/milvus-distributed/internal/storage"
     "github.com/zilliztech/milvus-distributed/internal/writer"
-    "log"
-    "strconv"
 )

 func main() {
@@ -24,4 +24,4 @@ services:
     working_dir: "/milvus-distributed"
     command: &ubuntu-command >
       /bin/bash -c "
-        make all"
+        make verifiers && make all"
@@ -1128,9 +1128,7 @@ func NewMetaTable(kv kv.Base) (*metaTable,error)

 * Soft Time Tick Barrier

-<img src="./raw_figs/soft_tt_barrier.jpeg" width=700>
+<img src="./raw_figs/Soft_time_tick_barrier.png" width=600>

 ```go
 type softTimeTickBarrier struct {
@@ -1151,7 +1149,7 @@ func newSoftTimeTickBarrier(ctx context.Context, ttStream *MsgStream, peerIds []

 * Hard Time Tick Barrier

-<img src="./raw_figs/hard_tt_barrier.jpeg" width=700>
+<img src="./raw_figs/Hard_time_tick_barrier.png" width=500>

 ```go
 type hardTimeTickBarrier struct {
@@ -1168,9 +1166,9 @@ func newHardTimeTickBarrier(ctx context.Context, ttStream *MsgStream, peerIds []

-###### 10.5.1 Time Synchornization Message Producer
+###### 10.5.1 Time Synchronization Message Producer

-<img src="./raw_figs/tt_msg_producer.jpeg" width=700>
+<img src="./raw_figs/time_sync_msg_producer.png" width=900>
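For readers skimming these design-doc hunks: the renamed figures illustrate the two barrier flavors whose struct definitions appear above. As a rough, self-contained illustration of the *soft* variant's contract only — my own sketch with invented names, not the repository's implementation — a soft barrier can release the minimum timestamp reported across peers, and may do so repeatedly:

```go
package main

import "fmt"

// softBarrier is an illustrative stand-in: it tracks the latest time tick
// seen from each peer and releases the minimum once every peer has reported.
type softBarrier struct {
	lastTick map[int64]uint64 // peerId -> latest time tick (0 = not yet seen)
}

func (b *softBarrier) update(peerId int64, ts uint64) {
	b.lastTick[peerId] = ts
}

// current returns the releasable timestamp, or false if some peer is silent.
func (b *softBarrier) current() (uint64, bool) {
	var min uint64
	first := true
	for _, ts := range b.lastTick {
		if ts == 0 {
			return 0, false // a peer has not ticked yet
		}
		if first || ts < min {
			min, first = ts, false
		}
	}
	return min, !first
}

func main() {
	b := &softBarrier{lastTick: map[int64]uint64{1: 0, 2: 0}}
	b.update(1, 10)
	if _, ok := b.current(); !ok {
		fmt.Println("peer 2 has not ticked yet")
	}
	b.update(2, 8)
	ts, _ := b.current()
	fmt.Println("barrier releases", ts) // 8
}
```

A hard barrier, going only by the section's naming, would instead hold a tick until every peer has advanced past it; the diff itself only swaps the figures and fixes the heading typo.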
Binary file not shown (new image, 65 KiB).
Binary file not shown (new image, 100 KiB).
Binary file not shown (new image, 2.2 MiB).
@@ -14,10 +14,10 @@ done
 SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

-BUILD_OUTPUT_DIR="cmake_build"
+BUILD_OUTPUT_DIR="${SCRIPTS_DIR}/../../cmake_build"
 BUILD_TYPE="Release"
 BUILD_UNITTEST="OFF"
-INSTALL_PREFIX=${SCRIPTS_DIR}/output
+INSTALL_PREFIX="${SCRIPTS_DIR}/output"
 MAKE_CLEAN="OFF"
 BUILD_COVERAGE="OFF"
 DB_PATH="/tmp/milvus"
@@ -133,7 +133,7 @@ CMAKE_CMD="cmake \
 -DMILVUS_WITH_PROMETHEUS=${WITH_PROMETHEUS} \
 -DMILVUS_CUDA_ARCH=${CUDA_ARCH} \
 -DCUSTOM_THIRDPARTY_DOWNLOAD_PATH=${CUSTOM_THIRDPARTY_PATH} \
-../"
+${SCRIPTS_DIR}"
 echo ${CMAKE_CMD}
 ${CMAKE_CMD}
@@ -44,12 +44,12 @@ type showCollectionsTask struct {
 }

 //////////////////////////////////////////////////////////////////////////
-func (t *createCollectionTask) Type() internalpb.ReqType {
+func (t *createCollectionTask) Type() internalpb.MsgType {
     if t.req == nil {
         log.Printf("null request")
         return 0
     }
-    return t.req.ReqType
+    return t.req.MsgType
 }

 func (t *createCollectionTask) Ts() (Timestamp, error) {

@@ -106,12 +106,12 @@ func (t *createCollectionTask) Execute() error {
 }

 //////////////////////////////////////////////////////////////////////////
-func (t *dropCollectionTask) Type() internalpb.ReqType {
+func (t *dropCollectionTask) Type() internalpb.MsgType {
     if t.req == nil {
         log.Printf("null request")
         return 0
     }
-    return t.req.ReqType
+    return t.req.MsgType
 }

 func (t *dropCollectionTask) Ts() (Timestamp, error) {

@@ -149,12 +149,12 @@ func (t *dropCollectionTask) Execute() error {
 }

 //////////////////////////////////////////////////////////////////////////
-func (t *hasCollectionTask) Type() internalpb.ReqType {
+func (t *hasCollectionTask) Type() internalpb.MsgType {
     if t.req == nil {
         log.Printf("null request")
         return 0
     }
-    return t.req.ReqType
+    return t.req.MsgType
 }

 func (t *hasCollectionTask) Ts() (Timestamp, error) {

@@ -181,12 +181,12 @@ func (t *hasCollectionTask) Execute() error {
 }

 //////////////////////////////////////////////////////////////////////////
-func (t *describeCollectionTask) Type() internalpb.ReqType {
+func (t *describeCollectionTask) Type() internalpb.MsgType {
     if t.req == nil {
         log.Printf("null request")
         return 0
     }
-    return t.req.ReqType
+    return t.req.MsgType
 }

 func (t *describeCollectionTask) Ts() (Timestamp, error) {

@@ -223,12 +223,12 @@ func (t *describeCollectionTask) Execute() error {
 }

 //////////////////////////////////////////////////////////////////////////
-func (t *showCollectionsTask) Type() internalpb.ReqType {
+func (t *showCollectionsTask) Type() internalpb.MsgType {
     if t.req == nil {
         log.Printf("null request")
         return 0
     }
-    return t.req.ReqType
+    return t.req.MsgType
 }

 func (t *showCollectionsTask) Ts() (Timestamp, error) {
@@ -42,12 +42,12 @@ type showPartitionTask struct {
 }

 //////////////////////////////////////////////////////////////////////////
-func (t *createPartitionTask) Type() internalpb.ReqType {
+func (t *createPartitionTask) Type() internalpb.MsgType {
     if t.req == nil {
         log.Printf("null request")
         return 0
     }
-    return t.req.ReqType
+    return t.req.MsgType
 }

 func (t *createPartitionTask) Ts() (Timestamp, error) {

@@ -91,12 +91,12 @@ func (t *createPartitionTask) Execute() error {
 }

 //////////////////////////////////////////////////////////////////////////
-func (t *dropPartitionTask) Type() internalpb.ReqType {
+func (t *dropPartitionTask) Type() internalpb.MsgType {
     if t.req == nil {
         log.Printf("null request")
         return 0
     }
-    return t.req.ReqType
+    return t.req.MsgType
 }

 func (t *dropPartitionTask) Ts() (Timestamp, error) {

@@ -143,12 +143,12 @@ func (t *dropPartitionTask) Execute() error {
 }

 //////////////////////////////////////////////////////////////////////////
-func (t *hasPartitionTask) Type() internalpb.ReqType {
+func (t *hasPartitionTask) Type() internalpb.MsgType {
     if t.req == nil {
         log.Printf("null request")
         return 0
     }
-    return t.req.ReqType
+    return t.req.MsgType
 }

 func (t *hasPartitionTask) Ts() (Timestamp, error) {

@@ -178,12 +178,12 @@ func (t *hasPartitionTask) Execute() error {
 }

 //////////////////////////////////////////////////////////////////////////
-func (t *describePartitionTask) Type() internalpb.ReqType {
+func (t *describePartitionTask) Type() internalpb.MsgType {
     if t.req == nil {
         log.Printf("null request")
         return 0
     }
-    return t.req.ReqType
+    return t.req.MsgType
 }

 func (t *describePartitionTask) Ts() (Timestamp, error) {

@@ -215,12 +215,12 @@ func (t *describePartitionTask) Execute() error {
 }

 //////////////////////////////////////////////////////////////////////////
-func (t *showPartitionTask) Type() internalpb.ReqType {
+func (t *showPartitionTask) Type() internalpb.MsgType {
     if t.req == nil {
         log.Printf("null request")
         return 0
     }
-    return t.req.ReqType
+    return t.req.MsgType
 }

 func (t *showPartitionTask) Ts() (Timestamp, error) {
@@ -18,7 +18,7 @@ type baseTask struct {
 }

 type task interface {
-    Type() internalpb.ReqType
+    Type() internalpb.MsgType
     Ts() (Timestamp, error)
     Execute() error
     WaitToFinish(ctx context.Context) error
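The task interface now reports internalpb.MsgType. A minimal sketch of a caller driving this interface; the loop, noopTask, and stand-in types are hypothetical, not the master's actual scheduler:

```go
package main

import (
	"context"
	"fmt"
)

// Stand-ins for the real types; internalpb.MsgType is an int32 enum.
type Timestamp = uint64
type MsgType = int32

type task interface {
	Type() MsgType
	Ts() (Timestamp, error)
	Execute() error
	WaitToFinish(ctx context.Context) error
}

// noopTask is a dummy task used only to exercise the interface.
type noopTask struct{ msgType MsgType }

func (t noopTask) Type() MsgType                          { return t.msgType }
func (t noopTask) Ts() (Timestamp, error)                 { return 1, nil }
func (t noopTask) Execute() error                         { return nil }
func (t noopTask) WaitToFinish(ctx context.Context) error { return ctx.Err() }

func main() {
	tasks := []task{noopTask{msgType: 100}} // 100 = kCreateCollection in the proto enum
	for _, t := range tasks {
		ts, err := t.Ts()
		if err != nil {
			fmt.Println("bad timestamp:", err)
			continue
		}
		fmt.Printf("run task type=%d at ts=%d err=%v\n", t.Type(), ts, t.Execute())
	}
}
```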
@@ -17,25 +17,25 @@ func GetMarshalers(inputMsgType MsgType, outputMsgType MsgType) (*TsMsgMarshaler

 func GetMarshaler(MsgType MsgType) *TsMsgMarshaler {
     switch MsgType {
-    case kInsert:
+    case KInsert:
         insertMarshaler := &InsertMarshaler{}
         var tsMsgMarshaller TsMsgMarshaler = insertMarshaler
         return &tsMsgMarshaller
-    case kDelete:
+    case KDelete:
         deleteMarshaler := &DeleteMarshaler{}
         var tsMsgMarshaller TsMsgMarshaler = deleteMarshaler
         return &tsMsgMarshaller
-    case kSearch:
+    case KSearch:
         searchMarshaler := &SearchMarshaler{}
         var tsMsgMarshaller TsMsgMarshaler = searchMarshaler
         return &tsMsgMarshaller
-    case kSearchResult:
+    case KSearchResult:
         searchResultMarshler := &SearchResultMarshaler{}
         var tsMsgMarshaller TsMsgMarshaler = searchResultMarshler
         return &tsMsgMarshaller
-    case kTimeSync:
-        timeSyncMarshaler := &TimeSyncMarshaler{}
-        var tsMsgMarshaller TsMsgMarshaler = timeSyncMarshaler
+    case KTimeTick:
+        timeTickMarshaler := &TimeTickMarshaler{}
+        var tsMsgMarshaller TsMsgMarshaler = timeTickMarshaler
         return &tsMsgMarshaller
     default:
         return nil
@@ -143,28 +143,28 @@ func (srm *SearchResultMarshaler) Unmarshal(input []byte) (*TsMsg, commonPb.Stat
     return &tsMsg, commonPb.Status{ErrorCode: commonPb.ErrorCode_SUCCESS}
 }

-/////////////////////////////////////TimeSync///////////////////////////////////////////////
+/////////////////////////////////////TimeTick///////////////////////////////////////////////

-type TimeSyncMarshaler struct{}
+type TimeTickMarshaler struct{}

-func (tm *TimeSyncMarshaler) Marshal(input *TsMsg) ([]byte, commonPb.Status) {
-    timeSyncTask := (*input).(TimeSyncTask)
-    timeSyncMsg := &timeSyncTask.TimeTickMsg
-    mb, err := proto.Marshal(timeSyncMsg)
+func (tm *TimeTickMarshaler) Marshal(input *TsMsg) ([]byte, commonPb.Status) {
+    timeTickTask := (*input).(TimeTickTask)
+    timeTickMsg := &timeTickTask.TimeTickMsg
+    mb, err := proto.Marshal(timeTickMsg)
     if err != nil {
         return nil, commonPb.Status{ErrorCode: commonPb.ErrorCode_UNEXPECTED_ERROR}
     }
     return mb, commonPb.Status{ErrorCode: commonPb.ErrorCode_SUCCESS}
 }

-func (tm *TimeSyncMarshaler) Unmarshal(input []byte) (*TsMsg, commonPb.Status) {
-    timeSyncMsg := internalPb.TimeTickMsg{}
-    err := proto.Unmarshal(input, &timeSyncMsg)
-    timeSyncTask := TimeSyncTask{TimeTickMsg: timeSyncMsg}
+func (tm *TimeTickMarshaler) Unmarshal(input []byte) (*TsMsg, commonPb.Status) {
+    timeTickMsg := internalPb.TimeTickMsg{}
+    err := proto.Unmarshal(input, &timeTickMsg)
+    timeTickTask := TimeTickTask{TimeTickMsg: timeTickMsg}
     if err != nil {
         return nil, commonPb.Status{ErrorCode: commonPb.ErrorCode_UNEXPECTED_ERROR}
     }
-    var tsMsg TsMsg = timeSyncTask
+    var tsMsg TsMsg = timeTickTask
     return &tsMsg, commonPb.Status{ErrorCode: commonPb.ErrorCode_SUCCESS}
 }
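Note what the renamed marshaler does and does not preserve: Marshal serializes only the embedded TimeTickMsg, so a task's HashValues never reach the wire and come back zero-valued. A self-contained sketch of that round-trip behavior, using encoding/json purely as a stand-in for proto.Marshal/proto.Unmarshal:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins: the real code wraps internalPb.TimeTickMsg and uses
// proto.Marshal/Unmarshal; JSON only keeps the sketch self-contained.
type TimeTickMsg struct {
	PeerId    int64  `json:"peer_id"`
	Timestamp uint64 `json:"timestamp"`
}

type TimeTickTask struct {
	HashValues []int32
	TimeTickMsg
}

func main() {
	in := TimeTickTask{HashValues: []int32{7}, TimeTickMsg: TimeTickMsg{PeerId: 1, Timestamp: 42}}
	b, _ := json.Marshal(in.TimeTickMsg) // only the embedded message is encoded, as in TimeTickMarshaler
	var msg TimeTickMsg
	_ = json.Unmarshal(b, &msg)
	out := TimeTickTask{TimeTickMsg: msg} // hash values are not part of the wire format
	fmt.Println(out.Timestamp, out.HashValues) // 42 []
}
```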
@@ -126,7 +126,23 @@ func (ms *PulsarMsgStream) Produce(msgPack *MsgPack) commonPb.Status {
         reBucketValues[channelId] = bucketValues
     }

-    result := ms.repackFunc(tsMsgs, reBucketValues)
+    var result map[int32]*MsgPack
+    if ms.repackFunc != nil {
+        result = ms.repackFunc(tsMsgs, reBucketValues)
+    } else {
+        result = make(map[int32]*MsgPack)
+        for i, request := range tsMsgs {
+            keys := reBucketValues[i]
+            for _, channelId := range keys {
+                _, ok := result[channelId]
+                if ok == false {
+                    msgPack := MsgPack{}
+                    result[channelId] = &msgPack
+                }
+                result[channelId].Msgs = append(result[channelId].Msgs, request)
+            }
+        }
+    }
     for k, v := range result {
         for i := 0; i < len(v.Msgs); i++ {
             mb, status := (*ms.msgMarshaler).Marshal(v.Msgs[i])
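This hunk gives Produce a default routing policy: when no repackFunc is registered, each message is appended to the pack of every channel its hash values bucket to, instead of dereferencing a nil function. Stripped of the Pulsar plumbing and with simplified stand-in types, the fallback amounts to:

```go
package main

import "fmt"

// Simplified stand-ins for the stream types; the real TsMsg and MsgPack
// live in the msgstream package.
type Msg string
type MsgPack struct{ Msgs []Msg }

// defaultRepack mirrors the fallback added to Produce: each message goes
// into the pack of every channel listed in its bucket values.
func defaultRepack(msgs []Msg, buckets [][]int32) map[int32]*MsgPack {
	result := make(map[int32]*MsgPack)
	for i, m := range msgs {
		for _, channelId := range buckets[i] {
			if _, ok := result[channelId]; !ok {
				result[channelId] = &MsgPack{}
			}
			result[channelId].Msgs = append(result[channelId].Msgs, m)
		}
	}
	return result
}

func main() {
	msgs := []Msg{"insert-0", "insert-1"}
	buckets := [][]int32{{0}, {0, 1}} // the second message fans out to two channels
	for ch, pack := range defaultRepack(msgs, buckets) {
		fmt.Printf("channel %d gets %v\n", ch, pack.Msgs)
	}
}
```

This is also what makes the new BroadCast-style tests workable: a stream without a registered repack function no longer panics inside Produce.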
@@ -273,7 +289,7 @@ func (ms *PulsarTtMsgStream) findTimeTick(ctx context.Context,
             (*ms.consumers[channelIndex]).Ack(pulsarMsg)
             tsMsg, status := (*ms.msgUnmarshaler).Unmarshal(pulsarMsg.Payload())
             // TODO:: Find the EOF
-            if (*tsMsg).Type() == kTimeSync {
+            if (*tsMsg).Type() == KTimeTick {
                 eofMsgMap[channelIndex] = (*tsMsg).EndTs()
                 wg.Done()
                 return
@@ -27,9 +27,9 @@ func repackFunc(msgs []*TsMsg, hashKeys [][]int32) map[int32]*MsgPack {
 func getTsMsg(msgType MsgType, reqId int64, hashValue int32) *TsMsg {
     var tsMsg TsMsg
     switch msgType {
-    case kInsert:
+    case KInsert:
         insertRequest := internalPb.InsertRequest{
-            ReqType:        internalPb.ReqType_kInsert,
+            MsgType:        internalPb.MsgType_kInsert,
             ReqId:          reqId,
             CollectionName: "Collection",
             PartitionTag:   "Partition",

@@ -43,9 +43,9 @@ func getTsMsg(msgType MsgType, reqId int64, hashValue int32) *TsMsg {
             InsertRequest: insertRequest,
         }
         tsMsg = insertMsg
-    case kDelete:
+    case KDelete:
         deleteRequest := internalPb.DeleteRequest{
-            ReqType:        internalPb.ReqType_kDelete,
+            MsgType:        internalPb.MsgType_kDelete,
             ReqId:          reqId,
             CollectionName: "Collection",
             ChannelId:      1,

@@ -58,9 +58,9 @@ func getTsMsg(msgType MsgType, reqId int64, hashValue int32) *TsMsg {
             DeleteRequest: deleteRequest,
         }
         tsMsg = deleteMsg
-    case kSearch:
+    case KSearch:
         searchRequest := internalPb.SearchRequest{
-            ReqType:   internalPb.ReqType_kSearch,
+            MsgType:   internalPb.MsgType_kSearch,
             ReqId:     reqId,
             ProxyId:   1,
             Timestamp: 1,

@@ -71,7 +71,7 @@ func getTsMsg(msgType MsgType, reqId int64, hashValue int32) *TsMsg {
             SearchRequest: searchRequest,
         }
         tsMsg = searchMsg
-    case kSearchResult:
+    case KSearchResult:
         searchResult := internalPb.SearchResult{
             Status: &commonPb.Status{ErrorCode: commonPb.ErrorCode_SUCCESS},
             ReqId:  reqId,

@@ -85,32 +85,16 @@ func getTsMsg(msgType MsgType, reqId int64, hashValue int32) *TsMsg {
             SearchResult: searchResult,
         }
         tsMsg = searchResultMsg
-    case kTimeSync:
-        timeSyncResult := internalPb.TimeTickMsg{
+    case KTimeTick:
+        timeTickResult := internalPb.TimeTickMsg{
             PeerId:    reqId,
             Timestamp: 1,
         }
-        timeSyncMsg := TimeSyncTask{
+        timeTickMsg := TimeTickTask{
             HashValues:  []int32{hashValue},
-            TimeTickMsg: timeSyncResult,
+            TimeTickMsg: timeTickResult,
         }
-        tsMsg = timeSyncMsg
-    case kTimeTick:
-        insertRequest := internalPb.InsertRequest{
-            ReqType:        internalPb.ReqType_kTimeTick,
-            ReqId:          reqId,
-            CollectionName: "Collection",
-            PartitionTag:   "Partition",
-            SegmentId:      1,
-            ChannelId:      1,
-            ProxyId:        1,
-            Timestamps:     []uint64{1},
-        }
-        insertMsg := InsertTask{
-            HashValues:    []int32{hashValue},
-            InsertRequest: insertRequest,
-        }
-        tsMsg = insertMsg
+        tsMsg = timeTickMsg
     }
     return &tsMsg
 }

@@ -121,7 +105,8 @@ func initStream(pulsarAddress string,
     consumerSubName string,
     msgPack *MsgPack,
     inputMsgType MsgType,
-    outputMsgType MsgType) {
+    outputMsgType MsgType,
+    broadCast bool) {

     // set input stream
     inputStream := PulsarMsgStream{}

@@ -139,8 +124,13 @@ func initStream(pulsarAddress string,
     outputStream.Start()

     //send msgPack
-    inputStream.Produce(msgPack)
-    //outputStream.Start()
+    if broadCast {
+        inputStream.BroadCast(msgPack)
+    } else {
+        inputStream.Produce(msgPack)
+        //outputStream.Start()
+    }

     // receive msg
     receiveCount := 0

@@ -153,8 +143,14 @@ func initStream(pulsarAddress string,
             fmt.Println("msg type: ", (*v).Type(), ", msg value: ", *v)
         }
     }
-    if receiveCount >= len(msgPack.Msgs) {
-        break
+    if broadCast {
+        if receiveCount >= len(msgPack.Msgs) * len(producerChannels) {
+            break
+        }
+    } else {
+        if receiveCount >= len(msgPack.Msgs) {
+            break
+        }
     }
 }

@@ -166,11 +162,11 @@ func TestStream_Insert(t *testing.T) {
     consumerSubName := "subInsert"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 1, 1))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 1, 1))

     //run stream
-    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kInsert, kInsert)
+    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KInsert, KInsert, false)
 }

 func TestStream_Delete(t *testing.T) {

@@ -180,11 +176,11 @@ func TestStream_Delete(t *testing.T) {
     consumerSubName := "subDelete"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kDelete, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kDelete, 3, 3))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KDelete, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KDelete, 3, 3))

     //run stream
-    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kDelete, kDelete)
+    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KDelete, KDelete, false)
 }

 func TestStream_Search(t *testing.T) {

@@ -194,11 +190,11 @@ func TestStream_Search(t *testing.T) {
     consumerSubName := "subSearch"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearch, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearch, 3, 3))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearch, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearch, 3, 3))

     //run stream
-    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kSearch, kSearch)
+    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KSearch, KSearch, false)
 }

 func TestStream_SearchResult(t *testing.T) {

@@ -208,37 +204,38 @@ func TestStream_SearchResult(t *testing.T) {
     consumerSubName := "subSearch"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearchResult, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearchResult, 3, 3))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearchResult, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearchResult, 3, 3))

     //run stream
-    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kSearchResult, kSearchResult)
+    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KSearchResult, KSearchResult, false)
 }

-func TestStream_TimeSync(t *testing.T) {
+func TestStream_TimeTick(t *testing.T) {
     pulsarAddress := "pulsar://localhost:6650"
     producerChannels := []string{"search"}
     consumerChannels := []string{"search"}
     consumerSubName := "subSearch"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeSync, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeSync, 3, 3))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeTick, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeTick, 3, 3))

     //run stream
-    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kTimeSync, kTimeSync)
+    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KTimeTick, KTimeTick, false)
 }

 func TestStream_BroadCast(t *testing.T) {
     pulsarAddress := "pulsar://localhost:6650"
-    producerChannels := []string{"insert"}
-    consumerChannels := []string{"insert"}
+    producerChannels := []string{"insert1", "insert2"}
+    consumerChannels := []string{"insert2", "insert2"}
     consumerSubName := "subInsert"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeTick, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeTick, 3, 3))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeTick, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeTick, 3, 3))

     //run stream
-    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kInsert, kInsert)
+    initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KTimeTick, KTimeTick, true)
 }
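The new termination condition in the receive loop encodes the difference between the two send paths: Produce routes each message to one channel, while BroadCast copies every message to every producer channel, so the consumer should see len(msgPack.Msgs) * len(producerChannels) messages in the broadcast case. A tiny runnable check (the helper name is mine):

```go
package main

import "fmt"

// expectedCount mirrors the test's break condition: Produce delivers each
// message once; BroadCast delivers a copy per producer channel.
func expectedCount(numMsgs, numChannels int, broadcast bool) int {
	if broadcast {
		return numMsgs * numChannels
	}
	return numMsgs
}

func main() {
	fmt.Println(expectedCount(2, 2, false)) // 2
	fmt.Println(expectedCount(2, 2, true))  // 4
}
```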
@@ -14,14 +14,14 @@ func TestNewStream_Insert(t *testing.T) {
     consumerSubName := "subInsert"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 1, 1))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 1, 1))
     inputStream := NewInputStream(pulsarAddress, producerChannels, false)
     outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, false)

-    (*inputStream).SetMsgMarshaler(GetMarshaler(kInsert), nil)
+    (*inputStream).SetMsgMarshaler(GetMarshaler(KInsert), nil)
     (*inputStream).SetRepackFunc(repackFunc)
-    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(kInsert))
+    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(KInsert))
     (*outputStream).Start()

     //send msgPack

@@ -52,14 +52,14 @@ func TestNewStream_Delete(t *testing.T) {
     consumerSubName := "subDelete"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kDelete, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kDelete, 1, 1))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KDelete, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KDelete, 1, 1))
     inputStream := NewInputStream(pulsarAddress, producerChannels, false)
     outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, false)

-    (*inputStream).SetMsgMarshaler(GetMarshaler(kDelete), nil)
+    (*inputStream).SetMsgMarshaler(GetMarshaler(KDelete), nil)
     (*inputStream).SetRepackFunc(repackFunc)
-    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(kDelete))
+    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(KDelete))
     (*outputStream).Start()

     //send msgPack

@@ -90,14 +90,14 @@ func TestNewStream_Search(t *testing.T) {
     consumerSubName := "subSearch"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearch, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearch, 1, 1))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearch, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearch, 1, 1))
     inputStream := NewInputStream(pulsarAddress, producerChannels, false)
     outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, false)

-    (*inputStream).SetMsgMarshaler(GetMarshaler(kSearch), nil)
+    (*inputStream).SetMsgMarshaler(GetMarshaler(KSearch), nil)
     (*inputStream).SetRepackFunc(repackFunc)
-    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(kSearch))
+    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(KSearch))
     (*outputStream).Start()

     //send msgPack

@@ -128,14 +128,14 @@ func TestNewStream_SearchResult(t *testing.T) {
     consumerSubName := "subInsert"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearchResult, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearchResult, 1, 1))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearchResult, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearchResult, 1, 1))
     inputStream := NewInputStream(pulsarAddress, producerChannels, false)
     outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, false)

-    (*inputStream).SetMsgMarshaler(GetMarshaler(kSearchResult), nil)
+    (*inputStream).SetMsgMarshaler(GetMarshaler(KSearchResult), nil)
     (*inputStream).SetRepackFunc(repackFunc)
-    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(kSearchResult))
+    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(KSearchResult))
     (*outputStream).Start()

     //send msgPack

@@ -159,21 +159,21 @@ func TestNewStream_SearchResult(t *testing.T) {
     }
 }

-func TestNewStream_TimeSync(t *testing.T) {
+func TestNewStream_TimeTick(t *testing.T) {
     pulsarAddress := "pulsar://localhost:6650"
     producerChannels := []string{"timeSync1", "timeSync2"}
     consumerChannels := []string{"timeSync1", "timeSync2"}
     consumerSubName := "subInsert"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeSync, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeSync, 1, 1))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeTick, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeTick, 1, 1))
     inputStream := NewInputStream(pulsarAddress, producerChannels, false)
     outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, false)

-    (*inputStream).SetMsgMarshaler(GetMarshaler(kTimeSync), nil)
+    (*inputStream).SetMsgMarshaler(GetMarshaler(KTimeTick), nil)
     (*inputStream).SetRepackFunc(repackFunc)
-    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(kTimeSync))
+    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(KTimeTick))
     (*outputStream).Start()

     //send msgPack

@@ -196,18 +196,18 @@ func TestNewStream_TimeSync(t *testing.T) {
     }
 }

-func TestNewStream_Insert_TimeTick(t *testing.T) {
+func TestNewTtStream_Insert_TimeSync(t *testing.T) {
     pulsarAddress := "pulsar://localhost:6650"
     producerChannels := []string{"insert"}
     consumerChannels := []string{"insert"}
     consumerSubName := "subInsert"

     msgPack := MsgPack{}
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 0, 0))
-    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 1, 1))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 0, 0))
+    msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 1, 1))

     insertRequest := internalPb.InsertRequest{
-        ReqType:        internalPb.ReqType_kTimeTick,
+        MsgType:        internalPb.MsgType_kTimeTick,
         ReqId:          2,
         CollectionName: "Collection",
         PartitionTag:   "Partition",

@@ -226,9 +226,9 @@ func TestNewStream_Insert_TimeTick(t *testing.T) {
     inputStream := NewInputStream(pulsarAddress, producerChannels, false)
     outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, true)

-    (*inputStream).SetMsgMarshaler(GetMarshaler(kInsert), nil)
+    (*inputStream).SetMsgMarshaler(GetMarshaler(KInsert), nil)
     (*inputStream).SetRepackFunc(repackFunc)
-    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(kInsert))
+    (*outputStream).SetMsgMarshaler(nil, GetMarshaler(KInsert))
     (*outputStream).Start()

     //send msgPack

@@ -245,7 +245,7 @@ func TestNewStream_Insert_TimeTick(t *testing.T) {
             fmt.Println("msg type: ", (*v).Type(), ", msg value: ", *v)
         }
     }
-    if receiveCount+1 >= len(msgPack.Msgs) {
+    if receiveCount + 1 >= len(msgPack.Msgs) {
         break
     }
 }
@@ -8,14 +8,14 @@ import (
 type MsgType uint32

 const (
-    kInsert       MsgType = 400
-    kDelete       MsgType = 401
-    kSearch       MsgType = 500
-    kSearchResult MsgType = 1000
+    KInsert       MsgType = 400
+    KDelete       MsgType = 401
+    KSearch       MsgType = 500
+    KSearchResult MsgType = 1000

-    kSegmentStatics MsgType = 1100
-    kTimeTick       MsgType = 1200
-    kTimeSync       MsgType = 1201
+    KSegmentStatics MsgType = 1100
+    KTimeTick       MsgType = 1200
+    KTimeSync       MsgType = 1201
 )

 type TsMsg interface {
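The constant rename is about Go visibility, not style: identifiers starting with an upper-case letter are exported, so the KInsert-style names make these MsgType values usable outside the msgstream package. A compressed, runnable view of the new block (values copied from the diff):

```go
package main

import "fmt"

type MsgType uint32

// With the leading letter capitalized, these constants are exported to other
// packages; the old kInsert-style names were package-private.
const (
	KInsert       MsgType = 400
	KDelete       MsgType = 401
	KSearch       MsgType = 500
	KSearchResult MsgType = 1000

	KSegmentStatics MsgType = 1100
	KTimeTick       MsgType = 1200
	KTimeSync       MsgType = 1201
)

func main() {
	fmt.Println(KInsert, KTimeTick) // 400 1200
}
```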
@@ -67,10 +67,10 @@ func (it InsertTask) EndTs() Timestamp {
 }

 func (it InsertTask) Type() MsgType {
-    if it.ReqType == internalPb.ReqType_kTimeTick {
-        return kTimeSync
+    if it.MsgType == internalPb.MsgType_kTimeTick {
+        return KTimeTick
     }
-    return kInsert
+    return KInsert
 }

 func (it InsertTask) HashKeys() []int32 {

@@ -118,10 +118,10 @@ func (dt DeleteTask) EndTs() Timestamp {
 }

 func (dt DeleteTask) Type() MsgType {
-    if dt.ReqType == internalPb.ReqType_kTimeTick {
-        return kTimeSync
+    if dt.MsgType == internalPb.MsgType_kTimeTick {
+        return KTimeTick
     }
-    return kDelete
+    return KDelete
 }

 func (dt DeleteTask) HashKeys() []int32 {

@@ -147,10 +147,10 @@ func (st SearchTask) EndTs() Timestamp {
 }

 func (st SearchTask) Type() MsgType {
-    if st.ReqType == internalPb.ReqType_kTimeTick {
-        return kTimeSync
+    if st.MsgType == internalPb.MsgType_kTimeTick {
+        return KTimeTick
     }
-    return kSearch
+    return KSearch
 }

 func (st SearchTask) HashKeys() []int32 {

@@ -176,36 +176,36 @@ func (srt SearchResultTask) EndTs() Timestamp {
 }

 func (srt SearchResultTask) Type() MsgType {
-    return kSearchResult
+    return KSearchResult
 }

 func (srt SearchResultTask) HashKeys() []int32 {
     return srt.HashValues
 }

-/////////////////////////////////////////TimeSync//////////////////////////////////////////
-type TimeSyncTask struct {
+/////////////////////////////////////////TimeTick//////////////////////////////////////////
+type TimeTickTask struct {
     HashValues []int32
     internalPb.TimeTickMsg
 }

-func (tst TimeSyncTask) SetTs(ts Timestamp) {
+func (tst TimeTickTask) SetTs(ts Timestamp) {
     tst.Timestamp = uint64(ts)
 }

-func (tst TimeSyncTask) BeginTs() Timestamp {
+func (tst TimeTickTask) BeginTs() Timestamp {
     return Timestamp(tst.Timestamp)
 }

-func (tst TimeSyncTask) EndTs() Timestamp {
+func (tst TimeTickTask) EndTs() Timestamp {
     return Timestamp(tst.Timestamp)
 }

-func (tst TimeSyncTask) Type() MsgType {
-    return kTimeSync
+func (tst TimeTickTask) Type() MsgType {
+    return KTimeTick
 }

-func (tst TimeSyncTask) HashKeys() []int32 {
+func (tst TimeTickTask) HashKeys() []int32 {
     return tst.HashValues
 }
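InsertTask.Type() keeps its double role after the rename: an insert message whose embedded request is flagged kTimeTick reports itself as a time tick, now via the exported constants. A self-contained sketch of that dispatch with local stand-in types:

```go
package main

import "fmt"

type MsgType uint32

const (
	KInsert   MsgType = 400
	KTimeTick MsgType = 1200
)

// pbMsgType stands in for internalPb.MsgType on the embedded request.
type pbMsgType int32

const pbKTimeTick pbMsgType = 1200

type InsertTask struct {
	ReqMsgType pbMsgType // stand-in for the embedded InsertRequest.MsgType
}

// Type mirrors the renamed check: inserts carrying a time tick marker are
// reported as KTimeTick, everything else as KInsert.
func (it InsertTask) Type() MsgType {
	if it.ReqMsgType == pbKTimeTick {
		return KTimeTick
	}
	return KInsert
}

func main() {
	fmt.Println(InsertTask{ReqMsgType: pbKTimeTick}.Type()) // 1200
	fmt.Println(InsertTask{}.Type())                        // 400
}
```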
@@ -6,7 +6,7 @@ import "common.proto";
 import "service_msg.proto";


-enum ReqType {
+enum MsgType {
     kNone = 0;
     /* Definition Requests: collection */
     kCreateCollection = 100;
@@ -72,7 +72,7 @@ message TsoResponse {

 message CreateCollectionRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     uint64 timestamp = 3;
     int64 proxy_id = 4;

@@ -81,7 +81,7 @@ message CreateCollectionRequest {

 message DropCollectionRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     uint64 timestamp = 3;
     int64 proxy_id = 4;

@@ -90,7 +90,7 @@ message DropCollectionRequest {

 message HasCollectionRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     uint64 timestamp = 3;
     int64 proxy_id = 4;

@@ -99,7 +99,7 @@ message HasCollectionRequest {

 message DescribeCollectionRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     uint64 timestamp = 3;
     int64 proxy_id = 4;

@@ -108,7 +108,7 @@ message DescribeCollectionRequest {

 message ShowCollectionRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     uint64 timestamp = 3;
     int64 proxy_id = 4;

@@ -116,7 +116,7 @@ message ShowCollectionRequest {

 message CreatePartitionRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     uint64 timestamp = 3;
     int64 proxy_id = 4;

@@ -125,7 +125,7 @@ message CreatePartitionRequest {

 message DropPartitionRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     uint64 timestamp = 3;
     int64 proxy_id = 4;

@@ -134,7 +134,7 @@ message DropPartitionRequest {

 message HasPartitionRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     uint64 timestamp = 3;
     int64 proxy_id = 4;

@@ -143,7 +143,7 @@ message HasPartitionRequest {

 message DescribePartitionRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     uint64 timestamp = 3;
     int64 proxy_id = 4;

@@ -152,7 +152,7 @@ message DescribePartitionRequest {

 message ShowPartitionRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     uint64 timestamp = 3;
     int64 proxy_id = 4;

@@ -161,7 +161,7 @@ message ShowPartitionRequest {

 message InsertRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     string collection_name = 3;
     string partition_tag = 4;

@@ -175,7 +175,7 @@ message InsertRequest {

 message DeleteRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     string collection_name = 3;
     int64 channel_id = 4;

@@ -186,7 +186,7 @@ message DeleteRequest {

 message SearchRequest {
-    ReqType req_type = 1;
+    MsgType msg_type = 1;
     int64 req_id = 2;
     int64 proxy_id = 3;
     uint64 timestamp = 4;

@@ -205,7 +205,6 @@ message SearchResult {
     repeated service.Hits hits = 7;
 }

 message TimeTickMsg {
     int64 peer_id = 1;
     uint64 timestamp = 2;
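After regenerating the Go bindings, producers fill msg_type/MsgType instead of req_type/ReqType; the msgstream tests above already show the new shape. A minimal usage sketch — the internalPb import path is assumed from the repository layout, and the Get accessor is standard protoc-gen-go output:

```go
package main

import (
	"fmt"

	internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

func main() {
	// Field and enum renamed by this commit: MsgType / MsgType_kInsert
	// replace ReqType / ReqType_kInsert.
	req := internalPb.InsertRequest{
		MsgType:        internalPb.MsgType_kInsert,
		ReqId:          1,
		CollectionName: "Collection",
		PartitionTag:   "Partition",
	}
	fmt.Println(req.GetMsgType()) // prints kInsert via the enum's String()
}
```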
@@ -22,33 +22,33 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

-type ReqType int32
+type MsgType int32

 const (
-    ReqType_kNone ReqType = 0
+    MsgType_kNone MsgType = 0
     // Definition Requests: collection
-    ReqType_kCreateCollection ReqType = 100
-    ReqType_kDropCollection ReqType = 101
-    ReqType_kHasCollection ReqType = 102
-    ReqType_kDescribeCollection ReqType = 103
-    ReqType_kShowCollections ReqType = 104
+    MsgType_kCreateCollection MsgType = 100
+    MsgType_kDropCollection MsgType = 101
+    MsgType_kHasCollection MsgType = 102
+    MsgType_kDescribeCollection MsgType = 103
+    MsgType_kShowCollections MsgType = 104
     // Definition Requests: partition
-    ReqType_kCreatePartition ReqType = 200
-    ReqType_kDropPartition ReqType = 201
-    ReqType_kHasPartition ReqType = 202
-    ReqType_kDescribePartition ReqType = 203
-    ReqType_kShowPartitions ReqType = 204
+    MsgType_kCreatePartition MsgType = 200
+    MsgType_kDropPartition MsgType = 201
+    MsgType_kHasPartition MsgType = 202
+    MsgType_kDescribePartition MsgType = 203
+    MsgType_kShowPartitions MsgType = 204
     // Manipulation Requests
-    ReqType_kInsert ReqType = 400
-    ReqType_kDelete ReqType = 401
+    MsgType_kInsert MsgType = 400
+    MsgType_kDelete MsgType = 401
     // Query
-    ReqType_kSearch ReqType = 500
+    MsgType_kSearch MsgType = 500
     // System Control
-    ReqType_kTimeTick ReqType = 1200
-    ReqType_kTimeSync ReqType = 1201
+    MsgType_kTimeTick MsgType = 1200
+    MsgType_kTimeSync MsgType = 1201
 )

-var ReqType_name = map[int32]string{
+var MsgType_name = map[int32]string{
     0: "kNone",
     100: "kCreateCollection",
     101: "kDropCollection",

@@ -67,7 +67,7 @@ var ReqType_name = map[int32]string{
     1201: "kTimeSync",
 }

-var ReqType_value = map[string]int32{
+var MsgType_value = map[string]int32{
     "kNone": 0,
     "kCreateCollection": 100,
     "kDropCollection": 101,

@@ -86,11 +86,11 @@ var ReqType_value = map[string]int32{
     "kTimeSync": 1201,
 }

-func (x ReqType) String() string {
-    return proto.EnumName(ReqType_name, int32(x))
+func (x MsgType) String() string {
+    return proto.EnumName(MsgType_name, int32(x))
 }

-func (ReqType) EnumDescriptor() ([]byte, []int) {
+func (MsgType) EnumDescriptor() ([]byte, []int) {
     return fileDescriptor_7eb37f6b80b23116, []int{0}
 }
@@ -346,7 +346,7 @@ func (m *TsoResponse) GetCount() uint32 {
 }

 type CreateCollectionRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
     ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`

@@ -381,11 +381,11 @@ func (m *CreateCollectionRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_CreateCollectionRequest proto.InternalMessageInfo

-func (m *CreateCollectionRequest) GetReqType() ReqType {
+func (m *CreateCollectionRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *CreateCollectionRequest) GetReqId() int64 {

@@ -417,7 +417,7 @@ func (m *CreateCollectionRequest) GetSchema() *commonpb.Blob {
 }

 type DropCollectionRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
     ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`

@@ -452,11 +452,11 @@ func (m *DropCollectionRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_DropCollectionRequest proto.InternalMessageInfo

-func (m *DropCollectionRequest) GetReqType() ReqType {
+func (m *DropCollectionRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *DropCollectionRequest) GetReqId() int64 {

@@ -488,7 +488,7 @@ func (m *DropCollectionRequest) GetCollectionName() *servicepb.CollectionName {
 }

 type HasCollectionRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
     ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`

@@ -523,11 +523,11 @@ func (m *HasCollectionRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_HasCollectionRequest proto.InternalMessageInfo

-func (m *HasCollectionRequest) GetReqType() ReqType {
+func (m *HasCollectionRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *HasCollectionRequest) GetReqId() int64 {

@@ -559,7 +559,7 @@ func (m *HasCollectionRequest) GetCollectionName() *servicepb.CollectionName {
 }

 type DescribeCollectionRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
     ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`

@@ -594,11 +594,11 @@ func (m *DescribeCollectionRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_DescribeCollectionRequest proto.InternalMessageInfo

-func (m *DescribeCollectionRequest) GetReqType() ReqType {
+func (m *DescribeCollectionRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *DescribeCollectionRequest) GetReqId() int64 {

@@ -630,7 +630,7 @@ func (m *DescribeCollectionRequest) GetCollectionName() *servicepb.CollectionNam
 }

 type ShowCollectionRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
     ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`

@@ -664,11 +664,11 @@ func (m *ShowCollectionRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_ShowCollectionRequest proto.InternalMessageInfo

-func (m *ShowCollectionRequest) GetReqType() ReqType {
+func (m *ShowCollectionRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *ShowCollectionRequest) GetReqId() int64 {

@@ -693,7 +693,7 @@ func (m *ShowCollectionRequest) GetProxyId() int64 {
 }

 type CreatePartitionRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
     ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`

@@ -728,11 +728,11 @@ func (m *CreatePartitionRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_CreatePartitionRequest proto.InternalMessageInfo

-func (m *CreatePartitionRequest) GetReqType() ReqType {
+func (m *CreatePartitionRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *CreatePartitionRequest) GetReqId() int64 {

@@ -764,7 +764,7 @@ func (m *CreatePartitionRequest) GetPartitionName() *servicepb.PartitionName {
 }

 type DropPartitionRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
     ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`

@@ -799,11 +799,11 @@ func (m *DropPartitionRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_DropPartitionRequest proto.InternalMessageInfo

-func (m *DropPartitionRequest) GetReqType() ReqType {
+func (m *DropPartitionRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *DropPartitionRequest) GetReqId() int64 {

@@ -835,7 +835,7 @@ func (m *DropPartitionRequest) GetPartitionName() *servicepb.PartitionName {
 }

 type HasPartitionRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
     ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`

@@ -870,11 +870,11 @@ func (m *HasPartitionRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_HasPartitionRequest proto.InternalMessageInfo

-func (m *HasPartitionRequest) GetReqType() ReqType {
+func (m *HasPartitionRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *HasPartitionRequest) GetReqId() int64 {

@@ -906,7 +906,7 @@ func (m *HasPartitionRequest) GetPartitionName() *servicepb.PartitionName {
 }

 type DescribePartitionRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
     ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`

@@ -941,11 +941,11 @@ func (m *DescribePartitionRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_DescribePartitionRequest proto.InternalMessageInfo

-func (m *DescribePartitionRequest) GetReqType() ReqType {
+func (m *DescribePartitionRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *DescribePartitionRequest) GetReqId() int64 {

@@ -977,7 +977,7 @@ func (m *DescribePartitionRequest) GetPartitionName() *servicepb.PartitionName {
 }

 type ShowPartitionRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
     ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`

@@ -1012,11 +1012,11 @@ func (m *ShowPartitionRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_ShowPartitionRequest proto.InternalMessageInfo

-func (m *ShowPartitionRequest) GetReqType() ReqType {
+func (m *ShowPartitionRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *ShowPartitionRequest) GetReqId() int64 {

@@ -1048,7 +1048,7 @@ func (m *ShowPartitionRequest) GetCollectionName() *servicepb.CollectionName {
 }

 type InsertRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     CollectionName string `protobuf:"bytes,3,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
     PartitionTag string `protobuf:"bytes,4,opt,name=partition_tag,json=partitionTag,proto3" json:"partition_tag,omitempty"`

@@ -1088,11 +1088,11 @@ func (m *InsertRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_InsertRequest proto.InternalMessageInfo

-func (m *InsertRequest) GetReqType() ReqType {
+func (m *InsertRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *InsertRequest) GetReqId() int64 {

@@ -1159,7 +1159,7 @@ func (m *InsertRequest) GetRowData() []*commonpb.Blob {
 }

 type DeleteRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     CollectionName string `protobuf:"bytes,3,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
     ChannelId int64 `protobuf:"varint,4,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`

@@ -1196,11 +1196,11 @@ func (m *DeleteRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo

-func (m *DeleteRequest) GetReqType() ReqType {
+func (m *DeleteRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *DeleteRequest) GetReqId() int64 {

@@ -1246,7 +1246,7 @@ func (m *DeleteRequest) GetPrimaryKeys() []int64 {
 }

 type SearchRequest struct {
-    ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
+    MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
     ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
     ProxyId int64 `protobuf:"varint,3,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
     Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`

@@ -1282,11 +1282,11 @@ func (m *SearchRequest) XXX_DiscardUnknown() {

 var xxx_messageInfo_SearchRequest proto.InternalMessageInfo

-func (m *SearchRequest) GetReqType() ReqType {
+func (m *SearchRequest) GetMsgType() MsgType {
     if m != nil {
-        return m.ReqType
+        return m.MsgType
     }
-    return ReqType_kNone
+    return MsgType_kNone
 }

 func (m *SearchRequest) GetReqId() int64 {

@@ -1632,7 +1632,7 @@ func (m *SegmentStatistics) GetNumRows() int64 {
 }

 func init() {
-    proto.RegisterEnum("milvus.proto.internal.ReqType", ReqType_name, ReqType_value)
+    proto.RegisterEnum("milvus.proto.internal.MsgType", MsgType_name, MsgType_value)
     proto.RegisterEnum("milvus.proto.internal.PeerRole", PeerRole_name, PeerRole_value)
     proto.RegisterType((*IdRequest)(nil), "milvus.proto.internal.IdRequest")
    proto.RegisterType((*IdResponse)(nil), "milvus.proto.internal.IdResponse")
@ -1661,79 +1661,79 @@ func init() {
|
|||
func init() { proto.RegisterFile("internal_msg.proto", fileDescriptor_7eb37f6b80b23116) }
|
||||
|
||||
var fileDescriptor_7eb37f6b80b23116 = []byte{
|
||||
// 1178 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0x4b, 0x6f, 0x1b, 0xd5,
0x17, 0xef, 0xf8, 0x19, 0x1f, 0xc7, 0xce, 0xe4, 0x26, 0xf9, 0xc7, 0xed, 0x1f, 0x5a, 0x33, 0x45,
0x22, 0xaa, 0x84, 0x23, 0x5c, 0x16, 0x74, 0xdb, 0x7a, 0x51, 0x53, 0xb5, 0xaa, 0xc6, 0x11, 0x48,
0x48, 0x68, 0x34, 0x9e, 0x39, 0xd8, 0x57, 0xf3, 0xb8, 0x93, 0x7b, 0xaf, 0x13, 0x9c, 0x2f, 0xc0,
0x16, 0xc4, 0x92, 0x1d, 0x9f, 0x00, 0xf6, 0x7c, 0x00, 0x5e, 0x7b, 0xbe, 0x04, 0x08, 0x2a, 0x81,
0xba, 0x45, 0xf7, 0xce, 0xf8, 0x31, 0xce, 0x83, 0x67, 0x51, 0xa4, 0xec, 0xe6, 0x9c, 0xfb, 0x38,
0xe7, 0xf7, 0x3b, 0x0f, 0xdf, 0x63, 0x20, 0x34, 0x96, 0xc8, 0x63, 0x37, 0x74, 0x22, 0x31, 0xea,
0x24, 0x9c, 0x49, 0x46, 0x76, 0x22, 0x1a, 0x1e, 0x4d, 0x44, 0x2a, 0x75, 0x66, 0x1b, 0x6e, 0xac,
0x7b, 0x2c, 0x8a, 0x58, 0x9c, 0xaa, 0x6f, 0x6c, 0x0a, 0xe4, 0x47, 0xd4, 0xc3, 0xc5, 0x39, 0x8b,
0x41, 0xad, 0xef, 0xdb, 0x78, 0x38, 0x41, 0x21, 0xc9, 0x2e, 0x54, 0x13, 0x44, 0xee, 0x50, 0xbf,
0x65, 0xb4, 0x8d, 0xbd, 0xa2, 0x5d, 0x51, 0x62, 0xdf, 0x27, 0x77, 0xa1, 0xc4, 0x59, 0x88, 0xad,
0x42, 0xdb, 0xd8, 0x6b, 0x76, 0x6f, 0x75, 0xce, 0x34, 0xd6, 0x79, 0x8a, 0xc8, 0x6d, 0x16, 0xa2,
0xad, 0x37, 0x93, 0x6d, 0x28, 0x7b, 0x6c, 0x12, 0xcb, 0x56, 0xb1, 0x6d, 0xec, 0x35, 0xec, 0x54,
0xb0, 0x46, 0x00, 0xca, 0xa0, 0x48, 0x58, 0x2c, 0x90, 0xdc, 0x85, 0x8a, 0x90, 0xae, 0x9c, 0x08,
0x6d, 0xb0, 0xde, 0xfd, 0x7f, 0xfe, 0xea, 0xcc, 0xfb, 0x81, 0xde, 0x62, 0x67, 0x5b, 0x49, 0x13,
0x0a, 0xd4, 0xd7, 0xbe, 0x14, 0xed, 0x02, 0xf5, 0xcf, 0x31, 0x94, 0x00, 0x1c, 0x08, 0xf6, 0x5f,
0x42, 0x3b, 0x82, 0xba, 0xb6, 0xf8, 0x4f, 0xb0, 0xbd, 0x04, 0x35, 0x49, 0x23, 0x14, 0xd2, 0x8d,
0x12, 0xed, 0x53, 0xc9, 0x5e, 0x28, 0xce, 0xb1, 0xfb, 0x83, 0x01, 0xbb, 0x0f, 0x38, 0xba, 0x12,
0x1f, 0xb0, 0x30, 0x44, 0x4f, 0x52, 0x16, 0xcf, 0x70, 0xdf, 0x83, 0x35, 0x8e, 0x87, 0x8e, 0x9c,
0x26, 0xa8, 0xdd, 0x68, 0x76, 0x6f, 0x9e, 0x03, 0xd1, 0xc6, 0xc3, 0x83, 0x69, 0x82, 0x76, 0x95,
0xa7, 0x1f, 0x64, 0x07, 0x2a, 0xea, 0xe8, 0x9c, 0xea, 0x32, 0xc7, 0xc3, 0xbe, 0x9f, 0xf7, 0xb0,
0xb8, 0xea, 0xe1, 0x75, 0x58, 0x4b, 0x38, 0xfb, 0x70, 0xaa, 0x8e, 0x95, 0xf4, 0xb1, 0xaa, 0x96,
0xfb, 0x3e, 0x79, 0x03, 0x2a, 0xc2, 0x1b, 0x63, 0xe4, 0xb6, 0xca, 0x9a, 0x8f, 0xeb, 0x67, 0xf2,
0x71, 0x3f, 0x64, 0x43, 0x3b, 0xdb, 0x68, 0x3d, 0x33, 0x60, 0xa7, 0xc7, 0x59, 0x72, 0xa9, 0x71,
0x3d, 0x86, 0x0d, 0x6f, 0xee, 0x9f, 0x13, 0xbb, 0x11, 0x66, 0x00, 0x5f, 0xcd, 0x7b, 0x94, 0x15,
0x5f, 0x67, 0x01, 0xe6, 0x89, 0x1b, 0xa1, 0xdd, 0xf4, 0x72, 0xb2, 0xf5, 0x8b, 0x01, 0xdb, 0x0f,
0x5d, 0x71, 0x95, 0x20, 0xff, 0x66, 0xc0, 0xf5, 0x1e, 0x0a, 0x8f, 0xd3, 0x21, 0x5e, 0x25, 0xdc,
0x9f, 0x1b, 0xb0, 0x33, 0x18, 0xb3, 0xe3, 0xcb, 0x8c, 0xd9, 0xfa, 0xd9, 0x80, 0xff, 0xa5, 0xdd,
0xe5, 0xa9, 0xcb, 0x25, 0xbd, 0xa4, 0x91, 0x79, 0x1b, 0x9a, 0xc9, 0xcc, 0xbd, 0xe5, 0xc0, 0xdc,
0x3e, 0x3b, 0x30, 0x73, 0x28, 0x3a, 0x2e, 0x8d, 0x64, 0x59, 0xb4, 0x7e, 0x32, 0x60, 0x5b, 0x75,
0x9d, 0xab, 0x82, 0xf7, 0x47, 0x03, 0xb6, 0x1e, 0xba, 0xe2, 0xaa, 0xc0, 0x7d, 0x66, 0x40, 0x6b,
0xd6, 0x6d, 0xae, 0x0a, 0x66, 0xf5, 0xa3, 0xa2, 0x3a, 0xcd, 0x65, 0xc6, 0xfb, 0x2f, 0x37, 0xd7,
0xe7, 0x05, 0x68, 0xf4, 0x63, 0x81, 0x5c, 0xbe, 0x38, 0xac, 0xaf, 0x9d, 0x76, 0x59, 0x21, 0xae,
0xad, 0x3a, 0x43, 0x6e, 0xc3, 0x22, 0x20, 0x8e, 0x74, 0x47, 0x1a, 0x7b, 0xcd, 0x5e, 0x9f, 0x2b,
0x0f, 0xdc, 0x11, 0x79, 0x19, 0x40, 0xe0, 0x28, 0xc2, 0x58, 0x2a, 0x43, 0x65, 0x6d, 0xa8, 0x96,
0x69, 0xfa, 0xbe, 0x5a, 0xf6, 0xc6, 0x6e, 0x1c, 0x63, 0xa8, 0x96, 0x2b, 0xe9, 0x72, 0xa6, 0xe9,
0xfb, 0x39, 0x66, 0xab, 0x79, 0x66, 0x6f, 0x02, 0xcc, 0x23, 0x20, 0x5a, 0x6b, 0xed, 0xe2, 0x5e,
0xc9, 0x5e, 0xd2, 0xa8, 0xc7, 0x31, 0x67, 0xc7, 0x0e, 0xf5, 0x45, 0xab, 0xd6, 0x2e, 0xaa, 0xc7,
0x31, 0x67, 0xc7, 0x7d, 0x5f, 0x90, 0x37, 0x61, 0x4d, 0x2d, 0xf8, 0xae, 0x74, 0x5b, 0xd0, 0x2e,
0x5e, 0xfc, 0x68, 0x53, 0x77, 0xf4, 0x5c, 0xe9, 0x5a, 0x1f, 0x15, 0xa0, 0xd1, 0xc3, 0x10, 0x25,
0x5e, 0x02, 0xe6, 0xf3, 0xac, 0x95, 0x2e, 0x62, 0xad, 0x7c, 0x11, 0x6b, 0x95, 0x53, 0xac, 0xbd,
0x02, 0xeb, 0x09, 0xa7, 0x91, 0xcb, 0xa7, 0x4e, 0x80, 0x53, 0xd1, 0xaa, 0x6a, 0xea, 0xea, 0x99,
0xee, 0x11, 0x4e, 0x85, 0xf5, 0xdc, 0x80, 0xc6, 0x00, 0x5d, 0xee, 0x8d, 0x5f, 0x1c, 0x13, 0xcb,
0x08, 0x8a, 0x79, 0x04, 0xb9, 0x52, 0x2c, 0xad, 0x96, 0xe2, 0x1d, 0xd8, 0xe4, 0x28, 0x26, 0xa1,
0x74, 0x96, 0x08, 0x4a, 0x39, 0xd8, 0x48, 0x17, 0x1e, 0xcc, 0x69, 0xda, 0x87, 0xf2, 0xe1, 0x04,
0xf9, 0x54, 0xa7, 0xdd, 0x85, 0x59, 0x90, 0xee, 0xb3, 0x3e, 0x2d, 0xc0, 0xfa, 0x0c, 0xb9, 0xba,
0xea, 0xef, 0x4d, 0x43, 0x7f, 0x1d, 0xb2, 0x05, 0x0d, 0xed, 0x80, 0x13, 0x33, 0x1f, 0x17, 0x11,
0xaf, 0x6b, 0xe5, 0x13, 0xe6, 0xe3, 0x2a, 0x2d, 0xe5, 0x3f, 0x45, 0x4b, 0xe5, 0x6c, 0x5a, 0x3a,
0x50, 0x1a, 0x53, 0x99, 0x86, 0xbe, 0xde, 0xbd, 0x71, 0x76, 0x9f, 0x7a, 0x48, 0xa5, 0xb0, 0xf5,
0x3e, 0xab, 0x07, 0xf5, 0x03, 0x1a, 0xe1, 0x01, 0xf5, 0x82, 0xc7, 0x62, 0x74, 0xfe, 0x50, 0x7a,
0xe1, 0x14, 0x68, 0x7d, 0x66, 0x40, 0xf5, 0x11, 0x4e, 0xbb, 0x03, 0x1c, 0x69, 0x86, 0x74, 0xe9,
0x66, 0x37, 0x94, 0x75, 0xe5, 0x92, 0x5b, 0x50, 0x5f, 0xca, 0xcd, 0x8c, 0x3d, 0x58, 0xa4, 0xe6,
0x1f, 0x77, 0x69, 0x2a, 0x9c, 0x23, 0x37, 0xcc, 0x08, 0x5c, 0xb3, 0xab, 0x54, 0xbc, 0xa3, 0x44,
0x75, 0xf3, 0xa2, 0x49, 0x89, 0x56, 0x59, 0x27, 0x3d, 0xcc, 0xbb, 0x94, 0xb0, 0xde, 0x07, 0xc8,
0x9c, 0x53, 0x10, 0x17, 0x11, 0x34, 0x96, 0x23, 0xf8, 0x16, 0x54, 0x03, 0x9c, 0x76, 0x05, 0x8e,
0x5a, 0x05, 0xcd, 0xdd, 0x79, 0x55, 0x90, 0x5d, 0x65, 0xcf, 0xb6, 0x5b, 0x31, 0x6c, 0x0e, 0x52,
0x63, 0x2a, 0x57, 0xa8, 0x90, 0xd4, 0x13, 0x2b, 0x9d, 0xd3, 0x58, 0xed, 0x9c, 0xb7, 0xa0, 0x1e,
0x61, 0xc4, 0xf8, 0xd4, 0x11, 0xf4, 0x04, 0x67, 0x6c, 0xa4, 0xaa, 0x01, 0x3d, 0x41, 0x85, 0x37,
0x9e, 0x44, 0x0e, 0x67, 0xc7, 0x62, 0x96, 0x50, 0xf1, 0x24, 0xb2, 0xd9, 0xb1, 0xb8, 0xf3, 0x55,
0x01, 0xaa, 0x59, 0x29, 0x92, 0x1a, 0x94, 0x83, 0x27, 0x2c, 0x46, 0xf3, 0x1a, 0xd9, 0x81, 0xcd,
0x60, 0x75, 0xe6, 0x36, 0x7d, 0xb2, 0x05, 0x1b, 0x41, 0x7e, 0x60, 0x35, 0x91, 0x10, 0x68, 0x06,
0xb9, 0x89, 0xce, 0xfc, 0x80, 0xec, 0xc2, 0x56, 0x70, 0x7a, 0xe4, 0x31, 0x47, 0x64, 0x1b, 0xcc,
0x20, 0x3f, 0x13, 0x08, 0x73, 0x4c, 0x76, 0xc0, 0x0c, 0x56, 0x1e, 0xe1, 0xe6, 0xd7, 0x06, 0xd9,
0x82, 0x66, 0x90, 0x7b, 0xa9, 0x9a, 0xdf, 0x18, 0x84, 0x40, 0x23, 0x58, 0x7e, 0xce, 0x99, 0xdf,
0x1a, 0x64, 0x17, 0x48, 0x70, 0xea, 0xcd, 0x63, 0x7e, 0x67, 0x90, 0x6d, 0xd8, 0x08, 0x72, 0x0f,
0x03, 0x61, 0x7e, 0x6f, 0x90, 0x75, 0xa8, 0x06, 0xe9, 0x6f, 0xa7, 0xf9, 0x71, 0x51, 0x4b, 0x69,
0x3f, 0x37, 0x3f, 0x49, 0xa5, 0xb4, 0xb2, 0xcd, 0x5f, 0x8b, 0xa4, 0x09, 0xb5, 0x60, 0x96, 0xd2,
0xe6, 0x17, 0xb5, 0xb9, 0x3c, 0x98, 0xc6, 0x9e, 0xf9, 0x65, 0xed, 0xce, 0x3d, 0x58, 0x9b, 0xfd,
0x77, 0x42, 0x00, 0x2a, 0x8f, 0x5d, 0x21, 0x91, 0x9b, 0xd7, 0xd4, 0xb7, 0x8d, 0xae, 0x8f, 0xdc,
0x34, 0xd4, 0xf7, 0xbb, 0x9c, 0x2a, 0x7d, 0x41, 0x51, 0xfc, 0x54, 0x95, 0xb2, 0x59, 0xbc, 0xdf,
0x7b, 0xef, 0xfe, 0x88, 0xca, 0xf1, 0x64, 0xa8, 0x9a, 0xc3, 0xfe, 0x09, 0x0d, 0x43, 0x7a, 0x22,
0xd1, 0x1b, 0xef, 0xa7, 0x99, 0xf2, 0xba, 0x4f, 0x85, 0xe4, 0x74, 0x38, 0x91, 0xe8, 0xef, 0xcf,
0xf2, 0x65, 0x5f, 0xa7, 0xcf, 0x5c, 0x4c, 0x86, 0xc3, 0x8a, 0xd6, 0xdc, 0xfd, 0x3d, 0x00, 0x00,
0xff, 0xff, 0x82, 0x1f, 0xa0, 0x91, 0x35, 0x13, 0x00, 0x00,
// 1181 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0x4b, 0x6f, 0x1c, 0xc5,
0x13, 0xcf, 0xec, 0xd3, 0x5b, 0xeb, 0x5d, 0x8f, 0xdb, 0xf6, 0xdf, 0x9b, 0xfc, 0x21, 0x31, 0x13,
0x24, 0xac, 0x48, 0xd8, 0xc2, 0xe1, 0x40, 0xae, 0xc9, 0x1e, 0xb2, 0x44, 0x8e, 0xac, 0x59, 0x0b,
0x24, 0x24, 0x34, 0x9a, 0x9d, 0x29, 0x66, 0x5b, 0xf3, 0xe8, 0x71, 0x77, 0xaf, 0xcd, 0xfa, 0x0b,
0x70, 0x05, 0x71, 0xe4, 0xc6, 0x27, 0x80, 0x3b, 0x1f, 0x80, 0xd7, 0x9d, 0x2f, 0x01, 0x82, 0x48,
0xa0, 0x5c, 0x51, 0xf7, 0xcc, 0x3e, 0x66, 0xfd, 0xe0, 0x19, 0x64, 0xc9, 0xb7, 0xa9, 0x9a, 0x9e,
0xae, 0xfa, 0xfd, 0xea, 0xb1, 0x55, 0x0b, 0x84, 0x26, 0x12, 0x79, 0xe2, 0x46, 0x4e, 0x2c, 0x82,
0x9d, 0x94, 0x33, 0xc9, 0xc8, 0x46, 0x4c, 0xa3, 0xe3, 0x91, 0xc8, 0xa4, 0x9d, 0xc9, 0x81, 0x5b,
0xcb, 0x1e, 0x8b, 0x63, 0x96, 0x64, 0xea, 0x5b, 0xab, 0x02, 0xf9, 0x31, 0xf5, 0x70, 0xf6, 0x9d,
0xc5, 0xa0, 0xd1, 0xf3, 0x6d, 0x3c, 0x1a, 0xa1, 0x90, 0x64, 0x13, 0xea, 0x29, 0x22, 0x77, 0xa8,
0xdf, 0x31, 0xb6, 0x8c, 0xed, 0xb2, 0x5d, 0x53, 0x62, 0xcf, 0x27, 0xf7, 0xa1, 0xc2, 0x59, 0x84,
0x9d, 0xd2, 0x96, 0xb1, 0xdd, 0xde, 0xbb, 0xb3, 0x73, 0xae, 0xb1, 0x9d, 0x03, 0x44, 0x6e, 0xb3,
0x08, 0x6d, 0x7d, 0x98, 0xac, 0x43, 0xd5, 0x63, 0xa3, 0x44, 0x76, 0xca, 0x5b, 0xc6, 0x76, 0xcb,
0xce, 0x04, 0x2b, 0x00, 0x50, 0x06, 0x45, 0xca, 0x12, 0x81, 0xe4, 0x3e, 0xd4, 0x84, 0x74, 0xe5,
0x48, 0x68, 0x83, 0xcd, 0xbd, 0xff, 0x17, 0xaf, 0xce, 0xbd, 0xef, 0xeb, 0x23, 0x76, 0x7e, 0x94,
0xb4, 0xa1, 0x44, 0x7d, 0xed, 0x4b, 0xd9, 0x2e, 0x51, 0xff, 0x02, 0x43, 0x29, 0xc0, 0xa1, 0x60,
0xff, 0x25, 0xb4, 0x63, 0x68, 0x6a, 0x8b, 0xff, 0x04, 0xdb, 0x4b, 0xd0, 0x90, 0x34, 0x46, 0x21,
0xdd, 0x38, 0xd5, 0x3e, 0x55, 0xec, 0x99, 0xe2, 0x02, 0xbb, 0x3f, 0x18, 0xb0, 0xf9, 0x88, 0xa3,
0x2b, 0xf1, 0x11, 0x8b, 0x22, 0xf4, 0x24, 0x65, 0xc9, 0x04, 0xf7, 0x03, 0x58, 0x8a, 0x45, 0xe0,
0xc8, 0x71, 0x8a, 0xda, 0x8d, 0xf6, 0xde, 0xed, 0x0b, 0x20, 0xee, 0x8b, 0xe0, 0x70, 0x9c, 0xa2,
0x5d, 0x8f, 0xb3, 0x07, 0xb2, 0x01, 0x35, 0x8e, 0x47, 0xce, 0x94, 0xea, 0x2a, 0xc7, 0xa3, 0x9e,
0x5f, 0xf4, 0xb0, 0xbc, 0xe8, 0xe1, 0x4d, 0x58, 0x4a, 0x39, 0xfb, 0x70, 0xac, 0x3e, 0xab, 0xe8,
0xcf, 0xea, 0x5a, 0xee, 0xf9, 0xe4, 0x0d, 0xa8, 0x09, 0x6f, 0x88, 0xb1, 0xdb, 0xa9, 0x6a, 0x3e,
0x6e, 0x9e, 0xcb, 0xc7, 0xc3, 0x88, 0x0d, 0xec, 0xfc, 0xa0, 0xf5, 0xcc, 0x80, 0x8d, 0x2e, 0x67,
0xe9, 0x95, 0xc6, 0xb5, 0x0f, 0x2b, 0xde, 0xd4, 0x3f, 0x27, 0x71, 0x63, 0xcc, 0x01, 0xbe, 0x5a,
0xf4, 0x28, 0x2f, 0xbe, 0x9d, 0x19, 0x98, 0xa7, 0x6e, 0x8c, 0x76, 0xdb, 0x2b, 0xc8, 0xd6, 0x2f,
0x06, 0xac, 0x3f, 0x76, 0xc5, 0x75, 0x82, 0xfc, 0x9b, 0x01, 0x37, 0xbb, 0x28, 0x3c, 0x4e, 0x07,
0x78, 0x9d, 0x70, 0x7f, 0x6e, 0xc0, 0x46, 0x7f, 0xc8, 0x4e, 0xae, 0x32, 0x66, 0xeb, 0x67, 0x03,
0xfe, 0x97, 0x75, 0x97, 0x03, 0x97, 0x4b, 0x7a, 0x45, 0x23, 0xf3, 0x36, 0xb4, 0xd3, 0x89, 0x7b,
0xf3, 0x81, 0xb9, 0x7b, 0x7e, 0x60, 0xa6, 0x50, 0x74, 0x5c, 0x5a, 0xe9, 0xbc, 0x68, 0xfd, 0x64,
0xc0, 0xba, 0xea, 0x3a, 0xd7, 0x05, 0xef, 0x8f, 0x06, 0xac, 0x3d, 0x76, 0xc5, 0x75, 0x81, 0xfb,
0xcc, 0x80, 0xce, 0xa4, 0xdb, 0x5c, 0x17, 0xcc, 0xea, 0x47, 0x45, 0x75, 0x9a, 0xab, 0x8c, 0xf7,
0x5f, 0x6e, 0xae, 0xcf, 0x4b, 0xd0, 0xea, 0x25, 0x02, 0xb9, 0x7c, 0x71, 0x58, 0x5f, 0x3b, 0xeb,
0xb2, 0x42, 0xdc, 0x58, 0x74, 0x86, 0xdc, 0x85, 0x59, 0x40, 0x1c, 0xe9, 0x06, 0x1a, 0x7b, 0xc3,
0x5e, 0x9e, 0x2a, 0x0f, 0xdd, 0x80, 0xbc, 0x0c, 0x20, 0x30, 0x88, 0x31, 0x91, 0xca, 0x50, 0x55,
0x1b, 0x6a, 0xe4, 0x9a, 0x9e, 0xaf, 0x5e, 0x7b, 0x43, 0x37, 0x49, 0x30, 0x52, 0xaf, 0x6b, 0xd9,
0xeb, 0x5c, 0xd3, 0xf3, 0x0b, 0xcc, 0xd6, 0x8b, 0xcc, 0xde, 0x06, 0x98, 0x46, 0x40, 0x74, 0x96,
0xb6, 0xca, 0xdb, 0x15, 0x7b, 0x4e, 0xa3, 0x86, 0x63, 0xce, 0x4e, 0x1c, 0xea, 0x8b, 0x4e, 0x63,
0xab, 0xac, 0x86, 0x63, 0xce, 0x4e, 0x7a, 0xbe, 0x20, 0x6f, 0xc2, 0x92, 0x7a, 0xe1, 0xbb, 0xd2,
0xed, 0xc0, 0x56, 0xf9, 0xf2, 0xa1, 0x4d, 0xdd, 0xd1, 0x75, 0xa5, 0x6b, 0x7d, 0x54, 0x82, 0x56,
0x17, 0x23, 0x94, 0x78, 0x05, 0x98, 0x2f, 0xb2, 0x56, 0xb9, 0x8c, 0xb5, 0xea, 0x65, 0xac, 0xd5,
0xce, 0xb0, 0xf6, 0x0a, 0x2c, 0xa7, 0x9c, 0xc6, 0x2e, 0x1f, 0x3b, 0x21, 0x8e, 0x45, 0xa7, 0xae,
0xa9, 0x6b, 0xe6, 0xba, 0x27, 0x38, 0x16, 0xd6, 0x73, 0x03, 0x5a, 0x7d, 0x74, 0xb9, 0x37, 0x7c,
0x71, 0x4c, 0xcc, 0x23, 0x28, 0x17, 0x11, 0x14, 0x4a, 0xb1, 0xb2, 0x58, 0x8a, 0xf7, 0x60, 0x95,
0xa3, 0x18, 0x45, 0xd2, 0x99, 0x23, 0x28, 0xe3, 0x60, 0x25, 0x7b, 0xf1, 0x68, 0x4a, 0xd3, 0x2e,
0x54, 0x8f, 0x46, 0xc8, 0xc7, 0x3a, 0xed, 0x2e, 0xcd, 0x82, 0xec, 0x9c, 0xf5, 0x69, 0x09, 0x96,
0x27, 0xc8, 0xd5, 0x55, 0x7f, 0x6f, 0x1b, 0xfa, 0xeb, 0x90, 0x2d, 0x68, 0x69, 0x07, 0x9c, 0x84,
0xf9, 0x38, 0x8b, 0x78, 0x53, 0x2b, 0x9f, 0x32, 0x1f, 0x17, 0x69, 0xa9, 0xfe, 0x29, 0x5a, 0x6a,
0xe7, 0xd3, 0xb2, 0x03, 0x95, 0x21, 0x95, 0x59, 0xe8, 0x9b, 0x7b, 0xb7, 0xce, 0xef, 0x53, 0x8f,
0xa9, 0x14, 0xb6, 0x3e, 0x67, 0x75, 0xa1, 0x79, 0x48, 0x63, 0x3c, 0xa4, 0x5e, 0xb8, 0x2f, 0x82,
0x8b, 0x97, 0xd2, 0x4b, 0xb7, 0x40, 0xeb, 0x33, 0x03, 0xea, 0x4f, 0x70, 0xbc, 0xd7, 0xc7, 0x40,
0x33, 0xa4, 0x4b, 0x37, 0xbf, 0xa1, 0xaa, 0x2b, 0x97, 0xdc, 0x81, 0xe6, 0x5c, 0x6e, 0xe6, 0xec,
0xc1, 0x2c, 0x35, 0xff, 0xb8, 0x4b, 0x53, 0xe1, 0x1c, 0xbb, 0x51, 0x4e, 0xe0, 0x92, 0x5d, 0xa7,
0xe2, 0x1d, 0x25, 0xaa, 0x9b, 0x67, 0x4d, 0x4a, 0x74, 0xaa, 0x3a, 0xe9, 0x61, 0xda, 0xa5, 0x84,
0xf5, 0x3e, 0x40, 0xee, 0x9c, 0x82, 0x38, 0x8b, 0xa0, 0x31, 0x1f, 0xc1, 0xb7, 0xa0, 0x1e, 0xe2,
0x78, 0x4f, 0x60, 0xd0, 0x29, 0x69, 0xee, 0x2e, 0xaa, 0x82, 0xfc, 0x2a, 0x7b, 0x72, 0xdc, 0x4a,
0x60, 0xb5, 0x9f, 0x19, 0x53, 0xb9, 0x42, 0x85, 0xa4, 0x9e, 0x58, 0xe8, 0x9c, 0xc6, 0x62, 0xe7,
0xbc, 0x03, 0xcd, 0x18, 0x63, 0xc6, 0xc7, 0x8e, 0xa0, 0xa7, 0x38, 0x61, 0x23, 0x53, 0xf5, 0xe9,
0x29, 0x2a, 0xbc, 0xc9, 0x28, 0x76, 0x38, 0x3b, 0x11, 0x93, 0x84, 0x4a, 0x46, 0xb1, 0xcd, 0x4e,
0xc4, 0xbd, 0xaf, 0x4a, 0x50, 0xcf, 0x4b, 0x91, 0x34, 0xa0, 0x1a, 0x3e, 0x65, 0x09, 0x9a, 0x37,
0xc8, 0x06, 0xac, 0x86, 0x8b, 0x3b, 0xb7, 0xe9, 0x93, 0x35, 0x58, 0x09, 0x8b, 0x0b, 0xab, 0x89,
0x84, 0x40, 0x3b, 0x2c, 0x6c, 0x74, 0xe6, 0x07, 0x64, 0x13, 0xd6, 0xc2, 0xb3, 0x2b, 0x8f, 0x19,
0x90, 0x75, 0x30, 0xc3, 0xe2, 0x4e, 0x20, 0xcc, 0x21, 0xd9, 0x00, 0x33, 0x5c, 0x18, 0xc2, 0xcd,
0xaf, 0x0d, 0xb2, 0x06, 0xed, 0xb0, 0x30, 0xa9, 0x9a, 0xdf, 0x18, 0x84, 0x40, 0x2b, 0x9c, 0x1f,
0xe7, 0xcc, 0x6f, 0x0d, 0xb2, 0x09, 0x24, 0x3c, 0x33, 0xf3, 0x98, 0xdf, 0x19, 0x64, 0x1d, 0x56,
0xc2, 0xc2, 0x60, 0x20, 0xcc, 0xef, 0x0d, 0xb2, 0x0c, 0xf5, 0x30, 0xfb, 0xed, 0x34, 0x3f, 0x2e,
0x6b, 0x29, 0xeb, 0xe7, 0xe6, 0x27, 0x99, 0x94, 0x55, 0xb6, 0xf9, 0x6b, 0x99, 0xb4, 0xa1, 0x11,
0x4e, 0x52, 0xda, 0xfc, 0xa2, 0x31, 0x95, 0xfb, 0xe3, 0xc4, 0x33, 0xbf, 0x6c, 0xdc, 0x7b, 0x00,
0x4b, 0x93, 0xff, 0x4e, 0x08, 0x40, 0x6d, 0xdf, 0x15, 0x12, 0xb9, 0x79, 0x43, 0x3d, 0xdb, 0xe8,
0xfa, 0xc8, 0x4d, 0x43, 0x3d, 0xbf, 0xcb, 0xa9, 0xd2, 0x97, 0x14, 0xc5, 0x07, 0xaa, 0x94, 0xcd,
0xf2, 0xc3, 0xee, 0x7b, 0x0f, 0x03, 0x2a, 0x87, 0xa3, 0x81, 0x6a, 0x0e, 0xbb, 0xa7, 0x34, 0x8a,
0xe8, 0xa9, 0x44, 0x6f, 0xb8, 0x9b, 0x65, 0xca, 0xeb, 0x3e, 0x15, 0x92, 0xd3, 0xc1, 0x48, 0xa2,
0xbf, 0x3b, 0xc9, 0x97, 0x5d, 0x9d, 0x3e, 0x53, 0x31, 0x1d, 0x0c, 0x6a, 0x5a, 0x73, 0xff, 0xf7,
0x00, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x14, 0x7f, 0x35, 0x35, 0x13, 0x00, 0x00,
}

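The rename above is mechanical but far-reaching: every request message now carries a msg_type tag instead of req_type, and the regenerated nil-safe getters fall back to MsgType_kNone. A minimal sketch of dispatching on the renamed enum; describeMsg is a hypothetical helper and not part of this commit, while the kNone/kInsert/kSearch values all appear elsewhere in this diff:

package main

import (
	"fmt"

	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

// describeMsg maps a MsgType tag to a short label; unknown values fall
// through to the default case rather than panicking.
func describeMsg(t internalpb.MsgType) string {
	switch t {
	case internalpb.MsgType_kInsert:
		return "insert"
	case internalpb.MsgType_kSearch:
		return "search"
	case internalpb.MsgType_kNone:
		return "none"
	default:
		return "unknown"
	}
}

func main() {
	var req *internalpb.InsertRequest // nil on purpose
	// The generated getters are nil-safe: this prints "none", not a panic.
	fmt.Println(describeMsg(req.GetMsgType()))
}
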
@@ -0,0 +1,93 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: msg_header.proto

package internalpb

import (
	fmt "fmt"
	proto "github.com/golang/protobuf/proto"
	any "github.com/golang/protobuf/ptypes/any"
	math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

type MsgHeader struct {
	MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
	Message *any.Any `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *MsgHeader) Reset()         { *m = MsgHeader{} }
func (m *MsgHeader) String() string { return proto.CompactTextString(m) }
func (*MsgHeader) ProtoMessage()    {}
func (*MsgHeader) Descriptor() ([]byte, []int) {
	return fileDescriptor_4712536c36da8833, []int{0}
}

func (m *MsgHeader) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_MsgHeader.Unmarshal(m, b)
}
func (m *MsgHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_MsgHeader.Marshal(b, m, deterministic)
}
func (m *MsgHeader) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MsgHeader.Merge(m, src)
}
func (m *MsgHeader) XXX_Size() int {
	return xxx_messageInfo_MsgHeader.Size(m)
}
func (m *MsgHeader) XXX_DiscardUnknown() {
	xxx_messageInfo_MsgHeader.DiscardUnknown(m)
}

var xxx_messageInfo_MsgHeader proto.InternalMessageInfo

func (m *MsgHeader) GetMsgType() MsgType {
	if m != nil {
		return m.MsgType
	}
	return MsgType_kNone
}

func (m *MsgHeader) GetMessage() *any.Any {
	if m != nil {
		return m.Message
	}
	return nil
}

func init() {
	proto.RegisterType((*MsgHeader)(nil), "milvus.proto.internal.MsgHeader")
}

func init() { proto.RegisterFile("msg_header.proto", fileDescriptor_4712536c36da8833) }

var fileDescriptor_4712536c36da8833 = []byte{
// 222 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8f, 0x31, 0x4b, 0xc4, 0x40,
0x10, 0x85, 0xc9, 0x15, 0x9e, 0x46, 0x10, 0x09, 0x0a, 0xe7, 0x15, 0x72, 0x58, 0xa5, 0x71, 0x16,
0x62, 0x65, 0x69, 0xb0, 0xb0, 0xb9, 0xe6, 0xb0, 0xb2, 0x39, 0xb2, 0x97, 0x71, 0xb2, 0xb0, 0x9b,
0x0d, 0x3b, 0xbb, 0xc2, 0xe6, 0xd7, 0x8b, 0x59, 0xd6, 0xea, 0xba, 0xf7, 0xcd, 0x1b, 0xe6, 0x63,
0xca, 0x5b, 0xc3, 0x74, 0x1c, 0xb0, 0xeb, 0xd1, 0xc1, 0xe4, 0xac, 0xb7, 0xd5, 0xbd, 0x51, 0xfa,
0x27, 0x70, 0x22, 0x50, 0xa3, 0x47, 0x37, 0x76, 0x7a, 0x5b, 0xe5, 0x74, 0x34, 0x4c, 0xa9, 0xdc,
0x3e, 0x90, 0xb5, 0xa4, 0x51, 0x2c, 0x24, 0xc3, 0xb7, 0xe8, 0xc6, 0x98, 0xaa, 0xa7, 0xb9, 0xbc,
0xda, 0x33, 0x7d, 0x2c, 0x87, 0xab, 0xd7, 0xf2, 0xf2, 0x4f, 0xe3, 0xe3, 0x84, 0x9b, 0x62, 0x57,
0xd4, 0x37, 0xcd, 0x23, 0x9c, 0xb5, 0xc0, 0x9e, 0xe9, 0x33, 0x4e, 0x78, 0x58, 0x9b, 0x14, 0xaa,
0xa6, 0x5c, 0x1b, 0x64, 0xee, 0x08, 0x37, 0xab, 0x5d, 0x51, 0x5f, 0x37, 0x77, 0x90, 0xa4, 0x90,
0xa5, 0xf0, 0x36, 0xc6, 0x76, 0x55, 0x17, 0x87, 0xbc, 0xd8, 0xbe, 0x7f, 0xb5, 0xa4, 0xfc, 0x10,
0x24, 0x9c, 0xac, 0x11, 0xb3, 0xd2, 0x5a, 0xcd, 0x1e, 0x4f, 0x83, 0x48, 0xce, 0xe7, 0x5e, 0xb1,
0x77, 0x4a, 0x06, 0x8f, 0xbd, 0xc8, 0xe6, 0xf4, 0xc3, 0x3f, 0x4e, 0x52, 0x5e, 0x2c, 0x93, 0x97,
0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x87, 0xfd, 0x10, 0x22, 0x01, 0x00, 0x00,
}

@@ -0,0 +1,13 @@
syntax = "proto3";

package milvus.proto.internal;

option go_package = "github.com/zilliztech/milvus-distributed/internal/proto/internalpb";

import "internal_msg.proto";
import "google/protobuf/any.proto";

message MsgHeader {
  MsgType msg_type = 1;
  google.protobuf.Any message = 2 [lazy=true];
}

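The header carries its payload as a google.protobuf.Any, so a consumer checks the msg_type tag first and then unpacks the embedded message. The [lazy=true] annotation is a standard protobuf field option hinting that a runtime may defer parsing of the submessage; not every runtime honors it. A minimal pack/unpack sketch using the golang/protobuf ptypes helpers already imported by the generated file; the field values here are illustrative:

package main

import (
	"log"

	"github.com/golang/protobuf/ptypes"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

func main() {
	insert := &internalpb.InsertRequest{MsgType: internalpb.MsgType_kInsert, ReqId: 1}

	// Pack the concrete request into the Any payload.
	payload, err := ptypes.MarshalAny(insert)
	if err != nil {
		log.Fatal(err)
	}
	header := &internalpb.MsgHeader{MsgType: internalpb.MsgType_kInsert, Message: payload}

	// A receiver inspects the type tag, then unpacks into the matching type.
	if header.GetMsgType() == internalpb.MsgType_kInsert {
		var out internalpb.InsertRequest
		if err := ptypes.UnmarshalAny(header.GetMessage(), &out); err != nil {
			log.Fatal(err)
		}
		log.Println("req id:", out.GetReqId())
	}
}
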
@@ -22,7 +22,7 @@ func (it *insertTask) PreExecute() error {
func (it *insertTask) Execute() error {
	ts := it.GetTs()
	insertRequest := internalpb.InsertRequest{
		ReqType: internalpb.ReqType_kInsert,
		MsgType: internalpb.MsgType_kInsert,
		ReqId: it.ReqId,
		CollectionName: it.rowBatch.CollectionName,
		PartitionTag: it.rowBatch.PartitionTag,

@@ -52,7 +52,7 @@ func (ins *proxyInstance) restartSchedulerRoutine(bufSize int) error {
		select {
		case t := <-ins.taskChan:
			switch (*t).Type() {
			case internalpb.ReqType_kInsert:
			case internalpb.MsgType_kInsert:
				ins.taskSch.DmQueue.Enqueue(t)
			default:
				return

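The scheduler loop receives tasks over a channel and dispatches on their Type(). A stand-alone analogue of that shape with simplified local types; MsgType, task, and insertTask below are stand-ins, not the proxy's own definitions:

package main

import "fmt"

type MsgType int

const (
	kNone MsgType = iota
	kInsert
)

type task interface{ Type() MsgType }

type insertTask struct{}

func (insertTask) Type() MsgType { return kInsert }

// drain dispatches tasks from ch until it is closed, mirroring the
// select/switch shape of restartSchedulerRoutine above.
func drain(ch <-chan task, enqueue func(task)) {
	for t := range ch {
		switch t.Type() {
		case kInsert:
			enqueue(t)
		default:
			// unsupported task types are dropped
		}
	}
}

func main() {
	ch := make(chan task, 1)
	ch <- insertTask{}
	close(ch)
	drain(ch, func(t task) { fmt.Println("enqueued", t.Type()) })
}
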
@@ -15,7 +15,7 @@ import (
)

type BaseRequest interface {
	Type() internalpb.ReqType
	Type() internalpb.MsgType
	PreExecute() commonpb.Status
	Execute() commonpb.Status
	PostExecute() commonpb.Status

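A no-op stand-in that implements the methods visible in this hunk can serve as a template or test double. This is a sketch only: the interface may declare more methods below the cut (for example WaitToFinish, which the proxy calls elsewhere in this diff), and ok() is a tiny local helper, not part of the commit:

// Assumes the same package and imports as the file above.
type noopRequest struct{}

func ok() commonpb.Status {
	return commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS}
}

func (noopRequest) Type() internalpb.MsgType     { return internalpb.MsgType_kNone }
func (noopRequest) PreExecute() commonpb.Status  { return ok() }
func (noopRequest) Execute() commonpb.Status     { return ok() }
func (noopRequest) PostExecute() commonpb.Status { return ok() }
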
@@ -19,8 +19,8 @@ type queryReq struct {
}

// BaseRequest interfaces
func (req *queryReq) Type() internalpb.ReqType {
	return req.ReqType
func (req *queryReq) Type() internalpb.MsgType {
	return req.MsgType
}

func (req *queryReq) PreExecute() commonpb.Status {

@@ -145,38 +145,6 @@ func (s *proxyServer) ShowPartitions(ctx context.Context, req *servicepb.Collect
	}, nil
}

func (s *proxyServer) DeleteByID(ctx context.Context, req *pb.DeleteByIDParam) (*commonpb.Status, error) {
	log.Printf("delete entites, total = %d", len(req.IdArray))
	mReqMsg := pb.ManipulationReqMsg{
		CollectionName: req.CollectionName,
		ReqType: pb.ReqType_kDeleteEntityByID,
		ProxyId: s.proxyId,
	}
	for _, id := range req.IdArray {
		mReqMsg.PrimaryKeys = append(mReqMsg.PrimaryKeys, id)
	}
	if len(mReqMsg.PrimaryKeys) > 1 {
		mReq := &manipulationReq{
			stats: make([]commonpb.Status, 1),
			msgs: append([]*pb.ManipulationReqMsg{}, &mReqMsg),
			proxy: s,
		}
		if st := mReq.PreExecute(); st.ErrorCode != commonpb.ErrorCode_SUCCESS {
			return &st, nil
		}
		if st := mReq.Execute(); st.ErrorCode != commonpb.ErrorCode_SUCCESS {
			return &st, nil
		}
		if st := mReq.PostExecute(); st.ErrorCode != commonpb.ErrorCode_SUCCESS {
			return &st, nil
		}
		if st := mReq.WaitToFinish(); st.ErrorCode != commonpb.ErrorCode_SUCCESS {
			return &st, nil
		}
	}
	return &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS}, nil
}

func (s *proxyServer) Insert(ctx context.Context, req *servicepb.RowBatch) (*servicepb.IntegerRangeResponse, error) {
	log.Printf("Insert Entities, total = %d", len(req.RowData))
	msgMap := make(map[uint32]*pb.ManipulationReqMsg)

@@ -262,7 +230,7 @@ func (s *proxyServer) Insert(ctx context.Context, req *servicepb.RowBatch) (*servicepb.IntegerRangeResponse, error) {
func (s *proxyServer) Search(ctx context.Context, req *servicepb.Query) (*servicepb.QueryResult, error) {
	qm := &queryReq{
		SearchRequest: internalpb.SearchRequest{
			ReqType: internalpb.ReqType_kSearch,
			MsgType: internalpb.MsgType_kSearch,
			ProxyId: s.proxyId,
			ReqId: s.queryId.Add(1),
			Timestamp: 0,

@@ -9,7 +9,7 @@ import (

type task interface {
	Id() int64 // return ReqId
	Type() internalpb.ReqType
	Type() internalpb.MsgType
	GetTs() typeutil.Timestamp
	SetTs(ts typeutil.Timestamp)
	PreExecute() error

@@ -20,7 +20,7 @@ type task interface {
}

type baseTask struct {
	ReqType internalpb.ReqType
	ReqType internalpb.MsgType
	ReqId int64
	Ts typeutil.Timestamp
	ProxyId int64

@@ -30,7 +30,7 @@ func (bt *baseTask) Id() int64 {
	return bt.ReqId
}

func (bt *baseTask) Type() internalpb.ReqType {
func (bt *baseTask) Type() internalpb.MsgType {
	return bt.ReqType
}

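Note that after this hunk the struct field is still named ReqType even though its type is now internalpb.MsgType. A hypothetical concrete task that embeds baseTask picks up Id() and Type() for free; flushTask below is illustrative only, not a type in this commit:

// Assumes the same package and imports as the file above.
type flushTask struct {
	baseTask
}

func newFlushTask(reqID int64) *flushTask {
	return &flushTask{baseTask{
		ReqType: internalpb.MsgType_kNone, // placeholder type tag
		ReqId:   reqID,
	}}
}
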
@@ -46,7 +46,7 @@ func (c *Collection) DeletePartition(node *QueryNode, partition *Partition) {
	for _, p := range c.Partitions {
		if p.PartitionName == partition.PartitionName {
			for _, s := range p.Segments {
				delete(node.SegmentsMap, s.SegmentId)
				delete(node.SegmentsMap, s.SegmentID)
			}
		} else {
			tmpPartitions = append(tmpPartitions, p)

@@ -1,5 +1,9 @@
package reader

import (
	"log"
)

type dmNode struct {
	BaseNode
	dmMsg dmMsg

@@ -10,7 +14,22 @@ func (dmNode *dmNode) Name() string {
}

func (dmNode *dmNode) Operate(in []*Msg) []*Msg {
	return in
	// TODO: filter by schema update
	// For now, we regard all messages as valid

	if len(in) != 1 {
		log.Println("Invalid operate message input in dmNode")
		// TODO: add error handling
	}

	dmMsg, ok := (*in[0]).(*dmMsg)
	if !ok {
		log.Println("type assertion failed for dmMsg")
		// TODO: add error handling
	}

	var res Msg = dmMsg
	return []*Msg{&res}
}

func newDmNode() *dmNode {

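The assertion (*in[0]).(*dmMsg) is easy to misread: the flowgraph hands nodes a slice of pointers to the Msg interface, so the interface value must be dereferenced before the type assertion can run. A stand-alone illustration; Msg and dmMsg here are local stand-ins, not the reader package's types:

package main

import "fmt"

type Msg interface{ Name() string }

type dmMsg struct{}

func (*dmMsg) Name() string { return "dmMsg" }

func main() {
	var m Msg = &dmMsg{}
	in := []*Msg{&m} // the flowgraph passes pointers to interface values

	// The extra dereference unwraps the *Msg before the assertion;
	// asserting on in[0] directly would not compile.
	if d, ok := (*in[0]).(*dmMsg); ok {
		fmt.Println("got", d.Name())
	}
}
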
@@ -1,5 +1,11 @@
package reader

import (
	"log"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
)

type filteredDmNode struct {
	BaseNode
	filteredDmMsg filteredDmMsg

@@ -10,7 +16,41 @@ func (fdmNode *filteredDmNode) Name() string {
}

func (fdmNode *filteredDmNode) Operate(in []*Msg) []*Msg {
	return in
	if len(in) != 1 {
		log.Println("Invalid operate message input in filteredDmNode")
		// TODO: add error handling
	}

	fdmMsg, ok := (*in[0]).(*filteredDmMsg)
	if !ok {
		log.Println("type assertion failed for filteredDmMsg")
		// TODO: add error handling
	}

	insertData := InsertData{
		insertIDs: make(map[int64][]int64),
		insertTimestamps: make(map[int64][]uint64),
		insertRecords: make(map[int64][]*commonpb.Blob),
		insertOffset: make(map[int64]int64),
	}

	var iMsg = insertMsg{
		insertData: insertData,
		timeRange: fdmMsg.timeRange,
	}
	for _, task := range fdmMsg.insertMessages {
		if len(task.RowIds) != len(task.Timestamps) || len(task.RowIds) != len(task.RowData) {
			// TODO: what if the messages are misaligned?
			// For now, we skip those messages and print an error
			log.Println("Error, misaligned messages detected")
			continue
		}
		iMsg.insertData.insertIDs[task.SegmentId] = append(iMsg.insertData.insertIDs[task.SegmentId], task.RowIds...)
		iMsg.insertData.insertTimestamps[task.SegmentId] = append(iMsg.insertData.insertTimestamps[task.SegmentId], task.Timestamps...)
		iMsg.insertData.insertRecords[task.SegmentId] = append(iMsg.insertData.insertRecords[task.SegmentId], task.RowData...)
	}
	var res Msg = &iMsg
	return []*Msg{&res}
}

func newFilteredDmNode() *filteredDmNode {

@@ -1,8 +1,17 @@
package reader

import (
	"errors"
	"fmt"
	"log"
	"strconv"
	"sync"
)

type insertNode struct {
	BaseNode
	insertMsg insertMsg
	SegmentsMap *map[int64]*Segment
	insertMsg *insertMsg
}

func (iNode *insertNode) Name() string {

@@ -10,7 +19,85 @@ func (iNode *insertNode) Name() string {
}

func (iNode *insertNode) Operate(in []*Msg) []*Msg {
	return in
	if len(in) != 1 {
		log.Println("Invalid operate message input in insertNode")
		// TODO: add error handling
	}

	insertMsg, ok := (*in[0]).(*insertMsg)
	if !ok {
		log.Println("type assertion failed for insertMsg")
		// TODO: add error handling
	}

	iNode.insertMsg = insertMsg

	var err = iNode.preInsert()
	if err != nil {
		log.Println("preInsert failed")
		// TODO: add error handling
	}

	wg := sync.WaitGroup{}
	for segmentID := range iNode.insertMsg.insertData.insertRecords {
		wg.Add(1)
		go iNode.insert(segmentID, &wg)
	}
	wg.Wait()

	var res Msg = &serviceTimeMsg{
		timeRange: insertMsg.timeRange,
	}
	return []*Msg{&res}
}

func (iNode *insertNode) preInsert() error {
	for segmentID := range iNode.insertMsg.insertData.insertRecords {
		var targetSegment, err = iNode.getSegmentBySegmentID(segmentID)
		if err != nil {
			return err
		}

		var numOfRecords = len(iNode.insertMsg.insertData.insertRecords[segmentID])
		var offset = targetSegment.SegmentPreInsert(numOfRecords)
		iNode.insertMsg.insertData.insertOffset[segmentID] = offset
	}

	return nil
}

func (iNode *insertNode) getSegmentBySegmentID(segmentID int64) (*Segment, error) {
	targetSegment, ok := (*iNode.SegmentsMap)[segmentID]

	if !ok {
		return nil, errors.New("cannot find segment with id = " + strconv.FormatInt(segmentID, 10))
	}

	return targetSegment, nil
}

func (iNode *insertNode) insert(segmentID int64, wg *sync.WaitGroup) {
	// defer keeps the WaitGroup balanced even on the early error returns below
	defer wg.Done()

	var targetSegment, err = iNode.getSegmentBySegmentID(segmentID)
	if err != nil {
		log.Println("cannot find segment:", segmentID)
		// TODO: add error handling
		return
	}

	ids := iNode.insertMsg.insertData.insertIDs[segmentID]
	timestamps := iNode.insertMsg.insertData.insertTimestamps[segmentID]
	records := iNode.insertMsg.insertData.insertRecords[segmentID]
	offsets := iNode.insertMsg.insertData.insertOffset[segmentID]

	err = targetSegment.SegmentInsert(offsets, &ids, &timestamps, &records)
	if err != nil {
		log.Println("insert failed")
		// TODO: add error handling
		return
	}

	fmt.Println("Do insert done, len = ", len(iNode.insertMsg.insertData.insertIDs[segmentID]))
}

func newInsertNode() *insertNode {

@@ -6,13 +6,21 @@ import (
	"log"
	"sync"

	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	msgPb "github.com/zilliztech/milvus-distributed/internal/proto/message"
	"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)

type manipulationService struct {
	ctx context.Context
	fg *flowgraph.TimeTickedFlowGraph
	ctx context.Context
	fg *flowgraph.TimeTickedFlowGraph
	msgStream *msgstream.PulsarMsgStream
}

func (dmService *manipulationService) Start() {
	dmService.initNodes()
	go dmService.fg.Start()
	dmService.consumeFromMsgStream()
}

func (dmService *manipulationService) initNodes() {

@@ -85,9 +93,34 @@ func (dmService *manipulationService) initNodes() {
		log.Fatal("set edges failed in node:", serviceTimeNode.Name())
	}

	err = dmService.fg.SetStartNode(msgStreamNode.Name())
	if err != nil {
		log.Fatal("set start node failed")
	}

	// TODO: add top node's initialization
}

func (dmService *manipulationService) consumeFromMsgStream() {
	for {
		select {
		case <-dmService.ctx.Done():
			log.Println("service stop")
			return
		default:
			msgPack := dmService.msgStream.Consume()
			var msgStreamMsg Msg = &msgStreamMsg{
				tsMessages: msgPack.Msgs,
				timeRange: TimeRange{
					timestampMin: Timestamp(msgPack.BeginTs),
					timestampMax: Timestamp(msgPack.EndTs),
				},
			}
			dmService.fg.Input(&msgStreamMsg)
		}
	}
}

func (node *QueryNode) MessagesPreprocess(insertDeleteMessages []*msgPb.InsertOrDeleteMsg, timeRange TimeRange) msgPb.Status {
	var tMax = timeRange.timestampMax

@@ -116,7 +149,7 @@ func (node *QueryNode) MessagesPreprocess(insertDeleteMessages []*msgPb.InsertOrDeleteMsg, timeRange TimeRange) msgPb.Status {
			}
			node.insertData.insertIDs[msg.SegmentId] = append(node.insertData.insertIDs[msg.SegmentId], msg.Uid)
			node.insertData.insertTimestamps[msg.SegmentId] = append(node.insertData.insertTimestamps[msg.SegmentId], msg.Timestamp)
			node.insertData.insertRecords[msg.SegmentId] = append(node.insertData.insertRecords[msg.SegmentId], msg.RowsData.Blob)
			// node.insertData.insertRecords[msg.SegmentID] = append(node.insertData.insertRecords[msg.SegmentID], msg.RowsData.Blob)
		} else if msg.Op == msgPb.OpType_DELETE {
			var r = DeleteRecord{
				entityID: msg.Uid,

@@ -170,7 +203,7 @@ func (node *QueryNode) MessagesPreprocess(insertDeleteMessages []*msgPb.InsertOrDeleteMsg, timeRange TimeRange) msgPb.Status {
			}
			node.insertData.insertIDs[msg.SegmentId] = append(node.insertData.insertIDs[msg.SegmentId], msg.Uid)
			node.insertData.insertTimestamps[msg.SegmentId] = append(node.insertData.insertTimestamps[msg.SegmentId], msg.Timestamp)
			node.insertData.insertRecords[msg.SegmentId] = append(node.insertData.insertRecords[msg.SegmentId], msg.RowsData.Blob)
			// node.insertData.insertRecords[msg.SegmentID] = append(node.insertData.insertRecords[msg.SegmentID], msg.RowsData.Blob)
		} else if msg.Op == msgPb.OpType_DELETE {
			var r = DeleteRecord{
				entityID: msg.Uid,

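One property of the consume loop worth noting: cancellation is only observed between Consume calls, because the default branch runs whenever ctx.Done() is not yet closed. A self-contained sketch of the same shutdown shape; consume and input below are stand-ins for msgStream.Consume and fg.Input, which this commit treats as blocking calls:

package main

import (
	"context"
	"log"
	"time"
)

// pump forwards consumed values into input until ctx is cancelled; note
// that a consume call that blocks forever would also block the shutdown.
func pump(ctx context.Context, consume func() int, input func(int)) {
	for {
		select {
		case <-ctx.Done():
			log.Println("service stop")
			return
		default:
			input(consume())
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	n := 0
	pump(ctx,
		func() int { time.Sleep(time.Millisecond); n++; return n },
		func(v int) { log.Println("pack", v) })
}
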
@@ -93,7 +93,8 @@ func TestInsertAndDelete_WriterDelete(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -195,7 +196,8 @@ func TestInsertAndDelete_PreInsertAndDelete(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -327,7 +329,8 @@ func TestInsertAndDelete_DoInsert(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -417,7 +420,8 @@ func TestInsertAndDelete_DoDelete(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -536,7 +540,8 @@ func TestInsertAndDelete_DoInsertAndDelete(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

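Every test here gains a defer cancel() because discarding the CancelFunc from context.WithDeadline leaks the deadline timer until it fires; go vet's lostcancel check flags the old pattern. A minimal stand-alone illustration:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	d := time.Now().Add(100 * time.Millisecond)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel() // releases the deadline timer even on early return

	<-ctx.Done()
	fmt.Println(ctx.Err()) // context.DeadlineExceeded
}
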
@@ -2,6 +2,7 @@ package reader

import (
	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)

@@ -13,8 +14,10 @@ type msgStreamMsg struct {
}

type dmMsg struct {
	tsMessages []*msgstream.TsMsg
	timeRange TimeRange
	insertMessages []*msgstream.InsertTask
	// TODO: add delete message support
	// deleteMessages []*msgstream.DeleteTask
	timeRange TimeRange
}

type key2SegMsg struct {

@@ -27,8 +30,10 @@ type schemaUpdateMsg struct {
}

type filteredDmMsg struct {
	tsMessages []*msgstream.TsMsg
	timeRange TimeRange
	insertMessages []*msgstream.InsertTask
	// TODO: add delete message support
	// deleteMessages []*msgstream.DeleteTask
	timeRange TimeRange
}

type insertMsg struct {

@@ -53,7 +58,7 @@ type serviceTimeMsg struct {
type InsertData struct {
	insertIDs map[int64][]int64
	insertTimestamps map[int64][]uint64
	insertRecords map[int64][][]byte
	insertRecords map[int64][]*commonpb.Blob
	insertOffset map[int64]int64
}

@@ -23,12 +23,12 @@ const (
	SegmentPrefix = "/segment/"
)

func GetCollectionObjId(key string) string {
func GetCollectionObjID(key string) string {
	prefix := path.Join(conf.Config.Etcd.Rootpath, CollectionPrefix) + "/"
	return strings.TrimPrefix(key, prefix)
}

func GetSegmentObjId(key string) string {
func GetSegmentObjID(key string) string {
	prefix := path.Join(conf.Config.Etcd.Rootpath, SegmentPrefix) + "/"
	return strings.TrimPrefix(key, prefix)
}

@@ -133,10 +133,10 @@ func (node *QueryNode) processSegmentCreate(id string, value string) {
func (node *QueryNode) processCreate(key string, msg string) {
	println("process create", key)
	if isCollectionObj(key) {
		objID := GetCollectionObjId(key)
		objID := GetCollectionObjID(key)
		node.processCollectionCreate(objID, msg)
	} else if isSegmentObj(key) {
		objID := GetSegmentObjId(key)
		objID := GetSegmentObjID(key)
		node.processSegmentCreate(objID, msg)
	} else {
		println("can not process create msg:", key)

@@ -170,10 +170,10 @@ func (node *QueryNode) processCollectionModify(id string, value string) {
func (node *QueryNode) processModify(key string, msg string) {
	// println("process modify")
	if isCollectionObj(key) {
		objID := GetCollectionObjId(key)
		objID := GetCollectionObjID(key)
		node.processCollectionModify(objID, msg)
	} else if isSegmentObj(key) {
		objID := GetSegmentObjId(key)
		objID := GetSegmentObjID(key)
		node.processSegmentModify(objID, msg)
	} else {
		println("can not process modify msg:", key)

@@ -183,7 +183,7 @@ func (node *QueryNode) processModify(key string, msg string) {
func (node *QueryNode) processSegmentDelete(id string) {
	println("Delete segment: ", id)

	segmentId, err := strconv.ParseInt(id, 10, 64)
	segmentID, err := strconv.ParseInt(id, 10, 64)
	if err != nil {
		log.Println("Cannot parse segment id:" + id)
	}

@@ -191,7 +191,7 @@ func (node *QueryNode) processSegmentDelete(id string) {
	for _, col := range node.Collections {
		for _, p := range col.Partitions {
			for _, s := range p.Segments {
				if s.SegmentId == segmentId {
				if s.SegmentID == segmentID {
					p.DeleteSegment(node, s)
				}
			}

@@ -202,22 +202,22 @@ func (node *QueryNode) processSegmentDelete(id string) {
func (node *QueryNode) processCollectionDelete(id string) {
	println("Delete collection: ", id)

	collectionId, err := strconv.ParseInt(id, 10, 64)
	collectionID, err := strconv.ParseInt(id, 10, 64)
	if err != nil {
		log.Println("Cannot parse collection id:" + id)
	}

	targetCollection := node.GetCollectionByID(collectionId)
	targetCollection := node.GetCollectionByID(collectionID)
	node.DeleteCollection(targetCollection)
}

func (node *QueryNode) processDelete(key string) {
	println("process delete")
	if isCollectionObj(key) {
		objID := GetCollectionObjId(key)
		objID := GetCollectionObjID(key)
		node.processCollectionDelete(objID)
	} else if isSegmentObj(key) {
		objID := GetSegmentObjId(key)
		objID := GetSegmentObjID(key)
		node.processSegmentDelete(objID)
	} else {
		println("can not process delete msg:", key)

@@ -256,7 +256,7 @@ func (node *QueryNode) loadCollections() error {
		return err
	}
	for i := range keys {
		objID := GetCollectionObjId(keys[i])
		objID := GetCollectionObjID(keys[i])
		node.processCollectionCreate(objID, values[i])
	}
	return nil

@@ -267,7 +267,7 @@ func (node *QueryNode) loadSegments() error {
		return err
	}
	for i := range keys {
		objID := GetSegmentObjId(keys[i])
		objID := GetSegmentObjID(keys[i])
		node.processSegmentCreate(objID, values[i])
	}
	return nil

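GetCollectionObjID and GetSegmentObjID rely on strings.TrimPrefix, which returns the input unchanged when the prefix does not match; that is why the tests below expect "fakeKey" to come back untouched. A stand-alone sketch with a hard-coded root path ("by-dev" is an assumption; the real value comes from conf.Config.Etcd.Rootpath):

package main

import (
	"fmt"
	"path"
	"strings"
)

// collectionObjID mirrors GetCollectionObjID with fixed inputs.
func collectionObjID(key string) string {
	prefix := path.Join("by-dev", "/collection/") + "/"
	return strings.TrimPrefix(key, prefix)
}

func main() {
	fmt.Println(collectionObjID("by-dev/collection/collection0")) // "collection0"
	fmt.Println(collectionObjID("fakeKey"))                       // unchanged: "fakeKey"
}
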
@@ -20,28 +20,28 @@ func TestMeta_GetCollectionObjId(t *testing.T) {
	conf.LoadConfig("config.yaml")

	var key = "/collection/collection0"
	var collectionObjId1 = GetCollectionObjId(key)
	var collectionObjID1 = GetCollectionObjID(key)

	assert.Equal(t, collectionObjId1, "/collection/collection0")
	assert.Equal(t, collectionObjID1, "/collection/collection0")

	key = "fakeKey"
	var collectionObjId2 = GetCollectionObjId(key)
	var collectionObjID2 = GetCollectionObjID(key)

	assert.Equal(t, collectionObjId2, "fakeKey")
	assert.Equal(t, collectionObjID2, "fakeKey")
}

func TestMeta_GetSegmentObjId(t *testing.T) {
	conf.LoadConfig("config.yaml")

	var key = "/segment/segment0"
	var segmentObjId1 = GetSegmentObjId(key)
	var segmentObjID1 = GetSegmentObjID(key)

	assert.Equal(t, segmentObjId1, "/segment/segment0")
	assert.Equal(t, segmentObjID1, "/segment/segment0")

	key = "fakeKey"
	var segmentObjId2 = GetSegmentObjId(key)
	var segmentObjID2 = GetSegmentObjID(key)

	assert.Equal(t, segmentObjId2, "fakeKey")
	assert.Equal(t, segmentObjID2, "fakeKey")
}

func TestMeta_isCollectionObj(t *testing.T) {

@@ -158,7 +158,8 @@ func TestMeta_ProcessCollectionCreate(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -185,7 +186,8 @@ func TestMeta_ProcessSegmentCreate(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -202,7 +204,7 @@ func TestMeta_ProcessSegmentCreate(t *testing.T) {
	node.processSegmentCreate(id, value)
	s := node.SegmentsMap[int64(0)]

	assert.Equal(t, s.SegmentId, int64(0))
	assert.Equal(t, s.SegmentID, int64(0))
	assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
	assert.Equal(t, s.SegmentStatus, 0)
}

@@ -211,7 +213,8 @@ func TestMeta_ProcessCreate(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -242,7 +245,7 @@ func TestMeta_ProcessCreate(t *testing.T) {
	node.processCreate(key2, msg2)
	s := node.SegmentsMap[int64(0)]

	assert.Equal(t, s.SegmentId, int64(0))
	assert.Equal(t, s.SegmentID, int64(0))
	assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
	assert.Equal(t, s.SegmentStatus, 0)
}

@@ -251,7 +254,8 @@ func TestMeta_ProcessSegmentModify(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -268,7 +272,7 @@ func TestMeta_ProcessSegmentModify(t *testing.T) {
	node.processSegmentCreate(id, value)
	var s = node.SegmentsMap[int64(0)]

	assert.Equal(t, s.SegmentId, int64(0))
	assert.Equal(t, s.SegmentID, int64(0))
	assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
	assert.Equal(t, s.SegmentStatus, 0)

@@ -280,7 +284,7 @@ func TestMeta_ProcessSegmentModify(t *testing.T) {
	node.processSegmentModify(id, newValue)
	s = node.SegmentsMap[int64(0)]

	assert.Equal(t, s.SegmentId, int64(0))
	assert.Equal(t, s.SegmentID, int64(0))
	assert.Equal(t, s.SegmentCloseTime, uint64(70368744177888))
	assert.Equal(t, s.SegmentStatus, 0)
}

@@ -289,7 +293,8 @@ func TestMeta_ProcessCollectionModify(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -333,7 +338,8 @@ func TestMeta_ProcessModify(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -364,7 +370,7 @@ func TestMeta_ProcessModify(t *testing.T) {
	node.processCreate(key2, msg2)
	s := node.SegmentsMap[int64(0)]

	assert.Equal(t, s.SegmentId, int64(0))
	assert.Equal(t, s.SegmentID, int64(0))
	assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
	assert.Equal(t, s.SegmentStatus, 0)

@@ -394,7 +400,7 @@ func TestMeta_ProcessModify(t *testing.T) {
	node.processModify(key2, msg4)
	s = node.SegmentsMap[int64(0)]

	assert.Equal(t, s.SegmentId, int64(0))
	assert.Equal(t, s.SegmentID, int64(0))
	assert.Equal(t, s.SegmentCloseTime, uint64(70368744177888))
	assert.Equal(t, s.SegmentStatus, 0)
}

@@ -403,7 +409,8 @@ func TestMeta_ProcessSegmentDelete(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -420,7 +427,7 @@ func TestMeta_ProcessSegmentDelete(t *testing.T) {
	node.processSegmentCreate(id, value)
	s := node.SegmentsMap[int64(0)]

	assert.Equal(t, s.SegmentId, int64(0))
	assert.Equal(t, s.SegmentID, int64(0))
	assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
	assert.Equal(t, s.SegmentStatus, 0)

@@ -434,7 +441,8 @@ func TestMeta_ProcessCollectionDelete(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -466,7 +474,8 @@ func TestMeta_ProcessDelete(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -497,7 +506,7 @@ func TestMeta_ProcessDelete(t *testing.T) {
	node.processCreate(key2, msg2)
	s := node.SegmentsMap[int64(0)]

	assert.Equal(t, s.SegmentId, int64(0))
	assert.Equal(t, s.SegmentID, int64(0))
	assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
	assert.Equal(t, s.SegmentStatus, 0)

@@ -515,7 +524,8 @@ func TestMeta_ProcessResp(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -537,7 +547,8 @@ func TestMeta_LoadCollections(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -553,7 +564,8 @@ func TestMeta_LoadSegments(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -569,7 +581,8 @@ func TestMeta_InitFromMeta(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	node := CreateQueryNode(ctx, 0, 0, &mc)

@@ -582,7 +595,8 @@ func TestMeta_RunMetaService(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	node := CreateQueryNode(ctx, 0, 0, nil)

@@ -1,5 +1,11 @@
package reader

import (
	"log"

	"github.com/zilliztech/milvus-distributed/internal/msgstream"
)

type msgStreamNode struct {
	BaseNode
	msgStreamMsg msgStreamMsg

@@ -10,7 +16,36 @@ func (msNode *msgStreamNode) Name() string {
}

func (msNode *msgStreamNode) Operate(in []*Msg) []*Msg {
	return in
	if len(in) != 1 {
		log.Println("Invalid operate message input in msgStreamNode")
		// TODO: add error handling
	}

	streamMsg, ok := (*in[0]).(*msgStreamMsg)
	if !ok {
		log.Println("type assertion failed for msgStreamMsg")
		// TODO: add error handling
	}

	// TODO: add time range check

	var dmMsg = dmMsg{
		insertMessages: make([]*msgstream.InsertTask, 0),
		// deleteMessages: make([]*msgstream.DeleteTask, 0),
		timeRange: streamMsg.timeRange,
	}
	for _, msg := range streamMsg.tsMessages {
		switch (*msg).Type() {
		case msgstream.KInsert:
			dmMsg.insertMessages = append(dmMsg.insertMessages, (*msg).(*msgstream.InsertTask))
		// case msgstream.KDelete:
		// 	dmMsg.deleteMessages = append(dmMsg.deleteMessages, (*msg).(*msgstream.DeleteTask))
		default:
			log.Println("Unsupported message type:", (*msg).Type())
		}
	}
	var res Msg = &dmMsg
	return []*Msg{&res}
}

func newMsgStreamNode() *msgStreamNode {

@@ -19,14 +19,14 @@ type Partition struct {
	Segments []*Segment
}

func (p *Partition) NewSegment(segmentId int64) *Segment {
func (p *Partition) NewSegment(segmentID int64) *Segment {
	/*
		CSegmentBase
		NewSegment(CPartition partition, unsigned long segment_id);
	*/
	segmentPtr := C.NewSegment(p.PartitionPtr, C.ulong(segmentId))
	segmentPtr := C.NewSegment(p.PartitionPtr, C.ulong(segmentID))

	var newSegment = &Segment{SegmentPtr: segmentPtr, SegmentId: segmentId}
	var newSegment = &Segment{SegmentPtr: segmentPtr, SegmentID: segmentID}
	p.Segments = append(p.Segments, newSegment)
	return newSegment
}

@@ -42,8 +42,8 @@ func (p *Partition) DeleteSegment(node *QueryNode, segment *Segment) {
	tmpSegments := make([]*Segment, 0)

	for _, s := range p.Segments {
		if s.SegmentId == segment.SegmentId {
			delete(node.SegmentsMap, s.SegmentId)
		if s.SegmentID == segment.SegmentID {
			delete(node.SegmentsMap, s.SegmentID)
		} else {
			tmpSegments = append(tmpSegments, s)
		}

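DeleteSegment removes a segment by rebuilding the partition's slice, keeping every segment whose ID does not match, while the SegmentsMap entry is deleted alongside. A pure-Go analogue of that filter loop; the CGo-backed Segment is replaced with a plain struct here, so this sketch runs without the C library:

package main

import "fmt"

type Segment struct{ SegmentID int64 }

// deleteSegment mirrors Partition.DeleteSegment's filter loop: rebuild the
// slice, keeping every segment whose ID does not match.
func deleteSegment(segments []*Segment, id int64) []*Segment {
	tmp := make([]*Segment, 0, len(segments))
	for _, s := range segments {
		if s.SegmentID != id {
			tmp = append(tmp, s)
		}
	}
	return tmp
}

func main() {
	segs := []*Segment{{SegmentID: 0}, {SegmentID: 1}, {SegmentID: 2}}
	fmt.Println(len(deleteSegment(segs, 1))) // 2
}
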
@@ -20,13 +20,13 @@ func TestPartition_NewSegment(t *testing.T) {
	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, collection.CollectionID, uint64(0))
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, node.Collections[0].Partitions[0].Segments[0].SegmentId, int64(0))
	assert.Equal(t, node.Collections[0].Partitions[0].Segments[0].SegmentID, int64(0))

	assert.Equal(t, len(collection.Partitions), 1)
	assert.Equal(t, len(node.Collections), 1)
	assert.Equal(t, len(node.Collections[0].Partitions[0].Segments), 1)

	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, node.FoundSegmentBySegmentID(int64(0)), true)
}

@@ -44,13 +44,13 @@ func TestPartition_DeleteSegment(t *testing.T) {
	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, collection.CollectionID, uint64(0))
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, node.Collections[0].Partitions[0].Segments[0].SegmentId, int64(0))
	assert.Equal(t, node.Collections[0].Partitions[0].Segments[0].SegmentID, int64(0))

	assert.Equal(t, len(collection.Partitions), 1)
	assert.Equal(t, len(node.Collections), 1)
	assert.Equal(t, len(node.Collections[0].Partitions[0].Segments), 1)

	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))

	// 2. Destruct collection, partition and segment
	partition.DeleteSegment(node, segment)

@@ -15,9 +15,8 @@ import "C"

import (
	"context"
	"time"

	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
	"time"

	"github.com/zilliztech/milvus-distributed/internal/kv"
	"github.com/zilliztech/milvus-distributed/internal/msgclient"

@@ -86,7 +85,7 @@ type QueryNode struct {
	// context
	ctx context.Context

	QueryNodeId uint64
	QueryNodeID uint64
	Collections []*Collection
	SegmentsMap map[int64]*Segment
	messageClient *msgclient.ReaderMessageClient

@@ -100,7 +99,7 @@ type QueryNode struct {
	InsertLogs []InsertLog
}

func NewQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64) *QueryNode {
func NewQueryNode(ctx context.Context, queryNodeID uint64, timeSync uint64) *QueryNode {
	mc := msgclient.ReaderMessageClient{}

	queryNodeTimeSync := &QueryNodeTime{

@@ -128,7 +127,7 @@ func NewQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64) *Que

	return &QueryNode{
		ctx: ctx,
		QueryNodeId: queryNodeId,
		QueryNodeID: queryNodeID,
		Collections: nil,
		SegmentsMap: segmentsMap,
		messageClient: &mc,

@@ -147,7 +146,7 @@ func (node *QueryNode) Close() {
	}
}

func CreateQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64, mc *msgclient.ReaderMessageClient) *QueryNode {
func CreateQueryNode(ctx context.Context, queryNodeID uint64, timeSync uint64, mc *msgclient.ReaderMessageClient) *QueryNode {
	queryNodeTimeSync := &QueryNodeTime{
		ReadTimeSyncMin: timeSync,
		ReadTimeSyncMax: timeSync,

@@ -176,7 +175,7 @@ func CreateQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64, m

	return &QueryNode{
		ctx: ctx,
		QueryNodeId: queryNodeId,
		QueryNodeID: queryNodeID,
		Collections: nil,
		SegmentsMap: segmentsMap,
		messageClient: mc,

@@ -202,8 +201,8 @@ func (node *QueryNode) QueryNodeDataInit() {
	insertData := InsertData{
		insertIDs: make(map[int64][]int64),
		insertTimestamps: make(map[int64][]uint64),
		insertRecords: make(map[int64][][]byte),
		insertOffset: make(map[int64]int64),
		// insertRecords: make(map[int64][][]byte),
		insertOffset: make(map[int64]int64),
	}

	node.deletePreprocessData = deletePreprocessData

@@ -236,7 +235,7 @@ func (node *QueryNode) DeleteCollection(collection *Collection) {
		if col.CollectionID == collectionID {
			for _, p := range collection.Partitions {
				for _, s := range p.Segments {
					delete(node.SegmentsMap, s.SegmentId)
					delete(node.SegmentsMap, s.SegmentID)
				}
			}
		} else {

@@ -10,7 +10,8 @@ import (

func TestQueryNode_CreateQueryNode(t *testing.T) {
	conf.LoadConfig("config.yaml")
	ctx, _ := context.WithCancel(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	node := CreateQueryNode(ctx, 0, 0, nil)
	assert.NotNil(t, node)

@@ -18,7 +19,8 @@ func TestQueryNode_CreateQueryNode(t *testing.T) {

func TestQueryNode_NewQueryNode(t *testing.T) {
	conf.LoadConfig("config.yaml")
	ctx, _ := context.WithCancel(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	node := NewQueryNode(ctx, 0, 0)
	assert.NotNil(t, node)

@@ -26,7 +28,8 @@ func TestQueryNode_NewQueryNode(t *testing.T) {

func TestQueryNode_Close(t *testing.T) {
	conf.LoadConfig("config.yaml")
	ctx, _ := context.WithCancel(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	node := CreateQueryNode(ctx, 0, 0, nil)
	assert.NotNil(t, node)

@@ -36,7 +39,8 @@ func TestQueryNode_Close(t *testing.T) {

func TestQueryNode_QueryNodeDataInit(t *testing.T) {
	conf.LoadConfig("config.yaml")
	ctx, _ := context.WithCancel(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	node := CreateQueryNode(ctx, 0, 0, nil)
	assert.NotNil(t, node)

@@ -50,7 +54,8 @@ func TestQueryNode_QueryNodeDataInit(t *testing.T) {

func TestQueryNode_NewCollection(t *testing.T) {
	conf.LoadConfig("config.yaml")
	ctx, _ := context.WithCancel(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	node := CreateQueryNode(ctx, 0, 0, nil)
	assert.NotNil(t, node)

@@ -63,7 +68,8 @@ func TestQueryNode_NewCollection(t *testing.T) {

func TestQueryNode_DeleteCollection(t *testing.T) {
	conf.LoadConfig("config.yaml")
	ctx, _ := context.WithCancel(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	node := CreateQueryNode(ctx, 0, 0, nil)
	assert.NotNil(t, node)

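A note on the recurring test fix above: `context.WithCancel` and `context.WithDeadline` return a `CancelFunc` that must be called, otherwise the context's timer and bookkeeping are held until the parent context ends; `go vet`'s lostcancel check flags the discarded `_`. A minimal sketch of the corrected pattern:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Deadline contexts hold resources until cancelled; deferring
	// cancel releases them even if the deadline never fires.
	d := time.Now().Add(50 * time.Millisecond)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	select {
	case <-ctx.Done():
		fmt.Println("done:", ctx.Err()) // context.DeadlineExceeded
	case <-time.After(time.Second):
		fmt.Println("timer fired first (should not happen here)")
	}
}
```
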
@@ -1,34 +1,34 @@
package reader

type QueryNodeTime struct {
	ReadTimeSyncMin uint64
	ReadTimeSyncMax uint64
	WriteTimeSync uint64
	ServiceTimeSync uint64
	TSOTimeSync uint64
	ReadTimeSyncMin Timestamp
	ReadTimeSyncMax Timestamp
	WriteTimeSync Timestamp
	ServiceTimeSync Timestamp
	TSOTimeSync Timestamp
}

type TimeRange struct {
	timestampMin uint64
	timestampMax uint64
	timestampMin Timestamp
	timestampMax Timestamp
}

func (t *QueryNodeTime) UpdateReadTimeSync() {
func (t *QueryNodeTime) updateReadTimeSync() {
	t.ReadTimeSyncMin = t.ReadTimeSyncMax
	// TODO: Add time sync
	t.ReadTimeSyncMax = 1
}

func (t *QueryNodeTime) UpdateWriteTimeSync() {
func (t *QueryNodeTime) updateWriteTimeSync() {
	// TODO: Add time sync
	t.WriteTimeSync = 0
}

func (t *QueryNodeTime) UpdateSearchTimeSync(timeRange TimeRange) {
func (t *QueryNodeTime) updateSearchServiceTime(timeRange TimeRange) {
	t.ServiceTimeSync = timeRange.timestampMax
}

func (t *QueryNodeTime) UpdateTSOTimeSync() {
func (t *QueryNodeTime) updateTSOTimeSync() {
	// TODO: Add time sync
	t.TSOTimeSync = 0
}

@@ -15,7 +15,7 @@ func TestQueryNodeTime_UpdateReadTimeSync(t *testing.T) {
		TSOTimeSync: uint64(4),
	}

	queryNodeTimeSync.UpdateReadTimeSync()
	queryNodeTimeSync.updateReadTimeSync()

	assert.Equal(t, queryNodeTimeSync.ReadTimeSyncMin, uint64(1))
}

@@ -33,15 +33,15 @@ func TestQueryNodeTime_UpdateSearchTimeSync(t *testing.T) {
		timestampMin: 0,
		timestampMax: 1,
	}
	queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
	queryNodeTimeSync.updateSearchServiceTime(timeRange)

	assert.Equal(t, queryNodeTimeSync.ServiceTimeSync, uint64(1))
}

func TestQueryNodeTime_UpdateTSOTimeSync(t *testing.T) {
	// TODO: add UpdateTSOTimeSync test
	// TODO: add updateTSOTimeSync test
}

func TestQueryNodeTime_UpdateWriteTimeSync(t *testing.T) {
	// TODO: add UpdateWriteTimeSync test
	// TODO: add updateWriteTimeSync test
}

@@ -59,7 +59,7 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
			}

			if msgLen[0] == 0 && len(node.buffer.InsertDeleteBuffer) <= 0 {
				node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
				node.queryNodeTimeSync.updateSearchServiceTime(timeRange)
				continue
			}

@@ -71,7 +71,7 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
				//fmt.Println("PreInsertAndDelete Done")
				node.DoInsertAndDelete()
				//fmt.Println("DoInsertAndDelete Done")
				node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
				node.queryNodeTimeSync.updateSearchServiceTime(timeRange)
			}
		}
	} else {

@@ -87,7 +87,7 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
			assert.NotEqual(nil, 0, timeRange.timestampMax)

			if msgLen[0] == 0 && len(node.buffer.InsertDeleteBuffer) <= 0 {
				node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
				node.queryNodeTimeSync.updateSearchServiceTime(timeRange)
				continue
			}

@@ -99,11 +99,10 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
				//fmt.Println("PreInsertAndDelete Done")
				node.DoInsertAndDelete()
				//fmt.Println("DoInsertAndDelete Done")
				node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
				node.queryNodeTimeSync.updateSearchServiceTime(timeRange)
			}
		}
	}
	wg.Done()
}

func (node *QueryNode) RunSearch(wg *sync.WaitGroup) {

@@ -129,5 +128,4 @@ func (node *QueryNode) RunSearch(wg *sync.WaitGroup) {
		default:
		}
	}
	wg.Done()
}

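`RunInsertDelete` and `RunSearch` both signal completion with a trailing `wg.Done()`. For reference, the more defensive idiom defers the call so the `WaitGroup` is released on every return path; a small sketch, independent of the QueryNode types:

```go
package main

import (
	"fmt"
	"sync"
)

func worker(id int, wg *sync.WaitGroup) {
	// Deferring Done at the top guarantees the WaitGroup is released
	// even if the worker returns early or panics.
	defer wg.Done()
	fmt.Println("worker", id, "running")
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go worker(i, &wg)
	}
	wg.Wait()
	fmt.Println("all workers finished")
}
```
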
@@ -19,7 +19,8 @@ func TestReader_startQueryNode(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	pulsarAddr := "pulsar://"
	pulsarAddr += conf.Config.Pulsar.Address

@@ -37,7 +38,8 @@ func TestReader_RunInsertDelete(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -67,7 +69,8 @@ func TestReader_RunSearch(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -20,7 +20,8 @@ func TestResult_PublishSearchResult(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -61,7 +62,8 @@ func TestResult_PublishFailedSearchResult(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -89,7 +91,8 @@ func TestResult_PublicStatistic(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -11,7 +11,7 @@ import (
)

func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {

	type SearchResultTmp struct {
		ResultId int64
		ResultID int64
		ResultDistance float32
	}

@@ -20,7 +20,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
	// Traverse all messages in the current messageClient.
	// TODO: Do not receive batched search requests
	for _, msg := range searchMessages {
		var clientId = msg.ClientId
		var clientID = msg.ClientId
		var searchTimestamp = msg.Timestamp

		// ServiceTimeSync update by TimeSync, which is get from proxy.

@@ -34,7 +34,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {

		var vector = msg.Records
		// We now only the first Json is valid.
		var queryJson = msg.Json[0]
		var queryJSON = msg.Json[0]

		// 1. Timestamp check
		// TODO: return or wait? Or adding graceful time

@@ -44,7 +44,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
		}

		// 2. Get query information from query json
		query := node.QueryJson2Info(&queryJson)
		query := node.QueryJSON2Info(&queryJSON)
		// 2d slice for receiving multiple queries's results
		var resultsTmp = make([][]SearchResultTmp, query.NumQueries)
		for i := 0; i < int(query.NumQueries); i++ {

@@ -58,7 +58,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
				continue
			}

			//fmt.Println("Search in segment:", segment.SegmentId, ",segment rows:", segment.GetRowCount())
			//fmt.Println("Search in segment:", segment.SegmentID, ",segment rows:", segment.GetRowCount())
			var res, err = segment.SegmentSearch(query, searchTimestamp, vector)
			if err != nil {
				fmt.Println(err.Error())

@@ -68,7 +68,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
			for i := 0; i < int(query.NumQueries); i++ {
				for j := i * query.TopK; j < (i+1)*query.TopK; j++ {
					resultsTmp[i] = append(resultsTmp[i], SearchResultTmp{
						ResultId: res.ResultIds[j],
						ResultID: res.ResultIds[j],
						ResultDistance: res.ResultDistances[j],
					})
				}

@@ -98,11 +98,11 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
			Entities: &entities,
			Distances: make([]float32, 0),
			QueryId: msg.Uid,
			ProxyId: clientId,
			ProxyId: clientID,
		}
		for _, rTmp := range resultsTmp {
			for _, res := range rTmp {
				results.Entities.Ids = append(results.Entities.Ids, res.ResultId)
				results.Entities.Ids = append(results.Entities.Ids, res.ResultID)
				results.Distances = append(results.Distances, res.ResultDistance)
				results.Scores = append(results.Distances, float32(0))
			}

@@ -0,0 +1,15 @@
package reader

import (
	"context"

	"github.com/zilliztech/milvus-distributed/internal/msgstream"
)

type searchService struct {
	ctx context.Context
	queryNodeTime *QueryNodeTime
	msgStream *msgstream.PulsarMsgStream
}

func (ss *searchService) Start() {}

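The new `searchService` is only a skeleton at this point: `Start` has an empty body. One common shape for such a context-owned service is a receive loop that exits when its context is cancelled. The sketch below uses a plain channel as a stand-in for the Pulsar message stream, so nothing about the `msgstream` API is assumed:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// service is a generic stand-in, not the Milvus searchService.
type service struct {
	ctx    context.Context
	stream <-chan string
}

func (s *service) Start() {
	for {
		select {
		case <-s.ctx.Done():
			// Context cancellation is the single shutdown signal.
			fmt.Println("service stopped:", s.ctx.Err())
			return
		case m := <-s.stream:
			fmt.Println("handling:", m)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	ch := make(chan string, 1)
	ch <- "search request"
	(&service{ctx: ctx, stream: ch}).Start()
}
```
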
@@ -18,7 +18,8 @@ import (
func TestSearch_Search(t *testing.T) {
	conf.LoadConfig("config.yaml")

	ctx, _ := context.WithCancel(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mc := msgclient.ReaderMessageClient{}

@@ -114,7 +115,7 @@ func TestSearch_Search(t *testing.T) {
		queryRawData = append(queryRawData, float32(i))
	}

	var queryJson = "{\"field_name\":\"fakevec\",\"num_queries\":1,\"topK\":10}"
	var queryJSON = "{\"field_name\":\"fakevec\",\"num_queries\":1,\"topK\":10}"
	searchMsg1 := msgPb.SearchMsg{
		CollectionName: "collection0",
		Records: &msgPb.VectorRowRecord{

@@ -125,11 +126,11 @@ func TestSearch_Search(t *testing.T) {
		Timestamp: uint64(0),
		ClientId: int64(0),
		ExtraParams: nil,
		Json: []string{queryJson},
		Json: []string{queryJSON},
	}
	searchMessages := []*msgPb.SearchMsg{&searchMsg1}

	node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
	node.queryNodeTimeSync.updateSearchServiceTime(timeRange)
	assert.Equal(t, node.queryNodeTimeSync.ServiceTimeSync, timeRange.timestampMax)

	status := node.Search(searchMessages)

@@ -13,12 +13,10 @@ package reader
*/
import "C"
import (
	"strconv"
	"unsafe"

	"github.com/stretchr/testify/assert"
	"github.com/zilliztech/milvus-distributed/internal/errors"
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	msgPb "github.com/zilliztech/milvus-distributed/internal/proto/message"
	"strconv"
)

const SegmentLifetime = 20000

@@ -32,7 +30,7 @@ const (

type Segment struct {
	SegmentPtr C.CSegmentBase
	SegmentId int64
	SegmentID int64
	SegmentCloseTime uint64
	LastMemSize int64
	SegmentStatus int

@@ -74,7 +72,7 @@ func (s *Segment) GetDeletedCount() int64 {
	// int
	// Close(CSegmentBase c_segment);
	// */
	// fmt.Println("Closing segment :", s.SegmentId)
	// fmt.Println("Closing segment :", s.SegmentID)
	//
	// var status = C.Close(s.SegmentPtr)
	// s.SegmentStatus = SegmentClosed

@@ -128,7 +126,7 @@ func (s *Segment) SegmentPreDelete(numOfRecords int) int64 {
	return int64(offset)
}

func (s *Segment) SegmentInsert(offset int64, entityIDs *[]int64, timestamps *[]uint64, records *[][]byte) error {
func (s *Segment) SegmentInsert(offset int64, entityIDs *[]int64, timestamps *[]uint64, records *[]*commonpb.Blob) error {
	/*
		int
		Insert(CSegmentBase c_segment,

@@ -141,37 +139,37 @@ func (s *Segment) SegmentInsert(offset int64, entityIDs *[]int64, timestamps *[]
		signed long int count);
	*/
	// Blobs to one big blob
	var numOfRow = len(*entityIDs)
	var sizeofPerRow = len((*records)[0])

	assert.Equal(nil, numOfRow, len(*records))

	var rawData = make([]byte, numOfRow*sizeofPerRow)
	var copyOffset = 0
	for i := 0; i < len(*records); i++ {
		copy(rawData[copyOffset:], (*records)[i])
		copyOffset += sizeofPerRow
	}

	var cOffset = C.long(offset)
	var cNumOfRows = C.long(numOfRow)
	var cEntityIdsPtr = (*C.long)(&(*entityIDs)[0])
	var cTimestampsPtr = (*C.ulong)(&(*timestamps)[0])
	var cSizeofPerRow = C.int(sizeofPerRow)
	var cRawDataVoidPtr = unsafe.Pointer(&rawData[0])

	var status = C.Insert(s.SegmentPtr,
		cOffset,
		cNumOfRows,
		cEntityIdsPtr,
		cTimestampsPtr,
		cRawDataVoidPtr,
		cSizeofPerRow,
		cNumOfRows)

	if status != 0 {
		return errors.New("Insert failed, error code = " + strconv.Itoa(int(status)))
	}
	//var numOfRow = len(*entityIDs)
	//var sizeofPerRow = len((*records)[0])
	//
	//assert.Equal(nil, numOfRow, len(*records))
	//
	//var rawData = make([]byte, numOfRow*sizeofPerRow)
	//var copyOffset = 0
	//for i := 0; i < len(*records); i++ {
	//	copy(rawData[copyOffset:], (*records)[i])
	//	copyOffset += sizeofPerRow
	//}
	//
	//var cOffset = C.long(offset)
	//var cNumOfRows = C.long(numOfRow)
	//var cEntityIdsPtr = (*C.long)(&(*entityIDs)[0])
	//var cTimestampsPtr = (*C.ulong)(&(*timestamps)[0])
	//var cSizeofPerRow = C.int(sizeofPerRow)
	//var cRawDataVoidPtr = unsafe.Pointer(&rawData[0])
	//
	//var status = C.Insert(s.SegmentPtr,
	//	cOffset,
	//	cNumOfRows,
	//	cEntityIdsPtr,
	//	cTimestampsPtr,
	//	cRawDataVoidPtr,
	//	cSizeofPerRow,
	//	cNumOfRows)
	//
	//if status != 0 {
	//	return errors.New("Insert failed, error code = " + strconv.Itoa(int(status)))
	//}

	return nil
}

@@ -228,9 +226,9 @@ func (s *Segment) SegmentSearch(query *QueryInfo, timestamp uint64, vectorRecord
	var cQueryRawDataLength C.int

	if vectorRecord.BinaryData != nil {
		return nil, errors.New("Data of binary type is not supported yet")
		return nil, errors.New("data of binary type is not supported yet")
	} else if len(vectorRecord.FloatData) <= 0 {
		return nil, errors.New("Null query vector data")
		return nil, errors.New("null query vector data")
	} else {
		cQueryRawData = (*C.float)(&vectorRecord.FloatData[0])
		cQueryRawDataLength = (C.int)(len(vectorRecord.FloatData))

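The old `SegmentInsert` body (now commented out pending the `*commonpb.Blob` migration) flattens fixed-width row blobs into one contiguous buffer before handing a single pointer to the CGo `Insert` call. The same "blobs to one big blob" step in plain Go, without CGo, with explicit bounds checks in place of the original's assert:

```go
package main

import (
	"bytes"
	"fmt"
)

// flattenRows copies fixed-width row blobs into one contiguous buffer,
// mirroring the step the hunk above performs before calling C.Insert.
func flattenRows(records [][]byte) ([]byte, error) {
	if len(records) == 0 {
		return nil, fmt.Errorf("no records")
	}
	rowSize := len(records[0])
	raw := make([]byte, len(records)*rowSize)
	offset := 0
	for i, rec := range records {
		if len(rec) != rowSize {
			return nil, fmt.Errorf("record %d has size %d, want %d", i, len(rec), rowSize)
		}
		copy(raw[offset:], rec)
		offset += rowSize
	}
	return raw, nil
}

func main() {
	raw, err := flattenRows([][]byte{{1, 2}, {3, 4}})
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(raw, []byte{1, 2, 3, 4})) // true
}
```
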
@@ -11,7 +11,7 @@ import (
)

//func (node *QueryNode) SegmentsManagement() {
//	//node.queryNodeTimeSync.UpdateTSOTimeSync()
//	//node.queryNodeTimeSync.updateTSOTimeSync()
//	//var timeNow = node.queryNodeTimeSync.TSOTimeSync
//
//	timeNow := node.messageClient.GetTimeNow() >> 18

@@ -44,7 +44,8 @@ import (
func TestSegmentManagement_SegmentStatistic(t *testing.T) {
	conf.LoadConfig("config.yaml")

	ctx, _ := context.WithCancel(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -73,7 +74,8 @@ func TestSegmentManagement_SegmentStatisticService(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -23,7 +23,7 @@ func TestSegment_ConstructorAndDestructor(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	// 2. Destruct collection, partition and segment

@@ -49,12 +49,12 @@ func TestSegment_SegmentInsert(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	// 2. Create ids and timestamps
	ids := []int64{1, 2, 3}
	timestamps := []uint64{0, 0, 0}
	//ids := []int64{1, 2, 3}
	//timestamps := []uint64{0, 0, 0}

	// 3. Create records, use schema below:
	// schema_tmp->AddField("fakeVec", DataType::VECTOR_FLOAT, 16);

@@ -81,8 +81,8 @@ func TestSegment_SegmentInsert(t *testing.T) {
	assert.GreaterOrEqual(t, offset, int64(0))

	// 5. Do Insert
	var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
	assert.NoError(t, err)
	//var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
	//assert.NoError(t, err)

	// 6. Destruct collection, partition and segment
	partition.DeleteSegment(node, segment)

@@ -107,7 +107,7 @@ func TestSegment_SegmentDelete(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	// 2. Create ids and timestamps

@@ -145,7 +145,7 @@ func TestSegment_SegmentSearch(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	// 2. Create ids and timestamps

@@ -179,11 +179,11 @@ func TestSegment_SegmentSearch(t *testing.T) {
	assert.GreaterOrEqual(t, offset, int64(0))

	// 5. Do Insert
	var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
	assert.NoError(t, err)
	//var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
	//assert.NoError(t, err)

	// 6. Do search
	var queryJson = "{\"field_name\":\"fakevec\",\"num_queries\":1,\"topK\":10}"
	var queryJSON = "{\"field_name\":\"fakevec\",\"num_queries\":1,\"topK\":10}"
	var queryRawData = make([]float32, 0)
	for i := 0; i < 16; i++ {
		queryRawData = append(queryRawData, float32(i))

@@ -191,7 +191,7 @@ func TestSegment_SegmentSearch(t *testing.T) {
	var vectorRecord = msgPb.VectorRowRecord{
		FloatData: queryRawData,
	}
	query := node.QueryJson2Info(&queryJson)
	query := node.QueryJSON2Info(&queryJSON)
	var searchRes, searchErr = segment.SegmentSearch(query, timestamps[N/2], &vectorRecord)
	assert.NoError(t, searchErr)
	fmt.Println(searchRes)

@@ -219,7 +219,7 @@ func TestSegment_SegmentPreInsert(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	// 2. Do PreInsert

@@ -249,7 +249,7 @@ func TestSegment_SegmentPreDelete(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	// 2. Do PreDelete

@@ -321,12 +321,12 @@ func TestSegment_GetRowCount(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	// 2. Create ids and timestamps
	ids := []int64{1, 2, 3}
	timestamps := []uint64{0, 0, 0}
	//timestamps := []uint64{0, 0, 0}

	// 3. Create records, use schema below:
	// schema_tmp->AddField("fakeVec", DataType::VECTOR_FLOAT, 16);

@@ -353,8 +353,8 @@ func TestSegment_GetRowCount(t *testing.T) {
	assert.GreaterOrEqual(t, offset, int64(0))

	// 5. Do Insert
	var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
	assert.NoError(t, err)
	//var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
	//assert.NoError(t, err)

	// 6. Get segment row count
	var rowCount = segment.GetRowCount()

@@ -383,7 +383,7 @@ func TestSegment_GetDeletedCount(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	// 2. Create ids and timestamps

@@ -426,12 +426,12 @@ func TestSegment_GetMemSize(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	// 2. Create ids and timestamps
	ids := []int64{1, 2, 3}
	timestamps := []uint64{0, 0, 0}
	//ids := []int64{1, 2, 3}
	//timestamps := []uint64{0, 0, 0}

	// 3. Create records, use schema below:
	// schema_tmp->AddField("fakeVec", DataType::VECTOR_FLOAT, 16);

@@ -458,8 +458,8 @@ func TestSegment_GetMemSize(t *testing.T) {
	assert.GreaterOrEqual(t, offset, int64(0))

	// 5. Do Insert
	var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
	assert.NoError(t, err)
	//var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
	//assert.NoError(t, err)

	// 6. Get memory usage in bytes
	var memSize = segment.GetMemSize()

@@ -496,12 +496,12 @@ func TestSegment_RealSchemaTest(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	// 2. Create ids and timestamps
	ids := []int64{1, 2, 3}
	timestamps := []uint64{0, 0, 0}
	//ids := []int64{1, 2, 3}
	//timestamps := []uint64{0, 0, 0}

	// 3. Create records, use schema below:
	// schema_tmp->AddField("fakeVec", DataType::VECTOR_FLOAT, 16);

@@ -528,8 +528,8 @@ func TestSegment_RealSchemaTest(t *testing.T) {
	assert.GreaterOrEqual(t, offset, int64(0))

	// 5. Do Insert
	var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
	assert.NoError(t, err)
	//var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
	//assert.NoError(t, err)

	// 6. Destruct collection, partition and segment
	partition.DeleteSegment(node, segment)

@@ -1,7 +1,10 @@
package reader

import "log"

type serviceTimeNode struct {
	BaseNode
	queryNodeTime *QueryNodeTime
	serviceTimeMsg serviceTimeMsg
}

@@ -10,7 +13,19 @@ func (stNode *serviceTimeNode) Name() string {
}

func (stNode *serviceTimeNode) Operate(in []*Msg) []*Msg {
	return in
	if len(in) != 1 {
		log.Println("Invalid operate message input in serviceTimeNode")
		// TODO: add error handling
	}

	serviceTimeMsg, ok := (*in[0]).(*serviceTimeMsg)
	if !ok {
		log.Println("type assertion failed for serviceTimeMsg")
		// TODO: add error handling
	}

	stNode.queryNodeTime.updateSearchServiceTime(serviceTimeMsg.timeRange)
	return nil
}

func newServiceTimeNode() *serviceTimeNode {

@@ -66,13 +66,9 @@ func (node *QueryNode) GetSegmentBySegmentID(segmentID int64) (*Segment, error)
}

func (node *QueryNode) FoundSegmentBySegmentID(segmentID int64) bool {
	targetSegment := node.SegmentsMap[segmentID]
	_, ok := node.SegmentsMap[segmentID]

	if targetSegment == nil {
		return false
	}

	return true
	return ok
}

func (c *Collection) GetPartitionByName(partitionName string) (partition *Partition) {

@@ -111,12 +107,12 @@ func (node *QueryNode) WriteQueryLog() {

	// write logs
	for _, insertLog := range node.InsertLogs {
		insertLogJson, err := json.Marshal(&insertLog)
		insertLogJSON, err := json.Marshal(&insertLog)
		if err != nil {
			log.Fatal(err)
		}

		writeString := string(insertLogJson) + "\n"
		writeString := string(insertLogJSON) + "\n"
		fmt.Println(writeString)

		_, err2 := f.WriteString(writeString)

@@ -141,9 +137,9 @@ func (node *QueryNode) PrepareBatchMsg() []int {
	return msgLen
}

func (node *QueryNode) QueryJson2Info(queryJson *string) *QueryInfo {
func (node *QueryNode) QueryJSON2Info(queryJSON *string) *QueryInfo {
	var query QueryInfo
	var err = json.Unmarshal([]byte(*queryJson), &query)
	var err = json.Unmarshal([]byte(*queryJSON), &query)

	if err != nil {
		log.Fatal("Unmarshal query json failed")

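`FoundSegmentBySegmentID` now uses the map's two-value index instead of comparing the value against `nil`. The comma-ok form reports key presence directly, so it stays correct even for value types whose zero value is meaningful, or if a nil pointer were ever stored under a live key:

```go
package main

import "fmt"

type segment struct{ id int64 }

func main() {
	segments := map[int64]*segment{0: {id: 0}}

	// The two-value ("comma ok") form reports presence, not value.
	_, ok := segments[0]
	fmt.Println(ok) // true

	_, ok = segments[42]
	fmt.Println(ok) // false
}
```
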
@@ -18,7 +18,8 @@ func TestUtilFunctions_GetKey2Segments(t *testing.T) {
	conf.LoadConfig("config.yaml")

	d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
	ctx, _ := context.WithDeadline(context.Background(), d)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -65,7 +66,7 @@ func TestUtilFunctions_GetCollectionByID(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	c := node.GetCollectionByID(int64(0))

@@ -112,7 +113,7 @@ func TestUtilFunctions_GetSegmentBySegmentID(t *testing.T) {
	// 2. Get segment by segment id
	var s0, err = node.GetSegmentBySegmentID(0)
	assert.NoError(t, err)
	assert.Equal(t, s0.SegmentId, int64(0))
	assert.Equal(t, s0.SegmentID, int64(0))

	node.Close()
}

@@ -129,7 +130,7 @@ func TestUtilFunctions_FoundSegmentBySegmentID(t *testing.T) {

	assert.Equal(t, collection.CollectionName, "collection0")
	assert.Equal(t, partition.PartitionName, "partition0")
	assert.Equal(t, segment.SegmentId, int64(0))
	assert.Equal(t, segment.SegmentID, int64(0))
	assert.Equal(t, len(node.SegmentsMap), 1)

	b1 := node.FoundSegmentBySegmentID(int64(0))

@@ -168,7 +169,8 @@ func TestUtilFunctions_GetPartitionByName(t *testing.T) {
func TestUtilFunctions_PrepareBatchMsg(t *testing.T) {
	conf.LoadConfig("config.yaml")

	ctx, _ := context.WithCancel(context.Background())
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mc := msgclient.ReaderMessageClient{}
	pulsarAddr := "pulsar://"

@@ -189,8 +191,8 @@ func TestUtilFunctions_QueryJson2Info(t *testing.T) {
	ctx := context.Background()
	node := NewQueryNode(ctx, 0, 0)

	var queryJson = "{\"field_name\":\"age\",\"num_queries\":1,\"topK\":10}"
	info := node.QueryJson2Info(&queryJson)
	var queryJSON = "{\"field_name\":\"age\",\"num_queries\":1,\"topK\":10}"
	info := node.QueryJSON2Info(&queryJSON)

	assert.Equal(t, info.FieldName, "age")
	assert.Equal(t, info.NumQueries, int64(1))

@@ -1,15 +1,14 @@
package S3_driver_test
package s3driver

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	s3_driver "github.com/zilliztech/milvus-distributed/internal/storage/internal/S3"
)

var ctx = context.Background()
var client, err = s3_driver.NewS3Driver(ctx)
var client, err = NewS3Driver(ctx)

func TestS3Driver_PutRowAndGetRow(t *testing.T) {
	err = client.PutRow(ctx, []byte("bar"), []byte("abcdefghijklmnoopqrstuvwxyz"), "SegmentA", 1)

@@ -1,4 +1,4 @@
package S3_driver
package s3driver

import (
	"bytes"

@@ -1,4 +1,4 @@
package S3_driver
package s3driver

import (
	"context"

@@ -1,4 +1,4 @@
package minio_driver
package miniodriver

import (
	"context"

@@ -1,4 +1,4 @@
package minio_driver
package miniodriver

import (
	"bytes"

@@ -1,15 +1,14 @@
package minio_driver_test
package miniodriver

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	minio_driver "github.com/zilliztech/milvus-distributed/internal/storage/internal/minio"
)

var ctx = context.Background()
var client, err = minio_driver.NewMinioDriver(ctx)
var client, err = NewMinioDriver(ctx)

func TestMinioDriver_PutRowAndGetRow(t *testing.T) {
	err = client.PutRow(ctx, []byte("bar"), []byte("abcdefghijklmnoopqrstuvwxyz"), "SegmentA", 1)

@@ -17,9 +17,10 @@ type flowGraphStates struct {
}

type TimeTickedFlowGraph struct {
	ctx context.Context
	states *flowGraphStates
	nodeCtx map[string]*nodeCtx
	ctx context.Context
	states *flowGraphStates
	startNode *nodeCtx
	nodeCtx map[string]*nodeCtx
}

func (fg *TimeTickedFlowGraph) AddNode(node *Node) {

@@ -67,6 +68,17 @@ func (fg *TimeTickedFlowGraph) SetEdges(nodeName string, in []string, out []stri
	return nil
}

func (fg *TimeTickedFlowGraph) SetStartNode(nodeName string) error {
	startNode, ok := fg.nodeCtx[nodeName]
	if !ok {
		errMsg := "Cannot find node:" + nodeName
		return errors.New(errMsg)
	}

	fg.startNode = startNode
	return nil
}

func (fg *TimeTickedFlowGraph) Start() {
	wg := sync.WaitGroup{}
	for _, v := range fg.nodeCtx {

@@ -76,6 +88,11 @@ func (fg *TimeTickedFlowGraph) Start() {
	wg.Wait()
}

func (fg *TimeTickedFlowGraph) Input(msg *Msg) {
	// start node should have only 1 input channel
	fg.startNode.inputChannels[0] <- msg
}

func (fg *TimeTickedFlowGraph) Close() error {
	for _, v := range fg.nodeCtx {
		v.Close()

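`SetStartNode` and `Input` give `TimeTickedFlowGraph` a designated entry point: external messages are pushed into the start node's single input channel. A self-contained miniature of that pattern (the graph and node types below are stand-ins, not the flowgraph package):

```go
package main

import "fmt"

type msg struct{ payload string }

type node struct {
	name          string
	inputChannels []chan *msg
}

type graph struct {
	nodes     map[string]*node
	startNode *node
}

// setStartNode mirrors SetStartNode: look up by name, remember the node.
func (g *graph) setStartNode(name string) error {
	n, ok := g.nodes[name]
	if !ok {
		return fmt.Errorf("cannot find node: %s", name)
	}
	g.startNode = n
	return nil
}

// input mirrors Input: the start node should have only one input channel.
func (g *graph) input(m *msg) {
	g.startNode.inputChannels[0] <- m
}

func main() {
	start := &node{name: "msgStream", inputChannels: []chan *msg{make(chan *msg, 1)}}
	g := &graph{nodes: map[string]*node{start.name: start}}
	if err := g.setStartNode("msgStream"); err != nil {
		panic(err)
	}
	g.input(&msg{payload: "time tick"})
	fmt.Println((<-start.inputChannels[0]).payload) // time tick
}
```
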
@@ -1,4 +1,4 @@
package typeutil

type Timestamp = uint64
type Id = int64
type ID = int64

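`Timestamp` and the corrected `ID` are type aliases (note the `=`), not defined types: they are fully interchangeable with `uint64` and `int64`, which is why `QueryNodeTime`'s fields could switch from `uint64` to `Timestamp` without touching any caller. The distinction in brief:

```go
package main

import "fmt"

type Timestamp = uint64 // alias: identical to uint64
type Tick uint64        // defined type: distinct from uint64

func main() {
	var raw uint64 = 42

	var ts Timestamp = raw // fine: an alias needs no conversion
	fmt.Println(ts)

	var tk Tick = Tick(raw) // a defined type requires an explicit conversion
	fmt.Println(tk)
}
```
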
@@ -14,9 +14,8 @@ done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

CPP_SRC_DIR="${SCRIPTS_DIR}/../internal/core"
CPP_BUILD_DIR="${CPP_SRC_DIR}/cmake_build"

BUILD_OUTPUT_DIR=${CPP_BUILD_DIR}
BUILD_OUTPUT_DIR="${SCRIPTS_DIR}/../cmake_build"
BUILD_TYPE="Release"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX="${CPP_SRC_DIR}/output"