Add meta_table implementation

Signed-off-by: godchen <qingxiang.chen@zilliz.com>
pull/4973/head^2
godchen 2020-11-04 16:01:28 +08:00 committed by yefu.chen
parent 2e2e059f8a
commit bade037fb2
72 changed files with 1148 additions and 993 deletions

.gitignore

@ -41,7 +41,6 @@ sdk/cmake_build_release
# Compiled source
bin/
lib/
cmake_build/
*.a
*.so
*.so.*


@ -15,7 +15,7 @@ linters-settings:
locale: US
linters:
disable-all: true
disable-all: false
enable:
- typecheck
- goimports
@ -24,6 +24,8 @@ linters:
- golint
- ineffassign
- gosimple
- deadcode
- structcheck
service:
golangci-lint-version: 1.27.0 # use the fixed version to not introduce new linters unexpectedly


@ -36,9 +36,7 @@ fmt:
lint:
@echo "Running $@ check"
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=1m --config ./.golangci.yml ./internal/... || true
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=1m --config ./.golangci.yml ./cmd/... || true
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=1m --config ./.golangci.yml ./test/... || true
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=1m --config ./.golangci.yml || true
ruleguard:
@echo "Running $@ check"
@ -47,7 +45,7 @@ ruleguard:
verifiers: get-check-deps fmt lint ruleguard
# Builds various components locally.
build-go:
build-go: verifiers
@echo "Building each component's binary to './'"
@echo "Building reader ..."
@mkdir -p $(INSTALL_PATH) && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/reader $(PWD)/cmd/reader/reader.go 1>/dev/null


@ -3,14 +3,13 @@ package main
import (
"context"
"flag"
"github.com/zilliztech/milvus-distributed/internal/conf"
"github.com/zilliztech/milvus-distributed/internal/master"
"go.uber.org/zap"
"log"
"os"
"os/signal"
"syscall"
"github.com/zilliztech/milvus-distributed/internal/conf"
"github.com/zilliztech/milvus-distributed/internal/master"
"go.uber.org/zap"
)
func main() {


@ -1,9 +1,8 @@
package main
import (
"log"
"github.com/zilliztech/milvus-distributed/internal/proxy"
"log"
)
func main() {


@ -4,10 +4,9 @@ import (
"context"
"flag"
"fmt"
"strconv"
"github.com/zilliztech/milvus-distributed/internal/conf"
"github.com/zilliztech/milvus-distributed/internal/reader"
"strconv"
)
func main() {


@ -5,16 +5,15 @@ import (
"crypto/md5"
"flag"
"fmt"
"github.com/pivotal-golang/bytefmt"
"github.com/zilliztech/milvus-distributed/internal/storage"
"github.com/zilliztech/milvus-distributed/internal/storage/type"
"log"
"math/rand"
"os"
"sync"
"sync/atomic"
"time"
"github.com/pivotal-golang/bytefmt"
"github.com/zilliztech/milvus-distributed/internal/storage"
storagetype "github.com/zilliztech/milvus-distributed/internal/storage/type"
)
// Global variables


@ -4,13 +4,12 @@ import (
"context"
"flag"
"fmt"
"log"
"strconv"
"github.com/zilliztech/milvus-distributed/internal/conf"
"github.com/zilliztech/milvus-distributed/internal/msgclient"
"github.com/zilliztech/milvus-distributed/internal/storage"
"github.com/zilliztech/milvus-distributed/internal/writer"
"log"
"strconv"
)
func main() {


@ -24,4 +24,4 @@ services:
working_dir: "/milvus-distributed"
command: &ubuntu-command >
/bin/bash -c "
make verifiers && make all"
make all"


@ -1128,7 +1128,9 @@ func NewMetaTable(kv kv.Base) (*metaTable,error)
* Soft Time Tick Barrier
<img src="./raw_figs/Soft_time_tick_barrier.png" width=600>
<img src="./raw_figs/soft_tt_barrier.jpeg" width=700>
```go
type softTimeTickBarrier struct {
@ -1149,7 +1151,7 @@ func newSoftTimeTickBarrier(ctx context.Context, ttStream *MsgStream, peerIds []
* Hard Time Tick Barrier
<img src="./raw_figs/Hard_time_tick_barrier.png" width=500>
<img src="./raw_figs/hard_tt_barrier.jpeg" width=700>
```go
type hardTimeTickBarrier struct {
@ -1166,9 +1168,9 @@ func newHardTimeTickBarrier(ctx context.Context, ttStream *MsgStream, peerIds []
###### 10.5.1 Time Synchronization Message Producer
###### 10.5.1 Time Synchornization Message Producer
<img src="./raw_figs/time_sync_msg_producer.png" width=900>
<img src="./raw_figs/tt_msg_producer.jpeg" width=700>

Three binary image files not shown (65 KiB, 100 KiB, and 2.2 MiB before the change; presumably the raw_figs PNGs whose references above were swapped for JPEGs).


@ -14,10 +14,10 @@ done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
BUILD_OUTPUT_DIR="${SCRIPTS_DIR}/../../cmake_build"
BUILD_OUTPUT_DIR="cmake_build"
BUILD_TYPE="Release"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX="${SCRIPTS_DIR}/output"
INSTALL_PREFIX=${SCRIPTS_DIR}/output
MAKE_CLEAN="OFF"
BUILD_COVERAGE="OFF"
DB_PATH="/tmp/milvus"
@ -133,7 +133,7 @@ CMAKE_CMD="cmake \
-DMILVUS_WITH_PROMETHEUS=${WITH_PROMETHEUS} \
-DMILVUS_CUDA_ARCH=${CUDA_ARCH} \
-DCUSTOM_THIRDPARTY_DOWNLOAD_PATH=${CUSTOM_THIRDPARTY_PATH} \
${SCRIPTS_DIR}"
../"
echo ${CMAKE_CMD}
${CMAKE_CMD}


@ -44,12 +44,12 @@ type showCollectionsTask struct {
}
//////////////////////////////////////////////////////////////////////////
func (t *createCollectionTask) Type() internalpb.MsgType {
func (t *createCollectionTask) Type() internalpb.ReqType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
return t.req.ReqType
}
func (t *createCollectionTask) Ts() (Timestamp, error) {
@ -106,12 +106,12 @@ func (t *createCollectionTask) Execute() error {
}
//////////////////////////////////////////////////////////////////////////
func (t *dropCollectionTask) Type() internalpb.MsgType {
func (t *dropCollectionTask) Type() internalpb.ReqType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
return t.req.ReqType
}
func (t *dropCollectionTask) Ts() (Timestamp, error) {
@ -149,12 +149,12 @@ func (t *dropCollectionTask) Execute() error {
}
//////////////////////////////////////////////////////////////////////////
func (t *hasCollectionTask) Type() internalpb.MsgType {
func (t *hasCollectionTask) Type() internalpb.ReqType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
return t.req.ReqType
}
func (t *hasCollectionTask) Ts() (Timestamp, error) {
@ -181,12 +181,12 @@ func (t *hasCollectionTask) Execute() error {
}
//////////////////////////////////////////////////////////////////////////
func (t *describeCollectionTask) Type() internalpb.MsgType {
func (t *describeCollectionTask) Type() internalpb.ReqType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
return t.req.ReqType
}
func (t *describeCollectionTask) Ts() (Timestamp, error) {
@ -223,12 +223,12 @@ func (t *describeCollectionTask) Execute() error {
}
//////////////////////////////////////////////////////////////////////////
func (t *showCollectionsTask) Type() internalpb.MsgType {
func (t *showCollectionsTask) Type() internalpb.ReqType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
return t.req.ReqType
}
func (t *showCollectionsTask) Ts() (Timestamp, error) {


@ -23,20 +23,86 @@ type metaTable struct {
ddLock sync.RWMutex
}
//todo, load meta from etcd
func NewMetaTable(kv kv.Base) (*metaTable, error) {
mt := &metaTable{
client: kv,
tenantLock: sync.RWMutex{},
proxyLock: sync.RWMutex{},
ddLock: sync.RWMutex{},
}
err := mt.reloadFromKV()
if err != nil {
return nil, err
}
return mt, nil
}
return &metaTable{
client: kv,
tenantId2Meta: make(map[int64]pb.TenantMeta),
proxyId2Meta: make(map[int64]pb.ProxyMeta),
collId2Meta: make(map[int64]pb.CollectionMeta),
collName2Id: make(map[string]int64),
segId2Meta: make(map[int64]pb.SegmentMeta),
tenantLock: sync.RWMutex{},
proxyLock: sync.RWMutex{},
ddLock: sync.RWMutex{},
}, nil
func (mt *metaTable) reloadFromKV() error {
mt.tenantId2Meta = make(map[int64]pb.TenantMeta)
mt.proxyId2Meta = make(map[int64]pb.ProxyMeta)
mt.collId2Meta = make(map[int64]pb.CollectionMeta)
mt.collName2Id = make(map[string]int64)
mt.segId2Meta = make(map[int64]pb.SegmentMeta)
_, values, err := mt.client.LoadWithPrefix("tenant")
if err != nil {
return err
}
for _, value := range values {
tenant_meta := pb.TenantMeta{}
err := proto.Unmarshal([]byte(value), &tenant_meta)
if err != nil {
return err
}
mt.tenantId2Meta[tenant_meta.Id] = tenant_meta
}
_, values, err = mt.client.LoadWithPrefix("proxy")
if err != nil {
return err
}
for _, value := range values {
proxy_meta := pb.ProxyMeta{}
err = proto.Unmarshal([]byte(value), &proxy_meta)
if err != nil {
return err
}
mt.proxyId2Meta[proxy_meta.Id] = proxy_meta
}
_, values, err = mt.client.LoadWithPrefix("collection")
if err != nil {
return err
}
for _, value := range values {
collection_meta := pb.CollectionMeta{}
err = proto.Unmarshal([]byte(value), &collection_meta)
if err != nil {
return err
}
mt.collId2Meta[collection_meta.Id] = collection_meta
mt.collName2Id[collection_meta.Schema.Name] = collection_meta.Id
}
_, values, err = mt.client.LoadWithPrefix("segment")
if err != nil {
return err
}
for _, value := range values {
segment_meta := pb.SegmentMeta{}
err = proto.Unmarshal([]byte(value), &segment_meta)
if err != nil {
return err
}
mt.segId2Meta[segment_meta.SegmentId] = segment_meta
}
return nil
}
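
The reload path above rebuilds the in-memory maps by scanning the "tenant", "proxy", "collection", and "segment" key prefixes in etcd, and the mutating methods below fall back to it whenever a write fails. The kv.Base dependency itself is not part of this diff; the following is a minimal sketch of the contract the calls in this file assume (method names inferred from the call sites, the real interface lives in internal/kv):

```go
package kv

// Base is an illustrative reconstruction of the KV abstraction that
// metaTable is written against, inferred from the call sites in
// meta_table.go. It is a sketch, not the actual definition.
type Base interface {
	// LoadWithPrefix returns all keys under prefix with their values.
	LoadWithPrefix(prefix string) (keys []string, values []string, err error)
	Save(key, value string) error
	Remove(key string) error
	MultiSave(kvs map[string]string) error
	MultiRemove(keys []string) error
	// MultiSaveAndRemove applies the saves and removals together;
	// meta_table.go relies on this to keep collection and segment
	// entries consistent.
	MultiSaveAndRemove(saves map[string]string, removals []string) error
	Close()
}
```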
// mt.ddLock.Lock() before call this function
@ -45,13 +111,9 @@ func (mt *metaTable) saveCollectionMeta(coll *pb.CollectionMeta) error {
if err != nil {
return err
}
err = mt.client.Save(strconv.FormatInt(coll.Id, 10), string(coll_bytes))
if err != nil {
return err
}
mt.collId2Meta[coll.Id] = *coll
mt.collName2Id[coll.Schema.Name] = coll.Id
return nil
return mt.client.Save("/collection/"+strconv.FormatInt(coll.Id, 10), string(coll_bytes))
}
// mt.ddLock.Lock() before call this function
@ -60,18 +122,153 @@ func (mt *metaTable) saveSegmentMeta(seg *pb.SegmentMeta) error {
if err != nil {
return err
}
err = mt.client.Save(strconv.FormatInt(seg.SegmentId, 10), string(seg_bytes))
mt.segId2Meta[seg.SegmentId] = *seg
return mt.client.Save("/segment/"+strconv.FormatInt(seg.SegmentId, 10), string(seg_bytes))
}
// mt.ddLock.Lock() before call this function
func (mt *metaTable) deleteSegmentMeta(segId int64) error {
_, ok := mt.segId2Meta[segId]
if ok {
delete(mt.segId2Meta, segId)
}
return mt.client.Remove("/segment/" + strconv.FormatInt(segId, 10))
}
// mt.ddLock.Lock() before call this function
func (mt *metaTable) saveCollectionAndDeleteSegmentsMeta(coll *pb.CollectionMeta, segIds []int64) error {
segIdStrs := make([]string, 0, len(segIds))
for _, segId := range segIds {
segIdStrs = append(segIdStrs, "/segment/"+strconv.FormatInt(segId, 10))
}
kvs := make(map[string]string)
collStrs, err := proto.Marshal(coll)
if err != nil {
return err
}
kvs["/collection/"+strconv.FormatInt(coll.Id, 10)] = string(collStrs)
for _, segId := range segIds {
_, ok := mt.segId2Meta[segId]
if ok {
delete(mt.segId2Meta, segId)
}
}
mt.collId2Meta[coll.Id] = *coll
return mt.client.MultiSaveAndRemove(kvs, segIdStrs)
}
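
The compound writers stage every durable change under the "/collection/&lt;id&gt;" and "/segment/&lt;id&gt;" key scheme and commit them in one MultiSaveAndRemove round trip, so the collection update and the segment deletions land in etcd together or not at all. A minimal sketch of the call shape (the ids and the collBytes payload are placeholders for illustration):

```go
// collBytes would come from proto.Marshal on the updated CollectionMeta.
kvs := map[string]string{
	"/collection/100": string(collBytes),
}
removals := []string{"/segment/200", "/segment/300"}
// Saves and removals succeed or fail as one unit.
err := mt.client.MultiSaveAndRemove(kvs, removals)
```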
// mt.ddLock.Lock() before call this function
func (mt *metaTable) saveCollectionsAndSegmentsMeta(coll *pb.CollectionMeta, seg *pb.SegmentMeta) error {
kvs := make(map[string]string, 0)
coll_bytes, err := proto.Marshal(coll)
if err != nil {
return err
}
kvs["/collection/"+strconv.FormatInt(coll.Id, 10)] = string(coll_bytes)
mt.collId2Meta[coll.Id] = *coll
mt.collName2Id[coll.Schema.Name] = coll.Id
seg_bytes, err := proto.Marshal(seg)
if err != nil {
return err
}
kvs["/segment/"+strconv.FormatInt(seg.SegmentId, 10)] = string(seg_bytes)
mt.segId2Meta[seg.SegmentId] = *seg
return nil
return mt.client.MultiSave(kvs)
}
// mt.ddLock.Lock() before call this function
func (mt *metaTable) deleteCollectionsAndSegmentsMeta(collId int64, segIds []int64) error {
collIdStr := "/collection/" + strconv.FormatInt(collId, 10)
totalIdStrs := make([]string, 0, 1+len(segIds))
totalIdStrs = append(totalIdStrs, collIdStr)
for _, singleId := range segIds {
totalIdStrs = append(totalIdStrs, "/segment/"+strconv.FormatInt(singleId, 10))
}
coll_meta, ok := mt.collId2Meta[collId]
if ok {
delete(mt.collId2Meta, collId)
}
_, ok = mt.collName2Id[coll_meta.Schema.Name]
if ok {
delete(mt.collName2Id, coll_meta.Schema.Name)
}
for _, segId := range segIds {
_, ok := mt.segId2Meta[segId]
if ok {
delete(mt.segId2Meta, segId)
}
}
return mt.client.MultiRemove(totalIdStrs)
}
func (mt *metaTable) AddCollection(coll *pb.CollectionMeta) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
return mt.saveCollectionMeta(coll)
if len(coll.SegmentIds) != 0 {
return errors.Errorf("segment should be empty when creating collection")
}
if len(coll.PartitionTags) != 0 {
return errors.Errorf("segment should be empty when creating collection")
}
_, ok := mt.collName2Id[coll.Schema.Name]
if ok {
return errors.Errorf("collection alread exists with name = " + coll.Schema.Name)
}
err := mt.saveCollectionMeta(coll)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
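
AddCollection also establishes the recovery idiom that every exported mutator below repeats: mutate the in-memory maps, attempt the KV write, and on failure rebuild from etcd so the caches can never run ahead of the durable store. A hypothetical helper that names the pattern (not part of this commit, shown only to make the idiom explicit):

```go
// commitOrReload is illustrative only. AddCollection, DeleteCollection,
// AddPartition, DeletePartition, AddSegment, DeleteSegment and
// CloseSegment all inline this shape by hand.
func (mt *metaTable) commitOrReload(write func() error) error {
	if err := write(); err != nil {
		// The in-memory maps may have been partially mutated before
		// the write failed; reloading restores the last durable state.
		_ = mt.reloadFromKV()
		return err
	}
	return nil
}
```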
func (mt *metaTable) DeleteCollection(collId int64) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
coll_meta, ok := mt.collId2Meta[collId]
if !ok {
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(collId, 10))
}
err := mt.deleteCollectionsAndSegmentsMeta(collId, coll_meta.SegmentIds)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) HasCollection(collId int64) bool {
mt.ddLock.RLock()
defer mt.ddLock.RUnlock()
_, ok := mt.collId2Meta[collId]
if !ok {
return false
}
return true
}
func (mt *metaTable) GetCollectionByName(collectionName string) (*pb.CollectionMeta, error) {
@ -89,61 +286,99 @@ func (mt *metaTable) GetCollectionByName(collectionName string) (*pb.CollectionM
return &col, nil
}
func (mt *metaTable) HasPartition(partitionTag, collectionName string) bool {
col_meta, err := mt.GetCollectionByName(collectionName)
func (mt *metaTable) AddPartition(collId int64, tag string) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
coll, ok := mt.collId2Meta[collId]
if !ok {
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(collId, 10))
}
for _, t := range coll.PartitionTags {
if t == tag {
return errors.Errorf("partition already exists.")
}
}
coll.PartitionTags = append(coll.PartitionTags, tag)
err := mt.saveCollectionMeta(&coll)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) HasPartition(collId int64, tag string) bool {
mt.ddLock.RLock()
defer mt.ddLock.RUnlock()
col, ok := mt.collId2Meta[collId]
if !ok {
return false
}
for _, tag := range col_meta.PartitionTags {
if tag == partitionTag {
for _, partitionTag := range col.PartitionTags {
if partitionTag == tag {
return true
}
}
return false
}
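
Note that the partition API now keys on the collection id rather than the collection name, so name-based callers resolve the id first. A short sketch of the new calling convention (hasPartitionByName is a hypothetical adapter; it mirrors what hasPartitionTask.Execute does later in this commit):

```go
// hasPartitionByName resolves a collection name to its id, then
// queries by id, matching the new metaTable signatures.
func hasPartitionByName(mt *metaTable, collName, tag string) bool {
	collMeta, err := mt.GetCollectionByName(collName)
	if err != nil {
		return false // collection does not exist
	}
	return mt.HasPartition(collMeta.Id, tag)
}
```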
func (mt *metaTable) DeletePartition(partitionTag, collectionName string) error {
func (mt *metaTable) DeletePartition(collId int64, tag string) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
col_id, ok := mt.collName2Id[collectionName]
coll_meta, ok := mt.collId2Meta[collId]
if !ok {
return errors.Errorf("can't find collection %s", collectionName)
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(collId, 10))
}
col_meta, ok := mt.collId2Meta[col_id]
if !ok {
return errors.Errorf("can't find collection %s", collectionName)
}
pt := make([]string, 0, len(col_meta.PartitionTags))
for _, t := range col_meta.PartitionTags {
if t != partitionTag {
pt := make([]string, 0, len(coll_meta.PartitionTags))
for _, t := range coll_meta.PartitionTags {
if t != tag {
pt = append(pt, t)
}
}
if len(pt) == len(col_meta.PartitionTags) {
if len(pt) == len(coll_meta.PartitionTags) {
return nil
}
seg := make([]int64, 0, len(col_meta.SegmentIds))
for _, s := range col_meta.SegmentIds {
to_delete_seg := make([]int64, 0, len(coll_meta.SegmentIds))
seg := make([]int64, 0, len(coll_meta.SegmentIds))
for _, s := range coll_meta.SegmentIds {
sm, ok := mt.segId2Meta[s]
if !ok {
return errors.Errorf("can't find segment id = %d", s)
}
if sm.PartitionTag != partitionTag {
if sm.PartitionTag != tag {
seg = append(seg, s)
} else {
to_delete_seg = append(to_delete_seg, s)
}
}
col_meta.PartitionTags = pt
col_meta.SegmentIds = seg
coll_meta.PartitionTags = pt
coll_meta.SegmentIds = seg
return mt.saveCollectionMeta(&col_meta)
err := mt.saveCollectionAndDeleteSegmentsMeta(&coll_meta, to_delete_seg)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) AddSegment(seg *pb.SegmentMeta) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
return mt.saveSegmentMeta(seg)
collId := seg.CollectionId
coll_meta := mt.collId2Meta[collId]
coll_meta.SegmentIds = append(coll_meta.SegmentIds, seg.SegmentId)
err := mt.saveCollectionsAndSegmentsMeta(&coll_meta, seg)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) GetSegmentById(segId int64) (*pb.SegmentMeta, error) {
@ -156,3 +391,51 @@ func (mt *metaTable) GetSegmentById(segId int64) (*pb.SegmentMeta, error) {
}
return &sm, nil
}
func (mt *metaTable) DeleteSegment(segId int64) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
seg_meta, ok := mt.segId2Meta[segId]
if !ok {
return errors.Errorf("can't find segment. id = " + strconv.FormatInt(segId, 10))
}
coll_meta, ok := mt.collId2Meta[seg_meta.CollectionId]
if !ok {
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(seg_meta.CollectionId, 10))
}
for i := 0; i < len(coll_meta.SegmentIds); i++ {
if coll_meta.SegmentIds[i] == segId {
coll_meta.SegmentIds = append(coll_meta.SegmentIds[:i], coll_meta.SegmentIds[i+1:]...)
}
}
err := mt.saveCollectionAndDeleteSegmentsMeta(&coll_meta, []int64{segId})
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) CloseSegment(segId int64, closeTs Timestamp, num_rows int64) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
seg_meta, ok := mt.segId2Meta[segId]
if !ok {
return errors.Errorf("can't find segment id = " + strconv.FormatInt(segId, 10))
}
seg_meta.CloseTime = uint64(closeTs)
seg_meta.NumRows = num_rows
err := mt.saveSegmentMeta(&seg_meta)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
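
Taken together, the new methods cover the whole segment lifecycle. A condensed usage sketch that mirrors the tests below (same placeholder etcd endpoint and root path as the tests; error handling elided for brevity):

```go
cli, _ := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
meta, _ := NewMetaTable(kv.NewEtcdKV(cli, "/etcd/test/root"))

coll := pb.CollectionMeta{Id: 100, Schema: &schemapb.CollectionSchema{Name: "coll1"}}
_ = meta.AddCollection(&coll) // must start with no segment ids or partition tags
_ = meta.AddPartition(coll.Id, "p1")
_ = meta.AddSegment(&pb.SegmentMeta{SegmentId: 200, CollectionId: 100, PartitionTag: "p1"})
_ = meta.CloseSegment(200, Timestamp(11), 111) // seal with close timestamp and row count
_ = meta.DeleteSegment(200)                    // also unlinks the id from the collection
_ = meta.DeleteCollection(coll.Id)             // removes the collection and its segments
```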


@ -10,6 +10,124 @@ import (
"go.etcd.io/etcd/clientv3"
)
func TestMetaTable_Collection(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
assert.Nil(t, err)
etcd_kv := kv.NewEtcdKV(cli, "/etcd/test/root")
meta, err := NewMetaTable(etcd_kv)
assert.Nil(t, err)
defer meta.client.Close()
col_meta := pb.CollectionMeta{
Id: 100,
Schema: &schemapb.CollectionSchema{
Name: "coll1",
},
CreateTime: 0,
SegmentIds: []int64{},
PartitionTags: []string{},
}
col_meta_2 := pb.CollectionMeta{
Id: 50,
Schema: &schemapb.CollectionSchema{
Name: "coll1",
},
CreateTime: 0,
SegmentIds: []int64{},
PartitionTags: []string{},
}
col_meta_3 := pb.CollectionMeta{
Id: 30,
Schema: &schemapb.CollectionSchema{
Name: "coll2",
},
CreateTime: 0,
SegmentIds: []int64{},
PartitionTags: []string{},
}
col_meta_4 := pb.CollectionMeta{
Id: 30,
Schema: &schemapb.CollectionSchema{
Name: "coll2",
},
CreateTime: 0,
SegmentIds: []int64{1},
PartitionTags: []string{},
}
col_meta_5 := pb.CollectionMeta{
Id: 30,
Schema: &schemapb.CollectionSchema{
Name: "coll2",
},
CreateTime: 0,
SegmentIds: []int64{1},
PartitionTags: []string{"1"},
}
seg_id_1 := pb.SegmentMeta{
SegmentId: 200,
CollectionId: 100,
PartitionTag: "p1",
}
seg_id_2 := pb.SegmentMeta{
SegmentId: 300,
CollectionId: 100,
PartitionTag: "p1",
}
seg_id_3 := pb.SegmentMeta{
SegmentId: 400,
CollectionId: 100,
PartitionTag: "p2",
}
err = meta.AddCollection(&col_meta)
assert.Nil(t, err)
err = meta.AddCollection(&col_meta_2)
assert.NotNil(t, err)
err = meta.AddCollection(&col_meta_3)
assert.Nil(t, err)
err = meta.AddCollection(&col_meta_4)
assert.NotNil(t, err)
err = meta.AddCollection(&col_meta_5)
assert.NotNil(t, err)
has_collection := meta.HasCollection(col_meta.Id)
assert.True(t, has_collection)
err = meta.AddPartition(col_meta.Id, "p1")
assert.Nil(t, err)
err = meta.AddPartition(col_meta.Id, "p2")
assert.Nil(t, err)
err = meta.AddSegment(&seg_id_1)
assert.Nil(t, err)
err = meta.AddSegment(&seg_id_2)
assert.Nil(t, err)
err = meta.AddSegment(&seg_id_3)
assert.Nil(t, err)
get_col_meta, err := meta.GetCollectionByName(col_meta.Schema.Name)
assert.Nil(t, err)
assert.Equal(t, 3, len(get_col_meta.SegmentIds))
err = meta.DeleteCollection(col_meta.Id)
assert.Nil(t, err)
has_collection = meta.HasCollection(col_meta.Id)
assert.False(t, has_collection)
_, err = meta.GetSegmentById(seg_id_1.SegmentId)
assert.NotNil(t, err)
_, err = meta.GetSegmentById(seg_id_2.SegmentId)
assert.NotNil(t, err)
_, err = meta.GetSegmentById(seg_id_3.SegmentId)
assert.NotNil(t, err)
err = meta.reloadFromKV()
assert.Nil(t, err)
assert.Equal(t, 0, len(meta.proxyId2Meta))
assert.Equal(t, 0, len(meta.tenantId2Meta))
assert.Equal(t, 1, len(meta.collName2Id))
assert.Equal(t, 1, len(meta.collId2Meta))
assert.Equal(t, 0, len(meta.segId2Meta))
err = meta.DeleteCollection(col_meta_3.Id)
assert.Nil(t, err)
}
func TestMetaTable_DeletePartition(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
assert.Nil(t, err)
@ -25,18 +143,134 @@ func TestMetaTable_DeletePartition(t *testing.T) {
Name: "coll1",
},
CreateTime: 0,
SegmentIds: []int64{200},
PartitionTags: []string{"p1"},
SegmentIds: []int64{},
PartitionTags: []string{},
}
seg_id := pb.SegmentMeta{
seg_id_1 := pb.SegmentMeta{
SegmentId: 200,
CollectionId: 100,
PartitionTag: "p1",
}
seg_id_2 := pb.SegmentMeta{
SegmentId: 300,
CollectionId: 100,
PartitionTag: "p1",
}
seg_id_3 := pb.SegmentMeta{
SegmentId: 400,
CollectionId: 100,
PartitionTag: "p2",
}
err = meta.AddCollection(&col_meta)
assert.Nil(t, err)
err = meta.AddPartition(col_meta.Id, "p1")
assert.Nil(t, err)
err = meta.AddPartition(col_meta.Id, "p2")
assert.Nil(t, err)
err = meta.AddSegment(&seg_id_1)
assert.Nil(t, err)
err = meta.AddSegment(&seg_id_2)
assert.Nil(t, err)
err = meta.AddSegment(&seg_id_3)
assert.Nil(t, err)
after_coll_meta, err := meta.GetCollectionByName("coll1")
assert.Nil(t, err)
assert.Equal(t, 2, len(after_coll_meta.PartitionTags))
assert.Equal(t, 3, len(after_coll_meta.SegmentIds))
err = meta.DeletePartition(100, "p1")
assert.Nil(t, err)
after_coll_meta, err = meta.GetCollectionByName("coll1")
assert.Nil(t, err)
assert.Equal(t, 1, len(after_coll_meta.PartitionTags))
assert.Equal(t, 1, len(after_coll_meta.SegmentIds))
has_partition := meta.HasPartition(col_meta.Id, "p1")
assert.False(t, has_partition)
has_partition = meta.HasPartition(col_meta.Id, "p2")
assert.True(t, has_partition)
_, err = meta.GetSegmentById(seg_id_1.SegmentId)
assert.NotNil(t, err)
_, err = meta.GetSegmentById(seg_id_2.SegmentId)
assert.NotNil(t, err)
_, err = meta.GetSegmentById(seg_id_3.SegmentId)
assert.Nil(t, err)
after_coll_meta, err = meta.GetCollectionByName("coll1")
assert.Nil(t, err)
err = meta.reloadFromKV()
assert.Nil(t, err)
assert.Equal(t, 0, len(meta.proxyId2Meta))
assert.Equal(t, 0, len(meta.tenantId2Meta))
assert.Equal(t, 1, len(meta.collName2Id))
assert.Equal(t, 1, len(meta.collId2Meta))
assert.Equal(t, 1, len(meta.segId2Meta))
}
func TestMetaTable_Segment(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
assert.Nil(t, err)
etcd_kv := kv.NewEtcdKV(cli, "/etcd/test/root")
meta, err := NewMetaTable(etcd_kv)
assert.Nil(t, err)
defer meta.client.Close()
keys, _, err := meta.client.LoadWithPrefix("")
assert.Nil(t, err)
err = meta.client.MultiRemove(keys)
assert.Nil(t, err)
col_meta := pb.CollectionMeta{
Id: 100,
Schema: &schemapb.CollectionSchema{
Name: "coll1",
},
CreateTime: 0,
SegmentIds: []int64{},
PartitionTags: []string{},
}
seg_meta := pb.SegmentMeta{
SegmentId: 200,
CollectionId: 100,
PartitionTag: "p1",
}
err = meta.AddCollection(&col_meta)
assert.Nil(t, err)
err = meta.AddSegment(&seg_id)
err = meta.AddPartition(col_meta.Id, "p1")
assert.Nil(t, err)
err = meta.DeletePartition("p1", "coll1")
err = meta.AddSegment(&seg_meta)
assert.Nil(t, err)
get_seg_meta, err := meta.GetSegmentById(seg_meta.SegmentId)
assert.Nil(t, err)
assert.Equal(t, &seg_meta, get_seg_meta)
err = meta.CloseSegment(seg_meta.SegmentId, Timestamp(11), 111)
assert.Nil(t, err)
get_seg_meta, err = meta.GetSegmentById(seg_meta.SegmentId)
assert.Nil(t, err)
assert.Equal(t, get_seg_meta.NumRows, int64(111))
assert.Equal(t, get_seg_meta.CloseTime, uint64(11))
err = meta.DeleteSegment(seg_meta.SegmentId)
assert.Nil(t, err)
get_seg_meta, err = meta.GetSegmentById(seg_meta.SegmentId)
assert.Nil(t, get_seg_meta)
assert.NotNil(t, err)
get_col_meta, err := meta.GetCollectionByName(col_meta.Schema.Name)
assert.Nil(t, err)
assert.Equal(t, 0, len(get_col_meta.SegmentIds))
meta.tenantId2Meta = make(map[int64]pb.TenantMeta)
meta.proxyId2Meta = make(map[int64]pb.ProxyMeta)
meta.collId2Meta = make(map[int64]pb.CollectionMeta)
meta.collName2Id = make(map[string]int64)
meta.segId2Meta = make(map[int64]pb.SegmentMeta)
err = meta.reloadFromKV()
assert.Nil(t, err)
assert.Equal(t, 0, len(meta.proxyId2Meta))
assert.Equal(t, 0, len(meta.tenantId2Meta))
assert.Equal(t, 1, len(meta.collName2Id))
assert.Equal(t, 1, len(meta.collId2Meta))
assert.Equal(t, 0, len(meta.segId2Meta))
}


@ -42,12 +42,12 @@ type showPartitionTask struct {
}
//////////////////////////////////////////////////////////////////////////
func (t *createPartitionTask) Type() internalpb.MsgType {
func (t *createPartitionTask) Type() internalpb.ReqType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
return t.req.ReqType
}
func (t *createPartitionTask) Ts() (Timestamp, error) {
@ -91,12 +91,12 @@ func (t *createPartitionTask) Execute() error {
}
//////////////////////////////////////////////////////////////////////////
func (t *dropPartitionTask) Type() internalpb.MsgType {
func (t *dropPartitionTask) Type() internalpb.ReqType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
return t.req.ReqType
}
func (t *dropPartitionTask) Ts() (Timestamp, error) {
@ -120,7 +120,7 @@ func (t *dropPartitionTask) Execute() error {
return err
}
err = t.mt.DeletePartition(partitionName.Tag, collectionName)
err = t.mt.DeletePartition(collectionMeta.Id, partitionName.Tag)
if err != nil {
return err
}
@ -143,12 +143,12 @@ func (t *dropPartitionTask) Execute() error {
}
//////////////////////////////////////////////////////////////////////////
func (t *hasPartitionTask) Type() internalpb.MsgType {
func (t *hasPartitionTask) Type() internalpb.ReqType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
return t.req.ReqType
}
func (t *hasPartitionTask) Ts() (Timestamp, error) {
@ -166,19 +166,24 @@ func (t *hasPartitionTask) Execute() error {
partitionName := t.req.PartitionName
collectionName := partitionName.CollectionName
t.hasPartition = t.mt.HasPartition(partitionName.Tag, collectionName)
collectionMeta, err := t.mt.GetCollectionByName(collectionName)
if err != nil {
return err
}
t.hasPartition = t.mt.HasPartition(collectionMeta.Id, partitionName.Tag)
_ = t.Notify()
return nil
}
//////////////////////////////////////////////////////////////////////////
func (t *describePartitionTask) Type() internalpb.MsgType {
func (t *describePartitionTask) Type() internalpb.ReqType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
return t.req.ReqType
}
func (t *describePartitionTask) Ts() (Timestamp, error) {
@ -210,12 +215,12 @@ func (t *describePartitionTask) Execute() error {
}
//////////////////////////////////////////////////////////////////////////
func (t *showPartitionTask) Type() internalpb.MsgType {
func (t *showPartitionTask) Type() internalpb.ReqType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
return t.req.ReqType
}
func (t *showPartitionTask) Ts() (Timestamp, error) {


@ -18,7 +18,7 @@ type baseTask struct {
}
type task interface {
Type() internalpb.MsgType
Type() internalpb.ReqType
Ts() (Timestamp, error)
Execute() error
WaitToFinish(ctx context.Context) error


@ -17,24 +17,24 @@ func GetMarshalers(inputMsgType MsgType, outputMsgType MsgType) (*TsMsgMarshaler
func GetMarshaler(MsgType MsgType) *TsMsgMarshaler {
switch MsgType {
case KInsert:
case kInsert:
insertMarshaler := &InsertMarshaler{}
var tsMsgMarshaller TsMsgMarshaler = insertMarshaler
return &tsMsgMarshaller
case KDelete:
case kDelete:
deleteMarshaler := &DeleteMarshaler{}
var tsMsgMarshaller TsMsgMarshaler = deleteMarshaler
return &tsMsgMarshaller
case KSearch:
case kSearch:
searchMarshaler := &SearchMarshaler{}
var tsMsgMarshaller TsMsgMarshaler = searchMarshaler
return &tsMsgMarshaller
case KSearchResult:
case kSearchResult:
searchResultMarshler := &SearchResultMarshaler{}
var tsMsgMarshaller TsMsgMarshaler = searchResultMarshler
return &tsMsgMarshaller
case KTimeTick:
timeSyncMarshaler := &TimeTickMarshaler{}
case kTimeSync:
timeSyncMarshaler := &TimeSyncMarshaler{}
var tsMsgMarshaller TsMsgMarshaler = timeSyncMarshaler
return &tsMsgMarshaller
default:
@ -145,10 +145,10 @@ func (srm *SearchResultMarshaler) Unmarshal(input []byte) (*TsMsg, commonPb.Stat
/////////////////////////////////////TimeSync///////////////////////////////////////////////
type TimeTickMarshaler struct{}
type TimeSyncMarshaler struct{}
func (tm *TimeTickMarshaler) Marshal(input *TsMsg) ([]byte, commonPb.Status) {
timeSyncTask := (*input).(TimeTickMsg)
func (tm *TimeSyncMarshaler) Marshal(input *TsMsg) ([]byte, commonPb.Status) {
timeSyncTask := (*input).(TimeSyncTask)
timeSyncMsg := &timeSyncTask.TimeTickMsg
mb, err := proto.Marshal(timeSyncMsg)
if err != nil {
@ -157,10 +157,10 @@ func (tm *TimeTickMarshaler) Marshal(input *TsMsg) ([]byte, commonPb.Status) {
return mb, commonPb.Status{ErrorCode: commonPb.ErrorCode_SUCCESS}
}
func (tm *TimeTickMarshaler) Unmarshal(input []byte) (*TsMsg, commonPb.Status) {
func (tm *TimeSyncMarshaler) Unmarshal(input []byte) (*TsMsg, commonPb.Status) {
timeSyncMsg := internalPb.TimeTickMsg{}
err := proto.Unmarshal(input, &timeSyncMsg)
timeSyncTask := TimeTickMsg{TimeTickMsg: timeSyncMsg}
timeSyncTask := TimeSyncTask{TimeTickMsg: timeSyncMsg}
if err != nil {
return nil, commonPb.Status{ErrorCode: commonPb.ErrorCode_UNEXPECTED_ERROR}
}


@ -273,7 +273,7 @@ func (ms *PulsarTtMsgStream) findTimeTick(ctx context.Context,
(*ms.consumers[channelIndex]).Ack(pulsarMsg)
tsMsg, status := (*ms.msgUnmarshaler).Unmarshal(pulsarMsg.Payload())
// TODO:: Find the EOF
if (*tsMsg).Type() == KTimeSync {
if (*tsMsg).Type() == kTimeSync {
eofMsgMap[channelIndex] = (*tsMsg).EndTs()
wg.Done()
return


@ -27,9 +27,9 @@ func repackFunc(msgs []*TsMsg, hashKeys [][]int32) map[int32]*MsgPack {
func getTsMsg(msgType MsgType, reqId int64, hashValue int32) *TsMsg {
var tsMsg TsMsg
switch msgType {
case KInsert:
case kInsert:
insertRequest := internalPb.InsertRequest{
MsgType: internalPb.MsgType_kInsert,
ReqType: internalPb.ReqType_kInsert,
ReqId: reqId,
CollectionName: "Collection",
PartitionTag: "Partition",
@ -43,9 +43,9 @@ func getTsMsg(msgType MsgType, reqId int64, hashValue int32) *TsMsg {
InsertRequest: insertRequest,
}
tsMsg = insertMsg
case KDelete:
case kDelete:
deleteRequest := internalPb.DeleteRequest{
MsgType: internalPb.MsgType_kDelete,
ReqType: internalPb.ReqType_kDelete,
ReqId: reqId,
CollectionName: "Collection",
ChannelId: 1,
@ -58,9 +58,9 @@ func getTsMsg(msgType MsgType, reqId int64, hashValue int32) *TsMsg {
DeleteRequest: deleteRequest,
}
tsMsg = deleteMsg
case KSearch:
case kSearch:
searchRequest := internalPb.SearchRequest{
MsgType: internalPb.MsgType_kSearch,
ReqType: internalPb.ReqType_kSearch,
ReqId: reqId,
ProxyId: 1,
Timestamp: 1,
@ -71,7 +71,7 @@ func getTsMsg(msgType MsgType, reqId int64, hashValue int32) *TsMsg {
SearchRequest: searchRequest,
}
tsMsg = searchMsg
case KSearchResult:
case kSearchResult:
searchResult := internalPb.SearchResult{
Status: &commonPb.Status{ErrorCode: commonPb.ErrorCode_SUCCESS},
ReqId: reqId,
@ -85,19 +85,19 @@ func getTsMsg(msgType MsgType, reqId int64, hashValue int32) *TsMsg {
SearchResult: searchResult,
}
tsMsg = searchResultMsg
case KTimeSync:
case kTimeSync:
timeSyncResult := internalPb.TimeTickMsg{
PeerId: reqId,
Timestamp: 1,
}
timeSyncMsg := TimeTickMsg{
timeSyncMsg := TimeSyncTask{
HashValues: []int32{hashValue},
TimeTickMsg: timeSyncResult,
}
tsMsg = timeSyncMsg
case KTimeTick:
case kTimeTick:
insertRequest := internalPb.InsertRequest{
MsgType: internalPb.MsgType_kTimeTick,
ReqType: internalPb.ReqType_kTimeTick,
ReqId: reqId,
CollectionName: "Collection",
PartitionTag: "Partition",
@ -166,11 +166,11 @@ func TestStream_Insert(t *testing.T) {
consumerSubName := "subInsert"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 1, 1))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 1, 1))
//run stream
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KInsert, KInsert)
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kInsert, kInsert)
}
func TestStream_Delete(t *testing.T) {
@ -180,11 +180,11 @@ func TestStream_Delete(t *testing.T) {
consumerSubName := "subDelete"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KDelete, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KDelete, 3, 3))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kDelete, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kDelete, 3, 3))
//run stream
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KDelete, KDelete)
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kDelete, kDelete)
}
func TestStream_Search(t *testing.T) {
@ -194,11 +194,11 @@ func TestStream_Search(t *testing.T) {
consumerSubName := "subSearch"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearch, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearch, 3, 3))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearch, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearch, 3, 3))
//run stream
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KSearch, KSearch)
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kSearch, kSearch)
}
func TestStream_SearchResult(t *testing.T) {
@ -208,11 +208,11 @@ func TestStream_SearchResult(t *testing.T) {
consumerSubName := "subSearch"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearchResult, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearchResult, 3, 3))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearchResult, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearchResult, 3, 3))
//run stream
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KSearchResult, KSearchResult)
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kSearchResult, kSearchResult)
}
func TestStream_TimeSync(t *testing.T) {
@ -222,11 +222,11 @@ func TestStream_TimeSync(t *testing.T) {
consumerSubName := "subSearch"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeSync, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeSync, 3, 3))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeSync, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeSync, 3, 3))
//run stream
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KTimeSync, KTimeSync)
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kTimeSync, kTimeSync)
}
func TestStream_BroadCast(t *testing.T) {
@ -236,9 +236,9 @@ func TestStream_BroadCast(t *testing.T) {
consumerSubName := "subInsert"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeTick, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeTick, 3, 3))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeTick, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeTick, 3, 3))
//run stream
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, KInsert, KInsert)
initStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName, &msgPack, kInsert, kInsert)
}


@ -14,14 +14,14 @@ func TestNewStream_Insert(t *testing.T) {
consumerSubName := "subInsert"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 1, 1))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 1, 1))
inputStream := NewInputStream(pulsarAddress, producerChannels, false)
outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, false)
(*inputStream).SetMsgMarshaler(GetMarshaler(KInsert), nil)
(*inputStream).SetMsgMarshaler(GetMarshaler(kInsert), nil)
(*inputStream).SetRepackFunc(repackFunc)
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(KInsert))
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(kInsert))
(*outputStream).Start()
//send msgPack
@ -52,14 +52,14 @@ func TestNewStream_Delete(t *testing.T) {
consumerSubName := "subDelete"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KDelete, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KDelete, 1, 1))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kDelete, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kDelete, 1, 1))
inputStream := NewInputStream(pulsarAddress, producerChannels, false)
outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, false)
(*inputStream).SetMsgMarshaler(GetMarshaler(KDelete), nil)
(*inputStream).SetMsgMarshaler(GetMarshaler(kDelete), nil)
(*inputStream).SetRepackFunc(repackFunc)
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(KDelete))
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(kDelete))
(*outputStream).Start()
//send msgPack
@ -90,14 +90,14 @@ func TestNewStream_Search(t *testing.T) {
consumerSubName := "subSearch"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearch, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearch, 1, 1))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearch, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearch, 1, 1))
inputStream := NewInputStream(pulsarAddress, producerChannels, false)
outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, false)
(*inputStream).SetMsgMarshaler(GetMarshaler(KSearch), nil)
(*inputStream).SetMsgMarshaler(GetMarshaler(kSearch), nil)
(*inputStream).SetRepackFunc(repackFunc)
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(KSearch))
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(kSearch))
(*outputStream).Start()
//send msgPack
@ -128,14 +128,14 @@ func TestNewStream_SearchResult(t *testing.T) {
consumerSubName := "subInsert"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearchResult, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KSearchResult, 1, 1))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearchResult, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kSearchResult, 1, 1))
inputStream := NewInputStream(pulsarAddress, producerChannels, false)
outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, false)
(*inputStream).SetMsgMarshaler(GetMarshaler(KSearchResult), nil)
(*inputStream).SetMsgMarshaler(GetMarshaler(kSearchResult), nil)
(*inputStream).SetRepackFunc(repackFunc)
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(KSearchResult))
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(kSearchResult))
(*outputStream).Start()
//send msgPack
@ -166,14 +166,14 @@ func TestNewStream_TimeSync(t *testing.T) {
consumerSubName := "subInsert"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeSync, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KTimeSync, 1, 1))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeSync, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kTimeSync, 1, 1))
inputStream := NewInputStream(pulsarAddress, producerChannels, false)
outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, false)
(*inputStream).SetMsgMarshaler(GetMarshaler(KTimeSync), nil)
(*inputStream).SetMsgMarshaler(GetMarshaler(kTimeSync), nil)
(*inputStream).SetRepackFunc(repackFunc)
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(KTimeSync))
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(kTimeSync))
(*outputStream).Start()
//send msgPack
@ -203,11 +203,11 @@ func TestNewStream_Insert_TimeTick(t *testing.T) {
consumerSubName := "subInsert"
msgPack := MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(KInsert, 1, 1))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 0, 0))
msgPack.Msgs = append(msgPack.Msgs, getTsMsg(kInsert, 1, 1))
insertRequest := internalPb.InsertRequest{
MsgType: internalPb.MsgType_kTimeTick,
ReqType: internalPb.ReqType_kTimeTick,
ReqId: 2,
CollectionName: "Collection",
PartitionTag: "Partition",
@ -226,9 +226,9 @@ func TestNewStream_Insert_TimeTick(t *testing.T) {
inputStream := NewInputStream(pulsarAddress, producerChannels, false)
outputStream := NewOutputStream(pulsarAddress, 100, 100, consumerChannels, consumerSubName, true)
(*inputStream).SetMsgMarshaler(GetMarshaler(KInsert), nil)
(*inputStream).SetMsgMarshaler(GetMarshaler(kInsert), nil)
(*inputStream).SetRepackFunc(repackFunc)
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(KInsert))
(*outputStream).SetMsgMarshaler(nil, GetMarshaler(kInsert))
(*outputStream).Start()
//send msgPack


@ -8,14 +8,14 @@ import (
type MsgType uint32
const (
KInsert MsgType = 400
KDelete MsgType = 401
KSearch MsgType = 500
KSearchResult MsgType = 1000
kInsert MsgType = 400
kDelete MsgType = 401
kSearch MsgType = 500
kSearchResult MsgType = 1000
KSegmentStatics MsgType = 1100
KTimeTick MsgType = 1200
KTimeSync MsgType = 1201
kSegmentStatics MsgType = 1100
kTimeTick MsgType = 1200
kTimeSync MsgType = 1201
)
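
Lowercasing the first letter here is not cosmetic: in Go an identifier that begins with a lowercase letter is unexported, so after this rename the MsgType constants are visible only inside their own package and external callers can no longer reference them directly. For example (the package name msgstream is an assumption based on the file contents, not shown in this diff):

```go
// Visibility in Go follows the case of the first letter:
const KInsert MsgType = 400 // exported: importers can use msgstream.KInsert
const kInsert MsgType = 400 // unexported: package-private
```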
type TsMsg interface {
@ -67,10 +67,10 @@ func (it InsertTask) EndTs() Timestamp {
}
func (it InsertTask) Type() MsgType {
if it.MsgType == internalPb.MsgType_kTimeTick {
return KTimeSync
if it.ReqType == internalPb.ReqType_kTimeTick {
return kTimeSync
}
return KInsert
return kInsert
}
func (it InsertTask) HashKeys() []int32 {
@ -118,10 +118,10 @@ func (dt DeleteTask) EndTs() Timestamp {
}
func (dt DeleteTask) Type() MsgType {
if dt.MsgType == internalPb.MsgType_kTimeTick {
return KTimeSync
if dt.ReqType == internalPb.ReqType_kTimeTick {
return kTimeSync
}
return KDelete
return kDelete
}
func (dt DeleteTask) HashKeys() []int32 {
@ -147,10 +147,10 @@ func (st SearchTask) EndTs() Timestamp {
}
func (st SearchTask) Type() MsgType {
if st.MsgType == internalPb.MsgType_kTimeTick {
return KTimeSync
if st.ReqType == internalPb.ReqType_kTimeTick {
return kTimeSync
}
return KSearch
return kSearch
}
func (st SearchTask) HashKeys() []int32 {
@ -176,7 +176,7 @@ func (srt SearchResultTask) EndTs() Timestamp {
}
func (srt SearchResultTask) Type() MsgType {
return KSearchResult
return kSearchResult
}
func (srt SearchResultTask) HashKeys() []int32 {
@ -184,28 +184,28 @@ func (srt SearchResultTask) HashKeys() []int32 {
}
/////////////////////////////////////////TimeSync//////////////////////////////////////////
type TimeTickMsg struct {
type TimeSyncTask struct {
HashValues []int32
internalPb.TimeTickMsg
}
func (tst TimeTickMsg) SetTs(ts Timestamp) {
func (tst TimeSyncTask) SetTs(ts Timestamp) {
tst.Timestamp = uint64(ts)
}
func (tst TimeTickMsg) BeginTs() Timestamp {
func (tst TimeSyncTask) BeginTs() Timestamp {
return Timestamp(tst.Timestamp)
}
func (tst TimeTickMsg) EndTs() Timestamp {
func (tst TimeSyncTask) EndTs() Timestamp {
return Timestamp(tst.Timestamp)
}
func (tst TimeTickMsg) Type() MsgType {
return KTimeSync
func (tst TimeSyncTask) Type() MsgType {
return kTimeSync
}
func (tst TimeTickMsg) HashKeys() []int32 {
func (tst TimeSyncTask) HashKeys() []int32 {
return tst.HashValues
}


@ -6,7 +6,7 @@ import "common.proto";
import "service_msg.proto";
enum MsgType {
enum ReqType {
kNone = 0;
/* Definition Requests: collection */
kCreateCollection = 100;
@ -72,7 +72,7 @@ message TsoResponse {
message CreateCollectionRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
@ -81,7 +81,7 @@ message CreateCollectionRequest {
message DropCollectionRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
@ -90,7 +90,7 @@ message DropCollectionRequest {
message HasCollectionRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
@ -99,7 +99,7 @@ message HasCollectionRequest {
message DescribeCollectionRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
@ -108,7 +108,7 @@ message DescribeCollectionRequest {
message ShowCollectionRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
@ -116,7 +116,7 @@ message ShowCollectionRequest {
message CreatePartitionRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
@ -125,7 +125,7 @@ message CreatePartitionRequest {
message DropPartitionRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
@ -134,7 +134,7 @@ message DropPartitionRequest {
message HasPartitionRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
@ -143,7 +143,7 @@ message HasPartitionRequest {
message DescribePartitionRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
@ -152,7 +152,7 @@ message DescribePartitionRequest {
message ShowPartitionRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
uint64 timestamp = 3;
int64 proxy_id = 4;
@ -161,7 +161,7 @@ message ShowPartitionRequest {
message InsertRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
string collection_name = 3;
string partition_tag = 4;
@ -175,7 +175,7 @@ message InsertRequest {
message DeleteRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
string collection_name = 3;
int64 channel_id = 4;
@ -186,7 +186,7 @@ message DeleteRequest {
message SearchRequest {
MsgType msg_type = 1;
ReqType req_type = 1;
int64 req_id = 2;
int64 proxy_id = 3;
uint64 timestamp = 4;
@ -205,6 +205,7 @@ message SearchResult {
repeated service.Hits hits = 7;
}
message TimeTickMsg {
int64 peer_id = 1;
uint64 timestamp = 2;


@ -22,33 +22,33 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type MsgType int32
type ReqType int32
const (
MsgType_kNone MsgType = 0
ReqType_kNone ReqType = 0
// Definition Requests: collection
MsgType_kCreateCollection MsgType = 100
MsgType_kDropCollection MsgType = 101
MsgType_kHasCollection MsgType = 102
MsgType_kDescribeCollection MsgType = 103
MsgType_kShowCollections MsgType = 104
ReqType_kCreateCollection ReqType = 100
ReqType_kDropCollection ReqType = 101
ReqType_kHasCollection ReqType = 102
ReqType_kDescribeCollection ReqType = 103
ReqType_kShowCollections ReqType = 104
// Definition Requests: partition
MsgType_kCreatePartition MsgType = 200
MsgType_kDropPartition MsgType = 201
MsgType_kHasPartition MsgType = 202
MsgType_kDescribePartition MsgType = 203
MsgType_kShowPartitions MsgType = 204
ReqType_kCreatePartition ReqType = 200
ReqType_kDropPartition ReqType = 201
ReqType_kHasPartition ReqType = 202
ReqType_kDescribePartition ReqType = 203
ReqType_kShowPartitions ReqType = 204
// Manipulation Requests
MsgType_kInsert MsgType = 400
MsgType_kDelete MsgType = 401
ReqType_kInsert ReqType = 400
ReqType_kDelete ReqType = 401
// Query
MsgType_kSearch MsgType = 500
ReqType_kSearch ReqType = 500
// System Control
MsgType_kTimeTick MsgType = 1200
MsgType_kTimeSync MsgType = 1201
ReqType_kTimeTick ReqType = 1200
ReqType_kTimeSync ReqType = 1201
)
var MsgType_name = map[int32]string{
var ReqType_name = map[int32]string{
0: "kNone",
100: "kCreateCollection",
101: "kDropCollection",
@ -67,7 +67,7 @@ var MsgType_name = map[int32]string{
1201: "kTimeSync",
}
var MsgType_value = map[string]int32{
var ReqType_value = map[string]int32{
"kNone": 0,
"kCreateCollection": 100,
"kDropCollection": 101,
@ -86,11 +86,11 @@ var MsgType_value = map[string]int32{
"kTimeSync": 1201,
}
func (x MsgType) String() string {
return proto.EnumName(MsgType_name, int32(x))
func (x ReqType) String() string {
return proto.EnumName(ReqType_name, int32(x))
}
func (MsgType) EnumDescriptor() ([]byte, []int) {
func (ReqType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{0}
}
@ -346,7 +346,7 @@ func (m *TsoResponse) GetCount() uint32 {
}
type CreateCollectionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
@ -381,11 +381,11 @@ func (m *CreateCollectionRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_CreateCollectionRequest proto.InternalMessageInfo
func (m *CreateCollectionRequest) GetMsgType() MsgType {
func (m *CreateCollectionRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *CreateCollectionRequest) GetReqId() int64 {
@ -417,7 +417,7 @@ func (m *CreateCollectionRequest) GetSchema() *commonpb.Blob {
}
type DropCollectionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
@ -452,11 +452,11 @@ func (m *DropCollectionRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_DropCollectionRequest proto.InternalMessageInfo
func (m *DropCollectionRequest) GetMsgType() MsgType {
func (m *DropCollectionRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *DropCollectionRequest) GetReqId() int64 {
@ -488,7 +488,7 @@ func (m *DropCollectionRequest) GetCollectionName() *servicepb.CollectionName {
}
type HasCollectionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
@ -523,11 +523,11 @@ func (m *HasCollectionRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_HasCollectionRequest proto.InternalMessageInfo
func (m *HasCollectionRequest) GetMsgType() MsgType {
func (m *HasCollectionRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *HasCollectionRequest) GetReqId() int64 {
@ -559,7 +559,7 @@ func (m *HasCollectionRequest) GetCollectionName() *servicepb.CollectionName {
}
type DescribeCollectionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
@ -594,11 +594,11 @@ func (m *DescribeCollectionRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_DescribeCollectionRequest proto.InternalMessageInfo
func (m *DescribeCollectionRequest) GetMsgType() MsgType {
func (m *DescribeCollectionRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *DescribeCollectionRequest) GetReqId() int64 {
@ -630,7 +630,7 @@ func (m *DescribeCollectionRequest) GetCollectionName() *servicepb.CollectionNam
}
type ShowCollectionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
@ -664,11 +664,11 @@ func (m *ShowCollectionRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_ShowCollectionRequest proto.InternalMessageInfo
func (m *ShowCollectionRequest) GetMsgType() MsgType {
func (m *ShowCollectionRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *ShowCollectionRequest) GetReqId() int64 {
@ -693,7 +693,7 @@ func (m *ShowCollectionRequest) GetProxyId() int64 {
}
type CreatePartitionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
@ -728,11 +728,11 @@ func (m *CreatePartitionRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_CreatePartitionRequest proto.InternalMessageInfo
func (m *CreatePartitionRequest) GetMsgType() MsgType {
func (m *CreatePartitionRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *CreatePartitionRequest) GetReqId() int64 {
@ -764,7 +764,7 @@ func (m *CreatePartitionRequest) GetPartitionName() *servicepb.PartitionName {
}
type DropPartitionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
@ -799,11 +799,11 @@ func (m *DropPartitionRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_DropPartitionRequest proto.InternalMessageInfo
func (m *DropPartitionRequest) GetMsgType() MsgType {
func (m *DropPartitionRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *DropPartitionRequest) GetReqId() int64 {
@ -835,7 +835,7 @@ func (m *DropPartitionRequest) GetPartitionName() *servicepb.PartitionName {
}
type HasPartitionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
@ -870,11 +870,11 @@ func (m *HasPartitionRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_HasPartitionRequest proto.InternalMessageInfo
func (m *HasPartitionRequest) GetMsgType() MsgType {
func (m *HasPartitionRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *HasPartitionRequest) GetReqId() int64 {
@ -906,7 +906,7 @@ func (m *HasPartitionRequest) GetPartitionName() *servicepb.PartitionName {
}
type DescribePartitionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
@ -941,11 +941,11 @@ func (m *DescribePartitionRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_DescribePartitionRequest proto.InternalMessageInfo
func (m *DescribePartitionRequest) GetMsgType() MsgType {
func (m *DescribePartitionRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *DescribePartitionRequest) GetReqId() int64 {
@ -977,7 +977,7 @@ func (m *DescribePartitionRequest) GetPartitionName() *servicepb.PartitionName {
}
type ShowPartitionRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,4,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
@ -1012,11 +1012,11 @@ func (m *ShowPartitionRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_ShowPartitionRequest proto.InternalMessageInfo
func (m *ShowPartitionRequest) GetMsgType() MsgType {
func (m *ShowPartitionRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *ShowPartitionRequest) GetReqId() int64 {
@ -1048,7 +1048,7 @@ func (m *ShowPartitionRequest) GetCollectionName() *servicepb.CollectionName {
}
type InsertRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
CollectionName string `protobuf:"bytes,3,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
PartitionTag string `protobuf:"bytes,4,opt,name=partition_tag,json=partitionTag,proto3" json:"partition_tag,omitempty"`
@ -1088,11 +1088,11 @@ func (m *InsertRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_InsertRequest proto.InternalMessageInfo
func (m *InsertRequest) GetMsgType() MsgType {
func (m *InsertRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *InsertRequest) GetReqId() int64 {
@ -1159,7 +1159,7 @@ func (m *InsertRequest) GetRowData() []*commonpb.Blob {
}
type DeleteRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
CollectionName string `protobuf:"bytes,3,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
ChannelId int64 `protobuf:"varint,4,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
@ -1196,11 +1196,11 @@ func (m *DeleteRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo
func (m *DeleteRequest) GetMsgType() MsgType {
func (m *DeleteRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *DeleteRequest) GetReqId() int64 {
@ -1246,7 +1246,7 @@ func (m *DeleteRequest) GetPrimaryKeys() []int64 {
}
type SearchRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqType ReqType `protobuf:"varint,1,opt,name=req_type,json=reqType,proto3,enum=milvus.proto.internal.ReqType" json:"req_type,omitempty"`
ReqId int64 `protobuf:"varint,2,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
ProxyId int64 `protobuf:"varint,3,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
@ -1282,11 +1282,11 @@ func (m *SearchRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_SearchRequest proto.InternalMessageInfo
func (m *SearchRequest) GetMsgType() MsgType {
func (m *SearchRequest) GetReqType() ReqType {
if m != nil {
return m.MsgType
return m.ReqType
}
return MsgType_kNone
return ReqType_kNone
}
func (m *SearchRequest) GetReqId() int64 {
@ -1632,7 +1632,7 @@ func (m *SegmentStatistics) GetNumRows() int64 {
}
func init() {
proto.RegisterEnum("milvus.proto.internal.MsgType", MsgType_name, MsgType_value)
proto.RegisterEnum("milvus.proto.internal.ReqType", ReqType_name, ReqType_value)
proto.RegisterEnum("milvus.proto.internal.PeerRole", PeerRole_name, PeerRole_value)
proto.RegisterType((*IdRequest)(nil), "milvus.proto.internal.IdRequest")
proto.RegisterType((*IdResponse)(nil), "milvus.proto.internal.IdResponse")
@ -1661,79 +1661,79 @@ func init() {
func init() { proto.RegisterFile("internal_msg.proto", fileDescriptor_7eb37f6b80b23116) }
var fileDescriptor_7eb37f6b80b23116 = []byte{
// 1181 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0x4b, 0x6f, 0x1c, 0xc5,
0x13, 0xcf, 0xec, 0xd3, 0x5b, 0xeb, 0x5d, 0x8f, 0xdb, 0xf6, 0xdf, 0x9b, 0xfc, 0x21, 0x31, 0x13,
0x24, 0xac, 0x48, 0xd8, 0xc2, 0xe1, 0x40, 0xae, 0xc9, 0x1e, 0xb2, 0x44, 0x8e, 0xac, 0x59, 0x0b,
0x24, 0x24, 0x34, 0x9a, 0x9d, 0x29, 0x66, 0x5b, 0xf3, 0xe8, 0x71, 0x77, 0xaf, 0xcd, 0xfa, 0x0b,
0x70, 0x05, 0x71, 0xe4, 0xc6, 0x27, 0x80, 0x3b, 0x1f, 0x80, 0xd7, 0x9d, 0x2f, 0x01, 0x82, 0x48,
0xa0, 0x5c, 0x51, 0xf7, 0xcc, 0x3e, 0x66, 0xfd, 0xe0, 0x19, 0x64, 0xc9, 0xb7, 0xa9, 0x9a, 0x9e,
0xae, 0xfa, 0xfd, 0xea, 0xb1, 0x55, 0x0b, 0x84, 0x26, 0x12, 0x79, 0xe2, 0x46, 0x4e, 0x2c, 0x82,
0x9d, 0x94, 0x33, 0xc9, 0xc8, 0x46, 0x4c, 0xa3, 0xe3, 0x91, 0xc8, 0xa4, 0x9d, 0xc9, 0x81, 0x5b,
0xcb, 0x1e, 0x8b, 0x63, 0x96, 0x64, 0xea, 0x5b, 0xab, 0x02, 0xf9, 0x31, 0xf5, 0x70, 0xf6, 0x9d,
0xc5, 0xa0, 0xd1, 0xf3, 0x6d, 0x3c, 0x1a, 0xa1, 0x90, 0x64, 0x13, 0xea, 0x29, 0x22, 0x77, 0xa8,
0xdf, 0x31, 0xb6, 0x8c, 0xed, 0xb2, 0x5d, 0x53, 0x62, 0xcf, 0x27, 0xf7, 0xa1, 0xc2, 0x59, 0x84,
0x9d, 0xd2, 0x96, 0xb1, 0xdd, 0xde, 0xbb, 0xb3, 0x73, 0xae, 0xb1, 0x9d, 0x03, 0x44, 0x6e, 0xb3,
0x08, 0x6d, 0x7d, 0x98, 0xac, 0x43, 0xd5, 0x63, 0xa3, 0x44, 0x76, 0xca, 0x5b, 0xc6, 0x76, 0xcb,
0xce, 0x04, 0x2b, 0x00, 0x50, 0x06, 0x45, 0xca, 0x12, 0x81, 0xe4, 0x3e, 0xd4, 0x84, 0x74, 0xe5,
0x48, 0x68, 0x83, 0xcd, 0xbd, 0xff, 0x17, 0xaf, 0xce, 0xbd, 0xef, 0xeb, 0x23, 0x76, 0x7e, 0x94,
0xb4, 0xa1, 0x44, 0x7d, 0xed, 0x4b, 0xd9, 0x2e, 0x51, 0xff, 0x02, 0x43, 0x29, 0xc0, 0xa1, 0x60,
0xff, 0x25, 0xb4, 0x63, 0x68, 0x6a, 0x8b, 0xff, 0x04, 0xdb, 0x4b, 0xd0, 0x90, 0x34, 0x46, 0x21,
0xdd, 0x38, 0xd5, 0x3e, 0x55, 0xec, 0x99, 0xe2, 0x02, 0xbb, 0x3f, 0x18, 0xb0, 0xf9, 0x88, 0xa3,
0x2b, 0xf1, 0x11, 0x8b, 0x22, 0xf4, 0x24, 0x65, 0xc9, 0x04, 0xf7, 0x03, 0x58, 0x8a, 0x45, 0xe0,
0xc8, 0x71, 0x8a, 0xda, 0x8d, 0xf6, 0xde, 0xed, 0x0b, 0x20, 0xee, 0x8b, 0xe0, 0x70, 0x9c, 0xa2,
0x5d, 0x8f, 0xb3, 0x07, 0xb2, 0x01, 0x35, 0x8e, 0x47, 0xce, 0x94, 0xea, 0x2a, 0xc7, 0xa3, 0x9e,
0x5f, 0xf4, 0xb0, 0xbc, 0xe8, 0xe1, 0x4d, 0x58, 0x4a, 0x39, 0xfb, 0x70, 0xac, 0x3e, 0xab, 0xe8,
0xcf, 0xea, 0x5a, 0xee, 0xf9, 0xe4, 0x0d, 0xa8, 0x09, 0x6f, 0x88, 0xb1, 0xdb, 0xa9, 0x6a, 0x3e,
0x6e, 0x9e, 0xcb, 0xc7, 0xc3, 0x88, 0x0d, 0xec, 0xfc, 0xa0, 0xf5, 0xcc, 0x80, 0x8d, 0x2e, 0x67,
0xe9, 0x95, 0xc6, 0xb5, 0x0f, 0x2b, 0xde, 0xd4, 0x3f, 0x27, 0x71, 0x63, 0xcc, 0x01, 0xbe, 0x5a,
0xf4, 0x28, 0x2f, 0xbe, 0x9d, 0x19, 0x98, 0xa7, 0x6e, 0x8c, 0x76, 0xdb, 0x2b, 0xc8, 0xd6, 0x2f,
0x06, 0xac, 0x3f, 0x76, 0xc5, 0x75, 0x82, 0xfc, 0x9b, 0x01, 0x37, 0xbb, 0x28, 0x3c, 0x4e, 0x07,
0x78, 0x9d, 0x70, 0x7f, 0x6e, 0xc0, 0x46, 0x7f, 0xc8, 0x4e, 0xae, 0x32, 0x66, 0xeb, 0x67, 0x03,
0xfe, 0x97, 0x75, 0x97, 0x03, 0x97, 0x4b, 0x7a, 0x45, 0x23, 0xf3, 0x36, 0xb4, 0xd3, 0x89, 0x7b,
0xf3, 0x81, 0xb9, 0x7b, 0x7e, 0x60, 0xa6, 0x50, 0x74, 0x5c, 0x5a, 0xe9, 0xbc, 0x68, 0xfd, 0x64,
0xc0, 0xba, 0xea, 0x3a, 0xd7, 0x05, 0xef, 0x8f, 0x06, 0xac, 0x3d, 0x76, 0xc5, 0x75, 0x81, 0xfb,
0xcc, 0x80, 0xce, 0xa4, 0xdb, 0x5c, 0x17, 0xcc, 0xea, 0x47, 0x45, 0x75, 0x9a, 0xab, 0x8c, 0xf7,
0x5f, 0x6e, 0xae, 0xcf, 0x4b, 0xd0, 0xea, 0x25, 0x02, 0xb9, 0x7c, 0x71, 0x58, 0x5f, 0x3b, 0xeb,
0xb2, 0x42, 0xdc, 0x58, 0x74, 0x86, 0xdc, 0x85, 0x59, 0x40, 0x1c, 0xe9, 0x06, 0x1a, 0x7b, 0xc3,
0x5e, 0x9e, 0x2a, 0x0f, 0xdd, 0x80, 0xbc, 0x0c, 0x20, 0x30, 0x88, 0x31, 0x91, 0xca, 0x50, 0x55,
0x1b, 0x6a, 0xe4, 0x9a, 0x9e, 0xaf, 0x5e, 0x7b, 0x43, 0x37, 0x49, 0x30, 0x52, 0xaf, 0x6b, 0xd9,
0xeb, 0x5c, 0xd3, 0xf3, 0x0b, 0xcc, 0xd6, 0x8b, 0xcc, 0xde, 0x06, 0x98, 0x46, 0x40, 0x74, 0x96,
0xb6, 0xca, 0xdb, 0x15, 0x7b, 0x4e, 0xa3, 0x86, 0x63, 0xce, 0x4e, 0x1c, 0xea, 0x8b, 0x4e, 0x63,
0xab, 0xac, 0x86, 0x63, 0xce, 0x4e, 0x7a, 0xbe, 0x20, 0x6f, 0xc2, 0x92, 0x7a, 0xe1, 0xbb, 0xd2,
0xed, 0xc0, 0x56, 0xf9, 0xf2, 0xa1, 0x4d, 0xdd, 0xd1, 0x75, 0xa5, 0x6b, 0x7d, 0x54, 0x82, 0x56,
0x17, 0x23, 0x94, 0x78, 0x05, 0x98, 0x2f, 0xb2, 0x56, 0xb9, 0x8c, 0xb5, 0xea, 0x65, 0xac, 0xd5,
0xce, 0xb0, 0xf6, 0x0a, 0x2c, 0xa7, 0x9c, 0xc6, 0x2e, 0x1f, 0x3b, 0x21, 0x8e, 0x45, 0xa7, 0xae,
0xa9, 0x6b, 0xe6, 0xba, 0x27, 0x38, 0x16, 0xd6, 0x73, 0x03, 0x5a, 0x7d, 0x74, 0xb9, 0x37, 0x7c,
0x71, 0x4c, 0xcc, 0x23, 0x28, 0x17, 0x11, 0x14, 0x4a, 0xb1, 0xb2, 0x58, 0x8a, 0xf7, 0x60, 0x95,
0xa3, 0x18, 0x45, 0xd2, 0x99, 0x23, 0x28, 0xe3, 0x60, 0x25, 0x7b, 0xf1, 0x68, 0x4a, 0xd3, 0x2e,
0x54, 0x8f, 0x46, 0xc8, 0xc7, 0x3a, 0xed, 0x2e, 0xcd, 0x82, 0xec, 0x9c, 0xf5, 0x69, 0x09, 0x96,
0x27, 0xc8, 0xd5, 0x55, 0x7f, 0x6f, 0x1b, 0xfa, 0xeb, 0x90, 0x2d, 0x68, 0x69, 0x07, 0x9c, 0x84,
0xf9, 0x38, 0x8b, 0x78, 0x53, 0x2b, 0x9f, 0x32, 0x1f, 0x17, 0x69, 0xa9, 0xfe, 0x29, 0x5a, 0x6a,
0xe7, 0xd3, 0xb2, 0x03, 0x95, 0x21, 0x95, 0x59, 0xe8, 0x9b, 0x7b, 0xb7, 0xce, 0xef, 0x53, 0x8f,
0xa9, 0x14, 0xb6, 0x3e, 0x67, 0x75, 0xa1, 0x79, 0x48, 0x63, 0x3c, 0xa4, 0x5e, 0xb8, 0x2f, 0x82,
0x8b, 0x97, 0xd2, 0x4b, 0xb7, 0x40, 0xeb, 0x33, 0x03, 0xea, 0x4f, 0x70, 0xbc, 0xd7, 0xc7, 0x40,
0x33, 0xa4, 0x4b, 0x37, 0xbf, 0xa1, 0xaa, 0x2b, 0x97, 0xdc, 0x81, 0xe6, 0x5c, 0x6e, 0xe6, 0xec,
0xc1, 0x2c, 0x35, 0xff, 0xb8, 0x4b, 0x53, 0xe1, 0x1c, 0xbb, 0x51, 0x4e, 0xe0, 0x92, 0x5d, 0xa7,
0xe2, 0x1d, 0x25, 0xaa, 0x9b, 0x67, 0x4d, 0x4a, 0x74, 0xaa, 0x3a, 0xe9, 0x61, 0xda, 0xa5, 0x84,
0xf5, 0x3e, 0x40, 0xee, 0x9c, 0x82, 0x38, 0x8b, 0xa0, 0x31, 0x1f, 0xc1, 0xb7, 0xa0, 0x1e, 0xe2,
0x78, 0x4f, 0x60, 0xd0, 0x29, 0x69, 0xee, 0x2e, 0xaa, 0x82, 0xfc, 0x2a, 0x7b, 0x72, 0xdc, 0x4a,
0x60, 0xb5, 0x9f, 0x19, 0x53, 0xb9, 0x42, 0x85, 0xa4, 0x9e, 0x58, 0xe8, 0x9c, 0xc6, 0x62, 0xe7,
0xbc, 0x03, 0xcd, 0x18, 0x63, 0xc6, 0xc7, 0x8e, 0xa0, 0xa7, 0x38, 0x61, 0x23, 0x53, 0xf5, 0xe9,
0x29, 0x2a, 0xbc, 0xc9, 0x28, 0x76, 0x38, 0x3b, 0x11, 0x93, 0x84, 0x4a, 0x46, 0xb1, 0xcd, 0x4e,
0xc4, 0xbd, 0xaf, 0x4a, 0x50, 0xcf, 0x4b, 0x91, 0x34, 0xa0, 0x1a, 0x3e, 0x65, 0x09, 0x9a, 0x37,
0xc8, 0x06, 0xac, 0x86, 0x8b, 0x3b, 0xb7, 0xe9, 0x93, 0x35, 0x58, 0x09, 0x8b, 0x0b, 0xab, 0x89,
0x84, 0x40, 0x3b, 0x2c, 0x6c, 0x74, 0xe6, 0x07, 0x64, 0x13, 0xd6, 0xc2, 0xb3, 0x2b, 0x8f, 0x19,
0x90, 0x75, 0x30, 0xc3, 0xe2, 0x4e, 0x20, 0xcc, 0x21, 0xd9, 0x00, 0x33, 0x5c, 0x18, 0xc2, 0xcd,
0xaf, 0x0d, 0xb2, 0x06, 0xed, 0xb0, 0x30, 0xa9, 0x9a, 0xdf, 0x18, 0x84, 0x40, 0x2b, 0x9c, 0x1f,
0xe7, 0xcc, 0x6f, 0x0d, 0xb2, 0x09, 0x24, 0x3c, 0x33, 0xf3, 0x98, 0xdf, 0x19, 0x64, 0x1d, 0x56,
0xc2, 0xc2, 0x60, 0x20, 0xcc, 0xef, 0x0d, 0xb2, 0x0c, 0xf5, 0x30, 0xfb, 0xed, 0x34, 0x3f, 0x2e,
0x6b, 0x29, 0xeb, 0xe7, 0xe6, 0x27, 0x99, 0x94, 0x55, 0xb6, 0xf9, 0x6b, 0x99, 0xb4, 0xa1, 0x11,
0x4e, 0x52, 0xda, 0xfc, 0xa2, 0x31, 0x95, 0xfb, 0xe3, 0xc4, 0x33, 0xbf, 0x6c, 0xdc, 0x7b, 0x00,
0x4b, 0x93, 0xff, 0x4e, 0x08, 0x40, 0x6d, 0xdf, 0x15, 0x12, 0xb9, 0x79, 0x43, 0x3d, 0xdb, 0xe8,
0xfa, 0xc8, 0x4d, 0x43, 0x3d, 0xbf, 0xcb, 0xa9, 0xd2, 0x97, 0x14, 0xc5, 0x07, 0xaa, 0x94, 0xcd,
0xf2, 0xc3, 0xee, 0x7b, 0x0f, 0x03, 0x2a, 0x87, 0xa3, 0x81, 0x6a, 0x0e, 0xbb, 0xa7, 0x34, 0x8a,
0xe8, 0xa9, 0x44, 0x6f, 0xb8, 0x9b, 0x65, 0xca, 0xeb, 0x3e, 0x15, 0x92, 0xd3, 0xc1, 0x48, 0xa2,
0xbf, 0x3b, 0xc9, 0x97, 0x5d, 0x9d, 0x3e, 0x53, 0x31, 0x1d, 0x0c, 0x6a, 0x5a, 0x73, 0xff, 0xf7,
0x00, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x14, 0x7f, 0x35, 0x35, 0x13, 0x00, 0x00,
// 1178 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0x4b, 0x6f, 0x1b, 0xd5,
0x17, 0xef, 0xf8, 0x19, 0x1f, 0xc7, 0xce, 0xe4, 0x26, 0xf9, 0xc7, 0xed, 0x1f, 0x5a, 0x33, 0x45,
0x22, 0xaa, 0x84, 0x23, 0x5c, 0x16, 0x74, 0xdb, 0x7a, 0x51, 0x53, 0xb5, 0xaa, 0xc6, 0x11, 0x48,
0x48, 0x68, 0x34, 0x9e, 0x39, 0xd8, 0x57, 0xf3, 0xb8, 0x93, 0x7b, 0xaf, 0x13, 0x9c, 0x2f, 0xc0,
0x16, 0xc4, 0x92, 0x1d, 0x9f, 0x00, 0xf6, 0x7c, 0x00, 0x5e, 0x7b, 0xbe, 0x04, 0x08, 0x2a, 0x81,
0xba, 0x45, 0xf7, 0xce, 0xf8, 0x31, 0xce, 0x83, 0x67, 0x51, 0xa4, 0xec, 0xe6, 0x9c, 0xfb, 0x38,
0xe7, 0xf7, 0x3b, 0x0f, 0xdf, 0x63, 0x20, 0x34, 0x96, 0xc8, 0x63, 0x37, 0x74, 0x22, 0x31, 0xea,
0x24, 0x9c, 0x49, 0x46, 0x76, 0x22, 0x1a, 0x1e, 0x4d, 0x44, 0x2a, 0x75, 0x66, 0x1b, 0x6e, 0xac,
0x7b, 0x2c, 0x8a, 0x58, 0x9c, 0xaa, 0x6f, 0x6c, 0x0a, 0xe4, 0x47, 0xd4, 0xc3, 0xc5, 0x39, 0x8b,
0x41, 0xad, 0xef, 0xdb, 0x78, 0x38, 0x41, 0x21, 0xc9, 0x2e, 0x54, 0x13, 0x44, 0xee, 0x50, 0xbf,
0x65, 0xb4, 0x8d, 0xbd, 0xa2, 0x5d, 0x51, 0x62, 0xdf, 0x27, 0x77, 0xa1, 0xc4, 0x59, 0x88, 0xad,
0x42, 0xdb, 0xd8, 0x6b, 0x76, 0x6f, 0x75, 0xce, 0x34, 0xd6, 0x79, 0x8a, 0xc8, 0x6d, 0x16, 0xa2,
0xad, 0x37, 0x93, 0x6d, 0x28, 0x7b, 0x6c, 0x12, 0xcb, 0x56, 0xb1, 0x6d, 0xec, 0x35, 0xec, 0x54,
0xb0, 0x46, 0x00, 0xca, 0xa0, 0x48, 0x58, 0x2c, 0x90, 0xdc, 0x85, 0x8a, 0x90, 0xae, 0x9c, 0x08,
0x6d, 0xb0, 0xde, 0xfd, 0x7f, 0xfe, 0xea, 0xcc, 0xfb, 0x81, 0xde, 0x62, 0x67, 0x5b, 0x49, 0x13,
0x0a, 0xd4, 0xd7, 0xbe, 0x14, 0xed, 0x02, 0xf5, 0xcf, 0x31, 0x94, 0x00, 0x1c, 0x08, 0xf6, 0x5f,
0x42, 0x3b, 0x82, 0xba, 0xb6, 0xf8, 0x4f, 0xb0, 0xbd, 0x04, 0x35, 0x49, 0x23, 0x14, 0xd2, 0x8d,
0x12, 0xed, 0x53, 0xc9, 0x5e, 0x28, 0xce, 0xb1, 0xfb, 0x83, 0x01, 0xbb, 0x0f, 0x38, 0xba, 0x12,
0x1f, 0xb0, 0x30, 0x44, 0x4f, 0x52, 0x16, 0xcf, 0x70, 0xdf, 0x83, 0x35, 0x8e, 0x87, 0x8e, 0x9c,
0x26, 0xa8, 0xdd, 0x68, 0x76, 0x6f, 0x9e, 0x03, 0xd1, 0xc6, 0xc3, 0x83, 0x69, 0x82, 0x76, 0x95,
0xa7, 0x1f, 0x64, 0x07, 0x2a, 0xea, 0xe8, 0x9c, 0xea, 0x32, 0xc7, 0xc3, 0xbe, 0x9f, 0xf7, 0xb0,
0xb8, 0xea, 0xe1, 0x75, 0x58, 0x4b, 0x38, 0xfb, 0x70, 0xaa, 0x8e, 0x95, 0xf4, 0xb1, 0xaa, 0x96,
0xfb, 0x3e, 0x79, 0x03, 0x2a, 0xc2, 0x1b, 0x63, 0xe4, 0xb6, 0xca, 0x9a, 0x8f, 0xeb, 0x67, 0xf2,
0x71, 0x3f, 0x64, 0x43, 0x3b, 0xdb, 0x68, 0x3d, 0x33, 0x60, 0xa7, 0xc7, 0x59, 0x72, 0xa9, 0x71,
0x3d, 0x86, 0x0d, 0x6f, 0xee, 0x9f, 0x13, 0xbb, 0x11, 0x66, 0x00, 0x5f, 0xcd, 0x7b, 0x94, 0x15,
0x5f, 0x67, 0x01, 0xe6, 0x89, 0x1b, 0xa1, 0xdd, 0xf4, 0x72, 0xb2, 0xf5, 0x8b, 0x01, 0xdb, 0x0f,
0x5d, 0x71, 0x95, 0x20, 0xff, 0x66, 0xc0, 0xf5, 0x1e, 0x0a, 0x8f, 0xd3, 0x21, 0x5e, 0x25, 0xdc,
0x9f, 0x1b, 0xb0, 0x33, 0x18, 0xb3, 0xe3, 0xcb, 0x8c, 0xd9, 0xfa, 0xd9, 0x80, 0xff, 0xa5, 0xdd,
0xe5, 0xa9, 0xcb, 0x25, 0xbd, 0xa4, 0x91, 0x79, 0x1b, 0x9a, 0xc9, 0xcc, 0xbd, 0xe5, 0xc0, 0xdc,
0x3e, 0x3b, 0x30, 0x73, 0x28, 0x3a, 0x2e, 0x8d, 0x64, 0x59, 0xb4, 0x7e, 0x32, 0x60, 0x5b, 0x75,
0x9d, 0xab, 0x82, 0xf7, 0x47, 0x03, 0xb6, 0x1e, 0xba, 0xe2, 0xaa, 0xc0, 0x7d, 0x66, 0x40, 0x6b,
0xd6, 0x6d, 0xae, 0x0a, 0x66, 0xf5, 0xa3, 0xa2, 0x3a, 0xcd, 0x65, 0xc6, 0xfb, 0x2f, 0x37, 0xd7,
0xe7, 0x05, 0x68, 0xf4, 0x63, 0x81, 0x5c, 0xbe, 0x38, 0xac, 0xaf, 0x9d, 0x76, 0x59, 0x21, 0xae,
0xad, 0x3a, 0x43, 0x6e, 0xc3, 0x22, 0x20, 0x8e, 0x74, 0x47, 0x1a, 0x7b, 0xcd, 0x5e, 0x9f, 0x2b,
0x0f, 0xdc, 0x11, 0x79, 0x19, 0x40, 0xe0, 0x28, 0xc2, 0x58, 0x2a, 0x43, 0x65, 0x6d, 0xa8, 0x96,
0x69, 0xfa, 0xbe, 0x5a, 0xf6, 0xc6, 0x6e, 0x1c, 0x63, 0xa8, 0x96, 0x2b, 0xe9, 0x72, 0xa6, 0xe9,
0xfb, 0x39, 0x66, 0xab, 0x79, 0x66, 0x6f, 0x02, 0xcc, 0x23, 0x20, 0x5a, 0x6b, 0xed, 0xe2, 0x5e,
0xc9, 0x5e, 0xd2, 0xa8, 0xc7, 0x31, 0x67, 0xc7, 0x0e, 0xf5, 0x45, 0xab, 0xd6, 0x2e, 0xaa, 0xc7,
0x31, 0x67, 0xc7, 0x7d, 0x5f, 0x90, 0x37, 0x61, 0x4d, 0x2d, 0xf8, 0xae, 0x74, 0x5b, 0xd0, 0x2e,
0x5e, 0xfc, 0x68, 0x53, 0x77, 0xf4, 0x5c, 0xe9, 0x5a, 0x1f, 0x15, 0xa0, 0xd1, 0xc3, 0x10, 0x25,
0x5e, 0x02, 0xe6, 0xf3, 0xac, 0x95, 0x2e, 0x62, 0xad, 0x7c, 0x11, 0x6b, 0x95, 0x53, 0xac, 0xbd,
0x02, 0xeb, 0x09, 0xa7, 0x91, 0xcb, 0xa7, 0x4e, 0x80, 0x53, 0xd1, 0xaa, 0x6a, 0xea, 0xea, 0x99,
0xee, 0x11, 0x4e, 0x85, 0xf5, 0xdc, 0x80, 0xc6, 0x00, 0x5d, 0xee, 0x8d, 0x5f, 0x1c, 0x13, 0xcb,
0x08, 0x8a, 0x79, 0x04, 0xb9, 0x52, 0x2c, 0xad, 0x96, 0xe2, 0x1d, 0xd8, 0xe4, 0x28, 0x26, 0xa1,
0x74, 0x96, 0x08, 0x4a, 0x39, 0xd8, 0x48, 0x17, 0x1e, 0xcc, 0x69, 0xda, 0x87, 0xf2, 0xe1, 0x04,
0xf9, 0x54, 0xa7, 0xdd, 0x85, 0x59, 0x90, 0xee, 0xb3, 0x3e, 0x2d, 0xc0, 0xfa, 0x0c, 0xb9, 0xba,
0xea, 0xef, 0x4d, 0x43, 0x7f, 0x1d, 0xb2, 0x05, 0x0d, 0xed, 0x80, 0x13, 0x33, 0x1f, 0x17, 0x11,
0xaf, 0x6b, 0xe5, 0x13, 0xe6, 0xe3, 0x2a, 0x2d, 0xe5, 0x3f, 0x45, 0x4b, 0xe5, 0x6c, 0x5a, 0x3a,
0x50, 0x1a, 0x53, 0x99, 0x86, 0xbe, 0xde, 0xbd, 0x71, 0x76, 0x9f, 0x7a, 0x48, 0xa5, 0xb0, 0xf5,
0x3e, 0xab, 0x07, 0xf5, 0x03, 0x1a, 0xe1, 0x01, 0xf5, 0x82, 0xc7, 0x62, 0x74, 0xfe, 0x50, 0x7a,
0xe1, 0x14, 0x68, 0x7d, 0x66, 0x40, 0xf5, 0x11, 0x4e, 0xbb, 0x03, 0x1c, 0x69, 0x86, 0x74, 0xe9,
0x66, 0x37, 0x94, 0x75, 0xe5, 0x92, 0x5b, 0x50, 0x5f, 0xca, 0xcd, 0x8c, 0x3d, 0x58, 0xa4, 0xe6,
0x1f, 0x77, 0x69, 0x2a, 0x9c, 0x23, 0x37, 0xcc, 0x08, 0x5c, 0xb3, 0xab, 0x54, 0xbc, 0xa3, 0x44,
0x75, 0xf3, 0xa2, 0x49, 0x89, 0x56, 0x59, 0x27, 0x3d, 0xcc, 0xbb, 0x94, 0xb0, 0xde, 0x07, 0xc8,
0x9c, 0x53, 0x10, 0x17, 0x11, 0x34, 0x96, 0x23, 0xf8, 0x16, 0x54, 0x03, 0x9c, 0x76, 0x05, 0x8e,
0x5a, 0x05, 0xcd, 0xdd, 0x79, 0x55, 0x90, 0x5d, 0x65, 0xcf, 0xb6, 0x5b, 0x31, 0x6c, 0x0e, 0x52,
0x63, 0x2a, 0x57, 0xa8, 0x90, 0xd4, 0x13, 0x2b, 0x9d, 0xd3, 0x58, 0xed, 0x9c, 0xb7, 0xa0, 0x1e,
0x61, 0xc4, 0xf8, 0xd4, 0x11, 0xf4, 0x04, 0x67, 0x6c, 0xa4, 0xaa, 0x01, 0x3d, 0x41, 0x85, 0x37,
0x9e, 0x44, 0x0e, 0x67, 0xc7, 0x62, 0x96, 0x50, 0xf1, 0x24, 0xb2, 0xd9, 0xb1, 0xb8, 0xf3, 0x55,
0x01, 0xaa, 0x59, 0x29, 0x92, 0x1a, 0x94, 0x83, 0x27, 0x2c, 0x46, 0xf3, 0x1a, 0xd9, 0x81, 0xcd,
0x60, 0x75, 0xe6, 0x36, 0x7d, 0xb2, 0x05, 0x1b, 0x41, 0x7e, 0x60, 0x35, 0x91, 0x10, 0x68, 0x06,
0xb9, 0x89, 0xce, 0xfc, 0x80, 0xec, 0xc2, 0x56, 0x70, 0x7a, 0xe4, 0x31, 0x47, 0x64, 0x1b, 0xcc,
0x20, 0x3f, 0x13, 0x08, 0x73, 0x4c, 0x76, 0xc0, 0x0c, 0x56, 0x1e, 0xe1, 0xe6, 0xd7, 0x06, 0xd9,
0x82, 0x66, 0x90, 0x7b, 0xa9, 0x9a, 0xdf, 0x18, 0x84, 0x40, 0x23, 0x58, 0x7e, 0xce, 0x99, 0xdf,
0x1a, 0x64, 0x17, 0x48, 0x70, 0xea, 0xcd, 0x63, 0x7e, 0x67, 0x90, 0x6d, 0xd8, 0x08, 0x72, 0x0f,
0x03, 0x61, 0x7e, 0x6f, 0x90, 0x75, 0xa8, 0x06, 0xe9, 0x6f, 0xa7, 0xf9, 0x71, 0x51, 0x4b, 0x69,
0x3f, 0x37, 0x3f, 0x49, 0xa5, 0xb4, 0xb2, 0xcd, 0x5f, 0x8b, 0xa4, 0x09, 0xb5, 0x60, 0x96, 0xd2,
0xe6, 0x17, 0xb5, 0xb9, 0x3c, 0x98, 0xc6, 0x9e, 0xf9, 0x65, 0xed, 0xce, 0x3d, 0x58, 0x9b, 0xfd,
0x77, 0x42, 0x00, 0x2a, 0x8f, 0x5d, 0x21, 0x91, 0x9b, 0xd7, 0xd4, 0xb7, 0x8d, 0xae, 0x8f, 0xdc,
0x34, 0xd4, 0xf7, 0xbb, 0x9c, 0x2a, 0x7d, 0x41, 0x51, 0xfc, 0x54, 0x95, 0xb2, 0x59, 0xbc, 0xdf,
0x7b, 0xef, 0xfe, 0x88, 0xca, 0xf1, 0x64, 0xa8, 0x9a, 0xc3, 0xfe, 0x09, 0x0d, 0x43, 0x7a, 0x22,
0xd1, 0x1b, 0xef, 0xa7, 0x99, 0xf2, 0xba, 0x4f, 0x85, 0xe4, 0x74, 0x38, 0x91, 0xe8, 0xef, 0xcf,
0xf2, 0x65, 0x5f, 0xa7, 0xcf, 0x5c, 0x4c, 0x86, 0xc3, 0x8a, 0xd6, 0xdc, 0xfd, 0x3d, 0x00, 0x00,
0xff, 0xff, 0x82, 0x1f, 0xa0, 0x91, 0x35, 0x13, 0x00, 0x00,
}
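The hunks above mechanically swap the `MsgType` field and getter for `ReqType` across every generated request struct. As a minimal sketch of consuming the regenerated API (the request values are illustrative; the import path matches the `go_package` option in the deleted proto file below):

```go
package main

import (
	"fmt"

	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

// describe dispatches on the renamed ReqType field instead of the old MsgType.
func describe(req *internalpb.InsertRequest) string {
	switch req.GetReqType() {
	case internalpb.ReqType_kInsert:
		return fmt.Sprintf("insert into %q (req %d)", req.GetCollectionName(), req.GetReqId())
	case internalpb.ReqType_kNone:
		return "uninitialized request"
	default:
		return "unsupported request type"
	}
}

func main() {
	req := &internalpb.InsertRequest{
		ReqType:        internalpb.ReqType_kInsert,
		ReqId:          1,
		CollectionName: "collection0",
	}
	fmt.Println(describe(req))
}
```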

View File

@ -1,93 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: msg_header.proto
package internalpb
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type MsgHeader struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
Message *any.Any `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *MsgHeader) Reset() { *m = MsgHeader{} }
func (m *MsgHeader) String() string { return proto.CompactTextString(m) }
func (*MsgHeader) ProtoMessage() {}
func (*MsgHeader) Descriptor() ([]byte, []int) {
return fileDescriptor_4712536c36da8833, []int{0}
}
func (m *MsgHeader) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MsgHeader.Unmarshal(m, b)
}
func (m *MsgHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MsgHeader.Marshal(b, m, deterministic)
}
func (m *MsgHeader) XXX_Merge(src proto.Message) {
xxx_messageInfo_MsgHeader.Merge(m, src)
}
func (m *MsgHeader) XXX_Size() int {
return xxx_messageInfo_MsgHeader.Size(m)
}
func (m *MsgHeader) XXX_DiscardUnknown() {
xxx_messageInfo_MsgHeader.DiscardUnknown(m)
}
var xxx_messageInfo_MsgHeader proto.InternalMessageInfo
func (m *MsgHeader) GetMsgType() MsgType {
if m != nil {
return m.MsgType
}
return MsgType_kNone
}
func (m *MsgHeader) GetMessage() *any.Any {
if m != nil {
return m.Message
}
return nil
}
func init() {
proto.RegisterType((*MsgHeader)(nil), "milvus.proto.internal.MsgHeader")
}
func init() { proto.RegisterFile("msg_header.proto", fileDescriptor_4712536c36da8833) }
var fileDescriptor_4712536c36da8833 = []byte{
// 222 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8f, 0x31, 0x4b, 0xc4, 0x40,
0x10, 0x85, 0xc9, 0x15, 0x9e, 0x46, 0x10, 0x09, 0x0a, 0xe7, 0x15, 0x72, 0x58, 0xa5, 0x71, 0x16,
0x62, 0x65, 0x69, 0xb0, 0xb0, 0xb9, 0xe6, 0xb0, 0xb2, 0x39, 0xb2, 0x97, 0x71, 0xb2, 0xb0, 0x9b,
0x0d, 0x3b, 0xbb, 0xc2, 0xe6, 0xd7, 0x8b, 0x59, 0xd6, 0xea, 0xba, 0xf7, 0xcd, 0x1b, 0xe6, 0x63,
0xca, 0x5b, 0xc3, 0x74, 0x1c, 0xb0, 0xeb, 0xd1, 0xc1, 0xe4, 0xac, 0xb7, 0xd5, 0xbd, 0x51, 0xfa,
0x27, 0x70, 0x22, 0x50, 0xa3, 0x47, 0x37, 0x76, 0x7a, 0x5b, 0xe5, 0x74, 0x34, 0x4c, 0xa9, 0xdc,
0x3e, 0x90, 0xb5, 0xa4, 0x51, 0x2c, 0x24, 0xc3, 0xb7, 0xe8, 0xc6, 0x98, 0xaa, 0xa7, 0xb9, 0xbc,
0xda, 0x33, 0x7d, 0x2c, 0x87, 0xab, 0xd7, 0xf2, 0xf2, 0x4f, 0xe3, 0xe3, 0x84, 0x9b, 0x62, 0x57,
0xd4, 0x37, 0xcd, 0x23, 0x9c, 0xb5, 0xc0, 0x9e, 0xe9, 0x33, 0x4e, 0x78, 0x58, 0x9b, 0x14, 0xaa,
0xa6, 0x5c, 0x1b, 0x64, 0xee, 0x08, 0x37, 0xab, 0x5d, 0x51, 0x5f, 0x37, 0x77, 0x90, 0xa4, 0x90,
0xa5, 0xf0, 0x36, 0xc6, 0x76, 0x55, 0x17, 0x87, 0xbc, 0xd8, 0xbe, 0x7f, 0xb5, 0xa4, 0xfc, 0x10,
0x24, 0x9c, 0xac, 0x11, 0xb3, 0xd2, 0x5a, 0xcd, 0x1e, 0x4f, 0x83, 0x48, 0xce, 0xe7, 0x5e, 0xb1,
0x77, 0x4a, 0x06, 0x8f, 0xbd, 0xc8, 0xe6, 0xf4, 0xc3, 0x3f, 0x4e, 0x52, 0x5e, 0x2c, 0x93, 0x97,
0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x87, 0xfd, 0x10, 0x22, 0x01, 0x00, 0x00,
}

View File

@ -1,13 +0,0 @@
syntax = "proto3";
package milvus.proto.internal;
option go_package="github.com/zilliztech/milvus-distributed/internal/proto/internalpb";
import "internal_msg.proto";
import "google/protobuf/any.proto";
message MsgHeader {
MsgType msg_type = 1;
google.protobuf.Any message = 2[lazy=true];
}

View File

@ -22,7 +22,7 @@ func (it *insertTask) PreExecute() error {
func (it *insertTask) Execute() error {
ts := it.GetTs()
insertRequest := internalpb.InsertRequest{
MsgType: internalpb.MsgType_kInsert,
ReqType: internalpb.ReqType_kInsert,
ReqId: it.ReqId,
CollectionName: it.rowBatch.CollectionName,
PartitionTag: it.rowBatch.PartitionTag,

View File

@ -52,7 +52,7 @@ func (ins *proxyInstance) restartSchedulerRoutine(bufSize int) error {
select {
case t := <-ins.taskChan:
switch (*t).Type() {
case internalpb.MsgType_kInsert:
case internalpb.ReqType_kInsert:
ins.taskSch.DmQueue.Enqueue(t)
default:
return

View File

@ -15,7 +15,7 @@ import (
)
type BaseRequest interface {
Type() internalpb.MsgType
Type() internalpb.ReqType
PreExecute() commonpb.Status
Execute() commonpb.Status
PostExecute() commonpb.Status
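For orientation, a no-op implementation of the updated interface, covering only the methods visible in this hunk (a sketch; real requests also carry payload and synchronization state, and the interface may declare further methods cut off here):

```go
package proxy

import (
	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

// noopRequest satisfies the ReqType-based BaseRequest contract with stubs.
type noopRequest struct{}

func (r *noopRequest) Type() internalpb.ReqType { return internalpb.ReqType_kNone }

func (r *noopRequest) PreExecute() commonpb.Status {
	return commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS}
}

func (r *noopRequest) Execute() commonpb.Status {
	return commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS}
}

func (r *noopRequest) PostExecute() commonpb.Status {
	return commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS}
}
```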

View File

@ -19,8 +19,8 @@ type queryReq struct {
}
// BaseRequest interfaces
func (req *queryReq) Type() internalpb.MsgType {
return req.MsgType
func (req *queryReq) Type() internalpb.ReqType {
return req.ReqType
}
func (req *queryReq) PreExecute() commonpb.Status {

View File

@ -145,6 +145,38 @@ func (s *proxyServer) ShowPartitions(ctx context.Context, req *servicepb.Collect
}, nil
}
func (s *proxyServer) DeleteByID(ctx context.Context, req *pb.DeleteByIDParam) (*commonpb.Status, error) {
log.Printf("delete entites, total = %d", len(req.IdArray))
mReqMsg := pb.ManipulationReqMsg{
CollectionName: req.CollectionName,
ReqType: pb.ReqType_kDeleteEntityByID,
ProxyId: s.proxyId,
}
for _, id := range req.IdArray {
mReqMsg.PrimaryKeys = append(mReqMsg.PrimaryKeys, id)
}
if len(mReqMsg.PrimaryKeys) > 0 {
mReq := &manipulationReq{
stats: make([]commonpb.Status, 1),
msgs: append([]*pb.ManipulationReqMsg{}, &mReqMsg),
proxy: s,
}
if st := mReq.PreExecute(); st.ErrorCode != commonpb.ErrorCode_SUCCESS {
return &st, nil
}
if st := mReq.Execute(); st.ErrorCode != commonpb.ErrorCode_SUCCESS {
return &st, nil
}
if st := mReq.PostExecute(); st.ErrorCode != commonpb.ErrorCode_SUCCESS {
return &st, nil
}
if st := mReq.WaitToFinish(); st.ErrorCode != commonpb.ErrorCode_SUCCESS {
return &st, nil
}
}
return &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS}, nil
}
func (s *proxyServer) Insert(ctx context.Context, req *servicepb.RowBatch) (*servicepb.IntegerRangeResponse, error) {
log.Printf("Insert Entities, total = %d", len(req.RowData))
msgMap := make(map[uint32]*pb.ManipulationReqMsg)
@ -230,7 +262,7 @@ func (s *proxyServer) Insert(ctx context.Context, req *servicepb.RowBatch) (*ser
func (s *proxyServer) Search(ctx context.Context, req *servicepb.Query) (*servicepb.QueryResult, error) {
qm := &queryReq{
SearchRequest: internalpb.SearchRequest{
MsgType: internalpb.MsgType_kSearch,
ReqType: internalpb.ReqType_kSearch,
ProxyId: s.proxyId,
ReqId: s.queryId.Add(1),
Timestamp: 0,

View File

@ -9,7 +9,7 @@ import (
type task interface {
Id() int64 // return ReqId
Type() internalpb.MsgType
Type() internalpb.ReqType
GetTs() typeutil.Timestamp
SetTs(ts typeutil.Timestamp)
PreExecute() error
@ -20,7 +20,7 @@ type task interface {
}
type baseTask struct {
ReqType internalpb.MsgType
ReqType internalpb.ReqType
ReqId int64
Ts typeutil.Timestamp
ProxyId int64
@ -30,7 +30,7 @@ func (bt *baseTask) Id() int64 {
return bt.ReqId
}
func (bt *baseTask) Type() internalpb.MsgType {
func (bt *baseTask) Type() internalpb.ReqType {
return bt.ReqType
}

View File

@ -46,7 +46,7 @@ func (c *Collection) DeletePartition(node *QueryNode, partition *Partition) {
for _, p := range c.Partitions {
if p.PartitionName == partition.PartitionName {
for _, s := range p.Segments {
delete(node.SegmentsMap, s.SegmentID)
delete(node.SegmentsMap, s.SegmentId)
}
} else {
tmpPartitions = append(tmpPartitions, p)

View File

@ -1,9 +1,5 @@
package reader
import (
"log"
)
type dmNode struct {
BaseNode
dmMsg dmMsg
@ -14,22 +10,7 @@ func (dmNode *dmNode) Name() string {
}
func (dmNode *dmNode) Operate(in []*Msg) []*Msg {
// TODO: add filtered by schema update
// But for now, we think all the messages are valid
if len(in) != 1 {
log.Println("Invalid operate message input in filteredDmNode")
// TODO: add error handling
}
dmMsg, ok := (*in[0]).(*dmMsg)
if !ok {
log.Println("type assertion failed for dmMsg")
// TODO: add error handling
}
var res Msg = dmMsg
return []*Msg{&res}
return in
}
func newDmNode() *dmNode {

View File

@ -1,11 +1,5 @@
package reader
import (
"log"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
)
type filteredDmNode struct {
BaseNode
filteredDmMsg filteredDmMsg
@ -16,41 +10,7 @@ func (fdmNode *filteredDmNode) Name() string {
}
func (fdmNode *filteredDmNode) Operate(in []*Msg) []*Msg {
if len(in) != 1 {
log.Println("Invalid operate message input in filteredDmNode")
// TODO: add error handling
}
fdmMsg, ok := (*in[0]).(*filteredDmMsg)
if !ok {
log.Println("type assertion failed for filteredDmMsg")
// TODO: add error handling
}
insertData := InsertData{
insertIDs: make(map[int64][]int64),
insertTimestamps: make(map[int64][]uint64),
insertRecords: make(map[int64][]*commonpb.Blob),
insertOffset: make(map[int64]int64),
}
var iMsg = insertMsg{
insertData: insertData,
timeRange: fdmMsg.timeRange,
}
for _, task := range fdmMsg.insertMessages {
if len(task.RowIds) != len(task.Timestamps) || len(task.RowIds) != len(task.RowData) {
// TODO: what if the messages are misaligned?
// Here, we ignore those messages and print error
log.Println("Error, misaligned messages detected")
continue
}
iMsg.insertData.insertIDs[task.SegmentId] = append(iMsg.insertData.insertIDs[task.SegmentId], task.RowIds...)
iMsg.insertData.insertTimestamps[task.SegmentId] = append(iMsg.insertData.insertTimestamps[task.SegmentId], task.Timestamps...)
iMsg.insertData.insertRecords[task.SegmentId] = append(iMsg.insertData.insertRecords[task.SegmentId], task.RowData...)
}
var res Msg = &iMsg
return []*Msg{&res}
return in
}
func newFilteredDmNode() *filteredDmNode {

View File

@ -1,17 +1,8 @@
package reader
import (
"errors"
"fmt"
"log"
"strconv"
"sync"
)
type insertNode struct {
BaseNode
SegmentsMap *map[int64]*Segment
insertMsg *insertMsg
insertMsg insertMsg
}
func (iNode *insertNode) Name() string {
@ -19,85 +10,7 @@ func (iNode *insertNode) Name() string {
}
func (iNode *insertNode) Operate(in []*Msg) []*Msg {
if len(in) != 1 {
log.Println("Invalid operate message input in insertNode")
// TODO: add error handling
}
insertMsg, ok := (*in[0]).(*insertMsg)
if !ok {
log.Println("type assertion failed for insertMsg")
// TODO: add error handling
}
iNode.insertMsg = insertMsg
var err = iNode.preInsert()
if err != nil {
log.Println("preInsert failed")
// TODO: add error handling
}
wg := sync.WaitGroup{}
for segmentID := range iNode.insertMsg.insertData.insertRecords {
wg.Add(1)
go iNode.insert(segmentID, &wg)
}
wg.Wait()
var res Msg = &serviceTimeMsg{
timeRange: insertMsg.timeRange,
}
return []*Msg{&res}
}
func (iNode *insertNode) preInsert() error {
for segmentID := range iNode.insertMsg.insertData.insertRecords {
var targetSegment, err = iNode.getSegmentBySegmentID(segmentID)
if err != nil {
return err
}
var numOfRecords = len(iNode.insertMsg.insertData.insertRecords[segmentID])
var offset = targetSegment.SegmentPreInsert(numOfRecords)
iNode.insertMsg.insertData.insertOffset[segmentID] = offset
}
return nil
}
func (iNode *insertNode) getSegmentBySegmentID(segmentID int64) (*Segment, error) {
targetSegment, ok := (*iNode.SegmentsMap)[segmentID]
if !ok {
return nil, errors.New("cannot found segment with id = " + strconv.FormatInt(segmentID, 10))
}
return targetSegment, nil
}
func (iNode *insertNode) insert(segmentID int64, wg *sync.WaitGroup) {
var targetSegment, err = iNode.getSegmentBySegmentID(segmentID)
if err != nil {
log.Println("cannot find segment:", segmentID)
// TODO: add error handling
return
}
ids := iNode.insertMsg.insertData.insertIDs[segmentID]
timestamps := iNode.insertMsg.insertData.insertTimestamps[segmentID]
records := iNode.insertMsg.insertData.insertRecords[segmentID]
offsets := iNode.insertMsg.insertData.insertOffset[segmentID]
err = targetSegment.SegmentInsert(offsets, &ids, &timestamps, &records)
if err != nil {
log.Println("insert failed")
// TODO: add error handling
return
}
fmt.Println("Do insert done, len = ", len(iNode.insertMsg.insertData.insertIDs[segmentID]))
wg.Done()
return in
}
func newInsertNode() *insertNode {

View File

@ -6,21 +6,13 @@ import (
"log"
"sync"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
msgPb "github.com/zilliztech/milvus-distributed/internal/proto/message"
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)
type manipulationService struct {
ctx context.Context
fg *flowgraph.TimeTickedFlowGraph
msgStream *msgstream.PulsarMsgStream
}
func (dmService *manipulationService) Start() {
dmService.initNodes()
go dmService.fg.Start()
dmService.consumeFromMsgStream()
ctx context.Context
fg *flowgraph.TimeTickedFlowGraph
}
func (dmService *manipulationService) initNodes() {
@ -93,34 +85,9 @@ func (dmService *manipulationService) initNodes() {
log.Fatal("set edges failed in node:", serviceTimeNode.Name())
}
err = dmService.fg.SetStartNode(msgStreamNode.Name())
if err != nil {
log.Fatal("set start node failed")
}
// TODO: add top nodes's initialization
}
func (dmService *manipulationService) consumeFromMsgStream() {
for {
select {
case <-dmService.ctx.Done():
log.Println("service stop")
return
default:
msgPack := dmService.msgStream.Consume()
var msgStreamMsg Msg = &msgStreamMsg{
tsMessages: msgPack.Msgs,
timeRange: TimeRange{
timestampMin: Timestamp(msgPack.BeginTs),
timestampMax: Timestamp(msgPack.EndTs),
},
}
dmService.fg.Input(&msgStreamMsg)
}
}
}
func (node *QueryNode) MessagesPreprocess(insertDeleteMessages []*msgPb.InsertOrDeleteMsg, timeRange TimeRange) msgPb.Status {
var tMax = timeRange.timestampMax
@ -149,7 +116,7 @@ func (node *QueryNode) MessagesPreprocess(insertDeleteMessages []*msgPb.InsertOr
}
node.insertData.insertIDs[msg.SegmentId] = append(node.insertData.insertIDs[msg.SegmentId], msg.Uid)
node.insertData.insertTimestamps[msg.SegmentId] = append(node.insertData.insertTimestamps[msg.SegmentId], msg.Timestamp)
// node.insertData.insertRecords[msg.SegmentID] = append(node.insertData.insertRecords[msg.SegmentID], msg.RowsData.Blob)
node.insertData.insertRecords[msg.SegmentId] = append(node.insertData.insertRecords[msg.SegmentId], msg.RowsData.Blob)
} else if msg.Op == msgPb.OpType_DELETE {
var r = DeleteRecord{
entityID: msg.Uid,
@ -203,7 +170,7 @@ func (node *QueryNode) MessagesPreprocess(insertDeleteMessages []*msgPb.InsertOr
}
node.insertData.insertIDs[msg.SegmentId] = append(node.insertData.insertIDs[msg.SegmentId], msg.Uid)
node.insertData.insertTimestamps[msg.SegmentId] = append(node.insertData.insertTimestamps[msg.SegmentId], msg.Timestamp)
// node.insertData.insertRecords[msg.SegmentID] = append(node.insertData.insertRecords[msg.SegmentID], msg.RowsData.Blob)
node.insertData.insertRecords[msg.SegmentId] = append(node.insertData.insertRecords[msg.SegmentId], msg.RowsData.Blob)
} else if msg.Op == msgPb.OpType_DELETE {
var r = DeleteRecord{
entityID: msg.Uid,

View File

@ -93,8 +93,7 @@ func TestInsertAndDelete_WriterDelete(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"
@ -196,8 +195,7 @@ func TestInsertAndDelete_PreInsertAndDelete(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"
@ -329,8 +327,7 @@ func TestInsertAndDelete_DoInsert(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -420,8 +417,7 @@ func TestInsertAndDelete_DoDelete(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"
@ -540,8 +536,7 @@ func TestInsertAndDelete_DoInsertAndDelete(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"

View File

@ -2,7 +2,6 @@ package reader
import (
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)
@ -14,10 +13,8 @@ type msgStreamMsg struct {
}
type dmMsg struct {
insertMessages []*msgstream.InsertTask
// TODO: add delete message support
// deleteMessages []*msgstream.DeleteTask
timeRange TimeRange
tsMessages []*msgstream.TsMsg
timeRange TimeRange
}
type key2SegMsg struct {
@ -30,10 +27,8 @@ type schemaUpdateMsg struct {
}
type filteredDmMsg struct {
insertMessages []*msgstream.InsertTask
// TODO: add delete message support
// deleteMessages []*msgstream.DeleteTask
timeRange TimeRange
tsMessages []*msgstream.TsMsg
timeRange TimeRange
}
type insertMsg struct {
@ -58,7 +53,7 @@ type serviceTimeMsg struct {
type InsertData struct {
insertIDs map[int64][]int64
insertTimestamps map[int64][]uint64
insertRecords map[int64][]*commonpb.Blob
insertRecords map[int64][][]byte
insertOffset map[int64]int64
}
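With `insertRecords` now holding raw row bytes (`[][]byte`) rather than `*commonpb.Blob`, building the per-segment buffers looks roughly like this (a sketch inside the reader package; the segment id, row id, timestamp and payload are illustrative):

```go
// buildInsertData groups one illustrative row under segment 0 using the
// reworked [][]byte record layout.
func buildInsertData() InsertData {
	data := InsertData{
		insertIDs:        make(map[int64][]int64),
		insertTimestamps: make(map[int64][]uint64),
		insertRecords:    make(map[int64][][]byte),
		insertOffset:     make(map[int64]int64),
	}
	const segmentID = int64(0)
	data.insertIDs[segmentID] = append(data.insertIDs[segmentID], 1)
	data.insertTimestamps[segmentID] = append(data.insertTimestamps[segmentID], 42)
	data.insertRecords[segmentID] = append(data.insertRecords[segmentID], []byte("raw row bytes"))
	return data
}
```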

View File

@ -23,12 +23,12 @@ const (
SegmentPrefix = "/segment/"
)
func GetCollectionObjID(key string) string {
func GetCollectionObjId(key string) string {
prefix := path.Join(conf.Config.Etcd.Rootpath, CollectionPrefix) + "/"
return strings.TrimPrefix(key, prefix)
}
func GetSegmentObjID(key string) string {
func GetSegmentObjId(key string) string {
prefix := path.Join(conf.Config.Etcd.Rootpath, SegmentPrefix) + "/"
return strings.TrimPrefix(key, prefix)
}
@ -133,10 +133,10 @@ func (node *QueryNode) processSegmentCreate(id string, value string) {
func (node *QueryNode) processCreate(key string, msg string) {
println("process create", key)
if isCollectionObj(key) {
objID := GetCollectionObjID(key)
objID := GetCollectionObjId(key)
node.processCollectionCreate(objID, msg)
} else if isSegmentObj(key) {
objID := GetSegmentObjID(key)
objID := GetSegmentObjId(key)
node.processSegmentCreate(objID, msg)
} else {
println("can not process create msg:", key)
@ -170,10 +170,10 @@ func (node *QueryNode) processCollectionModify(id string, value string) {
func (node *QueryNode) processModify(key string, msg string) {
// println("process modify")
if isCollectionObj(key) {
objID := GetCollectionObjID(key)
objID := GetCollectionObjId(key)
node.processCollectionModify(objID, msg)
} else if isSegmentObj(key) {
objID := GetSegmentObjID(key)
objID := GetSegmentObjId(key)
node.processSegmentModify(objID, msg)
} else {
println("can not process modify msg:", key)
@ -183,7 +183,7 @@ func (node *QueryNode) processModify(key string, msg string) {
func (node *QueryNode) processSegmentDelete(id string) {
println("Delete segment: ", id)
segmentID, err := strconv.ParseInt(id, 10, 64)
segmentId, err := strconv.ParseInt(id, 10, 64)
if err != nil {
log.Println("Cannot parse segment id:" + id)
}
@ -191,7 +191,7 @@ func (node *QueryNode) processSegmentDelete(id string) {
for _, col := range node.Collections {
for _, p := range col.Partitions {
for _, s := range p.Segments {
if s.SegmentID == segmentID {
if s.SegmentId == segmentId {
p.DeleteSegment(node, s)
}
}
@ -202,22 +202,22 @@ func (node *QueryNode) processSegmentDelete(id string) {
func (node *QueryNode) processCollectionDelete(id string) {
println("Delete collection: ", id)
collectionID, err := strconv.ParseInt(id, 10, 64)
collectionId, err := strconv.ParseInt(id, 10, 64)
if err != nil {
log.Println("Cannot parse collection id:" + id)
}
targetCollection := node.GetCollectionByID(collectionID)
targetCollection := node.GetCollectionByID(collectionId)
node.DeleteCollection(targetCollection)
}
func (node *QueryNode) processDelete(key string) {
println("process delete")
if isCollectionObj(key) {
objID := GetCollectionObjID(key)
objID := GetCollectionObjId(key)
node.processCollectionDelete(objID)
} else if isSegmentObj(key) {
objID := GetSegmentObjID(key)
objID := GetSegmentObjId(key)
node.processSegmentDelete(objID)
} else {
println("can not process delete msg:", key)
@ -256,7 +256,7 @@ func (node *QueryNode) loadCollections() error {
return err
}
for i := range keys {
objID := GetCollectionObjID(keys[i])
objID := GetCollectionObjId(keys[i])
node.processCollectionCreate(objID, values[i])
}
return nil
@ -267,7 +267,7 @@ func (node *QueryNode) loadSegments() error {
return err
}
for i := range keys {
objID := GetSegmentObjID(keys[i])
objID := GetSegmentObjId(keys[i])
node.processSegmentCreate(objID, values[i])
}
return nil

View File

@ -20,28 +20,28 @@ func TestMeta_GetCollectionObjId(t *testing.T) {
conf.LoadConfig("config.yaml")
var key = "/collection/collection0"
var collectionObjID1 = GetCollectionObjID(key)
var collectionObjId1 = GetCollectionObjId(key)
assert.Equal(t, collectionObjID1, "/collection/collection0")
assert.Equal(t, collectionObjId1, "/collection/collection0")
key = "fakeKey"
var collectionObjID2 = GetCollectionObjID(key)
var collectionObjId2 = GetCollectionObjId(key)
assert.Equal(t, collectionObjID2, "fakeKey")
assert.Equal(t, collectionObjId2, "fakeKey")
}
func TestMeta_GetSegmentObjId(t *testing.T) {
conf.LoadConfig("config.yaml")
var key = "/segment/segment0"
var segmentObjID1 = GetSegmentObjID(key)
var segmentObjId1 = GetSegmentObjId(key)
assert.Equal(t, segmentObjID1, "/segment/segment0")
assert.Equal(t, segmentObjId1, "/segment/segment0")
key = "fakeKey"
var segmentObjID2 = GetSegmentObjID(key)
var segmentObjId2 = GetSegmentObjId(key)
assert.Equal(t, segmentObjID2, "fakeKey")
assert.Equal(t, segmentObjId2, "fakeKey")
}
func TestMeta_isCollectionObj(t *testing.T) {
@ -158,8 +158,7 @@ func TestMeta_ProcessCollectionCreate(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -186,8 +185,7 @@ func TestMeta_ProcessSegmentCreate(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -204,7 +202,7 @@ func TestMeta_ProcessSegmentCreate(t *testing.T) {
node.processSegmentCreate(id, value)
s := node.SegmentsMap[int64(0)]
assert.Equal(t, s.SegmentID, int64(0))
assert.Equal(t, s.SegmentId, int64(0))
assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
assert.Equal(t, s.SegmentStatus, 0)
}
@ -213,8 +211,7 @@ func TestMeta_ProcessCreate(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -245,7 +242,7 @@ func TestMeta_ProcessCreate(t *testing.T) {
node.processCreate(key2, msg2)
s := node.SegmentsMap[int64(0)]
assert.Equal(t, s.SegmentID, int64(0))
assert.Equal(t, s.SegmentId, int64(0))
assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
assert.Equal(t, s.SegmentStatus, 0)
}
@ -254,8 +251,7 @@ func TestMeta_ProcessSegmentModify(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -272,7 +268,7 @@ func TestMeta_ProcessSegmentModify(t *testing.T) {
node.processSegmentCreate(id, value)
var s = node.SegmentsMap[int64(0)]
assert.Equal(t, s.SegmentID, int64(0))
assert.Equal(t, s.SegmentId, int64(0))
assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
assert.Equal(t, s.SegmentStatus, 0)
@ -284,7 +280,7 @@ func TestMeta_ProcessSegmentModify(t *testing.T) {
node.processSegmentModify(id, newValue)
s = node.SegmentsMap[int64(0)]
assert.Equal(t, s.SegmentID, int64(0))
assert.Equal(t, s.SegmentId, int64(0))
assert.Equal(t, s.SegmentCloseTime, uint64(70368744177888))
assert.Equal(t, s.SegmentStatus, 0)
}
@ -293,8 +289,7 @@ func TestMeta_ProcessCollectionModify(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -338,8 +333,7 @@ func TestMeta_ProcessModify(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -370,7 +364,7 @@ func TestMeta_ProcessModify(t *testing.T) {
node.processCreate(key2, msg2)
s := node.SegmentsMap[int64(0)]
assert.Equal(t, s.SegmentID, int64(0))
assert.Equal(t, s.SegmentId, int64(0))
assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
assert.Equal(t, s.SegmentStatus, 0)
@ -400,7 +394,7 @@ func TestMeta_ProcessModify(t *testing.T) {
node.processModify(key2, msg4)
s = node.SegmentsMap[int64(0)]
assert.Equal(t, s.SegmentID, int64(0))
assert.Equal(t, s.SegmentId, int64(0))
assert.Equal(t, s.SegmentCloseTime, uint64(70368744177888))
assert.Equal(t, s.SegmentStatus, 0)
}
@ -409,8 +403,7 @@ func TestMeta_ProcessSegmentDelete(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -427,7 +420,7 @@ func TestMeta_ProcessSegmentDelete(t *testing.T) {
node.processSegmentCreate(id, value)
s := node.SegmentsMap[int64(0)]
assert.Equal(t, s.SegmentID, int64(0))
assert.Equal(t, s.SegmentId, int64(0))
assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
assert.Equal(t, s.SegmentStatus, 0)
@ -441,8 +434,7 @@ func TestMeta_ProcessCollectionDelete(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -474,8 +466,7 @@ func TestMeta_ProcessDelete(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -506,7 +497,7 @@ func TestMeta_ProcessDelete(t *testing.T) {
node.processCreate(key2, msg2)
s := node.SegmentsMap[int64(0)]
assert.Equal(t, s.SegmentID, int64(0))
assert.Equal(t, s.SegmentId, int64(0))
assert.Equal(t, s.SegmentCloseTime, uint64(70368744177663))
assert.Equal(t, s.SegmentStatus, 0)
@ -524,8 +515,7 @@ func TestMeta_ProcessResp(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -547,8 +537,7 @@ func TestMeta_LoadCollections(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -564,8 +553,7 @@ func TestMeta_LoadSegments(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -581,8 +569,7 @@ func TestMeta_InitFromMeta(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
node := CreateQueryNode(ctx, 0, 0, &mc)
@ -595,8 +582,7 @@ func TestMeta_RunMetaService(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
node := CreateQueryNode(ctx, 0, 0, nil)

View File

@ -1,11 +1,5 @@
package reader
import (
"log"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
)
type msgStreamNode struct {
BaseNode
msgStreamMsg msgStreamMsg
@ -16,36 +10,7 @@ func (msNode *msgStreamNode) Name() string {
}
func (msNode *msgStreamNode) Operate(in []*Msg) []*Msg {
if len(in) != 1 {
log.Println("Invalid operate message input in msgStreamNode")
// TODO: add error handling
}
streamMsg, ok := (*in[0]).(*msgStreamMsg)
if !ok {
log.Println("type assertion failed for msgStreamMsg")
// TODO: add error handling
}
// TODO: add time range check
var dmMsg = dmMsg{
insertMessages: make([]*msgstream.InsertTask, 0),
// deleteMessages: make([]*msgstream.DeleteTask, 0),
timeRange: streamMsg.timeRange,
}
for _, msg := range streamMsg.tsMessages {
switch (*msg).Type() {
case msgstream.KInsert:
dmMsg.insertMessages = append(dmMsg.insertMessages, (*msg).(*msgstream.InsertTask))
// case msgstream.KDelete:
// dmMsg.deleteMessages = append(dmMsg.deleteMessages, (*msg).(*msgstream.DeleteTask))
default:
log.Println("Non supporting message type:", (*msg).Type())
}
}
var res Msg = &dmMsg
return []*Msg{&res}
return in
}
func newMsgStreamNode() *msgStreamNode {

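The flowgraph nodes above (dmNode, filteredDmNode, insertNode, msgStreamNode) now all reduce to pass-throughs. A minimal sketch of the node contract they share, assuming the reader package's BaseNode and Msg types:

```go
// passThroughNode forwards its input unchanged, mirroring the slimmed-down
// Operate bodies above; a real node would filter or transform before forwarding.
type passThroughNode struct {
	BaseNode
}

func (n *passThroughNode) Name() string { return "passThroughNode" }

func (n *passThroughNode) Operate(in []*Msg) []*Msg {
	return in
}
```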
View File

@ -19,14 +19,14 @@ type Partition struct {
Segments []*Segment
}
func (p *Partition) NewSegment(segmentID int64) *Segment {
func (p *Partition) NewSegment(segmentId int64) *Segment {
/*
CSegmentBase
NewSegment(CPartition partition, unsigned long segment_id);
*/
segmentPtr := C.NewSegment(p.PartitionPtr, C.ulong(segmentID))
segmentPtr := C.NewSegment(p.PartitionPtr, C.ulong(segmentId))
var newSegment = &Segment{SegmentPtr: segmentPtr, SegmentID: segmentID}
var newSegment = &Segment{SegmentPtr: segmentPtr, SegmentId: segmentId}
p.Segments = append(p.Segments, newSegment)
return newSegment
}
@ -42,8 +42,8 @@ func (p *Partition) DeleteSegment(node *QueryNode, segment *Segment) {
tmpSegments := make([]*Segment, 0)
for _, s := range p.Segments {
if s.SegmentID == segment.SegmentID {
delete(node.SegmentsMap, s.SegmentID)
if s.SegmentId == segment.SegmentId {
delete(node.SegmentsMap, s.SegmentId)
} else {
tmpSegments = append(tmpSegments, s)
}
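Creating and tearing down a segment through the renamed SegmentId field, as a sketch (the node and partition are assumed to come from the surrounding reader package setup):

```go
// addAndDropSegment allocates a segment with id 0, checks the renamed field,
// and removes it again from both the partition and the node's SegmentsMap.
func addAndDropSegment(node *QueryNode, p *Partition) {
	seg := p.NewSegment(0)
	if seg.SegmentId != 0 {
		panic("unexpected segment id")
	}
	p.DeleteSegment(node, seg)
}
```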

View File

@ -20,13 +20,13 @@ func TestPartition_NewSegment(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, collection.CollectionID, uint64(0))
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, node.Collections[0].Partitions[0].Segments[0].SegmentID, int64(0))
assert.Equal(t, node.Collections[0].Partitions[0].Segments[0].SegmentId, int64(0))
assert.Equal(t, len(collection.Partitions), 1)
assert.Equal(t, len(node.Collections), 1)
assert.Equal(t, len(node.Collections[0].Partitions[0].Segments), 1)
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, node.FoundSegmentBySegmentID(int64(0)), true)
}
@ -44,13 +44,13 @@ func TestPartition_DeleteSegment(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, collection.CollectionID, uint64(0))
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, node.Collections[0].Partitions[0].Segments[0].SegmentID, int64(0))
assert.Equal(t, node.Collections[0].Partitions[0].Segments[0].SegmentId, int64(0))
assert.Equal(t, len(collection.Partitions), 1)
assert.Equal(t, len(node.Collections), 1)
assert.Equal(t, len(node.Collections[0].Partitions[0].Segments), 1)
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
// 2. Destruct collection, partition and segment
partition.DeleteSegment(node, segment)

View File

@ -15,9 +15,10 @@ import "C"
import (
"context"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"time"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/msgclient"
msgPb "github.com/zilliztech/milvus-distributed/internal/proto/message"
@ -85,7 +86,7 @@ type QueryNode struct {
// context
ctx context.Context
QueryNodeID uint64
QueryNodeId uint64
Collections []*Collection
SegmentsMap map[int64]*Segment
messageClient *msgclient.ReaderMessageClient
@ -99,7 +100,7 @@ type QueryNode struct {
InsertLogs []InsertLog
}
func NewQueryNode(ctx context.Context, queryNodeID uint64, timeSync uint64) *QueryNode {
func NewQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64) *QueryNode {
mc := msgclient.ReaderMessageClient{}
queryNodeTimeSync := &QueryNodeTime{
@ -127,7 +128,7 @@ func NewQueryNode(ctx context.Context, queryNodeID uint64, timeSync uint64) *Que
return &QueryNode{
ctx: ctx,
QueryNodeID: queryNodeID,
QueryNodeId: queryNodeId,
Collections: nil,
SegmentsMap: segmentsMap,
messageClient: &mc,
@ -146,7 +147,7 @@ func (node *QueryNode) Close() {
}
}
func CreateQueryNode(ctx context.Context, queryNodeID uint64, timeSync uint64, mc *msgclient.ReaderMessageClient) *QueryNode {
func CreateQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64, mc *msgclient.ReaderMessageClient) *QueryNode {
queryNodeTimeSync := &QueryNodeTime{
ReadTimeSyncMin: timeSync,
ReadTimeSyncMax: timeSync,
@ -175,7 +176,7 @@ func CreateQueryNode(ctx context.Context, queryNodeID uint64, timeSync uint64, m
return &QueryNode{
ctx: ctx,
QueryNodeID: queryNodeID,
QueryNodeId: queryNodeId,
Collections: nil,
SegmentsMap: segmentsMap,
messageClient: mc,
@ -201,8 +202,8 @@ func (node *QueryNode) QueryNodeDataInit() {
insertData := InsertData{
insertIDs: make(map[int64][]int64),
insertTimestamps: make(map[int64][]uint64),
// insertRecords: make(map[int64][][]byte),
insertOffset: make(map[int64]int64),
insertRecords: make(map[int64][][]byte),
insertOffset: make(map[int64]int64),
}
node.deletePreprocessData = deletePreprocessData
@ -235,7 +236,7 @@ func (node *QueryNode) DeleteCollection(collection *Collection) {
if col.CollectionID == collectionID {
for _, p := range collection.Partitions {
for _, s := range p.Segments {
delete(node.SegmentsMap, s.SegmentID)
delete(node.SegmentsMap, s.SegmentId)
}
}
} else {


@ -1,34 +1,34 @@
package reader
type QueryNodeTime struct {
ReadTimeSyncMin Timestamp
ReadTimeSyncMax Timestamp
WriteTimeSync Timestamp
ServiceTimeSync Timestamp
TSOTimeSync Timestamp
ReadTimeSyncMin uint64
ReadTimeSyncMax uint64
WriteTimeSync uint64
ServiceTimeSync uint64
TSOTimeSync uint64
}
type TimeRange struct {
timestampMin Timestamp
timestampMax Timestamp
timestampMin uint64
timestampMax uint64
}
func (t *QueryNodeTime) updateReadTimeSync() {
func (t *QueryNodeTime) UpdateReadTimeSync() {
t.ReadTimeSyncMin = t.ReadTimeSyncMax
// TODO: Add time sync
t.ReadTimeSyncMax = 1
}
func (t *QueryNodeTime) updateWriteTimeSync() {
func (t *QueryNodeTime) UpdateWriteTimeSync() {
// TODO: Add time sync
t.WriteTimeSync = 0
}
func (t *QueryNodeTime) updateSearchServiceTime(timeRange TimeRange) {
func (t *QueryNodeTime) UpdateSearchTimeSync(timeRange TimeRange) {
t.ServiceTimeSync = timeRange.timestampMax
}
func (t *QueryNodeTime) updateTSOTimeSync() {
func (t *QueryNodeTime) UpdateTSOTimeSync() {
// TODO: Add time sync
t.TSOTimeSync = 0
}
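
This hunk replaces the `Timestamp` alias with bare `uint64`. Since `typeutil` declares `type Timestamp = uint64` (an alias, not a defined type — see the typeutil hunk further down), the two spellings name the same type and the change is purely cosmetic. A minimal sketch of why the alias is interchangeable:

```go
package typeutil

// Timestamp is a type alias, so Timestamp and uint64 are the same type:
// values cross the boundary with no conversion. A defined type
// (`type Timestamp uint64`) would instead require explicit casts.
type Timestamp = uint64

func demo() {
	var a Timestamp = 42
	var b uint64 = a // legal without conversion
	_ = a + b        // mixed arithmetic is also legal
}
```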


@ -15,7 +15,7 @@ func TestQueryNodeTime_UpdateReadTimeSync(t *testing.T) {
TSOTimeSync: uint64(4),
}
queryNodeTimeSync.updateReadTimeSync()
queryNodeTimeSync.UpdateReadTimeSync()
assert.Equal(t, queryNodeTimeSync.ReadTimeSyncMin, uint64(1))
}
@ -33,15 +33,15 @@ func TestQueryNodeTime_UpdateSearchTimeSync(t *testing.T) {
timestampMin: 0,
timestampMax: 1,
}
queryNodeTimeSync.updateSearchServiceTime(timeRange)
queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
assert.Equal(t, queryNodeTimeSync.ServiceTimeSync, uint64(1))
}
func TestQueryNodeTime_UpdateTSOTimeSync(t *testing.T) {
// TODO: add updateTSOTimeSync test
// TODO: add UpdateTSOTimeSync test
}
func TestQueryNodeTime_UpdateWriteTimeSync(t *testing.T) {
// TODO: add updateWriteTimeSync test
// TODO: add UpdateWriteTimeSync test
}


@ -10,8 +10,7 @@ import (
func TestQueryNode_CreateQueryNode(t *testing.T) {
conf.LoadConfig("config.yaml")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx, _ := context.WithCancel(context.Background())
node := CreateQueryNode(ctx, 0, 0, nil)
assert.NotNil(t, node)
@ -19,8 +18,7 @@ func TestQueryNode_CreateQueryNode(t *testing.T) {
func TestQueryNode_NewQueryNode(t *testing.T) {
conf.LoadConfig("config.yaml")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx, _ := context.WithCancel(context.Background())
node := NewQueryNode(ctx, 0, 0)
assert.NotNil(t, node)
@ -28,8 +26,7 @@ func TestQueryNode_NewQueryNode(t *testing.T) {
func TestQueryNode_Close(t *testing.T) {
conf.LoadConfig("config.yaml")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx, _ := context.WithCancel(context.Background())
node := CreateQueryNode(ctx, 0, 0, nil)
assert.NotNil(t, node)
@ -39,8 +36,7 @@ func TestQueryNode_Close(t *testing.T) {
func TestQueryNode_QueryNodeDataInit(t *testing.T) {
conf.LoadConfig("config.yaml")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx, _ := context.WithCancel(context.Background())
node := CreateQueryNode(ctx, 0, 0, nil)
assert.NotNil(t, node)
@ -54,8 +50,7 @@ func TestQueryNode_QueryNodeDataInit(t *testing.T) {
func TestQueryNode_NewCollection(t *testing.T) {
conf.LoadConfig("config.yaml")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx, _ := context.WithCancel(context.Background())
node := CreateQueryNode(ctx, 0, 0, nil)
assert.NotNil(t, node)
@ -68,8 +63,7 @@ func TestQueryNode_NewCollection(t *testing.T) {
func TestQueryNode_DeleteCollection(t *testing.T) {
conf.LoadConfig("config.yaml")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx, _ := context.WithCancel(context.Background())
node := CreateQueryNode(ctx, 0, 0, nil)
assert.NotNil(t, node)
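
These test hunks drop `defer cancel()` and discard the `CancelFunc` via `ctx, _ := context.WithCancel(...)`. That is exactly what `go vet`'s lostcancel check flags: the discarded cancel leaks the context's resources until the parent context finishes. A minimal sketch of the vet-clean pattern, reusing the `CreateQueryNode` call from the tests above (the test name is hypothetical):

```go
package reader

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/zilliztech/milvus-distributed/internal/conf"
)

// Hypothetical test illustrating the lostcancel-safe pattern: the
// CancelFunc is kept and deferred so every exit path releases the
// context's resources.
func TestQueryNode_ContextLifecycle(t *testing.T) {
	conf.LoadConfig("config.yaml")
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // release the context on every exit path

	node := CreateQueryNode(ctx, 0, 0, nil)
	assert.NotNil(t, node)
	node.Close()
}
```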


@ -59,7 +59,7 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
}
if msgLen[0] == 0 && len(node.buffer.InsertDeleteBuffer) <= 0 {
node.queryNodeTimeSync.updateSearchServiceTime(timeRange)
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
continue
}
@ -71,7 +71,7 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
//fmt.Println("PreInsertAndDelete Done")
node.DoInsertAndDelete()
//fmt.Println("DoInsertAndDelete Done")
node.queryNodeTimeSync.updateSearchServiceTime(timeRange)
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
}
}
} else {
@ -87,7 +87,7 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
assert.NotEqual(nil, 0, timeRange.timestampMax)
if msgLen[0] == 0 && len(node.buffer.InsertDeleteBuffer) <= 0 {
node.queryNodeTimeSync.updateSearchServiceTime(timeRange)
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
continue
}
@ -99,10 +99,11 @@ func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
//fmt.Println("PreInsertAndDelete Done")
node.DoInsertAndDelete()
//fmt.Println("DoInsertAndDelete Done")
node.queryNodeTimeSync.updateSearchServiceTime(timeRange)
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
}
}
}
wg.Done()
}
func (node *QueryNode) RunSearch(wg *sync.WaitGroup) {
@ -128,4 +129,5 @@ func (node *QueryNode) RunSearch(wg *sync.WaitGroup) {
default:
}
}
wg.Done()
}
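
The hunks above append `wg.Done()` at the very end of `RunInsertDelete` and `RunSearch`. A common hardening is to defer it at function entry so early returns and panics still decrement the WaitGroup; a sketch under that assumption, with a hypothetical worker name and the loop body elided:

```go
package reader

import "sync"

// runWorker is a hypothetical stand-in for RunInsertDelete / RunSearch.
// Deferring Done at entry covers every exit path, including panics and
// any early return added later.
func (node *QueryNode) runWorker(wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case <-node.ctx.Done():
			return
		default:
			// ... process one batch of messages ...
		}
	}
}
```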


@ -19,8 +19,7 @@ func TestReader_startQueryNode(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
pulsarAddr := "pulsar://"
pulsarAddr += conf.Config.Pulsar.Address
@ -38,8 +37,7 @@ func TestReader_RunInsertDelete(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"
@ -69,8 +67,7 @@ func TestReader_RunSearch(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"


@ -20,8 +20,7 @@ func TestResult_PublishSearchResult(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"
@ -62,8 +61,7 @@ func TestResult_PublishFailedSearchResult(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"
@ -91,8 +89,7 @@ func TestResult_PublicStatistic(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"


@ -11,7 +11,7 @@ import (
func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
type SearchResultTmp struct {
ResultID int64
ResultId int64
ResultDistance float32
}
@ -20,7 +20,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
// Traverse all messages in the current messageClient.
// TODO: Do not receive batched search requests
for _, msg := range searchMessages {
var clientID = msg.ClientId
var clientId = msg.ClientId
var searchTimestamp = msg.Timestamp
// ServiceTimeSync is updated by TimeSync, which is obtained from the proxy.
@ -34,7 +34,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
var vector = msg.Records
// For now, only the first Json is valid.
var queryJSON = msg.Json[0]
var queryJson = msg.Json[0]
// 1. Timestamp check
// TODO: return or wait? Or adding graceful time
@ -44,7 +44,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
}
// 2. Get query information from query json
query := node.QueryJSON2Info(&queryJSON)
query := node.QueryJson2Info(&queryJson)
// 2d slice for receiving multiple queries' results
var resultsTmp = make([][]SearchResultTmp, query.NumQueries)
for i := 0; i < int(query.NumQueries); i++ {
@ -58,7 +58,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
continue
}
//fmt.Println("Search in segment:", segment.SegmentID, ",segment rows:", segment.GetRowCount())
//fmt.Println("Search in segment:", segment.SegmentId, ",segment rows:", segment.GetRowCount())
var res, err = segment.SegmentSearch(query, searchTimestamp, vector)
if err != nil {
fmt.Println(err.Error())
@ -68,7 +68,7 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
for i := 0; i < int(query.NumQueries); i++ {
for j := i * query.TopK; j < (i+1)*query.TopK; j++ {
resultsTmp[i] = append(resultsTmp[i], SearchResultTmp{
ResultID: res.ResultIds[j],
ResultId: res.ResultIds[j],
ResultDistance: res.ResultDistances[j],
})
}
@ -98,11 +98,11 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
Entities: &entities,
Distances: make([]float32, 0),
QueryId: msg.Uid,
ProxyId: clientID,
ProxyId: clientId,
}
for _, rTmp := range resultsTmp {
for _, res := range rTmp {
results.Entities.Ids = append(results.Entities.Ids, res.ResultID)
results.Entities.Ids = append(results.Entities.Ids, res.ResultId)
results.Distances = append(results.Distances, res.ResultDistance)
results.Scores = append(results.Distances, float32(0))
}
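
The reduction step above appends every segment's hits into `resultsTmp`, one slice per query. Between gathering and publishing, each query's hits are normally sorted by distance and cut to `TopK`; a hedged sketch of that reduction (the ascending sort assumes a smaller-is-better metric such as L2, which this hunk does not state):

```go
package reader

import "sort"

// searchResultTmp is a local copy of the hunk's function-scoped
// SearchResultTmp, repeated here so the sketch is self-contained.
type searchResultTmp struct {
	ResultId       int64
	ResultDistance float32
}

// reduceTopK sorts one query's hits by ascending distance and keeps the
// best topK. Sketch only; the commit's Search keeps this logic inline.
func reduceTopK(hits []searchResultTmp, topK int) []searchResultTmp {
	sort.Slice(hits, func(i, j int) bool {
		return hits[i].ResultDistance < hits[j].ResultDistance
	})
	if len(hits) > topK {
		hits = hits[:topK]
	}
	return hits
}
```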


@ -1,15 +0,0 @@
package reader
import (
"context"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
)
type searchService struct {
ctx context.Context
queryNodeTime *QueryNodeTime
msgStream *msgstream.PulsarMsgStream
}
func (ss *searchService) Start() {}


@ -18,8 +18,7 @@ import (
func TestSearch_Search(t *testing.T) {
conf.LoadConfig("config.yaml")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx, _ := context.WithCancel(context.Background())
mc := msgclient.ReaderMessageClient{}
@ -115,7 +114,7 @@ func TestSearch_Search(t *testing.T) {
queryRawData = append(queryRawData, float32(i))
}
var queryJSON = "{\"field_name\":\"fakevec\",\"num_queries\":1,\"topK\":10}"
var queryJson = "{\"field_name\":\"fakevec\",\"num_queries\":1,\"topK\":10}"
searchMsg1 := msgPb.SearchMsg{
CollectionName: "collection0",
Records: &msgPb.VectorRowRecord{
@ -126,11 +125,11 @@ func TestSearch_Search(t *testing.T) {
Timestamp: uint64(0),
ClientId: int64(0),
ExtraParams: nil,
Json: []string{queryJSON},
Json: []string{queryJson},
}
searchMessages := []*msgPb.SearchMsg{&searchMsg1}
node.queryNodeTimeSync.updateSearchServiceTime(timeRange)
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
assert.Equal(t, node.queryNodeTimeSync.ServiceTimeSync, timeRange.timestampMax)
status := node.Search(searchMessages)


@ -13,10 +13,12 @@ package reader
*/
import "C"
import (
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
msgPb "github.com/zilliztech/milvus-distributed/internal/proto/message"
"strconv"
"unsafe"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/errors"
msgPb "github.com/zilliztech/milvus-distributed/internal/proto/message"
)
const SegmentLifetime = 20000
@ -30,7 +32,7 @@ const (
type Segment struct {
SegmentPtr C.CSegmentBase
SegmentID int64
SegmentId int64
SegmentCloseTime uint64
LastMemSize int64
SegmentStatus int
@ -72,7 +74,7 @@ func (s *Segment) GetDeletedCount() int64 {
// int
// Close(CSegmentBase c_segment);
// */
// fmt.Println("Closing segment :", s.SegmentID)
// fmt.Println("Closing segment :", s.SegmentId)
//
// var status = C.Close(s.SegmentPtr)
// s.SegmentStatus = SegmentClosed
@ -126,7 +128,7 @@ func (s *Segment) SegmentPreDelete(numOfRecords int) int64 {
return int64(offset)
}
func (s *Segment) SegmentInsert(offset int64, entityIDs *[]int64, timestamps *[]uint64, records *[]*commonpb.Blob) error {
func (s *Segment) SegmentInsert(offset int64, entityIDs *[]int64, timestamps *[]uint64, records *[][]byte) error {
/*
int
Insert(CSegmentBase c_segment,
@ -139,37 +141,37 @@ func (s *Segment) SegmentInsert(offset int64, entityIDs *[]int64, timestamps *[]
signed long int count);
*/
// Blobs to one big blob
//var numOfRow = len(*entityIDs)
//var sizeofPerRow = len((*records)[0])
//
//assert.Equal(nil, numOfRow, len(*records))
//
//var rawData = make([]byte, numOfRow*sizeofPerRow)
//var copyOffset = 0
//for i := 0; i < len(*records); i++ {
// copy(rawData[copyOffset:], (*records)[i])
// copyOffset += sizeofPerRow
//}
//
//var cOffset = C.long(offset)
//var cNumOfRows = C.long(numOfRow)
//var cEntityIdsPtr = (*C.long)(&(*entityIDs)[0])
//var cTimestampsPtr = (*C.ulong)(&(*timestamps)[0])
//var cSizeofPerRow = C.int(sizeofPerRow)
//var cRawDataVoidPtr = unsafe.Pointer(&rawData[0])
//
//var status = C.Insert(s.SegmentPtr,
// cOffset,
// cNumOfRows,
// cEntityIdsPtr,
// cTimestampsPtr,
// cRawDataVoidPtr,
// cSizeofPerRow,
// cNumOfRows)
//
//if status != 0 {
// return errors.New("Insert failed, error code = " + strconv.Itoa(int(status)))
//}
var numOfRow = len(*entityIDs)
var sizeofPerRow = len((*records)[0])
assert.Equal(nil, numOfRow, len(*records))
var rawData = make([]byte, numOfRow*sizeofPerRow)
var copyOffset = 0
for i := 0; i < len(*records); i++ {
copy(rawData[copyOffset:], (*records)[i])
copyOffset += sizeofPerRow
}
var cOffset = C.long(offset)
var cNumOfRows = C.long(numOfRow)
var cEntityIdsPtr = (*C.long)(&(*entityIDs)[0])
var cTimestampsPtr = (*C.ulong)(&(*timestamps)[0])
var cSizeofPerRow = C.int(sizeofPerRow)
var cRawDataVoidPtr = unsafe.Pointer(&rawData[0])
var status = C.Insert(s.SegmentPtr,
cOffset,
cNumOfRows,
cEntityIdsPtr,
cTimestampsPtr,
cRawDataVoidPtr,
cSizeofPerRow,
cNumOfRows)
if status != 0 {
return errors.New("Insert failed, error code = " + strconv.Itoa(int(status)))
}
return nil
}
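
The restored cgo path first flattens the per-row byte blobs into one contiguous buffer, so the C side can consume a single raw pointer plus a fixed row size. The same step, isolated from cgo as a pure-Go sketch (it assumes equal-length rows, exactly as the code above does):

```go
package reader

// flattenRows copies equal-sized row blobs back to back into one
// contiguous buffer — the layout C.Insert reads through cRawDataVoidPtr.
// Sketch only; the commit keeps this logic inline in SegmentInsert.
func flattenRows(records [][]byte) []byte {
	if len(records) == 0 {
		return nil
	}
	rowSize := len(records[0])
	raw := make([]byte, len(records)*rowSize)
	offset := 0
	for _, rec := range records {
		copy(raw[offset:], rec) // assumes len(rec) == rowSize for every row
		offset += rowSize
	}
	return raw
}
```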
@ -226,9 +228,9 @@ func (s *Segment) SegmentSearch(query *QueryInfo, timestamp uint64, vectorRecord
var cQueryRawDataLength C.int
if vectorRecord.BinaryData != nil {
return nil, errors.New("data of binary type is not supported yet")
return nil, errors.New("Data of binary type is not supported yet")
} else if len(vectorRecord.FloatData) <= 0 {
return nil, errors.New("null query vector data")
return nil, errors.New("Null query vector data")
} else {
cQueryRawData = (*C.float)(&vectorRecord.FloatData[0])
cQueryRawDataLength = (C.int)(len(vectorRecord.FloatData))


@ -11,7 +11,7 @@ import (
)
//func (node *QueryNode) SegmentsManagement() {
// //node.queryNodeTimeSync.updateTSOTimeSync()
// //node.queryNodeTimeSync.UpdateTSOTimeSync()
// //var timeNow = node.queryNodeTimeSync.TSOTimeSync
//
// timeNow := node.messageClient.GetTimeNow() >> 18


@ -44,8 +44,7 @@ import (
func TestSegmentManagement_SegmentStatistic(t *testing.T) {
conf.LoadConfig("config.yaml")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx, _ := context.WithCancel(context.Background())
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"
@ -74,8 +73,7 @@ func TestSegmentManagement_SegmentStatisticService(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"


@ -23,7 +23,7 @@ func TestSegment_ConstructorAndDestructor(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
// 2. Destruct collection, partition and segment
@ -49,12 +49,12 @@ func TestSegment_SegmentInsert(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
// 2. Create ids and timestamps
//ids := []int64{1, 2, 3}
//timestamps := []uint64{0, 0, 0}
ids := []int64{1, 2, 3}
timestamps := []uint64{0, 0, 0}
// 3. Create records, use schema below:
// schema_tmp->AddField("fakeVec", DataType::VECTOR_FLOAT, 16);
@ -81,8 +81,8 @@ func TestSegment_SegmentInsert(t *testing.T) {
assert.GreaterOrEqual(t, offset, int64(0))
// 5. Do Insert
//var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
//assert.NoError(t, err)
var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
assert.NoError(t, err)
// 6. Destruct collection, partition and segment
partition.DeleteSegment(node, segment)
@ -107,7 +107,7 @@ func TestSegment_SegmentDelete(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
// 2. Create ids and timestamps
@ -145,7 +145,7 @@ func TestSegment_SegmentSearch(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
// 2. Create ids and timestamps
@ -179,11 +179,11 @@ func TestSegment_SegmentSearch(t *testing.T) {
assert.GreaterOrEqual(t, offset, int64(0))
// 5. Do Insert
//var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
//assert.NoError(t, err)
var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
assert.NoError(t, err)
// 6. Do search
var queryJSON = "{\"field_name\":\"fakevec\",\"num_queries\":1,\"topK\":10}"
var queryJson = "{\"field_name\":\"fakevec\",\"num_queries\":1,\"topK\":10}"
var queryRawData = make([]float32, 0)
for i := 0; i < 16; i++ {
queryRawData = append(queryRawData, float32(i))
@ -191,7 +191,7 @@ func TestSegment_SegmentSearch(t *testing.T) {
var vectorRecord = msgPb.VectorRowRecord{
FloatData: queryRawData,
}
query := node.QueryJSON2Info(&queryJSON)
query := node.QueryJson2Info(&queryJson)
var searchRes, searchErr = segment.SegmentSearch(query, timestamps[N/2], &vectorRecord)
assert.NoError(t, searchErr)
fmt.Println(searchRes)
@ -219,7 +219,7 @@ func TestSegment_SegmentPreInsert(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
// 2. Do PreInsert
@ -249,7 +249,7 @@ func TestSegment_SegmentPreDelete(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
// 2. Do PreDelete
@ -321,12 +321,12 @@ func TestSegment_GetRowCount(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
// 2. Create ids and timestamps
ids := []int64{1, 2, 3}
//timestamps := []uint64{0, 0, 0}
timestamps := []uint64{0, 0, 0}
// 3. Create records, use schema below:
// schema_tmp->AddField("fakeVec", DataType::VECTOR_FLOAT, 16);
@ -353,8 +353,8 @@ func TestSegment_GetRowCount(t *testing.T) {
assert.GreaterOrEqual(t, offset, int64(0))
// 5. Do Insert
//var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
//assert.NoError(t, err)
var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
assert.NoError(t, err)
// 6. Get segment row count
var rowCount = segment.GetRowCount()
@ -383,7 +383,7 @@ func TestSegment_GetDeletedCount(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
// 2. Create ids and timestamps
@ -426,12 +426,12 @@ func TestSegment_GetMemSize(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
// 2. Create ids and timestamps
//ids := []int64{1, 2, 3}
//timestamps := []uint64{0, 0, 0}
ids := []int64{1, 2, 3}
timestamps := []uint64{0, 0, 0}
// 3. Create records, use schema below:
// schema_tmp->AddField("fakeVec", DataType::VECTOR_FLOAT, 16);
@ -458,8 +458,8 @@ func TestSegment_GetMemSize(t *testing.T) {
assert.GreaterOrEqual(t, offset, int64(0))
// 5. Do Insert
//var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
//assert.NoError(t, err)
var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
assert.NoError(t, err)
// 6. Get memory usage in bytes
var memSize = segment.GetMemSize()
@ -496,12 +496,12 @@ func TestSegment_RealSchemaTest(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
// 2. Create ids and timestamps
//ids := []int64{1, 2, 3}
//timestamps := []uint64{0, 0, 0}
ids := []int64{1, 2, 3}
timestamps := []uint64{0, 0, 0}
// 3. Create records, use schema below:
// schema_tmp->AddField("fakeVec", DataType::VECTOR_FLOAT, 16);
@ -528,8 +528,8 @@ func TestSegment_RealSchemaTest(t *testing.T) {
assert.GreaterOrEqual(t, offset, int64(0))
// 5. Do Insert
//var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
//assert.NoError(t, err)
var err = segment.SegmentInsert(offset, &ids, &timestamps, &records)
assert.NoError(t, err)
// 6. Destruct collection, partition and segment
partition.DeleteSegment(node, segment)


@ -1,10 +1,7 @@
package reader
import "log"
type serviceTimeNode struct {
BaseNode
queryNodeTime *QueryNodeTime
serviceTimeMsg serviceTimeMsg
}
@ -13,19 +10,7 @@ func (stNode *serviceTimeNode) Name() string {
}
func (stNode *serviceTimeNode) Operate(in []*Msg) []*Msg {
if len(in) != 1 {
log.Println("Invalid operate message input in serviceTimeNode")
// TODO: add error handling
}
serviceTimeMsg, ok := (*in[0]).(*serviceTimeMsg)
if !ok {
log.Println("type assertion failed for serviceTimeMsg")
// TODO: add error handling
}
stNode.queryNodeTime.updateSearchServiceTime(serviceTimeMsg.timeRange)
return nil
return in
}
func newServiceTimeNode() *serviceTimeNode {


@ -66,9 +66,13 @@ func (node *QueryNode) GetSegmentBySegmentID(segmentID int64) (*Segment, error)
}
func (node *QueryNode) FoundSegmentBySegmentID(segmentID int64) bool {
_, ok := node.SegmentsMap[segmentID]
targetSegment := node.SegmentsMap[segmentID]
return ok
if targetSegment == nil {
return false
}
return true
}
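
The change above swaps the comma-ok lookup for a nil check. For a `map[int64]*Segment` the two agree only while no nil pointer is ever stored: the comma-ok form reports key presence unconditionally, which is why it is the usual idiom. A sketch of the distinction:

```go
package reader

// foundByOk reports presence of the key itself, regardless of value.
func foundByOk(m map[int64]*Segment, id int64) bool {
	_, ok := m[id]
	return ok
}

// foundByNil misreports a key whose value was explicitly set to nil —
// indexing returns the zero value (nil) for missing keys and stored
// nils alike.
func foundByNil(m map[int64]*Segment, id int64) bool {
	return m[id] != nil
}
```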
func (c *Collection) GetPartitionByName(partitionName string) (partition *Partition) {
@ -107,12 +111,12 @@ func (node *QueryNode) WriteQueryLog() {
// write logs
for _, insertLog := range node.InsertLogs {
insertLogJSON, err := json.Marshal(&insertLog)
insertLogJson, err := json.Marshal(&insertLog)
if err != nil {
log.Fatal(err)
}
writeString := string(insertLogJSON) + "\n"
writeString := string(insertLogJson) + "\n"
fmt.Println(writeString)
_, err2 := f.WriteString(writeString)
@ -137,9 +141,9 @@ func (node *QueryNode) PrepareBatchMsg() []int {
return msgLen
}
func (node *QueryNode) QueryJSON2Info(queryJSON *string) *QueryInfo {
func (node *QueryNode) QueryJson2Info(queryJson *string) *QueryInfo {
var query QueryInfo
var err = json.Unmarshal([]byte(*queryJSON), &query)
var err = json.Unmarshal([]byte(*queryJson), &query)
if err != nil {
log.Fatal("Unmarshal query json failed")


@ -18,8 +18,7 @@ func TestUtilFunctions_GetKey2Segments(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel := context.WithDeadline(context.Background(), d)
defer cancel()
ctx, _ := context.WithDeadline(context.Background(), d)
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"
@ -66,7 +65,7 @@ func TestUtilFunctions_GetCollectionByID(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
c := node.GetCollectionByID(int64(0))
@ -113,7 +112,7 @@ func TestUtilFunctions_GetSegmentBySegmentID(t *testing.T) {
// 2. Get segment by segment id
var s0, err = node.GetSegmentBySegmentID(0)
assert.NoError(t, err)
assert.Equal(t, s0.SegmentID, int64(0))
assert.Equal(t, s0.SegmentId, int64(0))
node.Close()
}
@ -130,7 +129,7 @@ func TestUtilFunctions_FoundSegmentBySegmentID(t *testing.T) {
assert.Equal(t, collection.CollectionName, "collection0")
assert.Equal(t, partition.PartitionName, "partition0")
assert.Equal(t, segment.SegmentID, int64(0))
assert.Equal(t, segment.SegmentId, int64(0))
assert.Equal(t, len(node.SegmentsMap), 1)
b1 := node.FoundSegmentBySegmentID(int64(0))
@ -169,8 +168,7 @@ func TestUtilFunctions_GetPartitionByName(t *testing.T) {
func TestUtilFunctions_PrepareBatchMsg(t *testing.T) {
conf.LoadConfig("config.yaml")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ctx, _ := context.WithCancel(context.Background())
mc := msgclient.ReaderMessageClient{}
pulsarAddr := "pulsar://"
@ -191,8 +189,8 @@ func TestUtilFunctions_QueryJson2Info(t *testing.T) {
ctx := context.Background()
node := NewQueryNode(ctx, 0, 0)
var queryJSON = "{\"field_name\":\"age\",\"num_queries\":1,\"topK\":10}"
info := node.QueryJSON2Info(&queryJSON)
var queryJson = "{\"field_name\":\"age\",\"num_queries\":1,\"topK\":10}"
info := node.QueryJson2Info(&queryJson)
assert.Equal(t, info.FieldName, "age")
assert.Equal(t, info.NumQueries, int64(1))


@ -1,14 +1,15 @@
package s3driver
package S3_driver_test
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
s3_driver "github.com/zilliztech/milvus-distributed/internal/storage/internal/S3"
)
var ctx = context.Background()
var client, err = NewS3Driver(ctx)
var client, err = s3_driver.NewS3Driver(ctx)
func TestS3Driver_PutRowAndGetRow(t *testing.T) {
err = client.PutRow(ctx, []byte("bar"), []byte("abcdefghijklmnoopqrstuvwxyz"), "SegmentA", 1)


@ -1,4 +1,4 @@
package s3driver
package S3_driver
import (
"bytes"


@ -1,4 +1,4 @@
package s3driver
package S3_driver
import (
"context"


@ -1,4 +1,4 @@
package miniodriver
package minio_driver
import (
"context"


@ -1,4 +1,4 @@
package miniodriver
package minio_driver
import (
"bytes"


@ -1,14 +1,15 @@
package miniodriver
package minio_driver_test
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
minio_driver "github.com/zilliztech/milvus-distributed/internal/storage/internal/minio"
)
var ctx = context.Background()
var client, err = NewMinioDriver(ctx)
var client, err = minio_driver.NewMinioDriver(ctx)
func TestMinioDriver_PutRowAndGetRow(t *testing.T) {
err = client.PutRow(ctx, []byte("bar"), []byte("abcdefghijklmnoopqrstuvwxyz"), "SegmentA", 1)
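
The storage tests move to external `_test` packages (`S3_driver_test`, `minio_driver_test`), so they must import the driver under test explicitly. Note that Go style (Effective Go, golint) prefers short, all-lowercase package names without underscores — the convention the pre-change `s3driver`/`miniodriver` names followed. A sketch of the black-box pattern the diff adopts, with a skip guard for environments where MinIO is unreachable (the skip is our addition, not in the source):

```go
package minio_driver_test

import (
	"context"
	"testing"

	minio_driver "github.com/zilliztech/milvus-distributed/internal/storage/internal/minio"
)

func TestPutRow(t *testing.T) {
	ctx := context.Background()
	client, err := minio_driver.NewMinioDriver(ctx)
	if err != nil {
		t.Skipf("minio not reachable: %v", err) // assumption: skip rather than fail
	}
	// PutRow signature as exercised by the diff's own test.
	if err := client.PutRow(ctx, []byte("bar"), []byte("payload"), "SegmentA", 1); err != nil {
		t.Fatal(err)
	}
}
```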


@ -17,10 +17,9 @@ type flowGraphStates struct {
}
type TimeTickedFlowGraph struct {
ctx context.Context
states *flowGraphStates
startNode *nodeCtx
nodeCtx map[string]*nodeCtx
ctx context.Context
states *flowGraphStates
nodeCtx map[string]*nodeCtx
}
func (fg *TimeTickedFlowGraph) AddNode(node *Node) {
@ -68,17 +67,6 @@ func (fg *TimeTickedFlowGraph) SetEdges(nodeName string, in []string, out []stri
return nil
}
func (fg *TimeTickedFlowGraph) SetStartNode(nodeName string) error {
startNode, ok := fg.nodeCtx[nodeName]
if !ok {
errMsg := "Cannot find node:" + nodeName
return errors.New(errMsg)
}
fg.startNode = startNode
return nil
}
func (fg *TimeTickedFlowGraph) Start() {
wg := sync.WaitGroup{}
for _, v := range fg.nodeCtx {
@ -88,11 +76,6 @@ func (fg *TimeTickedFlowGraph) Start() {
wg.Wait()
}
func (fg *TimeTickedFlowGraph) Input(msg *Msg) {
// start node should have only 1 input channel
fg.startNode.inputChannels[0] <- msg
}
func (fg *TimeTickedFlowGraph) Close() error {
for _, v := range fg.nodeCtx {
v.Close()
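
With `SetStartNode` and `Input` removed, the graph is driven solely by `Start`, which fans every `nodeCtx` out onto its own goroutine and blocks on a WaitGroup. A sketch of that shape (the package name and the `nodeCtx.Start` signature are assumptions; the hunk elides both):

```go
package flowgraph

import (
	"context"
	"sync"
)

type nodeCtx struct{ /* channels and node state elided */ }

// Start signature assumed from the call-site shape in the diff: each
// node pumps messages until the shared context is done, then signals
// the WaitGroup.
func (n *nodeCtx) Start(ctx context.Context, wg *sync.WaitGroup) {
	defer wg.Done()
	// ... run the node's Operate loop until ctx.Done() ...
}

type TimeTickedFlowGraph struct {
	ctx     context.Context
	nodeCtx map[string]*nodeCtx
}

// Start launches every node concurrently and blocks until all finish.
func (fg *TimeTickedFlowGraph) Start() {
	wg := sync.WaitGroup{}
	for _, v := range fg.nodeCtx {
		wg.Add(1)
		go v.Start(fg.ctx, &wg)
	}
	wg.Wait()
}
```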


@ -1,4 +1,4 @@
package typeutil
type Timestamp = uint64
type ID = int64
type Id = int64


@ -14,8 +14,9 @@ done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
CPP_SRC_DIR="${SCRIPTS_DIR}/../internal/core"
CPP_BUILD_DIR="${CPP_SRC_DIR}/cmake_build"
BUILD_OUTPUT_DIR="${SCRIPTS_DIR}/../cmake_build"
BUILD_OUTPUT_DIR=${CPP_BUILD_DIR}
BUILD_TYPE="Release"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX="${CPP_SRC_DIR}/output"