Mirror of https://github.com/milvus-io/milvus.git

Remove master and writenode

Signed-off-by: neza2017 <yefu.chen@zilliz.com>

Ref: pull/4973/head^2
Parent: d5346e132a
Commit: 854accf95b
@@ -13,10 +13,6 @@ dir ('build/docker/deploy') {
    withCredentials([usernamePassword(credentialsId: "${env.DOCKER_CREDENTIALS_ID}", usernameVariable: 'DOCKER_USERNAME', passwordVariable: 'DOCKER_PASSWORD')]) {
        sh 'docker login -u ${DOCKER_USERNAME} -p ${DOCKER_PASSWORD} ${DOKCER_REGISTRY_URL}'

        sh 'docker pull ${SOURCE_REPO}/master:${SOURCE_TAG} || true'
        sh 'docker-compose build --force-rm master'
        sh 'docker-compose push master'

        sh 'docker pull ${SOURCE_REPO}/proxyservice:${SOURCE_TAG} || true'
        sh 'docker-compose build --force-rm proxyservice'
        sh 'docker-compose push proxyservice'
|
@@ -5,7 +5,6 @@ try {
    sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d minio'
    dir ('build/docker/deploy') {
        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} pull'
        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d master'
        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d proxyservice'
        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d proxynode'
        sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d indexservice'
Makefile (19 lines changed)
@@ -78,11 +78,6 @@ endif

verifiers: getdeps cppcheck fmt static-check ruleguard

master: build-cpp
	@echo "Building each component's binary to './bin'"
	@echo "Building masterservice ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/masterservice $(PWD)/cmd/masterservice/main.go 1>/dev/null


# Builds various components locally.
proxynode: build-cpp

@@ -96,13 +91,6 @@ querynode: build-cpp
	@echo "Building query node ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/querynode $(PWD)/cmd/querynode/querynode.go 1>/dev/null


# Builds various components locally.
writenode: build-cpp
	@echo "Building each component's binary to './bin'"
	@echo "Building write node ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/writenode $(PWD)/cmd/writenode/writenode.go 1>/dev/null

# Builds various components locally.
datanode: build-cpp
	@echo "Building each component's binary to './bin'"

@@ -134,8 +122,6 @@ build-go: build-cpp
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/proxynode $(PWD)/cmd/proxy/node/proxy_node.go 1>/dev/null
	@echo "Building query service ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/queryservice $(PWD)/cmd/queryservice/queryservice.go 1>/dev/null
	@echo "Building query node ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/writenode $(PWD)/cmd/writenode/writenode.go 1>/dev/null
	@echo "Building binlog ..."
	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/binlog $(PWD)/cmd/binlog/main.go 1>/dev/null
	@echo "Building singlenode ..."

@@ -179,17 +165,14 @@ test-cpp: build-cpp-with-unittest
docker: verifiers
	@echo "Building query node docker image '$(TAG)'"
	@echo "Building proxy docker image '$(TAG)'"
	@echo "Building master docker image '$(TAG)'"

# Builds each component and installs it to $GOPATH/bin.
install: all
	@echo "Installing binary to './bin'"
	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/queryservice $(GOPATH)/bin/queryservice
	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/querynode $(GOPATH)/bin/querynode
	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/master $(GOPATH)/bin/master
	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/proxynode $(GOPATH)/bin/proxynode
	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/proxyservice $(GOPATH)/bin/proxyservice
	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/writenode $(GOPATH)/bin/writenode
	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/singlenode $(GOPATH)/bin/singlenode
	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/indexservice $(GOPATH)/bin/indexservice
	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/indexnode $(GOPATH)/bin/indexnode

@@ -202,12 +185,10 @@ clean:
	@find . -name '*~' | xargs rm -fv
	@rm -rf bin/
	@rm -rf lib/
	@rm -rf $(GOPATH)/bin/master
	@rm -rf $(GOPATH)/bin/proxynode
	@rm -rf $(GOPATH)/bin/proxyservice
	@rm -rf $(GOPATH)/bin/queryservice
	@rm -rf $(GOPATH)/bin/querynode
	@rm -rf $(GOPATH)/bin/writenode
	@rm -rf $(GOPATH)/bin/singlenode
	@rm -rf $(GOPATH)/bin/indexservice
	@rm -rf $(GOPATH)/bin/indexnode
@@ -1,20 +1,6 @@
version: '3.5'

services:
  master:
    image: ${TARGET_REPO}/master:${TARGET_TAG}
    build:
      context: ../../../
      dockerfile: build/docker/deploy/master/DockerFile
      cache_from:
        - ${SOURCE_REPO}/master:${SOURCE_TAG}
    environment:
      PULSAR_ADDRESS: ${PULSAR_ADDRESS}
      ETCD_ADDRESS: ${ETCD_ADDRESS}
      INDEX_SERVICE_ADDRESS: ${INDEX_SERVICE_ADDRESS}
    networks:
      - milvus

  proxyservice:
    image: ${TARGET_REPO}/proxyservice:${TARGET_TAG}
    build:

@@ -70,21 +56,6 @@ services:
    networks:
      - milvus

  # writenode:
  #   image: ${TARGET_REPO}/writenode:${TARGET_TAG}
  #   build:
  #     context: ../../../
  #     dockerfile: build/docker/deploy/writenode/DockerFile
  #     cache_from:
  #       - ${SOURCE_REPO}/writenode:${SOURCE_TAG}
  #   environment:
  #     PULSAR_ADDRESS: ${PULSAR_ADDRESS}
  #     ETCD_ADDRESS: ${ETCD_ADDRESS}
  #     MASTER_ADDRESS: ${MASTER_ADDRESS}
  #     MINIO_ADDRESS: ${MINIO_ADDRESS}
  #   networks:
  #     - milvus

  datanode:
    image: ${TARGET_REPO}/datanode:${TARGET_TAG}
    build:
@@ -1,22 +0,0 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.

FROM alpine:3.12.1

COPY ./bin/master /milvus-distributed/bin/master

COPY ./configs/ /milvus-distributed/configs/

WORKDIR /milvus-distributed/

CMD ["./bin/master"]

EXPOSE 53100
@@ -1,39 +0,0 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.

FROM milvusdb/milvus-distributed-dev:amd64-ubuntu18.04-latest AS openblas

#FROM alpine
FROM ubuntu:bionic-20200921

RUN apt-get update && apt-get install -y --no-install-recommends libtbb-dev gfortran

#RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories

#RUN sed -i "s/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g" /etc/apk/repositories \
#    && apk add --no-cache libtbb gfortran

COPY --from=openblas /usr/lib/libopenblas-r0.3.9.so /usr/lib/

RUN ln -s /usr/lib/libopenblas-r0.3.9.so /usr/lib/libopenblas.so.0 && \
    ln -s /usr/lib/libopenblas.so.0 /usr/lib/libopenblas.so

COPY ./bin/writenode /milvus-distributed/bin/writenode

COPY ./configs/ /milvus-distributed/configs/

COPY ./lib/ /milvus-distributed/lib/

ENV LD_LIBRARY_PATH=/milvus-distributed/lib:$LD_LIBRARY_PATH:/usr/lib

WORKDIR /milvus-distributed/

CMD ["./bin/writenode"]
@@ -1,55 +0,0 @@
package main

import (
	"context"
	"flag"
	"log"
	"os"
	"os/signal"
	"runtime/pprof"
	"syscall"

	"github.com/zilliztech/milvus-distributed/internal/master"
	"go.uber.org/zap"
)

func main() {
	cpuprofile := flag.String("cpuprofile", "", "write cpu profile to file")
	flag.Parse()

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal(err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		defer pprof.StopCPUProfile()
	}

	master.Init()

	// Creates server.
	ctx, cancel := context.WithCancel(context.Background())

	svr, err := master.CreateServer(ctx)
	if err != nil {
		log.Print("create server failed", zap.Error(err))
	}

	if err := svr.Run(int64(master.Params.Port)); err != nil {
		log.Fatal("run server failed", zap.Error(err))
	}

	sc := make(chan os.Signal, 1)
	signal.Notify(sc,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)
	sig := <-sc
	log.Print("Got signal to exit", zap.String("signal", sig.String()))
	cancel()
	svr.Close()
}
@ -7,7 +7,6 @@ import (
|
|||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime/pprof"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
@ -15,51 +14,10 @@ import (
|
|||
"go.uber.org/zap"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/indexnode"
|
||||
"github.com/zilliztech/milvus-distributed/internal/master"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proxynode"
|
||||
"github.com/zilliztech/milvus-distributed/internal/querynode"
|
||||
"github.com/zilliztech/milvus-distributed/internal/writenode"
|
||||
)
|
||||
|
||||
func InitMaster(cpuprofile *string, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
if *cpuprofile != "" {
|
||||
f, err := os.Create(*cpuprofile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := pprof.StartCPUProfile(f); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
master.Init()
|
||||
|
||||
// Creates server.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
svr, err := master.CreateServer(ctx)
|
||||
if err != nil {
|
||||
log.Print("create server failed", zap.Error(err))
|
||||
}
|
||||
|
||||
if err := svr.Run(int64(master.Params.Port)); err != nil {
|
||||
log.Fatal("run server failed", zap.Error(err))
|
||||
}
|
||||
|
||||
sc := make(chan os.Signal, 1)
|
||||
signal.Notify(sc,
|
||||
syscall.SIGHUP,
|
||||
syscall.SIGINT,
|
||||
syscall.SIGTERM,
|
||||
syscall.SIGQUIT)
|
||||
sig := <-sc
|
||||
log.Print("Got signal to exit", zap.String("signal", sig.String()))
|
||||
cancel()
|
||||
svr.Close()
|
||||
}
|
||||
|
||||
func InitProxy(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
//proxynode.Init()
|
||||
|
@ -179,49 +137,9 @@ func InitIndexBuilder(wg *sync.WaitGroup) {
|
|||
}
|
||||
}
|
||||
|
||||
func InitWriteNode(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
writenode.Init()
|
||||
fmt.Println("WriteNodeID is", writenode.Params.WriteNodeID)
|
||||
// Creates server.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
svr := writenode.NewWriteNode(ctx, 111111)
|
||||
|
||||
sc := make(chan os.Signal, 1)
|
||||
signal.Notify(sc,
|
||||
syscall.SIGHUP,
|
||||
syscall.SIGINT,
|
||||
syscall.SIGTERM,
|
||||
syscall.SIGQUIT)
|
||||
|
||||
var sig os.Signal
|
||||
go func() {
|
||||
sig = <-sc
|
||||
cancel()
|
||||
}()
|
||||
|
||||
if err := svr.Start(); err != nil {
|
||||
log.Fatal("run server failed", zap.Error(err))
|
||||
}
|
||||
|
||||
<-ctx.Done()
|
||||
log.Print("Got signal to exit", zap.String("signal", sig.String()))
|
||||
|
||||
svr.Close()
|
||||
switch sig {
|
||||
case syscall.SIGTERM:
|
||||
exit(0)
|
||||
default:
|
||||
exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
var wg sync.WaitGroup
|
||||
cpuprofile := flag.String("cpuprofile", "", "write cpu profile to file")
|
||||
flag.Parse()
|
||||
wg.Add(1)
|
||||
go InitMaster(cpuprofile, &wg)
|
||||
time.Sleep(time.Second * 1)
|
||||
wg.Add(1)
|
||||
go InitProxy(&wg)
|
||||
|
@ -229,8 +147,6 @@ func main() {
|
|||
go InitQueryNode(&wg)
|
||||
wg.Add(1)
|
||||
go InitIndexBuilder(&wg)
|
||||
wg.Add(1)
|
||||
go InitWriteNode(&wg)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
|
|
|
@ -1,55 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/writenode"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
writenode.Init()
|
||||
fmt.Println("WriteNodeID is", writenode.Params.WriteNodeID)
|
||||
// Creates server.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
svr := writenode.NewWriteNode(ctx, 111111)
|
||||
|
||||
sc := make(chan os.Signal, 1)
|
||||
signal.Notify(sc,
|
||||
syscall.SIGHUP,
|
||||
syscall.SIGINT,
|
||||
syscall.SIGTERM,
|
||||
syscall.SIGQUIT)
|
||||
|
||||
var sig os.Signal
|
||||
go func() {
|
||||
sig = <-sc
|
||||
cancel()
|
||||
}()
|
||||
|
||||
if err := svr.Start(); err != nil {
|
||||
log.Fatal("run server failed", zap.Error(err))
|
||||
}
|
||||
|
||||
<-ctx.Done()
|
||||
log.Print("Got signal to exit", zap.String("signal", sig.String()))
|
||||
|
||||
svr.Close()
|
||||
switch sig {
|
||||
case syscall.SIGTERM:
|
||||
exit(0)
|
||||
default:
|
||||
exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func exit(code int) {
|
||||
os.Exit(code)
|
||||
}
|
|
@@ -1,40 +0,0 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.

writeNode:
  stats:
    publishInterval: 1000 # milliseconds

  dataSync:
    flowGraph:
      maxQueueLength: 1024
      maxParallelism: 1024

  msgStream:
    dataDefinition:
      recvBufSize: 64 # msgPack chan buffer size
      pulsarBufSize: 64 # pulsar chan buffer size

    insert:
      #streamBufSize: 1024 # msgPack chan buffer size
      recvBufSize: 1024 # msgPack chan buffer size
      pulsarBufSize: 1024 # pulsar chan buffer size

    delete:
      #streamBufSize: 1024 # msgPack chan buffer size
      recvBufSize: 1024 # msgPack chan buffer size
      pulsarBufSize: 1024 # pulsar chan buffer size

  flush:
    # max buffer size to flush
    insertBufSize: 500
    ddBufSize: 20
@ -1,115 +0,0 @@
|
|||
package indexnode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/master"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var ctx context.Context
|
||||
var cancel func()
|
||||
|
||||
var buildClient *NodeImpl
|
||||
|
||||
var masterPort = 53101
|
||||
var masterServer *master.Master
|
||||
|
||||
func makeMasterAddress(port int64) string {
|
||||
masterAddr := "127.0.0.1:" + strconv.FormatInt(port, 10)
|
||||
return masterAddr
|
||||
}
|
||||
|
||||
func refreshMasterAddress() {
|
||||
masterAddr := makeMasterAddress(int64(masterPort))
|
||||
Params.MasterAddress = masterAddr
|
||||
master.Params.Port = masterPort
|
||||
}
|
||||
|
||||
func startMaster(ctx context.Context) {
|
||||
master.Init()
|
||||
refreshMasterAddress()
|
||||
etcdAddr := master.Params.EtcdAddress
|
||||
metaRootPath := master.Params.MetaRootPath
|
||||
|
||||
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = etcdCli.Delete(context.TODO(), metaRootPath, clientv3.WithPrefix())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
svr, err := master.CreateServer(ctx)
|
||||
masterServer = svr
|
||||
if err != nil {
|
||||
log.Print("create server failed", zap.Error(err))
|
||||
}
|
||||
if err := svr.Run(int64(master.Params.Port)); err != nil {
|
||||
log.Fatal("run server failed", zap.Error(err))
|
||||
}
|
||||
|
||||
fmt.Println("Waiting for server!", svr.IsServing())
|
||||
|
||||
}
|
||||
|
||||
func startBuilder(ctx context.Context) {
|
||||
var err error
|
||||
buildClient, err = NewNodeImpl(ctx)
|
||||
if err != nil {
|
||||
log.Print("create builder failed", zap.Error(err))
|
||||
}
|
||||
|
||||
// TODO: change to wait until master is ready
|
||||
if err := buildClient.Start(); err != nil {
|
||||
log.Fatal("run builder failed", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func setup() {
|
||||
Params.Init()
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
startMaster(ctx)
|
||||
startBuilder(ctx)
|
||||
}
|
||||
|
||||
func shutdown() {
|
||||
cancel()
|
||||
buildClient.Stop()
|
||||
masterServer.Close()
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
setup()
|
||||
code := m.Run()
|
||||
shutdown()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
//func TestBuilder_GRPC(t *testing.T) {
|
||||
// typeParams := make(map[string]string)
|
||||
// typeParams["a"] = "1"
|
||||
// indexParams := make(map[string]string)
|
||||
// indexParams["b"] = "2"
|
||||
// columnDataPaths := []string{"dataA", "dataB"}
|
||||
// indexID, err := buildClient.BuildIndex(columnDataPaths, typeParams, indexParams)
|
||||
// assert.Nil(t, err)
|
||||
//
|
||||
// time.Sleep(time.Second * 3)
|
||||
//
|
||||
// description, err := buildClient.GetIndexStates([]UniqueID{indexID})
|
||||
// assert.Nil(t, err)
|
||||
// assert.Equal(t, commonpb.IndexState_INPROGRESS, description.States[0].State)
|
||||
// assert.Equal(t, indexID, description.States[0].IndexID)
|
||||
//
|
||||
// indexDataPaths, err := buildClient.GetIndexFilePaths([]UniqueID{indexID})
|
||||
// assert.Nil(t, err)
|
||||
// assert.Nil(t, indexDataPaths[0])
|
||||
//}
|
|
@@ -1,25 +0,0 @@
# How to start a master

## Requirements
### Start an etcd v3 server
```
./etcd -listen-peer-urls=http://192.168.1.10:12380 -advertise-client-urls=http://192.168.1.10:12379 -listen-client-urls http://0.0.0.0:12379,http://0.0.0.0:14001 -initial-advertise-peer-urls=http://192.168.1.10:12380
```
## Start from code
```
go run cmd/master.go
```

## Start with docker

## What rules does master use to write data to kv storage?
1. Find the root path variable `ETCD_ROOT_PATH`, which is defined in common/config.go.
2. Add the prefix path `segment` if the resource is a segment.
3. Add the prefix path `collection` if the resource is a collection.
4. Append the resource UUID.

### Example
If the master creates a collection with UUID `46e468ee-b34a-419d-85ed-80c56bfa4e90`,
the corresponding key in etcd is `$(ETCD_ROOT_PATH)/collection/46e468ee-b34a-419d-85ed-80c56bfa4e90`.
A minimal sketch of this key construction is shown below.
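For illustration only, here is a minimal Go sketch of the key layout described above. The constant and helper names (`etcdRootPath`, `collectionKey`, `segmentKey`) are assumptions for this example, not identifiers from the repository; the root value reuses the `by-dev` default that appears as `etcd.rootpath` in the master config.

```go
package main

import (
	"fmt"
	"path"
)

// etcdRootPath stands in for the ETCD_ROOT_PATH value read from common/config.go.
const etcdRootPath = "by-dev"

// collectionKey builds the etcd key for a collection resource by joining the
// root path, the "collection" prefix, and the resource UUID, as described above.
func collectionKey(uuid string) string {
	return path.Join(etcdRootPath, "collection", uuid)
}

// segmentKey does the same for a segment resource, using the "segment" prefix.
func segmentKey(uuid string) string {
	return path.Join(etcdRootPath, "segment", uuid)
}

func main() {
	fmt.Println(collectionKey("46e468ee-b34a-419d-85ed-80c56bfa4e90"))
	// Output: by-dev/collection/46e468ee-b34a-419d-85ed-80c56bfa4e90
	fmt.Println(segmentKey("46e468ee-b34a-419d-85ed-80c56bfa4e90"))
	// Output: by-dev/segment/46e468ee-b34a-419d-85ed-80c56bfa4e90
}
```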
@ -1,145 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
|
||||
writerclient "github.com/zilliztech/milvus-distributed/internal/writenode/client"
|
||||
)
|
||||
|
||||
type WriteNodeClient interface {
|
||||
FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error
|
||||
DescribeSegment(segmentID UniqueID) (*writerclient.SegmentDescription, error)
|
||||
GetInsertBinlogPaths(segmentID UniqueID) (map[UniqueID][]string, error)
|
||||
}
|
||||
|
||||
type MockWriteNodeClient struct {
|
||||
segmentID UniqueID
|
||||
flushTime time.Time
|
||||
partitionTag string
|
||||
timestamp Timestamp
|
||||
collectionID UniqueID
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
func (m *MockWriteNodeClient) FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
m.flushTime = time.Now()
|
||||
m.segmentID = segmentID
|
||||
m.collectionID = collectionID
|
||||
m.partitionTag = partitionTag
|
||||
m.timestamp = timestamp
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockWriteNodeClient) DescribeSegment(segmentID UniqueID) (*writerclient.SegmentDescription, error) {
|
||||
now := time.Now()
|
||||
m.lock.RLock()
|
||||
defer m.lock.RUnlock()
|
||||
if now.Sub(m.flushTime).Seconds() > 2 {
|
||||
return &writerclient.SegmentDescription{
|
||||
SegmentID: segmentID,
|
||||
IsClosed: true,
|
||||
OpenTime: 0,
|
||||
CloseTime: 1,
|
||||
}, nil
|
||||
}
|
||||
return &writerclient.SegmentDescription{
|
||||
SegmentID: segmentID,
|
||||
IsClosed: false,
|
||||
OpenTime: 0,
|
||||
CloseTime: 1,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MockWriteNodeClient) GetInsertBinlogPaths(segmentID UniqueID) (map[UniqueID][]string, error) {
|
||||
return map[UniqueID][]string{
|
||||
1: {"/binlog/insert/file_1"},
|
||||
100: {"/binlog/insert/file_100"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
type BuildIndexClient interface {
|
||||
BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error)
|
||||
GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error)
|
||||
GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error)
|
||||
}
|
||||
|
||||
type MockBuildIndexClient struct {
|
||||
buildTime time.Time
|
||||
}
|
||||
|
||||
func (m *MockBuildIndexClient) BuildIndex(req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error) {
|
||||
m.buildTime = time.Now()
|
||||
return &indexpb.BuildIndexResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
},
|
||||
IndexID: int64(1),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MockBuildIndexClient) GetIndexStates(req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error) {
|
||||
now := time.Now()
|
||||
ret := &indexpb.IndexStatesResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
},
|
||||
}
|
||||
var indexStates []*indexpb.IndexInfo
|
||||
if now.Sub(m.buildTime).Seconds() > 2 {
|
||||
for _, indexID := range req.IndexIDs {
|
||||
indexState := &indexpb.IndexInfo{
|
||||
State: commonpb.IndexState_FINISHED,
|
||||
IndexID: indexID,
|
||||
}
|
||||
indexStates = append(indexStates, indexState)
|
||||
}
|
||||
ret.States = indexStates
|
||||
return ret, nil
|
||||
}
|
||||
for _, indexID := range req.IndexIDs {
|
||||
indexState := &indexpb.IndexInfo{
|
||||
State: commonpb.IndexState_INPROGRESS,
|
||||
IndexID: indexID,
|
||||
}
|
||||
indexStates = append(indexStates, indexState)
|
||||
}
|
||||
ret.States = indexStates
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (m *MockBuildIndexClient) GetIndexFilePaths(req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error) {
|
||||
var filePathInfos []*indexpb.IndexFilePathInfo
|
||||
for _, indexID := range req.IndexIDs {
|
||||
filePaths := &indexpb.IndexFilePathInfo{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
},
|
||||
IndexID: indexID,
|
||||
IndexFilePaths: []string{"/binlog/index/file_1", "/binlog/index/file_2", "/binlog/index/file_3"},
|
||||
}
|
||||
filePathInfos = append(filePathInfos, filePaths)
|
||||
}
|
||||
|
||||
return &indexpb.IndexFilePathsResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
},
|
||||
FilePaths: filePathInfos,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type LoadIndexClient interface {
|
||||
LoadIndex(indexPaths []string, segmentID int64, fieldID int64, fieldName string, indexParams map[string]string) error
|
||||
}
|
||||
|
||||
type MockLoadIndexClient struct {
|
||||
}
|
||||
|
||||
func (m *MockLoadIndexClient) LoadIndex(indexPaths []string, segmentID int64, fieldID int64, fieldName string, indexParams map[string]string) error {
|
||||
return nil
|
||||
}
|
|
@ -1,315 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
)
|
||||
|
||||
type createCollectionTask struct {
|
||||
baseTask
|
||||
req *milvuspb.CreateCollectionRequest
|
||||
}
|
||||
|
||||
type dropCollectionTask struct {
|
||||
baseTask
|
||||
req *milvuspb.DropCollectionRequest
|
||||
segManager SegmentManager
|
||||
}
|
||||
|
||||
type hasCollectionTask struct {
|
||||
baseTask
|
||||
hasCollection bool
|
||||
req *milvuspb.HasCollectionRequest
|
||||
}
|
||||
|
||||
type describeCollectionTask struct {
|
||||
baseTask
|
||||
description *milvuspb.DescribeCollectionResponse
|
||||
req *milvuspb.DescribeCollectionRequest
|
||||
}
|
||||
|
||||
type showCollectionsTask struct {
|
||||
baseTask
|
||||
stringListResponse *milvuspb.ShowCollectionResponse
|
||||
req *milvuspb.ShowCollectionRequest
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
func (t *createCollectionTask) Type() commonpb.MsgType {
|
||||
if t.req == nil {
|
||||
log.Printf("null request")
|
||||
return 0
|
||||
}
|
||||
return t.req.Base.MsgType
|
||||
}
|
||||
|
||||
func (t *createCollectionTask) Ts() (Timestamp, error) {
|
||||
if t.req == nil {
|
||||
return 0, errors.New("null request")
|
||||
}
|
||||
return t.req.Base.Timestamp, nil
|
||||
}
|
||||
|
||||
func (t *createCollectionTask) Execute() error {
|
||||
if t.req == nil {
|
||||
return errors.New("null request")
|
||||
}
|
||||
|
||||
var schema schemapb.CollectionSchema
|
||||
err := proto.UnmarshalMerge(t.req.Schema, &schema)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for index, singleFiled := range schema.Fields {
|
||||
singleFiled.FieldID = int64(index + 100)
|
||||
}
|
||||
|
||||
zeroField := &schemapb.FieldSchema{
|
||||
FieldID: int64(0),
|
||||
Name: "RowID",
|
||||
IsPrimaryKey: false,
|
||||
DataType: schemapb.DataType_INT64,
|
||||
}
|
||||
|
||||
oneField := &schemapb.FieldSchema{
|
||||
FieldID: int64(1),
|
||||
Name: "Timestamp",
|
||||
IsPrimaryKey: false,
|
||||
DataType: schemapb.DataType_INT64,
|
||||
}
|
||||
|
||||
schema.Fields = append(schema.Fields, zeroField, oneField)
|
||||
|
||||
collectionID, err := t.sch.globalIDAllocator()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ts, err := t.Ts()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
collection := etcdpb.CollectionMeta{
|
||||
ID: collectionID,
|
||||
Schema: &schema,
|
||||
CreateTime: ts,
|
||||
SegmentIDs: make([]UniqueID, 0),
|
||||
PartitionTags: make([]string, 0),
|
||||
}
|
||||
err = t.mt.AddCollection(&collection)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgPack := ms.MsgPack{}
|
||||
baseMsg := ms.BaseMsg{
|
||||
BeginTimestamp: t.req.Base.Timestamp,
|
||||
EndTimestamp: t.req.Base.Timestamp,
|
||||
HashValues: []uint32{0},
|
||||
}
|
||||
|
||||
createCollectionMsg := &internalpb2.CreateCollectionRequest{
|
||||
Base: t.req.Base,
|
||||
DbName: "",
|
||||
CollectionName: t.req.CollectionName,
|
||||
DbID: 0,
|
||||
CollectionID: collectionID,
|
||||
}
|
||||
|
||||
createCollectionMsg.Schema, err = proto.Marshal(&schema)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
timeTickMsg := &ms.CreateCollectionMsg{
|
||||
BaseMsg: baseMsg,
|
||||
CreateCollectionRequest: *createCollectionMsg,
|
||||
}
|
||||
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
|
||||
return t.sch.ddMsgStream.Broadcast(&msgPack)
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
func (t *dropCollectionTask) Type() commonpb.MsgType {
|
||||
if t.req == nil {
|
||||
log.Printf("null request")
|
||||
return 0
|
||||
}
|
||||
return t.req.Base.MsgType
|
||||
}
|
||||
|
||||
func (t *dropCollectionTask) Ts() (Timestamp, error) {
|
||||
if t.req == nil {
|
||||
return 0, errors.New("null request")
|
||||
}
|
||||
return t.req.Base.Timestamp, nil
|
||||
}
|
||||
|
||||
func (t *dropCollectionTask) Execute() error {
|
||||
if t.req == nil {
|
||||
return errors.New("null request")
|
||||
}
|
||||
|
||||
collectionName := t.req.CollectionName
|
||||
collectionMeta, err := t.mt.GetCollectionByName(collectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
collectionID := collectionMeta.ID
|
||||
|
||||
err = t.mt.DeleteCollection(collectionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// before drop collection in segment manager, if segment manager receive a time tick from write node,
|
||||
// maybe this collection can not be found in meta table.
|
||||
if err = t.segManager.DropCollection(collectionID); err != nil {
|
||||
return err
|
||||
}
|
||||
ts, err := t.Ts()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgPack := ms.MsgPack{}
|
||||
baseMsg := ms.BaseMsg{
|
||||
BeginTimestamp: ts,
|
||||
EndTimestamp: ts,
|
||||
HashValues: []uint32{0},
|
||||
}
|
||||
|
||||
dropReq := internalpb2.DropCollectionRequest{
|
||||
Base: t.req.Base,
|
||||
DbName: "",
|
||||
CollectionName: t.req.CollectionName,
|
||||
DbID: 0,
|
||||
CollectionID: collectionID,
|
||||
}
|
||||
timeTickMsg := &ms.DropCollectionMsg{
|
||||
BaseMsg: baseMsg,
|
||||
DropCollectionRequest: dropReq,
|
||||
}
|
||||
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
|
||||
return t.sch.ddMsgStream.Broadcast(&msgPack)
|
||||
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
func (t *hasCollectionTask) Type() commonpb.MsgType {
|
||||
if t.req == nil {
|
||||
log.Printf("null request")
|
||||
return 0
|
||||
}
|
||||
return t.req.Base.MsgType
|
||||
}
|
||||
|
||||
func (t *hasCollectionTask) Ts() (Timestamp, error) {
|
||||
if t.req == nil {
|
||||
return 0, errors.New("null request")
|
||||
}
|
||||
return t.req.Base.Timestamp, nil
|
||||
}
|
||||
|
||||
func (t *hasCollectionTask) Execute() error {
|
||||
if t.req == nil {
|
||||
return errors.New("null request")
|
||||
}
|
||||
|
||||
collectionName := t.req.CollectionName
|
||||
_, err := t.mt.GetCollectionByName(collectionName)
|
||||
if err == nil {
|
||||
t.hasCollection = true
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
func (t *describeCollectionTask) Type() commonpb.MsgType {
|
||||
if t.req == nil {
|
||||
log.Printf("null request")
|
||||
return 0
|
||||
}
|
||||
return t.req.Base.MsgType
|
||||
}
|
||||
|
||||
func (t *describeCollectionTask) Ts() (Timestamp, error) {
|
||||
if t.req == nil {
|
||||
return 0, errors.New("null request")
|
||||
}
|
||||
return t.req.Base.Timestamp, nil
|
||||
}
|
||||
|
||||
func (t *describeCollectionTask) filterSchema() error {
|
||||
// remove system field
|
||||
var newFields []*schemapb.FieldSchema
|
||||
for _, fieldMeta := range t.description.Schema.Fields {
|
||||
fieldID := fieldMeta.FieldID
|
||||
// todo not hardcode
|
||||
if fieldID < 100 {
|
||||
continue
|
||||
}
|
||||
newFields = append(newFields, fieldMeta)
|
||||
}
|
||||
t.description.Schema.Fields = newFields
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *describeCollectionTask) Execute() error {
|
||||
if t.req == nil {
|
||||
return errors.New("null request")
|
||||
}
|
||||
|
||||
collectionName := t.req.CollectionName
|
||||
collection, err := t.mt.GetCollectionByName(collectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cloneSchema := proto.Clone(collection.Schema)
|
||||
t.description.Schema = cloneSchema.(*schemapb.CollectionSchema)
|
||||
return t.filterSchema()
|
||||
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
func (t *showCollectionsTask) Type() commonpb.MsgType {
|
||||
if t.req == nil {
|
||||
log.Printf("null request")
|
||||
return 0
|
||||
}
|
||||
return t.req.Base.MsgType
|
||||
}
|
||||
|
||||
func (t *showCollectionsTask) Ts() (Timestamp, error) {
|
||||
if t.req == nil {
|
||||
return 0, errors.New("null request")
|
||||
}
|
||||
return t.req.Base.Timestamp, nil
|
||||
}
|
||||
|
||||
func (t *showCollectionsTask) Execute() error {
|
||||
if t.req == nil {
|
||||
return errors.New("null request")
|
||||
}
|
||||
|
||||
colls, err := t.mt.ListCollections()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.stringListResponse.CollectionNames = colls
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,111 +0,0 @@
|
|||
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software distributed under the License
|
||||
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
|
||||
# or implied. See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
master: # 21
|
||||
address: localhost
|
||||
port: 53100
|
||||
pulsarmoniterinterval: 1
|
||||
pulsartopic: "monitor-topic"
|
||||
|
||||
proxyidlist: [1, 2]
|
||||
proxyTimeSyncChannels: ["proxy1", "proxy2"]
|
||||
proxyTimeSyncSubName: "proxy-topic"
|
||||
softTimeTickBarrierInterval: 500
|
||||
|
||||
writeidlist: [3, 4]
|
||||
writeTimeSyncChannels: ["write3", "write4"]
|
||||
writeTimeSyncSubName: "write-topic"
|
||||
|
||||
dmTimeSyncChannels: ["dm5", "dm6"]
|
||||
k2sTimeSyncChannels: ["k2s7", "k2s8"]
|
||||
|
||||
defaultSizePerRecord: 1024
|
||||
minimumAssignSize: 1048576
|
||||
segmentThreshold: 536870912
|
||||
segmentExpireDuration: 2000
|
||||
segmentThresholdFactor: 0.75
|
||||
querynodenum: 1
|
||||
writenodenum: 1
|
||||
statsChannels: "statistic"
|
||||
|
||||
etcd: # 4
|
||||
address: localhost
|
||||
port: 2379
|
||||
rootpath: by-dev
|
||||
segthreshold: 10000
|
||||
|
||||
timesync: # 1
|
||||
interval: 400
|
||||
|
||||
storage: # 5
|
||||
driver: TIKV
|
||||
address: localhost
|
||||
port: 2379
|
||||
accesskey:
|
||||
secretkey:
|
||||
|
||||
pulsar: # 6
|
||||
authentication: false
|
||||
user: user-default
|
||||
token: eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY
|
||||
address: localhost
|
||||
port: 6650
|
||||
topicnum: 128
|
||||
|
||||
reader: # 7
|
||||
clientid: 0
|
||||
stopflag: -1
|
||||
readerqueuesize: 10000
|
||||
searchchansize: 10000
|
||||
key2segchansize: 10000
|
||||
topicstart: 0
|
||||
topicend: 128
|
||||
|
||||
writer: # 8
|
||||
clientid: 0
|
||||
stopflag: -2
|
||||
readerqueuesize: 10000
|
||||
searchbyidchansize: 10000
|
||||
parallelism: 100
|
||||
topicstart: 0
|
||||
topicend: 128
|
||||
bucket: "zilliz-hz"
|
||||
|
||||
proxy: # 21
|
||||
timezone: UTC+8
|
||||
proxy_id: 1
|
||||
numReaderNodes: 2
|
||||
tsoSaveInterval: 200
|
||||
timeTickInterval: 200
|
||||
|
||||
pulsarTopics:
|
||||
readerTopicPrefix: "milvusReader"
|
||||
numReaderTopics: 2
|
||||
deleteTopic: "milvusDeleter"
|
||||
queryTopic: "milvusQuery"
|
||||
resultTopic: "milvusResult"
|
||||
resultGroup: "milvusResultGroup"
|
||||
timeTickTopic: "milvusTimeTick"
|
||||
|
||||
network:
|
||||
address: 0.0.0.0
|
||||
port: 19530
|
||||
|
||||
logs:
|
||||
level: debug
|
||||
trace.enable: true
|
||||
path: /tmp/logs
|
||||
max_log_file_size: 1024MB
|
||||
log_rotate_num: 0
|
||||
|
||||
storage:
|
||||
path: /var/lib/milvus
|
||||
auto_flush_interval: 1
|
|
@@ -1,13 +0,0 @@
package master

// system filed id:
// 0: unique row id
// 1: timestamp
// 100: first user field id
// 101: second user field id
// 102: ...

const (
	RowIDField     = 0
	TimeStampField = 1
)
@ -1,173 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
)
|
||||
|
||||
type FlushScheduler struct {
|
||||
client WriteNodeClient
|
||||
metaTable *metaTable
|
||||
segmentFlushChan chan UniqueID
|
||||
segmentDescribeChan chan UniqueID
|
||||
indexBuilderSch persistenceScheduler
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
globalTSOAllocator func() (Timestamp, error)
|
||||
}
|
||||
|
||||
func NewFlushScheduler(ctx context.Context, client WriteNodeClient, metaTable *metaTable, buildScheduler *IndexBuildScheduler, globalTSOAllocator func() (Timestamp, error)) *FlushScheduler {
|
||||
ctx2, cancel := context.WithCancel(ctx)
|
||||
|
||||
return &FlushScheduler{
|
||||
client: client,
|
||||
metaTable: metaTable,
|
||||
indexBuilderSch: buildScheduler,
|
||||
segmentFlushChan: make(chan UniqueID, 100),
|
||||
segmentDescribeChan: make(chan UniqueID, 100),
|
||||
ctx: ctx2,
|
||||
cancel: cancel,
|
||||
globalTSOAllocator: globalTSOAllocator,
|
||||
}
|
||||
}
|
||||
|
||||
func (scheduler *FlushScheduler) schedule(id interface{}) error {
|
||||
segmentID := id.(UniqueID)
|
||||
segmentMeta, err := scheduler.metaTable.GetSegmentByID(segmentID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ts, err := scheduler.globalTSOAllocator()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// todo set corrent timestamp
|
||||
err = scheduler.client.FlushSegment(segmentID, segmentMeta.CollectionID, segmentMeta.PartitionTag, ts)
|
||||
if err != nil {
|
||||
log.Println("flushsegment: ", segmentID, " error :", err.Error())
|
||||
return err
|
||||
}
|
||||
//log.Printf("flush segment %d", segmentID)
|
||||
scheduler.segmentDescribeChan <- segmentID
|
||||
|
||||
return nil
|
||||
}
|
||||
func (scheduler *FlushScheduler) describe() error {
|
||||
timeTick := time.Tick(100 * time.Millisecond)
|
||||
descTasks := make(map[UniqueID]bool)
|
||||
closable := make([]UniqueID, 0)
|
||||
for {
|
||||
select {
|
||||
case <-scheduler.ctx.Done():
|
||||
{
|
||||
log.Printf("broadcast context done, exit")
|
||||
return errors.New("broadcast done exit")
|
||||
}
|
||||
case <-timeTick:
|
||||
for singleSegmentID := range descTasks {
|
||||
description, err := scheduler.client.DescribeSegment(singleSegmentID)
|
||||
if err != nil {
|
||||
log.Printf("describe segment %d err %s", singleSegmentID, err.Error())
|
||||
continue
|
||||
}
|
||||
if !description.IsClosed {
|
||||
//log.Println("describe segment ", singleSegmentID, " IsClosed :False")
|
||||
continue
|
||||
}
|
||||
|
||||
log.Printf("flush segment %d is closed", singleSegmentID)
|
||||
mapData, err := scheduler.client.GetInsertBinlogPaths(singleSegmentID)
|
||||
if err != nil {
|
||||
log.Printf("get insert binlog paths err, segID: %d, err: %s", singleSegmentID, err.Error())
|
||||
continue
|
||||
}
|
||||
segMeta, err := scheduler.metaTable.GetSegmentByID(singleSegmentID)
|
||||
if err != nil {
|
||||
log.Printf("get segment from metable failed, segID: %d, err: %s", singleSegmentID, err.Error())
|
||||
continue
|
||||
}
|
||||
for fieldID, data := range mapData {
|
||||
// check field indexable
|
||||
indexable, err := scheduler.metaTable.IsIndexable(segMeta.CollectionID, fieldID)
|
||||
if err != nil {
|
||||
log.Printf("check field indexable from meta table failed, collID: %d, fieldID: %d, err %s", segMeta.CollectionID, fieldID, err.Error())
|
||||
continue
|
||||
}
|
||||
if !indexable {
|
||||
continue
|
||||
}
|
||||
info := &IndexBuildInfo{
|
||||
segmentID: singleSegmentID,
|
||||
fieldID: fieldID,
|
||||
binlogFilePath: data,
|
||||
}
|
||||
err = scheduler.indexBuilderSch.Enqueue(info)
|
||||
log.Printf("segment %d field %d enqueue build index scheduler", singleSegmentID, fieldID)
|
||||
if err != nil {
|
||||
log.Printf("index build enqueue failed, %s", err.Error())
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Save data to meta table
|
||||
segMeta.BinlogFilePaths = make([]*etcdpb.FieldBinlogFiles, 0)
|
||||
for k, v := range mapData {
|
||||
segMeta.BinlogFilePaths = append(segMeta.BinlogFilePaths, &etcdpb.FieldBinlogFiles{
|
||||
FieldID: k,
|
||||
BinlogFiles: v,
|
||||
})
|
||||
}
|
||||
if err = scheduler.metaTable.UpdateSegment(segMeta); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("flush segment %d finished", singleSegmentID)
|
||||
closable = append(closable, singleSegmentID)
|
||||
}
|
||||
|
||||
// remove closed segment and clear closable
|
||||
for _, segID := range closable {
|
||||
delete(descTasks, segID)
|
||||
}
|
||||
closable = closable[:0]
|
||||
case segID := <-scheduler.segmentDescribeChan:
|
||||
descTasks[segID] = false
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (scheduler *FlushScheduler) scheduleLoop() {
|
||||
for {
|
||||
select {
|
||||
case id := <-scheduler.segmentFlushChan:
|
||||
err := scheduler.schedule(id)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
case <-scheduler.ctx.Done():
|
||||
log.Print("server is closed, exit flush scheduler loop")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (scheduler *FlushScheduler) Enqueue(id interface{}) error {
|
||||
scheduler.segmentFlushChan <- id.(UniqueID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (scheduler *FlushScheduler) Start() error {
|
||||
go scheduler.scheduleLoop()
|
||||
go scheduler.describe()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (scheduler *FlushScheduler) Close() {
|
||||
scheduler.cancel()
|
||||
}
|
|
@ -1,118 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"log"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
"github.com/zilliztech/milvus-distributed/internal/kv"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Allocator is a Timestamp Oracle allocator.
|
||||
type Allocator interface {
|
||||
// Initialize is used to initialize a TSO allocator.
|
||||
// It will synchronize TSO with etcd and initialize the
|
||||
// memory for later allocation work.
|
||||
Initialize() error
|
||||
// UpdateTSO is used to update the TSO in memory and the time window in etcd.
|
||||
UpdateTSO() error
|
||||
// SetTSO sets the physical part with given tso. It's mainly used for BR restore
|
||||
// and can not forcibly set the TSO smaller than now.
|
||||
SetTSO(tso uint64) error
|
||||
// GenerateTSO is used to generate a given number of TSOs.
|
||||
// Make sure you have initialized the TSO allocator before calling.
|
||||
GenerateTSO(count uint32) (uint64, error)
|
||||
// Reset is used to reset the TSO allocator.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// GlobalTSOAllocator is the global single point TSO allocator.
|
||||
type GlobalTSOAllocator struct {
|
||||
tso *timestampOracle
|
||||
}
|
||||
|
||||
// NewGlobalTSOAllocator creates a new global TSO allocator.
|
||||
func NewGlobalTSOAllocator(key string, kvBase kv.TxnBase) *GlobalTSOAllocator {
|
||||
var saveInterval = 3 * time.Second
|
||||
return &GlobalTSOAllocator{
|
||||
tso: ×tampOracle{
|
||||
kvBase: kvBase,
|
||||
saveInterval: saveInterval,
|
||||
maxResetTSGap: func() time.Duration { return 3 * time.Second },
|
||||
key: key,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize will initialize the created global TSO allocator.
|
||||
func (gta *GlobalTSOAllocator) Initialize() error {
|
||||
return gta.tso.InitTimestamp()
|
||||
}
|
||||
|
||||
// UpdateTSO is used to update the TSO in memory and the time window in etcd.
|
||||
func (gta *GlobalTSOAllocator) UpdateTSO() error {
|
||||
return gta.tso.UpdateTimestamp()
|
||||
}
|
||||
|
||||
// SetTSO sets the physical part with given tso.
|
||||
func (gta *GlobalTSOAllocator) SetTSO(tso uint64) error {
|
||||
return gta.tso.ResetUserTimestamp(tso)
|
||||
}
|
||||
|
||||
// GenerateTSO is used to generate a given number of TSOs.
|
||||
// Make sure you have initialized the TSO allocator before calling.
|
||||
func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (uint64, error) {
|
||||
var physical, logical int64
|
||||
if count == 0 {
|
||||
return 0, errors.New("tso count should be positive")
|
||||
}
|
||||
|
||||
maxRetryCount := 10
|
||||
|
||||
for i := 0; i < maxRetryCount; i++ {
|
||||
current := (*atomicObject)(atomic.LoadPointer(>a.tso.TSO))
|
||||
if current == nil || current.physical.Equal(typeutil.ZeroTime) {
|
||||
// If it's leader, maybe SyncTimestamp hasn't completed yet
|
||||
log.Println("sync hasn't completed yet, wait for a while")
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
|
||||
physical = current.physical.UnixNano() / int64(time.Millisecond)
|
||||
logical = atomic.AddInt64(¤t.logical, int64(count))
|
||||
if logical >= maxLogical {
|
||||
log.Println("logical part outside of max logical interval, please check ntp time",
|
||||
zap.Int("retry-count", i))
|
||||
time.Sleep(UpdateTimestampStep)
|
||||
continue
|
||||
}
|
||||
return tsoutil.ComposeTS(physical, logical), nil
|
||||
}
|
||||
return 0, errors.New("can not get timestamp")
|
||||
}
|
||||
|
||||
func (gta *GlobalTSOAllocator) Alloc(count uint32) (typeutil.Timestamp, error) {
|
||||
//return gta.tso.SyncTimestamp()
|
||||
start, err := gta.GenerateTSO(count)
|
||||
if err != nil {
|
||||
return typeutil.ZeroTimestamp, err
|
||||
}
|
||||
//ret := make([]typeutil.Timestamp, count)
|
||||
//for i:=uint32(0); i < count; i++{
|
||||
// ret[i] = start + uint64(i)
|
||||
//}
|
||||
return start, err
|
||||
}
|
||||
|
||||
func (gta *GlobalTSOAllocator) AllocOne() (typeutil.Timestamp, error) {
|
||||
return gta.GenerateTSO(1)
|
||||
}
|
||||
|
||||
// Reset is used to reset the TSO allocator.
|
||||
func (gta *GlobalTSOAllocator) Reset() {
|
||||
gta.tso.ResetTimestamp()
|
||||
}
|
|
@ -1,79 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
|
||||
)
|
||||
|
||||
var gTestTsoAllocator Allocator
|
||||
var gTestIDAllocator *GlobalIDAllocator
|
||||
|
||||
func TestGlobalTSOAllocator_All(t *testing.T) {
|
||||
Init()
|
||||
gTestTsoAllocator = NewGlobalTSOAllocator("timestamp", tsoutil.NewTSOKVBase([]string{Params.EtcdAddress}, "/test/root/kv", "tso"))
|
||||
gTestIDAllocator = NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{Params.EtcdAddress}, "/test/root/kv", "gid"))
|
||||
|
||||
t.Run("Initialize", func(t *testing.T) {
|
||||
err := gTestTsoAllocator.Initialize()
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
|
||||
t.Run("GenerateTSO", func(t *testing.T) {
|
||||
count := 1000
|
||||
perCount := uint32(100)
|
||||
startTs, err := gTestTsoAllocator.GenerateTSO(perCount)
|
||||
assert.Nil(t, err)
|
||||
lastPhysical, lastLogical := tsoutil.ParseTS(startTs)
|
||||
for i := 0; i < count; i++ {
|
||||
ts, _ := gTestTsoAllocator.GenerateTSO(perCount)
|
||||
physical, logical := tsoutil.ParseTS(ts)
|
||||
if lastPhysical.Equal(physical) {
|
||||
diff := logical - lastLogical
|
||||
assert.Equal(t, uint64(perCount), diff)
|
||||
}
|
||||
lastPhysical, lastLogical = physical, logical
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("SetTSO", func(t *testing.T) {
|
||||
curTime := time.Now()
|
||||
nextTime := curTime.Add(2 * time.Second)
|
||||
physical := nextTime.UnixNano() / int64(time.Millisecond)
|
||||
logical := int64(0)
|
||||
err := gTestTsoAllocator.SetTSO(tsoutil.ComposeTS(physical, logical))
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
|
||||
t.Run("UpdateTSO", func(t *testing.T) {
|
||||
err := gTestTsoAllocator.UpdateTSO()
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Reset", func(t *testing.T) {
|
||||
gTestTsoAllocator.Reset()
|
||||
})
|
||||
|
||||
t.Run("Initialize", func(t *testing.T) {
|
||||
err := gTestIDAllocator.Initialize()
|
||||
assert.Nil(t, err)
|
||||
})
|
||||
|
||||
t.Run("AllocOne", func(t *testing.T) {
|
||||
one, err := gTestIDAllocator.AllocOne()
|
||||
assert.Nil(t, err)
|
||||
ano, err := gTestIDAllocator.AllocOne()
|
||||
assert.Nil(t, err)
|
||||
assert.NotEqual(t, one, ano)
|
||||
})
|
||||
|
||||
t.Run("Alloc", func(t *testing.T) {
|
||||
count := uint32(2 << 10)
|
||||
idStart, idEnd, err := gTestIDAllocator.Alloc(count)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, count, uint32(idEnd-idStart))
|
||||
})
|
||||
|
||||
}
|
|
@ -1,501 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
)
|
||||
|
||||
const slowThreshold = 5 * time.Millisecond
|
||||
|
||||
func (s *Master) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
|
||||
var t task = &createCollectionTask{
|
||||
req: in,
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
}
|
||||
|
||||
response := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
}
|
||||
|
||||
var err = s.scheduler.Enqueue(t)
|
||||
if err != nil {
|
||||
response.Reason = "Enqueue failed: " + err.Error()
|
||||
return response, nil
|
||||
}
|
||||
|
||||
err = t.WaitToFinish(ctx)
|
||||
if err != nil {
|
||||
response.Reason = "Create collection failed: " + err.Error()
|
||||
return response, nil
|
||||
}
|
||||
|
||||
response.ErrorCode = commonpb.ErrorCode_SUCCESS
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (s *Master) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
|
||||
var t task = &dropCollectionTask{
|
||||
req: in,
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
segManager: s.segmentManager,
|
||||
}
|
||||
|
||||
response := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
}
|
||||
|
||||
var err = s.scheduler.Enqueue(t)
|
||||
if err != nil {
|
||||
response.Reason = "Enqueue failed: " + err.Error()
|
||||
return response, nil
|
||||
}
|
||||
|
||||
err = t.WaitToFinish(ctx)
|
||||
if err != nil {
|
||||
response.Reason = "Drop collection failed: " + err.Error()
|
||||
return response, nil
|
||||
}
|
||||
|
||||
response.ErrorCode = commonpb.ErrorCode_SUCCESS
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (s *Master) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
|
||||
var t task = &hasCollectionTask{
|
||||
req: in,
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
hasCollection: false,
|
||||
}
|
||||
|
||||
st := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
}
|
||||
|
||||
response := &milvuspb.BoolResponse{
|
||||
Status: st,
|
||||
Value: false,
|
||||
}
|
||||
|
||||
var err = s.scheduler.Enqueue(t)
|
||||
if err != nil {
|
||||
st.Reason = "Enqueue failed: " + err.Error()
|
||||
return response, nil
|
||||
}
|
||||
|
||||
err = t.WaitToFinish(ctx)
|
||||
if err != nil {
|
||||
st.Reason = "Has collection failed: " + err.Error()
|
||||
return response, nil
|
||||
}
|
||||
|
||||
st.ErrorCode = commonpb.ErrorCode_SUCCESS
|
||||
response.Value = t.(*hasCollectionTask).hasCollection
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (s *Master) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
|
||||
var t task = &describeCollectionTask{
|
||||
req: in,
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
description: nil,
|
||||
}
|
||||
|
||||
response := &milvuspb.DescribeCollectionResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
},
|
||||
Schema: nil,
|
||||
}
|
||||
|
||||
t.(*describeCollectionTask).description = response
|
||||
|
||||
var err = s.scheduler.Enqueue(t)
|
||||
if err != nil {
|
||||
response.Status.Reason = "Enqueue failed: " + err.Error()
|
||||
return response, nil
|
||||
}
|
||||
|
||||
err = t.WaitToFinish(ctx)
|
||||
if err != nil {
|
||||
response.Status.Reason = "Describe collection failed: " + err.Error()
|
||||
return response, nil
|
||||
}
|
||||
|
||||
response.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (s *Master) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
|
||||
var t task = &showCollectionsTask{
|
||||
req: in,
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
stringListResponse: nil,
|
||||
}
|
||||
|
||||
response := &milvuspb.ShowCollectionResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: "",
|
||||
},
|
||||
CollectionNames: nil,
|
||||
}
|
||||
|
||||
t.(*showCollectionsTask).stringListResponse = response
|
||||
|
||||
var err = s.scheduler.Enqueue(t)
|
||||
if err != nil {
|
||||
response.Status.Reason = "Enqueue filed: " + err.Error()
|
||||
return response, nil
|
||||
}
|
||||
|
||||
err = t.WaitToFinish(ctx)
|
||||
if err != nil {
|
||||
response.Status.Reason = "Show Collections failed: " + err.Error()
|
||||
return response, nil
|
||||
}
|
||||
|
||||
response.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
|
||||
return response, nil
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
func (s *Master) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
|
||||
var t task = &createPartitionTask{
|
||||
req: in,
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
}
|
||||
|
||||
var err = s.scheduler.Enqueue(t)
|
||||
if err != nil {
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: "Enqueue failed",
|
||||
}, nil
|
||||
}
|
||||
|
||||
err = t.WaitToFinish(ctx)
|
||||
if err != nil {
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: "WaitToFinish failed",
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Master) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
|
||||
var t task = &dropPartitionTask{
|
||||
req: in,
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
}
|
||||
|
||||
var err = s.scheduler.Enqueue(t)
|
||||
if err != nil {
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: "Enqueue failed",
|
||||
}, nil
|
||||
}
|
||||
|
||||
err = t.WaitToFinish(ctx)
|
||||
if err != nil {
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: "WaitToFinish failed",
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Master) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
|
||||
var t task = &hasPartitionTask{
|
||||
req: in,
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
hasPartition: false,
|
||||
}
|
||||
|
||||
var err = s.scheduler.Enqueue(t)
|
||||
if err != nil {
|
||||
return &milvuspb.BoolResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: "Enqueue failed",
|
||||
},
|
||||
Value: t.(*hasPartitionTask).hasPartition,
|
||||
}, nil
|
||||
}
|
||||
|
||||
err = t.WaitToFinish(ctx)
|
||||
if err != nil {
|
||||
return &milvuspb.BoolResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: err.Error(),
|
||||
},
|
||||
Value: t.(*hasPartitionTask).hasPartition,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &milvuspb.BoolResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
},
|
||||
Value: t.(*hasPartitionTask).hasPartition,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Master) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
|
||||
var t task = &showPartitionTask{
|
||||
req: in,
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
resp: nil,
|
||||
}
|
||||
|
||||
var err = s.scheduler.Enqueue(t)
|
||||
if err != nil {
|
||||
return &milvuspb.ShowPartitionResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: "Enqueue failed",
|
||||
},
|
||||
PartitionNames: nil,
|
||||
}, nil
|
||||
}
|
||||
|
||||
err = t.WaitToFinish(ctx)
|
||||
if err != nil {
|
||||
return &milvuspb.ShowPartitionResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: "WaitToFinish failed",
|
||||
},
|
||||
PartitionNames: nil,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return t.(*showPartitionTask).resp, nil
|
||||
}
|
||||
|
||||
//----------------------------------------Internal GRPC Service--------------------------------
|
||||
|
||||
func (s *Master) AllocTimestamp(ctx context.Context, request *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
|
||||
count := request.GetCount()
|
||||
ts, err := s.tsoAllocator.Alloc(count)
|
||||
|
||||
if err != nil {
|
||||
return &masterpb.TsoResponse{
|
||||
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
|
||||
}, nil
|
||||
}
|
||||
|
||||
response := &masterpb.TsoResponse{
|
||||
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS},
|
||||
Timestamp: ts,
|
||||
Count: count,
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (s *Master) AllocID(ctx context.Context, request *masterpb.IDRequest) (*masterpb.IDResponse, error) {
|
||||
count := request.GetCount()
|
||||
id, err := s.idAllocator.AllocOne()
|
||||
|
||||
if err != nil {
|
||||
return &masterpb.IDResponse{
|
||||
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
|
||||
}, nil
|
||||
}
|
||||
|
||||
response := &masterpb.IDResponse{
|
||||
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS},
|
||||
ID: id,
|
||||
Count: count,
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (s *Master) AssignSegmentID(ctx context.Context, request *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error) {
|
||||
segInfos, _ := s.segmentManager.AssignSegment(request.SegIDRequests)
|
||||
return &datapb.AssignSegIDResponse{
|
||||
SegIDAssignments: segInfos,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Master) CreateIndex(ctx context.Context, req *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
|
||||
ret := &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
}
|
||||
task := &createIndexTask{
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
req: req,
|
||||
indexBuildScheduler: s.indexBuildSch,
|
||||
indexLoadScheduler: s.indexLoadSch,
|
||||
segManager: s.segmentManager,
|
||||
}
|
||||
|
||||
err := s.scheduler.Enqueue(task)
|
||||
if err != nil {
|
||||
ret.Reason = "Enqueue failed: " + err.Error()
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
err = task.WaitToFinish(ctx)
|
||||
if err != nil {
|
||||
ret.Reason = "Create Index error: " + err.Error()
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
ret.ErrorCode = commonpb.ErrorCode_SUCCESS
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (s *Master) DescribeIndex(ctx context.Context, req *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
|
||||
resp := &milvuspb.DescribeIndexResponse{
|
||||
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
|
||||
//CollectionName: req.CollectionName,
|
||||
//FieldName: req.FieldName,
|
||||
}
|
||||
//resp.
|
||||
task := &describeIndexTask{
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
req: req,
|
||||
resp: resp,
|
||||
}
|
||||
|
||||
if err := s.scheduler.Enqueue(task); err != nil {
|
||||
task.resp.Status.Reason = fmt.Sprintf("Enqueue failed: %s", err.Error())
|
||||
return task.resp, nil
|
||||
}
|
||||
|
||||
if err := task.WaitToFinish(ctx); err != nil {
|
||||
task.resp.Status.Reason = fmt.Sprintf("Describe Index failed: %s", err.Error())
|
||||
return task.resp, nil
|
||||
}
|
||||
|
||||
resp.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
|
||||
return task.resp, nil
|
||||
|
||||
}
|
||||
|
||||
func (s *Master) GetIndexState(ctx context.Context, req *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error) {
|
||||
resp := &milvuspb.IndexStateResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
},
|
||||
State: commonpb.IndexState_NONE,
|
||||
}
|
||||
task := &getIndexStateTask{
|
||||
baseTask: baseTask{
|
||||
sch: s.scheduler,
|
||||
mt: s.metaTable,
|
||||
cv: make(chan error),
|
||||
},
|
||||
req: req,
|
||||
resp: resp,
|
||||
runtimeStats: s.runtimeStats,
|
||||
}
|
||||
|
||||
if err := s.scheduler.Enqueue(task); err != nil {
|
||||
task.resp.Status.Reason = "Enqueue failed :" + err.Error()
|
||||
return task.resp, nil
|
||||
}
|
||||
|
||||
if err := task.WaitToFinish(ctx); err != nil {
|
||||
resp.Status.Reason = "Describe index progress failed:" + err.Error()
|
||||
return task.resp, nil
|
||||
}
|
||||
|
||||
task.resp.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
|
||||
return task.resp, nil
|
||||
}
|
||||
|
||||
func (s *Master) GetCollectionStatistics(ctx context.Context, request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (s *Master) GetPartitionStatistics(ctx context.Context, request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (s *Master) GetComponentStatesRPC(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ComponentStates, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (s *Master) GetTimeTickChannelRPC(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (s *Master) GetStatisticsChannelRPC(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (s *Master) DescribeSegment(ctx context.Context, request *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (s *Master) ShowSegments(ctx context.Context, request *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (s *Master) GetDdChannelRPC(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
|
||||
panic("implement me")
|
||||
}
|
|
@@ -1,52 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"github.com/zilliztech/milvus-distributed/internal/kv"
|
||||
)
|
||||
|
||||
type IDAllocator interface {
|
||||
Alloc(count uint32) (UniqueID, UniqueID, error)
|
||||
AllocOne() (UniqueID, error)
|
||||
UpdateID() error
|
||||
}
|
||||
|
||||
// GlobalIDAllocator is the global single-point ID allocator.
|
||||
type GlobalIDAllocator struct {
|
||||
allocator Allocator
|
||||
}
|
||||
|
||||
func NewGlobalIDAllocator(key string, base kv.TxnBase) *GlobalIDAllocator {
|
||||
return &GlobalIDAllocator{
|
||||
allocator: NewGlobalTSOAllocator(key, base),
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize initializes the underlying TSO-backed allocator.
|
||||
func (gia *GlobalIDAllocator) Initialize() error {
|
||||
return gia.allocator.Initialize()
|
||||
}
|
||||
|
||||
// Alloc allocates a contiguous block of IDs and returns the half-open range [idStart, idEnd).
// Make sure the allocator has been initialized before calling.
|
||||
func (gia *GlobalIDAllocator) Alloc(count uint32) (UniqueID, UniqueID, error) {
|
||||
timestamp, err := gia.allocator.GenerateTSO(count)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
idStart := UniqueID(timestamp)
|
||||
idEnd := idStart + int64(count)
|
||||
return idStart, idEnd, nil
|
||||
}
|
||||
|
||||
func (gia *GlobalIDAllocator) AllocOne() (UniqueID, error) {
|
||||
timestamp, err := gia.allocator.GenerateTSO(1)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
idStart := UniqueID(timestamp)
|
||||
return idStart, nil
|
||||
}
|
||||
|
||||
func (gia *GlobalIDAllocator) UpdateID() error {
|
||||
return gia.allocator.UpdateTSO()
|
||||
}
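The removed allocator hands out IDs in blocks: Alloc(count) reserves count IDs and returns the half-open range [idStart, idEnd). The sketch below is only an in-memory illustration of that contract, not the etcd/TSO-backed implementation being deleted; the type and field names are made up.

package main

import (
	"fmt"
	"sync"
)

// localIDAllocator is a hypothetical, in-memory stand-in used to show the
// [idStart, idEnd) contract of the removed GlobalIDAllocator.
type localIDAllocator struct {
	mu   sync.Mutex
	next int64
}

// Alloc reserves count IDs and returns the half-open range [idStart, idEnd).
func (a *localIDAllocator) Alloc(count uint32) (int64, int64, error) {
	a.mu.Lock()
	defer a.mu.Unlock()
	start := a.next
	a.next += int64(count)
	return start, a.next, nil
}

// AllocOne reserves a single ID.
func (a *localIDAllocator) AllocOne() (int64, error) {
	start, _, err := a.Alloc(1)
	return start, err
}

func main() {
	alloc := &localIDAllocator{next: 1000}
	start, end, _ := alloc.Alloc(3)
	for id := start; id < end; id++ {
		fmt.Println("assigned id:", id) // 1000, 1001, 1002
	}
}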
|
|
@@ -1,297 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
)
|
||||
|
||||
type IndexBuildInfo struct {
|
||||
segmentID UniqueID
|
||||
fieldID UniqueID
|
||||
binlogFilePath []string
|
||||
}
|
||||
type IndexBuildChannelInfo struct {
|
||||
id UniqueID
|
||||
info *IndexBuildInfo
|
||||
indexParams []*commonpb.KeyValuePair
|
||||
}
|
||||
|
||||
type IndexBuildScheduler struct {
|
||||
client BuildIndexClient
|
||||
metaTable *metaTable
|
||||
indexBuildChan chan *IndexBuildInfo
|
||||
indexLoadSch persistenceScheduler
|
||||
indexDescribeID chan UniqueID
|
||||
indexDescribe chan *IndexBuildChannelInfo
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
func NewIndexBuildScheduler(ctx context.Context, client BuildIndexClient, metaTable *metaTable, indexLoadScheduler *IndexLoadScheduler) *IndexBuildScheduler {
|
||||
ctx2, cancel := context.WithCancel(ctx)
|
||||
|
||||
return &IndexBuildScheduler{
|
||||
client: client,
|
||||
metaTable: metaTable,
|
||||
indexLoadSch: indexLoadScheduler,
|
||||
indexBuildChan: make(chan *IndexBuildInfo, 100),
|
||||
indexDescribe: make(chan *IndexBuildChannelInfo, 100),
|
||||
ctx: ctx2,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
func (scheduler *IndexBuildScheduler) schedule(info interface{}) error {
|
||||
indexBuildInfo := info.(*IndexBuildInfo)
|
||||
segMeta, err := scheduler.metaTable.GetSegmentByID(indexBuildInfo.segmentID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// parse index params
|
||||
typeParams, err := scheduler.metaTable.GetFieldTypeParams(segMeta.CollectionID, indexBuildInfo.fieldID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
indexParams, err := scheduler.metaTable.GetFieldIndexParams(segMeta.CollectionID, indexBuildInfo.fieldID)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
typeParamsMap := make(map[string]string)
|
||||
indexParamsMap := make(map[string]string)
|
||||
for _, kv := range typeParams {
|
||||
typeParamsMap[kv.Key] = kv.Value
|
||||
}
|
||||
for _, kv := range indexParams {
|
||||
indexParamsMap[kv.Key] = kv.Value
|
||||
}
|
||||
|
||||
parseMap := func(mStr string) (map[string]string, error) {
|
||||
buffer := make(map[string]interface{})
|
||||
err := json.Unmarshal([]byte(mStr), &buffer)
|
||||
if err != nil {
|
||||
return nil, errors.New("Unmarshal params failed")
|
||||
}
|
||||
ret := make(map[string]string)
|
||||
for key, value := range buffer {
|
||||
valueStr := fmt.Sprintf("%v", value)
|
||||
ret[key] = valueStr
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
var typeParamsKV []*commonpb.KeyValuePair
|
||||
for key := range typeParamsMap {
|
||||
if key == "params" {
|
||||
mapParams, err := parseMap(typeParamsMap[key])
|
||||
if err != nil {
|
||||
log.Println("parse params error: ", err)
|
||||
}
|
||||
for pk, pv := range mapParams {
|
||||
typeParamsKV = append(typeParamsKV, &commonpb.KeyValuePair{
|
||||
Key: pk,
|
||||
Value: pv,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
typeParamsKV = append(typeParamsKV, &commonpb.KeyValuePair{
|
||||
Key: key,
|
||||
Value: typeParamsMap[key],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var indexParamsKV []*commonpb.KeyValuePair
|
||||
for key := range indexParamsMap {
|
||||
if key == "params" {
|
||||
mapParams, err := parseMap(indexParamsMap[key])
|
||||
if err != nil {
|
||||
log.Println("parse params error: ", err)
|
||||
}
|
||||
for pk, pv := range mapParams {
|
||||
indexParamsKV = append(indexParamsKV, &commonpb.KeyValuePair{
|
||||
Key: pk,
|
||||
Value: pv,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
indexParamsKV = append(indexParamsKV, &commonpb.KeyValuePair{
|
||||
Key: key,
|
||||
Value: indexParamsMap[key],
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
requset := &indexpb.BuildIndexRequest{
|
||||
DataPaths: indexBuildInfo.binlogFilePath,
|
||||
TypeParams: typeParamsKV,
|
||||
IndexParams: indexParamsKV,
|
||||
}
|
||||
|
||||
indexResp, err := scheduler.client.BuildIndex(request)
|
||||
if err != nil {
|
||||
log.Printf("build index for segment %d field %d, failed:%s", indexBuildInfo.segmentID, indexBuildInfo.fieldID, err.Error())
|
||||
return err
|
||||
}
|
||||
indexID := indexResp.IndexID
|
||||
|
||||
err = scheduler.metaTable.AddFieldIndexMeta(&etcdpb.FieldIndexMeta{
|
||||
SegmentID: indexBuildInfo.segmentID,
|
||||
FieldID: indexBuildInfo.fieldID,
|
||||
IndexID: indexID,
|
||||
IndexParams: indexParams,
|
||||
State: commonpb.IndexState_NONE,
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("WARNING: " + err.Error())
|
||||
//return err
|
||||
}
|
||||
|
||||
scheduler.indexDescribe <- &IndexBuildChannelInfo{
|
||||
id: indexID,
|
||||
info: indexBuildInfo,
|
||||
indexParams: indexParams,
|
||||
}
|
||||
return nil
|
||||
}
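schedule() above copies the stored type and index params into KeyValuePair lists, expanding the JSON-encoded "params" entry into individual pairs. The sketch below reproduces just that flattening step on a made-up input map; kvPair is a placeholder for commonpb.KeyValuePair.

package main

import (
	"encoding/json"
	"fmt"
)

type kvPair struct{ Key, Value string }

// flattenParams expands the JSON-encoded "params" entry into individual
// pairs and copies every other entry through unchanged, mirroring the
// flattening done in the removed schedule().
func flattenParams(in map[string]string) ([]kvPair, error) {
	var out []kvPair
	for key, value := range in {
		if key != "params" {
			out = append(out, kvPair{key, value})
			continue
		}
		nested := map[string]interface{}{}
		if err := json.Unmarshal([]byte(value), &nested); err != nil {
			return nil, fmt.Errorf("unmarshal params failed: %w", err)
		}
		for nk, nv := range nested {
			out = append(out, kvPair{nk, fmt.Sprintf("%v", nv)})
		}
	}
	return out, nil
}

func main() {
	pairs, err := flattenParams(map[string]string{
		"dim":    "128",
		"params": `{"nlist": 1024}`,
	})
	fmt.Println(pairs, err) // e.g. [{dim 128} {nlist 1024}] <nil>
}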
|
||||
|
||||
func (scheduler *IndexBuildScheduler) describe() error {
|
||||
for {
|
||||
select {
|
||||
case <-scheduler.ctx.Done():
|
||||
{
|
||||
log.Printf("broadcast context done, exit")
|
||||
return errors.New("broadcast done exit")
|
||||
}
|
||||
case channelInfo := <-scheduler.indexDescribe:
|
||||
indexID := channelInfo.id
|
||||
indexBuildInfo := channelInfo.info
|
||||
for {
|
||||
indexIDs := []UniqueID{channelInfo.id}
|
||||
request := &indexpb.IndexStatesRequest{
|
||||
IndexIDs: indexIDs,
|
||||
}
|
||||
description, err := scheduler.client.GetIndexStates(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if description.States[0].State == commonpb.IndexState_FINISHED {
|
||||
log.Printf("build index for segment %d field %d is finished", indexBuildInfo.segmentID, indexBuildInfo.fieldID)
|
||||
request := &indexpb.IndexFilePathsRequest{
|
||||
IndexIDs: indexIDs,
|
||||
}
|
||||
|
||||
response, err := scheduler.client.GetIndexFilePaths(request)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var filePathsInfos [][]string
|
||||
for _, indexID := range indexIDs {
|
||||
for _, filePathInfo := range response.FilePaths {
|
||||
if indexID == filePathInfo.IndexID {
|
||||
filePathsInfos = append(filePathsInfos, filePathInfo.IndexFilePaths)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
filePaths := filePathsInfos[0]
|
||||
|
||||
//TODO: remove fileName
|
||||
var fieldName string
|
||||
segMeta := scheduler.metaTable.segID2Meta[indexBuildInfo.segmentID]
|
||||
collMeta := scheduler.metaTable.collID2Meta[segMeta.CollectionID]
|
||||
if collMeta.Schema != nil {
|
||||
for _, field := range collMeta.Schema.Fields {
|
||||
if field.FieldID == indexBuildInfo.fieldID {
|
||||
fieldName = field.Name
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info := &IndexLoadInfo{
|
||||
segmentID: indexBuildInfo.segmentID,
|
||||
fieldID: indexBuildInfo.fieldID,
|
||||
fieldName: fieldName,
|
||||
indexFilePaths: filePaths,
|
||||
indexParams: channelInfo.indexParams,
|
||||
}
|
||||
// Save data to meta table
|
||||
err = scheduler.metaTable.UpdateFieldIndexMeta(&etcdpb.FieldIndexMeta{
|
||||
SegmentID: indexBuildInfo.segmentID,
|
||||
FieldID: indexBuildInfo.fieldID,
|
||||
IndexID: indexID,
|
||||
IndexParams: channelInfo.indexParams,
|
||||
State: commonpb.IndexState_FINISHED,
|
||||
IndexFilePaths: filePaths,
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Println("indexbuilder scheduler updateFiledIndexMetaFailed", indexBuildInfo.segmentID)
|
||||
return err
|
||||
}
|
||||
|
||||
err = scheduler.indexLoadSch.Enqueue(info)
|
||||
log.Printf("build index for segment %d field %d enqueue load index", indexBuildInfo.segmentID, indexBuildInfo.fieldID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("build index for segment %d field %d finished", indexBuildInfo.segmentID, indexBuildInfo.fieldID)
|
||||
break
|
||||
} else {
|
||||
// save status to meta table
|
||||
err = scheduler.metaTable.UpdateFieldIndexMeta(&etcdpb.FieldIndexMeta{
|
||||
SegmentID: indexBuildInfo.segmentID,
|
||||
FieldID: indexBuildInfo.fieldID,
|
||||
IndexID: indexID,
|
||||
IndexParams: channelInfo.indexParams,
|
||||
State: description.States[0].State,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (scheduler *IndexBuildScheduler) scheduleLoop() {
|
||||
for {
|
||||
select {
|
||||
case info := <-scheduler.indexBuildChan:
|
||||
err := scheduler.schedule(info)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
case <-scheduler.ctx.Done():
|
||||
log.Print("server is closed, exit index build loop")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (scheduler *IndexBuildScheduler) Enqueue(info interface{}) error {
|
||||
scheduler.indexBuildChan <- info.(*IndexBuildInfo)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (scheduler *IndexBuildScheduler) Start() error {
|
||||
go scheduler.scheduleLoop()
|
||||
go scheduler.describe()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (scheduler *IndexBuildScheduler) Close() {
|
||||
scheduler.cancel()
|
||||
}
|
|
@@ -1,81 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
)
|
||||
|
||||
type IndexLoadInfo struct {
|
||||
segmentID UniqueID
|
||||
fieldID UniqueID
|
||||
fieldName string
|
||||
indexParams []*commonpb.KeyValuePair
|
||||
indexFilePaths []string
|
||||
}
|
||||
|
||||
type IndexLoadScheduler struct {
|
||||
indexLoadChan chan *IndexLoadInfo
|
||||
client LoadIndexClient
|
||||
metaTable *metaTable
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
func NewIndexLoadScheduler(ctx context.Context, client LoadIndexClient, metaTable *metaTable) *IndexLoadScheduler {
|
||||
ctx2, cancel := context.WithCancel(ctx)
|
||||
indexLoadChan := make(chan *IndexLoadInfo, 100)
|
||||
|
||||
return &IndexLoadScheduler{
|
||||
client: client,
|
||||
metaTable: metaTable,
|
||||
indexLoadChan: indexLoadChan,
|
||||
ctx: ctx2,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
func (scheduler *IndexLoadScheduler) schedule(info interface{}) error {
|
||||
indexLoadInfo := info.(*IndexLoadInfo)
|
||||
indexParams := make(map[string]string)
|
||||
for _, kv := range indexLoadInfo.indexParams {
|
||||
indexParams[kv.Key] = kv.Value
|
||||
}
|
||||
err := scheduler.client.LoadIndex(indexLoadInfo.indexFilePaths, indexLoadInfo.segmentID, indexLoadInfo.fieldID, indexLoadInfo.fieldName, indexParams)
|
||||
//TODO: Save data to meta table
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
func (scheduler *IndexLoadScheduler) scheduleLoop() {
|
||||
for {
|
||||
select {
|
||||
case info := <-scheduler.indexLoadChan:
|
||||
err := scheduler.schedule(info)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
case <-scheduler.ctx.Done():
|
||||
log.Print("server is closed, exit flush scheduler loop")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (scheduler *IndexLoadScheduler) Enqueue(info interface{}) error {
|
||||
scheduler.indexLoadChan <- info.(*IndexLoadInfo)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (scheduler *IndexLoadScheduler) Start() error {
|
||||
go scheduler.scheduleLoop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (scheduler *IndexLoadScheduler) Close() {
|
||||
scheduler.cancel()
|
||||
}
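Both removed schedulers (IndexBuildScheduler and IndexLoadScheduler) share one pattern: a buffered channel as the work queue, a goroutine that drains it, and a cancellable context for shutdown. A minimal, self-contained sketch of that pattern follows; the names and the string job type are placeholders, not the removed code.

package main

import (
	"context"
	"fmt"
	"time"
)

// miniScheduler is a stripped-down illustration of the queue-plus-context
// pattern used by the removed index schedulers.
type miniScheduler struct {
	work   chan string
	ctx    context.Context
	cancel context.CancelFunc
}

func newMiniScheduler(parent context.Context) *miniScheduler {
	ctx, cancel := context.WithCancel(parent)
	return &miniScheduler{work: make(chan string, 100), ctx: ctx, cancel: cancel}
}

// Enqueue pushes a job onto the buffered work queue.
func (s *miniScheduler) Enqueue(job string) error {
	s.work <- job
	return nil
}

// Start launches the schedule loop, which drains the queue until Close.
func (s *miniScheduler) Start() {
	go func() {
		for {
			select {
			case job := <-s.work:
				fmt.Println("processing", job)
			case <-s.ctx.Done():
				fmt.Println("scheduler closed, exiting loop")
				return
			}
		}
	}()
}

func (s *miniScheduler) Close() { s.cancel() }

func main() {
	s := newMiniScheduler(context.Background())
	s.Start()
	_ = s.Enqueue("load-index-segment-1")
	time.Sleep(100 * time.Millisecond)
	s.Close()
	time.Sleep(50 * time.Millisecond)
}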
|
|
@@ -1,205 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
)
|
||||
|
||||
type createIndexTask struct {
|
||||
baseTask
|
||||
req *milvuspb.CreateIndexRequest
|
||||
indexBuildScheduler *IndexBuildScheduler
|
||||
indexLoadScheduler *IndexLoadScheduler
|
||||
segManager SegmentManager
|
||||
}
|
||||
|
||||
func (task *createIndexTask) Type() commonpb.MsgType {
|
||||
return commonpb.MsgType_kCreateIndex
|
||||
}
|
||||
|
||||
func (task *createIndexTask) Ts() (Timestamp, error) {
|
||||
return task.req.Base.Timestamp, nil
|
||||
}
|
||||
|
||||
func (task *createIndexTask) Execute() error {
|
||||
collMeta, err := task.mt.GetCollectionByName(task.req.CollectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var fieldID int64 = -1
|
||||
for _, fieldSchema := range collMeta.Schema.Fields {
|
||||
if fieldSchema.Name == task.req.FieldName {
|
||||
fieldID = fieldSchema.FieldID
|
||||
break
|
||||
}
|
||||
}
|
||||
if fieldID == -1 {
|
||||
return fmt.Errorf("can not find field name %s", task.req.FieldName)
|
||||
}
|
||||
|
||||
// pre checks
|
||||
isIndexable, err := task.mt.IsIndexable(collMeta.ID, fieldID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !isIndexable {
|
||||
return fmt.Errorf("field %s is not vector", task.req.FieldName)
|
||||
}
|
||||
|
||||
// modify schema
|
||||
if err := task.mt.UpdateFieldIndexParams(task.req.CollectionName, task.req.FieldName, task.req.ExtraParams); err != nil {
|
||||
return err
|
||||
}
|
||||
// check if closed segment has the same index build history
|
||||
for _, segID := range collMeta.SegmentIDs {
|
||||
segMeta, err := task.mt.GetSegmentByID(segID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if segMeta.CloseTime == 0 {
|
||||
continue
|
||||
}
|
||||
hasIndexMeta, err := task.mt.HasFieldIndexMeta(segID, fieldID, task.req.ExtraParams)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if hasIndexMeta {
|
||||
// load index
|
||||
indexMeta, err := task.mt.GetFieldIndexMeta(segID, fieldID, task.req.ExtraParams)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = task.indexLoadScheduler.Enqueue(&IndexLoadInfo{
|
||||
segmentID: segID,
|
||||
fieldID: fieldID,
|
||||
fieldName: task.req.FieldName,
|
||||
indexFilePaths: indexMeta.IndexFilePaths,
|
||||
indexParams: indexMeta.IndexParams,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// create index
|
||||
for _, kv := range segMeta.BinlogFilePaths {
|
||||
if kv.FieldID != fieldID {
|
||||
continue
|
||||
}
|
||||
err := task.indexBuildScheduler.Enqueue(&IndexBuildInfo{
|
||||
segmentID: segID,
|
||||
fieldID: fieldID,
|
||||
binlogFilePath: kv.BinlogFiles,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// close unfilled segment
|
||||
return task.segManager.ForceClose(collMeta.ID)
|
||||
}
|
||||
|
||||
type describeIndexTask struct {
|
||||
baseTask
|
||||
req *milvuspb.DescribeIndexRequest
|
||||
resp *milvuspb.DescribeIndexResponse
|
||||
}
|
||||
|
||||
func (task *describeIndexTask) Type() commonpb.MsgType {
|
||||
return commonpb.MsgType_kDescribeIndex
|
||||
}
|
||||
|
||||
func (task *describeIndexTask) Ts() (Timestamp, error) {
|
||||
return task.req.Base.Timestamp, nil
|
||||
}
|
||||
|
||||
func (task *describeIndexTask) Execute() error {
|
||||
collMeta, err := task.mt.GetCollectionByName(task.req.CollectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var fieldID int64 = -1
|
||||
for _, fieldSchema := range collMeta.Schema.Fields {
|
||||
if fieldSchema.Name == task.req.FieldName {
|
||||
fieldID = fieldSchema.FieldID
|
||||
break
|
||||
}
|
||||
}
|
||||
if fieldID == -1 {
|
||||
return fmt.Errorf("can not find field %s", task.req.FieldName)
|
||||
}
|
||||
indexParams, err := task.mt.GetFieldIndexParams(collMeta.ID, fieldID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
description := &milvuspb.IndexDescription{
|
||||
IndexName: "", // todo add IndexName to master meta_table
|
||||
Params: indexParams,
|
||||
}
|
||||
task.resp.IndexDescriptions = []*milvuspb.IndexDescription{description}
|
||||
return nil
|
||||
}
|
||||
|
||||
type getIndexStateTask struct {
|
||||
baseTask
|
||||
req *milvuspb.IndexStateRequest
|
||||
runtimeStats *RuntimeStats
|
||||
resp *milvuspb.IndexStateResponse
|
||||
}
|
||||
|
||||
func (task *getIndexStateTask) Type() commonpb.MsgType {
|
||||
return commonpb.MsgType_kGetIndexState
|
||||
}
|
||||
|
||||
func (task *getIndexStateTask) Ts() (Timestamp, error) {
|
||||
return task.req.Base.Timestamp, nil
|
||||
}
|
||||
|
||||
func (task *getIndexStateTask) Execute() error {
|
||||
// get field id, collection id
|
||||
collMeta, err := task.mt.GetCollectionByName(task.req.CollectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var fieldID int64 = -1
|
||||
for _, fieldSchema := range collMeta.Schema.Fields {
|
||||
if fieldSchema.Name == task.req.FieldName {
|
||||
fieldID = fieldSchema.FieldID
|
||||
break
|
||||
}
|
||||
}
|
||||
if fieldID == -1 {
|
||||
return fmt.Errorf("can not find field %s", task.req.FieldName)
|
||||
}
|
||||
|
||||
// total segment nums
|
||||
totalSegmentNums := len(collMeta.SegmentIDs)
|
||||
|
||||
indexParams, err := task.mt.GetFieldIndexParams(collMeta.ID, fieldID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// get completed segment nums from querynode's runtime stats
|
||||
relatedSegments := task.runtimeStats.GetTotalNumOfRelatedSegments(collMeta.ID, fieldID, indexParams)
|
||||
task.resp = &milvuspb.IndexStateResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
},
|
||||
}
|
||||
|
||||
if int64(totalSegmentNums) == relatedSegments {
|
||||
task.resp.State = commonpb.IndexState_FINISHED
|
||||
} else {
|
||||
task.resp.State = commonpb.IndexState_INPROGRESS
|
||||
}
|
||||
return nil
|
||||
}
|
|
@@ -1,428 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strconv"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
grpcindexserviceclient "github.com/zilliztech/milvus-distributed/internal/distributed/indexservice/client"
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/querynode/client"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
|
||||
writerclient "github.com/zilliztech/milvus-distributed/internal/writenode/client"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Master is the master server.
|
||||
|
||||
type (
|
||||
UniqueID = typeutil.UniqueID
|
||||
Timestamp = typeutil.Timestamp
|
||||
)
|
||||
|
||||
type Master struct {
|
||||
// Server state.
|
||||
isServing int64
|
||||
|
||||
// Server start timestamp
|
||||
startTimestamp int64
|
||||
|
||||
ctx context.Context
|
||||
serverLoopCtx context.Context
|
||||
serverLoopCancel func()
|
||||
serverLoopWg sync.WaitGroup
|
||||
|
||||
//grpc server
|
||||
grpcServer *grpc.Server
|
||||
grpcErr chan error
|
||||
|
||||
kvBase *etcdkv.EtcdKV
|
||||
scheduler *ddRequestScheduler
|
||||
flushSch *FlushScheduler
|
||||
indexBuildSch *IndexBuildScheduler
|
||||
indexLoadSch *IndexLoadScheduler
|
||||
metaTable *metaTable
|
||||
timesSyncMsgProducer *timeSyncMsgProducer
|
||||
|
||||
// tso ticker
|
||||
tsoTicker *time.Ticker
|
||||
|
||||
// Add callback functions at different stages
|
||||
startCallbacks []func()
|
||||
closeCallbacks []func()
|
||||
|
||||
segmentManager SegmentManager
|
||||
segmentAssigner *SegmentAssigner
|
||||
statProcessor *StatsProcessor
|
||||
segmentStatusMsg ms.MsgStream
|
||||
|
||||
//id allocator
|
||||
idAllocator *GlobalIDAllocator
|
||||
//tso allocator
|
||||
tsoAllocator *GlobalTSOAllocator
|
||||
|
||||
runtimeStats *RuntimeStats
|
||||
}
|
||||
|
||||
func newKVBase(kvRoot string, etcdAddr []string) *etcdkv.EtcdKV {
|
||||
cli, _ := clientv3.New(clientv3.Config{
|
||||
Endpoints: etcdAddr,
|
||||
DialTimeout: 5 * time.Second,
|
||||
})
|
||||
kvBase := etcdkv.NewEtcdKV(cli, kvRoot)
|
||||
return kvBase
|
||||
}
|
||||
|
||||
func Init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
Params.Init()
|
||||
}
|
||||
|
||||
// CreateServer creates the UNINITIALIZED Master server with the given configuration.
|
||||
func CreateServer(ctx context.Context) (*Master, error) {
|
||||
//Init(etcdAddr, kvRootPath)
|
||||
etcdAddress := Params.EtcdAddress
|
||||
metaRootPath := Params.MetaRootPath
|
||||
kvRootPath := Params.KvRootPath
|
||||
pulsarAddr := Params.PulsarAddress
|
||||
|
||||
etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
etcdKV := etcdkv.NewEtcdKV(etcdClient, metaRootPath)
|
||||
metakv, err := NewMetaTable(etcdKV)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//timeSyncMsgProducer
|
||||
tsMsgProducer, err := NewTimeSyncMsgProducer(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pulsarProxyServiceStream := pulsarms.NewPulsarMsgStream(ctx, 1024) //output stream
|
||||
pulsarProxyServiceStream.SetPulsarClient(pulsarAddr)
|
||||
pulsarProxyServiceStream.CreatePulsarConsumers(Params.ProxyServiceTimeTickChannelNames, Params.MsgChannelSubName, util.NewUnmarshalDispatcher(), 1024)
|
||||
pulsarProxyServiceStream.Start()
|
||||
proxyTimeTickBarrier := newProxyServiceTimeTickBarrier(ctx, pulsarProxyServiceStream)
|
||||
tsMsgProducer.SetProxyTtBarrier(proxyTimeTickBarrier)
|
||||
|
||||
pulsarWriteStream := pulsarms.NewPulsarMsgStream(ctx, 1024) //output stream
|
||||
pulsarWriteStream.SetPulsarClient(pulsarAddr)
|
||||
pulsarWriteStream.CreatePulsarConsumers(Params.WriteNodeTimeTickChannelNames, Params.MsgChannelSubName, util.NewUnmarshalDispatcher(), 1024)
|
||||
pulsarWriteStream.Start()
|
||||
var writeStream ms.MsgStream = pulsarWriteStream
|
||||
writeTimeTickBarrier := newHardTimeTickBarrier(ctx, &writeStream, Params.WriteNodeIDList)
|
||||
tsMsgProducer.SetWriteNodeTtBarrier(writeTimeTickBarrier)
|
||||
|
||||
pulsarDDStream := pulsarms.NewPulsarMsgStream(ctx, 1024) //input stream
|
||||
pulsarDDStream.SetPulsarClient(pulsarAddr)
|
||||
pulsarDDStream.CreatePulsarProducers(Params.DDChannelNames)
|
||||
tsMsgProducer.SetDDSyncStream(pulsarDDStream)
|
||||
|
||||
pulsarDMStream := pulsarms.NewPulsarMsgStream(ctx, 1024) //input stream
|
||||
pulsarDMStream.SetPulsarClient(pulsarAddr)
|
||||
pulsarDMStream.CreatePulsarProducers(Params.InsertChannelNames)
|
||||
tsMsgProducer.SetDMSyncStream(pulsarDMStream)
|
||||
|
||||
pulsarK2SStream := pulsarms.NewPulsarMsgStream(ctx, 1024) //input stream
|
||||
pulsarK2SStream.SetPulsarClient(pulsarAddr)
|
||||
pulsarK2SStream.CreatePulsarProducers(Params.K2SChannelNames)
|
||||
tsMsgProducer.SetK2sSyncStream(pulsarK2SStream)
|
||||
|
||||
proxyTtBarrierWatcher := make(chan *ms.TimeTickMsg, 1024)
|
||||
writeNodeTtBarrierWatcher := make(chan *ms.TimeTickMsg, 1024)
|
||||
tsMsgProducer.WatchProxyTtBarrier(proxyTtBarrierWatcher)
|
||||
tsMsgProducer.WatchWriteNodeTtBarrier(writeNodeTtBarrierWatcher)
|
||||
|
||||
// stats msg stream
|
||||
statsMs := pulsarms.NewPulsarMsgStream(ctx, 1024)
|
||||
statsMs.SetPulsarClient(pulsarAddr)
|
||||
statsMs.CreatePulsarConsumers([]string{Params.QueryNodeStatsChannelName}, Params.MsgChannelSubName, util.NewUnmarshalDispatcher(), 1024)
|
||||
statsMs.Start()
|
||||
|
||||
m := &Master{
|
||||
ctx: ctx,
|
||||
startTimestamp: time.Now().Unix(),
|
||||
kvBase: newKVBase(kvRootPath, []string{etcdAddress}),
|
||||
metaTable: metakv,
|
||||
timesSyncMsgProducer: tsMsgProducer,
|
||||
grpcErr: make(chan error),
|
||||
segmentStatusMsg: statsMs,
|
||||
}
|
||||
|
||||
//init idAllocator
|
||||
m.idAllocator = NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "gid"))
|
||||
if err := m.idAllocator.Initialize(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//init tsoAllocator
|
||||
m.tsoAllocator = NewGlobalTSOAllocator("timestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "tso"))
|
||||
if err := m.tsoAllocator.Initialize(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.scheduler = NewDDRequestScheduler(ctx)
|
||||
m.scheduler.SetDDMsgStream(pulsarDDStream)
|
||||
m.scheduler.SetIDAllocator(func() (UniqueID, error) { return m.idAllocator.AllocOne() })
|
||||
|
||||
flushClient, err := writerclient.NewWriterClient(Params.EtcdAddress, Params.MetaRootPath, Params.WriteNodeSegKvSubPath, pulsarDDStream)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buildIndexClient := grpcindexserviceclient.NewClient(Params.IndexBuilderAddress)
|
||||
queryNodeClient := client.NewQueryNodeClient(ctx, Params.PulsarAddress, Params.LoadIndexChannelNames)
|
||||
|
||||
m.indexLoadSch = NewIndexLoadScheduler(ctx, queryNodeClient, m.metaTable)
|
||||
m.indexBuildSch = NewIndexBuildScheduler(ctx, buildIndexClient, m.metaTable, m.indexLoadSch)
|
||||
m.flushSch = NewFlushScheduler(ctx, flushClient, m.metaTable, m.indexBuildSch, func() (Timestamp, error) { return m.tsoAllocator.AllocOne() })
|
||||
|
||||
m.segmentAssigner = NewSegmentAssigner(ctx, metakv,
|
||||
func() (Timestamp, error) { return m.tsoAllocator.AllocOne() },
|
||||
proxyTtBarrierWatcher,
|
||||
)
|
||||
|
||||
m.segmentManager, err = NewSegmentManager(ctx, metakv,
|
||||
func() (UniqueID, error) { return m.idAllocator.AllocOne() },
|
||||
func() (Timestamp, error) { return m.tsoAllocator.AllocOne() },
|
||||
writeNodeTtBarrierWatcher,
|
||||
m.flushSch,
|
||||
m.segmentAssigner)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.runtimeStats = NewRuntimeStats()
|
||||
m.statProcessor = NewStatsProcessor(metakv, m.runtimeStats,
|
||||
func() (Timestamp, error) { return m.tsoAllocator.AllocOne() },
|
||||
)
|
||||
|
||||
m.grpcServer = grpc.NewServer()
|
||||
masterpb.RegisterMasterServiceServer(m.grpcServer, m)
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// AddStartCallback adds a callback in the startServer phase.
|
||||
func (s *Master) AddStartCallback(callbacks ...func()) {
|
||||
s.startCallbacks = append(s.startCallbacks, callbacks...)
|
||||
}
|
||||
|
||||
// AddCloseCallback adds a callback in the Close phase.
|
||||
func (s *Master) AddCloseCallback(callbacks ...func()) {
|
||||
s.closeCallbacks = append(s.closeCallbacks, callbacks...)
|
||||
}
|
||||
|
||||
// Close closes the server.
|
||||
func (s *Master) Close() {
|
||||
if !atomic.CompareAndSwapInt64(&s.isServing, 1, 0) {
|
||||
// server is already closed
|
||||
return
|
||||
}
|
||||
|
||||
log.Print("closing server")
|
||||
|
||||
s.stopServerLoop()
|
||||
s.segmentAssigner.Close()
|
||||
s.segmentManager.Close()
|
||||
if s.kvBase != nil {
|
||||
s.kvBase.Close()
|
||||
}
|
||||
|
||||
// Run callbacks
|
||||
for _, cb := range s.closeCallbacks {
|
||||
cb()
|
||||
}
|
||||
|
||||
log.Print("close server")
|
||||
}
|
||||
|
||||
// IsClosed checks whether server is closed or not.
|
||||
func (s *Master) IsClosed() bool {
|
||||
return atomic.LoadInt64(&s.isServing) == 0
|
||||
}
|
||||
|
||||
func (s *Master) IsServing() bool {
|
||||
return !s.IsClosed()
|
||||
}
|
||||
|
||||
// Run runs the Master server.
|
||||
func (s *Master) Run(grpcPort int64) error {
|
||||
if err := s.startServerLoop(s.ctx, grpcPort); err != nil {
|
||||
return err
|
||||
}
|
||||
s.segmentAssigner.Start()
|
||||
s.segmentManager.Start()
|
||||
atomic.StoreInt64(&s.isServing, 1)
|
||||
|
||||
// Run callbacks
|
||||
for _, cb := range s.startCallbacks {
|
||||
cb()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Context returns the context of server.
|
||||
func (s *Master) Context() context.Context {
|
||||
return s.ctx
|
||||
}
|
||||
|
||||
// LoopContext returns the loop context of server.
|
||||
func (s *Master) LoopContext() context.Context {
|
||||
return s.serverLoopCtx
|
||||
}
|
||||
|
||||
func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
|
||||
s.serverLoopCtx, s.serverLoopCancel = context.WithCancel(ctx)
|
||||
//go s.Se
|
||||
|
||||
s.serverLoopWg.Add(1)
|
||||
if err := s.timesSyncMsgProducer.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.serverLoopWg.Add(1)
|
||||
if err := s.scheduler.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
s.serverLoopWg.Add(1)
|
||||
if err := s.indexLoadSch.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
s.serverLoopWg.Add(1)
|
||||
if err := s.indexBuildSch.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
s.serverLoopWg.Add(1)
|
||||
if err := s.flushSch.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.serverLoopWg.Add(1)
|
||||
go s.grpcLoop(grpcPort)
|
||||
|
||||
if err := <-s.grpcErr; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.serverLoopWg.Add(1)
|
||||
go s.statisticsLoop()
|
||||
|
||||
s.serverLoopWg.Add(1)
|
||||
go s.tsLoop()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Master) stopServerLoop() {
|
||||
s.timesSyncMsgProducer.Close()
|
||||
s.serverLoopWg.Done()
|
||||
s.scheduler.Close()
|
||||
s.serverLoopWg.Done()
|
||||
s.flushSch.Close()
|
||||
s.serverLoopWg.Done()
|
||||
s.indexBuildSch.Close()
|
||||
s.serverLoopWg.Done()
|
||||
s.indexLoadSch.Close()
|
||||
s.serverLoopWg.Done()
|
||||
|
||||
if s.grpcServer != nil {
|
||||
s.grpcServer.GracefulStop()
|
||||
log.Printf("server is closed, exit grpc server")
|
||||
}
|
||||
s.serverLoopCancel()
|
||||
s.serverLoopWg.Wait()
|
||||
}
|
||||
|
||||
// StartTimestamp returns the start timestamp of this server
|
||||
func (s *Master) StartTimestamp() int64 {
|
||||
return s.startTimestamp
|
||||
}
|
||||
|
||||
func (s *Master) checkGrpcReady(ctx context.Context, targetCh chan error) {
|
||||
select {
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
targetCh <- nil
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Master) grpcLoop(grpcPort int64) {
|
||||
defer s.serverLoopWg.Done()
|
||||
|
||||
defaultGRPCPort := ":"
|
||||
defaultGRPCPort += strconv.FormatInt(grpcPort, 10)
|
||||
lis, err := net.Listen("tcp", defaultGRPCPort)
|
||||
if err != nil {
|
||||
log.Printf("failed to listen: %v", err)
|
||||
s.grpcErr <- err
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithCancel(s.serverLoopCtx)
|
||||
defer cancel()
|
||||
go s.checkGrpcReady(ctx, s.grpcErr)
|
||||
if err := s.grpcServer.Serve(lis); err != nil {
|
||||
s.grpcErr <- err
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Master) tsLoop() {
|
||||
defer s.serverLoopWg.Done()
|
||||
s.tsoTicker = time.NewTicker(UpdateTimestampStep)
|
||||
defer s.tsoTicker.Stop()
|
||||
ctx, cancel := context.WithCancel(s.serverLoopCtx)
|
||||
defer cancel()
|
||||
for {
|
||||
select {
|
||||
case <-s.tsoTicker.C:
|
||||
if err := s.tsoAllocator.UpdateTSO(); err != nil {
|
||||
log.Println("failed to update timestamp", err)
|
||||
return
|
||||
}
|
||||
if err := s.idAllocator.UpdateID(); err != nil {
|
||||
log.Println("failed to update id", err)
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
// Server is closed and it should return nil.
|
||||
log.Println("tsLoop is closed")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
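tsLoop above is the periodic-refresh idiom used for both allocators: a ticker drives UpdateTSO/UpdateID, and the loop context stops the goroutine on shutdown. Below is a standalone sketch of the same idiom; the 50 ms interval and the update callback are placeholders, not values from this repository.

package main

import (
	"context"
	"fmt"
	"time"
)

// refreshLoop periodically calls update until ctx is cancelled, mirroring the
// shape of the removed tsLoop; the interval here is a placeholder.
func refreshLoop(ctx context.Context, update func() error) {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := update(); err != nil {
				fmt.Println("update failed, stopping loop:", err)
				return
			}
		case <-ctx.Done():
			fmt.Println("loop context done")
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	refreshLoop(ctx, func() error { fmt.Println("advance TSO window"); return nil })
}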
|
||||
|
||||
func (s *Master) statisticsLoop() {
|
||||
defer s.serverLoopWg.Done()
|
||||
defer s.segmentStatusMsg.Close()
|
||||
ctx, cancel := context.WithCancel(s.serverLoopCtx)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg := <-s.segmentStatusMsg.Chan():
|
||||
err := s.statProcessor.ProcessQueryNodeStats(msg)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
log.Print("server is closed, exit segment statistics loop")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
@@ -1,718 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
"github.com/zilliztech/milvus-distributed/internal/kv"
|
||||
pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
)
|
||||
|
||||
type metaTable struct {
|
||||
client kv.TxnBase // client of a reliable kv service, e.g. an etcd client
|
||||
tenantID2Meta map[UniqueID]pb.TenantMeta // tenant id to tenant meta
|
||||
proxyID2Meta map[UniqueID]pb.ProxyMeta // proxy id to proxy meta
|
||||
collID2Meta map[UniqueID]pb.CollectionMeta // collection id to collection meta
|
||||
collName2ID map[string]UniqueID // collection name to collection id
|
||||
segID2Meta map[UniqueID]pb.SegmentMeta // segment id to segment meta
|
||||
segID2IndexMetas map[UniqueID][]pb.FieldIndexMeta // segment id to array of field index meta
|
||||
|
||||
tenantLock sync.RWMutex
|
||||
proxyLock sync.RWMutex
|
||||
ddLock sync.RWMutex
|
||||
indexLock sync.RWMutex
|
||||
}
|
||||
|
||||
func NewMetaTable(kv kv.TxnBase) (*metaTable, error) {
|
||||
mt := &metaTable{
|
||||
client: kv,
|
||||
tenantLock: sync.RWMutex{},
|
||||
proxyLock: sync.RWMutex{},
|
||||
ddLock: sync.RWMutex{},
|
||||
}
|
||||
err := mt.reloadFromKV()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return mt, nil
|
||||
}
|
||||
|
||||
func (mt *metaTable) reloadFromKV() error {
|
||||
|
||||
mt.tenantID2Meta = make(map[UniqueID]pb.TenantMeta)
|
||||
mt.proxyID2Meta = make(map[UniqueID]pb.ProxyMeta)
|
||||
mt.collID2Meta = make(map[UniqueID]pb.CollectionMeta)
|
||||
mt.collName2ID = make(map[string]UniqueID)
|
||||
mt.segID2Meta = make(map[UniqueID]pb.SegmentMeta)
|
||||
mt.segID2IndexMetas = make(map[UniqueID][]pb.FieldIndexMeta)
|
||||
|
||||
_, values, err := mt.client.LoadWithPrefix("tenant")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, value := range values {
|
||||
tenantMeta := pb.TenantMeta{}
|
||||
err := proto.UnmarshalText(value, &tenantMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mt.tenantID2Meta[tenantMeta.ID] = tenantMeta
|
||||
}
|
||||
|
||||
_, values, err = mt.client.LoadWithPrefix("proxy")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, value := range values {
|
||||
proxyMeta := pb.ProxyMeta{}
|
||||
err = proto.UnmarshalText(value, &proxyMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mt.proxyID2Meta[proxyMeta.ID] = proxyMeta
|
||||
}
|
||||
|
||||
_, values, err = mt.client.LoadWithPrefix("collection")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, value := range values {
|
||||
collectionMeta := pb.CollectionMeta{}
|
||||
err = proto.UnmarshalText(value, &collectionMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mt.collID2Meta[collectionMeta.ID] = collectionMeta
|
||||
mt.collName2ID[collectionMeta.Schema.Name] = collectionMeta.ID
|
||||
}
|
||||
|
||||
_, values, err = mt.client.LoadWithPrefix("segment")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, value := range values {
|
||||
segmentMeta := pb.SegmentMeta{}
|
||||
err = proto.UnmarshalText(value, &segmentMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mt.segID2Meta[segmentMeta.SegmentID] = segmentMeta
|
||||
}
|
||||
|
||||
_, values, err = mt.client.LoadWithPrefix("indexmeta")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, v := range values {
|
||||
indexMeta := pb.FieldIndexMeta{}
|
||||
err = proto.UnmarshalText(v, &indexMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mt.segID2IndexMetas[indexMeta.SegmentID] = append(mt.segID2IndexMetas[indexMeta.SegmentID], indexMeta)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The caller must hold metaTable.ddLock before calling this function.
|
||||
func (mt *metaTable) saveCollectionMeta(coll *pb.CollectionMeta) error {
|
||||
collBytes := proto.MarshalTextString(coll)
|
||||
mt.collID2Meta[coll.ID] = *coll
|
||||
mt.collName2ID[coll.Schema.Name] = coll.ID
|
||||
return mt.client.Save("/collection/"+strconv.FormatInt(coll.ID, 10), collBytes)
|
||||
}
|
||||
|
||||
// The caller must hold metaTable.ddLock before calling this function.
|
||||
func (mt *metaTable) saveSegmentMeta(seg *pb.SegmentMeta) error {
|
||||
segBytes := proto.MarshalTextString(seg)
|
||||
|
||||
mt.segID2Meta[seg.SegmentID] = *seg
|
||||
|
||||
return mt.client.Save("/segment/"+strconv.FormatInt(seg.SegmentID, 10), segBytes)
|
||||
}
|
||||
|
||||
// The caller must hold metaTable.ddLock before calling this function.
|
||||
func (mt *metaTable) saveCollectionAndDeleteSegmentsMeta(coll *pb.CollectionMeta, segIDs []UniqueID) error {
|
||||
segIDStrs := make([]string, 0, len(segIDs))
|
||||
for _, segID := range segIDs {
|
||||
segIDStrs = append(segIDStrs, "/segment/"+strconv.FormatInt(segID, 10))
|
||||
}
|
||||
|
||||
kvs := make(map[string]string)
|
||||
collStrs := proto.MarshalTextString(coll)
|
||||
|
||||
kvs["/collection/"+strconv.FormatInt(coll.ID, 10)] = collStrs
|
||||
|
||||
for _, segID := range segIDs {
|
||||
_, ok := mt.segID2Meta[segID]
|
||||
|
||||
if ok {
|
||||
delete(mt.segID2Meta, segID)
|
||||
}
|
||||
}
|
||||
|
||||
mt.collID2Meta[coll.ID] = *coll
|
||||
|
||||
return mt.client.MultiSaveAndRemove(kvs, segIDStrs)
|
||||
}
|
||||
|
||||
// The caller must hold metaTable.ddLock before calling this function.
|
||||
func (mt *metaTable) saveCollectionsAndSegmentsMeta(coll *pb.CollectionMeta, seg *pb.SegmentMeta) error {
|
||||
kvs := make(map[string]string)
|
||||
collBytes := proto.MarshalTextString(coll)
|
||||
|
||||
kvs["/collection/"+strconv.FormatInt(coll.ID, 10)] = collBytes
|
||||
|
||||
mt.collID2Meta[coll.ID] = *coll
|
||||
mt.collName2ID[coll.Schema.Name] = coll.ID
|
||||
|
||||
segBytes := proto.MarshalTextString(seg)
|
||||
|
||||
kvs["/segment/"+strconv.FormatInt(seg.SegmentID, 10)] = segBytes
|
||||
|
||||
mt.segID2Meta[seg.SegmentID] = *seg
|
||||
|
||||
return mt.client.MultiSave(kvs)
|
||||
}
|
||||
|
||||
// The caller must hold metaTable.ddLock before calling this function.
|
||||
func (mt *metaTable) deleteCollectionsAndSegmentsMeta(collID UniqueID, segIDs []UniqueID) error {
|
||||
collIDStr := "/collection/" + strconv.FormatInt(collID, 10)
|
||||
|
||||
totalIDStrs := make([]string, 0, 1+len(segIDs))
|
||||
totalIDStrs = append(totalIDStrs, collIDStr)
|
||||
for _, singleID := range segIDs {
|
||||
totalIDStrs = append(totalIDStrs, "/segment/"+strconv.FormatInt(singleID, 10))
|
||||
}
|
||||
|
||||
collMeta, ok := mt.collID2Meta[collID]

if ok {
delete(mt.collID2Meta, collID)
if collMeta.Schema != nil {
delete(mt.collName2ID, collMeta.Schema.Name)
}
}
|
||||
|
||||
for _, segID := range segIDs {
|
||||
_, ok := mt.segID2Meta[segID]
|
||||
|
||||
if ok {
|
||||
delete(mt.segID2Meta, segID)
|
||||
}
|
||||
}
|
||||
|
||||
return mt.client.MultiRemove(totalIDStrs)
|
||||
}
|
||||
|
||||
func (mt *metaTable) AddCollection(coll *pb.CollectionMeta) error {
|
||||
mt.ddLock.Lock()
|
||||
defer mt.ddLock.Unlock()
|
||||
if len(coll.SegmentIDs) != 0 {
|
||||
return errors.Errorf("segment should be empty when creating collection")
|
||||
}
|
||||
|
||||
if len(coll.PartitionTags) == 0 {
|
||||
coll.PartitionTags = append(coll.PartitionTags, Params.DefaultPartitionTag)
|
||||
}
|
||||
_, ok := mt.collName2ID[coll.Schema.Name]
|
||||
if ok {
|
||||
return errors.Errorf("collection alread exists with name = " + coll.Schema.Name)
|
||||
}
|
||||
err := mt.saveCollectionMeta(coll)
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
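AddCollection, like the other mutators in this meta table, follows a write-through pattern: update the in-memory maps, persist to etcd, and on a persistence error discard the in-memory state via reloadFromKV so cache and store never diverge. The sketch below illustrates that rollback-on-save-failure idea with a made-up kvStore interface; it is not the metaTable code itself.

package main

import (
	"errors"
	"fmt"
)

// kvStore is a stand-in for the etcd-backed kv.TxnBase client.
type kvStore interface {
	Save(key, value string) error
}

type cacheTable struct {
	client kvStore
	byName map[string]string
}

// put mutates the cache first, then persists; on failure it drops the cached
// entry so the in-memory view never diverges from the KV store.
func (c *cacheTable) put(name, value string) error {
	c.byName[name] = value
	if err := c.client.Save("/collection/"+name, value); err != nil {
		delete(c.byName, name) // stand-in for the full reloadFromKV()
		return err
	}
	return nil
}

type failingKV struct{}

func (failingKV) Save(string, string) error { return errors.New("etcd unavailable") }

func main() {
	t := &cacheTable{client: failingKV{}, byName: map[string]string{}}
	if err := t.put("demo", "meta"); err != nil {
		fmt.Println("save failed, cache rolled back; cached entries:", len(t.byName), "err:", err)
	}
}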
|
||||
|
||||
func (mt *metaTable) DeleteCollection(collID UniqueID) error {
|
||||
mt.ddLock.Lock()
|
||||
defer mt.ddLock.Unlock()
|
||||
|
||||
collMeta, ok := mt.collID2Meta[collID]
|
||||
if !ok {
|
||||
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(collID, 10))
|
||||
}
|
||||
|
||||
err := mt.deleteCollectionsAndSegmentsMeta(collID, collMeta.SegmentIDs)
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
|
||||
// remove index meta
|
||||
for _, v := range collMeta.SegmentIDs {
|
||||
if err := mt.removeSegmentIndexMeta(v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mt *metaTable) HasCollection(collID UniqueID) bool {
|
||||
mt.ddLock.RLock()
|
||||
defer mt.ddLock.RUnlock()
|
||||
_, ok := mt.collID2Meta[collID]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (mt *metaTable) GetCollectionByName(collectionName string) (*pb.CollectionMeta, error) {
|
||||
mt.ddLock.RLock()
|
||||
defer mt.ddLock.RUnlock()
|
||||
|
||||
vid, ok := mt.collName2ID[collectionName]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("can't find collection: " + collectionName)
|
||||
}
|
||||
col, ok := mt.collID2Meta[vid]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("can't find collection: " + collectionName)
|
||||
}
|
||||
return &col, nil
|
||||
}
|
||||
|
||||
func (mt *metaTable) ListCollections() ([]string, error) {
|
||||
mt.ddLock.RLock()
|
||||
defer mt.ddLock.RUnlock()
|
||||
|
||||
colls := make([]string, 0, len(mt.collName2ID))
|
||||
for name := range mt.collName2ID {
|
||||
colls = append(colls, name)
|
||||
}
|
||||
return colls, nil
|
||||
}
|
||||
|
||||
func (mt *metaTable) AddPartition(collID UniqueID, tag string) error {
|
||||
mt.ddLock.Lock()
|
||||
defer mt.ddLock.Unlock()
|
||||
coll, ok := mt.collID2Meta[collID]
|
||||
if !ok {
|
||||
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(collID, 10))
|
||||
}
|
||||
|
||||
// number of partition tags (except _default) should be limited to 4096 by default
|
||||
if int64(len(coll.PartitionTags)) > Params.MaxPartitionNum {
|
||||
return errors.New("maximum partition's number should be limit to " + strconv.FormatInt(Params.MaxPartitionNum, 10))
|
||||
}
|
||||
for _, t := range coll.PartitionTags {
|
||||
if t == tag {
|
||||
return errors.Errorf("partition already exists.")
|
||||
}
|
||||
}
|
||||
coll.PartitionTags = append(coll.PartitionTags, tag)
|
||||
|
||||
err := mt.saveCollectionMeta(&coll)
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mt *metaTable) HasPartition(collID UniqueID, tag string) bool {
|
||||
mt.ddLock.RLock()
|
||||
defer mt.ddLock.RUnlock()
|
||||
col, ok := mt.collID2Meta[collID]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
for _, partitionTag := range col.PartitionTags {
|
||||
if partitionTag == tag {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (mt *metaTable) DeletePartition(collID UniqueID, tag string) error {
|
||||
mt.ddLock.Lock()
|
||||
defer mt.ddLock.Unlock()
|
||||
|
||||
if tag == Params.DefaultPartitionTag {
|
||||
return errors.New("default partition cannot be deleted")
|
||||
}
|
||||
|
||||
collMeta, ok := mt.collID2Meta[collID]
|
||||
if !ok {
|
||||
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(collID, 10))
|
||||
}
|
||||
|
||||
// check tag exists
|
||||
exist := false
|
||||
|
||||
pt := make([]string, 0, len(collMeta.PartitionTags))
|
||||
for _, t := range collMeta.PartitionTags {
|
||||
if t != tag {
|
||||
pt = append(pt, t)
|
||||
} else {
|
||||
exist = true
|
||||
}
|
||||
}
|
||||
if !exist {
|
||||
return errors.New("partition " + tag + " does not exist")
|
||||
}
|
||||
if len(pt) == len(collMeta.PartitionTags) {
|
||||
return nil
|
||||
}
|
||||
|
||||
toDeleteSeg := make([]UniqueID, 0, len(collMeta.SegmentIDs))
|
||||
seg := make([]UniqueID, 0, len(collMeta.SegmentIDs))
|
||||
for _, s := range collMeta.SegmentIDs {
|
||||
sm, ok := mt.segID2Meta[s]
|
||||
if !ok {
|
||||
return errors.Errorf("DeletePartition:can't find segment id = %d", s)
|
||||
}
|
||||
if sm.PartitionTag != tag {
|
||||
seg = append(seg, s)
|
||||
} else {
|
||||
toDeleteSeg = append(toDeleteSeg, s)
|
||||
}
|
||||
}
|
||||
collMeta.PartitionTags = pt
|
||||
collMeta.SegmentIDs = seg
|
||||
|
||||
err := mt.saveCollectionAndDeleteSegmentsMeta(&collMeta, toDeleteSeg)
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mt *metaTable) AddSegment(seg *pb.SegmentMeta) error {
|
||||
mt.ddLock.Lock()
|
||||
defer mt.ddLock.Unlock()
|
||||
collID := seg.CollectionID
|
||||
collMeta := mt.collID2Meta[collID]
|
||||
collMeta.SegmentIDs = append(collMeta.SegmentIDs, seg.SegmentID)
|
||||
err := mt.saveCollectionsAndSegmentsMeta(&collMeta, seg)
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mt *metaTable) UpdateSegment(seg *pb.SegmentMeta) error {
|
||||
mt.ddLock.Lock()
|
||||
defer mt.ddLock.Unlock()
|
||||
|
||||
collID := seg.CollectionID
|
||||
collMeta := mt.collID2Meta[collID]
|
||||
isNewSegID := true
|
||||
for _, segID := range collMeta.SegmentIDs {
|
||||
if segID == seg.SegmentID {
|
||||
isNewSegID = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if isNewSegID {
|
||||
collMeta.SegmentIDs = append(collMeta.SegmentIDs, seg.SegmentID)
|
||||
if err := mt.saveCollectionsAndSegmentsMeta(&collMeta, seg); err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := mt.saveSegmentMeta(seg); err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
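// GetSegmentByID returns the meta of the segment with the given ID, or an error if it is unknown.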
func (mt *metaTable) GetSegmentByID(segID UniqueID) (*pb.SegmentMeta, error) {
|
||||
mt.ddLock.RLock()
|
||||
defer mt.ddLock.RUnlock()
|
||||
|
||||
sm, ok := mt.segID2Meta[segID]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("GetSegmentByID:can't find segment id = %d", segID)
|
||||
}
|
||||
return &sm, nil
|
||||
}
|
||||
|
||||
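// DeleteSegment removes the segment from its collection meta, deletes the segment meta and
// drops any index meta recorded for that segment.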
func (mt *metaTable) DeleteSegment(segID UniqueID) error {
|
||||
mt.ddLock.Lock()
|
||||
defer mt.ddLock.Unlock()
|
||||
|
||||
segMeta, ok := mt.segID2Meta[segID]
|
||||
if !ok {
|
||||
return errors.Errorf("DeleteSegment:can't find segment. id = " + strconv.FormatInt(segID, 10))
|
||||
}
|
||||
|
||||
collMeta, ok := mt.collID2Meta[segMeta.CollectionID]
|
||||
if !ok {
|
||||
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(segMeta.CollectionID, 10))
|
||||
}
|
||||
|
||||
for i := 0; i < len(collMeta.SegmentIDs); i++ {
|
||||
if collMeta.SegmentIDs[i] == segID {
|
||||
collMeta.SegmentIDs = append(collMeta.SegmentIDs[:i], collMeta.SegmentIDs[i+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
err := mt.saveCollectionAndDeleteSegmentsMeta(&collMeta, []UniqueID{segID})
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
|
||||
return mt.removeSegmentIndexMeta(segID)
|
||||
}
|
||||
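// CloseSegment records the close timestamp on the segment and persists the updated segment meta.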
func (mt *metaTable) CloseSegment(segID UniqueID, closeTs Timestamp) error {
|
||||
mt.ddLock.Lock()
|
||||
defer mt.ddLock.Unlock()
|
||||
|
||||
segMeta, ok := mt.segID2Meta[segID]
|
||||
if !ok {
|
||||
return errors.Errorf("CloseSegment:can't find segment id = " + strconv.FormatInt(segID, 10))
|
||||
}
|
||||
|
||||
segMeta.CloseTime = closeTs
|
||||
|
||||
err := mt.saveSegmentMeta(&segMeta)
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
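// AddFieldIndexMeta records index meta for a segment field and persists it to etcd; a duplicate
// entry for the same field and index params is rejected.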
func (mt *metaTable) AddFieldIndexMeta(meta *pb.FieldIndexMeta) error {
|
||||
mt.indexLock.Lock()
|
||||
defer mt.indexLock.Unlock()
|
||||
|
||||
segID := meta.SegmentID
|
||||
if _, ok := mt.segID2IndexMetas[segID]; !ok {
|
||||
mt.segID2IndexMetas[segID] = make([]pb.FieldIndexMeta, 0)
|
||||
}
|
||||
for _, v := range mt.segID2IndexMetas[segID] {
|
||||
if v.FieldID == meta.FieldID && typeutil.CompareIndexParams(v.IndexParams, meta.IndexParams) {
|
||||
return fmt.Errorf("segment %d field id %d's index meta already exist", segID, meta.FieldID)
|
||||
}
|
||||
}
|
||||
mt.segID2IndexMetas[segID] = append(mt.segID2IndexMetas[segID], *meta)
|
||||
err := mt.saveFieldIndexMetaToEtcd(meta)
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mt *metaTable) saveFieldIndexMetaToEtcd(meta *pb.FieldIndexMeta) error {
|
||||
key := "/indexmeta/" + strconv.FormatInt(meta.SegmentID, 10) + strconv.FormatInt(meta.FieldID, 10) + strconv.FormatInt(meta.IndexID, 10)
|
||||
marshaledMeta := proto.MarshalTextString(meta)
|
||||
return mt.client.Save(key, marshaledMeta)
|
||||
}
|
||||
|
||||
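// DeleteFieldIndexMeta removes the index meta matching the given field and index params from
// memory and from etcd.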
func (mt *metaTable) DeleteFieldIndexMeta(segID UniqueID, fieldID UniqueID, indexParams []*commonpb.KeyValuePair) error {
|
||||
mt.indexLock.Lock()
|
||||
defer mt.indexLock.Unlock()
|
||||
|
||||
if _, ok := mt.segID2IndexMetas[segID]; !ok {
|
||||
return fmt.Errorf("can not find index meta of segment %d", segID)
|
||||
}
|
||||
|
||||
for i, v := range mt.segID2IndexMetas[segID] {
|
||||
if v.FieldID == fieldID && typeutil.CompareIndexParams(v.IndexParams, indexParams) {
|
||||
mt.segID2IndexMetas[segID] = append(mt.segID2IndexMetas[segID][:i], mt.segID2IndexMetas[segID][i+1:]...)
|
||||
err := mt.deleteFieldIndexMetaToEtcd(segID, fieldID, v.IndexID)
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("can not find index meta of field %d", fieldID)
|
||||
}
|
||||
|
||||
func (mt *metaTable) deleteFieldIndexMetaToEtcd(segID UniqueID, fieldID UniqueID, indexID UniqueID) error {
|
||||
key := "/indexmeta/" + strconv.FormatInt(segID, 10) + strconv.FormatInt(fieldID, 10) + strconv.FormatInt(indexID, 10)
|
||||
return mt.client.Remove(key)
|
||||
}
|
||||
|
||||
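// HasFieldIndexMeta reports whether index meta exists for the given segment, field and index params.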
func (mt *metaTable) HasFieldIndexMeta(segID UniqueID, fieldID UniqueID, indexParams []*commonpb.KeyValuePair) (bool, error) {
|
||||
mt.indexLock.RLock()
|
||||
defer mt.indexLock.RUnlock()
|
||||
|
||||
if _, ok := mt.segID2IndexMetas[segID]; !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, v := range mt.segID2IndexMetas[segID] {
|
||||
if v.FieldID == fieldID && typeutil.CompareIndexParams(v.IndexParams, indexParams) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
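// GetFieldIndexMeta returns the index meta matching the given segment, field and index params.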
func (mt *metaTable) GetFieldIndexMeta(segID UniqueID, fieldID UniqueID, indexParams []*commonpb.KeyValuePair) (*pb.FieldIndexMeta, error) {
|
||||
mt.indexLock.RLock()
|
||||
defer mt.indexLock.RUnlock()
|
||||
|
||||
if _, ok := mt.segID2IndexMetas[segID]; !ok {
|
||||
return nil, fmt.Errorf("can not find segment %d", segID)
|
||||
}
|
||||
|
||||
for _, v := range mt.segID2IndexMetas[segID] {
|
||||
if v.FieldID == fieldID && typeutil.CompareIndexParams(v.IndexParams, indexParams) {
|
||||
return &v, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("can not find field %d", fieldID)
|
||||
}
|
||||
|
||||
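// UpdateFieldIndexMeta replaces the stored index meta that matches the given field and index
// params, or appends it as a new entry when no match exists.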
func (mt *metaTable) UpdateFieldIndexMeta(meta *pb.FieldIndexMeta) error {
|
||||
mt.indexLock.Lock()
|
||||
defer mt.indexLock.Unlock()
|
||||
|
||||
segID := meta.SegmentID
|
||||
if _, ok := mt.segID2IndexMetas[segID]; !ok {
|
||||
mt.segID2IndexMetas[segID] = make([]pb.FieldIndexMeta, 0)
|
||||
}
|
||||
for i, v := range mt.segID2IndexMetas[segID] {
|
||||
if v.FieldID == meta.FieldID && typeutil.CompareIndexParams(v.IndexParams, meta.IndexParams) {
|
||||
mt.segID2IndexMetas[segID][i] = *meta
|
||||
err := mt.deleteFieldIndexMetaToEtcd(segID, v.FieldID, v.IndexID)
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
err = mt.saveFieldIndexMetaToEtcd(meta)
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
mt.segID2IndexMetas[segID] = append(mt.segID2IndexMetas[segID], *meta)
|
||||
err := mt.saveFieldIndexMetaToEtcd(meta)
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mt *metaTable) removeSegmentIndexMeta(segID UniqueID) error {
|
||||
mt.indexLock.Lock()
|
||||
defer mt.indexLock.Unlock()
|
||||
|
||||
delete(mt.segID2IndexMetas, segID)
|
||||
keys, _, err := mt.client.LoadWithPrefix("indexmeta/" + strconv.FormatInt(segID, 10))
|
||||
if err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
if err = mt.client.MultiRemove(keys); err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
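// GetFieldTypeParams returns the type params declared in the collection schema for the given field.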
func (mt *metaTable) GetFieldTypeParams(collID UniqueID, fieldID UniqueID) ([]*commonpb.KeyValuePair, error) {
|
||||
mt.ddLock.RLock()
|
||||
defer mt.ddLock.RUnlock()
|
||||
|
||||
if _, ok := mt.collID2Meta[collID]; !ok {
|
||||
return nil, fmt.Errorf("can not find collection with id %d", collID)
|
||||
}
|
||||
|
||||
for _, fieldSchema := range mt.collID2Meta[collID].Schema.Fields {
|
||||
if fieldSchema.FieldID == fieldID {
|
||||
return fieldSchema.TypeParams, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("can not find field %d in collection %d", fieldID, collID)
|
||||
}
|
||||
|
||||
func (mt *metaTable) GetFieldIndexParams(collID UniqueID, fieldID UniqueID) ([]*commonpb.KeyValuePair, error) {
|
||||
mt.ddLock.RLock()
|
||||
defer mt.ddLock.RUnlock()
|
||||
|
||||
if _, ok := mt.collID2Meta[collID]; !ok {
|
||||
return nil, fmt.Errorf("can not find collection with id %d", collID)
|
||||
}
|
||||
|
||||
for _, fieldSchema := range mt.collID2Meta[collID].Schema.Fields {
|
||||
if fieldSchema.FieldID == fieldID {
|
||||
return fieldSchema.IndexParams, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("can not find field %d in collection %d", fieldID, collID)
|
||||
}
|
||||
|
||||
func (mt *metaTable) UpdateFieldIndexParams(collName string, fieldName string, indexParams []*commonpb.KeyValuePair) error {
|
||||
mt.ddLock.Lock()
|
||||
defer mt.ddLock.Unlock()
|
||||
|
||||
vid, ok := mt.collName2ID[collName]
|
||||
if !ok {
|
||||
return errors.Errorf("can't find collection: " + collName)
|
||||
}
|
||||
meta, ok := mt.collID2Meta[vid]
|
||||
if !ok {
|
||||
return errors.Errorf("can't find collection: " + collName)
|
||||
}
|
||||
|
||||
for _, fieldSchema := range meta.Schema.Fields {
|
||||
if fieldSchema.Name == fieldName {
|
||||
fieldSchema.IndexParams = indexParams
|
||||
if err := mt.saveCollectionMeta(&meta); err != nil {
|
||||
_ = mt.reloadFromKV()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("can not find field with id %s", fieldName)
|
||||
}
|
||||
|
||||
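// IsIndexable reports whether the field is a vector field with non-empty index params in the collection schema.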
func (mt *metaTable) IsIndexable(collID UniqueID, fieldID UniqueID) (bool, error) {
|
||||
mt.ddLock.RLock()
|
||||
defer mt.ddLock.RUnlock()
|
||||
|
||||
if _, ok := mt.collID2Meta[collID]; !ok {
|
||||
return false, fmt.Errorf("can not find collection with id %d", collID)
|
||||
}
|
||||
|
||||
for _, v := range mt.collID2Meta[collID].Schema.Fields {
|
||||
// the field is a vector type and its index params are not empty
|
||||
if v.FieldID == fieldID && (v.DataType == schemapb.DataType_VECTOR_BINARY || v.DataType == schemapb.DataType_VECTOR_FLOAT) &&
|
||||
len(v.IndexParams) != 0 {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// fieldID is not in the schema (e.g. timestamp) or is not indexable
|
||||
return false, nil
|
||||
}
|
|
@@ -1,504 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
)
|
||||
|
||||
func TestMetaTable_Collection(t *testing.T) {
|
||||
Init()
|
||||
|
||||
etcdAddr := Params.EtcdAddress
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
|
||||
assert.Nil(t, err)
|
||||
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
|
||||
|
||||
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
|
||||
meta, err := NewMetaTable(etcdKV)
|
||||
assert.Nil(t, err)
|
||||
defer meta.client.Close()
|
||||
|
||||
colMeta := pb.CollectionMeta{
|
||||
ID: 100,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: "coll1",
|
||||
},
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{},
|
||||
}
|
||||
colMeta2 := pb.CollectionMeta{
|
||||
ID: 50,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: "coll1",
|
||||
},
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{},
|
||||
}
|
||||
colMeta3 := pb.CollectionMeta{
|
||||
ID: 30,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: "coll2",
|
||||
},
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{},
|
||||
}
|
||||
colMeta4 := pb.CollectionMeta{
|
||||
ID: 30,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: "coll2",
|
||||
},
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{1},
|
||||
PartitionTags: []string{},
|
||||
}
|
||||
colMeta5 := pb.CollectionMeta{
|
||||
ID: 30,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: "coll2",
|
||||
},
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{"1"},
|
||||
}
|
||||
segID1 := pb.SegmentMeta{
|
||||
SegmentID: 200,
|
||||
CollectionID: 100,
|
||||
PartitionTag: "p1",
|
||||
}
|
||||
segID2 := pb.SegmentMeta{
|
||||
SegmentID: 300,
|
||||
CollectionID: 100,
|
||||
PartitionTag: "p1",
|
||||
}
|
||||
segID3 := pb.SegmentMeta{
|
||||
SegmentID: 400,
|
||||
CollectionID: 100,
|
||||
PartitionTag: "p2",
|
||||
}
|
||||
err = meta.AddCollection(&colMeta)
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddCollection(&colMeta2)
|
||||
assert.NotNil(t, err)
|
||||
err = meta.AddCollection(&colMeta3)
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddCollection(&colMeta4)
|
||||
assert.NotNil(t, err)
|
||||
err = meta.AddCollection(&colMeta5)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
collsName, err := meta.ListCollections()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, len(collsName), 2)
|
||||
e1 := reflect.DeepEqual(collsName, []string{"coll1", "coll2"})
|
||||
e2 := reflect.DeepEqual(collsName, []string{"coll2", "coll1"})
|
||||
assert.True(t, e1 || e2)
|
||||
|
||||
hasCollection := meta.HasCollection(colMeta.ID)
|
||||
assert.True(t, hasCollection)
|
||||
err = meta.AddPartition(colMeta.ID, "p1")
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddPartition(colMeta.ID, "p2")
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddSegment(&segID1)
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddSegment(&segID2)
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddSegment(&segID3)
|
||||
assert.Nil(t, err)
|
||||
getColMeta, err := meta.GetCollectionByName("coll5")
|
||||
assert.NotNil(t, err)
|
||||
assert.Nil(t, getColMeta)
|
||||
getColMeta, err = meta.GetCollectionByName(colMeta.Schema.Name)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 3, len(getColMeta.SegmentIDs))
|
||||
err = meta.DeleteCollection(colMeta.ID)
|
||||
assert.Nil(t, err)
|
||||
err = meta.DeleteCollection(500)
|
||||
assert.NotNil(t, err)
|
||||
hasCollection = meta.HasCollection(colMeta.ID)
|
||||
assert.False(t, hasCollection)
|
||||
_, err = meta.GetSegmentByID(segID1.SegmentID)
|
||||
assert.NotNil(t, err)
|
||||
_, err = meta.GetSegmentByID(segID2.SegmentID)
|
||||
assert.NotNil(t, err)
|
||||
_, err = meta.GetSegmentByID(segID3.SegmentID)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
err = meta.reloadFromKV()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, 0, len(meta.proxyID2Meta))
|
||||
assert.Equal(t, 0, len(meta.tenantID2Meta))
|
||||
assert.Equal(t, 1, len(meta.collName2ID))
|
||||
assert.Equal(t, 1, len(meta.collID2Meta))
|
||||
assert.Equal(t, 0, len(meta.segID2Meta))
|
||||
|
||||
err = meta.DeleteCollection(colMeta3.ID)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestMetaTable_DeletePartition(t *testing.T) {
|
||||
Init()
|
||||
|
||||
etcdAddr := Params.EtcdAddress
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
|
||||
assert.Nil(t, err)
|
||||
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
|
||||
|
||||
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
|
||||
meta, err := NewMetaTable(etcdKV)
|
||||
assert.Nil(t, err)
|
||||
defer meta.client.Close()
|
||||
|
||||
colMeta := pb.CollectionMeta{
|
||||
ID: 100,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: "coll1",
|
||||
},
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{},
|
||||
}
|
||||
segID1 := pb.SegmentMeta{
|
||||
SegmentID: 200,
|
||||
CollectionID: 100,
|
||||
PartitionTag: "p1",
|
||||
}
|
||||
segID2 := pb.SegmentMeta{
|
||||
SegmentID: 300,
|
||||
CollectionID: 100,
|
||||
PartitionTag: "p1",
|
||||
}
|
||||
segID3 := pb.SegmentMeta{
|
||||
SegmentID: 400,
|
||||
CollectionID: 100,
|
||||
PartitionTag: "p2",
|
||||
}
|
||||
err = meta.AddCollection(&colMeta)
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddPartition(500, "p1")
|
||||
assert.NotNil(t, err)
|
||||
err = meta.AddPartition(colMeta.ID, "p1")
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddPartition(colMeta.ID, "p2")
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddPartition(colMeta.ID, "p2")
|
||||
assert.NotNil(t, err)
|
||||
err = meta.AddSegment(&segID1)
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddSegment(&segID2)
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddSegment(&segID3)
|
||||
assert.Nil(t, err)
|
||||
afterCollMeta, err := meta.GetCollectionByName("coll1")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 3, len(afterCollMeta.PartitionTags))
|
||||
assert.Equal(t, 3, len(afterCollMeta.SegmentIDs))
|
||||
err = meta.DeletePartition(100, "p1")
|
||||
assert.Nil(t, err)
|
||||
err = meta.DeletePartition(500, "p1")
|
||||
assert.NotNil(t, err)
|
||||
afterCollMeta, err = meta.GetCollectionByName("coll1")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 2, len(afterCollMeta.PartitionTags))
|
||||
assert.Equal(t, 1, len(afterCollMeta.SegmentIDs))
|
||||
hasPartition := meta.HasPartition(colMeta.ID, "p1")
|
||||
assert.False(t, hasPartition)
|
||||
hasPartition = meta.HasPartition(colMeta.ID, "p2")
|
||||
assert.True(t, hasPartition)
|
||||
_, err = meta.GetSegmentByID(segID1.SegmentID)
|
||||
assert.NotNil(t, err)
|
||||
_, err = meta.GetSegmentByID(segID2.SegmentID)
|
||||
assert.NotNil(t, err)
|
||||
_, err = meta.GetSegmentByID(segID3.SegmentID)
|
||||
assert.Nil(t, err)
|
||||
afterCollMeta, err = meta.GetCollectionByName("coll1")
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = meta.reloadFromKV()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, 0, len(meta.proxyID2Meta))
|
||||
assert.Equal(t, 0, len(meta.tenantID2Meta))
|
||||
assert.Equal(t, 1, len(meta.collName2ID))
|
||||
assert.Equal(t, 1, len(meta.collID2Meta))
|
||||
assert.Equal(t, 1, len(meta.segID2Meta))
|
||||
|
||||
// delete not exist
|
||||
err = meta.DeletePartition(100, "not_exist")
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestMetaTable_Segment(t *testing.T) {
|
||||
Init()
|
||||
|
||||
etcdAddr := Params.EtcdAddress
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
|
||||
assert.Nil(t, err)
|
||||
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
|
||||
|
||||
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
|
||||
meta, err := NewMetaTable(etcdKV)
|
||||
assert.Nil(t, err)
|
||||
defer meta.client.Close()
|
||||
|
||||
keys, _, err := meta.client.LoadWithPrefix("")
|
||||
assert.Nil(t, err)
|
||||
err = meta.client.MultiRemove(keys)
|
||||
assert.Nil(t, err)
|
||||
|
||||
colMeta := pb.CollectionMeta{
|
||||
ID: 100,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: "coll1",
|
||||
},
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{},
|
||||
}
|
||||
segMeta := pb.SegmentMeta{
|
||||
SegmentID: 200,
|
||||
CollectionID: 100,
|
||||
PartitionTag: "p1",
|
||||
}
|
||||
err = meta.AddCollection(&colMeta)
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddPartition(colMeta.ID, "p1")
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddSegment(&segMeta)
|
||||
assert.Nil(t, err)
|
||||
getSegMeta, err := meta.GetSegmentByID(segMeta.SegmentID)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, &segMeta, getSegMeta)
|
||||
segMeta.NumRows = 111
|
||||
segMeta.MemSize = 100000
|
||||
err = meta.UpdateSegment(&segMeta)
|
||||
assert.Nil(t, err)
|
||||
err = meta.CloseSegment(segMeta.SegmentID, Timestamp(11))
|
||||
assert.Nil(t, err)
|
||||
err = meta.CloseSegment(1000, Timestamp(11))
|
||||
assert.NotNil(t, err)
|
||||
getSegMeta, err = meta.GetSegmentByID(segMeta.SegmentID)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, getSegMeta.NumRows, int64(111))
|
||||
assert.Equal(t, getSegMeta.CloseTime, uint64(11))
|
||||
assert.Equal(t, int64(100000), getSegMeta.MemSize)
|
||||
err = meta.DeleteSegment(segMeta.SegmentID)
|
||||
assert.Nil(t, err)
|
||||
err = meta.DeleteSegment(1000)
|
||||
assert.NotNil(t, err)
|
||||
getSegMeta, err = meta.GetSegmentByID(segMeta.SegmentID)
|
||||
assert.Nil(t, getSegMeta)
|
||||
assert.NotNil(t, err)
|
||||
getColMeta, err := meta.GetCollectionByName(colMeta.Schema.Name)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 0, len(getColMeta.SegmentIDs))
|
||||
|
||||
meta.tenantID2Meta = make(map[UniqueID]pb.TenantMeta)
|
||||
meta.proxyID2Meta = make(map[UniqueID]pb.ProxyMeta)
|
||||
meta.collID2Meta = make(map[UniqueID]pb.CollectionMeta)
|
||||
meta.collName2ID = make(map[string]UniqueID)
|
||||
meta.segID2Meta = make(map[UniqueID]pb.SegmentMeta)
|
||||
|
||||
err = meta.reloadFromKV()
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, 0, len(meta.proxyID2Meta))
|
||||
assert.Equal(t, 0, len(meta.tenantID2Meta))
|
||||
assert.Equal(t, 1, len(meta.collName2ID))
|
||||
assert.Equal(t, 1, len(meta.collID2Meta))
|
||||
assert.Equal(t, 0, len(meta.segID2Meta))
|
||||
|
||||
}
|
||||
|
||||
func TestMetaTable_UpdateSegment(t *testing.T) {
|
||||
Init()
|
||||
|
||||
etcdAddr := Params.EtcdAddress
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
|
||||
assert.Nil(t, err)
|
||||
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
|
||||
|
||||
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
|
||||
meta, err := NewMetaTable(etcdKV)
|
||||
assert.Nil(t, err)
|
||||
defer meta.client.Close()
|
||||
|
||||
colMeta := pb.CollectionMeta{
|
||||
ID: 100,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: "coll1",
|
||||
},
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{},
|
||||
}
|
||||
segMeta := pb.SegmentMeta{
|
||||
SegmentID: 200,
|
||||
CollectionID: 100,
|
||||
PartitionTag: "p1",
|
||||
NumRows: 110,
|
||||
}
|
||||
err = meta.AddCollection(&colMeta)
|
||||
assert.Nil(t, err)
|
||||
err = meta.UpdateSegment(&segMeta)
|
||||
assert.Nil(t, err)
|
||||
seg, err := meta.GetSegmentByID(200)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, seg.NumRows, int64(110))
|
||||
segMeta.NumRows = 210
|
||||
err = meta.UpdateSegment(&segMeta)
|
||||
assert.Nil(t, err)
|
||||
seg, err = meta.GetSegmentByID(200)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, seg.NumRows, int64(210))
|
||||
}
|
||||
|
||||
func TestMetaTable_AddPartition_Limit(t *testing.T) {
|
||||
Init()
|
||||
Params.MaxPartitionNum = 256 // adding 4096 partitions is too slow
|
||||
etcdAddr := Params.EtcdAddress
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
|
||||
assert.Nil(t, err)
|
||||
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
|
||||
|
||||
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
|
||||
meta, err := NewMetaTable(etcdKV)
|
||||
assert.Nil(t, err)
|
||||
defer meta.client.Close()
|
||||
|
||||
colMeta := pb.CollectionMeta{
|
||||
ID: 100,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: "coll1",
|
||||
},
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{},
|
||||
}
|
||||
err = meta.AddCollection(&colMeta)
|
||||
assert.Nil(t, err)
|
||||
|
||||
for i := 0; i < int(Params.MaxPartitionNum); i++ {
|
||||
err := meta.AddPartition(100, "partition_"+strconv.Itoa(i))
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
err = meta.AddPartition(100, "partition_limit")
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestMetaTable_LoadIndexMetaFromKv(t *testing.T) {
|
||||
Init()
|
||||
etcdAddr := Params.EtcdAddress
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
|
||||
assert.Nil(t, err)
|
||||
kv := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
|
||||
|
||||
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
|
||||
meta := pb.FieldIndexMeta{
|
||||
SegmentID: 1,
|
||||
FieldID: 100,
|
||||
IndexID: 1000,
|
||||
IndexParams: []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}},
|
||||
State: commonpb.IndexState_FINISHED,
|
||||
IndexFilePaths: []string{"path1"},
|
||||
}
|
||||
marshalRes := proto.MarshalTextString(&meta)
|
||||
err = kv.Save("/indexmeta/"+strconv.FormatInt(meta.SegmentID, 10)+strconv.FormatInt(meta.FieldID, 10)+strconv.FormatInt(meta.IndexID, 10), marshalRes)
|
||||
assert.Nil(t, err)
|
||||
|
||||
metaTable, err := NewMetaTable(kv)
|
||||
assert.Nil(t, err)
|
||||
res, err := metaTable.HasFieldIndexMeta(1, 100, []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}})
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, res)
|
||||
}
|
||||
|
||||
func TestMetaTable_IndexMeta(t *testing.T) {
|
||||
Init()
|
||||
etcdAddr := Params.EtcdAddress
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
|
||||
assert.Nil(t, err)
|
||||
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
|
||||
|
||||
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
|
||||
meta, err := NewMetaTable(etcdKV)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = meta.AddFieldIndexMeta(&pb.FieldIndexMeta{
|
||||
SegmentID: 1,
|
||||
FieldID: 100,
|
||||
IndexID: 1000,
|
||||
IndexParams: []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}},
|
||||
State: commonpb.IndexState_INPROGRESS,
|
||||
IndexFilePaths: []string{},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddFieldIndexMeta(&pb.FieldIndexMeta{
|
||||
SegmentID: 1,
|
||||
FieldID: 100,
|
||||
IndexID: 1000,
|
||||
IndexParams: []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}},
|
||||
State: commonpb.IndexState_INPROGRESS,
|
||||
IndexFilePaths: []string{},
|
||||
})
|
||||
assert.NotNil(t, err)
|
||||
|
||||
res, err := meta.HasFieldIndexMeta(1, 100, []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}})
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, res)
|
||||
res, err = meta.HasFieldIndexMeta(1, 100, []*commonpb.KeyValuePair{{Key: "k1", Value: "v2"}})
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, res)
|
||||
|
||||
err = meta.UpdateFieldIndexMeta(&pb.FieldIndexMeta{
|
||||
SegmentID: 1,
|
||||
FieldID: 100,
|
||||
IndexID: 1000,
|
||||
IndexParams: []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}},
|
||||
State: commonpb.IndexState_FINISHED,
|
||||
IndexFilePaths: []string{},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
assert.EqualValues(t, commonpb.IndexState_FINISHED, meta.segID2IndexMetas[1][0].State)
|
||||
err = meta.DeleteFieldIndexMeta(1, 100, []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}})
|
||||
assert.Nil(t, err)
|
||||
res, err = meta.HasFieldIndexMeta(1, 100, []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}})
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, res)
|
||||
}
|
|
@@ -1,357 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
|
||||
)
|
||||
|
||||
type ParamTable struct {
|
||||
paramtable.BaseTable
|
||||
|
||||
Address string
|
||||
Port int
|
||||
|
||||
EtcdAddress string
|
||||
MetaRootPath string
|
||||
KvRootPath string
|
||||
WriteNodeSegKvSubPath string
|
||||
PulsarAddress string
|
||||
IndexBuilderAddress string
|
||||
|
||||
// nodeID
|
||||
ProxyIDList []typeutil.UniqueID
|
||||
WriteNodeIDList []typeutil.UniqueID
|
||||
|
||||
TopicNum int
|
||||
QueryNodeNum int
|
||||
SoftTimeTickBarrierInterval typeutil.Timestamp
|
||||
|
||||
// segment
|
||||
SegmentSize float64
|
||||
SegmentSizeFactor float64
|
||||
DefaultRecordSize int64
|
||||
MinSegIDAssignCnt int64
|
||||
MaxSegIDAssignCnt int64
|
||||
SegIDAssignExpiration int64
|
||||
|
||||
// msgChannel
|
||||
ProxyServiceTimeTickChannelNames []string
|
||||
WriteNodeTimeTickChannelNames []string
|
||||
DDChannelNames []string
|
||||
InsertChannelNames []string
|
||||
K2SChannelNames []string
|
||||
QueryNodeStatsChannelName string
|
||||
MsgChannelSubName string
|
||||
|
||||
MaxPartitionNum int64
|
||||
DefaultPartitionTag string
|
||||
|
||||
LoadIndexChannelNames []string
|
||||
}
|
||||
|
||||
var Params ParamTable
|
||||
|
||||
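// Init loads the master configuration from yaml and initializes every field of the param table.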
func (p *ParamTable) Init() {
|
||||
// load yaml
|
||||
p.BaseTable.Init()
|
||||
|
||||
err := p.LoadYaml("advanced/master.yaml")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// set members
|
||||
p.initAddress()
|
||||
p.initPort()
|
||||
|
||||
p.initEtcdAddress()
|
||||
p.initMetaRootPath()
|
||||
p.initKvRootPath()
|
||||
p.initWriteNodeSegKvSubPath()
|
||||
p.initPulsarAddress()
|
||||
p.initIndexBuilderAddress()
|
||||
|
||||
p.initProxyIDList()
|
||||
p.initWriteNodeIDList()
|
||||
|
||||
p.initTopicNum()
|
||||
p.initQueryNodeNum()
|
||||
p.initSoftTimeTickBarrierInterval()
|
||||
|
||||
p.initSegmentSize()
|
||||
p.initSegmentSizeFactor()
|
||||
p.initDefaultRecordSize()
|
||||
p.initMinSegIDAssignCnt()
|
||||
p.initMaxSegIDAssignCnt()
|
||||
p.initSegIDAssignExpiration()
|
||||
|
||||
p.initProxyServiceTimeTickChannelNames()
|
||||
p.initWriteNodeTimeTickChannelNames()
|
||||
p.initInsertChannelNames()
|
||||
p.initDDChannelNames()
|
||||
p.initK2SChannelNames()
|
||||
p.initQueryNodeStatsChannelName()
|
||||
p.initMsgChannelSubName()
|
||||
p.initMaxPartitionNum()
|
||||
p.initDefaultPartitionTag()
|
||||
|
||||
p.initLoadIndexChannelNames()
|
||||
}
|
||||
|
||||
func (p *ParamTable) initAddress() {
|
||||
masterAddress, err := p.Load("master.address")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.Address = masterAddress
|
||||
}
|
||||
|
||||
func (p *ParamTable) initPort() {
|
||||
p.Port = p.ParseInt("master.port")
|
||||
}
|
||||
|
||||
func (p *ParamTable) initEtcdAddress() {
|
||||
addr, err := p.Load("_EtcdAddress")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.EtcdAddress = addr
|
||||
}
|
||||
|
||||
func (p *ParamTable) initPulsarAddress() {
|
||||
addr, err := p.Load("_PulsarAddress")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.PulsarAddress = addr
|
||||
}
|
||||
|
||||
func (p *ParamTable) initIndexBuilderAddress() {
|
||||
ret, err := p.Load("IndexServiceAddress")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.IndexBuilderAddress = ret
|
||||
}
|
||||
|
||||
func (p *ParamTable) initMetaRootPath() {
|
||||
rootPath, err := p.Load("etcd.rootPath")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
subPath, err := p.Load("etcd.metaSubPath")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.MetaRootPath = rootPath + "/" + subPath
|
||||
}
|
||||
|
||||
func (p *ParamTable) initKvRootPath() {
|
||||
rootPath, err := p.Load("etcd.rootPath")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
subPath, err := p.Load("etcd.kvSubPath")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.KvRootPath = rootPath + "/" + subPath
|
||||
}
|
||||
|
||||
func (p *ParamTable) initWriteNodeSegKvSubPath() {
|
||||
subPath, err := p.Load("etcd.writeNodeSegKvSubPath")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.WriteNodeSegKvSubPath = subPath + "/"
|
||||
}
|
||||
|
||||
func (p *ParamTable) initTopicNum() {
|
||||
iRangeStr, err := p.Load("msgChannel.channelRange.insert")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rangeSlice := paramtable.ConvertRangeToIntRange(iRangeStr, ",")
|
||||
p.TopicNum = rangeSlice[1] - rangeSlice[0]
|
||||
}
|
||||
|
||||
func (p *ParamTable) initSegmentSize() {
|
||||
p.SegmentSize = p.ParseFloat("master.segment.size")
|
||||
}
|
||||
|
||||
func (p *ParamTable) initSegmentSizeFactor() {
|
||||
p.SegmentSizeFactor = p.ParseFloat("master.segment.sizeFactor")
|
||||
}
|
||||
|
||||
func (p *ParamTable) initDefaultRecordSize() {
|
||||
p.DefaultRecordSize = p.ParseInt64("master.segment.defaultSizePerRecord")
|
||||
}
|
||||
|
||||
func (p *ParamTable) initMinSegIDAssignCnt() {
|
||||
p.MinSegIDAssignCnt = p.ParseInt64("master.segment.minIDAssignCnt")
|
||||
}
|
||||
|
||||
func (p *ParamTable) initMaxSegIDAssignCnt() {
|
||||
p.MaxSegIDAssignCnt = p.ParseInt64("master.segment.maxIDAssignCnt")
|
||||
}
|
||||
|
||||
func (p *ParamTable) initSegIDAssignExpiration() {
|
||||
p.SegIDAssignExpiration = p.ParseInt64("master.segment.IDAssignExpiration")
|
||||
}
|
||||
|
||||
func (p *ParamTable) initQueryNodeNum() {
|
||||
p.QueryNodeNum = len(p.QueryNodeIDList())
|
||||
}
|
||||
|
||||
func (p *ParamTable) initQueryNodeStatsChannelName() {
|
||||
channels, err := p.Load("msgChannel.chanNamePrefix.queryNodeStats")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.QueryNodeStatsChannelName = channels
|
||||
}
|
||||
|
||||
func (p *ParamTable) initProxyIDList() {
|
||||
p.ProxyIDList = p.BaseTable.ProxyIDList()
|
||||
}
|
||||
|
||||
func (p *ParamTable) initProxyServiceTimeTickChannelNames() {
|
||||
ch, err := p.Load("msgChannel.chanNamePrefix.proxyServiceTimeTick")
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
p.ProxyServiceTimeTickChannelNames = []string{ch}
|
||||
}
|
||||
|
||||
func (p *ParamTable) initMsgChannelSubName() {
|
||||
name, err := p.Load("msgChannel.subNamePrefix.masterSubNamePrefix")
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
p.MsgChannelSubName = name
|
||||
}
|
||||
|
||||
func (p *ParamTable) initSoftTimeTickBarrierInterval() {
|
||||
t, err := p.Load("master.timeSync.softTimeTickBarrierInterval")
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
v, err := strconv.ParseInt(t, 10, 64)
|
||||
if err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
p.SoftTimeTickBarrierInterval = tsoutil.ComposeTS(v, 0)
|
||||
}
|
||||
|
||||
func (p *ParamTable) initWriteNodeIDList() {
|
||||
p.WriteNodeIDList = p.BaseTable.WriteNodeIDList()
|
||||
}
|
||||
|
||||
func (p *ParamTable) initWriteNodeTimeTickChannelNames() {
|
||||
ch, err := p.Load("msgChannel.chanNamePrefix.writeNodeTimeTick")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
id, err := p.Load("nodeID.writeNodeIDList")
|
||||
if err != nil {
|
||||
log.Panicf("load write node id list error, %s", err.Error())
|
||||
}
|
||||
ids := strings.Split(id, ",")
|
||||
channels := make([]string, 0, len(ids))
|
||||
for _, i := range ids {
|
||||
_, err := strconv.ParseInt(i, 10, 64)
|
||||
if err != nil {
|
||||
log.Panicf("load write node id list error, %s", err.Error())
|
||||
}
|
||||
channels = append(channels, ch+"-"+i)
|
||||
}
|
||||
p.WriteNodeTimeTickChannelNames = channels
|
||||
}
|
||||
|
||||
func (p *ParamTable) initDDChannelNames() {
|
||||
prefix, err := p.Load("msgChannel.chanNamePrefix.dataDefinition")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
prefix += "-"
|
||||
iRangeStr, err := p.Load("msgChannel.channelRange.dataDefinition")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
channelIDs := paramtable.ConvertRangeToIntSlice(iRangeStr, ",")
|
||||
var ret []string
|
||||
for _, ID := range channelIDs {
|
||||
ret = append(ret, prefix+strconv.Itoa(ID))
|
||||
}
|
||||
p.DDChannelNames = ret
|
||||
}
|
||||
|
||||
func (p *ParamTable) initInsertChannelNames() {
|
||||
prefix, err := p.Load("msgChannel.chanNamePrefix.insert")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
prefix += "-"
|
||||
iRangeStr, err := p.Load("msgChannel.channelRange.insert")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
channelIDs := paramtable.ConvertRangeToIntSlice(iRangeStr, ",")
|
||||
var ret []string
|
||||
for _, ID := range channelIDs {
|
||||
ret = append(ret, prefix+strconv.Itoa(ID))
|
||||
}
|
||||
p.InsertChannelNames = ret
|
||||
}
|
||||
|
||||
func (p *ParamTable) initK2SChannelNames() {
|
||||
prefix, err := p.Load("msgChannel.chanNamePrefix.k2s")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
prefix += "-"
|
||||
iRangeStr, err := p.Load("msgChannel.channelRange.k2s")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
channelIDs := paramtable.ConvertRangeToIntSlice(iRangeStr, ",")
|
||||
var ret []string
|
||||
for _, ID := range channelIDs {
|
||||
ret = append(ret, prefix+strconv.Itoa(ID))
|
||||
}
|
||||
p.K2SChannelNames = ret
|
||||
}
|
||||
|
||||
func (p *ParamTable) initMaxPartitionNum() {
|
||||
str, err := p.Load("master.maxPartitionNum")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
maxPartitionNum, err := strconv.ParseInt(str, 10, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.MaxPartitionNum = maxPartitionNum
|
||||
}
|
||||
|
||||
func (p *ParamTable) initDefaultPartitionTag() {
|
||||
defaultTag, err := p.Load("common.defaultPartitionTag")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
p.DefaultPartitionTag = defaultTag
|
||||
}
|
||||
|
||||
func (p *ParamTable) initLoadIndexChannelNames() {
|
||||
loadIndexChannelName, err := p.Load("msgChannel.chanNamePrefix.cmd")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.LoadIndexChannelNames = []string{loadIndexChannelName}
|
||||
}
|
|
@@ -1,127 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParamTable_Init(t *testing.T) {
|
||||
Params.Init()
|
||||
}
|
||||
|
||||
func TestParamTable_Address(t *testing.T) {
|
||||
address := Params.Address
|
||||
assert.Equal(t, address, "localhost")
|
||||
}
|
||||
|
||||
func TestParamTable_Port(t *testing.T) {
|
||||
port := Params.Port
|
||||
assert.Equal(t, port, 53100)
|
||||
}
|
||||
|
||||
func TestParamTable_MetaRootPath(t *testing.T) {
|
||||
path := Params.MetaRootPath
|
||||
assert.Equal(t, path, "by-dev/meta")
|
||||
}
|
||||
|
||||
func TestParamTable_KVRootPath(t *testing.T) {
|
||||
path := Params.KvRootPath
|
||||
assert.Equal(t, path, "by-dev/kv")
|
||||
}
|
||||
|
||||
func TestParamTableIndexServiceAddress(t *testing.T) {
|
||||
path := Params.IndexBuilderAddress
|
||||
assert.Equal(t, path, "localhost:31000")
|
||||
}
|
||||
|
||||
func TestParamTable_TopicNum(t *testing.T) {
|
||||
num := Params.TopicNum
|
||||
fmt.Println("TopicNum:", num)
|
||||
}
|
||||
|
||||
func TestParamTable_SegmentSize(t *testing.T) {
|
||||
size := Params.SegmentSize
|
||||
assert.Equal(t, size, float64(512))
|
||||
}
|
||||
|
||||
func TestParamTable_SegmentSizeFactor(t *testing.T) {
|
||||
factor := Params.SegmentSizeFactor
|
||||
assert.Equal(t, factor, 0.75)
|
||||
}
|
||||
|
||||
func TestParamTable_DefaultRecordSize(t *testing.T) {
|
||||
size := Params.DefaultRecordSize
|
||||
assert.Equal(t, size, int64(1024))
|
||||
}
|
||||
|
||||
func TestParamTable_MinSegIDAssignCnt(t *testing.T) {
|
||||
cnt := Params.MinSegIDAssignCnt
|
||||
assert.Equal(t, cnt, int64(1024))
|
||||
}
|
||||
|
||||
func TestParamTable_MaxSegIDAssignCnt(t *testing.T) {
|
||||
cnt := Params.MaxSegIDAssignCnt
|
||||
assert.Equal(t, cnt, int64(16384))
|
||||
}
|
||||
|
||||
func TestParamTable_SegIDAssignExpiration(t *testing.T) {
|
||||
expiration := Params.SegIDAssignExpiration
|
||||
assert.Equal(t, expiration, int64(2000))
|
||||
}
|
||||
|
||||
func TestParamTable_QueryNodeNum(t *testing.T) {
|
||||
num := Params.QueryNodeNum
|
||||
fmt.Println("QueryNodeNum", num)
|
||||
}
|
||||
|
||||
func TestParamTable_QueryNodeStatsChannelName(t *testing.T) {
|
||||
name := Params.QueryNodeStatsChannelName
|
||||
assert.Equal(t, name, "query-node-stats")
|
||||
}
|
||||
|
||||
func TestParamTable_ProxyIDList(t *testing.T) {
|
||||
ids := Params.ProxyIDList
|
||||
assert.Equal(t, len(ids), 1)
|
||||
assert.Equal(t, ids[0], int64(0))
|
||||
}
|
||||
|
||||
func TestParamTable_ProxyTimeTickChannelNames(t *testing.T) {
|
||||
names := Params.ProxyServiceTimeTickChannelNames
|
||||
assert.Equal(t, len(names), 1)
|
||||
assert.Equal(t, names[0], "proxyTimeTick-0")
|
||||
}
|
||||
|
||||
func TestParamTable_MsgChannelSubName(t *testing.T) {
|
||||
name := Params.MsgChannelSubName
|
||||
assert.Equal(t, name, "master")
|
||||
}
|
||||
|
||||
func TestParamTable_SoftTimeTickBarrierInterval(t *testing.T) {
|
||||
interval := Params.SoftTimeTickBarrierInterval
|
||||
assert.Equal(t, interval, Timestamp(0x7d00000))
|
||||
}
|
||||
|
||||
func TestParamTable_WriteNodeIDList(t *testing.T) {
|
||||
ids := Params.WriteNodeIDList
|
||||
assert.Equal(t, len(ids), 1)
|
||||
assert.Equal(t, ids[0], int64(3))
|
||||
}
|
||||
|
||||
func TestParamTable_WriteNodeTimeTickChannelNames(t *testing.T) {
|
||||
names := Params.WriteNodeTimeTickChannelNames
|
||||
assert.Equal(t, len(names), 1)
|
||||
assert.Equal(t, names[0], "writeNodeTimeTick-3")
|
||||
}
|
||||
|
||||
func TestParamTable_InsertChannelNames(t *testing.T) {
|
||||
names := Params.InsertChannelNames
|
||||
assert.Equal(t, Params.TopicNum, len(names))
|
||||
}
|
||||
|
||||
func TestParamTable_K2SChannelNames(t *testing.T) {
|
||||
names := Params.K2SChannelNames
|
||||
assert.Equal(t, len(names), 1)
|
||||
assert.Equal(t, names[0], "k2s-0")
|
||||
}
|
|
@@ -1,281 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"log"
|
||||
|
||||
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
)
|
||||
|
||||
const partitionMetaPrefix = "partition/"
|
||||
|
||||
type createPartitionTask struct {
|
||||
baseTask
|
||||
req *milvuspb.CreatePartitionRequest
|
||||
}
|
||||
|
||||
type dropPartitionTask struct {
|
||||
baseTask
|
||||
req *milvuspb.DropPartitionRequest
|
||||
}
|
||||
|
||||
type hasPartitionTask struct {
|
||||
baseTask
|
||||
hasPartition bool
|
||||
req *milvuspb.HasPartitionRequest
|
||||
}
|
||||
|
||||
//type describePartitionTask struct {
|
||||
// baseTask
|
||||
// description *milvuspb.PartitionDescription
|
||||
// req *internalpb.DescribePartitionRequest
|
||||
//}
|
||||
|
||||
type showPartitionTask struct {
|
||||
baseTask
|
||||
resp *milvuspb.ShowPartitionResponse
|
||||
req *milvuspb.ShowPartitionRequest
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
func (t *createPartitionTask) Type() commonpb.MsgType {
|
||||
if t.req == nil {
|
||||
log.Printf("null request")
|
||||
return 0
|
||||
}
|
||||
return t.req.Base.MsgType
|
||||
}
|
||||
|
||||
func (t *createPartitionTask) Ts() (Timestamp, error) {
|
||||
if t.req == nil {
|
||||
return 0, errors.New("null request")
|
||||
}
|
||||
return Timestamp(t.req.Base.Timestamp), nil
|
||||
}
|
||||
|
||||
func (t *createPartitionTask) Execute() error {
|
||||
if t.req == nil {
|
||||
return errors.New("null request")
|
||||
}
|
||||
|
||||
partitionName := t.req.PartitionName
|
||||
collectionName := t.req.CollectionName
|
||||
collectionMeta, err := t.mt.GetCollectionByName(collectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ts, err := t.Ts()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = t.mt.AddPartition(collectionMeta.ID, partitionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgPack := ms.MsgPack{}
|
||||
baseMsg := ms.BaseMsg{
|
||||
BeginTimestamp: ts,
|
||||
EndTimestamp: ts,
|
||||
HashValues: []uint32{0},
|
||||
}
|
||||
|
||||
partitionMsg := internalpb2.CreatePartitionRequest{
|
||||
Base: t.req.Base,
|
||||
DbName: "",
|
||||
CollectionName: t.req.CollectionName,
|
||||
PartitionName: t.req.PartitionName,
|
||||
DbID: 0, // todo add DbID
|
||||
CollectionID: collectionMeta.ID,
|
||||
PartitionID: 0, // todo add partitionID
|
||||
}
|
||||
timeTickMsg := &ms.CreatePartitionMsg{
|
||||
BaseMsg: baseMsg,
|
||||
CreatePartitionRequest: partitionMsg,
|
||||
}
|
||||
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
|
||||
return t.sch.ddMsgStream.Broadcast(&msgPack)
|
||||
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
func (t *dropPartitionTask) Type() commonpb.MsgType {
|
||||
if t.req == nil {
|
||||
log.Printf("null request")
|
||||
return 0
|
||||
}
|
||||
return t.req.Base.MsgType
|
||||
}
|
||||
|
||||
func (t *dropPartitionTask) Ts() (Timestamp, error) {
|
||||
if t.req == nil {
|
||||
return 0, errors.New("null request")
|
||||
}
|
||||
return t.req.Base.Timestamp, nil
|
||||
}
|
||||
|
||||
func (t *dropPartitionTask) Execute() error {
|
||||
if t.req == nil {
|
||||
return errors.New("null request")
|
||||
}
|
||||
|
||||
partitionName := t.req.PartitionName
|
||||
collectionName := t.req.CollectionName
|
||||
collectionMeta, err := t.mt.GetCollectionByName(collectionName)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = t.mt.DeletePartition(collectionMeta.ID, partitionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ts, err := t.Ts()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgPack := ms.MsgPack{}
|
||||
baseMsg := ms.BaseMsg{
|
||||
BeginTimestamp: ts,
|
||||
EndTimestamp: ts,
|
||||
HashValues: []uint32{0},
|
||||
}
|
||||
|
||||
dropMsg := internalpb2.DropPartitionRequest{
|
||||
Base: t.req.Base,
|
||||
DbName: "", // tod add DbName
|
||||
CollectionName: t.req.CollectionName,
|
||||
PartitionName: t.req.PartitionName,
|
||||
DbID: 0, // todo add DbID
|
||||
CollectionID: collectionMeta.ID,
|
||||
PartitionID: 0, // todo: add PartitionID
|
||||
}
|
||||
timeTickMsg := &ms.DropPartitionMsg{
|
||||
BaseMsg: baseMsg,
|
||||
DropPartitionRequest: dropMsg,
|
||||
}
|
||||
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
|
||||
return t.sch.ddMsgStream.Broadcast(&msgPack)
|
||||
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
func (t *hasPartitionTask) Type() commonpb.MsgType {
|
||||
if t.req == nil {
|
||||
log.Printf("null request")
|
||||
return 0
|
||||
}
|
||||
return t.req.Base.MsgType
|
||||
}
|
||||
|
||||
func (t *hasPartitionTask) Ts() (Timestamp, error) {
|
||||
if t.req == nil {
|
||||
return 0, errors.New("null request")
|
||||
}
|
||||
return t.req.Base.Timestamp, nil
|
||||
}
|
||||
|
||||
func (t *hasPartitionTask) Execute() error {
|
||||
if t.req == nil {
|
||||
return errors.New("null request")
|
||||
}
|
||||
|
||||
partitionName := t.req.PartitionName
|
||||
collectionName := t.req.CollectionName
|
||||
collectionMeta, err := t.mt.GetCollectionByName(collectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.hasPartition = t.mt.HasPartition(collectionMeta.ID, partitionName)
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
//func (t *describePartitionTask) Type() commonpb.MsgType {
|
||||
// if t.req == nil {
|
||||
// log.Printf("null request")
|
||||
// return 0
|
||||
// }
|
||||
// return t.req.MsgType
|
||||
//}
|
||||
//
|
||||
//func (t *describePartitionTask) Ts() (Timestamp, error) {
|
||||
// if t.req == nil {
|
||||
// return 0, errors.New("null request")
|
||||
// }
|
||||
// return t.req.Timestamp, nil
|
||||
//}
|
||||
//
|
||||
//func (t *describePartitionTask) Execute() error {
|
||||
// if t.req == nil {
|
||||
// return errors.New("null request")
|
||||
// }
|
||||
//
|
||||
// partitionName := t.req.PartitionName
|
||||
//
|
||||
// description := milvuspb.PartitionDescription{
|
||||
// Status: &commonpb.Status{
|
||||
// ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
// },
|
||||
// Name: partitionName,
|
||||
// Statistics: nil,
|
||||
// }
|
||||
//
|
||||
// t.description = &description
|
||||
//
|
||||
// return nil
|
||||
//
|
||||
//}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
func (t *showPartitionTask) Type() commonpb.MsgType {
|
||||
if t.req == nil {
|
||||
log.Printf("null request")
|
||||
return 0
|
||||
}
|
||||
return t.req.Base.MsgType
|
||||
}
|
||||
|
||||
func (t *showPartitionTask) Ts() (Timestamp, error) {
|
||||
if t.req == nil {
|
||||
return 0, errors.New("null request")
|
||||
}
|
||||
return t.req.Base.Timestamp, nil
|
||||
}
|
||||
|
||||
func (t *showPartitionTask) Execute() error {
|
||||
if t.req == nil {
|
||||
return errors.New("null request")
|
||||
}
|
||||
|
||||
collMeta, err := t.mt.GetCollectionByName(t.req.CollectionName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
partitions := make([]string, 0)
|
||||
partitions = append(partitions, collMeta.PartitionTags...)
|
||||
|
||||
stringListResponse := milvuspb.ShowPartitionResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_SUCCESS,
|
||||
},
|
||||
PartitionNames: partitions,
|
||||
}
|
||||
|
||||
t.resp = &stringListResponse
|
||||
|
||||
return nil
|
||||
|
||||
}
|
|
@@ -1,118 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
)
|
||||
|
||||
func TestPersistenceScheduler(t *testing.T) {
|
||||
//Init environment Params
|
||||
Init()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
//Init client, use Mock instead
|
||||
flushClient := &MockWriteNodeClient{}
|
||||
buildIndexClient := &MockBuildIndexClient{}
|
||||
loadIndexClient := &MockLoadIndexClient{}
|
||||
|
||||
etcdAddr := Params.EtcdAddress
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
|
||||
assert.Nil(t, err)
|
||||
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
|
||||
|
||||
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
|
||||
meta, err := NewMetaTable(etcdKV)
|
||||
assert.Nil(t, err)
|
||||
defer meta.client.Close()
|
||||
|
||||
err = meta.AddCollection(&etcdpb.CollectionMeta{
|
||||
ID: 1,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: "testcoll",
|
||||
Fields: []*schemapb.FieldSchema{
|
||||
{FieldID: 1},
|
||||
{FieldID: 100, DataType: schemapb.DataType_VECTOR_FLOAT, IndexParams: []*commonpb.KeyValuePair{{Key: "k", Value: "v"}}},
|
||||
},
|
||||
},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
err = meta.AddSegment(&etcdpb.SegmentMeta{
|
||||
SegmentID: 1,
|
||||
CollectionID: 1,
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
|
||||
//Init scheduler
|
||||
indexLoadSch := NewIndexLoadScheduler(ctx, loadIndexClient, meta)
|
||||
indexBuildSch := NewIndexBuildScheduler(ctx, buildIndexClient, meta, indexLoadSch)
|
||||
cnt := 0
|
||||
flushSch := NewFlushScheduler(ctx, flushClient, meta, indexBuildSch, func() (Timestamp, error) {
|
||||
cnt++
|
||||
return Timestamp(cnt), nil
|
||||
})
|
||||
|
||||
//scheduler start
|
||||
err = indexLoadSch.Start()
|
||||
assert.Nil(t, err)
|
||||
defer indexLoadSch.Close()
|
||||
|
||||
err = indexBuildSch.Start()
|
||||
assert.Nil(t, err)
|
||||
defer indexBuildSch.Close()
|
||||
|
||||
err = flushSch.Start()
|
||||
assert.Nil(t, err)
|
||||
defer flushSch.Close()
|
||||
|
||||
//start from flush scheduler
|
||||
err = flushSch.Enqueue(UniqueID(1))
|
||||
assert.Nil(t, err)
|
||||
//wait flush segment request sent to write node
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
segDes, err := flushClient.DescribeSegment(UniqueID(1))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, false, segDes.IsClosed)
|
||||
|
||||
//wait flush to finish
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
segDes, err = flushClient.DescribeSegment(UniqueID(1))
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, UniqueID(1), segDes.SegmentID)
|
||||
assert.Equal(t, true, segDes.IsClosed)
|
||||
|
||||
//wait flush segment request sent to build index node
|
||||
time.Sleep(100 * time.Microsecond)
|
||||
req := &indexpb.IndexStatesRequest{
|
||||
IndexIDs: []UniqueID{UniqueID(1)},
|
||||
}
|
||||
idxDes, err := buildIndexClient.GetIndexStates(req)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, commonpb.IndexState_INPROGRESS, idxDes.States[0].State)
|
||||
|
||||
//wait build index to finish
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
req2 := &indexpb.IndexStatesRequest{
|
||||
IndexIDs: []UniqueID{UniqueID(1)},
|
||||
}
|
||||
idxDes, err = buildIndexClient.GetIndexStates(req2)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, commonpb.IndexState_FINISHED, idxDes.States[0].State)
|
||||
|
||||
}
|
|
@@ -1,30 +0,0 @@
|
|||
package master
|
||||
|
||||
type persistenceScheduler interface {
|
||||
Enqueue(interface{}) error
|
||||
schedule(interface{}) error
|
||||
scheduleLoop()
|
||||
|
||||
Start() error
|
||||
Close()
|
||||
}
|
||||
type MockFlushScheduler struct {
|
||||
}
|
||||
|
||||
func (m *MockFlushScheduler) Enqueue(i interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockFlushScheduler) schedule(i interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockFlushScheduler) scheduleLoop() {
|
||||
}
|
||||
|
||||
func (m *MockFlushScheduler) Start() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockFlushScheduler) Close() {
|
||||
}
|
|
@@ -1,78 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
|
||||
)
|
||||
|
||||
type RuntimeStats struct {
|
||||
collStats map[UniqueID]*CollRuntimeStats // collection ID to its per-field index statistics
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
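// UpdateFieldStat records per-peer index statistics for a field, replacing an existing entry
// with the same peer and index params or appending a new one.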
func (rs *RuntimeStats) UpdateFieldStat(collID UniqueID, fieldID UniqueID, stats *FieldIndexRuntimeStats) error {
|
||||
rs.mu.Lock()
|
||||
defer rs.mu.Unlock()
|
||||
|
||||
peerID := stats.peerID
|
||||
_, ok := rs.collStats[collID]
|
||||
if !ok {
|
||||
rs.collStats[collID] = &CollRuntimeStats{
|
||||
fieldIndexStats: make(map[UniqueID][]*FieldIndexRuntimeStats),
|
||||
}
|
||||
}
|
||||
|
||||
collRuntimeStats := rs.collStats[collID]
|
||||
fieldStats := collRuntimeStats.fieldIndexStats[fieldID]
|
||||
for i, v := range fieldStats {
|
||||
if v.peerID == peerID && typeutil.CompareIndexParams(v.indexParams, stats.indexParams) {
|
||||
fieldStats[i] = stats
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
collRuntimeStats.fieldIndexStats[fieldID] = append(collRuntimeStats.fieldIndexStats[fieldID], stats)
|
||||
return nil
|
||||
}
|
||||
|
||||
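// GetTotalNumOfRelatedSegments sums the number of related segments reported by every peer for
// the given collection, field and index params.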
func (rs *RuntimeStats) GetTotalNumOfRelatedSegments(collID UniqueID, fieldID UniqueID, indexParams []*commonpb.KeyValuePair) int64 {
|
||||
rs.mu.RLock()
|
||||
defer rs.mu.RUnlock()
|
||||
|
||||
collStats, ok := rs.collStats[collID]
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
|
||||
fieldStats, ok := collStats.fieldIndexStats[fieldID]
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
|
||||
var total int64 = 0
|
||||
for _, stat := range fieldStats {
|
||||
if typeutil.CompareIndexParams(stat.indexParams, indexParams) {
|
||||
total += stat.numOfRelatedSegments
|
||||
}
|
||||
}
|
||||
|
||||
return total
|
||||
}
|
||||
|
||||
type CollRuntimeStats struct {
|
||||
fieldIndexStats map[UniqueID][]*FieldIndexRuntimeStats
|
||||
}
|
||||
|
||||
type FieldIndexRuntimeStats struct {
|
||||
peerID int64
|
||||
indexParams []*commonpb.KeyValuePair
|
||||
numOfRelatedSegments int64
|
||||
}
|
||||
|
||||
func NewRuntimeStats() *RuntimeStats {
|
||||
return &RuntimeStats{
|
||||
collStats: make(map[UniqueID]*CollRuntimeStats),
|
||||
}
|
||||
}
|
|
@@ -1,73 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
)
|
||||
|
||||
func TestRuntimeStats_UpdateFieldStats(t *testing.T) {
|
||||
runtimeStats := NewRuntimeStats()
|
||||
cases := []*struct {
|
||||
collID UniqueID
|
||||
fieldID UniqueID
|
||||
peerID int64
|
||||
nums int64
|
||||
}{
|
||||
{1, 1, 2, 10},
|
||||
{1, 2, 2, 20},
|
||||
{2, 2, 2, 30},
|
||||
{2, 2, 3, 40},
|
||||
{1, 1, 2, 100},
|
||||
}
|
||||
for _, testcase := range cases {
|
||||
err := runtimeStats.UpdateFieldStat(testcase.collID, testcase.fieldID, &FieldIndexRuntimeStats{
|
||||
peerID: testcase.peerID,
|
||||
indexParams: []*commonpb.KeyValuePair{},
|
||||
numOfRelatedSegments: testcase.nums,
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
statsArray := runtimeStats.collStats[testcase.collID].fieldIndexStats[testcase.fieldID]
|
||||
assert.NotEmpty(t, statsArray)
|
||||
|
||||
found := 0
|
||||
for _, s := range statsArray {
|
||||
if s.peerID == testcase.peerID {
|
||||
found++
|
||||
assert.EqualValues(t, s.numOfRelatedSegments, testcase.nums)
|
||||
}
|
||||
}
|
||||
assert.EqualValues(t, 1, found)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRuntimeStats_GetTotalNumOfRelatedSegments(t *testing.T) {
|
||||
runtimeStats := NewRuntimeStats()
|
||||
runtimeStats.collStats = make(map[UniqueID]*CollRuntimeStats)
|
||||
|
||||
runtimeStats.collStats[1] = &CollRuntimeStats{
|
||||
fieldIndexStats: map[UniqueID][]*FieldIndexRuntimeStats{
|
||||
100: {
|
||||
{1, []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}}, 10},
|
||||
{3, []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}}, 20},
|
||||
{2, []*commonpb.KeyValuePair{{Key: "k2", Value: "v2"}}, 20},
|
||||
},
|
||||
200: {
|
||||
{1, []*commonpb.KeyValuePair{}, 20},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
runtimeStats.collStats[2] = &CollRuntimeStats{
|
||||
fieldIndexStats: map[UniqueID][]*FieldIndexRuntimeStats{
|
||||
100: {
|
||||
{1, []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}}, 10},
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.EqualValues(t, 30, runtimeStats.GetTotalNumOfRelatedSegments(1, 100, []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}}))
|
||||
assert.EqualValues(t, 20, runtimeStats.GetTotalNumOfRelatedSegments(1, 100, []*commonpb.KeyValuePair{{Key: "k2", Value: "v2"}}))
|
||||
assert.EqualValues(t, 20, runtimeStats.GetTotalNumOfRelatedSegments(1, 200, []*commonpb.KeyValuePair{}))
|
||||
assert.EqualValues(t, 10, runtimeStats.GetTotalNumOfRelatedSegments(2, 100, []*commonpb.KeyValuePair{{Key: "k1", Value: "v1"}}))
|
||||
}
|
|
@@ -1,89 +0,0 @@
package master

import (
	"context"
	"log"

	"github.com/zilliztech/milvus-distributed/internal/errors"
	ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
)

//type ddRequestScheduler interface {}

//type ddReqFIFOScheduler struct {}

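// ddRequestScheduler serializes data-definition (DDL) tasks for the master:
// callers Enqueue tasks onto reqQueue and scheduleLoop executes them one at a time.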
type ddRequestScheduler struct {
	ctx    context.Context
	cancel context.CancelFunc

	globalIDAllocator func() (UniqueID, error)
	reqQueue          chan task
	scheduleTimeStamp Timestamp
	ddMsgStream       ms.MsgStream
}

func NewDDRequestScheduler(ctx context.Context) *ddRequestScheduler {
	const channelSize = 1024

	ctx2, cancel := context.WithCancel(ctx)

	rs := ddRequestScheduler{
		ctx:      ctx2,
		cancel:   cancel,
		reqQueue: make(chan task, channelSize),
	}
	return &rs
}

func (rs *ddRequestScheduler) Enqueue(task task) error {
	rs.reqQueue <- task
	return nil
}

func (rs *ddRequestScheduler) SetIDAllocator(allocGlobalID func() (UniqueID, error)) {
	rs.globalIDAllocator = allocGlobalID
}

func (rs *ddRequestScheduler) SetDDMsgStream(ddStream ms.MsgStream) {
	rs.ddMsgStream = ddStream
}

func (rs *ddRequestScheduler) scheduleLoop() {
	for {
		select {
		case task := <-rs.reqQueue:
			err := rs.schedule(task)
			if err != nil {
				log.Println(err)
			}
		case <-rs.ctx.Done():
			log.Print("server is closed, exit task execution loop")
			return
		}
	}
}

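// schedule runs a task only if its timestamp is not older than the most recent
// one already scheduled; out-of-order tasks are rejected through Notify.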
func (rs *ddRequestScheduler) schedule(t task) error {
	timeStamp, err := t.Ts()
	if err != nil {
		log.Println(err)
		return err
	}
	if timeStamp < rs.scheduleTimeStamp {
		t.Notify(errors.Errorf("input timestamp = %d, scheduler timestamp = %d", timeStamp, rs.scheduleTimeStamp))
	} else {
		rs.scheduleTimeStamp = timeStamp
		err = t.Execute()
		t.Notify(err)
	}
	return nil
}

func (rs *ddRequestScheduler) Start() error {
	go rs.scheduleLoop()
	return nil
}

func (rs *ddRequestScheduler) Close() {
	rs.cancel()
}
@@ -1,408 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
)
|
||||
|
||||
func filterSchema(schema *schemapb.CollectionSchema) *schemapb.CollectionSchema {
|
||||
cloneSchema := proto.Clone(schema).(*schemapb.CollectionSchema)
|
||||
// remove system field
|
||||
var newFields []*schemapb.FieldSchema
|
||||
for _, fieldMeta := range cloneSchema.Fields {
|
||||
fieldID := fieldMeta.FieldID
|
||||
// todo not hardcode
|
||||
if fieldID < 100 {
|
||||
continue
|
||||
}
|
||||
newFields = append(newFields, fieldMeta)
|
||||
}
|
||||
cloneSchema.Fields = newFields
|
||||
return cloneSchema
|
||||
}
|
||||
|
||||
func TestMaster_Scheduler_Collection(t *testing.T) {
|
||||
Init()
|
||||
etcdAddress := Params.EtcdAddress
|
||||
kvRootPath := Params.MetaRootPath
|
||||
pulsarAddr := Params.PulsarAddress
|
||||
|
||||
producerChannels := []string{"ddstream"}
|
||||
consumerChannels := []string{"ddstream"}
|
||||
consumerSubName := "substream"
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
|
||||
assert.Nil(t, err)
|
||||
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
|
||||
|
||||
meta, err := NewMetaTable(etcdKV)
|
||||
assert.Nil(t, err)
|
||||
defer meta.client.Close()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
pulsarDDStream := pulsarms.NewPulsarMsgStream(ctx, 1024) //input stream
|
||||
pulsarDDStream.SetPulsarClient(pulsarAddr)
|
||||
pulsarDDStream.CreatePulsarProducers(producerChannels)
|
||||
pulsarDDStream.Start()
|
||||
defer pulsarDDStream.Close()
|
||||
|
||||
consumeMs := pulsarms.NewPulsarTtMsgStream(ctx, 1024)
|
||||
consumeMs.SetPulsarClient(pulsarAddr)
|
||||
consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, util.NewUnmarshalDispatcher(), 1024)
|
||||
consumeMs.Start()
|
||||
defer consumeMs.Close()
|
||||
|
||||
idAllocator := NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "gid"))
|
||||
err = idAllocator.Initialize()
|
||||
assert.Nil(t, err)
|
||||
|
||||
scheduler := NewDDRequestScheduler(ctx)
|
||||
scheduler.SetDDMsgStream(pulsarDDStream)
|
||||
scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
|
||||
scheduler.Start()
|
||||
defer scheduler.Close()
|
||||
|
||||
rand.Seed(time.Now().Unix())
|
||||
sch := schemapb.CollectionSchema{
|
||||
Name: "name" + strconv.FormatUint(rand.Uint64(), 10),
|
||||
Description: "string",
|
||||
AutoID: true,
|
||||
Fields: nil,
|
||||
}
|
||||
|
||||
schemaBytes, err := proto.Marshal(&sch)
|
||||
assert.Nil(t, err)
|
||||
|
||||
////////////////////////////CreateCollection////////////////////////
|
||||
createCollectionReq := milvuspb.CreateCollectionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kCreateCollection,
|
||||
MsgID: 1,
|
||||
Timestamp: 11,
|
||||
SourceID: 1,
|
||||
},
|
||||
Schema: schemaBytes,
|
||||
}
|
||||
|
||||
var createCollectionTask task = &createCollectionTask{
|
||||
req: &createCollectionReq,
|
||||
baseTask: baseTask{
|
||||
sch: scheduler,
|
||||
mt: meta,
|
||||
cv: make(chan error),
|
||||
},
|
||||
}
|
||||
|
||||
err = scheduler.Enqueue(createCollectionTask)
|
||||
assert.Nil(t, err)
|
||||
err = createCollectionTask.WaitToFinish(ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = mockTimeTickBroadCast(pulsarDDStream, Timestamp(12))
|
||||
assert.NoError(t, err)
|
||||
|
||||
var consumeMsg ms.MsgStream = consumeMs
|
||||
var createCollectionMsg *ms.CreateCollectionMsg
|
||||
for {
|
||||
result := consumeMsg.Consume()
|
||||
if len(result.Msgs) > 0 {
|
||||
msgs := result.Msgs
|
||||
for _, v := range msgs {
|
||||
createCollectionMsg = v.(*ms.CreateCollectionMsg)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.Equal(t, createCollectionReq.Base.MsgType, createCollectionMsg.CreateCollectionRequest.Base.MsgType)
|
||||
assert.Equal(t, createCollectionReq.Base.MsgID, createCollectionMsg.CreateCollectionRequest.Base.MsgID)
|
||||
assert.Equal(t, createCollectionReq.Base.Timestamp, createCollectionMsg.CreateCollectionRequest.Base.Timestamp)
|
||||
assert.Equal(t, createCollectionReq.Base.SourceID, createCollectionMsg.CreateCollectionRequest.Base.SourceID)
|
||||
|
||||
var schema1 schemapb.CollectionSchema
|
||||
proto.UnmarshalMerge(createCollectionReq.Schema, &schema1)
|
||||
|
||||
var schema2 schemapb.CollectionSchema
|
||||
proto.UnmarshalMerge(createCollectionMsg.CreateCollectionRequest.Schema, &schema2)
|
||||
filterSchema2 := filterSchema(&schema2)
|
||||
filterSchema2Value, _ := proto.Marshal(filterSchema2)
|
||||
fmt.Println("aaaa")
|
||||
fmt.Println(schema1.String())
|
||||
fmt.Println("bbbb")
|
||||
fmt.Println(schema2.String())
|
||||
assert.Equal(t, createCollectionReq.Schema, filterSchema2Value)
|
||||
|
||||
////////////////////////////DropCollection////////////////////////
|
||||
dropCollectionReq := milvuspb.DropCollectionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kDropCollection,
|
||||
MsgID: 1,
|
||||
Timestamp: 13,
|
||||
SourceID: 1,
|
||||
},
|
||||
CollectionName: sch.Name,
|
||||
}
|
||||
|
||||
var dropCollectionTask task = &dropCollectionTask{
|
||||
req: &dropCollectionReq,
|
||||
baseTask: baseTask{
|
||||
sch: scheduler,
|
||||
mt: meta,
|
||||
cv: make(chan error),
|
||||
},
|
||||
segManager: NewMockSegmentManager(),
|
||||
}
|
||||
|
||||
err = scheduler.Enqueue(dropCollectionTask)
|
||||
assert.Nil(t, err)
|
||||
err = dropCollectionTask.WaitToFinish(ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = mockTimeTickBroadCast(pulsarDDStream, Timestamp(14))
|
||||
assert.NoError(t, err)
|
||||
|
||||
var dropCollectionMsg *ms.DropCollectionMsg
|
||||
for {
|
||||
result := consumeMsg.Consume()
|
||||
if len(result.Msgs) > 0 {
|
||||
msgs := result.Msgs
|
||||
for _, v := range msgs {
|
||||
dropCollectionMsg = v.(*ms.DropCollectionMsg)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.Equal(t, dropCollectionReq.Base.MsgType, dropCollectionMsg.DropCollectionRequest.Base.MsgType)
|
||||
assert.Equal(t, dropCollectionReq.Base.MsgID, dropCollectionMsg.DropCollectionRequest.Base.MsgID)
|
||||
assert.Equal(t, dropCollectionReq.Base.Timestamp, dropCollectionMsg.DropCollectionRequest.Base.Timestamp)
|
||||
assert.Equal(t, dropCollectionReq.Base.SourceID, dropCollectionMsg.DropCollectionRequest.Base.SourceID)
|
||||
assert.Equal(t, dropCollectionReq.CollectionName, dropCollectionMsg.DropCollectionRequest.CollectionName)
|
||||
|
||||
}
|
||||
|
||||
func TestMaster_Scheduler_Partition(t *testing.T) {
|
||||
Init()
|
||||
etcdAddress := Params.EtcdAddress
|
||||
kvRootPath := Params.MetaRootPath
|
||||
pulsarAddr := Params.PulsarAddress
|
||||
|
||||
producerChannels := []string{"ddstream"}
|
||||
consumerChannels := []string{"ddstream"}
|
||||
consumerSubName := "substream"
|
||||
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
|
||||
assert.Nil(t, err)
|
||||
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
|
||||
|
||||
meta, err := NewMetaTable(etcdKV)
|
||||
assert.Nil(t, err)
|
||||
defer meta.client.Close()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
pulsarDDStream := pulsarms.NewPulsarMsgStream(ctx, 1024) //input stream
|
||||
pulsarDDStream.SetPulsarClient(pulsarAddr)
|
||||
pulsarDDStream.CreatePulsarProducers(producerChannels)
|
||||
pulsarDDStream.Start()
|
||||
defer pulsarDDStream.Close()
|
||||
|
||||
consumeMs := pulsarms.NewPulsarTtMsgStream(ctx, 1024)
|
||||
consumeMs.SetPulsarClient(pulsarAddr)
|
||||
consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, util.NewUnmarshalDispatcher(), 1024)
|
||||
consumeMs.Start()
|
||||
defer consumeMs.Close()
|
||||
|
||||
idAllocator := NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "gid"))
|
||||
err = idAllocator.Initialize()
|
||||
assert.Nil(t, err)
|
||||
|
||||
scheduler := NewDDRequestScheduler(ctx)
|
||||
scheduler.SetDDMsgStream(pulsarDDStream)
|
||||
scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
|
||||
scheduler.Start()
|
||||
defer scheduler.Close()
|
||||
|
||||
rand.Seed(time.Now().Unix())
|
||||
sch := schemapb.CollectionSchema{
|
||||
Name: "name" + strconv.FormatUint(rand.Uint64(), 10),
|
||||
Description: "string",
|
||||
AutoID: true,
|
||||
Fields: nil,
|
||||
}
|
||||
|
||||
schemaBytes, err := proto.Marshal(&sch)
|
||||
assert.Nil(t, err)
|
||||
|
||||
////////////////////////////CreateCollection////////////////////////
|
||||
createCollectionReq := milvuspb.CreateCollectionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kCreateCollection,
|
||||
MsgID: 1,
|
||||
Timestamp: 11,
|
||||
SourceID: 1,
|
||||
},
|
||||
Schema: schemaBytes,
|
||||
}
|
||||
|
||||
var createCollectionTask task = &createCollectionTask{
|
||||
req: &createCollectionReq,
|
||||
baseTask: baseTask{
|
||||
sch: scheduler,
|
||||
mt: meta,
|
||||
cv: make(chan error),
|
||||
},
|
||||
}
|
||||
|
||||
err = scheduler.Enqueue(createCollectionTask)
|
||||
assert.Nil(t, err)
|
||||
err = createCollectionTask.WaitToFinish(ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = mockTimeTickBroadCast(pulsarDDStream, Timestamp(12))
|
||||
assert.NoError(t, err)
|
||||
|
||||
var consumeMsg ms.MsgStream = consumeMs
|
||||
var createCollectionMsg *ms.CreateCollectionMsg
|
||||
for {
|
||||
result := consumeMsg.Consume()
|
||||
if len(result.Msgs) > 0 {
|
||||
msgs := result.Msgs
|
||||
for _, v := range msgs {
|
||||
createCollectionMsg = v.(*ms.CreateCollectionMsg)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.Equal(t, createCollectionReq.Base.MsgType, createCollectionMsg.CreateCollectionRequest.Base.MsgType)
|
||||
assert.Equal(t, createCollectionReq.Base.MsgID, createCollectionMsg.CreateCollectionRequest.Base.MsgID)
|
||||
assert.Equal(t, createCollectionReq.Base.Timestamp, createCollectionMsg.CreateCollectionRequest.Base.Timestamp)
|
||||
assert.Equal(t, createCollectionReq.Base.SourceID, createCollectionMsg.CreateCollectionRequest.Base.SourceID)
|
||||
//assert.Equal(t, createCollectionReq.Schema, createCollectionMsg.CreateCollectionRequest.Schema)
|
||||
|
||||
var schema1 schemapb.CollectionSchema
|
||||
proto.UnmarshalMerge(createCollectionReq.Schema, &schema1)
|
||||
|
||||
var schema2 schemapb.CollectionSchema
|
||||
proto.UnmarshalMerge(createCollectionMsg.CreateCollectionRequest.Schema, &schema2)
|
||||
filterSchema2 := filterSchema(&schema2)
|
||||
filterSchema2Value, _ := proto.Marshal(filterSchema2)
|
||||
fmt.Println("aaaa")
|
||||
fmt.Println(schema1.String())
|
||||
fmt.Println("bbbb")
|
||||
fmt.Println(schema2.String())
|
||||
assert.Equal(t, createCollectionReq.Schema, filterSchema2Value)
|
||||
|
||||
////////////////////////////CreatePartition////////////////////////
|
||||
partitionName := "partitionName" + strconv.FormatUint(rand.Uint64(), 10)
|
||||
createPartitionReq := milvuspb.CreatePartitionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kCreatePartition,
|
||||
MsgID: 1,
|
||||
Timestamp: 13,
|
||||
SourceID: 1,
|
||||
},
|
||||
CollectionName: sch.Name,
|
||||
PartitionName: partitionName,
|
||||
}
|
||||
|
||||
var createPartitionTask task = &createPartitionTask{
|
||||
req: &createPartitionReq,
|
||||
baseTask: baseTask{
|
||||
sch: scheduler,
|
||||
mt: meta,
|
||||
cv: make(chan error),
|
||||
},
|
||||
}
|
||||
|
||||
err = scheduler.Enqueue(createPartitionTask)
|
||||
assert.Nil(t, err)
|
||||
err = createPartitionTask.WaitToFinish(ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = mockTimeTickBroadCast(pulsarDDStream, Timestamp(14))
|
||||
assert.NoError(t, err)
|
||||
|
||||
var createPartitionMsg *ms.CreatePartitionMsg
|
||||
for {
|
||||
result := consumeMsg.Consume()
|
||||
if len(result.Msgs) > 0 {
|
||||
msgs := result.Msgs
|
||||
for _, v := range msgs {
|
||||
createPartitionMsg = v.(*ms.CreatePartitionMsg)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.Equal(t, createPartitionReq.Base.MsgType, createPartitionMsg.CreatePartitionRequest.Base.MsgType)
|
||||
assert.Equal(t, createPartitionReq.Base.MsgID, createPartitionMsg.CreatePartitionRequest.Base.MsgID)
|
||||
assert.Equal(t, createPartitionReq.Base.Timestamp, createPartitionMsg.CreatePartitionRequest.Base.Timestamp)
|
||||
assert.Equal(t, createPartitionReq.Base.SourceID, createPartitionMsg.CreatePartitionRequest.Base.SourceID)
|
||||
assert.Equal(t, createPartitionReq.CollectionName, createPartitionMsg.CreatePartitionRequest.CollectionName)
|
||||
assert.Equal(t, createPartitionReq.PartitionName, createPartitionMsg.CreatePartitionRequest.PartitionName)
|
||||
|
||||
////////////////////////////DropPartition////////////////////////
|
||||
dropPartitionReq := milvuspb.DropPartitionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kDropPartition,
|
||||
MsgID: 1,
|
||||
Timestamp: 15,
|
||||
SourceID: 1,
|
||||
},
|
||||
CollectionName: sch.Name,
|
||||
PartitionName: partitionName,
|
||||
}
|
||||
|
||||
var dropPartitionTask task = &dropPartitionTask{
|
||||
req: &dropPartitionReq,
|
||||
baseTask: baseTask{
|
||||
sch: scheduler,
|
||||
mt: meta,
|
||||
cv: make(chan error),
|
||||
},
|
||||
}
|
||||
|
||||
err = scheduler.Enqueue(dropPartitionTask)
|
||||
assert.Nil(t, err)
|
||||
err = dropPartitionTask.WaitToFinish(ctx)
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = mockTimeTickBroadCast(pulsarDDStream, Timestamp(16))
|
||||
assert.NoError(t, err)
|
||||
|
||||
var dropPartitionMsg *ms.DropPartitionMsg
|
||||
for {
|
||||
result := consumeMsg.Consume()
|
||||
if len(result.Msgs) > 0 {
|
||||
msgs := result.Msgs
|
||||
for _, v := range msgs {
|
||||
dropPartitionMsg = v.(*ms.DropPartitionMsg)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.Equal(t, dropPartitionReq.Base.MsgType, dropPartitionMsg.DropPartitionRequest.Base.MsgType)
|
||||
assert.Equal(t, dropPartitionReq.Base.MsgID, dropPartitionMsg.DropPartitionRequest.Base.MsgID)
|
||||
assert.Equal(t, dropPartitionReq.Base.Timestamp, dropPartitionMsg.DropPartitionRequest.Base.Timestamp)
|
||||
assert.Equal(t, dropPartitionReq.Base.SourceID, dropPartitionMsg.DropPartitionRequest.Base.SourceID)
|
||||
assert.Equal(t, dropPartitionReq.CollectionName, dropPartitionMsg.DropPartitionRequest.CollectionName)
|
||||
|
||||
}
|
|
@@ -1,196 +0,0 @@
package master

import (
	"context"
	"log"
	"sync"
	"time"

	"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"

	"github.com/zilliztech/milvus-distributed/internal/errors"

	ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
)

type Assignment struct {
	rowNums    int
	expireTime Timestamp
}

type Status struct {
	total          int
	lastExpireTime Timestamp
	assignments    []*Assignment
}

type SegmentAssigner struct {
	mt            *metaTable
	segmentStatus map[UniqueID]*Status //segment id -> status

	globalTSOAllocator    func() (Timestamp, error)
	segmentExpireDuration int64

	proxyTimeSyncChan chan *ms.TimeTickMsg
	ctx               context.Context
	cancel            context.CancelFunc
	waitGroup         sync.WaitGroup
	mu                sync.Mutex
}

type AssignResult struct {
	isSuccess  bool
	expireTime Timestamp
}

func (assigner *SegmentAssigner) OpenSegment(segmentID UniqueID, numRows int) error {
	assigner.mu.Lock()
	defer assigner.mu.Unlock()
	if _, ok := assigner.segmentStatus[segmentID]; ok {
		return errors.Errorf("can not reopen segment %d", segmentID)
	}

	newStatus := &Status{
		total:       numRows,
		assignments: make([]*Assignment, 0),
	}
	assigner.segmentStatus[segmentID] = newStatus
	return nil
}

func (assigner *SegmentAssigner) CloseSegment(segmentID UniqueID) error {
	assigner.mu.Lock()
	defer assigner.mu.Unlock()
	if _, ok := assigner.segmentStatus[segmentID]; !ok {
		return errors.Errorf("can not find segment %d", segmentID)
	}

	delete(assigner.segmentStatus, segmentID)
	return nil
}

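// Assign reserves numRows rows in an already opened segment. When the segment
// still has free capacity the reservation succeeds and the result carries an
// expiration timestamp computed from segmentExpireDuration; otherwise
// isSuccess stays false.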
func (assigner *SegmentAssigner) Assign(segmentID UniqueID, numRows int) (*AssignResult, error) {
	assigner.mu.Lock()
	defer assigner.mu.Unlock()

	res := &AssignResult{false, 0}
	status, ok := assigner.segmentStatus[segmentID]
	if !ok {
		return res, errors.Errorf("segment %d is not opened", segmentID)
	}

	allocated, err := assigner.totalOfAssignments(segmentID)
	if err != nil {
		return res, err
	}

	segMeta, err := assigner.mt.GetSegmentByID(segmentID)
	if err != nil {
		return res, err
	}
	free := status.total - int(segMeta.NumRows) - allocated
	if numRows > free {
		return res, nil
	}

	ts, err := assigner.globalTSOAllocator()
	if err != nil {
		return res, err
	}
	physicalTs, logicalTs := tsoutil.ParseTS(ts)
	expirePhysicalTs := physicalTs.Add(time.Duration(assigner.segmentExpireDuration) * time.Millisecond)
	expireTs := tsoutil.ComposeTS(expirePhysicalTs.UnixNano()/int64(time.Millisecond), int64(logicalTs))
	status.lastExpireTime = expireTs
	status.assignments = append(status.assignments, &Assignment{
		numRows,
		ts,
	})

	res.isSuccess = true
	res.expireTime = expireTs
	return res, nil
}

func (assigner *SegmentAssigner) CheckAssignmentExpired(segmentID UniqueID, timestamp Timestamp) (bool, error) {
	assigner.mu.Lock()
	defer assigner.mu.Unlock()
	status, ok := assigner.segmentStatus[segmentID]
	if !ok {
		return false, errors.Errorf("can not find segment %d", segmentID)
	}

	if timestamp >= status.lastExpireTime {
		return true, nil
	}

	return false, nil
}

func (assigner *SegmentAssigner) Start() {
	assigner.waitGroup.Add(1)
	go assigner.startProxyTimeSync()
}

func (assigner *SegmentAssigner) Close() {
	assigner.cancel()
	assigner.waitGroup.Wait()
}

func (assigner *SegmentAssigner) startProxyTimeSync() {
	defer assigner.waitGroup.Done()
	for {
		select {
		case <-assigner.ctx.Done():
			log.Println("proxy time sync stopped")
			return
		case msg := <-assigner.proxyTimeSyncChan:
			if err := assigner.syncProxyTimeStamp(msg.TimeTickMsg.Base.Timestamp); err != nil {
				log.Println("proxy time sync error: " + err.Error())
			}
		}
	}
}

func (assigner *SegmentAssigner) totalOfAssignments(segmentID UniqueID) (int, error) {
	if _, ok := assigner.segmentStatus[segmentID]; !ok {
		return -1, errors.Errorf("can not find segment %d", segmentID)
	}

	status := assigner.segmentStatus[segmentID]
	res := 0
	for _, v := range status.assignments {
		res += v.rowNums
	}
	return res, nil
}

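// syncProxyTimeStamp removes assignments whose expire time has been passed by
// the proxy time tick, using swap-with-last removal to avoid reslicing.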
func (assigner *SegmentAssigner) syncProxyTimeStamp(timeTick Timestamp) error {
	assigner.mu.Lock()
	defer assigner.mu.Unlock()
	for _, status := range assigner.segmentStatus {
		for i := 0; i < len(status.assignments); {
			if timeTick >= status.assignments[i].expireTime {
				status.assignments[i] = status.assignments[len(status.assignments)-1]
				status.assignments = status.assignments[:len(status.assignments)-1]
				continue
			}
			i++
		}
	}

	return nil
}

func NewSegmentAssigner(ctx context.Context, metaTable *metaTable,
	globalTSOAllocator func() (Timestamp, error), proxyTimeSyncChan chan *ms.TimeTickMsg) *SegmentAssigner {
	assignCtx, cancel := context.WithCancel(ctx)
	return &SegmentAssigner{
		mt:                    metaTable,
		segmentStatus:         make(map[UniqueID]*Status),
		globalTSOAllocator:    globalTSOAllocator,
		segmentExpireDuration: Params.SegIDAssignExpiration,
		proxyTimeSyncChan:     proxyTimeSyncChan,
		ctx:                   assignCtx,
		cancel:                cancel,
	}
}
@@ -1,147 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
|
||||
)
|
||||
|
||||
func TestSegmentManager_AssignSegmentID(t *testing.T) {
|
||||
Init()
|
||||
Params.TopicNum = 5
|
||||
Params.QueryNodeNum = 3
|
||||
Params.SegmentSize = 536870912 / 1024 / 1024
|
||||
Params.SegmentSizeFactor = 0.75
|
||||
Params.DefaultRecordSize = 1024
|
||||
Params.MinSegIDAssignCnt = 1048576 / 1024
|
||||
Params.SegIDAssignExpiration = 2000
|
||||
collName := "coll_segmgr_test"
|
||||
collID := int64(1001)
|
||||
partitionTag := "test"
|
||||
etcdAddress := Params.EtcdAddress
|
||||
|
||||
var cnt int64
|
||||
globalTsoAllocator := func() (Timestamp, error) {
|
||||
val := atomic.AddInt64(&cnt, 1)
|
||||
phy := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
ts := tsoutil.ComposeTS(phy, val)
|
||||
return ts, nil
|
||||
}
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
|
||||
assert.Nil(t, err)
|
||||
rootPath := "/test/root"
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
_, err = cli.Delete(ctx, rootPath, clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
kvBase := etcdkv.NewEtcdKV(cli, rootPath)
|
||||
defer kvBase.Close()
|
||||
mt, err := NewMetaTable(kvBase)
|
||||
assert.Nil(t, err)
|
||||
err = mt.AddCollection(&pb.CollectionMeta{
|
||||
ID: collID,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: collName,
|
||||
},
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
err = mt.AddPartition(collID, partitionTag)
|
||||
assert.Nil(t, err)
|
||||
timestamp, err := globalTsoAllocator()
|
||||
assert.Nil(t, err)
|
||||
err = mt.AddSegment(&pb.SegmentMeta{
|
||||
SegmentID: 100,
|
||||
CollectionID: collID,
|
||||
PartitionTag: partitionTag,
|
||||
ChannelStart: 0,
|
||||
ChannelEnd: 1,
|
||||
OpenTime: timestamp,
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
proxySyncChan := make(chan *msgstream.TimeTickMsg)
|
||||
|
||||
segAssigner := NewSegmentAssigner(ctx, mt, globalTsoAllocator, proxySyncChan)
|
||||
|
||||
segAssigner.Start()
|
||||
defer segAssigner.Close()
|
||||
|
||||
_, err = segAssigner.Assign(100, 100)
|
||||
assert.NotNil(t, err)
|
||||
err = segAssigner.OpenSegment(100, 100000)
|
||||
assert.Nil(t, err)
|
||||
result, err := segAssigner.Assign(100, 10000)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, result.isSuccess)
|
||||
|
||||
result, err = segAssigner.Assign(100, 95000)
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, result.isSuccess)
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
timestamp, err = globalTsoAllocator()
|
||||
assert.Nil(t, err)
|
||||
tickMsg := &msgstream.TimeTickMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
BeginTimestamp: timestamp, EndTimestamp: timestamp, HashValues: []uint32{},
|
||||
},
|
||||
TimeTickMsg: internalpb2.TimeTickMsg{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kTimeTick,
|
||||
MsgID: 0,
|
||||
Timestamp: timestamp,
|
||||
SourceID: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
proxySyncChan <- tickMsg
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
result, err = segAssigner.Assign(100, 100000)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, result.isSuccess)
|
||||
|
||||
err = segAssigner.CloseSegment(100)
|
||||
assert.Nil(t, err)
|
||||
_, err = segAssigner.Assign(100, 100)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
err = mt.AddSegment(&pb.SegmentMeta{
|
||||
SegmentID: 200,
|
||||
CollectionID: collID,
|
||||
PartitionTag: partitionTag,
|
||||
ChannelStart: 1,
|
||||
ChannelEnd: 1,
|
||||
OpenTime: 100,
|
||||
NumRows: 10000,
|
||||
MemSize: 100,
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = segAssigner.OpenSegment(200, 20000)
|
||||
assert.Nil(t, err)
|
||||
result, err = segAssigner.Assign(200, 10001)
|
||||
assert.Nil(t, err)
|
||||
assert.False(t, result.isSuccess)
|
||||
result, err = segAssigner.Assign(200, 10000)
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, result.isSuccess)
|
||||
}
|
|
@@ -1,471 +0,0 @@
package master

import (
	"context"

	"log"
	"strconv"
	"sync"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"

	"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"

	"github.com/zilliztech/milvus-distributed/internal/errors"
	ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
)

type collectionStatus struct {
	segments []*segmentStatus
}
type segmentStatus struct {
	segmentID UniqueID
	total     int
	closable  bool
}

type channelRange struct {
	channelStart int32
	channelEnd   int32
}

type SegmentManager interface {
	Start()
	Close()
	AssignSegment(segIDReq []*datapb.SegIDRequest) ([]*datapb.SegIDAssignment, error)
	ForceClose(collID UniqueID) error
	DropCollection(collID UniqueID) error
}

type SegmentManagerImpl struct {
	metaTable              *metaTable
	channelRanges          []*channelRange
	collStatus             map[UniqueID]*collectionStatus // collection id to collection status
	defaultSizePerRecord   int64
	segmentThreshold       float64
	segmentThresholdFactor float64
	numOfChannels          int
	numOfQueryNodes        int
	globalIDAllocator      func() (UniqueID, error)
	globalTSOAllocator     func() (Timestamp, error)
	mu                     sync.RWMutex

	assigner *SegmentAssigner

	writeNodeTimeSyncChan chan *ms.TimeTickMsg
	flushScheduler        persistenceScheduler

	ctx       context.Context
	cancel    context.CancelFunc
	waitGroup sync.WaitGroup
}

func (manager *SegmentManagerImpl) AssignSegment(segIDReq []*datapb.SegIDRequest) ([]*datapb.SegIDAssignment, error) {
	manager.mu.Lock()
	defer manager.mu.Unlock()

	res := make([]*datapb.SegIDAssignment, 0)

	for _, req := range segIDReq {
		result := &datapb.SegIDAssignment{
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
			},
		}
		collName := req.CollName
		paritionName := req.PartitionName
		count := req.Count
		channelID := req.ChannelName

		collMeta, err := manager.metaTable.GetCollectionByName(collName)
		if err != nil {
			result.Status.Reason = err.Error()
			res = append(res, result)
			continue
		}

		collID := collMeta.GetID()
		if !manager.metaTable.HasPartition(collID, paritionName) {
			result.Status.Reason = "partition tag " + paritionName + " can not find in coll " + strconv.FormatInt(collID, 10)
			res = append(res, result)
			continue
		}
		channelIDInt, _ := strconv.ParseInt(channelID, 10, 64)
		assignInfo, err := manager.assignSegment(collName, collID, paritionName, count, int32(channelIDInt))
		if err != nil {
			result.Status.Reason = err.Error()
			res = append(res, result)
			continue
		}

		res = append(res, assignInfo)
	}
	return res, nil
}

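// assignSegment tries to place the request on an existing open segment that
// matches the partition and channel; if none has room it opens a new segment
// sized by estimateTotalRows and assigns from that one.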
func (manager *SegmentManagerImpl) assignSegment(
	collName string,
	collID UniqueID,
	paritionName string,
	count uint32,
	channelID int32) (*datapb.SegIDAssignment, error) {

	collStatus, ok := manager.collStatus[collID]
	if !ok {
		collStatus = &collectionStatus{
			segments: make([]*segmentStatus, 0),
		}
		manager.collStatus[collID] = collStatus
	}
	for _, segStatus := range collStatus.segments {
		if segStatus.closable {
			continue
		}
		match, err := manager.isMatch(segStatus.segmentID, paritionName, channelID)
		if err != nil {
			return nil, err
		}
		if !match {
			continue
		}

		result, err := manager.assigner.Assign(segStatus.segmentID, int(count))
		if err != nil {
			return nil, err
		}
		if !result.isSuccess {
			continue
		}

		return &datapb.SegIDAssignment{
			SegID:         segStatus.segmentID,
			ChannelName:   strconv.Itoa(int(channelID)),
			Count:         count,
			CollName:      collName,
			PartitionName: paritionName,
			ExpireTime:    result.expireTime,
			Status: &commonpb.Status{
				ErrorCode: commonpb.ErrorCode_SUCCESS,
				Reason:    "",
			},
		}, nil

	}

	total, err := manager.estimateTotalRows(collName)
	if err != nil {
		return nil, err
	}
	if int(count) > total {
		return nil, errors.Errorf("request count %d is larger than total rows %d", count, total)
	}

	id, err := manager.openNewSegment(channelID, collID, paritionName, total)
	if err != nil {
		return nil, err
	}

	result, err := manager.assigner.Assign(id, int(count))
	if err != nil {
		return nil, err
	}
	if !result.isSuccess {
		return nil, errors.Errorf("assign failed for segment %d", id)
	}
	return &datapb.SegIDAssignment{
		SegID:         id,
		ChannelName:   strconv.Itoa(int(channelID)),
		Count:         count,
		CollName:      collName,
		PartitionName: paritionName,
		ExpireTime:    result.expireTime,
		Status: &commonpb.Status{
			ErrorCode: commonpb.ErrorCode_SUCCESS,
			Reason:    "",
		},
	}, nil
}

func (manager *SegmentManagerImpl) isMatch(segmentID UniqueID, paritionName string, channelID int32) (bool, error) {
	segMeta, err := manager.metaTable.GetSegmentByID(segmentID)
	if err != nil {
		return false, err
	}

	if channelID < segMeta.GetChannelStart() ||
		channelID > segMeta.GetChannelEnd() || segMeta.PartitionTag != paritionName {
		return false, nil
	}
	return true, nil
}

func (manager *SegmentManagerImpl) estimateTotalRows(collName string) (int, error) {
	collMeta, err := manager.metaTable.GetCollectionByName(collName)
	if err != nil {
		return -1, err
	}
	sizePerRecord, err := typeutil.EstimateSizePerRecord(collMeta.Schema)
	if err != nil {
		return -1, err
	}
	return int(manager.segmentThreshold / float64(sizePerRecord)), nil
}

func (manager *SegmentManagerImpl) openNewSegment(channelID int32, collID UniqueID, paritionName string, numRows int) (UniqueID, error) {
	// find the channel range
	channelStart, channelEnd := int32(-1), int32(-1)
	for _, r := range manager.channelRanges {
		if channelID >= r.channelStart && channelID <= r.channelEnd {
			channelStart = r.channelStart
			channelEnd = r.channelEnd
			break
		}
	}
	if channelStart == -1 {
		return -1, errors.Errorf("can't find the channel range which contains channel %d", channelID)
	}

	newID, err := manager.globalIDAllocator()
	if err != nil {
		return -1, err
	}
	openTime, err := manager.globalTSOAllocator()
	if err != nil {
		return -1, err
	}

	err = manager.metaTable.AddSegment(&etcdpb.SegmentMeta{
		SegmentID:    newID,
		CollectionID: collID,
		PartitionTag: paritionName,
		ChannelStart: channelStart,
		ChannelEnd:   channelEnd,
		OpenTime:     openTime,
		NumRows:      0,
		MemSize:      0,
	})
	if err != nil {
		return -1, err
	}

	err = manager.assigner.OpenSegment(newID, numRows)
	if err != nil {
		return -1, err
	}

	segStatus := &segmentStatus{
		segmentID: newID,
		total:     numRows,
		closable:  false,
	}

	collStatus := manager.collStatus[collID]
	collStatus.segments = append(collStatus.segments, segStatus)

	return newID, nil
}

func (manager *SegmentManagerImpl) Start() {
	manager.waitGroup.Add(1)
	go manager.startWriteNodeTimeSync()
}

func (manager *SegmentManagerImpl) Close() {
	manager.cancel()
	manager.waitGroup.Wait()
}

func (manager *SegmentManagerImpl) startWriteNodeTimeSync() {
	defer manager.waitGroup.Done()
	for {
		select {
		case <-manager.ctx.Done():
			log.Println("write node time sync stopped")
			return
		case msg := <-manager.writeNodeTimeSyncChan:
			if err := manager.syncWriteNodeTimestamp(msg.TimeTickMsg.Base.Timestamp); err != nil {
				log.Println("write node time sync error: " + err.Error())
			}
		}
	}
}

func (manager *SegmentManagerImpl) syncWriteNodeTimestamp(timeTick Timestamp) error {
	manager.mu.Lock()
	defer manager.mu.Unlock()
	for _, status := range manager.collStatus {
		for i := 0; i < len(status.segments); i++ {
			segStatus := status.segments[i]
			if !segStatus.closable {
				closable, err := manager.judgeSegmentClosable(segStatus)
				if err != nil {
					log.Printf("check segment closable error: %s", err.Error())
					continue
				}
				segStatus.closable = closable
				if !segStatus.closable {
					continue
				}
			}

			isExpired, err := manager.assigner.CheckAssignmentExpired(segStatus.segmentID, timeTick)
			if err != nil {
				return err
			}
			if !isExpired {
				continue
			}
			status.segments = append(status.segments[:i], status.segments[i+1:]...)
			i--
			ts, err := manager.globalTSOAllocator()
			if err != nil {
				log.Printf("allocate tso error: %s", err.Error())
				continue
			}
			if err = manager.metaTable.CloseSegment(segStatus.segmentID, ts); err != nil {
				log.Printf("meta table close segment error: %s", err.Error())
				continue
			}
			if err = manager.assigner.CloseSegment(segStatus.segmentID); err != nil {
				log.Printf("assigner close segment error: %s", err.Error())
				continue
			}
			if err = manager.flushScheduler.Enqueue(segStatus.segmentID); err != nil {
				log.Printf("flush scheduler enqueue error: %s", err.Error())
				continue
			}
		}
	}

	return nil
}

func (manager *SegmentManagerImpl) judgeSegmentClosable(status *segmentStatus) (bool, error) {
	segMeta, err := manager.metaTable.GetSegmentByID(status.segmentID)
	if err != nil {
		return false, err
	}

	if segMeta.NumRows >= int64(manager.segmentThresholdFactor*float64(status.total)) {
		return true, nil
	}
	return false, nil
}

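// initChannelRanges splits numOfChannels insert channels into numOfQueryNodes
// contiguous ranges, spreading the remainder over the first ranges.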
func (manager *SegmentManagerImpl) initChannelRanges() error {
	div, rem := manager.numOfChannels/manager.numOfQueryNodes, manager.numOfChannels%manager.numOfQueryNodes
	for i, j := 0, 0; i < manager.numOfChannels; j++ {
		if j < rem {
			manager.channelRanges = append(manager.channelRanges, &channelRange{
				channelStart: int32(i),
				channelEnd:   int32(i + div),
			})
			i += div + 1
		} else {
			manager.channelRanges = append(manager.channelRanges, &channelRange{
				channelStart: int32(i),
				channelEnd:   int32(i + div - 1),
			})
			i += div
		}
	}
	return nil
}

// ForceClose marks all segments of the collection with collID as closable; each
// segment is closed once its assignments have expired.
func (manager *SegmentManagerImpl) ForceClose(collID UniqueID) error {
	manager.mu.Lock()
	defer manager.mu.Unlock()
	status, ok := manager.collStatus[collID]
	if !ok {
		return nil
	}

	for _, segStatus := range status.segments {
		segStatus.closable = true
	}
	return nil
}

func (manager *SegmentManagerImpl) DropCollection(collID UniqueID) error {
	manager.mu.Lock()
	defer manager.mu.Unlock()

	status, ok := manager.collStatus[collID]
	if !ok {
		return nil
	}

	for _, segStatus := range status.segments {
		if err := manager.assigner.CloseSegment(segStatus.segmentID); err != nil {
			return err
		}
	}

	delete(manager.collStatus, collID)
	return nil
}

func NewSegmentManager(ctx context.Context,
	meta *metaTable,
	globalIDAllocator func() (UniqueID, error),
	globalTSOAllocator func() (Timestamp, error),
	syncWriteNodeChan chan *ms.TimeTickMsg,
	scheduler persistenceScheduler,
	assigner *SegmentAssigner) (*SegmentManagerImpl, error) {

	assignerCtx, cancel := context.WithCancel(ctx)
	segManager := &SegmentManagerImpl{
		metaTable:              meta,
		channelRanges:          make([]*channelRange, 0),
		collStatus:             make(map[UniqueID]*collectionStatus),
		segmentThreshold:       Params.SegmentSize * 1024 * 1024,
		segmentThresholdFactor: Params.SegmentSizeFactor,
		defaultSizePerRecord:   Params.DefaultRecordSize,
		numOfChannels:          Params.TopicNum,
		numOfQueryNodes:        Params.QueryNodeNum,
		globalIDAllocator:      globalIDAllocator,
		globalTSOAllocator:     globalTSOAllocator,

		assigner:              assigner,
		writeNodeTimeSyncChan: syncWriteNodeChan,
		flushScheduler:        scheduler,

		ctx:    assignerCtx,
		cancel: cancel,
	}

	if err := segManager.initChannelRanges(); err != nil {
		return nil, err
	}

	return segManager, nil
}

type mockSegmentManager struct {
}

func (manager *mockSegmentManager) Start() {
}

func (manager *mockSegmentManager) Close() {
}

func (manager *mockSegmentManager) AssignSegment(segIDReq []*datapb.SegIDRequest) ([]*datapb.SegIDAssignment, error) {
	return nil, nil
}

func (manager *mockSegmentManager) ForceClose(collID UniqueID) error {
	return nil
}

func (manager *mockSegmentManager) DropCollection(collID UniqueID) error {
	return nil
}

// only used in unit tests
func NewMockSegmentManager() SegmentManager {
	return &mockSegmentManager{}
}
@@ -1,291 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
|
||||
pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
)
|
||||
|
||||
func TestSegmentManager_AssignSegment(t *testing.T) {
|
||||
ctx, cancelFunc := context.WithCancel(context.TODO())
|
||||
defer cancelFunc()
|
||||
|
||||
Init()
|
||||
Params.TopicNum = 5
|
||||
Params.QueryNodeNum = 3
|
||||
Params.SegmentSize = 536870912 / 1024 / 1024
|
||||
Params.SegmentSizeFactor = 0.75
|
||||
Params.DefaultRecordSize = 1024
|
||||
Params.MinSegIDAssignCnt = 1048576 / 1024
|
||||
Params.SegIDAssignExpiration = 2000
|
||||
etcdAddress := Params.EtcdAddress
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
|
||||
assert.Nil(t, err)
|
||||
rootPath := "/test/root"
|
||||
_, err = cli.Delete(ctx, rootPath, clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
|
||||
kvBase := etcdkv.NewEtcdKV(cli, rootPath)
|
||||
defer kvBase.Close()
|
||||
mt, err := NewMetaTable(kvBase)
|
||||
assert.Nil(t, err)
|
||||
|
||||
collName := "segmgr_test_coll"
|
||||
var collID int64 = 1001
|
||||
partitionName := "test_part"
|
||||
schema := &schemapb.CollectionSchema{
|
||||
Name: collName,
|
||||
Fields: []*schemapb.FieldSchema{
|
||||
{FieldID: 1, Name: "f1", IsPrimaryKey: false, DataType: schemapb.DataType_INT32},
|
||||
{FieldID: 2, Name: "f2", IsPrimaryKey: false, DataType: schemapb.DataType_VECTOR_FLOAT, TypeParams: []*commonpb.KeyValuePair{
|
||||
{Key: "dim", Value: "128"},
|
||||
}},
|
||||
},
|
||||
}
|
||||
err = mt.AddCollection(&pb.CollectionMeta{
|
||||
ID: collID,
|
||||
Schema: schema,
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
err = mt.AddPartition(collID, partitionName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var cnt int64
|
||||
globalIDAllocator := func() (UniqueID, error) {
|
||||
val := atomic.AddInt64(&cnt, 1)
|
||||
return val, nil
|
||||
}
|
||||
globalTsoAllocator := func() (Timestamp, error) {
|
||||
val := atomic.AddInt64(&cnt, 1)
|
||||
phy := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
ts := tsoutil.ComposeTS(phy, val)
|
||||
return ts, nil
|
||||
}
|
||||
syncWriteChan := make(chan *msgstream.TimeTickMsg)
|
||||
syncProxyChan := make(chan *msgstream.TimeTickMsg)
|
||||
|
||||
segAssigner := NewSegmentAssigner(ctx, mt, globalTsoAllocator, syncProxyChan)
|
||||
mockScheduler := &MockFlushScheduler{}
|
||||
segManager, err := NewSegmentManager(ctx, mt, globalIDAllocator, globalTsoAllocator, syncWriteChan, mockScheduler, segAssigner)
|
||||
assert.Nil(t, err)
|
||||
|
||||
segManager.Start()
|
||||
defer segManager.Close()
|
||||
sizePerRecord, err := typeutil.EstimateSizePerRecord(schema)
|
||||
assert.Nil(t, err)
|
||||
maxCount := uint32(Params.SegmentSize * 1024 * 1024 / float64(sizePerRecord))
|
||||
cases := []struct {
|
||||
Count uint32
|
||||
ChannelID int32
|
||||
Err bool
|
||||
SameIDWith int
|
||||
NotSameIDWith int
|
||||
ResultCount int32
|
||||
}{
|
||||
{1000, 1, false, -1, -1, 1000},
|
||||
{2000, 0, false, 0, -1, 2000},
|
||||
{maxCount - 2999, 1, false, -1, 0, int32(maxCount - 2999)},
|
||||
{maxCount - 3000, 1, false, 0, -1, int32(maxCount - 3000)},
|
||||
{2000000000, 1, true, -1, -1, -1},
|
||||
{1000, 3, false, -1, 0, 1000},
|
||||
{maxCount, 2, false, -1, -1, int32(maxCount)},
|
||||
}
|
||||
|
||||
var results = make([]*datapb.SegIDAssignment, 0)
|
||||
for _, c := range cases {
|
||||
result, _ := segManager.AssignSegment([]*datapb.SegIDRequest{
|
||||
{Count: c.Count,
|
||||
ChannelName: strconv.FormatInt(int64(c.ChannelID), 10),
|
||||
CollName: collName, PartitionName: partitionName},
|
||||
})
|
||||
results = append(results, result...)
|
||||
if c.Err {
|
||||
assert.EqualValues(t, commonpb.ErrorCode_UNEXPECTED_ERROR, result[0].Status.ErrorCode)
|
||||
continue
|
||||
}
|
||||
assert.EqualValues(t, commonpb.ErrorCode_SUCCESS, result[0].Status.ErrorCode)
|
||||
if c.SameIDWith != -1 {
|
||||
assert.EqualValues(t, result[0].SegID, results[c.SameIDWith].SegID)
|
||||
}
|
||||
if c.NotSameIDWith != -1 {
|
||||
assert.NotEqualValues(t, result[0].SegID, results[c.NotSameIDWith].SegID)
|
||||
}
|
||||
if c.ResultCount != -1 {
|
||||
assert.EqualValues(t, result[0].Count, c.ResultCount)
|
||||
}
|
||||
}
|
||||
|
||||
time.Sleep(time.Duration(Params.SegIDAssignExpiration) * time.Millisecond)
|
||||
timestamp, err := globalTsoAllocator()
|
||||
assert.Nil(t, err)
|
||||
err = mt.UpdateSegment(&pb.SegmentMeta{
|
||||
SegmentID: results[0].SegID,
|
||||
CollectionID: collID,
|
||||
PartitionTag: partitionName,
|
||||
ChannelStart: 0,
|
||||
ChannelEnd: 1,
|
||||
CloseTime: timestamp,
|
||||
NumRows: 400000,
|
||||
MemSize: 500000,
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
tsMsg := &msgstream.TimeTickMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
BeginTimestamp: timestamp, EndTimestamp: timestamp, HashValues: []uint32{},
|
||||
},
|
||||
TimeTickMsg: internalpb2.TimeTickMsg{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kTimeTick,
|
||||
MsgID: 0,
|
||||
Timestamp: timestamp,
|
||||
SourceID: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
syncWriteChan <- tsMsg
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
segMeta, err := mt.GetSegmentByID(results[0].SegID)
|
||||
assert.Nil(t, err)
|
||||
assert.NotEqualValues(t, 0, segMeta.CloseTime)
|
||||
}
|
||||
|
||||
func TestSegmentManager_SyncWriteNode(t *testing.T) {
|
||||
ctx, cancelFunc := context.WithCancel(context.TODO())
|
||||
defer cancelFunc()
|
||||
|
||||
Init()
|
||||
Params.TopicNum = 5
|
||||
Params.QueryNodeNum = 3
|
||||
Params.SegmentSize = 536870912 / 1024 / 1024
|
||||
Params.SegmentSizeFactor = 0.75
|
||||
Params.DefaultRecordSize = 1024
|
||||
Params.MinSegIDAssignCnt = 1048576 / 1024
|
||||
Params.SegIDAssignExpiration = 2000
|
||||
etcdAddress := Params.EtcdAddress
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
|
||||
assert.Nil(t, err)
|
||||
rootPath := "/test/root"
|
||||
_, err = cli.Delete(ctx, rootPath, clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
|
||||
kvBase := etcdkv.NewEtcdKV(cli, rootPath)
|
||||
defer kvBase.Close()
|
||||
mt, err := NewMetaTable(kvBase)
|
||||
assert.Nil(t, err)
|
||||
|
||||
collName := "segmgr_test_coll"
|
||||
var collID int64 = 1001
|
||||
partitionName := "test_part"
|
||||
schema := &schemapb.CollectionSchema{
|
||||
Name: collName,
|
||||
Fields: []*schemapb.FieldSchema{
|
||||
{FieldID: 1, Name: "f1", IsPrimaryKey: false, DataType: schemapb.DataType_INT32},
|
||||
{FieldID: 2, Name: "f2", IsPrimaryKey: false, DataType: schemapb.DataType_VECTOR_FLOAT, TypeParams: []*commonpb.KeyValuePair{
|
||||
{Key: "dim", Value: "128"},
|
||||
}},
|
||||
},
|
||||
}
|
||||
err = mt.AddCollection(&pb.CollectionMeta{
|
||||
ID: collID,
|
||||
Schema: schema,
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
err = mt.AddPartition(collID, partitionName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var cnt int64
|
||||
globalIDAllocator := func() (UniqueID, error) {
|
||||
val := atomic.AddInt64(&cnt, 1)
|
||||
return val, nil
|
||||
}
|
||||
globalTsoAllocator := func() (Timestamp, error) {
|
||||
val := atomic.AddInt64(&cnt, 1)
|
||||
phy := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
ts := tsoutil.ComposeTS(phy, val)
|
||||
return ts, nil
|
||||
}
|
||||
syncWriteChan := make(chan *msgstream.TimeTickMsg)
|
||||
syncProxyChan := make(chan *msgstream.TimeTickMsg)
|
||||
|
||||
segAssigner := NewSegmentAssigner(ctx, mt, globalTsoAllocator, syncProxyChan)
|
||||
mockScheduler := &MockFlushScheduler{}
|
||||
segManager, err := NewSegmentManager(ctx, mt, globalIDAllocator, globalTsoAllocator, syncWriteChan, mockScheduler, segAssigner)
|
||||
assert.Nil(t, err)
|
||||
|
||||
segManager.Start()
|
||||
defer segManager.Close()
|
||||
sizePerRecord, err := typeutil.EstimateSizePerRecord(schema)
|
||||
assert.Nil(t, err)
|
||||
maxCount := uint32(Params.SegmentSize * 1024 * 1024 / float64(sizePerRecord))
|
||||
|
||||
req := []*datapb.SegIDRequest{
|
||||
{Count: maxCount, ChannelName: "1", CollName: collName, PartitionName: partitionName},
|
||||
{Count: maxCount, ChannelName: "2", CollName: collName, PartitionName: partitionName},
|
||||
{Count: maxCount, ChannelName: "3", CollName: collName, PartitionName: partitionName},
|
||||
}
|
||||
assignSegment, err := segManager.AssignSegment(req)
|
||||
assert.Nil(t, err)
|
||||
timestamp, err := globalTsoAllocator()
|
||||
assert.Nil(t, err)
|
||||
for i := 0; i < len(assignSegment); i++ {
|
||||
assert.EqualValues(t, maxCount, assignSegment[i].Count)
|
||||
assert.EqualValues(t, strconv.Itoa(i+1), assignSegment[i].ChannelName)
|
||||
|
||||
err = mt.UpdateSegment(&pb.SegmentMeta{
|
||||
SegmentID: assignSegment[i].SegID,
|
||||
CollectionID: collID,
|
||||
PartitionTag: partitionName,
|
||||
ChannelStart: 0,
|
||||
ChannelEnd: 1,
|
||||
CloseTime: timestamp,
|
||||
NumRows: int64(maxCount),
|
||||
MemSize: 500000,
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
time.Sleep(time.Duration(Params.SegIDAssignExpiration) * time.Millisecond)
|
||||
|
||||
timestamp, err = globalTsoAllocator()
|
||||
assert.Nil(t, err)
|
||||
tsMsg := &msgstream.TimeTickMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
BeginTimestamp: timestamp, EndTimestamp: timestamp, HashValues: []uint32{},
|
||||
},
|
||||
TimeTickMsg: internalpb2.TimeTickMsg{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kTimeTick,
|
||||
MsgID: 0,
|
||||
Timestamp: timestamp,
|
||||
SourceID: 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
syncWriteChan <- tsMsg
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
|
||||
segManager.mu.RLock()
|
||||
defer segManager.mu.RUnlock()
|
||||
status := segManager.collStatus[collID]
|
||||
assert.Empty(t, status.segments)
|
||||
}
|
|
@@ -1,85 +0,0 @@
package master

import (
	"github.com/zilliztech/milvus-distributed/internal/errors"
	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
)

type StatsProcessor struct {
	metaTable    *metaTable
	runtimeStats *RuntimeStats

	segmentThreshold       float64
	segmentThresholdFactor float64
	globalTSOAllocator     func() (Timestamp, error)
}

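// ProcessQueryNodeStats consumes a QueryNodeStats message pack, persisting
// per-segment row counts and memory sizes into the meta table and recording
// per-field index statistics in RuntimeStats.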
func (processor *StatsProcessor) ProcessQueryNodeStats(msgPack *msgstream.MsgPack) error {
|
||||
for _, msg := range msgPack.Msgs {
|
||||
statsMsg, ok := msg.(*msgstream.QueryNodeStatsMsg)
|
||||
if !ok {
|
||||
return errors.Errorf("Type of message is not QueryNodeSegStatsMsg")
|
||||
}
|
||||
|
||||
for _, segStat := range statsMsg.GetSegStats() {
|
||||
if err := processor.processSegmentStat(segStat); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, fieldStat := range statsMsg.GetFieldStats() {
|
||||
if err := processor.processFieldStat(statsMsg.Base.SourceID, fieldStat); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (processor *StatsProcessor) processSegmentStat(segStats *internalpb2.SegmentStats) error {
|
||||
if !segStats.GetRecentlyModified() {
|
||||
return nil
|
||||
}
|
||||
|
||||
segID := segStats.GetSegmentID()
|
||||
segMeta, err := processor.metaTable.GetSegmentByID(segID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
segMeta.NumRows = segStats.NumRows
|
||||
segMeta.MemSize = segStats.MemorySize
|
||||
|
||||
return processor.metaTable.UpdateSegment(segMeta)
|
||||
}
|
||||
|
||||
func (processor *StatsProcessor) processFieldStat(peerID int64, fieldStats *internalpb2.FieldStats) error {
|
||||
collID := fieldStats.CollectionID
|
||||
fieldID := fieldStats.FieldID
|
||||
|
||||
for _, stat := range fieldStats.IndexStats {
|
||||
fieldStats := &FieldIndexRuntimeStats{
|
||||
peerID: peerID,
|
||||
indexParams: stat.IndexParams,
|
||||
numOfRelatedSegments: stat.NumRelatedSegments,
|
||||
}
|
||||
|
||||
if err := processor.runtimeStats.UpdateFieldStat(collID, fieldID, fieldStats); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewStatsProcessor(mt *metaTable, runTimeStats *RuntimeStats, globalTSOAllocator func() (Timestamp, error)) *StatsProcessor {
|
||||
return &StatsProcessor{
|
||||
metaTable: mt,
|
||||
runtimeStats: runTimeStats,
|
||||
segmentThreshold: Params.SegmentSize * 1024 * 1024,
|
||||
segmentThresholdFactor: Params.SegmentSizeFactor,
|
||||
globalTSOAllocator: globalTSOAllocator,
|
||||
}
|
||||
}
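A hedged sketch of how a stats processor like this is usually driven: a loop consumes packs from the query-node stats stream and hands each one to ProcessQueryNodeStats, logging failures instead of stopping. The interfaces below are stand-ins for the metaTable/msgstream dependencies, which are not reproduced here.

```go
package statsdemo

import (
	"context"
	"log"
)

// pack stands in for *msgstream.MsgPack; only its identity matters here.
type pack struct{}

// statsSource is the slice of the msgstream API this loop needs: a channel
// of incoming packs, in the spirit of MsgStream.Chan().
type statsSource interface {
	Chan() <-chan *pack
}

// statsProcessor mirrors the ProcessQueryNodeStats entry point above.
type statsProcessor interface {
	ProcessQueryNodeStats(p *pack) error
}

// run drains the stream until the context is cancelled.
func run(ctx context.Context, src statsSource, proc statsProcessor) {
	for {
		select {
		case <-ctx.Done():
			return
		case p := <-src.Chan():
			if err := proc.ProcessQueryNodeStats(p); err != nil {
				log.Printf("process query node stats: %v", err)
			}
		}
	}
}
```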
|
|
@@ -1,118 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
|
||||
pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
)
|
||||
|
||||
func TestStatsProcess(t *testing.T) {
|
||||
Init()
|
||||
etcdAddress := Params.EtcdAddress
|
||||
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
|
||||
assert.Nil(t, err)
|
||||
rootPath := "/test/root"
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
_, err = cli.Delete(ctx, rootPath, clientv3.WithPrefix())
|
||||
assert.Nil(t, err)
|
||||
|
||||
kvBase := etcdkv.NewEtcdKV(cli, rootPath)
|
||||
mt, err := NewMetaTable(kvBase)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var cnt int64 = 0
|
||||
globalTsoAllocator := func() (Timestamp, error) {
|
||||
val := atomic.AddInt64(&cnt, 1)
|
||||
phy := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
ts := tsoutil.ComposeTS(phy, val)
|
||||
return ts, nil
|
||||
}
|
||||
runtimeStats := NewRuntimeStats()
|
||||
statsProcessor := NewStatsProcessor(mt, runtimeStats, globalTsoAllocator)
|
||||
|
||||
ts, err := globalTsoAllocator()
|
||||
assert.Nil(t, err)
|
||||
|
||||
collID := int64(1001)
|
||||
collName := "test_coll"
|
||||
partitionTag := "test_part"
|
||||
err = mt.AddCollection(&pb.CollectionMeta{
|
||||
ID: collID,
|
||||
Schema: &schemapb.CollectionSchema{
|
||||
Name: collName,
|
||||
},
|
||||
CreateTime: 0,
|
||||
SegmentIDs: []UniqueID{},
|
||||
PartitionTags: []string{},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
err = mt.AddPartition(collID, partitionTag)
|
||||
assert.Nil(t, err)
|
||||
err = mt.AddSegment(&pb.SegmentMeta{
|
||||
SegmentID: 100,
|
||||
CollectionID: collID,
|
||||
PartitionTag: partitionTag,
|
||||
ChannelStart: 0,
|
||||
ChannelEnd: 1,
|
||||
OpenTime: ts,
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
stats := internalpb2.QueryNodeStats{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kQueryNodeStats,
|
||||
SourceID: 1,
|
||||
},
|
||||
SegStats: []*internalpb2.SegmentStats{
|
||||
{SegmentID: 100, MemorySize: 2500000, NumRows: 25000, RecentlyModified: true},
|
||||
},
|
||||
FieldStats: []*internalpb2.FieldStats{
|
||||
{CollectionID: 1, FieldID: 100, IndexStats: []*internalpb2.IndexStats{
|
||||
{IndexParams: []*commonpb.KeyValuePair{}, NumRelatedSegments: 100},
|
||||
}},
|
||||
{CollectionID: 2, FieldID: 100, IndexStats: []*internalpb2.IndexStats{
|
||||
{IndexParams: []*commonpb.KeyValuePair{}, NumRelatedSegments: 200},
|
||||
}},
|
||||
},
|
||||
}
|
||||
baseMsg := msgstream.BaseMsg{
|
||||
BeginTimestamp: 0,
|
||||
EndTimestamp: 0,
|
||||
HashValues: []uint32{1},
|
||||
}
|
||||
msg := msgstream.QueryNodeStatsMsg{
|
||||
QueryNodeStats: stats,
|
||||
BaseMsg: baseMsg,
|
||||
}
|
||||
|
||||
var tsMsg msgstream.TsMsg = &msg
|
||||
msgPack := msgstream.MsgPack{
|
||||
Msgs: make([]msgstream.TsMsg, 0),
|
||||
}
|
||||
msgPack.Msgs = append(msgPack.Msgs, tsMsg)
|
||||
err = statsProcessor.ProcessQueryNodeStats(&msgPack)
|
||||
assert.Nil(t, err)
|
||||
|
||||
segMeta, _ := mt.GetSegmentByID(100)
|
||||
assert.Equal(t, int64(100), segMeta.SegmentID)
|
||||
assert.Equal(t, int64(2500000), segMeta.MemSize)
|
||||
assert.Equal(t, int64(25000), segMeta.NumRows)
|
||||
|
||||
assert.EqualValues(t, 100, runtimeStats.collStats[1].fieldIndexStats[100][0].numOfRelatedSegments)
|
||||
assert.EqualValues(t, 200, runtimeStats.collStats[2].fieldIndexStats[100][0].numOfRelatedSegments)
|
||||
|
||||
}
|
|
@@ -1,43 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
)
|
||||
|
||||
// TODO: get timestamp from timestampOracle
|
||||
|
||||
type baseTask struct {
|
||||
sch *ddRequestScheduler
|
||||
mt *metaTable
|
||||
cv chan error
|
||||
}
|
||||
|
||||
type task interface {
|
||||
Type() commonpb.MsgType
|
||||
Ts() (Timestamp, error)
|
||||
Execute() error
|
||||
WaitToFinish(ctx context.Context) error
|
||||
Notify(err error)
|
||||
}
|
||||
|
||||
func (bt *baseTask) Notify(err error) {
|
||||
bt.cv <- err
|
||||
}
|
||||
|
||||
func (bt *baseTask) WaitToFinish(ctx context.Context) error {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.Errorf("context done")
|
||||
case err, ok := <-bt.cv:
|
||||
if !ok {
|
||||
return errors.Errorf("notify chan closed")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
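The Notify/WaitToFinish pair above implements a one-shot completion handshake: whoever executes the task sends its error on the channel, and the caller blocks until that result arrives or the context expires. A small self-contained sketch of the same pattern; the scheduler and task names here are illustrative, not the master's real ones.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// demoTask mirrors the baseTask shape: a buffered channel carries the
// one-shot completion result.
type demoTask struct {
	done chan error
}

func (t *demoTask) Notify(err error) { t.done <- err }

func (t *demoTask) WaitToFinish(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return errors.New("context done")
	case err, ok := <-t.done:
		if !ok {
			return errors.New("notify chan closed")
		}
		return err
	}
}

func main() {
	task := &demoTask{done: make(chan error, 1)}

	// A stand-in for the request scheduler: execute, then notify the waiter.
	go func() {
		time.Sleep(10 * time.Millisecond) // pretend to execute the DDL request
		task.Notify(nil)
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println("task finished with:", task.WaitToFinish(ctx))
}
```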
|
|
@@ -1,320 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
)
|
||||
|
||||
func TestMaster_CreateCollectionTask(t *testing.T) {
|
||||
req := milvuspb.CreateCollectionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kCreateCollection,
|
||||
MsgID: 1,
|
||||
Timestamp: 11,
|
||||
SourceID: 1,
|
||||
},
|
||||
Schema: nil,
|
||||
}
|
||||
var collectionTask task = &createCollectionTask{
|
||||
req: &req,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
assert.Equal(t, commonpb.MsgType_kCreateCollection, collectionTask.Type())
|
||||
ts, err := collectionTask.Ts()
|
||||
assert.Equal(t, uint64(11), ts)
|
||||
assert.Nil(t, err)
|
||||
|
||||
collectionTask = &createCollectionTask{
|
||||
req: nil,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
|
||||
assert.Equal(t, commonpb.MsgType_kNone, collectionTask.Type())
|
||||
ts, err = collectionTask.Ts()
|
||||
assert.Equal(t, uint64(0), ts)
|
||||
assert.NotNil(t, err)
|
||||
err = collectionTask.Execute()
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestMaster_DropCollectionTask(t *testing.T) {
|
||||
req := milvuspb.DropCollectionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kDropPartition,
|
||||
MsgID: 1,
|
||||
Timestamp: 11,
|
||||
SourceID: 1,
|
||||
},
|
||||
}
|
||||
var collectionTask task = &dropCollectionTask{
|
||||
req: &req,
|
||||
baseTask: baseTask{},
|
||||
segManager: NewMockSegmentManager(),
|
||||
}
|
||||
assert.Equal(t, commonpb.MsgType_kDropPartition, collectionTask.Type())
|
||||
ts, err := collectionTask.Ts()
|
||||
assert.Equal(t, uint64(11), ts)
|
||||
assert.Nil(t, err)
|
||||
|
||||
collectionTask = &dropCollectionTask{
|
||||
req: nil,
|
||||
baseTask: baseTask{},
|
||||
segManager: NewMockSegmentManager(),
|
||||
}
|
||||
|
||||
assert.Equal(t, commonpb.MsgType_kNone, collectionTask.Type())
|
||||
ts, err = collectionTask.Ts()
|
||||
assert.Equal(t, uint64(0), ts)
|
||||
assert.NotNil(t, err)
|
||||
err = collectionTask.Execute()
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestMaster_HasCollectionTask(t *testing.T) {
|
||||
req := milvuspb.HasCollectionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kHasCollection,
|
||||
MsgID: 1,
|
||||
Timestamp: 11,
|
||||
SourceID: 1,
|
||||
},
|
||||
}
|
||||
var collectionTask task = &hasCollectionTask{
|
||||
req: &req,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
assert.Equal(t, commonpb.MsgType_kHasCollection, collectionTask.Type())
|
||||
ts, err := collectionTask.Ts()
|
||||
assert.Equal(t, uint64(11), ts)
|
||||
assert.Nil(t, err)
|
||||
|
||||
collectionTask = &hasCollectionTask{
|
||||
req: nil,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
|
||||
assert.Equal(t, commonpb.MsgType_kNone, collectionTask.Type())
|
||||
ts, err = collectionTask.Ts()
|
||||
assert.Equal(t, uint64(0), ts)
|
||||
assert.NotNil(t, err)
|
||||
err = collectionTask.Execute()
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestMaster_ShowCollectionTask(t *testing.T) {
|
||||
req := milvuspb.ShowCollectionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kShowCollections,
|
||||
MsgID: 1,
|
||||
Timestamp: 11,
|
||||
SourceID: 1,
|
||||
},
|
||||
}
|
||||
var collectionTask task = &showCollectionsTask{
|
||||
req: &req,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
assert.Equal(t, commonpb.MsgType_kShowCollections, collectionTask.Type())
|
||||
ts, err := collectionTask.Ts()
|
||||
assert.Equal(t, uint64(11), ts)
|
||||
assert.Nil(t, err)
|
||||
|
||||
collectionTask = &showCollectionsTask{
|
||||
req: nil,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
|
||||
assert.Equal(t, commonpb.MsgType_kNone, collectionTask.Type())
|
||||
ts, err = collectionTask.Ts()
|
||||
assert.Equal(t, uint64(0), ts)
|
||||
assert.NotNil(t, err)
|
||||
err = collectionTask.Execute()
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestMaster_DescribeCollectionTask(t *testing.T) {
|
||||
req := milvuspb.DescribeCollectionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kDescribeCollection,
|
||||
MsgID: 1,
|
||||
Timestamp: 11,
|
||||
SourceID: 1,
|
||||
},
|
||||
}
|
||||
var collectionTask task = &describeCollectionTask{
|
||||
req: &req,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
assert.Equal(t, commonpb.MsgType_kDescribeCollection, collectionTask.Type())
|
||||
ts, err := collectionTask.Ts()
|
||||
assert.Equal(t, uint64(11), ts)
|
||||
assert.Nil(t, err)
|
||||
|
||||
collectionTask = &describeCollectionTask{
|
||||
req: nil,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
|
||||
assert.Equal(t, commonpb.MsgType_kNone, collectionTask.Type())
|
||||
ts, err = collectionTask.Ts()
|
||||
assert.Equal(t, uint64(0), ts)
|
||||
assert.NotNil(t, err)
|
||||
err = collectionTask.Execute()
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestMaster_CreatePartitionTask(t *testing.T) {
|
||||
req := milvuspb.CreatePartitionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kCreatePartition,
|
||||
MsgID: 1,
|
||||
Timestamp: 11,
|
||||
SourceID: 1,
|
||||
},
|
||||
}
|
||||
var partitionTask task = &createPartitionTask{
|
||||
req: &req,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
assert.Equal(t, commonpb.MsgType_kCreatePartition, partitionTask.Type())
|
||||
ts, err := partitionTask.Ts()
|
||||
assert.Equal(t, uint64(11), ts)
|
||||
assert.Nil(t, err)
|
||||
|
||||
partitionTask = &createPartitionTask{
|
||||
req: nil,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
|
||||
assert.Equal(t, commonpb.MsgType_kNone, partitionTask.Type())
|
||||
ts, err = partitionTask.Ts()
|
||||
assert.Equal(t, uint64(0), ts)
|
||||
assert.NotNil(t, err)
|
||||
err = partitionTask.Execute()
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
func TestMaster_DropPartitionTask(t *testing.T) {
|
||||
req := milvuspb.DropPartitionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kDropPartition,
|
||||
MsgID: 1,
|
||||
Timestamp: 11,
|
||||
SourceID: 1,
|
||||
},
|
||||
}
|
||||
var partitionTask task = &dropPartitionTask{
|
||||
req: &req,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
assert.Equal(t, commonpb.MsgType_kDropPartition, partitionTask.Type())
|
||||
ts, err := partitionTask.Ts()
|
||||
assert.Equal(t, uint64(11), ts)
|
||||
assert.Nil(t, err)
|
||||
|
||||
partitionTask = &dropPartitionTask{
|
||||
req: nil,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
|
||||
assert.Equal(t, commonpb.MsgType_kNone, partitionTask.Type())
|
||||
ts, err = partitionTask.Ts()
|
||||
assert.Equal(t, uint64(0), ts)
|
||||
assert.NotNil(t, err)
|
||||
err = partitionTask.Execute()
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
func TestMaster_HasPartitionTask(t *testing.T) {
|
||||
req := milvuspb.HasPartitionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kHasPartition,
|
||||
MsgID: 1,
|
||||
Timestamp: 11,
|
||||
SourceID: 1,
|
||||
},
|
||||
}
|
||||
var partitionTask task = &hasPartitionTask{
|
||||
req: &req,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
assert.Equal(t, commonpb.MsgType_kHasPartition, partitionTask.Type())
|
||||
ts, err := partitionTask.Ts()
|
||||
assert.Equal(t, uint64(11), ts)
|
||||
assert.Nil(t, err)
|
||||
|
||||
partitionTask = &hasPartitionTask{
|
||||
req: nil,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
|
||||
assert.Equal(t, commonpb.MsgType_kNone, partitionTask.Type())
|
||||
ts, err = partitionTask.Ts()
|
||||
assert.Equal(t, uint64(0), ts)
|
||||
assert.NotNil(t, err)
|
||||
err = partitionTask.Execute()
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
//func TestMaster_DescribePartitionTask(t *testing.T) {
|
||||
// req := milvuspb.DescribePartitionRequest{
|
||||
// MsgType: commonpb.MsgType_kDescribePartition,
|
||||
// ReqID: 1,
|
||||
// Timestamp: 11,
|
||||
// ProxyID: 1,
|
||||
// PartitionName: nil,
|
||||
// }
|
||||
// var partitionTask task = &describePartitionTask{
|
||||
// req: &req,
|
||||
// baseTask: baseTask{},
|
||||
// }
|
||||
// assert.Equal(t, commonpb.MsgType_kDescribePartition, partitionTask.Type())
|
||||
// ts, err := partitionTask.Ts()
|
||||
// assert.Equal(t, uint64(11), ts)
|
||||
// assert.Nil(t, err)
|
||||
//
|
||||
// partitionTask = &describePartitionTask{
|
||||
// req: nil,
|
||||
// baseTask: baseTask{},
|
||||
// }
|
||||
//
|
||||
// assert.Equal(t, commonpb.MsgType_kNone, partitionTask.Type())
|
||||
// ts, err = partitionTask.Ts()
|
||||
// assert.Equal(t, uint64(0), ts)
|
||||
// assert.NotNil(t, err)
|
||||
// err = partitionTask.Execute()
|
||||
// assert.NotNil(t, err)
|
||||
//}
|
||||
|
||||
func TestMaster_ShowPartitionTask(t *testing.T) {
|
||||
req := milvuspb.ShowPartitionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kShowPartitions,
|
||||
MsgID: 1,
|
||||
Timestamp: 11,
|
||||
SourceID: 1,
|
||||
},
|
||||
}
|
||||
var partitionTask task = &showPartitionTask{
|
||||
req: &req,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
assert.Equal(t, commonpb.MsgType_kShowPartitions, partitionTask.Type())
|
||||
ts, err := partitionTask.Ts()
|
||||
assert.Equal(t, uint64(11), ts)
|
||||
assert.Nil(t, err)
|
||||
|
||||
partitionTask = &showPartitionTask{
|
||||
req: nil,
|
||||
baseTask: baseTask{},
|
||||
}
|
||||
|
||||
assert.Equal(t, commonpb.MsgType_kNone, partitionTask.Type())
|
||||
ts, err = partitionTask.Ts()
|
||||
assert.Equal(t, uint64(0), ts)
|
||||
assert.NotNil(t, err)
|
||||
err = partitionTask.Execute()
|
||||
assert.NotNil(t, err)
|
||||
}
|
|
@@ -1,126 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
)
|
||||
|
||||
type (
|
||||
TestTickBarrier struct {
|
||||
value int64
|
||||
ctx context.Context
|
||||
}
|
||||
)
|
||||
|
||||
func (ttBarrier *TestTickBarrier) GetTimeTick() (Timestamp, error) {
|
||||
time.Sleep(1 * time.Second)
|
||||
ttBarrier.value++
|
||||
return Timestamp(ttBarrier.value), nil
|
||||
}
|
||||
|
||||
func (ttBarrier *TestTickBarrier) Start() error {
|
||||
go func(ctx context.Context) {
|
||||
<-ctx.Done()
|
||||
log.Printf("barrier context done, exit")
|
||||
}(ttBarrier.ctx)
|
||||
return nil
|
||||
}
|
||||
func (ttBarrier *TestTickBarrier) Close() {
|
||||
_, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
}
|
||||
|
||||
func initTestPulsarStream(ctx context.Context, pulsarAddress string,
|
||||
producerChannels []string,
|
||||
consumerChannels []string,
|
||||
consumerSubName string, opts ...ms.RepackFunc) (*ms.MsgStream, *ms.MsgStream) {
|
||||
|
||||
// set input stream
|
||||
inputStream := pulsarms.NewPulsarMsgStream(ctx, 100)
|
||||
inputStream.SetPulsarClient(pulsarAddress)
|
||||
inputStream.CreatePulsarProducers(producerChannels)
|
||||
for _, opt := range opts {
|
||||
inputStream.SetRepackFunc(opt)
|
||||
}
|
||||
var input ms.MsgStream = inputStream
|
||||
|
||||
// set output stream
|
||||
outputStream := pulsarms.NewPulsarMsgStream(ctx, 100)
|
||||
outputStream.SetPulsarClient(pulsarAddress)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName, unmarshalDispatcher, 100)
|
||||
var output ms.MsgStream = outputStream
|
||||
|
||||
return &input, &output
|
||||
}
|
||||
|
||||
func receiveMsg(stream *ms.MsgStream) []uint64 {
|
||||
receiveCount := 0
|
||||
var results []uint64
|
||||
for {
|
||||
result := (*stream).Consume()
|
||||
if len(result.Msgs) > 0 {
|
||||
msgs := result.Msgs
|
||||
for _, v := range msgs {
|
||||
timetickmsg := v.(*ms.TimeTickMsg)
|
||||
results = append(results, timetickmsg.TimeTickMsg.Base.Timestamp)
|
||||
receiveCount++
|
||||
if receiveCount == 10 {
|
||||
return results
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestStream_PulsarMsgStream_TimeTick(t *testing.T) {
|
||||
Init()
|
||||
pulsarAddress := Params.PulsarAddress
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
producerChannels := []string{"proxyDMTtBarrier"}
|
||||
consumerChannels := []string{"proxyDMTtBarrier"}
|
||||
consumerSubName := "proxyDMTtBarrier"
|
||||
proxyDMTtInputStream, proxyDMTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
|
||||
|
||||
producerChannels = []string{"proxyDDTtBarrier"}
|
||||
consumerChannels = []string{"proxyDDTtBarrier"}
|
||||
consumerSubName = "proxyDDTtBarrier"
|
||||
proxyDDTtInputStream, proxyDDTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
|
||||
|
||||
producerChannels = []string{"writeNodeBarrier"}
|
||||
consumerChannels = []string{"writeNodeBarrier"}
|
||||
consumerSubName = "writeNodeBarrier"
|
||||
writeNodeInputStream, writeNodeOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
|
||||
|
||||
timeSyncProducer, _ := NewTimeSyncMsgProducer(ctx)
|
||||
timeSyncProducer.SetProxyTtBarrier(&TestTickBarrier{ctx: ctx})
|
||||
timeSyncProducer.SetWriteNodeTtBarrier(&TestTickBarrier{ctx: ctx})
|
||||
timeSyncProducer.SetDMSyncStream(*proxyDMTtInputStream)
|
||||
timeSyncProducer.SetDDSyncStream(*proxyDDTtInputStream)
|
||||
timeSyncProducer.SetK2sSyncStream(*writeNodeInputStream)
|
||||
(*proxyDMTtOutputStream).Start()
|
||||
(*proxyDDTtOutputStream).Start()
|
||||
(*writeNodeOutputStream).Start()
|
||||
timeSyncProducer.Start()
|
||||
expected := []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
|
||||
result1 := receiveMsg(proxyDMTtOutputStream)
|
||||
assert.Equal(t, expected, result1)
|
||||
expected = []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
|
||||
result1 = receiveMsg(proxyDDTtOutputStream)
|
||||
assert.Equal(t, expected, result1)
|
||||
expected = []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
|
||||
result2 := receiveMsg(writeNodeOutputStream)
|
||||
assert.Equal(t, expected, result2)
|
||||
|
||||
timeSyncProducer.Close()
|
||||
}
|
|
@@ -1,132 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
)
|
||||
|
||||
type timeSyncMsgProducer struct {
|
||||
//softTimeTickBarrier
|
||||
proxyTtBarrier TimeTickBarrier
|
||||
//hardTimeTickBarrier
|
||||
writeNodeTtBarrier TimeTickBarrier
|
||||
|
||||
ddSyncStream ms.MsgStream // insert & delete
|
||||
dmSyncStream ms.MsgStream
|
||||
k2sSyncStream ms.MsgStream
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
proxyWatchers []chan *ms.TimeTickMsg
|
||||
writeNodeWatchers []chan *ms.TimeTickMsg
|
||||
}
|
||||
|
||||
func NewTimeSyncMsgProducer(ctx context.Context) (*timeSyncMsgProducer, error) {
|
||||
ctx2, cancel := context.WithCancel(ctx)
|
||||
return &timeSyncMsgProducer{ctx: ctx2, cancel: cancel}, nil
|
||||
}
|
||||
|
||||
func (syncMsgProducer *timeSyncMsgProducer) SetProxyTtBarrier(proxyTtBarrier TimeTickBarrier) {
|
||||
syncMsgProducer.proxyTtBarrier = proxyTtBarrier
|
||||
}
|
||||
|
||||
func (syncMsgProducer *timeSyncMsgProducer) SetWriteNodeTtBarrier(writeNodeTtBarrier TimeTickBarrier) {
|
||||
syncMsgProducer.writeNodeTtBarrier = writeNodeTtBarrier
|
||||
}
|
||||
func (syncMsgProducer *timeSyncMsgProducer) SetDDSyncStream(ddSync ms.MsgStream) {
|
||||
syncMsgProducer.ddSyncStream = ddSync
|
||||
}
|
||||
|
||||
func (syncMsgProducer *timeSyncMsgProducer) SetDMSyncStream(dmSync ms.MsgStream) {
|
||||
syncMsgProducer.dmSyncStream = dmSync
|
||||
}
|
||||
|
||||
func (syncMsgProducer *timeSyncMsgProducer) SetK2sSyncStream(k2sSync ms.MsgStream) {
|
||||
syncMsgProducer.k2sSyncStream = k2sSync
|
||||
}
|
||||
|
||||
func (syncMsgProducer *timeSyncMsgProducer) WatchProxyTtBarrier(watcher chan *ms.TimeTickMsg) {
|
||||
syncMsgProducer.proxyWatchers = append(syncMsgProducer.proxyWatchers, watcher)
|
||||
}
|
||||
|
||||
func (syncMsgProducer *timeSyncMsgProducer) WatchWriteNodeTtBarrier(watcher chan *ms.TimeTickMsg) {
|
||||
syncMsgProducer.writeNodeWatchers = append(syncMsgProducer.writeNodeWatchers, watcher)
|
||||
}
|
||||
|
||||
func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier, streams []ms.MsgStream, channels []chan *ms.TimeTickMsg) error {
|
||||
for {
|
||||
select {
|
||||
case <-syncMsgProducer.ctx.Done():
|
||||
{
|
||||
log.Printf("broadcast context done, exit")
|
||||
return errors.Errorf("broadcast done exit")
|
||||
}
|
||||
default:
|
||||
timetick, err := barrier.GetTimeTick()
|
||||
if err != nil {
|
||||
log.Printf("broadcast get time tick error")
|
||||
}
|
||||
msgPack := ms.MsgPack{}
|
||||
baseMsg := ms.BaseMsg{
|
||||
BeginTimestamp: timetick,
|
||||
EndTimestamp: timetick,
|
||||
HashValues: []uint32{0},
|
||||
}
|
||||
timeTickResult := internalpb2.TimeTickMsg{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kTimeTick,
|
||||
MsgID: 0,
|
||||
Timestamp: timetick,
|
||||
SourceID: 0,
|
||||
},
|
||||
}
|
||||
timeTickMsg := &ms.TimeTickMsg{
|
||||
BaseMsg: baseMsg,
|
||||
TimeTickMsg: timeTickResult,
|
||||
}
|
||||
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
|
||||
for _, stream := range streams {
|
||||
err = stream.Broadcast(&msgPack)
|
||||
}
|
||||
|
||||
for _, channel := range channels {
|
||||
channel <- timeTickMsg
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (syncMsgProducer *timeSyncMsgProducer) Start() error {
|
||||
err := syncMsgProducer.proxyTtBarrier.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = syncMsgProducer.writeNodeTtBarrier.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go syncMsgProducer.broadcastMsg(syncMsgProducer.proxyTtBarrier, []ms.MsgStream{syncMsgProducer.dmSyncStream, syncMsgProducer.ddSyncStream}, syncMsgProducer.proxyWatchers)
|
||||
go syncMsgProducer.broadcastMsg(syncMsgProducer.writeNodeTtBarrier, []ms.MsgStream{syncMsgProducer.k2sSyncStream}, syncMsgProducer.writeNodeWatchers)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (syncMsgProducer *timeSyncMsgProducer) Close() {
|
||||
syncMsgProducer.ddSyncStream.Close()
|
||||
syncMsgProducer.dmSyncStream.Close()
|
||||
syncMsgProducer.k2sSyncStream.Close()
|
||||
syncMsgProducer.cancel()
|
||||
syncMsgProducer.proxyTtBarrier.Close()
|
||||
syncMsgProducer.writeNodeTtBarrier.Close()
|
||||
}
|
|
@@ -1,276 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"math"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
)
|
||||
|
||||
type (
|
||||
TimeTickBarrier interface {
|
||||
GetTimeTick() (Timestamp, error)
|
||||
Start() error
|
||||
Close()
|
||||
}
|
||||
|
||||
softTimeTickBarrier struct {
|
||||
peer2LastTt map[UniqueID]Timestamp
|
||||
minTtInterval Timestamp
|
||||
lastTt int64
|
||||
outTt chan Timestamp
|
||||
ttStream ms.MsgStream
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
hardTimeTickBarrier struct {
|
||||
peer2Tt map[UniqueID]Timestamp
|
||||
outTt chan Timestamp
|
||||
ttStream ms.MsgStream
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
proxyServiceTimeTickBarrier struct {
|
||||
ttStream ms.MsgStream
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
)
|
||||
|
||||
func (ttBarrier *proxyServiceTimeTickBarrier) GetTimeTick() (Timestamp, error) {
|
||||
select {
|
||||
case <-ttBarrier.ctx.Done():
|
||||
return 0, errors.Errorf("[GetTimeTick] closed.")
|
||||
case ttmsgs := <-ttBarrier.ttStream.Chan():
|
||||
log.Println("ttmsgs: ", ttmsgs)
|
||||
tempMin := Timestamp(math.MaxUint64)
|
||||
for _, ttmsg := range ttmsgs.Msgs {
|
||||
timeTickMsg, ok := ttmsg.(*ms.TimeTickMsg)
|
||||
if !ok {
|
||||
log.Println("something wrong in time tick message!")
|
||||
}
|
||||
if timeTickMsg.Base.Timestamp < tempMin {
|
||||
tempMin = timeTickMsg.Base.Timestamp
|
||||
}
|
||||
}
|
||||
log.Println("[GetTimeTick]: ", tempMin)
|
||||
return tempMin, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (ttBarrier *proxyServiceTimeTickBarrier) Start() error {
|
||||
ttBarrier.ttStream.Start()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ttBarrier *proxyServiceTimeTickBarrier) Close() {
|
||||
ttBarrier.ttStream.Close()
|
||||
ttBarrier.cancel()
|
||||
}
|
||||
|
||||
func newProxyServiceTimeTickBarrier(ctx context.Context, stream ms.MsgStream) TimeTickBarrier {
|
||||
ctx1, cancel := context.WithCancel(ctx)
|
||||
return &proxyServiceTimeTickBarrier{
|
||||
ttStream: stream,
|
||||
ctx: ctx1,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
func (ttBarrier *softTimeTickBarrier) GetTimeTick() (Timestamp, error) {
|
||||
select {
|
||||
case <-ttBarrier.ctx.Done():
|
||||
return 0, errors.Errorf("[GetTimeTick] closed.")
|
||||
case ts, ok := <-ttBarrier.outTt:
|
||||
if !ok {
|
||||
return 0, errors.Errorf("[GetTimeTick] closed.")
|
||||
}
|
||||
num := len(ttBarrier.outTt)
|
||||
for i := 0; i < num; i++ {
|
||||
ts, ok = <-ttBarrier.outTt
|
||||
if !ok {
|
||||
return 0, errors.Errorf("[GetTimeTick] closed.")
|
||||
}
|
||||
}
|
||||
atomic.StoreInt64(&(ttBarrier.lastTt), int64(ts))
|
||||
return ts, ttBarrier.ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (ttBarrier *softTimeTickBarrier) Start() error {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ttBarrier.ctx.Done():
|
||||
log.Printf("[TtBarrierStart] %s\n", ttBarrier.ctx.Err())
|
||||
return
|
||||
|
||||
case ttmsgs := <-ttBarrier.ttStream.Chan():
|
||||
if len(ttmsgs.Msgs) > 0 {
|
||||
for _, timetickmsg := range ttmsgs.Msgs {
|
||||
ttmsg := timetickmsg.(*ms.TimeTickMsg)
|
||||
oldT, ok := ttBarrier.peer2LastTt[ttmsg.Base.SourceID]
|
||||
// log.Printf("[softTimeTickBarrier] peer(%d)=%d\n", ttmsg.PeerID, ttmsg.Timestamp)
|
||||
|
||||
if !ok {
|
||||
log.Printf("[softTimeTickBarrier] Warning: peerID %d not exist\n", ttmsg.Base.SourceID)
|
||||
continue
|
||||
}
|
||||
if ttmsg.Base.Timestamp > oldT {
|
||||
ttBarrier.peer2LastTt[ttmsg.Base.SourceID] = ttmsg.Base.Timestamp
|
||||
|
||||
// get a legal Timestamp
|
||||
ts := ttBarrier.minTimestamp()
|
||||
lastTt := atomic.LoadInt64(&(ttBarrier.lastTt))
|
||||
if lastTt != 0 && ttBarrier.minTtInterval > ts-Timestamp(lastTt) {
|
||||
continue
|
||||
}
|
||||
ttBarrier.outTt <- ts
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func newSoftTimeTickBarrier(ctx context.Context,
|
||||
ttStream *ms.MsgStream,
|
||||
peerIds []UniqueID,
|
||||
minTtInterval Timestamp) *softTimeTickBarrier {
|
||||
|
||||
if len(peerIds) <= 0 {
|
||||
log.Printf("[newSoftTimeTickBarrier] Error: peerIds is empty!\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
sttbarrier := softTimeTickBarrier{}
|
||||
sttbarrier.minTtInterval = minTtInterval
|
||||
sttbarrier.ttStream = *ttStream
|
||||
sttbarrier.outTt = make(chan Timestamp, 1024)
|
||||
sttbarrier.ctx, sttbarrier.cancel = context.WithCancel(ctx)
|
||||
sttbarrier.peer2LastTt = make(map[UniqueID]Timestamp)
|
||||
for _, id := range peerIds {
|
||||
sttbarrier.peer2LastTt[id] = Timestamp(0)
|
||||
}
|
||||
if len(peerIds) != len(sttbarrier.peer2LastTt) {
|
||||
log.Printf("[newSoftTimeTickBarrier] Warning: there are duplicate peerIds!\n")
|
||||
}
|
||||
|
||||
return &sttbarrier
|
||||
}
|
||||
|
||||
func (ttBarrier *softTimeTickBarrier) Close() {
|
||||
ttBarrier.cancel()
|
||||
}
|
||||
|
||||
func (ttBarrier *softTimeTickBarrier) minTimestamp() Timestamp {
|
||||
tempMin := Timestamp(math.MaxUint64)
|
||||
for _, tt := range ttBarrier.peer2LastTt {
|
||||
if tt < tempMin {
|
||||
tempMin = tt
|
||||
}
|
||||
}
|
||||
return tempMin
|
||||
}
|
||||
|
||||
func (ttBarrier *hardTimeTickBarrier) GetTimeTick() (Timestamp, error) {
|
||||
select {
|
||||
case <-ttBarrier.ctx.Done():
|
||||
return 0, errors.Errorf("[GetTimeTick] closed.")
|
||||
case ts, ok := <-ttBarrier.outTt:
|
||||
if !ok {
|
||||
return 0, errors.Errorf("[GetTimeTick] closed.")
|
||||
}
|
||||
return ts, ttBarrier.ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (ttBarrier *hardTimeTickBarrier) Start() error {
|
||||
go func() {
|
||||
// Last timestamp synchronized
|
||||
state := Timestamp(0)
|
||||
for {
|
||||
select {
|
||||
case <-ttBarrier.ctx.Done():
|
||||
log.Printf("[TtBarrierStart] %s\n", ttBarrier.ctx.Err())
|
||||
return
|
||||
|
||||
case ttmsgs := <-ttBarrier.ttStream.Chan():
|
||||
if len(ttmsgs.Msgs) > 0 {
|
||||
for _, timetickmsg := range ttmsgs.Msgs {
|
||||
|
||||
// Suppose ttmsg.Timestamp from stream is always larger than the previous one,
|
||||
// that is, `ttmsg.Timestamp > oldT`
|
||||
ttmsg := timetickmsg.(*ms.TimeTickMsg)
|
||||
|
||||
oldT, ok := ttBarrier.peer2Tt[ttmsg.Base.SourceID]
|
||||
if !ok {
|
||||
log.Printf("[hardTimeTickBarrier] Warning: peerID %d not exist\n", ttmsg.Base.SourceID)
|
||||
continue
|
||||
}
|
||||
|
||||
if oldT > state {
|
||||
log.Printf("[hardTimeTickBarrier] Warning: peer(%d) timestamp(%d) ahead\n",
|
||||
ttmsg.Base.SourceID, ttmsg.Base.Timestamp)
|
||||
}
|
||||
|
||||
ttBarrier.peer2Tt[ttmsg.Base.SourceID] = ttmsg.Base.Timestamp
|
||||
|
||||
newState := ttBarrier.minTimestamp()
|
||||
if newState > state {
|
||||
ttBarrier.outTt <- newState
|
||||
state = newState
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ttBarrier *hardTimeTickBarrier) minTimestamp() Timestamp {
|
||||
tempMin := Timestamp(math.MaxUint64)
|
||||
for _, tt := range ttBarrier.peer2Tt {
|
||||
if tt < tempMin {
|
||||
tempMin = tt
|
||||
}
|
||||
}
|
||||
return tempMin
|
||||
}
|
||||
|
||||
func newHardTimeTickBarrier(ctx context.Context,
|
||||
ttStream *ms.MsgStream,
|
||||
peerIds []UniqueID) *hardTimeTickBarrier {
|
||||
|
||||
if len(peerIds) <= 0 {
|
||||
log.Printf("[newSoftTimeTickBarrier] Error: peerIds is empty!")
|
||||
return nil
|
||||
}
|
||||
|
||||
sttbarrier := hardTimeTickBarrier{}
|
||||
sttbarrier.ttStream = *ttStream
|
||||
sttbarrier.outTt = make(chan Timestamp, 1024)
|
||||
sttbarrier.ctx, sttbarrier.cancel = context.WithCancel(ctx)
|
||||
|
||||
sttbarrier.peer2Tt = make(map[UniqueID]Timestamp)
|
||||
for _, id := range peerIds {
|
||||
sttbarrier.peer2Tt[id] = Timestamp(0)
|
||||
}
|
||||
if len(peerIds) != len(sttbarrier.peer2Tt) {
|
||||
log.Printf("[newSoftTimeTickBarrier] Warning: there are duplicate peerIds!")
|
||||
}
|
||||
|
||||
return &sttbarrier
|
||||
}
|
||||
|
||||
func (ttBarrier *hardTimeTickBarrier) Close() {
|
||||
ttBarrier.cancel()
|
||||
}
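Both barrier flavours above reduce to the same invariant: a new time tick is published only when the minimum timestamp across all registered peers advances. A compact sketch of that rule in isolation, with the msgstream wiring stripped out; the map layout follows peer2Tt above, the rest is illustrative.

```go
package main

import (
	"fmt"
	"math"
)

type Timestamp = uint64
type UniqueID = int64

// barrierState tracks the latest tick reported by each peer, like peer2Tt.
type barrierState struct {
	peer2Tt map[UniqueID]Timestamp
	emitted Timestamp // last timestamp pushed downstream
}

func (b *barrierState) minTimestamp() Timestamp {
	tempMin := Timestamp(math.MaxUint64)
	for _, tt := range b.peer2Tt {
		if tt < tempMin {
			tempMin = tt
		}
	}
	return tempMin
}

// observe records a peer's tick and returns (ts, true) only when the
// barrier-wide minimum moves forward, mirroring the hard barrier's Start loop.
func (b *barrierState) observe(peer UniqueID, ts Timestamp) (Timestamp, bool) {
	if _, ok := b.peer2Tt[peer]; !ok {
		return 0, false // unknown peer, ignored just like the warning path above
	}
	b.peer2Tt[peer] = ts
	if m := b.minTimestamp(); m > b.emitted {
		b.emitted = m
		return m, true
	}
	return 0, false
}

func main() {
	b := &barrierState{peer2Tt: map[UniqueID]Timestamp{1: 0, 2: 0}}
	fmt.Println(b.observe(1, 10)) // 0 false: peer 2 is still at 0
	fmt.Println(b.observe(2, 10)) // 10 true: the minimum advanced to 10
}
```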
|
|
@@ -1,437 +0,0 @@
|
|||
package master
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
)
|
||||
|
||||
func getTtMsg(msgType commonpb.MsgType, peerID UniqueID, timeStamp uint64) ms.TsMsg {
|
||||
baseMsg := ms.BaseMsg{
|
||||
HashValues: []uint32{uint32(peerID)},
|
||||
}
|
||||
timeTickResult := internalpb2.TimeTickMsg{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kTimeTick,
|
||||
MsgID: 0,
|
||||
Timestamp: timeStamp,
|
||||
SourceID: peerID,
|
||||
},
|
||||
}
|
||||
timeTickMsg := &ms.TimeTickMsg{
|
||||
BaseMsg: baseMsg,
|
||||
TimeTickMsg: timeTickResult,
|
||||
}
|
||||
|
||||
return timeTickMsg
|
||||
}
|
||||
|
||||
func initPulsarStream(pulsarAddress string,
|
||||
producerChannels []string,
|
||||
consumerChannels []string,
|
||||
consumerSubName string) (*ms.MsgStream, *ms.MsgStream) {
|
||||
|
||||
// set input stream
|
||||
inputStream := pulsarms.NewPulsarMsgStream(context.Background(), 100)
|
||||
inputStream.SetPulsarClient(pulsarAddress)
|
||||
inputStream.CreatePulsarProducers(producerChannels)
|
||||
var input ms.MsgStream = inputStream
|
||||
|
||||
// set output stream
|
||||
outputStream := pulsarms.NewPulsarMsgStream(context.Background(), 100)
|
||||
outputStream.SetPulsarClient(pulsarAddress)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
outputStream.CreatePulsarConsumers(consumerChannels, consumerSubName, unmarshalDispatcher, 100)
|
||||
outputStream.Start()
|
||||
var output ms.MsgStream = outputStream
|
||||
|
||||
return &input, &output
|
||||
}
|
||||
|
||||
func getMsgPack(ttmsgs [][2]int) *ms.MsgPack {
|
||||
msgPack := ms.MsgPack{}
|
||||
for _, vi := range ttmsgs {
|
||||
msgPack.Msgs = append(msgPack.Msgs, getTtMsg(commonpb.MsgType_kTimeTick, UniqueID(vi[0]), Timestamp(vi[1])))
|
||||
}
|
||||
return &msgPack
|
||||
}
|
||||
|
||||
func getEmptyMsgPack() *ms.MsgPack {
|
||||
msgPack := ms.MsgPack{}
|
||||
return &msgPack
|
||||
}
|
||||
|
||||
func producer(channels []string, ttmsgs [][2]int) (*ms.MsgStream, *ms.MsgStream) {
|
||||
Init()
|
||||
pulsarAddress := Params.PulsarAddress
|
||||
consumerSubName := "subTimetick"
|
||||
producerChannels := channels
|
||||
consumerChannels := channels
|
||||
|
||||
inputStream, outputStream := initPulsarStream(pulsarAddress, producerChannels, consumerChannels, consumerSubName)
|
||||
|
||||
msgPackAddr := getMsgPack(ttmsgs)
|
||||
(*inputStream).Produce(msgPackAddr)
|
||||
return inputStream, outputStream
|
||||
}
|
||||
|
||||
func TestTt_NewSoftTtBarrier(t *testing.T) {
|
||||
channels := []string{"NewSoftTtBarrier"}
|
||||
ttmsgs := [][2]int{
|
||||
{1, 10},
|
||||
{2, 20},
|
||||
{3, 30},
|
||||
{4, 40},
|
||||
{1, 30},
|
||||
{2, 30},
|
||||
}
|
||||
|
||||
inStream, ttStream := producer(channels, ttmsgs)
|
||||
defer func() {
|
||||
(*inStream).Close()
|
||||
(*ttStream).Close()
|
||||
}()
|
||||
|
||||
minTtInterval := Timestamp(10)
|
||||
|
||||
validPeerIds := []UniqueID{1, 2, 3}
|
||||
|
||||
sttbarrier := newSoftTimeTickBarrier(context.TODO(), ttStream, validPeerIds, minTtInterval)
|
||||
assert.NotNil(t, sttbarrier)
|
||||
sttbarrier.Close()
|
||||
|
||||
validPeerIds2 := []UniqueID{1, 1, 1}
|
||||
sttbarrier = newSoftTimeTickBarrier(context.TODO(), ttStream, validPeerIds2, minTtInterval)
|
||||
assert.NotNil(t, sttbarrier)
|
||||
sttbarrier.Close()
|
||||
|
||||
// invalid peerIds
|
||||
invalidPeerIds1 := make([]UniqueID, 0, 3)
|
||||
sttbarrier = newSoftTimeTickBarrier(context.TODO(), ttStream, invalidPeerIds1, minTtInterval)
|
||||
assert.Nil(t, sttbarrier)
|
||||
|
||||
invalidPeerIds2 := []UniqueID{}
|
||||
sttbarrier = newSoftTimeTickBarrier(context.TODO(), ttStream, invalidPeerIds2, minTtInterval)
|
||||
assert.Nil(t, sttbarrier)
|
||||
}
|
||||
|
||||
func TestTt_NewHardTtBarrier(t *testing.T) {
|
||||
channels := []string{"NewHardTtBarrier"}
|
||||
ttmsgs := [][2]int{
|
||||
{1, 10},
|
||||
{2, 20},
|
||||
{3, 30},
|
||||
{4, 40},
|
||||
{1, 30},
|
||||
{2, 30},
|
||||
}
|
||||
inStream, ttStream := producer(channels, ttmsgs)
|
||||
defer func() {
|
||||
(*inStream).Close()
|
||||
(*ttStream).Close()
|
||||
}()
|
||||
|
||||
validPeerIds := []UniqueID{1, 2, 3}
|
||||
|
||||
sttbarrier := newHardTimeTickBarrier(context.TODO(), ttStream, validPeerIds)
|
||||
assert.NotNil(t, sttbarrier)
|
||||
sttbarrier.Close()
|
||||
|
||||
validPeerIds2 := []UniqueID{1, 1, 1}
|
||||
sttbarrier = newHardTimeTickBarrier(context.TODO(), ttStream, validPeerIds2)
|
||||
assert.NotNil(t, sttbarrier)
|
||||
sttbarrier.Close()
|
||||
|
||||
// invalid peerIds
|
||||
invalidPeerIds1 := make([]UniqueID, 0, 3)
|
||||
sttbarrier = newHardTimeTickBarrier(context.TODO(), ttStream, invalidPeerIds1)
|
||||
assert.Nil(t, sttbarrier)
|
||||
|
||||
invalidPeerIds2 := []UniqueID{}
|
||||
sttbarrier = newHardTimeTickBarrier(context.TODO(), ttStream, invalidPeerIds2)
|
||||
assert.Nil(t, sttbarrier)
|
||||
}
|
||||
|
||||
func TestTt_SoftTtBarrierStart(t *testing.T) {
|
||||
channels := []string{"SoftTtBarrierStart"}
|
||||
|
||||
ttmsgs := [][2]int{
|
||||
{1, 10},
|
||||
{2, 20},
|
||||
{3, 30},
|
||||
{4, 40},
|
||||
{1, 30},
|
||||
{2, 30},
|
||||
}
|
||||
inStream, ttStream := producer(channels, ttmsgs)
|
||||
defer func() {
|
||||
(*inStream).Close()
|
||||
(*ttStream).Close()
|
||||
}()
|
||||
|
||||
minTtInterval := Timestamp(10)
|
||||
peerIds := []UniqueID{1, 2, 3}
|
||||
sttbarrier := newSoftTimeTickBarrier(context.TODO(), ttStream, peerIds, minTtInterval)
|
||||
require.NotNil(t, sttbarrier)
|
||||
|
||||
sttbarrier.Start()
|
||||
defer sttbarrier.Close()
|
||||
|
||||
// Make sure all msgs in outputStream are consumed
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
ts, err := sttbarrier.GetTimeTick()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, Timestamp(30), ts)
|
||||
}
|
||||
|
||||
func TestTt_SoftTtBarrierGetTimeTickClose(t *testing.T) {
|
||||
channels := []string{"SoftTtBarrierGetTimeTickClose"}
|
||||
//ttmsgs := [][2]int{
|
||||
// {1, 10},
|
||||
// {2, 20},
|
||||
// {3, 30},
|
||||
// {4, 40},
|
||||
// {1, 30},
|
||||
// {2, 30},
|
||||
//}
|
||||
inStream, ttStream := producer(channels, nil)
|
||||
defer func() {
|
||||
(*inStream).Close()
|
||||
(*ttStream).Close()
|
||||
}()
|
||||
|
||||
minTtInterval := Timestamp(10)
|
||||
validPeerIds := []UniqueID{1, 2, 3}
|
||||
|
||||
sttbarrier := newSoftTimeTickBarrier(context.TODO(), ttStream, validPeerIds, minTtInterval)
|
||||
require.NotNil(t, sttbarrier)
|
||||
|
||||
sttbarrier.Start()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
sttbarrier.Close()
|
||||
}()
|
||||
wg.Wait()
|
||||
|
||||
ts, err := sttbarrier.GetTimeTick()
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, Timestamp(0), ts)
|
||||
|
||||
// Receive empty msgPacks
|
||||
channels01 := []string{"GetTimeTick01"}
|
||||
ttmsgs01 := [][2]int{}
|
||||
inStream01, ttStream01 := producer(channels01, ttmsgs01)
|
||||
defer func() {
|
||||
(*inStream01).Close()
|
||||
(*ttStream01).Close()
|
||||
}()
|
||||
|
||||
minTtInterval = Timestamp(10)
|
||||
validPeerIds = []UniqueID{1, 2, 3}
|
||||
|
||||
sttbarrier01 := newSoftTimeTickBarrier(context.TODO(), ttStream01, validPeerIds, minTtInterval)
|
||||
require.NotNil(t, sttbarrier01)
|
||||
sttbarrier01.Start()
|
||||
|
||||
var wg1 sync.WaitGroup
|
||||
wg1.Add(1)
|
||||
|
||||
go func() {
|
||||
defer wg1.Done()
|
||||
sttbarrier01.Close()
|
||||
}()
|
||||
|
||||
wg1.Wait()
|
||||
ts, err = sttbarrier01.GetTimeTick()
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, Timestamp(0), ts)
|
||||
}
|
||||
|
||||
func TestTt_SoftTtBarrierGetTimeTickCancel(t *testing.T) {
|
||||
channels := []string{"SoftTtBarrierGetTimeTickCancel"}
|
||||
//ttmsgs := [][2]int{
|
||||
// {1, 10},
|
||||
// {2, 20},
|
||||
// {3, 30},
|
||||
// {4, 40},
|
||||
// {1, 30},
|
||||
// {2, 30},
|
||||
//}
|
||||
inStream, ttStream := producer(channels, nil)
|
||||
defer func() {
|
||||
(*inStream).Close()
|
||||
(*ttStream).Close()
|
||||
}()
|
||||
|
||||
minTtInterval := Timestamp(10)
|
||||
validPeerIds := []UniqueID{1, 2, 3}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
sttbarrier := newSoftTimeTickBarrier(ctx, ttStream, validPeerIds, minTtInterval)
|
||||
require.NotNil(t, sttbarrier)
|
||||
|
||||
sttbarrier.Start()
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
cancel()
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
ts, err := sttbarrier.GetTimeTick()
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, Timestamp(0), ts)
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
func TestTt_HardTtBarrierStart(t *testing.T) {
|
||||
channels := []string{"HardTtBarrierStart"}
|
||||
|
||||
ttmsgs := [][2]int{
|
||||
{1, 10},
|
||||
{2, 10},
|
||||
{3, 10},
|
||||
}
|
||||
|
||||
inStream, ttStream := producer(channels, ttmsgs)
|
||||
defer func() {
|
||||
(*inStream).Close()
|
||||
(*ttStream).Close()
|
||||
}()
|
||||
|
||||
peerIds := []UniqueID{1, 2, 3}
|
||||
sttbarrier := newHardTimeTickBarrier(context.TODO(), ttStream, peerIds)
|
||||
require.NotNil(t, sttbarrier)
|
||||
|
||||
sttbarrier.Start()
|
||||
defer sttbarrier.Close()
|
||||
|
||||
// Make sure all msgs in outputStream are consumed
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
ts, err := sttbarrier.GetTimeTick()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, Timestamp(10), ts)
|
||||
}
|
||||
|
||||
func TestTt_HardTtBarrierGetTimeTick(t *testing.T) {
|
||||
|
||||
channels := []string{"HardTtBarrierGetTimeTick"}
|
||||
|
||||
ttmsgs := [][2]int{
|
||||
{1, 10},
|
||||
{1, 20},
|
||||
{1, 30},
|
||||
{2, 10},
|
||||
{2, 20},
|
||||
{3, 10},
|
||||
{3, 20},
|
||||
}
|
||||
|
||||
inStream, ttStream := producer(channels, ttmsgs)
|
||||
defer func() {
|
||||
(*inStream).Close()
|
||||
(*ttStream).Close()
|
||||
}()
|
||||
|
||||
peerIds := []UniqueID{1, 2, 3}
|
||||
sttbarrier := newHardTimeTickBarrier(context.TODO(), ttStream, peerIds)
|
||||
require.NotNil(t, sttbarrier)
|
||||
|
||||
sttbarrier.Start()
|
||||
defer sttbarrier.Close()
|
||||
|
||||
// Make sure all msgs in outputStream are consumed
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
ts, err := sttbarrier.GetTimeTick()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, Timestamp(10), ts)
|
||||
|
||||
ts, err = sttbarrier.GetTimeTick()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, Timestamp(20), ts)
|
||||
|
||||
// ---------------------stuck--------------------------
|
||||
channelsStuck := []string{"HardTtBarrierGetTimeTickStuck"}
|
||||
|
||||
ttmsgsStuck := [][2]int{
|
||||
{1, 10},
|
||||
{2, 10},
|
||||
}
|
||||
|
||||
inStreamStuck, ttStreamStuck := producer(channelsStuck, ttmsgsStuck)
|
||||
defer func() {
|
||||
(*inStreamStuck).Close()
|
||||
(*ttStreamStuck).Close()
|
||||
}()
|
||||
|
||||
peerIdsStuck := []UniqueID{1, 2, 3}
|
||||
sttbarrierStuck := newHardTimeTickBarrier(context.TODO(), ttStreamStuck, peerIdsStuck)
|
||||
require.NotNil(t, sttbarrierStuck)
|
||||
|
||||
sttbarrierStuck.Start()
|
||||
go func() {
|
||||
time.Sleep(1 * time.Second)
|
||||
sttbarrierStuck.Close()
|
||||
}()
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// This call blocks until the barrier is closed above
|
||||
ts, err = sttbarrierStuck.GetTimeTick()
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, Timestamp(0), ts)
|
||||
|
||||
// ---------------------context cancel------------------------
|
||||
channelsCancel := []string{"HardTtBarrierGetTimeTickCancel"}
|
||||
|
||||
ttmsgsCancel := [][2]int{
|
||||
{1, 10},
|
||||
{2, 10},
|
||||
}
|
||||
|
||||
inStreamCancel, ttStreamCancel := producer(channelsCancel, ttmsgsCancel)
|
||||
defer func() {
|
||||
(*inStreamCancel).Close()
|
||||
(*ttStreamCancel).Close()
|
||||
}()
|
||||
|
||||
peerIdsCancel := []UniqueID{1, 2, 3}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
sttbarrierCancel := newHardTimeTickBarrier(ctx, ttStreamCancel, peerIdsCancel)
|
||||
require.NotNil(t, sttbarrierCancel)
|
||||
|
||||
sttbarrierCancel.Start()
|
||||
go func() {
|
||||
time.Sleep(1 * time.Second)
|
||||
cancel()
|
||||
}()
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// This call blocks until the context is cancelled above
|
||||
ts, err = sttbarrierCancel.GetTimeTick()
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, Timestamp(0), ts)
|
||||
}
|
|
@@ -1,202 +0,0 @@
|
|||
// Copyright 2016 TiKV Project Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package master
|
||||
|
||||
import (
|
||||
"log"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
"github.com/zilliztech/milvus-distributed/internal/kv"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
|
||||
)
|
||||
|
||||
const (
|
||||
// UpdateTimestampStep is used to update timestamp.
|
||||
UpdateTimestampStep = 50 * time.Millisecond
|
||||
// updateTimestampGuard is the min timestamp interval.
|
||||
updateTimestampGuard = time.Millisecond
|
||||
// maxLogical is the max upper limit for logical time.
|
||||
// When a TSO's logical time reaches this limit,
|
||||
// the physical time will be forced to increase.
|
||||
maxLogical = int64(1 << 18)
|
||||
)
|
||||
|
||||
// atomicObject is used to store the current TSO in memory.
|
||||
type atomicObject struct {
|
||||
physical time.Time
|
||||
logical int64
|
||||
}
|
||||
|
||||
// timestampOracle is used to maintain the logic of tso.
|
||||
type timestampOracle struct {
|
||||
key string
|
||||
kvBase kv.TxnBase
|
||||
|
||||
// TODO: remove saveInterval
|
||||
saveInterval time.Duration
|
||||
maxResetTSGap func() time.Duration
|
||||
// For tso, set after the PD becomes a leader.
|
||||
TSO unsafe.Pointer
|
||||
lastSavedTime atomic.Value
|
||||
}
|
||||
|
||||
func (t *timestampOracle) loadTimestamp() (time.Time, error) {
|
||||
strData, err := t.kvBase.Load(t.key)
|
||||
|
||||
var binData []byte = []byte(strData)
|
||||
|
||||
if err != nil {
|
||||
return typeutil.ZeroTime, err
|
||||
}
|
||||
if len(binData) == 0 {
|
||||
return typeutil.ZeroTime, nil
|
||||
}
|
||||
return typeutil.ParseTimestamp(binData)
|
||||
}
|
||||
|
||||
// save timestamp; if lastTs is 0, we assume the timestamp doesn't exist and create it,
|
||||
// otherwise, update it.
|
||||
func (t *timestampOracle) saveTimestamp(ts time.Time) error {
|
||||
data := typeutil.Uint64ToBytes(uint64(ts.UnixNano()))
|
||||
err := t.kvBase.Save(t.key, string(data))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
t.lastSavedTime.Store(ts)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *timestampOracle) InitTimestamp() error {
|
||||
|
||||
//last, err := t.loadTimestamp()
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
|
||||
next := time.Now()
|
||||
|
||||
// If the current system time minus the saved etcd timestamp is less than `updateTimestampGuard`,
|
||||
// the timestamp allocation will start from the saved etcd timestamp temporarily.
|
||||
//if typeutil.SubTimeByWallClock(next, last) < updateTimestampGuard {
|
||||
// next = last.Add(updateTimestampGuard)
|
||||
//}
|
||||
|
||||
save := next.Add(t.saveInterval)
|
||||
if err := t.saveTimestamp(save); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//log.Print("sync and save timestamp", zap.Time("last", last), zap.Time("save", save), zap.Time("next", next))
|
||||
|
||||
current := &atomicObject{
|
||||
physical: next,
|
||||
}
|
||||
atomic.StorePointer(&t.TSO, unsafe.Pointer(current))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetUserTimestamp update the physical part with specified tso.
|
||||
func (t *timestampOracle) ResetUserTimestamp(tso uint64) error {
|
||||
physical, _ := tsoutil.ParseTS(tso)
|
||||
next := physical.Add(time.Millisecond)
|
||||
prev := (*atomicObject)(atomic.LoadPointer(&t.TSO))
|
||||
|
||||
// do not update
|
||||
if typeutil.SubTimeByWallClock(next, prev.physical) <= 3*updateTimestampGuard {
|
||||
return errors.New("the specified ts too small than now")
|
||||
}
|
||||
|
||||
if typeutil.SubTimeByWallClock(next, prev.physical) >= t.maxResetTSGap() {
|
||||
return errors.New("the specified ts too large than now")
|
||||
}
|
||||
|
||||
save := next.Add(t.saveInterval)
|
||||
if err := t.saveTimestamp(save); err != nil {
|
||||
return err
|
||||
}
|
||||
update := &atomicObject{
|
||||
physical: next,
|
||||
}
|
||||
atomic.CompareAndSwapPointer(&t.TSO, unsafe.Pointer(prev), unsafe.Pointer(update))
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateTimestamp is used to update the timestamp.
|
||||
// This function will do two things:
|
||||
// 1. When the logical time is going to be used up, increase the current physical time.
|
||||
// 2. When the time window is not big enough, which means the saved etcd time minus the next physical time
|
||||
// will be less than or equal to `updateTimestampGuard`, then the time window needs to be updated and
|
||||
// we also need to save the next physical time plus `TsoSaveInterval` into etcd.
|
||||
//
|
||||
// Here are some constraints that this function must satisfy:
|
||||
// 1. The saved time is monotonically increasing.
|
||||
// 2. The physical time is monotonically increasing.
|
||||
// 3. The physical time is always less than the saved timestamp.
|
||||
func (t *timestampOracle) UpdateTimestamp() error {
|
||||
prev := (*atomicObject)(atomic.LoadPointer(&t.TSO))
|
||||
now := time.Now()
|
||||
|
||||
jetLag := typeutil.SubTimeByWallClock(now, prev.physical)
|
||||
if jetLag > 3*UpdateTimestampStep {
|
||||
log.Print("clock offset", zap.Duration("jet-lag", jetLag), zap.Time("prev-physical", prev.physical), zap.Time("now", now))
|
||||
}
|
||||
|
||||
var next time.Time
|
||||
prevLogical := atomic.LoadInt64(&prev.logical)
|
||||
// If the system time is greater, it will be synchronized with the system time.
|
||||
if jetLag > updateTimestampGuard {
|
||||
next = now
|
||||
} else if prevLogical > maxLogical/2 {
|
||||
// The reason for choosing maxLogical/2 here is that it's big enough for common cases.
|
||||
// Because enough timestamps can be allocated before the next update.
|
||||
log.Print("the logical time may be not enough", zap.Int64("prev-logical", prevLogical))
|
||||
next = prev.physical.Add(time.Millisecond)
|
||||
} else {
|
||||
// It will still use the previous physical time to alloc the timestamp.
|
||||
return nil
|
||||
}
|
||||
|
||||
// It is not safe to increase the physical time to `next`.
|
||||
// The time window needs to be updated and saved to etcd.
|
||||
if typeutil.SubTimeByWallClock(t.lastSavedTime.Load().(time.Time), next) <= updateTimestampGuard {
|
||||
save := next.Add(t.saveInterval)
|
||||
if err := t.saveTimestamp(save); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
current := &atomicObject{
|
||||
physical: next,
|
||||
logical: 0,
|
||||
}
|
||||
|
||||
atomic.StorePointer(&t.TSO, unsafe.Pointer(current))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ResetTimestamp is used to reset the timestamp.
|
||||
func (t *timestampOracle) ResetTimestamp() {
|
||||
zero := &atomicObject{
|
||||
physical: time.Now(),
|
||||
}
|
||||
atomic.StorePointer(&t.TSO, unsafe.Pointer(zero))
|
||||
}
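The oracle above hands out hybrid timestamps: a physical part (wall-clock milliseconds) plus a bounded logical counter, which is why maxLogical is 1<<18 and why the physical part is bumped once the logical half is close to exhaustion. A sketch of that packing, assuming an 18-bit logical field in the spirit of the tsoutil.ComposeTS/ParseTS helpers used elsewhere in this tree; treat the exact bit layout as an assumption.

```go
package main

import (
	"fmt"
	"time"
)

const logicalBits = 18 // assumed split, consistent with maxLogical = int64(1 << 18) above

// composeTS packs physical milliseconds and a logical counter into one uint64.
func composeTS(physicalMs int64, logical int64) uint64 {
	return uint64(physicalMs)<<logicalBits | uint64(logical)
}

// parseTS splits a hybrid timestamp back into its two halves.
func parseTS(ts uint64) (physicalMs int64, logical int64) {
	return int64(ts >> logicalBits), int64(ts & ((1 << logicalBits) - 1))
}

func main() {
	now := time.Now().UnixNano() / int64(time.Millisecond)
	ts := composeTS(now, 7)
	phy, logic := parseTS(ts)
	fmt.Println(phy == now, logic) // true 7
}
```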
|
|
@@ -7,13 +7,13 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/master"
|
||||
"github.com/zilliztech/milvus-distributed/internal/masterservice"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
)
|
||||
|
||||
type RmqMsgStream struct {
|
||||
isServing int64
|
||||
idAllocator *master.GlobalIDAllocator
|
||||
idAllocator *masterservice.GlobalIDAllocator
|
||||
ctx context.Context
|
||||
serverLoopWg sync.WaitGroup
|
||||
serverLoopCtx context.Context
|
||||
|
@@ -53,7 +53,7 @@ func (ms *RmqMsgStream) stopServerLoop() {
|
|||
func (ms *RmqMsgStream) tsLoop() {
|
||||
defer ms.serverLoopWg.Done()
|
||||
|
||||
ms.tsoTicker = time.NewTicker(master.UpdateTimestampStep)
|
||||
ms.tsoTicker = time.NewTicker(masterservice.UpdateTimestampStep)
|
||||
defer ms.tsoTicker.Stop()
|
||||
|
||||
ctx, cancel := context.WithCancel(ms.serverLoopCtx)
|
||||
|
|
|
@@ -1,594 +0,0 @@
|
|||
package proxynode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/zilliztech/milvus-distributed/internal/master"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var ctx context.Context
|
||||
var cancel func()
|
||||
|
||||
//var proxyConn *grpc.ClientConn
|
||||
//var proxyClient milvuspb.MilvusServiceClient
|
||||
|
||||
var proxyServer *NodeImpl
|
||||
|
||||
var masterServer *master.Master
|
||||
|
||||
var testNum = 10
|
||||
|
||||
func makeNewChannalNames(names []string, suffix string) []string {
|
||||
var ret []string
|
||||
for _, name := range names {
|
||||
ret = append(ret, name+suffix)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func refreshChannelNames() {
|
||||
suffix := "_test" + strconv.FormatInt(rand.Int63n(100), 10)
|
||||
master.Params.DDChannelNames = makeNewChannalNames(master.Params.DDChannelNames, suffix)
|
||||
master.Params.WriteNodeTimeTickChannelNames = makeNewChannalNames(master.Params.WriteNodeTimeTickChannelNames, suffix)
|
||||
master.Params.InsertChannelNames = makeNewChannalNames(master.Params.InsertChannelNames, suffix)
|
||||
master.Params.K2SChannelNames = makeNewChannalNames(master.Params.K2SChannelNames, suffix)
|
||||
master.Params.ProxyServiceTimeTickChannelNames = makeNewChannalNames(master.Params.ProxyServiceTimeTickChannelNames, suffix)
|
||||
}
|
||||
|
||||
func startMaster(ctx context.Context) {
|
||||
master.Init()
|
||||
refreshChannelNames()
|
||||
etcdAddr := master.Params.EtcdAddress
|
||||
metaRootPath := master.Params.MetaRootPath
|
||||
|
||||
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, err = etcdCli.Delete(context.TODO(), metaRootPath, clientv3.WithPrefix())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
svr, err := master.CreateServer(ctx)
|
||||
masterServer = svr
|
||||
if err != nil {
|
||||
log.Print("create server failed", zap.Error(err))
|
||||
}
|
||||
if err := svr.Run(int64(master.Params.Port)); err != nil {
|
||||
log.Fatal("run server failed", zap.Error(err))
|
||||
}
|
||||
|
||||
fmt.Println("Waiting for server!", svr.IsServing())
|
||||
|
||||
}
|
||||
|
||||
func startProxy(ctx context.Context) {
|
||||
|
||||
svr, err := NewProxyNodeImpl(ctx)
|
||||
proxyServer = svr
|
||||
if err != nil {
|
||||
log.Print("create proxynode failed", zap.Error(err))
|
||||
}
|
||||
|
||||
if err := svr.Init(); err != nil {
|
||||
log.Fatal("init proxynode failed", zap.Error(err))
|
||||
}
|
||||
|
||||
// TODO: change to wait until master is ready
|
||||
if err := svr.Start(); err != nil {
|
||||
log.Fatal("run proxynode failed", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func setup() {
|
||||
Params.Init()
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
|
||||
startMaster(ctx)
|
||||
startProxy(ctx)
|
||||
//proxyAddr := Params.NetworkAddress()
|
||||
//addr := strings.Split(proxyAddr, ":")
|
||||
//if addr[0] == "0.0.0.0" {
|
||||
// proxyAddr = "127.0.0.1:" + addr[1]
|
||||
//}
|
||||
//
|
||||
//conn, err := grpc.DialContext(ctx, proxyAddr, grpc.WithInsecure(), grpc.WithBlock())
|
||||
//if err != nil {
|
||||
// log.Fatalf("Connect to proxynode failed, error= %v", err)
|
||||
//}
|
||||
//proxyConn = conn
|
||||
//proxyClient = milvuspb.NewMilvusServiceClient(proxyConn)
|
||||
|
||||
}
|
||||
|
||||
func shutdown() {
|
||||
cancel()
|
||||
masterServer.Close()
|
||||
proxyServer.Stop()
|
||||
}
|
||||
|
||||
func hasCollection(t *testing.T, name string) bool {
|
||||
resp, err := proxyServer.HasCollection(&milvuspb.HasCollectionRequest{CollectionName: name})
|
||||
msg := "Has Collection " + name + " should succeed!"
|
||||
assert.Nil(t, err, msg)
|
||||
return resp.Value
|
||||
}
|
||||
|
||||
func createCollection(t *testing.T, name string) {
|
||||
has := hasCollection(t, name)
|
||||
if has {
|
||||
dropCollection(t, name)
|
||||
}
|
||||
|
||||
schema := &schemapb.CollectionSchema{
|
||||
Name: name,
|
||||
Description: "no description",
|
||||
AutoID: true,
|
||||
Fields: make([]*schemapb.FieldSchema, 2),
|
||||
}
|
||||
fieldName := "Field1"
|
||||
schema.Fields[0] = &schemapb.FieldSchema{
|
||||
Name: fieldName,
|
||||
Description: "no description",
|
||||
DataType: schemapb.DataType_INT32,
|
||||
}
|
||||
fieldName = "vec"
|
||||
schema.Fields[1] = &schemapb.FieldSchema{
|
||||
Name: fieldName,
|
||||
Description: "vector",
|
||||
DataType: schemapb.DataType_VECTOR_FLOAT,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "16",
|
||||
},
|
||||
},
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "metric_type",
|
||||
Value: "L2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
schemaBytes, err := proto.Marshal(schema)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
req := &milvuspb.CreateCollectionRequest{
|
||||
CollectionName: name,
|
||||
Schema: schemaBytes,
|
||||
}
|
||||
resp, err := proxyServer.CreateCollection(req)
|
||||
assert.Nil(t, err)
|
||||
msg := "Create Collection " + name + " should succeed!"
|
||||
assert.Equal(t, resp.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
|
||||
}
|
||||
|
||||
func dropCollection(t *testing.T, name string) {
|
||||
req := &milvuspb.DropCollectionRequest{
|
||||
CollectionName: name,
|
||||
}
|
||||
resp, err := proxyServer.DropCollection(req)
|
||||
assert.Nil(t, err)
|
||||
msg := "Drop Collection " + name + " should succeed! err :" + resp.Reason
|
||||
assert.Equal(t, resp.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
|
||||
}
|
||||
|
||||
func createIndex(t *testing.T, collectionName, fieldName string) {
|
||||
|
||||
req := &milvuspb.CreateIndexRequest{
|
||||
CollectionName: collectionName,
|
||||
FieldName: fieldName,
|
||||
ExtraParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "metric_type",
|
||||
Value: "L2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := proxyServer.CreateIndex(req)
|
||||
assert.Nil(t, err)
|
||||
msg := "Create Index for " + fieldName + " should succeed!"
|
||||
assert.Equal(t, resp.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
|
||||
}
|
||||
|
||||
func TestProxy_CreateCollection(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < testNum; i++ {
|
||||
i := i
|
||||
collectionName := "CreateCollection" + strconv.FormatInt(int64(i), 10)
|
||||
wg.Add(1)
|
||||
go func(group *sync.WaitGroup) {
|
||||
defer group.Done()
|
||||
println("collectionName:", collectionName)
|
||||
createCollection(t, collectionName)
|
||||
dropCollection(t, collectionName)
|
||||
}(&wg)
|
||||
}
|
||||
wg.Wait()
|
||||
}
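A note on the `i := i` lines used throughout these tests: before Go 1.22 the loop variable was shared across iterations, so each goroutine has to capture its own copy before launching. A minimal standalone sketch of the idiom:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		i := i // capture a per-iteration copy for the goroutine below
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("worker", i)
		}()
	}
	wg.Wait()
}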
|
||||
|
||||
func TestProxy_HasCollection(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < testNum; i++ {
|
||||
i := i
|
||||
collectionName := "CreateCollection" + strconv.FormatInt(int64(i), 10)
|
||||
wg.Add(1)
|
||||
go func(group *sync.WaitGroup) {
|
||||
defer group.Done()
|
||||
createCollection(t, collectionName)
|
||||
has := hasCollection(t, collectionName)
|
||||
msg := "Should has Collection " + collectionName
|
||||
assert.Equal(t, has, true, msg)
|
||||
dropCollection(t, collectionName)
|
||||
}(&wg)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestProxy_DescribeCollection(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < testNum; i++ {
|
||||
i := i
|
||||
collectionName := "CreateCollection" + strconv.FormatInt(int64(i), 10)
|
||||
|
||||
wg.Add(1)
|
||||
go func(group *sync.WaitGroup) {
|
||||
defer group.Done()
|
||||
createCollection(t, collectionName)
|
||||
has := hasCollection(t, collectionName)
|
||||
if has {
|
||||
resp, err := proxyServer.DescribeCollection(&milvuspb.DescribeCollectionRequest{CollectionName: collectionName})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
msg := "Describe Collection " + strconv.Itoa(i) + " should succeed!"
|
||||
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
|
||||
t.Logf("Describe Collection %v: %v", i, resp)
|
||||
dropCollection(t, collectionName)
|
||||
}
|
||||
}(&wg)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestProxy_ShowCollections(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < testNum; i++ {
|
||||
i := i
|
||||
collectionName := "CreateCollection" + strconv.FormatInt(int64(i), 10)
|
||||
|
||||
wg.Add(1)
|
||||
go func(group *sync.WaitGroup) {
|
||||
defer group.Done()
|
||||
createCollection(t, collectionName)
|
||||
has := hasCollection(t, collectionName)
|
||||
if has {
|
||||
resp, err := proxyServer.ShowCollections(&milvuspb.ShowCollectionRequest{})
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
msg := "Show collections " + strconv.Itoa(i) + " should succeed!"
|
||||
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
|
||||
t.Logf("Show collections %v: %v", i, resp)
|
||||
dropCollection(t, collectionName)
|
||||
}
|
||||
}(&wg)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestProxy_Insert(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < testNum; i++ {
|
||||
i := i
|
||||
|
||||
collectionName := "CreateCollection" + strconv.FormatInt(int64(i), 10)
|
||||
req := &milvuspb.InsertRequest{
|
||||
CollectionName: collectionName,
|
||||
PartitionName: "haha",
|
||||
RowData: make([]*commonpb.Blob, 0),
|
||||
HashKeys: make([]uint32, 0),
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go func(group *sync.WaitGroup) {
|
||||
defer group.Done()
|
||||
createCollection(t, collectionName)
|
||||
has := hasCollection(t, collectionName)
|
||||
if has {
|
||||
resp, err := proxyServer.Insert(req)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
msg := "Insert into Collection " + strconv.Itoa(i) + " should succeed!"
|
||||
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
|
||||
dropCollection(t, collectionName)
|
||||
}
|
||||
}(&wg)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestProxy_Search(t *testing.T) {
|
||||
var sendWg sync.WaitGroup
|
||||
var queryWg sync.WaitGroup
|
||||
queryDone := make(chan int)
|
||||
|
||||
sendWg.Add(1)
|
||||
go func(group *sync.WaitGroup) {
|
||||
defer group.Done()
|
||||
queryResultChannels := []string{"QueryResult"}
|
||||
bufSize := 1024
|
||||
queryResultMsgStream := pulsarms.NewPulsarMsgStream(ctx, int64(bufSize))
|
||||
pulsarAddress := Params.PulsarAddress
|
||||
queryResultMsgStream.SetPulsarClient(pulsarAddress)
|
||||
assert.NotEqual(t, queryResultMsgStream, nil, "query result message stream should not be nil!")
|
||||
queryResultMsgStream.CreatePulsarProducers(queryResultChannels)
|
||||
|
||||
i := 0
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Logf("query result message stream is closed ...")
|
||||
queryResultMsgStream.Close()
|
||||
return
|
||||
case <-queryDone:
|
||||
return
|
||||
default:
|
||||
for j := 0; j < 4; j++ {
|
||||
searchResultMsg := &msgstream.SearchResultMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
HashValues: []uint32{1},
|
||||
},
|
||||
SearchResults: internalpb2.SearchResults{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kSearchResult,
|
||||
MsgID: int64(i % testNum),
|
||||
},
|
||||
},
|
||||
}
|
||||
msgPack := &msgstream.MsgPack{
|
||||
Msgs: make([]msgstream.TsMsg, 1),
|
||||
}
|
||||
msgPack.Msgs[0] = searchResultMsg
|
||||
queryResultMsgStream.Produce(msgPack)
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
}(&sendWg)
|
||||
|
||||
for i := 0; i < testNum; i++ {
|
||||
i := i
|
||||
collectionName := "CreateCollection" + strconv.FormatInt(int64(i), 10)
|
||||
req := &milvuspb.SearchRequest{
|
||||
CollectionName: collectionName,
|
||||
}
|
||||
queryWg.Add(1)
|
||||
go func(group *sync.WaitGroup) {
|
||||
defer group.Done()
|
||||
//createCollection(t, collectionName)
|
||||
has := hasCollection(t, collectionName)
|
||||
if !has {
|
||||
createCollection(t, collectionName)
|
||||
}
|
||||
resp, err := proxyServer.Search(req)
|
||||
t.Logf("response of search collection %v: %v", i, resp)
|
||||
assert.Nil(t, err)
|
||||
dropCollection(t, collectionName)
|
||||
}(&queryWg)
|
||||
}
|
||||
|
||||
t.Log("wait query to finish...")
|
||||
queryWg.Wait()
|
||||
t.Log("query finish ...")
|
||||
queryDone <- 1
|
||||
sendWg.Wait()
|
||||
}
|
||||
|
||||
func TestProxy_AssignSegID(t *testing.T) {
|
||||
collectionName := "CreateCollection1"
|
||||
createCollection(t, collectionName)
|
||||
testNum := 1
|
||||
futureTS := tsoutil.ComposeTS(time.Now().Add(time.Second*-1000).UnixNano()/int64(time.Millisecond), 0)
|
||||
for i := 0; i < testNum; i++ {
|
||||
segID, err := proxyServer.segAssigner.GetSegmentID(collectionName, Params.DefaultPartitionTag, int32(i), 200000, futureTS)
|
||||
assert.Nil(t, err)
|
||||
fmt.Println("segID", segID)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestProxy_DropCollection(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < testNum; i++ {
|
||||
i := i
|
||||
collectionName := "CreateCollection" + strconv.FormatInt(int64(i), 10)
|
||||
wg.Add(1)
|
||||
go func(group *sync.WaitGroup) {
|
||||
defer group.Done()
|
||||
createCollection(t, collectionName)
|
||||
has := hasCollection(t, collectionName)
|
||||
if has {
|
||||
dropCollection(t, collectionName)
|
||||
}
|
||||
}(&wg)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestProxy_PartitionGRPC(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
collName := "collPartTest"
|
||||
createCollection(t, collName)
|
||||
|
||||
for i := 0; i < testNum; i++ {
|
||||
wg.Add(1)
|
||||
i := i
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
tag := fmt.Sprintf("partition_%d", i)
|
||||
preq := &milvuspb.HasPartitionRequest{
|
||||
CollectionName: collName,
|
||||
PartitionName: tag,
|
||||
}
|
||||
|
||||
stb, err := proxyServer.HasPartition(preq)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, stb.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
assert.Equal(t, stb.Value, false)
|
||||
|
||||
cpreq := &milvuspb.CreatePartitionRequest{
|
||||
CollectionName: collName,
|
||||
PartitionName: tag,
|
||||
}
|
||||
st, err := proxyServer.CreatePartition(cpreq)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
|
||||
stb, err = proxyServer.HasPartition(preq)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, stb.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
assert.Equal(t, stb.Value, true)
|
||||
|
||||
//std, err := proxyServer.DescribePartition(ctx, preq)
|
||||
//assert.Nil(t, err)
|
||||
//assert.Equal(t, std.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
|
||||
sts, err := proxyServer.ShowPartitions(&milvuspb.ShowPartitionRequest{CollectionName: collName})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, sts.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
assert.True(t, len(sts.PartitionNames) >= 2)
|
||||
assert.True(t, len(sts.PartitionNames) <= testNum+1)
|
||||
|
||||
dpreq := &milvuspb.DropPartitionRequest{
|
||||
CollectionName: collName,
|
||||
PartitionName: tag,
|
||||
}
|
||||
st, err = proxyServer.DropPartition(dpreq)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
dropCollection(t, collName)
|
||||
}
|
||||
|
||||
func TestProxy_CreateIndex(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for i := 0; i < testNum; i++ {
|
||||
i := i
|
||||
collName := "collName" + strconv.FormatInt(int64(i), 10)
|
||||
fieldName := "Field1"
|
||||
if i%2 == 0 {
|
||||
fieldName = "vec"
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(group *sync.WaitGroup) {
|
||||
defer group.Done()
|
||||
createCollection(t, collName)
|
||||
if i%2 == 0 {
|
||||
createIndex(t, collName, fieldName)
|
||||
}
|
||||
dropCollection(t, collName)
|
||||
// dropIndex(t, collectionName, fieldName, indexName)
|
||||
}(&wg)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestProxy_DescribeIndex(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for i := 0; i < testNum; i++ {
|
||||
i := i
|
||||
collName := "collName" + strconv.FormatInt(int64(i), 10)
|
||||
fieldName := "Field1"
|
||||
if i%2 == 0 {
|
||||
fieldName = "vec"
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(group *sync.WaitGroup) {
|
||||
defer group.Done()
|
||||
createCollection(t, collName)
|
||||
if i%2 == 0 {
|
||||
createIndex(t, collName, fieldName)
|
||||
}
|
||||
req := &milvuspb.DescribeIndexRequest{
|
||||
CollectionName: collName,
|
||||
FieldName: fieldName,
|
||||
}
|
||||
resp, err := proxyServer.DescribeIndex(req)
|
||||
assert.Nil(t, err)
|
||||
msg := "Describe Index for " + fieldName + "should successed!"
|
||||
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
|
||||
dropCollection(t, collName)
|
||||
}(&wg)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestProxy_GetIndexState(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for i := 0; i < testNum; i++ {
|
||||
i := i
|
||||
collName := "collName" + strconv.FormatInt(int64(i), 10)
|
||||
fieldName := "Field1"
|
||||
if i%2 == 0 {
|
||||
fieldName = "vec"
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(group *sync.WaitGroup) {
|
||||
defer group.Done()
|
||||
createCollection(t, collName)
|
||||
if i%2 == 0 {
|
||||
createIndex(t, collName, fieldName)
|
||||
}
|
||||
req := &milvuspb.IndexStateRequest{
|
||||
CollectionName: collName,
|
||||
FieldName: fieldName,
|
||||
}
|
||||
resp, err := proxyServer.GetIndexState(req)
|
||||
assert.Nil(t, err)
|
||||
msg := "Describe Index Progress for " + fieldName + "should succeed!"
|
||||
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
|
||||
assert.True(t, resp.State == commonpb.IndexState_FINISHED)
|
||||
dropCollection(t, collName)
|
||||
}(&wg)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
setup()
|
||||
code := m.Run()
|
||||
shutdown()
|
||||
os.Exit(code)
|
||||
}
|
|
@@ -8,7 +8,7 @@ import (
	"strings"

	"github.com/zilliztech/milvus-distributed/internal/errors"
	ms "github.com/zilliztech/milvus-distributed/internal/master"
	ms "github.com/zilliztech/milvus-distributed/internal/masterservice"
	"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
@@ -1,138 +0,0 @@
|
|||
package writerclient
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
"github.com/zilliztech/milvus-distributed/internal/kv"
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
pb "github.com/zilliztech/milvus-distributed/internal/proto/writerpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
|
||||
)
|
||||
|
||||
type UniqueID = typeutil.UniqueID
|
||||
|
||||
type Timestamp = typeutil.Timestamp
|
||||
|
||||
type Client struct {
|
||||
kvClient kv.TxnBase // client of a reliable kv service, i.e. etcd client
|
||||
kvPrefix string
|
||||
|
||||
flushStream msgstream.MsgStream
|
||||
}
|
||||
|
||||
func NewWriterClient(etcdAddress string, kvRootPath string, writeNodeSegKvSubPath string, flushStream msgstream.MsgStream) (*Client, error) {
|
||||
// init kv client
|
||||
etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kvClient := etcdkv.NewEtcdKV(etcdClient, kvRootPath)
|
||||
|
||||
return &Client{
|
||||
kvClient: kvClient,
|
||||
kvPrefix: writeNodeSegKvSubPath,
|
||||
flushStream: flushStream,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type SegmentDescription struct {
|
||||
SegmentID UniqueID
|
||||
IsClosed bool
|
||||
OpenTime Timestamp
|
||||
CloseTime Timestamp
|
||||
}
|
||||
|
||||
func (c *Client) FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error {
|
||||
baseMsg := msgstream.BaseMsg{
|
||||
BeginTimestamp: 0,
|
||||
EndTimestamp: 0,
|
||||
HashValues: []uint32{0},
|
||||
}
|
||||
|
||||
flushMsg := internalpb2.FlushMsg{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kFlush,
|
||||
Timestamp: timestamp,
|
||||
},
|
||||
SegmentID: segmentID,
|
||||
CollectionID: collectionID,
|
||||
PartitionTag: partitionTag,
|
||||
}
|
||||
|
||||
fMsg := &msgstream.FlushMsg{
|
||||
BaseMsg: baseMsg,
|
||||
FlushMsg: flushMsg,
|
||||
}
|
||||
msgPack := msgstream.MsgPack{}
|
||||
msgPack.Msgs = append(msgPack.Msgs, fMsg)
|
||||
|
||||
err := c.flushStream.Produce(&msgPack)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Client) DescribeSegment(segmentID UniqueID) (*SegmentDescription, error) {
|
||||
// query etcd
|
||||
ret := &SegmentDescription{
|
||||
SegmentID: segmentID,
|
||||
IsClosed: false,
|
||||
}
|
||||
|
||||
key := c.kvPrefix + strconv.FormatInt(segmentID, 10)
|
||||
|
||||
etcdKV, ok := c.kvClient.(*etcdkv.EtcdKV)
|
||||
if !ok {
|
||||
return nil, errors.New("type assertion failed for etcd kv")
|
||||
}
|
||||
count, err := etcdKV.GetCount(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if count <= 0 {
|
||||
ret.IsClosed = false
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
value, err := c.kvClient.Load(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
flushMeta := pb.SegmentFlushMeta{}
|
||||
|
||||
err = proto.UnmarshalText(value, &flushMeta)
|
||||
if err != nil {
|
||||
return ret, err
|
||||
}
|
||||
ret.IsClosed = flushMeta.IsClosed
|
||||
ret.OpenTime = flushMeta.OpenTime
|
||||
ret.CloseTime = flushMeta.CloseTime
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (c *Client) GetInsertBinlogPaths(segmentID UniqueID) (map[int64][]string, error) {
|
||||
key := c.kvPrefix + strconv.FormatInt(segmentID, 10)
|
||||
|
||||
value, err := c.kvClient.Load(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
flushMeta := pb.SegmentFlushMeta{}
|
||||
err = proto.UnmarshalText(value, &flushMeta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ret := make(map[int64][]string)
|
||||
for _, field := range flushMeta.Fields {
|
||||
ret[field.FieldID] = field.BinlogPaths
|
||||
}
|
||||
return ret, nil
|
||||
}
|
|
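The removed writer client stores segment flush metadata in etcd as text-format protobuf, which is why DescribeSegment and GetInsertBinlogPaths both decode through proto.UnmarshalText. A small round-trip sketch of that encoding, assuming it is compiled inside this module (writerpb is an internal package) and that the field names match the generated type used above:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "github.com/zilliztech/milvus-distributed/internal/proto/writerpb"
)

func main() {
	meta := &pb.SegmentFlushMeta{IsClosed: true}

	// Encode to the same text form the client writes into etcd ...
	text := proto.MarshalTextString(meta)

	// ... and decode it the way DescribeSegment does.
	decoded := &pb.SegmentFlushMeta{}
	if err := proto.UnmarshalText(text, decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.IsClosed) // true
}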
@@ -1,37 +0,0 @@
package writenode

import (
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)

type Collection struct {
	schema *schemapb.CollectionSchema
	id     UniqueID
}

func (c *Collection) Name() string {
	return c.schema.Name
}

func (c *Collection) ID() UniqueID {
	return c.id
}

func newCollection(collectionID UniqueID, schemaStr string) *Collection {

	var schema schemapb.CollectionSchema
	err := proto.UnmarshalText(schemaStr, &schema)
	if err != nil {
		log.Println(err)
		return nil
	}

	var newCollection = &Collection{
		schema: &schema,
		id:     collectionID,
	}
	return newCollection
}
@@ -1,99 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/errors"
|
||||
)
|
||||
|
||||
type collectionReplica interface {
|
||||
|
||||
// collection
|
||||
getCollectionNum() int
|
||||
addCollection(collectionID UniqueID, schemaBlob string) error
|
||||
removeCollection(collectionID UniqueID) error
|
||||
getCollectionByID(collectionID UniqueID) (*Collection, error)
|
||||
getCollectionByName(collectionName string) (*Collection, error)
|
||||
hasCollection(collectionID UniqueID) bool
|
||||
}
|
||||
|
||||
type collectionReplicaImpl struct {
|
||||
mu sync.RWMutex
|
||||
collections []*Collection
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------------------------------------------- collection
|
||||
func (colReplica *collectionReplicaImpl) getCollectionNum() int {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
||||
return len(colReplica.collections)
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) addCollection(collectionID UniqueID, schemaBlob string) error {
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
var newCollection = newCollection(collectionID, schemaBlob)
|
||||
colReplica.collections = append(colReplica.collections, newCollection)
|
||||
fmt.Println("yyy, create collection: ", newCollection.Name())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) removeCollection(collectionID UniqueID) error {
|
||||
fmt.Println("drop collection:", collectionID)
|
||||
colReplica.mu.Lock()
|
||||
defer colReplica.mu.Unlock()
|
||||
|
||||
tmpCollections := make([]*Collection, 0)
|
||||
for _, col := range colReplica.collections {
|
||||
if col.ID() != collectionID {
|
||||
tmpCollections = append(tmpCollections, col)
|
||||
} else {
|
||||
fmt.Println("yyy, drop collection name: ", col.Name())
|
||||
}
|
||||
}
|
||||
colReplica.collections = tmpCollections
|
||||
return nil
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) getCollectionByID(collectionID UniqueID) (*Collection, error) {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
||||
for _, collection := range colReplica.collections {
|
||||
if collection.ID() == collectionID {
|
||||
return collection, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("cannot find collection, id = " + strconv.FormatInt(collectionID, 10))
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) getCollectionByName(collectionName string) (*Collection, error) {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
||||
for _, collection := range colReplica.collections {
|
||||
if collection.Name() == collectionName {
|
||||
return collection, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("Cannot found collection: " + collectionName)
|
||||
}
|
||||
|
||||
func (colReplica *collectionReplicaImpl) hasCollection(collectionID UniqueID) bool {
|
||||
colReplica.mu.RLock()
|
||||
defer colReplica.mu.RUnlock()
|
||||
|
||||
for _, col := range colReplica.collections {
|
||||
if col.ID() == collectionID {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
|
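A brief usage sketch of the replica removed above, assuming it sits in package writenode next to the Collection type and that schemaBlob is a text-format CollectionSchema (as produced elsewhere by proto.MarshalTextString):

// exampleReplicaUsage is a hypothetical helper showing the intended call flow.
func exampleReplicaUsage(schemaBlob string) error {
	var replica collectionReplica = &collectionReplicaImpl{
		collections: make([]*Collection, 0),
	}
	if err := replica.addCollection(UniqueID(1), schemaBlob); err != nil {
		return err
	}
	coll, err := replica.getCollectionByID(UniqueID(1))
	if err != nil {
		return err
	}
	_ = coll.Name() // the name comes from the schema stored on the Collection
	return replica.removeCollection(UniqueID(1))
}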
@@ -1,153 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func newReplica() collectionReplica {
|
||||
collections := make([]*Collection, 0)
|
||||
|
||||
var replica collectionReplica = &collectionReplicaImpl{
|
||||
collections: collections,
|
||||
}
|
||||
return replica
|
||||
}
|
||||
|
||||
func genTestCollectionMeta(collectionName string, collectionID UniqueID) *etcdpb.CollectionMeta {
|
||||
fieldVec := schemapb.FieldSchema{
|
||||
FieldID: UniqueID(100),
|
||||
Name: "vec",
|
||||
IsPrimaryKey: false,
|
||||
DataType: schemapb.DataType_VECTOR_FLOAT,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "16",
|
||||
},
|
||||
},
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "metric_type",
|
||||
Value: "L2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
fieldInt := schemapb.FieldSchema{
|
||||
FieldID: UniqueID(101),
|
||||
Name: "age",
|
||||
IsPrimaryKey: false,
|
||||
DataType: schemapb.DataType_INT32,
|
||||
}
|
||||
|
||||
schema := schemapb.CollectionSchema{
|
||||
Name: collectionName,
|
||||
AutoID: true,
|
||||
Fields: []*schemapb.FieldSchema{
|
||||
&fieldVec, &fieldInt,
|
||||
},
|
||||
}
|
||||
|
||||
collectionMeta := etcdpb.CollectionMeta{
|
||||
ID: collectionID,
|
||||
Schema: &schema,
|
||||
CreateTime: Timestamp(0),
|
||||
SegmentIDs: []UniqueID{0},
|
||||
PartitionTags: []string{"default"},
|
||||
}
|
||||
|
||||
return &collectionMeta
|
||||
}
|
||||
|
||||
func initTestMeta(t *testing.T, replica collectionReplica, collectionName string, collectionID UniqueID, segmentID UniqueID) {
|
||||
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
|
||||
|
||||
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
|
||||
assert.NotEqual(t, "", schemaBlob)
|
||||
|
||||
var err = replica.addCollection(collectionMeta.ID, schemaBlob)
|
||||
assert.NoError(t, err)
|
||||
|
||||
collection, err := replica.getCollectionByName(collectionName)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, collection.Name(), collectionName)
|
||||
assert.Equal(t, collection.ID(), collectionID)
|
||||
assert.Equal(t, replica.getCollectionNum(), 1)
|
||||
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------------------------------------------- collection
|
||||
func TestCollectionReplica_getCollectionNum(t *testing.T) {
|
||||
replica := newReplica()
|
||||
initTestMeta(t, replica, "collection0", 0, 0)
|
||||
assert.Equal(t, replica.getCollectionNum(), 1)
|
||||
}
|
||||
|
||||
func TestCollectionReplica_addCollection(t *testing.T) {
|
||||
replica := newReplica()
|
||||
initTestMeta(t, replica, "collection0", 0, 0)
|
||||
}
|
||||
|
||||
func TestCollectionReplica_removeCollection(t *testing.T) {
|
||||
replica := newReplica()
|
||||
initTestMeta(t, replica, "collection0", 0, 0)
|
||||
assert.Equal(t, replica.getCollectionNum(), 1)
|
||||
|
||||
err := replica.removeCollection(0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, replica.getCollectionNum(), 0)
|
||||
}
|
||||
|
||||
func TestCollectionReplica_getCollectionByID(t *testing.T) {
|
||||
replica := newReplica()
|
||||
collectionName := "collection0"
|
||||
collectionID := UniqueID(0)
|
||||
initTestMeta(t, replica, collectionName, collectionID, 0)
|
||||
targetCollection, err := replica.getCollectionByID(collectionID)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, targetCollection)
|
||||
assert.Equal(t, targetCollection.Name(), collectionName)
|
||||
assert.Equal(t, targetCollection.ID(), collectionID)
|
||||
}
|
||||
|
||||
func TestCollectionReplica_getCollectionByName(t *testing.T) {
|
||||
replica := newReplica()
|
||||
collectionName := "collection0"
|
||||
collectionID := UniqueID(0)
|
||||
initTestMeta(t, replica, collectionName, collectionID, 0)
|
||||
|
||||
targetCollection, err := replica.getCollectionByName(collectionName)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, targetCollection)
|
||||
assert.Equal(t, targetCollection.Name(), collectionName)
|
||||
assert.Equal(t, targetCollection.ID(), collectionID)
|
||||
|
||||
}
|
||||
|
||||
func TestCollectionReplica_hasCollection(t *testing.T) {
|
||||
replica := newReplica()
|
||||
collectionName := "collection0"
|
||||
collectionID := UniqueID(0)
|
||||
initTestMeta(t, replica, collectionName, collectionID, 0)
|
||||
|
||||
hasCollection := replica.hasCollection(collectionID)
|
||||
assert.Equal(t, hasCollection, true)
|
||||
hasCollection = replica.hasCollection(UniqueID(1))
|
||||
assert.Equal(t, hasCollection, false)
|
||||
|
||||
}
|
||||
|
||||
func TestCollectionReplica_freeAll(t *testing.T) {
|
||||
replica := newReplica()
|
||||
collectionName := "collection0"
|
||||
collectionID := UniqueID(0)
|
||||
initTestMeta(t, replica, collectionName, collectionID, 0)
|
||||
|
||||
}
|
|
@@ -1,34 +0,0 @@
package writenode

import (
	"testing"

	"github.com/golang/protobuf/proto"
	"github.com/stretchr/testify/assert"
)

func TestCollection_newCollection(t *testing.T) {
	collectionName := "collection0"
	collectionID := UniqueID(0)
	collectionMeta := genTestCollectionMeta(collectionName, collectionID)

	schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
	assert.NotEqual(t, "", schemaBlob)

	collection := newCollection(collectionMeta.ID, schemaBlob)
	assert.Equal(t, collection.Name(), collectionName)
	assert.Equal(t, collection.ID(), collectionID)
}

func TestCollection_deleteCollection(t *testing.T) {
	collectionName := "collection0"
	collectionID := UniqueID(0)
	collectionMeta := genTestCollectionMeta(collectionName, collectionID)

	schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
	assert.NotEqual(t, "", schemaBlob)

	collection := newCollection(collectionMeta.ID, schemaBlob)
	assert.Equal(t, collection.Name(), collectionName)
	assert.Equal(t, collection.ID(), collectionID)
}
@@ -1,116 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
|
||||
)
|
||||
|
||||
type dataSyncService struct {
|
||||
ctx context.Context
|
||||
fg *flowgraph.TimeTickedFlowGraph
|
||||
ddChan chan *ddlFlushSyncMsg
|
||||
insertChan chan *insertFlushSyncMsg
|
||||
replica collectionReplica
|
||||
}
|
||||
|
||||
func newDataSyncService(ctx context.Context,
|
||||
ddChan chan *ddlFlushSyncMsg, insertChan chan *insertFlushSyncMsg, replica collectionReplica) *dataSyncService {
|
||||
|
||||
return &dataSyncService{
|
||||
ctx: ctx,
|
||||
fg: nil,
|
||||
ddChan: ddChan,
|
||||
insertChan: insertChan,
|
||||
replica: replica,
|
||||
}
|
||||
}
|
||||
|
||||
func (dsService *dataSyncService) start() {
|
||||
dsService.initNodes()
|
||||
dsService.fg.Start()
|
||||
}
|
||||
|
||||
func (dsService *dataSyncService) close() {
|
||||
if dsService.fg != nil {
|
||||
dsService.fg.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (dsService *dataSyncService) initNodes() {
|
||||
// TODO: add delete pipeline support
|
||||
|
||||
dsService.fg = flowgraph.NewTimeTickedFlowGraph(dsService.ctx)
|
||||
|
||||
var dmStreamNode Node = newDmInputNode(dsService.ctx)
|
||||
var ddStreamNode Node = newDDInputNode(dsService.ctx)
|
||||
|
||||
var filterDmNode Node = newFilteredDmNode()
|
||||
|
||||
var ddNode Node = newDDNode(dsService.ctx, dsService.ddChan, dsService.replica)
|
||||
var insertBufferNode Node = newInsertBufferNode(dsService.ctx, dsService.insertChan, dsService.replica)
|
||||
var gcNode Node = newGCNode(dsService.replica)
|
||||
|
||||
dsService.fg.AddNode(&dmStreamNode)
|
||||
dsService.fg.AddNode(&ddStreamNode)
|
||||
|
||||
dsService.fg.AddNode(&filterDmNode)
|
||||
dsService.fg.AddNode(&ddNode)
|
||||
|
||||
dsService.fg.AddNode(&insertBufferNode)
|
||||
dsService.fg.AddNode(&gcNode)
|
||||
|
||||
// dmStreamNode
|
||||
var err = dsService.fg.SetEdges(dmStreamNode.Name(),
|
||||
[]string{},
|
||||
[]string{filterDmNode.Name()},
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatal("set edges failed in node:", dmStreamNode.Name())
|
||||
}
|
||||
|
||||
// ddStreamNode
|
||||
err = dsService.fg.SetEdges(ddStreamNode.Name(),
|
||||
[]string{},
|
||||
[]string{ddNode.Name()},
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatal("set edges failed in node:", ddStreamNode.Name())
|
||||
}
|
||||
|
||||
// filterDmNode
|
||||
err = dsService.fg.SetEdges(filterDmNode.Name(),
|
||||
[]string{dmStreamNode.Name(), ddNode.Name()},
|
||||
[]string{insertBufferNode.Name()},
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatal("set edges failed in node:", filterDmNode.Name())
|
||||
}
|
||||
|
||||
// ddNode
|
||||
err = dsService.fg.SetEdges(ddNode.Name(),
|
||||
[]string{ddStreamNode.Name()},
|
||||
[]string{filterDmNode.Name()},
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatal("set edges failed in node:", ddNode.Name())
|
||||
}
|
||||
|
||||
// insertBufferNode
|
||||
err = dsService.fg.SetEdges(insertBufferNode.Name(),
|
||||
[]string{filterDmNode.Name()},
|
||||
[]string{gcNode.Name()},
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatal("set edges failed in node:", insertBufferNode.Name())
|
||||
}
|
||||
|
||||
// gcNode
|
||||
err = dsService.fg.SetEdges(gcNode.Name(),
|
||||
[]string{insertBufferNode.Name()},
|
||||
[]string{})
|
||||
if err != nil {
|
||||
log.Fatal("set edges failed in node:", gcNode.Name())
|
||||
}
|
||||
}
|
|
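The SetEdges calls above declare, for each node, which nodes feed it and which nodes it feeds, wiring the dm and dd streams into the filter, insert-buffer and gc stages. A condensed sketch of the same wiring pattern on a hypothetical two-node pipeline, using only the flowgraph calls that appear above and assuming it sits in package writenode:

// buildTinyGraph is illustrative only: one source node feeding one sink node.
func buildTinyGraph(ctx context.Context, src Node, sink Node) (*flowgraph.TimeTickedFlowGraph, error) {
	fg := flowgraph.NewTimeTickedFlowGraph(ctx)
	fg.AddNode(&src)
	fg.AddNode(&sink)

	// src has no inputs and one output; sink has one input and no outputs.
	if err := fg.SetEdges(src.Name(), []string{}, []string{sink.Name()}); err != nil {
		return nil, err
	}
	if err := fg.SetEdges(sink.Name(), []string{src.Name()}, []string{}); err != nil {
		return nil, err
	}
	return fg, nil
}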
@@ -1,388 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
)
|
||||
|
||||
// NOTE: start pulsar before test
|
||||
func TestDataSyncService_Start(t *testing.T) {
|
||||
newMeta()
|
||||
const ctxTimeInMillisecond = 2000
|
||||
const closeWithDeadline = true
|
||||
var ctx context.Context
|
||||
|
||||
if closeWithDeadline {
|
||||
var cancel context.CancelFunc
|
||||
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
|
||||
ctx, cancel = context.WithDeadline(context.Background(), d)
|
||||
defer cancel()
|
||||
} else {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
// init write node
|
||||
pulsarURL := Params.PulsarAddress
|
||||
node := NewWriteNode(ctx, 0)
|
||||
|
||||
// test data generate
|
||||
// GOOSE TODO: organize
|
||||
const DIM = 2
|
||||
const N = 1
|
||||
var rawData []byte
|
||||
|
||||
// Float vector
|
||||
var fvector = [DIM]float32{1, 2}
|
||||
for _, ele := range fvector {
|
||||
buf := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
|
||||
rawData = append(rawData, buf...)
|
||||
}
|
||||
|
||||
// Binary vector
|
||||
// Dimension of binary vector is 32
|
||||
var bvector = [4]byte{255, 255, 255, 0}
|
||||
for _, ele := range bvector {
|
||||
bs := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bs, uint32(ele))
|
||||
rawData = append(rawData, bs...)
|
||||
}
|
||||
|
||||
// Bool
|
||||
bb := make([]byte, 4)
|
||||
var fieldBool = true
|
||||
var fieldBoolInt uint32
|
||||
if fieldBool {
|
||||
fieldBoolInt = 1
|
||||
} else {
|
||||
fieldBoolInt = 0
|
||||
}
|
||||
|
||||
binary.LittleEndian.PutUint32(bb, fieldBoolInt)
|
||||
rawData = append(rawData, bb...)
|
||||
|
||||
// int8
|
||||
var dataInt8 int8 = 100
|
||||
bint8 := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bint8, uint32(dataInt8))
|
||||
rawData = append(rawData, bint8...)
|
||||
|
||||
// int16
|
||||
var dataInt16 int16 = 200
|
||||
bint16 := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bint16, uint32(dataInt16))
|
||||
rawData = append(rawData, bint16...)
|
||||
|
||||
// int32
|
||||
var dataInt32 int32 = 300
|
||||
bint32 := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bint32, uint32(dataInt32))
|
||||
rawData = append(rawData, bint32...)
|
||||
|
||||
// int64
|
||||
var dataInt64 int64 = 300
|
||||
bint64 := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bint64, uint32(dataInt64))
|
||||
rawData = append(rawData, bint64...)
|
||||
|
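// NOTE: dataInt64 above is written with binary.LittleEndian.PutUint32 into a
// 4-byte buffer, which truncates the value to 32 bits. A full-width
// little-endian encoding of an int64 would look like:
//   bint64 := make([]byte, 8)
//   binary.LittleEndian.PutUint64(bint64, uint64(dataInt64))
//   rawData = append(rawData, bint64...)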
||||
// float32
|
||||
var datafloat float32 = 1.1
|
||||
bfloat32 := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(bfloat32, math.Float32bits(datafloat))
|
||||
rawData = append(rawData, bfloat32...)
|
||||
|
||||
// float64
|
||||
var datafloat64 float64 = 2.2
|
||||
bfloat64 := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(bfloat64, math.Float64bits(datafloat64))
|
||||
rawData = append(rawData, bfloat64...)
|
||||
|
||||
timeRange := TimeRange{
|
||||
timestampMin: 0,
|
||||
timestampMax: math.MaxUint64,
|
||||
}
|
||||
|
||||
// messages generate
|
||||
const MSGLENGTH = 1
|
||||
insertMessages := make([]msgstream.TsMsg, 0)
|
||||
for i := 0; i < MSGLENGTH; i++ {
|
||||
var msg msgstream.TsMsg = &msgstream.InsertMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
HashValues: []uint32{
|
||||
uint32(i),
|
||||
},
|
||||
},
|
||||
InsertRequest: internalpb2.InsertRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kInsert,
|
||||
MsgID: 0,
|
||||
Timestamp: Timestamp(i + 1000),
|
||||
SourceID: 0,
|
||||
},
|
||||
|
||||
CollectionName: "col1",
|
||||
PartitionName: "default",
|
||||
SegmentID: UniqueID(1),
|
||||
ChannelID: "0",
|
||||
Timestamps: []Timestamp{Timestamp(i + 1000)},
|
||||
RowIDs: []UniqueID{UniqueID(i)},
|
||||
|
||||
RowData: []*commonpb.Blob{
|
||||
{Value: rawData},
|
||||
},
|
||||
},
|
||||
}
|
||||
insertMessages = append(insertMessages, msg)
|
||||
}
|
||||
|
||||
msgPack := msgstream.MsgPack{
|
||||
BeginTs: timeRange.timestampMin,
|
||||
EndTs: timeRange.timestampMax,
|
||||
Msgs: insertMessages,
|
||||
}
|
||||
|
||||
// generate timeTick
|
||||
timeTickMsgPack := msgstream.MsgPack{}
|
||||
|
||||
timeTickMsg := &msgstream.TimeTickMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
BeginTimestamp: Timestamp(0),
|
||||
EndTimestamp: Timestamp(0),
|
||||
HashValues: []uint32{0},
|
||||
},
|
||||
TimeTickMsg: internalpb2.TimeTickMsg{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kTimeTick,
|
||||
MsgID: 0,
|
||||
Timestamp: math.MaxUint64,
|
||||
SourceID: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
timeTickMsgPack.Msgs = append(timeTickMsgPack.Msgs, timeTickMsg)
|
||||
|
||||
// pulsar produce
|
||||
const receiveBufSize = 1024
|
||||
insertChannels := Params.InsertChannelNames
|
||||
ddChannels := Params.DDChannelNames
|
||||
|
||||
insertStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
insertStream.SetPulsarClient(pulsarURL)
|
||||
insertStream.CreatePulsarProducers(insertChannels)
|
||||
|
||||
ddStream := pulsarms.NewPulsarMsgStream(ctx, receiveBufSize)
|
||||
ddStream.SetPulsarClient(pulsarURL)
|
||||
ddStream.CreatePulsarProducers(ddChannels)
|
||||
|
||||
var insertMsgStream msgstream.MsgStream = insertStream
|
||||
insertMsgStream.Start()
|
||||
|
||||
var ddMsgStream msgstream.MsgStream = ddStream
|
||||
ddMsgStream.Start()
|
||||
|
||||
err := insertMsgStream.Produce(&msgPack)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = insertMsgStream.Broadcast(&timeTickMsgPack)
|
||||
assert.NoError(t, err)
|
||||
err = ddMsgStream.Broadcast(&timeTickMsgPack)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// dataSync
|
||||
replica := newReplica()
|
||||
node.dataSyncService = newDataSyncService(node.ctx, nil, nil, replica)
|
||||
go node.dataSyncService.start()
|
||||
|
||||
node.Close()
|
||||
|
||||
<-ctx.Done()
|
||||
}
|
||||
|
||||
func newMeta() *etcdpb.CollectionMeta {
|
||||
ETCDAddr := Params.EtcdAddress
|
||||
MetaRootPath := Params.MetaRootPath
|
||||
|
||||
cli, _ := clientv3.New(clientv3.Config{
|
||||
Endpoints: []string{ETCDAddr},
|
||||
DialTimeout: 5 * time.Second,
|
||||
})
|
||||
kvClient := etcdkv.NewEtcdKV(cli, MetaRootPath)
|
||||
defer kvClient.Close()
|
||||
|
||||
sch := schemapb.CollectionSchema{
|
||||
Name: "col1",
|
||||
Description: "test collection",
|
||||
AutoID: false,
|
||||
Fields: []*schemapb.FieldSchema{
|
||||
{
|
||||
FieldID: 1,
|
||||
Name: "Timestamp",
|
||||
Description: "test collection filed 1",
|
||||
DataType: schemapb.DataType_INT64,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "col1_f1_tk2",
|
||||
Value: "col1_f1_tv2",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 0,
|
||||
Name: "RowID",
|
||||
Description: "test collection filed 1",
|
||||
DataType: schemapb.DataType_INT64,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "col1_f1_tk2",
|
||||
Value: "col1_f1_tv2",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 100,
|
||||
Name: "col1_f1",
|
||||
Description: "test collection filed 1",
|
||||
DataType: schemapb.DataType_VECTOR_FLOAT,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "2",
|
||||
},
|
||||
{
|
||||
Key: "col1_f1_tk2",
|
||||
Value: "col1_f1_tv2",
|
||||
},
|
||||
},
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "col1_f1_ik1",
|
||||
Value: "col1_f1_iv1",
|
||||
},
|
||||
{
|
||||
Key: "col1_f1_ik2",
|
||||
Value: "col1_f1_iv2",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 101,
|
||||
Name: "col1_f2",
|
||||
Description: "test collection filed 2",
|
||||
DataType: schemapb.DataType_VECTOR_BINARY,
|
||||
TypeParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "dim",
|
||||
Value: "32",
|
||||
},
|
||||
{
|
||||
Key: "col1_f2_tk2",
|
||||
Value: "col1_f2_tv2",
|
||||
},
|
||||
},
|
||||
IndexParams: []*commonpb.KeyValuePair{
|
||||
{
|
||||
Key: "col1_f2_ik1",
|
||||
Value: "col1_f2_iv1",
|
||||
},
|
||||
{
|
||||
Key: "col1_f2_ik2",
|
||||
Value: "col1_f2_iv2",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
FieldID: 102,
|
||||
Name: "col1_f3",
|
||||
Description: "test collection filed 3",
|
||||
DataType: schemapb.DataType_BOOL,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 103,
|
||||
Name: "col1_f4",
|
||||
Description: "test collection filed 3",
|
||||
DataType: schemapb.DataType_INT8,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 104,
|
||||
Name: "col1_f5",
|
||||
Description: "test collection filed 3",
|
||||
DataType: schemapb.DataType_INT16,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 105,
|
||||
Name: "col1_f6",
|
||||
Description: "test collection filed 3",
|
||||
DataType: schemapb.DataType_INT32,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 106,
|
||||
Name: "col1_f7",
|
||||
Description: "test collection filed 3",
|
||||
DataType: schemapb.DataType_INT64,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 107,
|
||||
Name: "col1_f8",
|
||||
Description: "test collection filed 3",
|
||||
DataType: schemapb.DataType_FLOAT,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
{
|
||||
FieldID: 108,
|
||||
Name: "col1_f9",
|
||||
Description: "test collection filed 3",
|
||||
DataType: schemapb.DataType_DOUBLE,
|
||||
TypeParams: []*commonpb.KeyValuePair{},
|
||||
IndexParams: []*commonpb.KeyValuePair{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
collection := etcdpb.CollectionMeta{
|
||||
ID: UniqueID(1),
|
||||
Schema: &sch,
|
||||
CreateTime: Timestamp(1),
|
||||
SegmentIDs: make([]UniqueID, 0),
|
||||
PartitionTags: make([]string, 0),
|
||||
}
|
||||
|
||||
collBytes := proto.MarshalTextString(&collection)
|
||||
kvClient.Save("/collection/"+strconv.FormatInt(collection.ID, 10), collBytes)
|
||||
|
||||
segSch := etcdpb.SegmentMeta{
|
||||
SegmentID: UniqueID(1),
|
||||
CollectionID: UniqueID(1),
|
||||
}
|
||||
segBytes := proto.MarshalTextString(&segSch)
|
||||
kvClient.Save("/segment/"+strconv.FormatInt(segSch.SegmentID, 10), segBytes)
|
||||
|
||||
return &collection
|
||||
|
||||
}
|
|
@@ -1,419 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"log"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/zilliztech/milvus-distributed/internal/allocator"
|
||||
"github.com/zilliztech/milvus-distributed/internal/kv"
|
||||
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/storage"
|
||||
)
|
||||
|
||||
type ddNode struct {
|
||||
BaseNode
|
||||
ddMsg *ddMsg
|
||||
ddRecords *ddRecords
|
||||
ddBuffer *ddBuffer
|
||||
outCh chan *ddlFlushSyncMsg // for flush sync
|
||||
|
||||
idAllocator *allocator.IDAllocator
|
||||
kv kv.Base
|
||||
replica collectionReplica
|
||||
}
|
||||
|
||||
type ddData struct {
|
||||
ddRequestString []string
|
||||
timestamps []Timestamp
|
||||
eventTypes []storage.EventTypeCode
|
||||
}
|
||||
|
||||
type ddBuffer struct {
|
||||
ddData map[UniqueID]*ddData // collection ID
|
||||
maxSize int
|
||||
}
|
||||
|
||||
type ddRecords struct {
|
||||
collectionRecords map[UniqueID]interface{}
|
||||
partitionRecords map[UniqueID]interface{}
|
||||
}
|
||||
|
||||
func (d *ddBuffer) size() int {
|
||||
if d.ddData == nil || len(d.ddData) <= 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
size := 0
|
||||
for _, data := range d.ddData {
|
||||
size += len(data.ddRequestString)
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func (d *ddBuffer) full() bool {
|
||||
return d.size() >= d.maxSize
|
||||
}
|
||||
|
||||
func (ddNode *ddNode) Name() string {
|
||||
return "ddNode"
|
||||
}
|
||||
|
||||
func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
|
||||
//fmt.Println("Do filterDdNode operation")
|
||||
|
||||
if len(in) != 1 {
|
||||
log.Println("Invalid operate message input in ddNode, input length = ", len(in))
|
||||
// TODO: add error handling
|
||||
}
|
||||
|
||||
msMsg, ok := (*in[0]).(*MsgStreamMsg)
|
||||
if !ok {
|
||||
log.Println("type assertion failed for MsgStreamMsg")
|
||||
// TODO: add error handling
|
||||
}
|
||||
|
||||
var ddMsg = ddMsg{
|
||||
collectionRecords: make(map[string][]metaOperateRecord),
|
||||
partitionRecords: make(map[string][]metaOperateRecord),
|
||||
timeRange: TimeRange{
|
||||
timestampMin: msMsg.TimestampMin(),
|
||||
timestampMax: msMsg.TimestampMax(),
|
||||
},
|
||||
flushMessages: make([]*msgstream.FlushMsg, 0),
|
||||
}
|
||||
ddNode.ddMsg = &ddMsg
|
||||
|
||||
gcRecord := gcRecord{
|
||||
collections: make([]UniqueID, 0),
|
||||
}
|
||||
ddNode.ddMsg.gcRecord = &gcRecord
|
||||
|
||||
// sort tsMessages
|
||||
tsMessages := msMsg.TsMessages()
|
||||
sort.Slice(tsMessages,
|
||||
func(i, j int) bool {
|
||||
return tsMessages[i].BeginTs() < tsMessages[j].BeginTs()
|
||||
})
|
||||
|
||||
// do dd tasks
|
||||
for _, msg := range tsMessages {
|
||||
switch msg.Type() {
|
||||
case commonpb.MsgType_kCreateCollection:
|
||||
ddNode.createCollection(msg.(*msgstream.CreateCollectionMsg))
|
||||
case commonpb.MsgType_kDropCollection:
|
||||
ddNode.dropCollection(msg.(*msgstream.DropCollectionMsg))
|
||||
case commonpb.MsgType_kCreatePartition:
|
||||
ddNode.createPartition(msg.(*msgstream.CreatePartitionMsg))
|
||||
case commonpb.MsgType_kDropPartition:
|
||||
ddNode.dropPartition(msg.(*msgstream.DropPartitionMsg))
|
||||
case commonpb.MsgType_kFlush:
|
||||
fMsg := msg.(*msgstream.FlushMsg)
|
||||
flushSegID := fMsg.SegmentID
|
||||
ddMsg.flushMessages = append(ddMsg.flushMessages, fMsg)
|
||||
ddNode.flush()
|
||||
|
||||
log.Println(".. manual flush completed ...")
|
||||
ddlFlushMsg := &ddlFlushSyncMsg{
|
||||
flushCompleted: true,
|
||||
ddlBinlogPathMsg: ddlBinlogPathMsg{
|
||||
segID: flushSegID,
|
||||
},
|
||||
}
|
||||
|
||||
ddNode.outCh <- ddlFlushMsg
|
||||
|
||||
default:
|
||||
log.Println("Non supporting message type:", msg.Type())
|
||||
}
|
||||
}
|
||||
|
||||
// generate binlog
|
||||
if ddNode.ddBuffer.full() {
|
||||
ddNode.flush()
|
||||
}
|
||||
|
||||
var res Msg = ddNode.ddMsg
|
||||
return []*Msg{&res}
|
||||
}
|
||||
|
||||
func (ddNode *ddNode) flush() {
|
||||
// generate binlog
|
||||
log.Println(". dd buffer full or receive Flush msg ...")
|
||||
ddCodec := &storage.DataDefinitionCodec{}
|
||||
for collectionID, data := range ddNode.ddBuffer.ddData {
|
||||
// buffer data to binlog
|
||||
binLogs, err := ddCodec.Serialize(data.timestamps, data.ddRequestString, data.eventTypes)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
continue
|
||||
}
|
||||
if len(binLogs) != 2 {
|
||||
log.Println("illegal binLogs")
|
||||
continue
|
||||
}
|
||||
|
||||
// binLogs -> minIO/S3
|
||||
if len(data.ddRequestString) != len(data.timestamps) ||
|
||||
len(data.timestamps) != len(data.eventTypes) {
|
||||
log.Println("illegal ddBuffer, failed to save binlog")
|
||||
continue
|
||||
} else {
|
||||
log.Println(".. dd buffer flushing ...")
|
||||
// Blob key example:
|
||||
// ${tenant}/data_definition_log/${collection_id}/ts/${log_idx}
|
||||
// ${tenant}/data_definition_log/${collection_id}/ddl/${log_idx}
|
||||
keyCommon := path.Join(Params.DdLogRootPath, strconv.FormatInt(collectionID, 10))
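// For example, with DdLogRootPath "by-dev/data_definition_log", collection
// ID 42 and allocated log index 7 (illustrative values only), the two keys
// saved below would be:
//   by-dev/data_definition_log/42/ts/7
//   by-dev/data_definition_log/42/ddl/7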
|
||||
|
||||
// save ts binlog
|
||||
timestampLogIdx, err := ddNode.idAllocator.AllocOne()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
timestampKey := path.Join(keyCommon, binLogs[0].GetKey(), strconv.FormatInt(timestampLogIdx, 10))
|
||||
err = ddNode.kv.Save(timestampKey, string(binLogs[0].GetValue()))
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
log.Println("save ts binlog, key = ", timestampKey)
|
||||
|
||||
// save dd binlog
|
||||
ddLogIdx, err := ddNode.idAllocator.AllocOne()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
ddKey := path.Join(keyCommon, binLogs[1].GetKey(), strconv.FormatInt(ddLogIdx, 10))
|
||||
err = ddNode.kv.Save(ddKey, string(binLogs[1].GetValue()))
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
log.Println("save dd binlog, key = ", ddKey)
|
||||
|
||||
ddlFlushMsg := &ddlFlushSyncMsg{
|
||||
flushCompleted: false,
|
||||
ddlBinlogPathMsg: ddlBinlogPathMsg{
|
||||
collID: collectionID,
|
||||
paths: []string{timestampKey, ddKey},
|
||||
},
|
||||
}
|
||||
|
||||
ddNode.outCh <- ddlFlushMsg
|
||||
}
|
||||
|
||||
}
|
||||
// clear buffer
|
||||
ddNode.ddBuffer.ddData = make(map[UniqueID]*ddData)
|
||||
}
|
||||
|
||||
func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
|
||||
collectionID := msg.CollectionID
|
||||
|
||||
// add collection
|
||||
if _, ok := ddNode.ddRecords.collectionRecords[collectionID]; ok {
|
||||
err := errors.New("collection " + strconv.FormatInt(collectionID, 10) + " is already exists")
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
ddNode.ddRecords.collectionRecords[collectionID] = nil
|
||||
|
||||
// TODO: add default partition?
|
||||
|
||||
var schema schemapb.CollectionSchema
|
||||
err := proto.Unmarshal(msg.Schema, &schema)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
schemaStr := proto.MarshalTextString(&schema)
|
||||
// add collection
|
||||
err = ddNode.replica.addCollection(collectionID, schemaStr)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
collectionName := schema.Name
|
||||
ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
|
||||
metaOperateRecord{
|
||||
createOrDrop: true,
|
||||
timestamp: msg.Base.Timestamp,
|
||||
})
|
||||
|
||||
_, ok := ddNode.ddBuffer.ddData[collectionID]
|
||||
if !ok {
|
||||
ddNode.ddBuffer.ddData[collectionID] = &ddData{
|
||||
ddRequestString: make([]string, 0),
|
||||
timestamps: make([]Timestamp, 0),
|
||||
eventTypes: make([]storage.EventTypeCode, 0),
|
||||
}
|
||||
}
|
||||
|
||||
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.CreateCollectionRequest.String())
|
||||
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)
|
||||
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.CreateCollectionEventType)
|
||||
}
|
||||
|
||||
func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
|
||||
collectionID := msg.CollectionID
|
||||
|
||||
//err := ddNode.replica.removeCollection(collectionID)
|
||||
//if err != nil {
|
||||
// log.Println(err)
|
||||
//}
|
||||
|
||||
// remove collection
|
||||
if _, ok := ddNode.ddRecords.collectionRecords[collectionID]; !ok {
|
||||
err := errors.New("cannot found collection " + strconv.FormatInt(collectionID, 10))
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
delete(ddNode.ddRecords.collectionRecords, collectionID)
|
||||
|
||||
collectionName := msg.CollectionName
|
||||
ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
|
||||
metaOperateRecord{
|
||||
createOrDrop: false,
|
||||
timestamp: msg.Base.Timestamp,
|
||||
})
|
||||
|
||||
_, ok := ddNode.ddBuffer.ddData[collectionID]
|
||||
if !ok {
|
||||
ddNode.ddBuffer.ddData[collectionID] = &ddData{
|
||||
ddRequestString: make([]string, 0),
|
||||
timestamps: make([]Timestamp, 0),
|
||||
eventTypes: make([]storage.EventTypeCode, 0),
|
||||
}
|
||||
}
|
||||
|
||||
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.DropCollectionRequest.String())
|
||||
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)
|
||||
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.DropCollectionEventType)
|
||||
|
||||
ddNode.ddMsg.gcRecord.collections = append(ddNode.ddMsg.gcRecord.collections, collectionID)
|
||||
}
|
||||
|
||||
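// createPartition records the new partition in ddRecords and ddMsg, and
// buffers the CreatePartition request into ddBuffer so it ends up in the DDL binlog.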
func (ddNode *ddNode) createPartition(msg *msgstream.CreatePartitionMsg) {
|
||||
partitionID := msg.PartitionID
|
||||
collectionID := msg.CollectionID
|
||||
|
||||
// add partition
|
||||
if _, ok := ddNode.ddRecords.partitionRecords[partitionID]; ok {
|
||||
err := errors.New("partition " + strconv.FormatInt(partitionID, 10) + " is already exists")
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
ddNode.ddRecords.partitionRecords[partitionID] = nil
|
||||
|
||||
partitionTag := msg.PartitionName
|
||||
ddNode.ddMsg.partitionRecords[partitionTag] = append(ddNode.ddMsg.partitionRecords[partitionTag],
|
||||
metaOperateRecord{
|
||||
createOrDrop: true,
|
||||
timestamp: msg.Base.Timestamp,
|
||||
})
|
||||
|
||||
_, ok := ddNode.ddBuffer.ddData[collectionID]
|
||||
if !ok {
|
||||
ddNode.ddBuffer.ddData[collectionID] = &ddData{
|
||||
ddRequestString: make([]string, 0),
|
||||
timestamps: make([]Timestamp, 0),
|
||||
eventTypes: make([]storage.EventTypeCode, 0),
|
||||
}
|
||||
}
|
||||
|
||||
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.CreatePartitionRequest.String())
|
||||
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)
|
||||
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.CreatePartitionEventType)
|
||||
}
|
||||
|
||||
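// dropPartition removes the partition from ddRecords, records the drop in
// ddMsg, and buffers the DropPartition request into ddBuffer.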
func (ddNode *ddNode) dropPartition(msg *msgstream.DropPartitionMsg) {
|
||||
partitionID := msg.PartitionID
|
||||
collectionID := msg.CollectionID
|
||||
|
||||
// remove partition
|
||||
if _, ok := ddNode.ddRecords.partitionRecords[partitionID]; !ok {
|
||||
err := errors.New("cannot found partition " + strconv.FormatInt(partitionID, 10))
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
delete(ddNode.ddRecords.partitionRecords, partitionID)
|
||||
|
||||
partitionTag := msg.PartitionName
|
||||
ddNode.ddMsg.partitionRecords[partitionTag] = append(ddNode.ddMsg.partitionRecords[partitionTag],
|
||||
metaOperateRecord{
|
||||
createOrDrop: false,
|
||||
timestamp: msg.Base.Timestamp,
|
||||
})
|
||||
|
||||
_, ok := ddNode.ddBuffer.ddData[collectionID]
|
||||
if !ok {
|
||||
ddNode.ddBuffer.ddData[collectionID] = &ddData{
|
||||
ddRequestString: make([]string, 0),
|
||||
timestamps: make([]Timestamp, 0),
|
||||
eventTypes: make([]storage.EventTypeCode, 0),
|
||||
}
|
||||
}
|
||||
|
||||
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.DropPartitionRequest.String())
|
||||
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)
|
||||
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.DropPartitionEventType)
|
||||
}
|
||||
|
||||
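// newDDNode wires up a ddNode: the DDL record maps, a flush buffer bounded by
// Params.FlushDdBufSize, a MinIO-backed KV store for DDL binlogs, and an ID
// allocator that talks to the master; it panics if MinIO or the allocator
// cannot be reached.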
func newDDNode(ctx context.Context, outCh chan *ddlFlushSyncMsg, replica collectionReplica) *ddNode {
|
||||
maxQueueLength := Params.FlowGraphMaxQueueLength
|
||||
maxParallelism := Params.FlowGraphMaxParallelism
|
||||
|
||||
baseNode := BaseNode{}
|
||||
baseNode.SetMaxQueueLength(maxQueueLength)
|
||||
baseNode.SetMaxParallelism(maxParallelism)
|
||||
|
||||
ddRecords := &ddRecords{
|
||||
collectionRecords: make(map[UniqueID]interface{}),
|
||||
partitionRecords: make(map[UniqueID]interface{}),
|
||||
}
|
||||
|
||||
bucketName := Params.MinioBucketName
|
||||
option := &miniokv.Option{
|
||||
Address: Params.MinioAddress,
|
||||
AccessKeyID: Params.MinioAccessKeyID,
|
||||
SecretAccessKeyID: Params.MinioSecretAccessKey,
|
||||
UseSSL: Params.MinioUseSSL,
|
||||
BucketName: bucketName,
|
||||
CreateBucket: true,
|
||||
}
|
||||
minioKV, err := miniokv.NewMinIOKV(ctx, option)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
idAllocator, err := allocator.NewIDAllocator(ctx, Params.MasterAddress)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = idAllocator.Start()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return &ddNode{
|
||||
BaseNode: baseNode,
|
||||
ddRecords: ddRecords,
|
||||
ddBuffer: &ddBuffer{
|
||||
ddData: make(map[UniqueID]*ddData),
|
||||
maxSize: Params.FlushDdBufSize,
|
||||
},
|
||||
outCh: outCh,
|
||||
|
||||
idAllocator: idAllocator,
|
||||
kv: minioKV,
|
||||
replica: replica,
|
||||
}
|
||||
}
|
|
@@ -1,164 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
|
||||
)
|
||||
|
||||
func TestFlowGraphDDNode_Operate(t *testing.T) {
|
||||
newMeta()
|
||||
const ctxTimeInMillisecond = 2000
|
||||
const closeWithDeadline = false
|
||||
var ctx context.Context
|
||||
|
||||
if closeWithDeadline {
|
||||
var cancel context.CancelFunc
|
||||
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
|
||||
ctx, cancel = context.WithDeadline(context.Background(), d)
|
||||
defer cancel()
|
||||
} else {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
ddChan := make(chan *ddlFlushSyncMsg, 10)
|
||||
defer close(ddChan)
|
||||
insertChan := make(chan *insertFlushSyncMsg, 10)
|
||||
defer close(insertChan)
|
||||
|
||||
testPath := "/test/writenode/root/meta"
|
||||
err := clearEtcd(testPath)
|
||||
require.NoError(t, err)
|
||||
Params.MetaRootPath = testPath
|
||||
fService := newFlushSyncService(ctx, ddChan, insertChan)
|
||||
assert.Equal(t, testPath, fService.metaTable.client.(*etcdkv.EtcdKV).GetPath("."))
|
||||
go fService.start()
|
||||
|
||||
Params.FlushDdBufSize = 4
|
||||
replica := newReplica()
|
||||
ddNode := newDDNode(ctx, ddChan, replica)
|
||||
|
||||
colID := UniqueID(0)
|
||||
colName := "col-test-0"
|
||||
// create collection
|
||||
createColReq := internalpb2.CreateCollectionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kCreateCollection,
|
||||
MsgID: 1,
|
||||
Timestamp: 1,
|
||||
SourceID: 1,
|
||||
},
|
||||
CollectionID: colID,
|
||||
Schema: make([]byte, 0),
|
||||
}
|
||||
createColMsg := msgstream.CreateCollectionMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
BeginTimestamp: Timestamp(1),
|
||||
EndTimestamp: Timestamp(1),
|
||||
HashValues: []uint32{uint32(0)},
|
||||
},
|
||||
CreateCollectionRequest: createColReq,
|
||||
}
|
||||
|
||||
// drop collection
|
||||
dropColReq := internalpb2.DropCollectionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kDropCollection,
|
||||
MsgID: 2,
|
||||
Timestamp: 2,
|
||||
SourceID: 2,
|
||||
},
|
||||
CollectionID: colID,
|
||||
CollectionName: colName,
|
||||
}
|
||||
dropColMsg := msgstream.DropCollectionMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
BeginTimestamp: Timestamp(2),
|
||||
EndTimestamp: Timestamp(2),
|
||||
HashValues: []uint32{uint32(0)},
|
||||
},
|
||||
DropCollectionRequest: dropColReq,
|
||||
}
|
||||
|
||||
partitionID := UniqueID(100)
|
||||
partitionTag := "partition-test-0"
|
||||
// create partition
|
||||
createPartitionReq := internalpb2.CreatePartitionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kCreatePartition,
|
||||
MsgID: 3,
|
||||
Timestamp: 3,
|
||||
SourceID: 3,
|
||||
},
|
||||
CollectionID: colID,
|
||||
PartitionID: partitionID,
|
||||
CollectionName: colName,
|
||||
PartitionName: partitionTag,
|
||||
}
|
||||
createPartitionMsg := msgstream.CreatePartitionMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
BeginTimestamp: Timestamp(3),
|
||||
EndTimestamp: Timestamp(3),
|
||||
HashValues: []uint32{uint32(0)},
|
||||
},
|
||||
CreatePartitionRequest: createPartitionReq,
|
||||
}
|
||||
|
||||
// drop partition
|
||||
dropPartitionReq := internalpb2.DropPartitionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kDropPartition,
|
||||
MsgID: 4,
|
||||
Timestamp: 4,
|
||||
SourceID: 4,
|
||||
},
|
||||
CollectionID: colID,
|
||||
PartitionID: partitionID,
|
||||
CollectionName: colName,
|
||||
PartitionName: partitionTag,
|
||||
}
|
||||
dropPartitionMsg := msgstream.DropPartitionMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
BeginTimestamp: Timestamp(4),
|
||||
EndTimestamp: Timestamp(4),
|
||||
HashValues: []uint32{uint32(0)},
|
||||
},
|
||||
DropPartitionRequest: dropPartitionReq,
|
||||
}
|
||||
|
||||
flushMsg := msgstream.FlushMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
BeginTimestamp: Timestamp(5),
|
||||
EndTimestamp: Timestamp(5),
|
||||
HashValues: []uint32{uint32(0)},
|
||||
},
|
||||
FlushMsg: internalpb2.FlushMsg{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kFlush,
|
||||
MsgID: 1,
|
||||
Timestamp: 6,
|
||||
SourceID: 1,
|
||||
},
|
||||
SegmentID: 1,
|
||||
},
|
||||
}
|
||||
|
||||
tsMessages := make([]msgstream.TsMsg, 0)
|
||||
tsMessages = append(tsMessages, msgstream.TsMsg(&createColMsg))
|
||||
tsMessages = append(tsMessages, msgstream.TsMsg(&dropColMsg))
|
||||
tsMessages = append(tsMessages, msgstream.TsMsg(&createPartitionMsg))
|
||||
tsMessages = append(tsMessages, msgstream.TsMsg(&dropPartitionMsg))
|
||||
tsMessages = append(tsMessages, msgstream.TsMsg(&flushMsg))
|
||||
msgStream := flowgraph.GenerateMsgStreamMsg(tsMessages, Timestamp(0), Timestamp(3), make([]*internalpb2.MsgPosition, 0))
|
||||
var inMsg Msg = msgStream
|
||||
ddNode.Operate([]*Msg{&inMsg})
|
||||
}
|
|
@@ -1,179 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"math"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
)
|
||||
|
||||
type filterDmNode struct {
|
||||
BaseNode
|
||||
ddMsg *ddMsg
|
||||
}
|
||||
|
||||
func (fdmNode *filterDmNode) Name() string {
|
||||
return "fdmNode"
|
||||
}
|
||||
|
||||
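// Operate consumes a MsgStreamMsg and the ddMsg produced upstream, forwards
// flush messages, filters insert messages against the DDL records, and emits
// a single insertMsg carrying the time range and gc record.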
func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
|
||||
//fmt.Println("Do filterDmNode operation")
|
||||
|
||||
if len(in) != 2 {
|
||||
log.Println("Invalid operate message input in filterDmNode, input length = ", len(in))
|
||||
// TODO: add error handling
|
||||
}
|
||||
|
||||
msgStreamMsg, ok := (*in[0]).(*MsgStreamMsg)
|
||||
if !ok {
|
||||
log.Println("type assertion failed for MsgStreamMsg")
|
||||
// TODO: add error handling
|
||||
}
|
||||
|
||||
var childs []opentracing.Span
|
||||
tracer := opentracing.GlobalTracer()
|
||||
if tracer != nil {
|
||||
for _, msg := range msgStreamMsg.TsMessages() {
|
||||
if msg.Type() == commonpb.MsgType_kInsert {
|
||||
var child opentracing.Span
|
||||
ctx := msg.GetMsgContext()
|
||||
if parent := opentracing.SpanFromContext(ctx); parent != nil {
|
||||
child = tracer.StartSpan("pass filter node",
|
||||
opentracing.FollowsFrom(parent.Context()))
|
||||
} else {
|
||||
child = tracer.StartSpan("pass filter node")
|
||||
}
|
||||
child.SetTag("hash keys", msg.HashKeys())
|
||||
child.SetTag("start time", msg.BeginTs())
|
||||
child.SetTag("end time", msg.EndTs())
|
||||
msg.SetMsgContext(opentracing.ContextWithSpan(ctx, child))
|
||||
childs = append(childs, child)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ddMsg, ok := (*in[1]).(*ddMsg)
|
||||
if !ok {
|
||||
log.Println("type assertion failed for ddMsg")
|
||||
// TODO: add error handling
|
||||
}
|
||||
|
||||
fdmNode.ddMsg = ddMsg
|
||||
|
||||
var iMsg = insertMsg{
|
||||
insertMessages: make([]*msgstream.InsertMsg, 0),
|
||||
flushMessages: make([]*msgstream.FlushMsg, 0),
|
||||
timeRange: TimeRange{
|
||||
timestampMin: msgStreamMsg.TimestampMin(),
|
||||
timestampMax: msgStreamMsg.TimestampMax(),
|
||||
},
|
||||
}
|
||||
|
||||
for _, fmsg := range ddMsg.flushMessages {
|
||||
switch fmsg.Type() {
|
||||
case commonpb.MsgType_kFlush:
|
||||
iMsg.flushMessages = append(iMsg.flushMessages, fmsg)
|
||||
default:
|
||||
log.Println("Non supporting message type:", fmsg.Type())
|
||||
}
|
||||
}
|
||||
|
||||
for key, msg := range msgStreamMsg.TsMessages() {
|
||||
switch msg.Type() {
|
||||
case commonpb.MsgType_kInsert:
|
||||
var ctx2 context.Context
|
||||
if childs != nil {
|
||||
if childs[key] != nil {
|
||||
ctx2 = opentracing.ContextWithSpan(msg.GetMsgContext(), childs[key])
|
||||
} else {
|
||||
ctx2 = context.Background()
|
||||
}
|
||||
}
|
||||
resMsg := fdmNode.filterInvalidInsertMessage(msg.(*msgstream.InsertMsg))
|
||||
if resMsg != nil {
|
||||
resMsg.SetMsgContext(ctx2)
|
||||
iMsg.insertMessages = append(iMsg.insertMessages, resMsg)
|
||||
}
|
||||
// case commonpb.MsgType_kDelete:
|
||||
// dmMsg.deleteMessages = append(dmMsg.deleteMessages, (*msg).(*msgstream.DeleteTask))
|
||||
default:
|
||||
log.Println("Non supporting message type:", msg.Type())
|
||||
}
|
||||
}
|
||||
|
||||
iMsg.gcRecord = ddMsg.gcRecord
|
||||
var res Msg = &iMsg
|
||||
for _, child := range childs {
|
||||
child.Finish()
|
||||
}
|
||||
return []*Msg{&res}
|
||||
}
|
||||
|
||||
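// filterInvalidInsertMessage keeps only the rows whose timestamps fall inside
// the window bounded by the latest create record and the earliest drop record
// of the target collection; it returns nil when no rows survive.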
func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg) *msgstream.InsertMsg {
|
||||
// No dd record, do all insert requests.
|
||||
records, ok := fdmNode.ddMsg.collectionRecords[msg.CollectionName]
|
||||
if !ok {
|
||||
return msg
|
||||
}
|
||||
|
||||
// TODO: If the last record is drop type, all insert requests are invalid.
|
||||
//if !records[len(records)-1].createOrDrop {
|
||||
// return nil
|
||||
//}
|
||||
|
||||
// Filter insert requests before last record.
|
||||
if len(msg.RowIDs) != len(msg.Timestamps) || len(msg.RowIDs) != len(msg.RowData) {
|
||||
// TODO: what if the messages are misaligned? Here, we ignore those messages and print error
|
||||
log.Println("Error, misaligned messages detected")
|
||||
return nil
|
||||
}
|
||||
tmpTimestamps := make([]Timestamp, 0)
|
||||
tmpRowIDs := make([]int64, 0)
|
||||
tmpRowData := make([]*commonpb.Blob, 0)
|
||||
|
||||
// calculate valid time range
|
||||
timeBegin := Timestamp(0)
|
||||
timeEnd := Timestamp(math.MaxUint64)
|
||||
for _, record := range records {
|
||||
if record.createOrDrop && timeBegin < record.timestamp {
|
||||
timeBegin = record.timestamp
|
||||
}
|
||||
if !record.createOrDrop && timeEnd > record.timestamp {
|
||||
timeEnd = record.timestamp
|
||||
}
|
||||
}
|
||||
|
||||
for i, t := range msg.Timestamps {
|
||||
if t >= timeBegin && t <= timeEnd {
|
||||
tmpTimestamps = append(tmpTimestamps, t)
|
||||
tmpRowIDs = append(tmpRowIDs, msg.RowIDs[i])
|
||||
tmpRowData = append(tmpRowData, msg.RowData[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(tmpRowIDs) <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
msg.Timestamps = tmpTimestamps
|
||||
msg.RowIDs = tmpRowIDs
|
||||
msg.RowData = tmpRowData
|
||||
return msg
|
||||
}
|
||||
|
||||
func newFilteredDmNode() *filterDmNode {
|
||||
maxQueueLength := Params.FlowGraphMaxQueueLength
|
||||
maxParallelism := Params.FlowGraphMaxParallelism
|
||||
|
||||
baseNode := BaseNode{}
|
||||
baseNode.SetMaxQueueLength(maxQueueLength)
|
||||
baseNode.SetMaxParallelism(maxParallelism)
|
||||
|
||||
return &filterDmNode{
|
||||
BaseNode: baseNode,
|
||||
}
|
||||
}
|
|
@@ -1,53 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"log"
|
||||
)
|
||||
|
||||
type gcNode struct {
|
||||
BaseNode
|
||||
replica collectionReplica
|
||||
}
|
||||
|
||||
func (gcNode *gcNode) Name() string {
|
||||
return "gcNode"
|
||||
}
|
||||
|
||||
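// Operate removes every collection listed in the incoming gcMsg from the
// replica; it is the terminal node of the flow graph and returns no output.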
func (gcNode *gcNode) Operate(in []*Msg) []*Msg {
|
||||
//fmt.Println("Do gcNode operation")
|
||||
|
||||
if len(in) != 1 {
|
||||
log.Println("Invalid operate message input in gcNode, input length = ", len(in))
|
||||
// TODO: add error handling
|
||||
}
|
||||
|
||||
gcMsg, ok := (*in[0]).(*gcMsg)
|
||||
if !ok {
|
||||
log.Println("type assertion failed for gcMsg")
|
||||
// TODO: add error handling
|
||||
}
|
||||
|
||||
// drop collections
|
||||
for _, collectionID := range gcMsg.gcRecord.collections {
|
||||
err := gcNode.replica.removeCollection(collectionID)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newGCNode(replica collectionReplica) *gcNode {
|
||||
maxQueueLength := Params.FlowGraphMaxQueueLength
|
||||
maxParallelism := Params.FlowGraphMaxParallelism
|
||||
|
||||
baseNode := BaseNode{}
|
||||
baseNode.SetMaxQueueLength(maxQueueLength)
|
||||
baseNode.SetMaxParallelism(maxParallelism)
|
||||
|
||||
return &gcNode{
|
||||
BaseNode: baseNode,
|
||||
replica: replica,
|
||||
}
|
||||
}
|
|
@@ -1,661 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
"path"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
|
||||
"github.com/opentracing/opentracing-go"
|
||||
oplog "github.com/opentracing/opentracing-go/log"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/allocator"
|
||||
"github.com/zilliztech/milvus-distributed/internal/kv"
|
||||
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/storage"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
|
||||
)
|
||||
|
||||
const (
|
||||
CollectionPrefix = "/collection/"
|
||||
SegmentPrefix = "/segment/"
|
||||
)
|
||||
|
||||
type (
|
||||
InsertData = storage.InsertData
|
||||
Blob = storage.Blob
|
||||
|
||||
insertBufferNode struct {
|
||||
BaseNode
|
||||
insertBuffer *insertBuffer
|
||||
minIOKV kv.Base
|
||||
minioPrefix string
|
||||
idAllocator *allocator.IDAllocator
|
||||
outCh chan *insertFlushSyncMsg
|
||||
pulsarWriteNodeTimeTickStream *pulsarms.PulsarMsgStream
|
||||
replica collectionReplica
|
||||
}
|
||||
|
||||
insertBuffer struct {
|
||||
insertData map[UniqueID]*InsertData // SegmentID to InsertData
|
||||
maxSize int
|
||||
}
|
||||
)
|
||||
|
||||
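// size returns the largest row count among the vector fields buffered for the
// given segment; full() compares it against maxSize to decide when to flush.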
func (ib *insertBuffer) size(segmentID UniqueID) int {
|
||||
if ib.insertData == nil || len(ib.insertData) <= 0 {
|
||||
return 0
|
||||
}
|
||||
idata, ok := ib.insertData[segmentID]
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
|
||||
maxSize := 0
|
||||
for _, data := range idata.Data {
|
||||
fdata, ok := data.(*storage.FloatVectorFieldData)
|
||||
if ok && fdata.NumRows > maxSize {
|
||||
maxSize = fdata.NumRows
|
||||
}
|
||||
|
||||
bdata, ok := data.(*storage.BinaryVectorFieldData)
|
||||
if ok && bdata.NumRows > maxSize {
|
||||
maxSize = bdata.NumRows
|
||||
}
|
||||
|
||||
}
|
||||
return maxSize
|
||||
}
|
||||
|
||||
func (ib *insertBuffer) full(segmentID UniqueID) bool {
|
||||
return ib.size(segmentID) >= ib.maxSize
|
||||
}
|
||||
|
||||
func (ibNode *insertBufferNode) Name() string {
|
||||
return "ibNode"
|
||||
}
|
||||
|
||||
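// Operate appends incoming insert rows into the per-segment buffer, serializes
// and uploads a segment's buffer to MinIO as binlogs when it is full or when a
// flush message arrives, reports the binlog paths on outCh, then publishes a
// hard time tick and hands a gcMsg downstream.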
func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
|
||||
// log.Println("=========== insert buffer Node Operating")
|
||||
|
||||
if len(in) != 1 {
|
||||
log.Println("Error: Invalid operate message input in insertBuffertNode, input length = ", len(in))
|
||||
// TODO: add error handling
|
||||
}
|
||||
|
||||
iMsg, ok := (*in[0]).(*insertMsg)
|
||||
if !ok {
|
||||
log.Println("Error: type assertion failed for insertMsg")
|
||||
// TODO: add error handling
|
||||
}
|
||||
|
||||
// iMsg is insertMsg
|
||||
// 1. iMsg -> buffer
|
||||
for _, msg := range iMsg.insertMessages {
|
||||
ctx := msg.GetMsgContext()
|
||||
var span opentracing.Span
|
||||
if ctx != nil {
|
||||
span, _ = opentracing.StartSpanFromContext(ctx, fmt.Sprintf("insert buffer node, start time = %d", msg.BeginTs()))
|
||||
} else {
|
||||
span = opentracing.StartSpan(fmt.Sprintf("insert buffer node, start time = %d", msg.BeginTs()))
|
||||
}
|
||||
span.SetTag("hash keys", msg.HashKeys())
|
||||
span.SetTag("start time", msg.BeginTs())
|
||||
span.SetTag("end time", msg.EndTs())
|
||||
if len(msg.RowIDs) != len(msg.Timestamps) || len(msg.RowIDs) != len(msg.RowData) {
|
||||
log.Println("Error: misaligned messages detected")
|
||||
continue
|
||||
}
|
||||
currentSegID := msg.GetSegmentID()
|
||||
collectionName := msg.GetCollectionName()
|
||||
span.LogFields(oplog.Int("segment id", int(currentSegID)))
|
||||
|
||||
idata, ok := ibNode.insertBuffer.insertData[currentSegID]
|
||||
if !ok {
|
||||
idata = &InsertData{
|
||||
Data: make(map[UniqueID]storage.FieldData),
|
||||
}
|
||||
}
|
||||
|
||||
// 1.1 Get CollectionMeta from etcd
|
||||
collection, err := ibNode.replica.getCollectionByName(collectionName)
|
||||
if err != nil {
|
||||
// GOOSE TODO add error handler
|
||||
log.Println("bbb, Get meta wrong:", err)
|
||||
continue
|
||||
}
|
||||
|
||||
collectionID := collection.ID()
|
||||
collSchema := collection.schema
|
||||
// 1.2 Get Fields
|
||||
pos := 0 // byte offset of the current field within each row's blob
|
||||
for _, field := range collSchema.Fields {
|
||||
switch field.DataType {
|
||||
case schemapb.DataType_VECTOR_FLOAT:
|
||||
var dim int
|
||||
for _, t := range field.TypeParams {
|
||||
if t.Key == "dim" {
|
||||
dim, err = strconv.Atoi(t.Value)
|
||||
if err != nil {
|
||||
log.Println("strconv wrong")
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if dim <= 0 {
|
||||
log.Println("invalid dim")
|
||||
// TODO: add error handling
|
||||
}
|
||||
|
||||
if _, ok := idata.Data[field.FieldID]; !ok {
|
||||
idata.Data[field.FieldID] = &storage.FloatVectorFieldData{
|
||||
NumRows: 0,
|
||||
Data: make([]float32, 0),
|
||||
Dim: dim,
|
||||
}
|
||||
}
|
||||
|
||||
fieldData := idata.Data[field.FieldID].(*storage.FloatVectorFieldData)
|
||||
|
||||
var offset int
|
||||
for _, blob := range msg.RowData {
|
||||
offset = 0
|
||||
for j := 0; j < dim; j++ {
|
||||
var v float32
|
||||
buf := bytes.NewBuffer(blob.GetValue()[pos+offset:])
|
||||
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
|
||||
log.Println("binary.read float32 err:", err)
|
||||
}
|
||||
fieldData.Data = append(fieldData.Data, v)
|
||||
offset += int(unsafe.Sizeof(*(&v)))
|
||||
}
|
||||
}
|
||||
pos += offset
|
||||
fieldData.NumRows += len(msg.RowIDs)
|
||||
|
||||
case schemapb.DataType_VECTOR_BINARY:
|
||||
var dim int
|
||||
for _, t := range field.TypeParams {
|
||||
if t.Key == "dim" {
|
||||
dim, err = strconv.Atoi(t.Value)
|
||||
if err != nil {
|
||||
log.Println("strconv wrong")
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if dim <= 0 {
|
||||
log.Println("invalid dim")
|
||||
// TODO: add error handling
|
||||
}
|
||||
|
||||
if _, ok := idata.Data[field.FieldID]; !ok {
|
||||
idata.Data[field.FieldID] = &storage.BinaryVectorFieldData{
|
||||
NumRows: 0,
|
||||
Data: make([]byte, 0),
|
||||
Dim: dim,
|
||||
}
|
||||
}
|
||||
fieldData := idata.Data[field.FieldID].(*storage.BinaryVectorFieldData)
|
||||
|
||||
var offset int
|
||||
for _, blob := range msg.RowData {
|
||||
bv := blob.GetValue()[pos : pos+(dim/8)] // each row stores its binary vector at the same offset within its own blob
|
||||
fieldData.Data = append(fieldData.Data, bv...)
|
||||
offset = len(bv)
|
||||
}
|
||||
pos += offset
|
||||
fieldData.NumRows += len(msg.RowData)
|
||||
|
||||
case schemapb.DataType_BOOL:
|
||||
if _, ok := idata.Data[field.FieldID]; !ok {
|
||||
idata.Data[field.FieldID] = &storage.BoolFieldData{
|
||||
NumRows: 0,
|
||||
Data: make([]bool, 0),
|
||||
}
|
||||
}
|
||||
|
||||
fieldData := idata.Data[field.FieldID].(*storage.BoolFieldData)
|
||||
var v bool
|
||||
for _, blob := range msg.RowData {
|
||||
buf := bytes.NewReader(blob.GetValue()[pos:])
|
||||
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
|
||||
log.Println("binary.Read bool failed:", err)
|
||||
}
|
||||
fieldData.Data = append(fieldData.Data, v)
|
||||
|
||||
}
|
||||
pos += int(unsafe.Sizeof(*(&v)))
|
||||
fieldData.NumRows += len(msg.RowIDs)
|
||||
|
||||
case schemapb.DataType_INT8:
|
||||
if _, ok := idata.Data[field.FieldID]; !ok {
|
||||
idata.Data[field.FieldID] = &storage.Int8FieldData{
|
||||
NumRows: 0,
|
||||
Data: make([]int8, 0),
|
||||
}
|
||||
}
|
||||
|
||||
fieldData := idata.Data[field.FieldID].(*storage.Int8FieldData)
|
||||
var v int8
|
||||
for _, blob := range msg.RowData {
|
||||
buf := bytes.NewReader(blob.GetValue()[pos:])
|
||||
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
|
||||
log.Println("binary.Read int8 failed:", err)
|
||||
}
|
||||
fieldData.Data = append(fieldData.Data, v)
|
||||
}
|
||||
pos += int(unsafe.Sizeof(*(&v)))
|
||||
fieldData.NumRows += len(msg.RowIDs)
|
||||
|
||||
case schemapb.DataType_INT16:
|
||||
if _, ok := idata.Data[field.FieldID]; !ok {
|
||||
idata.Data[field.FieldID] = &storage.Int16FieldData{
|
||||
NumRows: 0,
|
||||
Data: make([]int16, 0),
|
||||
}
|
||||
}
|
||||
|
||||
fieldData := idata.Data[field.FieldID].(*storage.Int16FieldData)
|
||||
var v int16
|
||||
for _, blob := range msg.RowData {
|
||||
buf := bytes.NewReader(blob.GetValue()[pos:])
|
||||
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
|
||||
log.Println("binary.Read int16 failed:", err)
|
||||
}
|
||||
fieldData.Data = append(fieldData.Data, v)
|
||||
}
|
||||
pos += int(unsafe.Sizeof(*(&v)))
|
||||
fieldData.NumRows += len(msg.RowIDs)
|
||||
|
||||
case schemapb.DataType_INT32:
|
||||
if _, ok := idata.Data[field.FieldID]; !ok {
|
||||
idata.Data[field.FieldID] = &storage.Int32FieldData{
|
||||
NumRows: 0,
|
||||
Data: make([]int32, 0),
|
||||
}
|
||||
}
|
||||
|
||||
fieldData := idata.Data[field.FieldID].(*storage.Int32FieldData)
|
||||
var v int32
|
||||
for _, blob := range msg.RowData {
|
||||
buf := bytes.NewReader(blob.GetValue()[pos:])
|
||||
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
|
||||
log.Println("binary.Read int32 failed:", err)
|
||||
}
|
||||
fieldData.Data = append(fieldData.Data, v)
|
||||
}
|
||||
pos += int(unsafe.Sizeof(*(&v)))
|
||||
fieldData.NumRows += len(msg.RowIDs)
|
||||
|
||||
case schemapb.DataType_INT64:
|
||||
if _, ok := idata.Data[field.FieldID]; !ok {
|
||||
idata.Data[field.FieldID] = &storage.Int64FieldData{
|
||||
NumRows: 0,
|
||||
Data: make([]int64, 0),
|
||||
}
|
||||
}
|
||||
|
||||
fieldData := idata.Data[field.FieldID].(*storage.Int64FieldData)
|
||||
switch field.FieldID {
|
||||
case 0: // rowIDs
|
||||
fieldData.Data = append(fieldData.Data, msg.RowIDs...)
|
||||
fieldData.NumRows += len(msg.RowIDs)
|
||||
case 1: // Timestamps
|
||||
for _, ts := range msg.Timestamps {
|
||||
fieldData.Data = append(fieldData.Data, int64(ts))
|
||||
}
|
||||
fieldData.NumRows += len(msg.Timestamps)
|
||||
default:
|
||||
var v int64
|
||||
for _, blob := range msg.RowData {
|
||||
buf := bytes.NewBuffer(blob.GetValue()[pos:])
|
||||
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
|
||||
log.Println("binary.Read int64 failed:", err)
|
||||
}
|
||||
fieldData.Data = append(fieldData.Data, v)
|
||||
}
|
||||
pos += int(unsafe.Sizeof(*(&v)))
|
||||
fieldData.NumRows += len(msg.RowIDs)
|
||||
}
|
||||
|
||||
case schemapb.DataType_FLOAT:
|
||||
if _, ok := idata.Data[field.FieldID]; !ok {
|
||||
idata.Data[field.FieldID] = &storage.FloatFieldData{
|
||||
NumRows: 0,
|
||||
Data: make([]float32, 0),
|
||||
}
|
||||
}
|
||||
|
||||
fieldData := idata.Data[field.FieldID].(*storage.FloatFieldData)
|
||||
var v float32
|
||||
for _, blob := range msg.RowData {
|
||||
buf := bytes.NewBuffer(blob.GetValue()[pos:])
|
||||
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
|
||||
log.Println("binary.Read float32 failed:", err)
|
||||
}
|
||||
fieldData.Data = append(fieldData.Data, v)
|
||||
}
|
||||
pos += int(unsafe.Sizeof(*(&v)))
|
||||
fieldData.NumRows += len(msg.RowIDs)
|
||||
|
||||
case schemapb.DataType_DOUBLE:
|
||||
if _, ok := idata.Data[field.FieldID]; !ok {
|
||||
idata.Data[field.FieldID] = &storage.DoubleFieldData{
|
||||
NumRows: 0,
|
||||
Data: make([]float64, 0),
|
||||
}
|
||||
}
|
||||
|
||||
fieldData := idata.Data[field.FieldID].(*storage.DoubleFieldData)
|
||||
var v float64
|
||||
for _, blob := range msg.RowData {
|
||||
buf := bytes.NewBuffer(blob.GetValue()[pos:])
|
||||
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
|
||||
log.Println("binary.Read float64 failed:", err)
|
||||
}
|
||||
fieldData.Data = append(fieldData.Data, v)
|
||||
}
|
||||
|
||||
pos += int(unsafe.Sizeof(*(&v)))
|
||||
fieldData.NumRows += len(msg.RowIDs)
|
||||
}
|
||||
}
|
||||
|
||||
// 1.3 store in buffer
|
||||
ibNode.insertBuffer.insertData[currentSegID] = idata
|
||||
span.LogFields(oplog.String("store in buffer", "store in buffer"))
|
||||
|
||||
// 1.4 if full
|
||||
// 1.4.1 generate binlogs
|
||||
span.LogFields(oplog.String("generate binlogs", "generate binlogs"))
|
||||
if ibNode.insertBuffer.full(currentSegID) {
|
||||
log.Printf(". Insert Buffer full, auto flushing (%v) rows of data...", ibNode.insertBuffer.size(currentSegID))
|
||||
// partitionTag -> partitionID
|
||||
partitionTag := msg.GetPartitionName()
|
||||
partitionID, err := typeutil.Hash32String(partitionTag)
|
||||
if err != nil {
|
||||
log.Println("partitionTag to partitionID wrong")
|
||||
// TODO GOOSE add error handler
|
||||
}
|
||||
collMeta := &etcdpb.CollectionMeta{
|
||||
Schema: collSchema,
|
||||
ID: collectionID,
|
||||
}
|
||||
inCodec := storage.NewInsertCodec(collMeta)
|
||||
|
||||
// buffer data to binlogs
|
||||
binLogs, err := inCodec.Serialize(partitionID,
|
||||
currentSegID, ibNode.insertBuffer.insertData[currentSegID])
|
||||
|
||||
if err != nil {
|
||||
log.Println("generate binlog wrong: ", err)
|
||||
}
|
||||
|
||||
// clear buffer
|
||||
delete(ibNode.insertBuffer.insertData, currentSegID)
|
||||
log.Println(".. Clearing buffer")
|
||||
|
||||
// 1.5.2 binLogs -> minIO/S3
|
||||
collIDStr := strconv.FormatInt(collectionID, 10)
|
||||
partitionIDStr := strconv.FormatInt(partitionID, 10)
|
||||
segIDStr := strconv.FormatInt(currentSegID, 10)
|
||||
keyPrefix := path.Join(ibNode.minioPrefix, collIDStr, partitionIDStr, segIDStr)
|
||||
|
||||
log.Printf(".. Saving (%v) binlogs to MinIO ...", len(binLogs))
|
||||
for index, blob := range binLogs {
|
||||
uid, err := ibNode.idAllocator.AllocOne()
|
||||
if err != nil {
|
||||
log.Println("Allocate Id failed")
|
||||
// GOOSE TODO error handler
|
||||
}
|
||||
|
||||
key := path.Join(keyPrefix, blob.Key, strconv.FormatInt(uid, 10))
|
||||
err = ibNode.minIOKV.Save(key, string(blob.Value[:]))
|
||||
if err != nil {
|
||||
log.Println("Save to MinIO failed")
|
||||
// GOOSE TODO error handler
|
||||
}
|
||||
|
||||
fieldID, err := strconv.ParseInt(blob.Key, 10, 32)
|
||||
if err != nil {
|
||||
log.Println("string to fieldID wrong")
|
||||
// GOOSE TODO error handler
|
||||
}
|
||||
|
||||
inBinlogMsg := &insertFlushSyncMsg{
|
||||
flushCompleted: false,
|
||||
insertBinlogPathMsg: insertBinlogPathMsg{
|
||||
ts: iMsg.timeRange.timestampMax,
|
||||
segID: currentSegID,
|
||||
fieldID: fieldID,
|
||||
paths: []string{key},
|
||||
},
|
||||
}
|
||||
|
||||
log.Println("... Appending binlog paths ...", index)
|
||||
ibNode.outCh <- inBinlogMsg
|
||||
}
|
||||
}
|
||||
span.Finish()
|
||||
}
|
||||
|
||||
if len(iMsg.insertMessages) > 0 {
|
||||
log.Println("---insert buffer status---")
|
||||
var stopSign int = 0
|
||||
for k := range ibNode.insertBuffer.insertData {
|
||||
if stopSign >= 10 {
|
||||
break
|
||||
}
|
||||
log.Printf("seg(%v) buffer size = (%v)", k, ibNode.insertBuffer.size(k))
|
||||
stopSign++
|
||||
}
|
||||
}
|
||||
|
||||
// iMsg is Flush() msg from master
|
||||
// 1. insertBuffer(not empty) -> binLogs -> minIO/S3
|
||||
for _, msg := range iMsg.flushMessages {
|
||||
currentSegID := msg.GetSegmentID()
|
||||
flushTs := msg.Base.Timestamp
|
||||
partitionTag := msg.GetPartitionTag()
|
||||
collectionID := msg.GetCollectionID()
|
||||
log.Printf(". Receiving flush message segID(%v)...", currentSegID)
|
||||
|
||||
if ibNode.insertBuffer.size(currentSegID) > 0 {
|
||||
log.Println(".. Buffer not empty, flushing ...")
|
||||
collSchema, err := ibNode.getCollectionSchemaByID(collectionID)
|
||||
if err != nil {
|
||||
// GOOSE TODO add error handler
|
||||
log.Println("aaa, Get meta wrong: ", err)
|
||||
}
|
||||
collMeta := &etcdpb.CollectionMeta{
|
||||
Schema: collSchema,
|
||||
ID: collectionID,
|
||||
}
|
||||
inCodec := storage.NewInsertCodec(collMeta)
|
||||
|
||||
// partitionTag -> partitionID
|
||||
partitionID, err := typeutil.Hash32String(partitionTag)
|
||||
if err != nil {
|
||||
// GOOSE TODO add error handler
|
||||
log.Println("partitionTag to partitionID Wrong: ", err)
|
||||
}
|
||||
|
||||
// buffer data to binlogs
|
||||
binLogs, err := inCodec.Serialize(partitionID,
|
||||
currentSegID, ibNode.insertBuffer.insertData[currentSegID])
|
||||
if err != nil {
|
||||
log.Println("generate binlog wrong: ", err)
|
||||
}
|
||||
|
||||
// clear buffer
|
||||
delete(ibNode.insertBuffer.insertData, currentSegID)
|
||||
|
||||
// binLogs -> minIO/S3
|
||||
collIDStr := strconv.FormatInt(collectionID, 10)
|
||||
partitionIDStr := strconv.FormatInt(partitionID, 10)
|
||||
segIDStr := strconv.FormatInt(currentSegID, 10)
|
||||
keyPrefix := path.Join(ibNode.minioPrefix, collIDStr, partitionIDStr, segIDStr)
|
||||
|
||||
for _, blob := range binLogs {
|
||||
uid, err := ibNode.idAllocator.AllocOne()
|
||||
if err != nil {
|
||||
log.Println("Allocate Id failed")
|
||||
// GOOSE TODO error handler
|
||||
}
|
||||
|
||||
key := path.Join(keyPrefix, blob.Key, strconv.FormatInt(uid, 10))
|
||||
err = ibNode.minIOKV.Save(key, string(blob.Value[:]))
|
||||
if err != nil {
|
||||
log.Println("Save to MinIO failed")
|
||||
// GOOSE TODO error handler
|
||||
}
|
||||
|
||||
fieldID, err := strconv.ParseInt(blob.Key, 10, 32)
|
||||
if err != nil {
|
||||
log.Println("string to fieldID wrong")
|
||||
// GOOSE TODO error handler
|
||||
}
|
||||
|
||||
// Append binlogs
|
||||
inBinlogMsg := &insertFlushSyncMsg{
|
||||
flushCompleted: false,
|
||||
insertBinlogPathMsg: insertBinlogPathMsg{
|
||||
ts: flushTs,
|
||||
segID: currentSegID,
|
||||
fieldID: fieldID,
|
||||
paths: []string{key},
|
||||
},
|
||||
}
|
||||
ibNode.outCh <- inBinlogMsg
|
||||
}
|
||||
}
|
||||
|
||||
// Flushed
|
||||
log.Println(".. Flush finished ...")
|
||||
inBinlogMsg := &insertFlushSyncMsg{
|
||||
flushCompleted: true,
|
||||
insertBinlogPathMsg: insertBinlogPathMsg{
|
||||
ts: flushTs,
|
||||
segID: currentSegID,
|
||||
},
|
||||
}
|
||||
|
||||
ibNode.outCh <- inBinlogMsg
|
||||
}
|
||||
|
||||
if err := ibNode.writeHardTimeTick(iMsg.timeRange.timestampMax); err != nil {
|
||||
log.Printf("Error: send hard time tick into pulsar channel failed, %s\n", err.Error())
|
||||
}
|
||||
|
||||
var res Msg = &gcMsg{
|
||||
gcRecord: iMsg.gcRecord,
|
||||
timeRange: iMsg.timeRange,
|
||||
}
|
||||
|
||||
return []*Msg{&res}
|
||||
}
|
||||
|
||||
func (ibNode *insertBufferNode) getCollectionSchemaByID(collectionID UniqueID) (*schemapb.CollectionSchema, error) {
|
||||
ret, err := ibNode.replica.getCollectionByID(collectionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret.schema, nil
|
||||
}
|
||||
|
||||
func (ibNode *insertBufferNode) getCollectionSchemaByName(collectionName string) (*schemapb.CollectionSchema, error) {
|
||||
ret, err := ibNode.replica.getCollectionByName(collectionName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret.schema, nil
|
||||
}
|
||||
|
||||
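// writeHardTimeTick publishes a TimeTick message carrying the given timestamp
// on the write node's time-tick Pulsar stream.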
func (ibNode *insertBufferNode) writeHardTimeTick(ts Timestamp) error {
|
||||
msgPack := msgstream.MsgPack{}
|
||||
timeTickMsg := msgstream.TimeTickMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
BeginTimestamp: ts,
|
||||
EndTimestamp: ts,
|
||||
HashValues: []uint32{0},
|
||||
},
|
||||
TimeTickMsg: internalpb2.TimeTickMsg{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kTimeTick,
|
||||
MsgID: 0,
|
||||
Timestamp: ts,
|
||||
SourceID: Params.WriteNodeID,
|
||||
},
|
||||
},
|
||||
}
|
||||
msgPack.Msgs = append(msgPack.Msgs, &timeTickMsg)
|
||||
return ibNode.pulsarWriteNodeTimeTickStream.Produce(&msgPack)
|
||||
}
|
||||
|
||||
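// newInsertBufferNode builds an insertBufferNode: a bounded insert buffer
// (Params.FlushInsertBufSize), a MinIO KV client for binlog storage, an ID
// allocator backed by the master, and a Pulsar producer for hard time ticks;
// it panics if MinIO or the allocator is unreachable.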
func newInsertBufferNode(ctx context.Context, outCh chan *insertFlushSyncMsg, replica collectionReplica) *insertBufferNode {
|
||||
maxQueueLength := Params.FlowGraphMaxQueueLength
|
||||
maxParallelism := Params.FlowGraphMaxParallelism
|
||||
|
||||
baseNode := BaseNode{}
|
||||
baseNode.SetMaxQueueLength(maxQueueLength)
|
||||
baseNode.SetMaxParallelism(maxParallelism)
|
||||
|
||||
maxSize := Params.FlushInsertBufSize
|
||||
iBuffer := &insertBuffer{
|
||||
insertData: make(map[UniqueID]*InsertData),
|
||||
maxSize: maxSize,
|
||||
}
|
||||
|
||||
// MinIO
|
||||
|
||||
option := &miniokv.Option{
|
||||
Address: Params.MinioAddress,
|
||||
AccessKeyID: Params.MinioAccessKeyID,
|
||||
SecretAccessKeyID: Params.MinioSecretAccessKey,
|
||||
UseSSL: Params.MinioUseSSL,
|
||||
CreateBucket: true,
|
||||
BucketName: Params.MinioBucketName,
|
||||
}
|
||||
|
||||
minIOKV, err := miniokv.NewMinIOKV(ctx, option)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
minioPrefix := Params.InsertLogRootPath
|
||||
|
||||
idAllocator, err := allocator.NewIDAllocator(ctx, Params.MasterAddress)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = idAllocator.Start()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
wTt := pulsarms.NewPulsarMsgStream(ctx, 1024) //input stream, write node time tick
|
||||
wTt.SetPulsarClient(Params.PulsarAddress)
|
||||
wTt.CreatePulsarProducers([]string{Params.WriteNodeTimeTickChannelName})
|
||||
|
||||
return &insertBufferNode{
|
||||
BaseNode: baseNode,
|
||||
insertBuffer: iBuffer,
|
||||
minIOKV: minIOKV,
|
||||
minioPrefix: minioPrefix,
|
||||
idAllocator: idAllocator,
|
||||
outCh: outCh,
|
||||
pulsarWriteNodeTimeTickStream: wTt,
|
||||
replica: replica,
|
||||
}
|
||||
}
|
|
@@ -1,226 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"log"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
|
||||
)
|
||||
|
||||
func TestFlowGraphInputBufferNode_Operate(t *testing.T) {
|
||||
const ctxTimeInMillisecond = 2000
|
||||
const closeWithDeadline = false
|
||||
var ctx context.Context
|
||||
|
||||
if closeWithDeadline {
|
||||
var cancel context.CancelFunc
|
||||
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
|
||||
ctx, cancel = context.WithDeadline(context.Background(), d)
|
||||
defer cancel()
|
||||
} else {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
ddChan := make(chan *ddlFlushSyncMsg, 10)
|
||||
defer close(ddChan)
|
||||
insertChan := make(chan *insertFlushSyncMsg, 10)
|
||||
defer close(insertChan)
|
||||
|
||||
testPath := "/test/writenode/root/meta"
|
||||
err := clearEtcd(testPath)
|
||||
require.NoError(t, err)
|
||||
Params.MetaRootPath = testPath
|
||||
fService := newFlushSyncService(ctx, ddChan, insertChan)
|
||||
assert.Equal(t, testPath, fService.metaTable.client.(*etcdkv.EtcdKV).GetPath("."))
|
||||
go fService.start()
|
||||
|
||||
collMeta := newMeta()
|
||||
schemaBlob := proto.MarshalTextString(collMeta.Schema)
|
||||
require.NotEqual(t, "", schemaBlob)
|
||||
|
||||
replica := newReplica()
|
||||
err = replica.addCollection(collMeta.ID, schemaBlob)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Params.FlushInsertBufSize = 2
|
||||
iBNode := newInsertBufferNode(ctx, insertChan, replica)
|
||||
inMsg := genInsertMsg()
|
||||
var iMsg flowgraph.Msg = &inMsg
|
||||
iBNode.Operate([]*flowgraph.Msg{&iMsg})
|
||||
}
|
||||
|
||||
func genInsertMsg() insertMsg {
|
||||
// test data generate
|
||||
// GOOSE TODO organize
|
||||
const DIM = 2
|
||||
const N = 1
|
||||
var rawData []byte
|
||||
|
||||
// Float vector
|
||||
var fvector = [DIM]float32{1, 2}
|
||||
for _, ele := range fvector {
|
||||
buf := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
|
||||
rawData = append(rawData, buf...)
|
||||
}
|
||||
|
||||
// Binary vector
|
||||
// Dimension of binary vector is 32
|
||||
// size := 4, = 32 / 8
|
||||
var bvector = []byte{255, 255, 255, 0}
|
||||
rawData = append(rawData, bvector...)
|
||||
|
||||
// Bool
|
||||
var fieldBool = true
|
||||
buf := new(bytes.Buffer)
|
||||
if err := binary.Write(buf, binary.LittleEndian, fieldBool); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
rawData = append(rawData, buf.Bytes()...)
|
||||
|
||||
// int8
|
||||
var dataInt8 int8 = 100
|
||||
bint8 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint8, binary.LittleEndian, dataInt8); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint8.Bytes()...)
|
||||
|
||||
// int16
|
||||
var dataInt16 int16 = 200
|
||||
bint16 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint16, binary.LittleEndian, dataInt16); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint16.Bytes()...)
|
||||
|
||||
// int32
|
||||
var dataInt32 int32 = 300
|
||||
bint32 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint32, binary.LittleEndian, dataInt32); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint32.Bytes()...)
|
||||
|
||||
// int64
|
||||
var dataInt64 int64 = 400
|
||||
bint64 := new(bytes.Buffer)
|
||||
if err := binary.Write(bint64, binary.LittleEndian, dataInt64); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bint64.Bytes()...)
|
||||
|
||||
// float32
|
||||
var datafloat float32 = 1.1
|
||||
bfloat32 := new(bytes.Buffer)
|
||||
if err := binary.Write(bfloat32, binary.LittleEndian, datafloat); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bfloat32.Bytes()...)
|
||||
|
||||
// float64
|
||||
var datafloat64 float64 = 2.2
|
||||
bfloat64 := new(bytes.Buffer)
|
||||
if err := binary.Write(bfloat64, binary.LittleEndian, datafloat64); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
rawData = append(rawData, bfloat64.Bytes()...)
|
||||
log.Println("Test rawdata length:", len(rawData))
|
||||
|
||||
timeRange := TimeRange{
|
||||
timestampMin: 0,
|
||||
timestampMax: math.MaxUint64,
|
||||
}
|
||||
|
||||
var iMsg = &insertMsg{
|
||||
insertMessages: make([]*msgstream.InsertMsg, 0),
|
||||
flushMessages: make([]*msgstream.FlushMsg, 0),
|
||||
timeRange: TimeRange{
|
||||
timestampMin: timeRange.timestampMin,
|
||||
timestampMax: timeRange.timestampMax,
|
||||
},
|
||||
}
|
||||
|
||||
// messages generate
|
||||
const MSGLENGTH = 1
|
||||
// insertMessages := make([]msgstream.TsMsg, 0)
|
||||
for i := 0; i < MSGLENGTH; i++ {
|
||||
var msg = &msgstream.InsertMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
HashValues: []uint32{
|
||||
uint32(i),
|
||||
},
|
||||
},
|
||||
InsertRequest: internalpb2.InsertRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kInsert,
|
||||
MsgID: 0,
|
||||
Timestamp: Timestamp(i + 1000),
|
||||
SourceID: 0,
|
||||
},
|
||||
CollectionName: "col1",
|
||||
PartitionName: "default",
|
||||
SegmentID: UniqueID(1),
|
||||
ChannelID: "0",
|
||||
Timestamps: []Timestamp{
|
||||
Timestamp(i + 1000),
|
||||
Timestamp(i + 1000),
|
||||
Timestamp(i + 1000),
|
||||
Timestamp(i + 1000),
|
||||
Timestamp(i + 1000),
|
||||
},
|
||||
RowIDs: []UniqueID{
|
||||
UniqueID(i),
|
||||
UniqueID(i),
|
||||
UniqueID(i),
|
||||
UniqueID(i),
|
||||
UniqueID(i),
|
||||
},
|
||||
RowData: []*commonpb.Blob{
|
||||
{Value: rawData},
|
||||
{Value: rawData},
|
||||
{Value: rawData},
|
||||
{Value: rawData},
|
||||
{Value: rawData},
|
||||
},
|
||||
},
|
||||
}
|
||||
iMsg.insertMessages = append(iMsg.insertMessages, msg)
|
||||
}
|
||||
|
||||
var fmsg = msgstream.FlushMsg{
|
||||
BaseMsg: msgstream.BaseMsg{
|
||||
HashValues: []uint32{
|
||||
uint32(10),
|
||||
},
|
||||
},
|
||||
FlushMsg: internalpb2.FlushMsg{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kFlush,
|
||||
MsgID: 1,
|
||||
Timestamp: 2000,
|
||||
SourceID: 1,
|
||||
},
|
||||
SegmentID: UniqueID(1),
|
||||
CollectionID: UniqueID(1),
|
||||
PartitionTag: "default",
|
||||
},
|
||||
}
|
||||
iMsg.flushMessages = append(iMsg.flushMessages, &fmsg)
|
||||
return *iMsg
|
||||
|
||||
}
|
|
@@ -1,74 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
|
||||
)
|
||||
|
||||
type (
|
||||
Msg = flowgraph.Msg
|
||||
MsgStreamMsg = flowgraph.MsgStreamMsg
|
||||
)
|
||||
|
||||
type (
|
||||
key2SegMsg struct {
|
||||
tsMessages []msgstream.TsMsg
|
||||
timeRange TimeRange
|
||||
}
|
||||
|
||||
ddMsg struct {
|
||||
// TODO: use collection id
|
||||
collectionRecords map[string][]metaOperateRecord
|
||||
// TODO: use partition id
|
||||
partitionRecords map[string][]metaOperateRecord
|
||||
flushMessages []*msgstream.FlushMsg
|
||||
gcRecord *gcRecord
|
||||
timeRange TimeRange
|
||||
}
|
||||
|
||||
metaOperateRecord struct {
|
||||
createOrDrop bool // create: true, drop: false
|
||||
timestamp Timestamp
|
||||
}
|
||||
|
||||
insertMsg struct {
|
||||
insertMessages []*msgstream.InsertMsg
|
||||
flushMessages []*msgstream.FlushMsg
|
||||
gcRecord *gcRecord
|
||||
timeRange TimeRange
|
||||
}
|
||||
|
||||
deleteMsg struct {
|
||||
deleteMessages []*msgstream.DeleteMsg
|
||||
timeRange TimeRange
|
||||
}
|
||||
|
||||
gcMsg struct {
|
||||
gcRecord *gcRecord
|
||||
timeRange TimeRange
|
||||
}
|
||||
|
||||
gcRecord struct {
|
||||
collections []UniqueID
|
||||
}
|
||||
)
|
||||
|
||||
func (ksMsg *key2SegMsg) TimeTick() Timestamp {
|
||||
return ksMsg.timeRange.timestampMax
|
||||
}
|
||||
|
||||
func (suMsg *ddMsg) TimeTick() Timestamp {
|
||||
return suMsg.timeRange.timestampMax
|
||||
}
|
||||
|
||||
func (iMsg *insertMsg) TimeTick() Timestamp {
|
||||
return iMsg.timeRange.timestampMax
|
||||
}
|
||||
|
||||
func (dMsg *deleteMsg) TimeTick() Timestamp {
|
||||
return dMsg.timeRange.timestampMax
|
||||
}
|
||||
|
||||
func (gcMsg *gcMsg) TimeTick() Timestamp {
|
||||
return gcMsg.timeRange.timestampMax
|
||||
}
|
|
@@ -1,60 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
|
||||
"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
|
||||
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
|
||||
)
|
||||
|
||||
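// newDmInputNode creates the flow graph input node that consumes insert (DM)
// messages from the configured Pulsar insert channels.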
func newDmInputNode(ctx context.Context) *flowgraph.InputNode {
|
||||
receiveBufSize := Params.InsertReceiveBufSize
|
||||
pulsarBufSize := Params.InsertPulsarBufSize
|
||||
|
||||
msgStreamURL := Params.PulsarAddress
|
||||
|
||||
consumeChannels := Params.InsertChannelNames
|
||||
consumeSubName := Params.MsgChannelSubName
|
||||
|
||||
insertStream := pulsarms.NewPulsarTtMsgStream(ctx, receiveBufSize)
|
||||
|
||||
// TODO could panic of nil pointer
|
||||
insertStream.SetPulsarClient(msgStreamURL)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
|
||||
// TODO could panic of nil pointer
|
||||
insertStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
|
||||
|
||||
var stream msgstream.MsgStream = insertStream
|
||||
|
||||
maxQueueLength := Params.FlowGraphMaxQueueLength
|
||||
maxParallelism := Params.FlowGraphMaxParallelism
|
||||
|
||||
node := flowgraph.NewInputNode(&stream, "dmInputNode", maxQueueLength, maxParallelism)
|
||||
return node
|
||||
}
|
||||
|
||||
func newDDInputNode(ctx context.Context) *flowgraph.InputNode {
|
||||
receiveBufSize := Params.DDReceiveBufSize
|
||||
pulsarBufSize := Params.DDPulsarBufSize
|
||||
|
||||
msgStreamURL := Params.PulsarAddress
|
||||
|
||||
consumeChannels := Params.DDChannelNames
|
||||
consumeSubName := Params.MsgChannelSubName
|
||||
|
||||
ddStream := pulsarms.NewPulsarTtMsgStream(ctx, receiveBufSize)
|
||||
ddStream.SetPulsarClient(msgStreamURL)
|
||||
unmarshalDispatcher := util.NewUnmarshalDispatcher()
|
||||
ddStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
|
||||
|
||||
var stream msgstream.MsgStream = ddStream
|
||||
|
||||
maxQueueLength := Params.FlowGraphMaxQueueLength
|
||||
maxParallelism := Params.FlowGraphMaxParallelism
|
||||
|
||||
node := flowgraph.NewInputNode(&stream, "ddInputNode", maxQueueLength, maxParallelism)
|
||||
return node
|
||||
}
|
|
@@ -1,9 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import "github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
|
||||
|
||||
type (
|
||||
Node = flowgraph.Node
|
||||
BaseNode = flowgraph.BaseNode
|
||||
InputNode = flowgraph.InputNode
|
||||
)
|
|
@@ -1,39 +0,0 @@
|
|||
package writenode
|
||||
|
||||
type (
|
||||
// segID: set when flushCompleted == true, to tell
|
||||
// the flushSyncService which segment flush this
|
||||
// DDL flush message is for, so that DDL flush and insert flush
|
||||
// stay in sync.
|
||||
ddlBinlogPathMsg struct {
|
||||
collID UniqueID
|
||||
segID UniqueID
|
||||
paths []string
|
||||
}
|
||||
|
||||
ddlFlushSyncMsg struct {
|
||||
ddlBinlogPathMsg
|
||||
flushCompleted bool
|
||||
}
|
||||
|
||||
insertBinlogPathMsg struct {
|
||||
ts Timestamp
|
||||
segID UniqueID
|
||||
fieldID int64 // TODO GOOSE may need to change
|
||||
paths []string
|
||||
}
|
||||
|
||||
// This Msg can notify flushSyncService
|
||||
// 1.To append binary logs
|
||||
// 2.To set flush-completed status
|
||||
//
|
||||
// When `flushCompleted == false`
|
||||
// `ts` means OpenTime of a segFlushMeta
|
||||
// When `flushCompleted == true`
|
||||
// `ts` means CloseTime of a segFlushMeta,
|
||||
// `fieldID` and `paths` need to be empty
|
||||
insertFlushSyncMsg struct {
|
||||
insertBinlogPathMsg
|
||||
flushCompleted bool
|
||||
}
|
||||
)
|
|
@@ -1,143 +0,0 @@
|
|||
package writenode
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
|
||||
"go.etcd.io/etcd/clientv3"
|
||||
)
|
||||
|
||||
type (
|
||||
flushSyncService struct {
|
||||
ctx context.Context
|
||||
metaTable *metaTable
|
||||
ddChan chan *ddlFlushSyncMsg
|
||||
insertChan chan *insertFlushSyncMsg
|
||||
ddFlushed map[UniqueID]bool // Segment ID
|
||||
insertFlushed map[UniqueID]bool // Segment ID
|
||||
}
|
||||
)
|
||||
|
||||
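// newFlushSyncService creates the service with its DDL and insert flush
// channels and an etcd-backed metaTable rooted at Params.MetaRootPath;
// it panics if etcd is unreachable.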
func newFlushSyncService(ctx context.Context,
|
||||
ddChan chan *ddlFlushSyncMsg, insertChan chan *insertFlushSyncMsg) *flushSyncService {
|
||||
|
||||
service := &flushSyncService{
|
||||
ctx: ctx,
|
||||
ddChan: ddChan,
|
||||
insertChan: insertChan,
|
||||
ddFlushed: make(map[UniqueID]bool),
|
||||
insertFlushed: make(map[UniqueID]bool),
|
||||
}
|
||||
|
||||
// New metaTable
|
||||
etcdAddr := Params.EtcdAddress
|
||||
etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
etcdKV := etcdkv.NewEtcdKV(etcdClient, Params.MetaRootPath)
|
||||
metaKV, err2 := NewMetaTable(etcdKV)
|
||||
if err2 != nil {
|
||||
panic(err2)
|
||||
}
|
||||
|
||||
service.metaTable = metaKV
|
||||
return service
|
||||
}
|
||||
|
||||
func (fService *flushSyncService) completeDDFlush(segID UniqueID) {
|
||||
if _, ok := fService.ddFlushed[segID]; !ok {
|
||||
fService.ddFlushed[segID] = true
|
||||
return
|
||||
}
|
||||
|
||||
fService.ddFlushed[segID] = true
|
||||
}
|
||||
|
||||
func (fService *flushSyncService) completeInsertFlush(segID UniqueID) {
|
||||
if _, ok := fService.insertFlushed[segID]; !ok {
|
||||
fService.insertFlushed[segID] = true
|
||||
return
|
||||
}
|
||||
fService.insertFlushed[segID] = true
|
||||
}
|
||||
|
||||
func (fService *flushSyncService) InsertFlushCompleted(segID UniqueID) bool {
|
||||
isinsertFlushed, ok := fService.insertFlushed[segID]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return isinsertFlushed
|
||||
}
|
||||
|
||||
func (fService *flushSyncService) DDFlushCompleted(segID UniqueID) bool {
|
||||
isddFlushed, ok := fService.ddFlushed[segID]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return isddFlushed
|
||||
}
|
||||
|
||||
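// FlushCompleted reports whether both the DDL flush and the insert flush of
// the given segment have completed.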
func (fService *flushSyncService) FlushCompleted(segID UniqueID) bool {
|
||||
isddFlushed, ok := fService.ddFlushed[segID]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
isinsertFlushed, ok := fService.insertFlushed[segID]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return isddFlushed && isinsertFlushed
|
||||
}
|
||||
|
||||
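// start loops over the DDL and insert flush channels: path messages are
// appended to the metaTable, completion messages mark the corresponding side
// as flushed, and once both sides of a segment are done the flush is marked
// complete in the metaTable.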
func (fService *flushSyncService) start() {
|
||||
for {
|
||||
select {
|
||||
case <-fService.ctx.Done():
|
||||
return
|
||||
|
||||
case ddFlushMsg := <-fService.ddChan:
|
||||
if ddFlushMsg == nil {
|
||||
continue
|
||||
}
|
||||
if !ddFlushMsg.flushCompleted {
|
||||
err := fService.metaTable.AppendDDLBinlogPaths(ddFlushMsg.collID, ddFlushMsg.paths)
|
||||
if err != nil {
|
||||
log.Println("Append segBinlog Error")
|
||||
// GOOSE TODO error handling
|
||||
}
|
||||
continue
|
||||
}
|
||||
fService.completeDDFlush(ddFlushMsg.segID)
|
||||
if fService.FlushCompleted(ddFlushMsg.segID) {
|
||||
//log.Printf("DD:Seg(%d) flush completed.", ddFlushMsg.segID)
|
||||
fService.metaTable.CompleteFlush(Timestamp(0), ddFlushMsg.segID)
|
||||
}
|
||||
|
||||
case insertFlushMsg := <-fService.insertChan:
|
||||
if insertFlushMsg == nil {
|
||||
continue
|
||||
}
|
||||
//log.Println("FlushSyncService insertFlushMsg ", insertFlushMsg.segID)
|
||||
if !insertFlushMsg.flushCompleted {
|
||||
//log.Println("FlushSyncService", insertFlushMsg.segID, " not flushCompleted")
|
||||
err := fService.metaTable.AppendSegBinlogPaths(insertFlushMsg.ts, insertFlushMsg.segID, insertFlushMsg.fieldID,
|
||||
insertFlushMsg.paths)
|
||||
if err != nil {
|
||||
log.Println("Append segBinlog Error")
|
||||
// GOOSE TODO error handling
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
fService.completeInsertFlush(insertFlushMsg.segID)
|
||||
|
||||
if fService.FlushCompleted(insertFlushMsg.segID) {
|
||||
log.Printf("Seg(%d) flush completed.", insertFlushMsg.segID)
|
||||
fService.metaTable.CompleteFlush(insertFlushMsg.ts, insertFlushMsg.segID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,166 +0,0 @@
package writenode

import (
    "context"
    "log"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "go.etcd.io/etcd/clientv3"

    etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
)

func clearEtcd(rootPath string) error {
    etcdAddr := Params.EtcdAddress
    etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    if err != nil {
        return err
    }
    etcdKV := etcdkv.NewEtcdKV(etcdClient, rootPath)

    err = etcdKV.RemoveWithPrefix("writer/segment")
    if err != nil {
        return err
    }
    _, _, err = etcdKV.LoadWithPrefix("writer/segment")
    if err != nil {
        return err
    }
    log.Println("Clear ETCD with prefix writer/segment ")

    err = etcdKV.RemoveWithPrefix("writer/ddl")
    if err != nil {
        return err
    }
    _, _, err = etcdKV.LoadWithPrefix("writer/ddl")
    if err != nil {
        return err
    }
    log.Println("Clear ETCD with prefix writer/ddl")
    return nil

}

func TestFlushSyncService_Start(t *testing.T) {
    const ctxTimeInMillisecond = 3000
    const closeWithDeadline = false
    var ctx context.Context
    var cancel context.CancelFunc

    if closeWithDeadline {
        d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
        ctx, cancel = context.WithDeadline(context.Background(), d)
        defer cancel()
    } else {
        // ctx = context.Background()
        ctx, cancel = context.WithCancel(context.Background())
        defer cancel()
    }

    ddChan := make(chan *ddlFlushSyncMsg, 10)
    defer close(ddChan)
    insertChan := make(chan *insertFlushSyncMsg, 10)
    defer close(insertChan)

    testPath := "/test/writenode/root/meta"
    err := clearEtcd(testPath)
    require.NoError(t, err)
    Params.MetaRootPath = testPath
    fService := newFlushSyncService(ctx, ddChan, insertChan)
    assert.Equal(t, testPath, fService.metaTable.client.(*etcdkv.EtcdKV).GetPath("."))

    t.Run("FlushSyncService", func(t *testing.T) {
        go fService.start()

        SegID := UniqueID(100)
        ddMsgs := genDdlFlushSyncMsgs(SegID)
        insertMsgs := geninsertFlushSyncMsgs(SegID)

        for _, msg := range ddMsgs {
            ddChan <- msg
            time.Sleep(time.Millisecond * 50)
        }

        for _, msg := range insertMsgs {
            insertChan <- msg
            time.Sleep(time.Millisecond * 50)
        }

        for {
            if len(ddChan) == 0 && len(insertChan) == 0 && fService.FlushCompleted(SegID) {
                break
            }
        }

        ret, err := fService.metaTable.getSegBinlogPaths(SegID)
        assert.NoError(t, err)
        assert.Equal(t, map[int64][]string{
            0: {"x", "y", "z"},
            1: {"x", "y", "z"},
            2: {"x", "y", "z"},
            3: {"x", "y", "z"},
            4: {"x", "y", "z"},
        }, ret)

        ts, err := fService.metaTable.getFlushOpenTime(SegID)
        assert.NoError(t, err)
        assert.Equal(t, Timestamp(1000), ts)

        ts, err = fService.metaTable.getFlushCloseTime(SegID)
        assert.NoError(t, err)
        assert.Equal(t, Timestamp(2010), ts)

        cp, err := fService.metaTable.checkFlushComplete(SegID)
        assert.NoError(t, err)
        assert.Equal(t, true, cp)

    })
}

func genDdlFlushSyncMsgs(segID UniqueID) []*ddlFlushSyncMsg {
    ret := make([]*ddlFlushSyncMsg, 0)
    for i := 0; i < 5; i++ {
        ret = append(ret, &ddlFlushSyncMsg{
            flushCompleted: false,
            ddlBinlogPathMsg: ddlBinlogPathMsg{
                collID: UniqueID(100),
                paths:  []string{"a", "b", "c"},
            },
        })
    }

    ret = append(ret, &ddlFlushSyncMsg{
        flushCompleted: true,
        ddlBinlogPathMsg: ddlBinlogPathMsg{
            segID: segID,
        },
    })
    return ret
}

func geninsertFlushSyncMsgs(segID UniqueID) []*insertFlushSyncMsg {
    ret := make([]*insertFlushSyncMsg, 0)
    for i := 0; i < 5; i++ {
        ret = append(ret, &insertFlushSyncMsg{
            flushCompleted: false,
            insertBinlogPathMsg: insertBinlogPathMsg{
                ts:      Timestamp(1000 + i),
                segID:   segID,
                fieldID: int64(i),
                paths:   []string{"x", "y", "z"},
            },
        })
    }

    ret = append(ret, &insertFlushSyncMsg{
        flushCompleted: true,
        insertBinlogPathMsg: insertBinlogPathMsg{
            ts:    Timestamp(2010),
            segID: segID,
        },
    })
    return ret
}
@ -1,135 +0,0 @@
package writenode

import (
    "context"
    "fmt"
    "log"
    "path"
    "reflect"
    "strings"
    "time"

    "github.com/golang/protobuf/proto"
    "go.etcd.io/etcd/clientv3"

    etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
    "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
)

type metaService struct {
    ctx     context.Context
    kvBase  *etcdkv.EtcdKV
    replica collectionReplica
}

func newMetaService(ctx context.Context, replica collectionReplica) *metaService {
    ETCDAddr := Params.EtcdAddress
    MetaRootPath := Params.MetaRootPath

    cli, _ := clientv3.New(clientv3.Config{
        Endpoints:   []string{ETCDAddr},
        DialTimeout: 5 * time.Second,
    })

    return &metaService{
        ctx:     ctx,
        kvBase:  etcdkv.NewEtcdKV(cli, MetaRootPath),
        replica: replica,
    }
}

func (mService *metaService) start() {
    // init from meta
    err := mService.loadCollections()
    if err != nil {
        log.Fatal("metaService loadCollections failed")
    }
}

func GetCollectionObjID(key string) string {
    ETCDRootPath := Params.MetaRootPath

    prefix := path.Join(ETCDRootPath, CollectionPrefix) + "/"
    return strings.TrimPrefix(key, prefix)
}

func isCollectionObj(key string) bool {
    ETCDRootPath := Params.MetaRootPath

    prefix := path.Join(ETCDRootPath, CollectionPrefix) + "/"
    prefix = strings.TrimSpace(prefix)
    index := strings.Index(key, prefix)

    return index == 0
}

func isSegmentObj(key string) bool {
    ETCDRootPath := Params.MetaRootPath

    prefix := path.Join(ETCDRootPath, SegmentPrefix) + "/"
    prefix = strings.TrimSpace(prefix)
    index := strings.Index(key, prefix)

    return index == 0
}

func printCollectionStruct(obj *etcdpb.CollectionMeta) {
    v := reflect.ValueOf(obj)
    v = reflect.Indirect(v)
    typeOfS := v.Type()

    for i := 0; i < v.NumField(); i++ {
        if typeOfS.Field(i).Name == "GrpcMarshalString" {
            continue
        }
        fmt.Printf("Field: %s\tValue: %v\n", typeOfS.Field(i).Name, v.Field(i).Interface())
    }
}

func (mService *metaService) processCollectionCreate(id string, value string) {
    //println(fmt.Sprintf("Create Collection:$%s$", id))

    col := mService.collectionUnmarshal(value)
    if col != nil {
        schema := col.Schema
        schemaBlob := proto.MarshalTextString(schema)
        err := mService.replica.addCollection(col.ID, schemaBlob)
        if err != nil {
            log.Println(err)
        }
    }
}

func (mService *metaService) loadCollections() error {
    keys, values, err := mService.kvBase.LoadWithPrefix(CollectionPrefix)
    if err != nil {
        return err
    }

    for i := range keys {
        objID := GetCollectionObjID(keys[i])
        mService.processCollectionCreate(objID, values[i])
    }

    return nil
}

//----------------------------------------------------------------------- Unmarshal and Marshal
func (mService *metaService) collectionUnmarshal(value string) *etcdpb.CollectionMeta {
    col := etcdpb.CollectionMeta{}
    err := proto.UnmarshalText(value, &col)
    if err != nil {
        log.Println(err)
        return nil
    }
    return &col
}

func (mService *metaService) collectionMarshal(col *etcdpb.CollectionMeta) string {
    value := proto.MarshalTextString(col)
    if value == "" {
        log.Println("marshal collection failed")
        return ""
    }
    return value
}
@ -1,100 +0,0 @@
package writenode

import (
    "context"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestMetaService_start(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    replica := newReplica()

    metaService := newMetaService(ctx, replica)

    metaService.start()
}

func TestMetaService_getCollectionObjId(t *testing.T) {
    var key = "/collection/collection0"
    var collectionObjID1 = GetCollectionObjID(key)

    assert.Equal(t, collectionObjID1, "/collection/collection0")

    key = "fakeKey"
    var collectionObjID2 = GetCollectionObjID(key)

    assert.Equal(t, collectionObjID2, "fakeKey")
}

func TestMetaService_isCollectionObj(t *testing.T) {
    var key = Params.MetaRootPath + "/collection/collection0"
    var b1 = isCollectionObj(key)

    assert.Equal(t, b1, true)

    key = Params.MetaRootPath + "/segment/segment0"
    var b2 = isCollectionObj(key)

    assert.Equal(t, b2, false)
}

func TestMetaService_processCollectionCreate(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    replica := newReplica()
    metaService := newMetaService(ctx, replica)
    defer cancel()
    id := "0"
    value := `schema: <
                name: "test"
                fields: <
                  fieldID:100
                  name: "vec"
                  data_type: VECTOR_FLOAT
                  type_params: <
                    key: "dim"
                    value: "16"
                  >
                  index_params: <
                    key: "metric_type"
                    value: "L2"
                  >
                >
                fields: <
                  fieldID:101
                  name: "age"
                  data_type: INT32
                  type_params: <
                    key: "dim"
                    value: "1"
                  >
                >
              >
              segmentIDs: 0
              partition_tags: "default"
              `

    metaService.processCollectionCreate(id, value)

    collectionNum := replica.getCollectionNum()
    assert.Equal(t, collectionNum, 1)

    collection, err := replica.getCollectionByName("test")
    assert.NoError(t, err)
    assert.Equal(t, collection.ID(), UniqueID(0))
}

func TestMetaService_loadCollections(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    replica := newReplica()

    metaService := newMetaService(ctx, replica)

    err2 := (*metaService).loadCollections()
    assert.Nil(t, err2)
}
@ -1,226 +0,0 @@
package writenode

import (
    "strconv"
    "sync"

    "github.com/zilliztech/milvus-distributed/internal/util/typeutil"

    "github.com/golang/protobuf/proto"
    "github.com/zilliztech/milvus-distributed/internal/errors"
    "github.com/zilliztech/milvus-distributed/internal/kv"
    pb "github.com/zilliztech/milvus-distributed/internal/proto/writerpb"
)

type metaTable struct {
    client          kv.TxnBase                       // client of a reliable kv service, i.e. etcd client
    segID2FlushMeta map[UniqueID]pb.SegmentFlushMeta // segment id to flush meta
    collID2DdlMeta  map[UniqueID]*pb.DDLFlushMeta

    lock sync.RWMutex
}

func NewMetaTable(kv kv.TxnBase) (*metaTable, error) {
    mt := &metaTable{
        client: kv,
        lock:   sync.RWMutex{},
    }
    err := mt.reloadSegMetaFromKV()
    if err != nil {
        return nil, err
    }

    err = mt.reloadDdlMetaFromKV()
    if err != nil {
        return nil, err
    }
    return mt, nil
}

func (mt *metaTable) AppendDDLBinlogPaths(collID UniqueID, paths []string) error {
    mt.lock.Lock()
    defer mt.lock.Unlock()

    _, ok := mt.collID2DdlMeta[collID]
    if !ok {
        mt.collID2DdlMeta[collID] = &pb.DDLFlushMeta{
            CollectionID: collID,
            BinlogPaths:  make([]string, 0),
        }
    }

    meta := mt.collID2DdlMeta[collID]
    meta.BinlogPaths = append(meta.BinlogPaths, paths...)

    return mt.saveDDLFlushMeta(meta)
}

func (mt *metaTable) AppendSegBinlogPaths(tsOpen Timestamp, segmentID UniqueID, fieldID int64, dataPaths []string) error {
    _, ok := mt.segID2FlushMeta[segmentID]
    if !ok {
        err := mt.addSegmentFlush(segmentID, tsOpen)
        if err != nil {
            return err
        }
    }

    meta := mt.segID2FlushMeta[segmentID]

    found := false
    for _, field := range meta.Fields {
        if field.FieldID == fieldID {
            field.BinlogPaths = append(field.BinlogPaths, dataPaths...)
            found = true
            break
        }
    }

    if !found {
        newField := &pb.FieldFlushMeta{
            FieldID:     fieldID,
            BinlogPaths: dataPaths,
        }
        meta.Fields = append(meta.Fields, newField)
    }

    return mt.saveSegFlushMeta(&meta)
}

func (mt *metaTable) CompleteFlush(tsClose Timestamp, segmentID UniqueID) error {
    mt.lock.Lock()
    defer mt.lock.Unlock()
    meta, ok := mt.segID2FlushMeta[segmentID]
    if !ok {
        return errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
    }
    meta.IsClosed = true
    meta.CloseTime = tsClose

    return mt.saveSegFlushMeta(&meta)
}

// metaTable.lock.Lock() before call this function
func (mt *metaTable) saveDDLFlushMeta(meta *pb.DDLFlushMeta) error {
    value := proto.MarshalTextString(meta)

    mt.collID2DdlMeta[meta.CollectionID] = meta

    return mt.client.Save(Params.WriteNodeDDLKvSubPath+strconv.FormatInt(meta.CollectionID, 10), value)
}

func (mt *metaTable) reloadDdlMetaFromKV() error {
    mt.collID2DdlMeta = make(map[UniqueID]*pb.DDLFlushMeta)
    _, values, err := mt.client.LoadWithPrefix(Params.WriteNodeDDLKvSubPath)
    if err != nil {
        return err
    }

    for _, value := range values {
        ddlMeta := &pb.DDLFlushMeta{}
        err = proto.UnmarshalText(value, ddlMeta)
        if err != nil {
            return err
        }
        mt.collID2DdlMeta[ddlMeta.CollectionID] = ddlMeta
    }
    return nil
}

// metaTable.lock.Lock() before call this function
func (mt *metaTable) saveSegFlushMeta(meta *pb.SegmentFlushMeta) error {
    value := proto.MarshalTextString(meta)

    mt.segID2FlushMeta[meta.SegmentID] = *meta
    return mt.client.Save(Params.WriteNodeSegKvSubPath+strconv.FormatInt(meta.SegmentID, 10), value)
}

func (mt *metaTable) reloadSegMetaFromKV() error {
    mt.segID2FlushMeta = make(map[UniqueID]pb.SegmentFlushMeta)

    _, values, err := mt.client.LoadWithPrefix(Params.WriteNodeSegKvSubPath)
    if err != nil {
        return err
    }

    for _, value := range values {
        flushMeta := pb.SegmentFlushMeta{}
        err = proto.UnmarshalText(value, &flushMeta)
        if err != nil {
            return err
        }
        mt.segID2FlushMeta[flushMeta.SegmentID] = flushMeta
    }

    return nil
}

func (mt *metaTable) addSegmentFlush(segmentID UniqueID, timestamp Timestamp) error {
    mt.lock.Lock()
    defer mt.lock.Unlock()
    _, ok := mt.segID2FlushMeta[segmentID]
    if ok {
        return errors.Errorf("segment already exists with ID = " + strconv.FormatInt(segmentID, 10))
    }
    meta := pb.SegmentFlushMeta{
        IsClosed:  false,
        SegmentID: segmentID,
        OpenTime:  timestamp,
    }
    return mt.saveSegFlushMeta(&meta)
}

func (mt *metaTable) getFlushCloseTime(segmentID UniqueID) (Timestamp, error) {
    mt.lock.RLock()
    defer mt.lock.RUnlock()
    meta, ok := mt.segID2FlushMeta[segmentID]
    if !ok {
        return typeutil.ZeroTimestamp, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
    }
    return meta.CloseTime, nil
}

func (mt *metaTable) getFlushOpenTime(segmentID UniqueID) (Timestamp, error) {
    mt.lock.RLock()
    defer mt.lock.RUnlock()
    meta, ok := mt.segID2FlushMeta[segmentID]
    if !ok {
        return typeutil.ZeroTimestamp, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
    }
    return meta.OpenTime, nil
}

func (mt *metaTable) checkFlushComplete(segmentID UniqueID) (bool, error) {
    mt.lock.RLock()
    defer mt.lock.RUnlock()
    meta, ok := mt.segID2FlushMeta[segmentID]
    if !ok {
        return false, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
    }
    return meta.IsClosed, nil
}

func (mt *metaTable) getSegBinlogPaths(segmentID UniqueID) (map[int64][]string, error) {
    mt.lock.RLock()
    defer mt.lock.RUnlock()
    meta, ok := mt.segID2FlushMeta[segmentID]
    if !ok {
        return nil, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
    }
    ret := make(map[int64][]string)
    for _, field := range meta.Fields {
        ret[field.FieldID] = field.BinlogPaths
    }
    return ret, nil
}

func (mt *metaTable) getDDLBinlogPaths(collID UniqueID) (map[UniqueID][]string, error) {
    mt.lock.RLock()
    defer mt.lock.RUnlock()
    meta, ok := mt.collID2DdlMeta[collID]
    if !ok {
        return nil, errors.Errorf("collection not exists with ID = " + strconv.FormatInt(collID, 10))
    }
    ret := make(map[UniqueID][]string)
    ret[meta.CollectionID] = meta.BinlogPaths
    return ret, nil
}
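Editor's note, not part of the original diff: the metaTable above keeps its segment and DDL flush metadata in memory and mirrors every change into the transactional KV under Params.WriteNodeSegKvSubPath and Params.WriteNodeDDLKvSubPath, so a restart can rebuild state through the reload*FromKV helpers. A condensed lifecycle sketch follows; it assumes it sits inside this package with the clientv3 and etcdkv imports used by the tests, and the endpoint, IDs, and paths are illustrative.

// Editor's sketch (illustrative): the flush-metadata lifecycle of the removed metaTable.
func exampleMetaTableLifecycle() error {
    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}}) // assumed endpoint
    if err != nil {
        return err
    }
    kv := etcdkv.NewEtcdKV(cli, "/sketch/root/writer")

    mt, err := NewMetaTable(kv) // reloads any segment/DDL flush meta already persisted
    if err != nil {
        return err
    }

    segID := UniqueID(100)
    // Appending paths for an unknown segment opens it implicitly with tsOpen.
    if err := mt.AppendSegBinlogPaths(Timestamp(1000), segID, 0, []string{"x", "y"}); err != nil {
        return err
    }
    if err := mt.AppendDDLBinlogPaths(UniqueID(1), []string{"a"}); err != nil {
        return err
    }

    // CompleteFlush records the close timestamp and marks the segment closed.
    if err := mt.CompleteFlush(Timestamp(2000), segID); err != nil {
        return err
    }
    closed, err := mt.checkFlushComplete(segID) // true after CompleteFlush
    _ = closed
    return err
}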
@ -1,125 +0,0 @@
package writenode

import (
    "context"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
    "go.etcd.io/etcd/clientv3"
)

func TestMetaTable_all(t *testing.T) {

    etcdAddr := Params.EtcdAddress
    cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    require.NoError(t, err)
    etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root/writer")

    _, err = cli.Delete(context.TODO(), "/etcd/test/root/writer", clientv3.WithPrefix())
    require.NoError(t, err)

    meta, err := NewMetaTable(etcdKV)
    assert.NoError(t, err)
    defer meta.client.Close()

    t.Run("TestMetaTable_addSegmentFlush_and_OpenTime", func(t *testing.T) {
        tsOpen := Timestamp(100)
        err := meta.addSegmentFlush(101, tsOpen)
        assert.NoError(t, err)
        exp, err := meta.getFlushOpenTime(101)
        assert.NoError(t, err)
        assert.Equal(t, tsOpen, exp)

        tsOpen = Timestamp(200)
        err = meta.addSegmentFlush(102, tsOpen)
        assert.NoError(t, err)
        exp, err = meta.getFlushOpenTime(102)
        assert.NoError(t, err)
        assert.Equal(t, tsOpen, exp)

        tsOpen = Timestamp(200)
        err = meta.addSegmentFlush(103, tsOpen)
        assert.NoError(t, err)
        exp, err = meta.getFlushOpenTime(103)
        assert.NoError(t, err)
        assert.Equal(t, tsOpen, exp)

        err = meta.reloadSegMetaFromKV()
        assert.NoError(t, err)
    })

    t.Run("TestMetaTable_AppendSegBinlogPaths", func(t *testing.T) {
        segmentID := UniqueID(201)
        tsOpen := Timestamp(1000)
        err := meta.addSegmentFlush(segmentID, tsOpen)
        assert.Nil(t, err)

        exp := map[int64][]string{
            1: {"a", "b", "c"},
            2: {"b", "a", "c"},
        }
        for fieldID, dataPaths := range exp {
            for _, dp := range dataPaths {
                err = meta.AppendSegBinlogPaths(tsOpen, segmentID, fieldID, []string{dp})
                assert.Nil(t, err)
                err = meta.AppendSegBinlogPaths(tsOpen, segmentID, fieldID, []string{dp})
                assert.Nil(t, err)
            }
        }

        ret, err := meta.getSegBinlogPaths(segmentID)
        assert.Nil(t, err)
        assert.Equal(t,
            map[int64][]string{
                1: {"a", "a", "b", "b", "c", "c"},
                2: {"b", "b", "a", "a", "c", "c"}},
            ret)
    })

    t.Run("TestMetaTable_AppendDDLBinlogPaths", func(t *testing.T) {

        collID2Paths := map[UniqueID][]string{
            301: {"a", "b", "c"},
            302: {"c", "b", "a"},
        }

        for collID, dataPaths := range collID2Paths {
            for _, dp := range dataPaths {
                err = meta.AppendDDLBinlogPaths(collID, []string{dp})
                assert.Nil(t, err)
            }
        }

        for k, v := range collID2Paths {
            ret, err := meta.getDDLBinlogPaths(k)
            assert.Nil(t, err)
            assert.Equal(t, map[UniqueID][]string{k: v}, ret)
        }
    })

    t.Run("TestMetaTable_CompleteFlush_and_CloseTime", func(t *testing.T) {

        var segmentID UniqueID = 401
        openTime := Timestamp(1000)
        closeTime := Timestamp(10000)

        err := meta.addSegmentFlush(segmentID, openTime)
        assert.NoError(t, err)

        ret, err := meta.checkFlushComplete(segmentID)
        assert.NoError(t, err)
        assert.Equal(t, false, ret)

        meta.CompleteFlush(closeTime, segmentID)

        ret, err = meta.checkFlushComplete(segmentID)
        assert.NoError(t, err)
        assert.Equal(t, true, ret)
        ts, err := meta.getFlushCloseTime(segmentID)
        assert.NoError(t, err)
        assert.Equal(t, closeTime, ts)
    })

}
@ -1,400 +0,0 @@
package writenode

import (
    "log"
    "os"
    "strconv"

    "github.com/zilliztech/milvus-distributed/internal/util/paramtable"
)

type ParamTable struct {
    paramtable.BaseTable

    PulsarAddress string
    MasterAddress string

    WriteNodeID                  UniqueID
    WriteNodeNum                 int
    WriteNodeTimeTickChannelName string

    FlowGraphMaxQueueLength int32
    FlowGraphMaxParallelism int32

    // dm
    InsertChannelNames   []string
    InsertChannelRange   []int
    InsertReceiveBufSize int64
    InsertPulsarBufSize  int64

    // dd
    DDChannelNames   []string
    DDReceiveBufSize int64
    DDPulsarBufSize  int64

    MsgChannelSubName   string
    DefaultPartitionTag string
    SliceIndex          int

    EtcdAddress           string
    MetaRootPath          string
    WriteNodeSegKvSubPath string
    WriteNodeDDLKvSubPath string
    MinioAddress          string
    MinioAccessKeyID      string
    MinioSecretAccessKey  string
    MinioUseSSL           bool
    MinioBucketName       string

    FlushInsertBufSize int
    FlushDdBufSize     int

    InsertLogRootPath string
    DdLogRootPath     string
}

var Params ParamTable

func (p *ParamTable) Init() {
    p.BaseTable.Init()
    err := p.LoadYaml("advanced/write_node.yaml")
    if err != nil {
        panic(err)
    }

    writeNodeIDStr := os.Getenv("WRITE_NODE_ID")
    if writeNodeIDStr == "" {
        writeNodeIDList := p.WriteNodeIDList()
        if len(writeNodeIDList) <= 0 {
            writeNodeIDStr = "0"
        } else {
            writeNodeIDStr = strconv.Itoa(int(writeNodeIDList[0]))
        }
    }
    err = p.Save("_writeNodeID", writeNodeIDStr)
    if err != nil {
        panic(err)
    }

    p.initMasterAddress()
    p.initPulsarAddress()
    p.initEtcdAddress()
    p.initMetaRootPath()
    p.initWriteNodeSegKvSubPath()
    p.initWriteNodeDDLKvSubPath()
    p.initInsertLogRootPath()
    p.initDdLogRootPath()

    p.initWriteNodeID()
    p.initWriteNodeNum()
    p.initWriteNodeTimeTickChannelName()

    p.initMsgChannelSubName()
    p.initDefaultPartitionTag()
    p.initSliceIndex()

    p.initFlowGraphMaxQueueLength()
    p.initFlowGraphMaxParallelism()

    p.initInsertChannelNames()
    p.initInsertChannelRange()
    p.initInsertReceiveBufSize()
    p.initInsertPulsarBufSize()

    p.initDDChannelNames()
    p.initDDReceiveBufSize()
    p.initDDPulsarBufSize()

    p.initMinioAddress()
    p.initMinioAccessKeyID()
    p.initMinioSecretAccessKey()
    p.initMinioUseSSL()
    p.initMinioBucketName()

    p.initFlushInsertBufSize()
    p.initFlushDdBufSize()
}

func (p *ParamTable) initWriteNodeID() {
    writeNodeID, err := p.Load("_writeNodeID")
    if err != nil {
        panic(err)
    }
    id, err := strconv.Atoi(writeNodeID)
    if err != nil {
        panic(err)
    }
    p.WriteNodeID = UniqueID(id)
}

func (p *ParamTable) initPulsarAddress() {
    url, err := p.Load("_PulsarAddress")
    if err != nil {
        panic(err)
    }
    p.PulsarAddress = url
}

func (p *ParamTable) initMasterAddress() {
    addr, err := p.Load("_MasterAddress")
    if err != nil {
        panic(err)
    }
    p.MasterAddress = addr
}

func (p *ParamTable) initInsertChannelRange() {
    insertChannelRange, err := p.Load("msgChannel.channelRange.insert")
    if err != nil {
        panic(err)
    }
    p.InsertChannelRange = paramtable.ConvertRangeToIntRange(insertChannelRange, ",")
}

// advanced params
// dataSync:
func (p *ParamTable) initFlowGraphMaxQueueLength() {
    p.FlowGraphMaxQueueLength = p.ParseInt32("writeNode.dataSync.flowGraph.maxQueueLength")
}

func (p *ParamTable) initFlowGraphMaxParallelism() {
    p.FlowGraphMaxParallelism = p.ParseInt32("writeNode.dataSync.flowGraph.maxParallelism")
}

// msgStream
func (p *ParamTable) initInsertReceiveBufSize() {
    p.InsertReceiveBufSize = p.ParseInt64("writeNode.msgStream.insert.recvBufSize")
}

func (p *ParamTable) initInsertPulsarBufSize() {
    p.InsertPulsarBufSize = p.ParseInt64("writeNode.msgStream.insert.pulsarBufSize")
}

func (p *ParamTable) initDDReceiveBufSize() {
    revBufSize, err := p.Load("writeNode.msgStream.dataDefinition.recvBufSize")
    if err != nil {
        panic(err)
    }
    bufSize, err := strconv.Atoi(revBufSize)
    if err != nil {
        panic(err)
    }
    p.DDReceiveBufSize = int64(bufSize)
}

func (p *ParamTable) initDDPulsarBufSize() {
    pulsarBufSize, err := p.Load("writeNode.msgStream.dataDefinition.pulsarBufSize")
    if err != nil {
        panic(err)
    }
    bufSize, err := strconv.Atoi(pulsarBufSize)
    if err != nil {
        panic(err)
    }
    p.DDPulsarBufSize = int64(bufSize)
}

func (p *ParamTable) initInsertChannelNames() {

    prefix, err := p.Load("msgChannel.chanNamePrefix.insert")
    if err != nil {
        log.Fatal(err)
    }
    prefix += "-"
    channelRange, err := p.Load("msgChannel.channelRange.insert")
    if err != nil {
        panic(err)
    }
    channelIDs := paramtable.ConvertRangeToIntSlice(channelRange, ",")

    var ret []string
    for _, ID := range channelIDs {
        ret = append(ret, prefix+strconv.Itoa(ID))
    }
    sep := len(channelIDs) / p.WriteNodeNum
    index := p.SliceIndex
    if index == -1 {
        panic("writeNodeID not Match with Config")
    }
    start := index * sep
    p.InsertChannelNames = ret[start : start+sep]
}

func (p *ParamTable) initMsgChannelSubName() {
    name, err := p.Load("msgChannel.subNamePrefix.writeNodeSubNamePrefix")
    if err != nil {
        log.Panic(err)
    }
    writeNodeIDStr, err := p.Load("_writeNodeID")
    if err != nil {
        panic(err)
    }
    p.MsgChannelSubName = name + "-" + writeNodeIDStr
}

func (p *ParamTable) initDDChannelNames() {
    prefix, err := p.Load("msgChannel.chanNamePrefix.dataDefinition")
    if err != nil {
        panic(err)
    }
    prefix += "-"
    iRangeStr, err := p.Load("msgChannel.channelRange.dataDefinition")
    if err != nil {
        panic(err)
    }
    channelIDs := paramtable.ConvertRangeToIntSlice(iRangeStr, ",")
    var ret []string
    for _, ID := range channelIDs {
        ret = append(ret, prefix+strconv.Itoa(ID))
    }
    p.DDChannelNames = ret
}

func (p *ParamTable) initDefaultPartitionTag() {
    defaultTag, err := p.Load("common.defaultPartitionTag")
    if err != nil {
        panic(err)
    }

    p.DefaultPartitionTag = defaultTag
}

func (p *ParamTable) initWriteNodeTimeTickChannelName() {
    channels, err := p.Load("msgChannel.chanNamePrefix.writeNodeTimeTick")
    if err != nil {
        panic(err)
    }
    p.WriteNodeTimeTickChannelName = channels + "-" + strconv.FormatInt(p.WriteNodeID, 10)
}

func (p *ParamTable) initSliceIndex() {
    writeNodeID := p.WriteNodeID
    writeNodeIDList := p.WriteNodeIDList()
    for i := 0; i < len(writeNodeIDList); i++ {
        if writeNodeID == writeNodeIDList[i] {
            p.SliceIndex = i
            return
        }
    }
    p.SliceIndex = -1
}

func (p *ParamTable) initWriteNodeNum() {
    p.WriteNodeNum = len(p.WriteNodeIDList())
}

func (p *ParamTable) initEtcdAddress() {
    addr, err := p.Load("_EtcdAddress")
    if err != nil {
        panic(err)
    }
    p.EtcdAddress = addr
}

func (p *ParamTable) initMetaRootPath() {
    rootPath, err := p.Load("etcd.rootPath")
    if err != nil {
        panic(err)
    }
    subPath, err := p.Load("etcd.metaSubPath")
    if err != nil {
        panic(err)
    }
    p.MetaRootPath = rootPath + "/" + subPath
}

func (p *ParamTable) initWriteNodeSegKvSubPath() {
    subPath, err := p.Load("etcd.writeNodeSegKvSubPath")
    if err != nil {
        panic(err)
    }
    p.WriteNodeSegKvSubPath = subPath + "/"
}

func (p *ParamTable) initWriteNodeDDLKvSubPath() {
    subPath, err := p.Load("etcd.writeNodeDDLKvSubPath")
    if err != nil {
        panic(err)
    }
    p.WriteNodeDDLKvSubPath = subPath + "/"
}

func (p *ParamTable) initInsertLogRootPath() {
    rootPath, err := p.Load("etcd.rootPath")
    if err != nil {
        panic(err)
    }
    p.InsertLogRootPath = rootPath + "/insert_log"
}

func (p *ParamTable) initDdLogRootPath() {
    rootPath, err := p.Load("etcd.rootPath")
    if err != nil {
        panic(err)
    }
    p.DdLogRootPath = rootPath + "/data_definition_log"
}

func (p *ParamTable) initMinioAddress() {
    endpoint, err := p.Load("_MinioAddress")
    if err != nil {
        panic(err)
    }
    p.MinioAddress = endpoint
}

func (p *ParamTable) initMinioAccessKeyID() {
    keyID, err := p.Load("minio.accessKeyID")
    if err != nil {
        panic(err)
    }
    p.MinioAccessKeyID = keyID
}

func (p *ParamTable) initMinioSecretAccessKey() {
    key, err := p.Load("minio.secretAccessKey")
    if err != nil {
        panic(err)
    }
    p.MinioSecretAccessKey = key
}

func (p *ParamTable) initMinioUseSSL() {
    usessl, err := p.Load("minio.useSSL")
    if err != nil {
        panic(err)
    }
    p.MinioUseSSL, _ = strconv.ParseBool(usessl)
}

func (p *ParamTable) initMinioBucketName() {
    bucketName, err := p.Load("minio.bucketName")
    if err != nil {
        panic(err)
    }
    p.MinioBucketName = bucketName
}

func (p *ParamTable) initFlushInsertBufSize() {
    sizeStr, err := p.Load("writenode.flush.insertBufSize")
    if err != nil {
        panic(err)
    }

    p.FlushInsertBufSize, err = strconv.Atoi(sizeStr)
    if err != nil {
        panic(err)
    }
}

func (p *ParamTable) initFlushDdBufSize() {
    sizeStr, err := p.Load("writenode.flush.ddBufSize")
    if err != nil {
        panic(err)
    }

    p.FlushDdBufSize, err = strconv.Atoi(sizeStr)
    if err != nil {
        panic(err)
    }
}
@ -1,109 +0,0 @@
package writenode

import (
    "strings"
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestParamTable_WriteNode(t *testing.T) {

    Params.Init()

    t.Run("Test PulsarAddress", func(t *testing.T) {
        address := Params.PulsarAddress
        split := strings.Split(address, ":")
        assert.Equal(t, split[0], "pulsar")
        assert.Equal(t, split[len(split)-1], "6650")
    })

    t.Run("Test WriteNodeID", func(t *testing.T) {
        id := Params.WriteNodeID
        assert.Equal(t, id, UniqueID(3))
    })

    t.Run("Test insertChannelRange", func(t *testing.T) {
        channelRange := Params.InsertChannelRange
        assert.Equal(t, len(channelRange), 2)
        assert.Equal(t, channelRange[0], 0)
        assert.Equal(t, channelRange[1], 2)
    })

    t.Run("Test insertMsgStreamReceiveBufSize", func(t *testing.T) {
        bufSize := Params.InsertReceiveBufSize
        assert.Equal(t, bufSize, int64(1024))
    })

    t.Run("Test insertPulsarBufSize", func(t *testing.T) {
        bufSize := Params.InsertPulsarBufSize
        assert.Equal(t, bufSize, int64(1024))
    })

    t.Run("Test flowGraphMaxQueueLength", func(t *testing.T) {
        length := Params.FlowGraphMaxQueueLength
        assert.Equal(t, length, int32(1024))
    })

    t.Run("Test flowGraphMaxParallelism", func(t *testing.T) {
        maxParallelism := Params.FlowGraphMaxParallelism
        assert.Equal(t, maxParallelism, int32(1024))
    })

    t.Run("Test insertChannelNames", func(t *testing.T) {
        names := Params.InsertChannelNames
        assert.Equal(t, len(names), 2)
        assert.Equal(t, names[0], "insert-0")
        assert.Equal(t, names[1], "insert-1")
    })

    t.Run("Test msgChannelSubName", func(t *testing.T) {
        name := Params.MsgChannelSubName
        assert.Equal(t, name, "writeNode-3")
    })

    t.Run("Test timeTickChannelName", func(t *testing.T) {
        name := Params.WriteNodeTimeTickChannelName
        assert.Equal(t, name, "writeNodeTimeTick-3")
    })

    t.Run("Test minioAccessKeyID", func(t *testing.T) {
        id := Params.MinioAccessKeyID
        assert.Equal(t, id, "minioadmin")
    })

    t.Run("Test minioSecretAccessKey", func(t *testing.T) {
        id := Params.MinioSecretAccessKey
        assert.Equal(t, id, "minioadmin")
    })

    t.Run("Test MinioUseSSL", func(t *testing.T) {
        id := Params.MinioUseSSL
        assert.Equal(t, id, false)
    })

    t.Run("Test MinioBucketName", func(t *testing.T) {
        name := Params.MinioBucketName
        assert.Equal(t, name, "a-bucket")
    })

    t.Run("Test FlushInsertBufSize", func(t *testing.T) {
        name := Params.FlushInsertBufSize
        assert.Equal(t, name, 500)
    })

    t.Run("Test FlushDdBufSize", func(t *testing.T) {
        name := Params.FlushDdBufSize
        assert.Equal(t, name, 20)
    })

    t.Run("Test InsertLogRootPath", func(t *testing.T) {
        name := Params.InsertLogRootPath
        assert.Equal(t, name, "by-dev/insert_log")
    })

    t.Run("Test DdLogRootPath", func(t *testing.T) {
        name := Params.DdLogRootPath
        assert.Equal(t, name, "by-dev/data_definition_log")
    })
}
@ -1,15 +0,0 @@
package writenode

import "github.com/zilliztech/milvus-distributed/internal/util/typeutil"

type (
    UniqueID      = typeutil.UniqueID
    Timestamp     = typeutil.Timestamp
    IntPrimaryKey = typeutil.IntPrimaryKey
    DSL           = string

    TimeRange struct {
        timestampMin Timestamp
        timestampMax Timestamp
    }
)
@ -1,89 +0,0 @@
package writenode

import (
    "context"
    "fmt"
    "io"

    "github.com/opentracing/opentracing-go"
    "github.com/uber/jaeger-client-go/config"
)

type WriteNode struct {
    ctx              context.Context
    WriteNodeID      uint64
    dataSyncService  *dataSyncService
    flushSyncService *flushSyncService
    metaService      *metaService
    replica          collectionReplica
    tracer           opentracing.Tracer
    closer           io.Closer
}

func NewWriteNode(ctx context.Context, writeNodeID uint64) *WriteNode {

    collections := make([]*Collection, 0)

    var replica collectionReplica = &collectionReplicaImpl{
        collections: collections,
    }

    node := &WriteNode{
        ctx:              ctx,
        WriteNodeID:      writeNodeID,
        dataSyncService:  nil,
        flushSyncService: nil,
        metaService:      nil,
        replica:          replica,
    }

    return node
}

func Init() {
    Params.Init()
}

func (node *WriteNode) Start() error {
    cfg := &config.Configuration{
        ServiceName: "write_node",
        Sampler: &config.SamplerConfig{
            Type:  "const",
            Param: 1,
        },
    }
    var err error
    node.tracer, node.closer, err = cfg.NewTracer()
    if err != nil {
        panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
    }
    opentracing.SetGlobalTracer(node.tracer)

    // TODO GOOSE Init Size??
    chanSize := 100
    ddChan := make(chan *ddlFlushSyncMsg, chanSize)
    insertChan := make(chan *insertFlushSyncMsg, chanSize)
    node.flushSyncService = newFlushSyncService(node.ctx, ddChan, insertChan)

    node.dataSyncService = newDataSyncService(node.ctx, ddChan, insertChan, node.replica)
    node.metaService = newMetaService(node.ctx, node.replica)

    go node.dataSyncService.start()
    go node.flushSyncService.start()
    node.metaService.start()
    return nil
}

func (node *WriteNode) Close() {
    <-node.ctx.Done()

    // close services
    if node.dataSyncService != nil {
        (*node.dataSyncService).close()
    }

    if node.closer != nil {
        node.closer.Close()
    }

}
@ -1,82 +0,0 @@
package writenode

import (
    "context"
    "fmt"
    "log"
    "math/rand"
    "os"
    "strconv"
    "testing"
    "time"

    "go.etcd.io/etcd/clientv3"
    "go.uber.org/zap"

    "github.com/zilliztech/milvus-distributed/internal/master"
)

func makeNewChannelNames(names []string, suffix string) []string {
    var ret []string
    for _, name := range names {
        ret = append(ret, name+suffix)
    }
    return ret
}

func refreshChannelNames() {
    suffix := "-test-write-node" + strconv.FormatInt(rand.Int63n(100), 10)
    Params.DDChannelNames = makeNewChannelNames(Params.DDChannelNames, suffix)
    Params.InsertChannelNames = makeNewChannelNames(Params.InsertChannelNames, suffix)
}

func startMaster(ctx context.Context) {
    master.Init()
    etcdAddr := master.Params.EtcdAddress
    metaRootPath := master.Params.MetaRootPath

    etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
    if err != nil {
        panic(err)
    }
    _, err = etcdCli.Delete(context.TODO(), metaRootPath, clientv3.WithPrefix())
    if err != nil {
        panic(err)
    }

    masterPort := 53101
    master.Params.Port = masterPort
    svr, err := master.CreateServer(ctx)
    if err != nil {
        log.Print("create server failed", zap.Error(err))
    }
    if err := svr.Run(int64(master.Params.Port)); err != nil {
        log.Fatal("run server failed", zap.Error(err))
    }

    fmt.Println("Waiting for server!", svr.IsServing())
    Params.MasterAddress = master.Params.Address + ":" + strconv.Itoa(masterPort)
}

func TestMain(m *testing.M) {
    Params.Init()
    refreshChannelNames()
    const ctxTimeInMillisecond = 2000
    const closeWithDeadline = true
    var ctx context.Context

    if closeWithDeadline {
        var cancel context.CancelFunc
        d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
        ctx, cancel = context.WithDeadline(context.Background(), d)
        defer cancel()
    } else {
        ctx = context.Background()
    }

    startMaster(ctx)
    p := Params
    fmt.Println(p)
    exitCode := m.Run()
    os.Exit(exitCode)
}
@ -17,9 +17,7 @@ echo $MILVUS_DIR
go test -race -cover "${MILVUS_DIR}/kv/..." -failfast
# TODO: remove to distributed
#go test -race -cover "${MILVUS_DIR}/proxynode/..." -failfast
#go test -race -cover "${MILVUS_DIR}/writenode/..." -failfast
go test -race -cover "${MILVUS_DIR}/datanode/..." -failfast
#go test -race -cover "${MILVUS_DIR}/master/..." -failfast
#go test -race -cover "${MILVUS_DIR}/indexnode/..." -failfast
#go test -race -cover "${MILVUS_DIR}/msgstream/..." "${MILVUS_DIR}/querynode/..." "${MILVUS_DIR}/storage" "${MILVUS_DIR}/util/..." -failfast
go test -race -cover "${MILVUS_DIR}/querynode/..." -failfast