Add CI and format code for proxy

Signed-off-by: shengjh <1572099106@qq.com>
pull/4973/head^2
shengjh 2020-10-15 16:32:22 +08:00 committed by yefu.chen
parent 9c15fc550e
commit 3d7181617b
122 changed files with 5173 additions and 3470 deletions


@@ -1,58 +0,0 @@
name: Build and test
# TODO: do not trigger this action for documentation-only updates
on: [push, pull_request]
jobs:
ubuntu:
name: AMD64 ubuntu-18.04
runs-on: ubuntu-18.04
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Install Dependency
run: |
./ci/scripts/install_deps.sh
go get github.com/golang/protobuf/protoc-gen-go@v1.3.2
- name: Cache Proxy Thirdparty
id: cache-proxy
uses: actions/cache@v2
with:
path: |
./proxy/cmake_build
key: ${{ runner.os }}-proxy-thirdparty
- name: Cache Core Thirdparty
id: cache-core
uses: actions/cache@v2
with:
path: |
./core/cmake_build
key: ${{ runner.os }}-core-thirdparty
- name: Cache SDK Thirdparty
id: cache-sdk
uses: actions/cache@v2
with:
path: |
./sdk/cmake_build
key: ${{ runner.os }}-sdk-thirdparty
- name: Build Cpp
run: |
./ci/scripts/proxy_build.sh -u
./ci/scripts/core_build.sh -u
./ci/scripts/sdk_build.sh -u
- name: Generate Proto GO File
run: |
echo `pwd`
export PATH=$PATH:$(go env GOPATH)/bin
export protoc=./proxy/cmake_build/thirdparty/grpc/grpc-build/third_party/protobuf/protoc-3.9.0.0
./ci/scripts/proto_gen_go.sh
- name: Build GO
run: |
go build -o ./cmd/writer ./writer/main.go
go build -o ./cmd/reader ./reader/main.go
go build -o ./cmd/master ./cmd/master.go
- name: Docker Pull And Run
run: |
docker-compose up -d
- name: Run Unittest
run: |
./ci/scripts/run_unittest.sh

.gitignore

@@ -26,19 +26,19 @@ cmake_build
proxy/milvus
proxy/cmake_build
proxy/cmake-build-debug
proxy/cmake-build-release
proxy/cmake_build_release
proxy/thirdparty/grpc-src
proxy/thirdparty/grpc-build
proxy/milvus/*
proxy/suvlim/
proxy/suvlim/*
proxy-go/proxy-go
# sdk
sdk/cmake_build
sdk/cmake-build-debug
sdk/cmake-build-release
sdk/cmake_build_release
# Compiled source
*.a

ci/jenkins/Jenkinsfile

@@ -1,196 +0,0 @@
#!/usr/bin/env groovy
@Library('mpl') _
String cron_timezone = "TZ=Asia/Shanghai"
String cron_string = BRANCH_NAME == "master" ? "50 3 * * * " : ""
pipeline {
agent none
triggers {
cron """${cron_timezone}
${cron_string}"""
}
options {
timestamps()
}
parameters{
choice choices: ['Release', 'Debug'], description: 'Build Type', name: 'BUILD_TYPE'
choice choices: ['False', 'True'], description: 'Is Manual Trigger Or Not', name: 'IS_MANUAL_TRIGGER_TYPE'
string defaultValue: 'registry.zilliz.com', description: 'DOCKER REGISTRY URL', name: 'DOCKER_REGISTRY_URL', trim: true
string defaultValue: 'ba070c98-c8cc-4f7c-b657-897715f359fc', description: 'DOCKER CREDENTIALS ID', name: 'DOCKER_CREDENTIALS_ID', trim: true
string defaultValue: 'http://192.168.1.201/artifactory/milvus', description: 'JFROG ARTIFACTORY URL', name: 'JFROG_ARTIFACTORY_URL', trim: true
string defaultValue: '1a527823-d2b7-44fd-834b-9844350baf14', description: 'JFROG CREDENTIALS ID', name: 'JFROG_CREDENTIALS_ID', trim: true
}
environment {
PROJECT_NAME = "milvus"
MILVUS_ROOT_PATH="/var/lib"
MILVUS_INSTALL_PREFIX="${env.MILVUS_ROOT_PATH}/${env.PROJECT_NAME}"
LOWER_BUILD_TYPE = params.BUILD_TYPE.toLowerCase()
SEMVER = "${BRANCH_NAME.contains('/') ? BRANCH_NAME.substring(BRANCH_NAME.lastIndexOf('/') + 1) : BRANCH_NAME}"
PIPELINE_NAME = "milvus-ci"
HELM_BRANCH = "0.11.0"
}
stages {
stage ('Milvus Build and Unittest') {
matrix {
axes {
axis {
name 'OS_NAME'
values 'centos7'
}
axis {
name 'CPU_ARCH'
values 'amd64'
}
axis {
name 'BINARY_VERSION'
values 'cpu'
}
}
environment {
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-${BINARY_VERSION}-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}'
]);
}
agent {
kubernetes {
label "${OS_NAME}-${BINARY_VERSION}-build-${SEMVER}-${env.PIPELINE_NAME}-${env.BUILD_NUMBER}"
defaultContainer 'jnlp'
customWorkspace '/home/jenkins/agent/workspace'
yamlFile "ci/jenkins/pod/milvus-${BINARY_VERSION}-version-${OS_NAME}-build-env-pod.yaml"
}
}
stages {
stage('Build and Unittest') {
steps {
container("milvus-${BINARY_VERSION}-build-env") {
MPLModule('Milvus Build')
MPLModule('Unittest')
MPLModule('Package Build')
}
}
}
}
}
}
stage ('Publish Docker Images') {
matrix {
axes {
axis {
name 'OS_NAME'
values 'centos7'
}
axis {
name 'CPU_ARCH'
values 'amd64'
}
axis {
name 'BINARY_VERSION'
values 'cpu'
}
}
environment {
PACKAGE_VERSION = VersionNumber([
versionNumberString : '${SEMVER}-${BINARY_VERSION}-${OS_NAME}-${CPU_ARCH}-${LOWER_BUILD_TYPE}'
]);
SOURCE_REPO = "${params.DOCKER_REGISTRY_URL}/milvus/engine"
TARGET_REPO = "${params.DOCKER_REGISTRY_URL}/milvus/engine"
SOURCE_TAG = "${CHANGE_TARGET ? CHANGE_TARGET : SEMVER}-${BINARY_VERSION}-${OS_NAME}-${LOWER_BUILD_TYPE}"
TARGET_TAG = "${SEMVER}-${BINARY_VERSION}-${OS_NAME}-${LOWER_BUILD_TYPE}"
DOCKER_BUILDKIT = 1
}
agent {
kubernetes {
label "${OS_NAME}-${BINARY_VERSION}-publish-${SEMVER}-${env.PIPELINE_NAME}-${env.BUILD_NUMBER}"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/docker-pod.yaml'
}
}
stages {
stage('Publish') {
steps {
container('publish-images') {
MPLModule('Publish')
}
}
}
}
}
}
stage ('Dev Test') {
matrix {
axes {
axis {
name 'OS_NAME'
values 'centos7'
}
axis {
name 'CPU_ARCH'
values 'amd64'
}
axis {
name 'BINARY_VERSION'
values 'cpu'
}
}
environment {
DOCKER_VERSION = "${SEMVER}-${BINARY_VERSION}-${OS_NAME}-${LOWER_BUILD_TYPE}"
FORMAT_SEMVER = "${env.SEMVER}".replaceAll("\\.", "-").replaceAll("_", "-")
FORMAT_OS_NAME = "${OS_NAME}".replaceAll("\\.", "-").replaceAll("_", "-")
HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FORMAT_SEMVER}-${env.BUILD_NUMBER}-single-${FORMAT_OS_NAME}-${BINARY_VERSION}".toLowerCase()
SHARDS_HELM_RELEASE_NAME = "${env.PIPELINE_NAME}-${env.FORMAT_SEMVER}-${env.BUILD_NUMBER}-shards-${FORMAT_OS_NAME}-${BINARY_VERSION}".toLowerCase()
DEV_TEST_ARTIFACTS = "_artifacts/${FORMAT_OS_NAME}/${BINARY_VERSION}"
}
agent {
kubernetes {
label "${OS_NAME}-${BINARY_VERSION}-dev-test-${SEMVER}-${env.PIPELINE_NAME}-${env.BUILD_NUMBER}"
defaultContainer 'jnlp'
yamlFile 'ci/jenkins/pod/testEnvironment.yaml'
}
}
stages {
stage('Test') {
steps {
container('milvus-test-env') {
MPLModule('Single Node DevTest')
MPLModule('Mishards DevTest')
}
}
}
}
post {
cleanup {
container('milvus-test-env') {
archiveArtifacts artifacts: "${env.DEV_TEST_ARTIFACTS}/**", allowEmptyArchive: true
MPLModule('Cleanup Single Node DevTest')
MPLModule('Cleanup Mishards DevTest')
}
}
}
}
}
}
post {
unsuccessful {
script {
if (isTimeTriggeredBuild()) {
// Send an email only if the build status has changed from green/unstable to red
emailext subject: '$DEFAULT_SUBJECT',
body: '$DEFAULT_CONTENT',
recipientProviders: [
[$class: 'DevelopersRecipientProvider'],
[$class: 'RequesterRecipientProvider']
],
replyTo: '$DEFAULT_REPLYTO',
to: 'dev.milvus@zilliz.com'
}
}
}
}
}
boolean isTimeTriggeredBuild() {
return currentBuild.getBuildCauses('hudson.triggers.TimerTrigger$TimerTriggerCause').size() != 0;
}


@@ -1,35 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: publish
component: docker
spec:
containers:
- name: publish-images
image: registry.zilliz.com/library/docker:v1.1.0
imagePullPolicy: Always
securityContext:
privileged: true
command:
- cat
tty: true
resources:
limits:
memory: "8Gi"
cpu: "2"
requests:
memory: "2Gi"
cpu: "1"
volumeMounts:
- name: docker-sock
mountPath: /var/run/docker.sock
volumes:
- name: docker-sock
hostPath:
path: /var/run/docker.sock
tolerations:
- key: dedicated
operator: Equal
value: milvus
effect: NoSchedule


@@ -1,35 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: milvus-cpu-build-env
labels:
app: milvus
component: cpu-build-env
spec:
containers:
- name: milvus-cpu-build-env
image: registry.zilliz.com/milvus/milvus-cpu-build-env:v0.10.2-ubuntu18.04
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: OS_NAME
value: "ubuntu18.04"
- name: BUILD_ENV_IMAGE_ID
value: "4e30ebe398d1a10150c625e52f44d5426c71a557afbb3f16ee9cea20e52e1b9d"
command:
- cat
tty: true
resources:
limits:
memory: "14Gi"
cpu: "6.0"
requests:
memory: "8Gi"
cpu: "4.0"
tolerations:
- key: dedicated
operator: Equal
value: milvus
effect: NoSchedule


@@ -1,33 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
labels:
app: milvus
component: test-env
spec:
containers:
- name: milvus-test-env
image: registry.zilliz.com/milvus/milvus-test-env:v0.2
command:
- cat
tty: true
resources:
limits:
memory: "8Gi"
cpu: "4.0"
requests:
memory: "4Gi"
cpu: "2.0"
volumeMounts:
- name: kubeconf
mountPath: /root/.kube/
readOnly: true
volumes:
- name: kubeconf
secret:
secretName: test-cluster-config
tolerations:
- key: dedicated
operator: Equal
value: milvus
effect: NoSchedule


@@ -1,15 +0,0 @@
#!/bin/bash
set -ex
export CCACHE_COMPRESS=1
export CCACHE_COMPRESSLEVEL=5
export CCACHE_COMPILERCHECK=content
export PATH=/usr/lib/ccache/:$PATH
export CCACHE_BASEDIR=${WORKSPACE:=""}
export CCACHE_DIR=${CCACHE_DIR:="${HOME}/.ccache"}
export CCACHE_COMPRESS_PACKAGE_FILE=${CCACHE_COMPRESS_PACKAGE_FILE:="ccache-${OS_NAME}-${BUILD_ENV_IMAGE_ID}.tar.gz"}
export CUSTOM_THIRDPARTY_DOWNLOAD_PATH=${CUSTOM_THIRDPARTY_DOWNLOAD_PATH:="${HOME}/3rdparty_download"}
export THIRDPARTY_COMPRESS_PACKAGE_FILE=${THIRDPARTY_COMPRESS_PACKAGE_FILE:="thirdparty-download.tar.gz"}
set +ex


@@ -1,114 +0,0 @@
#!/bin/bash
HELP="
Usage:
$0 [flags] [Arguments]
-l [ARTIFACTORY_URL] Artifactory URL
--cache_dir=[CACHE_DIR] Cache directory
-f [FILE] or --file=[FILE] Cache compress package file
-h or --help Print help information
Use \"$0 --help\" for more information about a given command.
"
ARGS=$(getopt -o "l:f:h" -l "cache_dir::,file::,help" -n "$0" -- "$@")
eval set -- "${ARGS}"
while true ; do
case "$1" in
-l)
# o has an optional argument. As we are in quoted mode,
# an empty parameter will be generated if its optional
# argument is not found.
case "$2" in
"") echo "Option Artifactory URL, no argument"; exit 1 ;;
*) ARTIFACTORY_URL=$2 ; shift 2 ;;
esac ;;
--cache_dir)
case "$2" in
"") echo "Option cache_dir, no argument"; exit 1 ;;
*) CACHE_DIR=$2 ; shift 2 ;;
esac ;;
-f|--file)
case "$2" in
"") echo "Option file, no argument"; exit 1 ;;
*) PACKAGE_FILE=$2 ; shift 2 ;;
esac ;;
-h|--help) echo -e "${HELP}" ; exit 0 ;;
--) shift ; break ;;
*) echo "Internal error!" ; exit 1 ;;
esac
done
# Set defaults for vars modified by flags to this script
BRANCH_NAMES=$(git log --decorate | head -n 1 | sed 's/.*(\(.*\))/\1/' | sed 's=[a-zA-Z]*\/==g' | awk -F", " '{$1=""; print $0}')
if [[ -z "${ARTIFACTORY_URL}" || "${ARTIFACTORY_URL}" == "" ]];then
echo "ARTIFACTORY_URL is not set!"
exit 1
fi
if [[ -z "${CACHE_DIR}" ]]; then
echo "CACHE_DIR is not set!"
exit 1
fi
if [[ -z "${PACKAGE_FILE}" ]]; then
echo "PACKAGE_FILE is not set!"
exit 1
fi
function check_cache() {
BRANCH=$1
echo "fetching ${BRANCH}/${PACKAGE_FILE}"
wget -q --spider "${ARTIFACTORY_URL}/${BRANCH}/${PACKAGE_FILE}"
return $?
}
function download_file() {
BRANCH=$1
wget -q "${ARTIFACTORY_URL}/${BRANCH}/${PACKAGE_FILE}" && \
mkdir -p "${CACHE_DIR}" && \
tar zxf "${PACKAGE_FILE}" -C "${CACHE_DIR}" && \
rm ${PACKAGE_FILE}
return $?
}
if [[ -n "${CHANGE_TARGET}" && "${BRANCH_NAME}" =~ "PR-" ]];then
check_cache ${CHANGE_TARGET}
if [[ $? == 0 ]];then
download_file ${CHANGE_TARGET}
if [[ $? == 0 ]];then
echo "found cache"
exit 0
fi
fi
check_cache ${BRANCH_NAME}
if [[ $? == 0 ]];then
download_file ${BRANCH_NAME}
if [[ $? == 0 ]];then
echo "found cache"
exit 0
fi
fi
fi
for CURRENT_BRANCH in ${BRANCH_NAMES}
do
if [[ "${CURRENT_BRANCH}" != "HEAD" ]];then
check_cache ${CURRENT_BRANCH}
if [[ $? == 0 ]];then
download_file ${CURRENT_BRANCH}
if [[ $? == 0 ]];then
echo "found cache"
exit 0
fi
fi
fi
done
echo "could not download cache" && exit 1


@@ -1,169 +0,0 @@
#!/usr/bin/env bash
# Compile jobs variable; Usage: $ jobs=12 ./core_build.sh ...
if [[ ! ${jobs+1} ]]; then
jobs=$(nproc)
fi
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
CPP_SRC_DIR="${SCRIPTS_DIR}/../../core"
CPP_BUILD_DIR="${CPP_SRC_DIR}/cmake_build"
BUILD_OUTPUT_DIR=${CPP_BUILD_DIR}
BUILD_TYPE="Release"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX="${CPP_SRC_DIR}/milvus"
MAKE_CLEAN="OFF"
BUILD_COVERAGE="OFF"
DB_PATH="/tmp/milvus"
PROFILING="OFF"
RUN_CPPLINT="OFF"
CUDA_COMPILER=/usr/local/cuda/bin/nvcc
GPU_VERSION="OFF" #defaults to CPU version
WITH_PROMETHEUS="ON"
CUDA_ARCH="DEFAULT"
CUSTOM_THIRDPARTY_PATH=""
while getopts "p:d:t:s:f:ulrcghzme" arg; do
case $arg in
f)
CUSTOM_THIRDPARTY_PATH=$OPTARG
;;
p)
INSTALL_PREFIX=$OPTARG
;;
d)
DB_PATH=$OPTARG
;;
t)
BUILD_TYPE=$OPTARG # BUILD_TYPE
;;
u)
echo "Build and run unittest cases"
BUILD_UNITTEST="ON"
;;
l)
RUN_CPPLINT="ON"
;;
r)
if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
MAKE_CLEAN="ON"
fi
;;
c)
BUILD_COVERAGE="ON"
;;
z)
PROFILING="ON"
;;
g)
GPU_VERSION="ON"
;;
e)
WITH_PROMETHEUS="OFF"
;;
s)
CUDA_ARCH=$OPTARG
;;
h) # help
echo "
parameter:
-f: custom paths of thirdparty downloaded files(default: NULL)
-p: install prefix(default: $(pwd)/milvus)
-d: db data path(default: /tmp/milvus)
-t: build type(default: Release)
-u: build and run unit tests(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-r: remove previous build directory(default: OFF)
-c: code coverage(default: OFF)
-z: profiling(default: OFF)
-g: build GPU version(default: OFF)
-e: build without prometheus(default: OFF)
-s: build with CUDA arch(default:DEFAULT), for example '-gencode=compute_61,code=sm_61;-gencode=compute_75,code=sm_75'
-h: help
usage:
./core_build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} -s \${CUDA_ARCH} -f\${CUSTOM_THIRDPARTY_PATH} [-u] [-l] [-r] [-c] [-z] [-g] [-m] [-e] [-h]
"
exit 0
;;
?)
echo "ERROR! unknown argument"
exit 1
;;
esac
done
if [[ ! -d ${BUILD_OUTPUT_DIR} ]]; then
mkdir ${BUILD_OUTPUT_DIR}
fi
pushd ${BUILD_OUTPUT_DIR}
# remove make cache since build.sh -l uses default variables
# force update the variables each time
make rebuild_cache >/dev/null 2>&1
if [[ ${MAKE_CLEAN} == "ON" ]]; then
echo "Running make clean in ${BUILD_OUTPUT_DIR} ..."
make clean
exit 0
fi
CMAKE_CMD="cmake \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DOpenBLAS_SOURCE=AUTO \
-DCMAKE_CUDA_COMPILER=${CUDA_COMPILER} \
-DBUILD_COVERAGE=${BUILD_COVERAGE} \
-DMILVUS_DB_PATH=${DB_PATH} \
-DENABLE_CPU_PROFILING=${PROFILING} \
-DMILVUS_GPU_VERSION=${GPU_VERSION} \
-DMILVUS_WITH_PROMETHEUS=${WITH_PROMETHEUS} \
-DMILVUS_CUDA_ARCH=${CUDA_ARCH} \
-DCUSTOM_THIRDPARTY_DOWNLOAD_PATH=${CUSTOM_THIRDPARTY_PATH} \
${CPP_SRC_DIR}"
echo ${CMAKE_CMD}
${CMAKE_CMD}
if [[ ${RUN_CPPLINT} == "ON" ]]; then
# cpplint check
make lint
if [ $? -ne 0 ]; then
echo "ERROR! cpplint check failed"
exit 1
fi
echo "cpplint check passed!"
# clang-format check
make check-clang-format
if [ $? -ne 0 ]; then
echo "ERROR! clang-format check failed"
exit 1
fi
echo "clang-format check passed!"
# clang-tidy check
make check-clang-tidy
if [ $? -ne 0 ]; then
echo "ERROR! clang-tidy check failed"
exit 1
fi
echo "clang-tidy check passed!"
else
# compile and build
make -j ${jobs} install || exit 1
fi
popd


@@ -1,6 +0,0 @@
#!/usr/bin/env bash
sudo apt install -y g++ gcc make libssl-dev zlib1g-dev libboost-regex-dev \
libboost-program-options-dev libboost-system-dev libboost-filesystem-dev \
libboost-serialization-dev python3-dev libboost-python-dev libcurl4-openssl-dev gfortran libtbb-dev


@@ -1,4 +0,0 @@
#!/usr/bin/env bash
${protoc} --go_out=plugins=grpc,paths=source_relative:. pkg/master/grpc/master/master.proto
${protoc} --go_out=plugins=grpc,paths=source_relative:. pkg/master/grpc/message/message.proto
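
For reference, downstream Go code consumes the packages these two commands generate. A minimal sketch, assuming the generated message package is importable at its source-relative path (the import alias and field values below are illustrative only):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/czs007/suvlim/pkg/master/grpc/message" // assumed path, mirroring paths=source_relative above
)

func main() {
	// TimeSyncMsg is one of the messages generated from message.proto.
	msg := &pb.TimeSyncMsg{Peer_Id: 1, Timestamp: 42}
	buf, err := proto.Marshal(msg) // wire-format bytes, e.g. for a message-queue payload
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes\n", len(buf))
}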


@@ -1,133 +0,0 @@
#!/usr/bin/env bash
# Compile jobs variable; Usage: $ jobs=12 ./proxy_build.sh ...
if [[ ! ${jobs+1} ]]; then
jobs=$(nproc)
fi
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
CPP_SRC_DIR="${SCRIPTS_DIR}/../../proxy"
CPP_BUILD_DIR="${CPP_SRC_DIR}/cmake_build"
BUILD_OUTPUT_DIR="${CPP_BUILD_DIR}"
BUILD_TYPE="Release"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX="${CPP_SRC_DIR}/milvus"
MAKE_CLEAN="OFF"
DB_PATH="/tmp/milvus"
RUN_CPPLINT="OFF"
while getopts "p:d:t:s:ulrcghzme" arg; do
case $arg in
p)
INSTALL_PREFIX=$OPTARG
;;
d)
DB_PATH=$OPTARG
;;
t)
BUILD_TYPE=$OPTARG # BUILD_TYPE
;;
u)
echo "Build and run unittest cases"
BUILD_UNITTEST="ON"
;;
l)
RUN_CPPLINT="ON"
;;
r)
if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
MAKE_CLEAN="ON"
fi
;;
h) # help
echo "
parameter:
-p: install prefix(default: ${CPP_SRC_DIR}/milvus)
-d: db data path(default: /tmp/milvus)
-t: build type(default: Release)
-u: build and run unit tests(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-r: remove previous build directory(default: OFF)
-h: help
usage:
./proxy_build.sh -p \${INSTALL_PREFIX} -t \${BUILD_TYPE} [-u] [-l] [-r] [-h]
"
exit 0
;;
?)
echo "ERROR! unknown argument"
exit 1
;;
esac
done
if [[ ! -d ${BUILD_OUTPUT_DIR} ]]; then
mkdir ${BUILD_OUTPUT_DIR}
fi
pushd ${CPP_BUILD_DIR}
# remove make cache since build.sh -l uses default variables
# force update the variables each time
make rebuild_cache >/dev/null 2>&1
if [[ ${MAKE_CLEAN} == "ON" ]]; then
echo "Running make clean in ${BUILD_OUTPUT_DIR} ..."
make clean
exit 0
fi
CMAKE_CMD="cmake \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX} \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DOpenBLAS_SOURCE=AUTO \
-DMILVUS_DB_PATH=${DB_PATH} \
${CPP_SRC_DIR}"
echo ${CMAKE_CMD}
${CMAKE_CMD}
if [[ ${RUN_CPPLINT} == "ON" ]]; then
# cpplint check
make lint
if [ $? -ne 0 ]; then
echo "ERROR! cpplint check failed"
exit 1
fi
echo "cpplint check passed!"
# clang-format check
make check-clang-format
if [ $? -ne 0 ]; then
echo "ERROR! clang-format check failed"
exit 1
fi
echo "clang-format check passed!"
# clang-tidy check
# make check-clang-tidy
# if [ $? -ne 0 ]; then
# echo "ERROR! clang-tidy check failed"
# exit 1
# fi
# echo "clang-tidy check passed!"
else
# compile and build
make -j ${jobs} install || exit 1
fi
popd


@@ -1,44 +0,0 @@
#!/usr/bin/env bash
set -e
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
MILVUS_CORE_DIR="${SCRIPTS_DIR}/../../core"
MILVUS_PROXY_DIR="${SCRIPTS_DIR}/../../proxy"
CORE_INSTALL_PREFIX="${MILVUS_CORE_DIR}/milvus"
PROXY_INSTALL_PREFIX="${MILVUS_PROXY_DIR}/milvus"
UNITTEST_DIRS=("${CORE_INSTALL_PREFIX}/unittest" "${PROXY_INSTALL_PREFIX}/unittest")
# Currently core will install target lib to "core/lib"
if [ -d "${MILVUS_CORE_DIR}/lib" ]; then
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MILVUS_CORE_DIR}/lib
fi
# run unittest
for UNITTEST_DIR in "${UNITTEST_DIRS[@]}"; do
if [ ! -d "${UNITTEST_DIR}" ]; then
echo "The unittest folder does not exist!"
exit 1
fi
for test in `ls ${UNITTEST_DIR}`; do
echo $test " running..."
# run unittest
# ${UNITTEST_DIR}/${test}
if [ $? -ne 0 ]; then
echo ${UNITTEST_DIR}/${test} "run failed"
exit 1
fi
done
done
# ignore MinIO and S3 unittests
MILVUS_DIR="${SCRIPTS_DIR}/../../"
echo $MILVUS_DIR
go test "${MILVUS_DIR}/storage/internal/tikv/..." "${MILVUS_DIR}/reader/..." "${MILVUS_DIR}/writer/..." "${MILVUS_DIR}/pkg/master/..." -failfast
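
With -failfast, the first failing test aborts the remainder of that package's run. A minimal sketch of a table-driven test of the kind these packages could contain (the package name and in-memory store are hypothetical):

package tikv_test

import "testing"

func TestPutGet(t *testing.T) {
	store := map[string]string{"k1": "v1", "k2": "v2"} // stand-in for a real KV store
	cases := []struct{ key, want string }{
		{"k1", "v1"},
		{"k2", "v2"},
	}
	for _, c := range cases {
		// Under -failfast, the first t.Fatalf stops the remaining tests in this package.
		if got := store[c.key]; got != c.want {
			t.Fatalf("get(%q) = %q, want %q", c.key, got, c.want)
		}
	}
}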


@@ -1,99 +0,0 @@
#!/bin/bash
SOURCE="${BASH_SOURCE[0]}"
SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
BUILD_OUTPUT_DIR=${SCRIPTS_DIR}"/../../sdk/cmake_build"
BUILD_TYPE="Release"
MAKE_CLEAN="OFF"
RUN_CPPLINT="OFF"
BUILD_UNITTEST="OFF"
while getopts "p:d:t:f:ulrcgjhxzme" arg; do
case $arg in
t)
BUILD_TYPE=$OPTARG # BUILD_TYPE
;;
u)
echo "Build and run unittest cases"
BUILD_UNITTEST="ON"
;;
l)
RUN_CPPLINT="ON"
;;
r)
if [[ -d ${BUILD_OUTPUT_DIR} ]]; then
rm -r "${BUILD_OUTPUT_DIR}"
MAKE_CLEAN="ON"
fi
;;
h) # help
echo "
parameter:
-t: build type(default: Release)
-u: build and run unit tests(default: OFF)
-l: run cpplint, clang-format and clang-tidy(default: OFF)
-h: help
usage:
./build.sh -t \${BUILD_TYPE} -f \${FAISS_ROOT} [-u] [-l] [-r] [-h]
"
exit 0
;;
?)
echo "ERROR! unknown argument"
exit 1
;;
esac
done
if [[ ! -d ${BUILD_OUTPUT_DIR} ]]; then
mkdir ${BUILD_OUTPUT_DIR}
fi
cd ${BUILD_OUTPUT_DIR}
# remove make cache since build.sh -l uses default variables
# force update the variables each time
make rebuild_cache >/dev/null 2>&1
CMAKE_CMD="cmake \
-DCMAKE_BUILD_TYPE=${BUILD_TYPE} \
-DBUILD_UNIT_TEST=${BUILD_UNITTEST} \
../"
echo ${CMAKE_CMD}
${CMAKE_CMD}
if [[ ${MAKE_CLEAN} == "ON" ]]; then
make clean
fi
if [[ ${RUN_CPPLINT} == "ON" ]]; then
# cpplint check
make lint
if [ $? -ne 0 ]; then
echo "ERROR! cpplint check failed"
exit 1
fi
echo "cpplint check passed!"
# clang-format check
make check-clang-format
if [ $? -ne 0 ]; then
echo "ERROR! clang-format check failed"
exit 1
fi
echo "clang-format check passed!"
# # clang-tidy check
# make check-clang-tidy
# if [ $? -ne 0 ]; then
# echo "ERROR! clang-tidy check failed"
# exit 1
# fi
# echo "clang-tidy check passed!"
else
# compile and build
make -j 8 || exit 1
fi


@@ -1,104 +0,0 @@
#!/bin/bash
HELP="
Usage:
$0 [flags] [Arguments]
-l [ARTIFACTORY_URL] Artifactory URL
--cache_dir=[CACHE_DIR] Cache directory
-f [FILE] or --file=[FILE] Cache compress package file
-u [USERNAME] Artifactory Username
-p [PASSWORD] Artifactory Password
-h or --help Print help information
Use \"$0 --help\" for more information about a given command.
"
ARGS=$(getopt -o "l:f:u:p:h" -l "cache_dir::,file::,help" -n "$0" -- "$@")
eval set -- "${ARGS}"
while true ; do
case "$1" in
-l)
# o has an optional argument. As we are in quoted mode,
# an empty parameter will be generated if its optional
# argument is not found.
case "$2" in
"") echo "Option Artifactory URL, no argument"; exit 1 ;;
*) ARTIFACTORY_URL=$2 ; shift 2 ;;
esac ;;
--cache_dir)
case "$2" in
"") echo "Option cache_dir, no argument"; exit 1 ;;
*) CACHE_DIR=$2 ; shift 2 ;;
esac ;;
-u)
case "$2" in
"") echo "Option Username, no argument"; exit 1 ;;
*) USERNAME=$2 ; shift 2 ;;
esac ;;
-p)
case "$2" in
"") echo "Option Password, no argument"; exit 1 ;;
*) PASSWORD=$2 ; shift 2 ;;
esac ;;
-f|--file)
case "$2" in
"") echo "Option file, no argument"; exit 1 ;;
*) PACKAGE_FILE=$2 ; shift 2 ;;
esac ;;
-h|--help) echo -e "${HELP}" ; exit 0 ;;
--) shift ; break ;;
*) echo "Internal error!" ; exit 1 ;;
esac
done
# Set defaults for vars modified by flags to this script
BRANCH_NAME=$(git log --decorate | head -n 1 | sed 's/.*(\(.*\))/\1/' | sed 's/.*, //' | sed 's=[a-zA-Z]*\/==g')
if [[ -z "${ARTIFACTORY_URL}" || "${ARTIFACTORY_URL}" == "" ]];then
echo "ARTIFACTORY_URL is not set!"
exit 1
fi
if [[ ! -d "${CACHE_DIR}" ]]; then
echo "\"${CACHE_DIR}\" directory does not exist!"
exit 1
fi
if [[ -z "${PACKAGE_FILE}" ]]; then
echo "PACKAGE_FILE is not set!"
exit 1
fi
function check_cache() {
BRANCH=$1
wget -q --spider "${ARTIFACTORY_URL}/${BRANCH}/${PACKAGE_FILE}"
return $?
}
if [[ -n "${CHANGE_TARGET}" && "${BRANCH_NAME}" =~ "PR-" ]]; then
check_cache ${CHANGE_TARGET}
if [[ $? == 0 ]];then
echo "Skipping cache package update ..." && exit 0
fi
fi
if [[ "${BRANCH_NAME}" != "HEAD" ]];then
REMOTE_PACKAGE_PATH="${ARTIFACTORY_URL}/${BRANCH_NAME}"
echo "Updating cache package file: ${PACKAGE_FILE}"
tar zcf ./"${PACKAGE_FILE}" -C "${CACHE_DIR}" .
echo "Uploading cache package file ${PACKAGE_FILE} to ${REMOTE_PACKAGE_PATH}"
curl -u"${USERNAME}":"${PASSWORD}" -T "${PACKAGE_FILE}" "${REMOTE_PACKAGE_PATH}"/"${PACKAGE_FILE}"
if [[ $? == 0 ]];then
echo "Cache package file uploaded successfully!"
exit 0
else
echo "Uploading cache package file failed!"
exit 1
fi
fi
echo "Skipping cache package update ..."


@@ -1,11 +1,6 @@
package main
import (
"flag"
"fmt"
"github.com/czs007/suvlim/conf"
"github.com/czs007/suvlim/pkg/master"
)
import "github.com/czs007/suvlim/pkg/master"
// func main() {
// ctx, cancel := context.WithCancel(context.Background())
@@ -25,13 +20,6 @@ func init() {
// go mock.FakePulsarProducer()
}
func main() {
var yamlFile string
flag.StringVar(&yamlFile, "yaml", "", "yaml file")
flag.Parse()
// flag.Usage()
fmt.Println("yaml file: ", yamlFile)
conf.LoadConfig(yamlFile)
master.Run()
//master.SegmentStatsController()
//master.CollectionController()


@@ -42,19 +42,42 @@ type StorageConfig struct {
}
type PulsarConfig struct {
Authentication bool
User string
Token string
Address string
Port int32
TopicNum int
Address string
Port int32
TopicNum int
}
//type ProxyConfig struct {
// Timezone string
// Address string
// Port int32
//}
type ProxyConfig struct {
Timezone string `yaml:"timezone"`
ProxyId int `yaml:"proxy_id"`
NumReaderNodes int `yaml:"numReaderNodes"`
TosSaveInterval int `yaml:"tsoSaveInterval"`
TimeTickInterval int `yaml:"timeTickInterval"`
PulsarTopics struct {
ReaderTopicPrefix string `yaml:"readerTopicPrefix"`
NumReaderTopics int `yaml:"numReaderTopics"`
DeleteTopic string `yaml:"deleteTopic"`
QueryTopic string `yaml:"queryTopic"`
ResultTopic string `yaml:"resultTopic"`
ResultGroup string `yaml:"resultGroup"`
TimeTickTopic string `yaml:"timeTickTopic"`
} `yaml:"pulsarTopics"`
Network struct {
Address string `yaml:"address"`
Port int `yaml:"port"`
} `yaml:"network"`
Logs struct {
Level string `yaml:"level"`
TraceEnable bool `yaml:"trace.enable"`
Path string `yaml:"path"`
MaxLogFileSize string `yaml:"max_log_file_size"`
LogRotateNum int `yaml:"log_rotate_num"`
} `yaml:"logs"`
Storage struct {
Path string `yaml:"path"`
AutoFlushInterval int `yaml:"auto_flush_interval"`
} `yaml:"storage"`
}
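
The yaml tags above map the struct onto the proxy section of the config file shown later in this commit. A minimal sketch of that mapping, assuming gopkg.in/yaml.v2 (the same Unmarshal call load_config uses); the struct copy is trimmed and the port value is hypothetical:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Trimmed copy of ProxyConfig, for illustration only.
type ProxyConfig struct {
	Timezone string `yaml:"timezone"`
	ProxyId  int    `yaml:"proxy_id"`
	Network  struct {
		Address string `yaml:"address"`
		Port    int    `yaml:"port"`
	} `yaml:"network"`
}

func main() {
	doc := []byte("timezone: UTC+8\nproxy_id: 1\nnetwork:\n  address: 0.0.0.0\n  port: 19530\n")
	var cfg ProxyConfig
	if err := yaml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // => {Timezone:UTC+8 ProxyId:1 Network:{Address:0.0.0.0 Port:19530}}
}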
type Reader struct {
ClientId int
@@ -71,10 +94,8 @@ type Writer struct {
StopFlag int64
ReaderQueueSize int
SearchByIdChanSize int
Parallelism int
TopicStart int
TopicEnd int
Bucket string
}
type ServerConfig struct {
@@ -85,14 +106,14 @@ type ServerConfig struct {
Pulsar PulsarConfig
Writer Writer
Reader Reader
//Proxy ProxyConfig
Proxy ProxyConfig
}
var Config ServerConfig
// func init() {
// load_config()
// }
func init() {
load_config()
}
func getCurrentFileDir() string {
_, fpath, _, _ := runtime.Caller(0)
@@ -111,16 +132,3 @@ func load_config() {
}
//fmt.Printf("Result: %v\n", Config)
}
func LoadConfig(yamlFile string) {
filePath := path.Join(getCurrentFileDir(), yamlFile)
source, err := ioutil.ReadFile(filePath)
if err != nil {
panic(err)
}
err = yaml.Unmarshal(source, &Config)
if err != nil {
panic(err)
}
//fmt.Printf("Result: %v\n", Config)
}


@@ -14,7 +14,7 @@ master:
port: 53100
pulsarmoniterinterval: 1
pulsartopic: "monitor-topic"
segmentthreshole: 1073741824
segmentthreshole: 104857600
proxyidlist: [0]
querynodenum: 1
writenodenum: 1
@@ -25,7 +25,7 @@ etcd:
rootpath: by-dev
segthreshold: 10000
timesync:
timesync:
interval: 400
storage:
@@ -36,9 +36,6 @@ storage:
secretkey:
pulsar:
authentication: false
user: user-default
token: eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY
address: localhost
port: 6650
topicnum: 128
@@ -57,14 +54,24 @@ writer:
stopflag: -2
readerqueuesize: 10000
searchbyidchansize: 10000
parallelism: 100
topicstart: 0
topicend: 128
bucket: "zilliz-hz"
proxy:
timezone: UTC+8
proxy_id: 0
proxy_id: 1
numReaderNodes: 2
tsoSaveInterval: 200
timeTickInterval: 200
pulsarTopics:
readerTopicPrefix: "milvusReader"
numReaderTopics: 2
deleteTopic: "milvusDeleter"
queryTopic: "milvusQuery"
resultTopic: "milvusResult"
resultGroup: "milvusResultGroup"
timeTickTopic: "milvusTimeTick"
network:
address: 0.0.0.0


@@ -5,8 +5,8 @@ if [[ ! ${jobs+1} ]]; then
jobs=$(nproc)
fi
BUILD_OUTPUT_DIR="cmake-build-release"
BUILD_TYPE="Release"
BUILD_OUTPUT_DIR="cmake-build-debug"
BUILD_TYPE="Debug"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX=$(pwd)/milvus
MAKE_CLEAN="OFF"


@@ -1,6 +1,5 @@
aux_source_directory(${CMAKE_CURRENT_SOURCE_DIR}/../pb PB_SRC_FILES)
# add_definitions(-DBOOST_STACKTRACE_USE_ADDR2LINE)
set(DOG_SEGMENT_FILES
SegmentNaive.cpp
IndexMeta.cpp
@@ -10,7 +9,6 @@ set(DOG_SEGMENT_FILES
collection_c.cpp
partition_c.cpp
segment_c.cpp
EasyAssert.cpp
${PB_SRC_FILES}
)
add_library(milvus_dog_segment SHARED
@@ -20,5 +18,4 @@ add_library(milvus_dog_segment SHARED
#add_dependencies( segment sqlite mysqlpp )
target_link_libraries(milvus_dog_segment tbb utils pthread knowhere log libprotobuf dl backtrace
)
target_link_libraries(milvus_dog_segment tbb utils pthread knowhere log libprotobuf)


@@ -1,26 +0,0 @@
#include <iostream>
#include "EasyAssert.h"
// #define BOOST_STACKTRACE_USE_ADDR2LINE
#define BOOST_STACKTRACE_USE_BACKTRACE
#include <boost/stacktrace.hpp>
namespace milvus::impl {
void EasyAssertInfo(bool value, std::string_view expr_str, std::string_view filename, int lineno,
std::string_view extra_info) {
if (!value) {
std::string info;
info += "Assert \"" + std::string(expr_str) + "\"";
info += " at " + std::string(filename) + ":" + std::to_string(lineno) + "\n";
if(!extra_info.empty()) {
info += " => " + std::string(extra_info);
}
auto trace = boost::stacktrace::stacktrace();
std::cout << trace;
// std::string s = trace;
// info += ;
throw std::runtime_error(info);
}
}
}


@@ -1,13 +1,18 @@
#pragma once
#include <string_view>
#include <stdio.h>
#include <stdlib.h>
/* Paste this into the file you want to debug. */
namespace milvus::impl {
inline
void EasyAssertInfo(bool value, std::string_view expr_str, std::string_view filename, int lineno,
std::string_view extra_info);
std::string_view extra_info) {
if (!value) {
std::string info;
info += "Assert \"" + std::string(expr_str) + "\"";
info += " at " + std::string(filename) + ":" + std::to_string(lineno);
info += " => " + std::string(extra_info);
throw std::runtime_error(info);
}
}
}
#define AssertInfo(expr, info) impl::EasyAssertInfo(bool(expr), #expr, __FILE__, __LINE__, (info))


@@ -171,7 +171,9 @@ class Schema {
const FieldMeta&
operator[](const std::string& field_name) const {
auto offset_iter = offsets_.find(field_name);
AssertInfo(offset_iter != offsets_.end(), "Cannot find field_name: " + field_name);
if (offset_iter == offsets_.end()) {
throw std::runtime_error("Cannot find field_name: " + field_name);
}
auto offset = offset_iter->second;
return (*this)[offset];
}


@@ -96,6 +96,9 @@ auto SegmentNaive::get_deleted_bitmap(int64_t del_barrier, Timestamp query_times
if (offset >= insert_barrier) {
continue;
}
if (offset >= insert_barrier) {
continue;
}
if (record_.timestamps_[offset] < query_timestamp) {
Assert(offset < insert_barrier);
the_offset = std::max(the_offset, offset);
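
The rule being applied here: a delete record is only visible to a query if its offset lies below the insert barrier and its timestamp precedes the query timestamp, and the largest such offset wins. A hedged Go restatement of that selection rule (names are illustrative, not from the codebase):

// latestVisibleDelete returns the largest delete offset visible to a query:
// the offset must fall below the insert barrier and its timestamp must
// precede the query timestamp. It returns -1 if no record qualifies.
func latestVisibleDelete(deleteOffsets []int64, timestamps []uint64, insertBarrier int64, queryTimestamp uint64) int64 {
	theOffset := int64(-1)
	for _, offset := range deleteOffsets {
		if offset >= insertBarrier {
			continue // inserted at or after the barrier; not visible
		}
		if timestamps[offset] < queryTimestamp && offset > theOffset {
			theOffset = offset
		}
	}
	return theOffset
}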


@@ -21,5 +21,4 @@ target_link_libraries(all_tests
knowhere
log
pthread
)
install (TARGETS all_tests DESTINATION unittest)
)


@@ -24,34 +24,6 @@ using std::cin;
using std::cout;
using std::endl;
namespace {
auto
generate_data(int N) {
std::vector<char> raw_data;
std::vector<uint64_t> timestamps;
std::vector<int64_t> uids;
std::default_random_engine er(42);
std::normal_distribution<> distribution(0.0, 1.0);
std::default_random_engine ei(42);
for (int i = 0; i < N; ++i) {
uids.push_back(10 * N + i);
timestamps.push_back(0);
// append vec
float vec[16];
for (auto& x : vec) {
x = distribution(er);
}
raw_data.insert(
raw_data.end(), (const char*)std::begin(vec), (const char*)std::end(vec));
int age = ei() % 100;
raw_data.insert(
raw_data.end(), (const char*)&age, ((const char*)&age) + sizeof(age));
}
return std::make_tuple(raw_data, timestamps, uids);
}
} // namespace
TEST(DogSegmentTest, TestABI) {
using namespace milvus::engine;
@@ -60,20 +32,6 @@ TEST(DogSegmentTest, TestABI) {
assert(true);
}
TEST(DogSegmentTest, NormalDistributionTest) {
using namespace milvus::dog_segment;
using namespace milvus::engine;
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("age", DataType::INT32);
int N = 1000* 1000;
auto [raw_data, timestamps, uids] = generate_data(N);
auto segment = CreateSegment(schema);
segment->PreInsert(N);
segment->PreDelete(N);
}
TEST(DogSegmentTest, MockTest) {
using namespace milvus::dog_segment;


@@ -1,60 +0,0 @@
version: '3.5'
services:
etcd:
image: quay.io/coreos/etcd:latest
command: etcd -listen-peer-urls=http://127.0.0.1:12380 -advertise-client-urls=http://127.0.0.1:12379 -listen-client-urls http://0.0.0.0:12379,http://0.0.0.0:14001 -initial-advertise-peer-urls=http://127.0.0.1:12380 --initial-cluster default=http://127.0.0.1:12380
ports:
- "12379:12379"
- "12380:12380"
- "14001:14001"
pulsar:
image: apachepulsar/pulsar:latest
command: bin/pulsar standalone
ports:
- "6650:6650"
- "18080:8080"
pd0:
image: pingcap/pd:latest
network_mode: "host"
ports:
- "2379:2379"
- "2380:2380"
volumes:
- /tmp/config/pd.toml:/pd.toml:ro
- /tmp/data:/data
- /tmp/logs:/logs
- /etc/localtime:/etc/localtime:ro
command:
- --name=pd0
- --client-urls=http://0.0.0.0:2379
- --peer-urls=http://0.0.0.0:2380
- --advertise-client-urls=http://127.0.0.1:2379
- --advertise-peer-urls=http://127.0.0.1:2380
- --initial-cluster=pd0=http://127.0.0.1:2380
- --data-dir=/data/pd0
- --log-file=/logs/pd0.log
restart: on-failure
tikv0:
network_mode: "host"
image: pingcap/tikv:latest
ports:
- "20160:20160"
volumes:
- /tmp/config/tikv.toml:/tikv.toml:ro
- /tmp/data:/data
- /tmp/logs:/logs
- /etc/localtime:/etc/localtime:ro
command:
- --addr=0.0.0.0:20160
- --advertise-addr=127.0.0.1:20160
- --data-dir=/data/tikv0
- --pd=127.0.0.1:2379
- --log-file=/logs/tikv0.log
depends_on:
- "pd0"
restart: on-failure
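
The etcd container above exposes its client API on the non-default port 12379. A minimal sketch of reaching it from Go with go.etcd.io/etcd (already a dependency in go.mod); the key is illustrative, though "by-dev" matches the rootpath in the config file:

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:12379"}, // port mapped by the compose file above
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if _, err := cli.Put(ctx, "by-dev/health", "ok"); err != nil {
		panic(err)
	}
	fmt.Println("etcd reachable")
}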


@@ -1,57 +0,0 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
FROM ubuntu:18.04
# pipefail is enabled for proper error detection in the `wget | apt-key add`
# step
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get install -y --no-install-recommends wget ca-certificates gnupg2 && \
wget -P /tmp https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB && \
apt-key add /tmp/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB && \
sh -c 'echo deb https://apt.repos.intel.com/mkl all main > /etc/apt/sources.list.d/intel-mkl.list' && \
wget -qO- "https://cmake.org/files/v3.14/cmake-3.14.3-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local && \
apt-get update && apt-get install -y --no-install-recommends \
g++ git gfortran lsb-core \
libboost-serialization-dev libboost-filesystem-dev libboost-system-dev libboost-regex-dev \
curl libtool automake libssl-dev pkg-config libcurl4-openssl-dev python3-pip \
clang-format-7 clang-tidy-7 \
lcov mysql-client libmysqlclient-dev intel-mkl-gnu-2019.5-281 intel-mkl-core-2019.5-281 && \
apt-get remove --purge -y && \
rm -rf /var/lib/apt/lists/*
RUN ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so \
/usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
RUN sh -c 'echo export LD_LIBRARY_PATH=/opt/intel/compilers_and_libraries_2019.5.281/linux/mkl/lib/intel64:\$LD_LIBRARY_PATH > /etc/profile.d/mkl.sh'
RUN wget https://github.com/xianyi/OpenBLAS/archive/v0.3.9.tar.gz && \
tar zxvf v0.3.9.tar.gz && cd OpenBLAS-0.3.9 && \
make TARGET=CORE2 DYNAMIC_ARCH=1 DYNAMIC_OLDER=1 USE_THREAD=0 USE_OPENMP=0 FC=gfortran CC=gcc COMMON_OPT="-O3 -g -fPIC" FCOMMON_OPT="-O3 -g -fPIC -frecursive" NMAX="NUM_THREADS=128" LIBPREFIX="libopenblas" LAPACKE="NO_LAPACKE=1" INTERFACE64=0 NO_STATIC=1 && \
make PREFIX=/usr install && \
cd .. && rm -rf OpenBLAS-0.3.9 && rm v0.3.9.tar.gz
RUN apt-get update && apt-get install -y --no-install-recommends ccache && \
apt-get remove --purge -y && \
rm -rf /var/lib/apt/lists/*
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib"
# Set permissions on /etc/passwd and /home to allow arbitrary users to write
COPY --chown=0:0 docker/build_env/entrypoint.sh /
RUN mkdir -p /home/user && chgrp -R 0 /home && chmod -R g=u /etc/passwd /etc/group /home && chmod +x /entrypoint.sh
ENV HOME=/home/user
ENTRYPOINT [ "/entrypoint.sh" ]
CMD ["tail", "-f", "/dev/null"]


@@ -1,24 +0,0 @@
#!/bin/bash
set -e
# Ensure $HOME exists when starting
if [ ! -d "${HOME}" ]; then
mkdir -p "${HOME}"
fi
# Setup $PS1 for a consistent and reasonable prompt
if [ -w "${HOME}" ] && [ ! -f "${HOME}"/.bashrc ]; then
echo "PS1='\s-\v \w \$ '" > "${HOME}"/.bashrc
echo -e 'if [ -f /etc/bashrc ]; then\n . /etc/bashrc\nfi' >> "${HOME}"/.bashrc
fi
# Add current (arbitrary) user to /etc/passwd and /etc/group
if ! whoami &> /dev/null; then
if [ -w /etc/passwd ]; then
echo "${USER_NAME:-user}:x:$(id -u):0:${USER_NAME:-user} user:${HOME}:/bin/bash" >> /etc/passwd
echo "${USER_NAME:-user}:x:$(id -u):" >> /etc/group
fi
fi
exec "$@"


@@ -1,33 +0,0 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
FROM milvusdb/milvus-dev:amd64-centos-7-core AS openblas
FROM centos:centos7
RUN yum install -y wget && \
wget -P /etc/yum.repos.d/ http://mirrors.aliyun.com/repo/epel-7.repo && \
yum clean all && yum makecache && \
yum install -y libgomp libgfortran4 mysql-devel && \
rm -rf /var/cache/yum/*
COPY ./milvus /var/lib/milvus
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/var/lib/milvus/lib"
COPY --from=openblas /usr/lib/libopenblas-r0.3.9.so /var/lib/milvus/lib/
RUN ln -s /var/lib/milvus/lib/libopenblas-r0.3.9.so /var/lib/milvus/lib/libopenblas.so.0 && \
ln -s /var/lib/milvus/lib/libopenblas.so.0 /var/lib/milvus/lib/libopenblas.so
WORKDIR /var/lib/milvus
CMD [ "/var/lib/milvus/bin/milvus_server", "-c", "/var/lib/milvus/conf/milvus.yaml" ]
EXPOSE 19530


@@ -1,35 +0,0 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
FROM milvusdb/milvus-dev:amd64-ubuntu-18.04-core AS openblas
FROM ubuntu:18.04
RUN apt-get update && apt-get install -y --no-install-recommends \
gfortran libsqlite3-dev libmysqlclient-dev libcurl4-openssl-dev python3 && \
apt-get remove --purge -y && \
rm -rf /var/lib/apt/lists/*
RUN ln -s /usr/lib/x86_64-linux-gnu/libmysqlclient.so /usr/lib/x86_64-linux-gnu/libmysqlclient_r.so
COPY ./milvus /var/lib/milvus
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/var/lib/milvus/lib"
COPY --from=openblas /usr/lib/libopenblas-r0.3.9.so /var/lib/milvus/lib/
RUN ln -s /var/lib/milvus/lib/libopenblas-r0.3.9.so /var/lib/milvus/lib/libopenblas.so.0 && \
ln -s /var/lib/milvus/lib/libopenblas.so.0 /var/lib/milvus/lib/libopenblas.so
WORKDIR /var/lib/milvus
CMD [ "/var/lib/milvus/bin/milvus_server", "-c", "/var/lib/milvus/conf/milvus.yaml" ]
EXPOSE 19530


@@ -1,17 +0,0 @@
version: '2.3'
services:
cpu_centos7:
image: ${TARGET_REPO}:${TARGET_TAG}
build:
context: ./
dockerfile: cpu/centos7/Dockerfile
cache_from:
- ${SOURCE_REPO}:${SOURCE_TAG}
cpu_ubuntu18.04:
image: ${TARGET_REPO}:${TARGET_TAG}
build:
context: ./
dockerfile: cpu/ubuntu18.04/Dockerfile
cache_from:
- ${SOURCE_REPO}:${SOURCE_TAG}


@@ -1,21 +0,0 @@
version: '2.3'
networks:
monitor:
driver: bridge
services:
milvus_server:
runtime: nvidia
image: milvusdb/milvus:latest
restart: always
environment:
WEB_APP: host.docker.internal
volumes:
- ../core/conf/milvus.yaml:/var/lib/milvus/conf/milvus.yaml
- ../core/conf/log_config.conf:/var/lib/milvus/conf/log_config.conf
ports:
- "8080:8080"
- "19530:19530"
networks:
- monitor


@@ -1,30 +0,0 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
FROM python:3.6.8-jessie
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
RUN apt-get update && apt-get install -y --no-install-recommends wget apt-transport-https && \
wget -qO- "https://get.helm.sh/helm-v3.0.2-linux-amd64.tar.gz" | tar --strip-components=1 -xz -C /usr/local/bin linux-amd64/helm && \
wget -P /tmp https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg && \
apt-key add /tmp/apt-key.gpg && \
sh -c 'echo deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main > /etc/apt/sources.list.d/kubernetes.list' && \
apt-get update && apt-get install -y --no-install-recommends \
build-essential kubectl && \
apt-get remove --purge -y && \
rm -rf /var/lib/apt/lists/*
COPY docker-entrypoint.sh /app/docker-entrypoint.sh
WORKDIR /root
ENTRYPOINT [ "/app/docker-entrypoint.sh" ]
CMD [ "start" ]


@@ -1,9 +0,0 @@
#!/bin/bash
set -e
if [ "$1" = 'start' ]; then
tail -f /dev/null
fi
exec "$@"

go.mod

@@ -4,7 +4,7 @@ go 1.15
require (
cloud.google.com/go/bigquery v1.4.0 // indirect
code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48 // indirect
code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48
github.com/99designs/keyring v1.1.5 // indirect
github.com/BurntSushi/toml v0.3.1
github.com/DataDog/zstd v1.4.6-0.20200617134701-89f69fb7df32 // indirect
@@ -26,7 +26,7 @@ require (
github.com/google/btree v1.0.0
github.com/google/martian/v3 v3.0.0 // indirect
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 // indirect
github.com/google/uuid v1.1.1
github.com/google/uuid v1.1.1 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.10
github.com/julienschmidt/httprouter v1.3.0 // indirect
@@ -47,12 +47,14 @@ require (
github.com/prometheus/procfs v0.1.3 // indirect
github.com/rs/xid v1.2.1
github.com/sirupsen/logrus v1.6.0
github.com/spaolacci/murmur3 v1.1.0
github.com/stretchr/testify v1.6.1
github.com/tikv/client-go v0.0.0-20200824032810-95774393107b
github.com/tikv/pd v2.1.19+incompatible
github.com/yahoo/athenz v1.9.16 // indirect
go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738
go.opencensus.io v0.22.4 // indirect
go.uber.org/atomic v1.6.0
go.uber.org/zap v1.15.0
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a // indirect
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect
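
Among the entries promoted from indirect to direct above, spaolacci/murmur3 and go.uber.org/atomic are small utility libraries. A hedged sketch of their typical use, not tied to any particular call site in this commit:

package main

import (
	"fmt"

	"github.com/spaolacci/murmur3"
	"go.uber.org/atomic"
)

func main() {
	// murmur3: fast non-cryptographic hashing, e.g. to bucket a key across topics.
	h := murmur3.Sum32([]byte("collection0/segment42"))
	fmt.Println("bucket:", h%128)

	// go.uber.org/atomic: type-safe wrappers around sync/atomic.
	var n atomic.Int64
	n.Add(1)
	fmt.Println("count:", n.Load())
}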

go.sum

@@ -160,6 +160,8 @@ github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaI
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/envoyproxy/data-plane-api v0.0.0-20200904023242-f4d8a28107ca h1:EvL1gA7uyPU2JVN93HbQwYOXyUjUJKYGStDN8eKD/Ss=
github.com/envoyproxy/data-plane-api v0.0.0-20200909004014-2bb47b2b6fb0 h1:0edaQ8F7kgXmqz/tFjjl5rW/nAKUZ5Zg0Rv5vKiE6+U=
github.com/envoyproxy/data-plane-api v0.0.0-20200923192109-df3147960318 h1:fdyLKTIP2g4GinIlHcG/8E1dDLgZIkyLIwV1mjvOxXk=
github.com/envoyproxy/data-plane-api v0.0.0-20200924222414-c0b715aedb66 h1:c5a7hsMEcrARmUgB2N/gkgJDPCiNKXCXzTBXKvF3SVI=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=


@@ -263,6 +263,100 @@ func (OpType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{4}
}
type ReqType int32
const (
// general operations
ReqType_kCmd ReqType = 0
// collection operations
ReqType_kCreateCollection ReqType = 100
ReqType_kDropCollection ReqType = 101
ReqType_kHasCollection ReqType = 102
ReqType_kListCollections ReqType = 103
ReqType_kGetCollectionInfo ReqType = 104
ReqType_kGetCollectionStats ReqType = 105
ReqType_kCountEntities ReqType = 106
// partition operations
ReqType_kCreatePartition ReqType = 200
ReqType_kDropPartition ReqType = 201
ReqType_kHasPartition ReqType = 202
ReqType_kListPartitions ReqType = 203
// index operations
ReqType_kCreateIndex ReqType = 300
ReqType_kDropIndex ReqType = 301
ReqType_kDescribeIndex ReqType = 302
// data operations
ReqType_kInsert ReqType = 400
ReqType_kGetEntityByID ReqType = 401
ReqType_kDeleteEntityByID ReqType = 402
ReqType_kSearch ReqType = 403
ReqType_kListIDInSegment ReqType = 404
// other operations
ReqType_kLoadCollection ReqType = 500
ReqType_kFlush ReqType = 501
ReqType_kCompact ReqType = 502
)
var ReqType_name = map[int32]string{
0: "kCmd",
100: "kCreateCollection",
101: "kDropCollection",
102: "kHasCollection",
103: "kListCollections",
104: "kGetCollectionInfo",
105: "kGetCollectionStats",
106: "kCountEntities",
200: "kCreatePartition",
201: "kDropPartition",
202: "kHasPartition",
203: "kListPartitions",
300: "kCreateIndex",
301: "kDropIndex",
302: "kDescribeIndex",
400: "kInsert",
401: "kGetEntityByID",
402: "kDeleteEntityByID",
403: "kSearch",
404: "kListIDInSegment",
500: "kLoadCollection",
501: "kFlush",
502: "kCompact",
}
var ReqType_value = map[string]int32{
"kCmd": 0,
"kCreateCollection": 100,
"kDropCollection": 101,
"kHasCollection": 102,
"kListCollections": 103,
"kGetCollectionInfo": 104,
"kGetCollectionStats": 105,
"kCountEntities": 106,
"kCreatePartition": 200,
"kDropPartition": 201,
"kHasPartition": 202,
"kListPartitions": 203,
"kCreateIndex": 300,
"kDropIndex": 301,
"kDescribeIndex": 302,
"kInsert": 400,
"kGetEntityByID": 401,
"kDeleteEntityByID": 402,
"kSearch": 403,
"kListIDInSegment": 404,
"kLoadCollection": 500,
"kFlush": 501,
"kCompact": 502,
}
func (x ReqType) String() string {
return proto.EnumName(ReqType_name, int32(x))
}
func (ReqType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{5}
}
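
The numbering above groups request kinds by hundreds: collection operations in the 100s, partitions in the 200s, indexes in the 300s, data operations in the 400s, and other operations in the 500s. A small illustrative helper over the generated enum (hypothetical, not part of the generated file):

// isDataOp reports whether a request falls in the data-operation block
// (kInsert through kListIDInSegment occupy the 400 range).
func isDataOp(t ReqType) bool {
	return t >= ReqType_kInsert && t <= ReqType_kListIDInSegment
}

For example, isDataOp(ReqType_kSearch) is true (kSearch = 403), and ReqType_kSearch.String() returns "kSearch" via the ReqType_name table.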
type SyncType int32
const (
@@ -285,7 +379,7 @@ func (x SyncType) String() string {
}
func (SyncType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{5}
return fileDescriptor_0802b3a25fb57244, []int{6}
}
type Status struct {
@ -1328,8 +1422,8 @@ type QueryResult struct {
Scores []float32 `protobuf:"fixed32,4,rep,packed,name=scores,proto3" json:"scores,omitempty"`
Distances []float32 `protobuf:"fixed32,5,rep,packed,name=distances,proto3" json:"distances,omitempty"`
ExtraParams []*KeyValuePair `protobuf:"bytes,6,rep,name=extra_params,json=extraParams,proto3" json:"extra_params,omitempty"`
QueryId int64 `protobuf:"varint,7,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
ClientId int64 `protobuf:"varint,8,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
QueryId uint64 `protobuf:"varint,7,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
ProxyId int64 `protobuf:"varint,8,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -1402,16 +1496,16 @@ func (m *QueryResult) GetExtraParams() []*KeyValuePair {
return nil
}
func (m *QueryResult) GetQueryId() int64 {
func (m *QueryResult) GetQueryId() uint64 {
if m != nil {
return m.QueryId
}
return 0
}
func (m *QueryResult) GetClientId() int64 {
func (m *QueryResult) GetProxyId() int64 {
if m != nil {
return m.ClientId
return m.ProxyId
}
return 0
}
@@ -2729,6 +2823,220 @@ func (m *SearchParamPB) GetExtraParams() []*KeyValuePair {
return nil
}
type QueryReqMsg struct {
CollectionName string `protobuf:"bytes,1,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
VectorParam []*VectorParam `protobuf:"bytes,2,rep,name=vector_param,json=vectorParam,proto3" json:"vector_param,omitempty"`
PartitionTags []string `protobuf:"bytes,3,rep,name=partition_tags,json=partitionTags,proto3" json:"partition_tags,omitempty"`
Dsl string `protobuf:"bytes,4,opt,name=dsl,proto3" json:"dsl,omitempty"`
ExtraParams []*KeyValuePair `protobuf:"bytes,5,rep,name=extra_params,json=extraParams,proto3" json:"extra_params,omitempty"`
Timestamp uint64 `protobuf:"varint,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
ProxyId int64 `protobuf:"varint,7,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
QueryId uint64 `protobuf:"varint,8,opt,name=query_id,json=queryId,proto3" json:"query_id,omitempty"`
ReqType ReqType `protobuf:"varint,9,opt,name=req_type,json=reqType,proto3,enum=milvus.grpc.ReqType" json:"req_type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *QueryReqMsg) Reset() { *m = QueryReqMsg{} }
func (m *QueryReqMsg) String() string { return proto.CompactTextString(m) }
func (*QueryReqMsg) ProtoMessage() {}
func (*QueryReqMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{43}
}
func (m *QueryReqMsg) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_QueryReqMsg.Unmarshal(m, b)
}
func (m *QueryReqMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_QueryReqMsg.Marshal(b, m, deterministic)
}
func (m *QueryReqMsg) XXX_Merge(src proto.Message) {
xxx_messageInfo_QueryReqMsg.Merge(m, src)
}
func (m *QueryReqMsg) XXX_Size() int {
return xxx_messageInfo_QueryReqMsg.Size(m)
}
func (m *QueryReqMsg) XXX_DiscardUnknown() {
xxx_messageInfo_QueryReqMsg.DiscardUnknown(m)
}
var xxx_messageInfo_QueryReqMsg proto.InternalMessageInfo
func (m *QueryReqMsg) GetCollectionName() string {
if m != nil {
return m.CollectionName
}
return ""
}
func (m *QueryReqMsg) GetVectorParam() []*VectorParam {
if m != nil {
return m.VectorParam
}
return nil
}
func (m *QueryReqMsg) GetPartitionTags() []string {
if m != nil {
return m.PartitionTags
}
return nil
}
func (m *QueryReqMsg) GetDsl() string {
if m != nil {
return m.Dsl
}
return ""
}
func (m *QueryReqMsg) GetExtraParams() []*KeyValuePair {
if m != nil {
return m.ExtraParams
}
return nil
}
func (m *QueryReqMsg) GetTimestamp() uint64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *QueryReqMsg) GetProxyId() int64 {
if m != nil {
return m.ProxyId
}
return 0
}
func (m *QueryReqMsg) GetQueryId() uint64 {
if m != nil {
return m.QueryId
}
return 0
}
func (m *QueryReqMsg) GetReqType() ReqType {
if m != nil {
return m.ReqType
}
return ReqType_kCmd
}
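
Putting the accessors together, a search request would be assembled roughly as below, a sketch from within the same package; the literal values are invented for illustration:

req := &QueryReqMsg{
	CollectionName: "demo",
	PartitionTags:  []string{"default"},
	Dsl:            `{"bool": {"must": []}}`,
	Timestamp:      1602750000,
	ProxyId:        0,
	QueryId:        1,
	ReqType:        ReqType_kSearch,
}
// The message would then be proto-marshalled and published, e.g. to the query topic.
_ = req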
type ManipulationReqMsg struct {
CollectionName string `protobuf:"bytes,1,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
PartitionTag string `protobuf:"bytes,2,opt,name=partition_tag,json=partitionTag,proto3" json:"partition_tag,omitempty"`
PrimaryKeys []uint64 `protobuf:"varint,3,rep,packed,name=primary_keys,json=primaryKeys,proto3" json:"primary_keys,omitempty"`
RowsData []*RowData `protobuf:"bytes,4,rep,name=rows_data,json=rowsData,proto3" json:"rows_data,omitempty"`
Timestamp uint64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
SegmentId uint64 `protobuf:"varint,6,opt,name=segment_id,json=segmentId,proto3" json:"segment_id,omitempty"`
ChannelId uint64 `protobuf:"varint,7,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
ReqType ReqType `protobuf:"varint,8,opt,name=req_type,json=reqType,proto3,enum=milvus.grpc.ReqType" json:"req_type,omitempty"`
ProxyId int64 `protobuf:"varint,9,opt,name=proxy_id,json=proxyId,proto3" json:"proxy_id,omitempty"`
ExtraParams []*KeyValuePair `protobuf:"bytes,10,rep,name=extra_params,json=extraParams,proto3" json:"extra_params,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ManipulationReqMsg) Reset() { *m = ManipulationReqMsg{} }
func (m *ManipulationReqMsg) String() string { return proto.CompactTextString(m) }
func (*ManipulationReqMsg) ProtoMessage() {}
func (*ManipulationReqMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{44}
}
func (m *ManipulationReqMsg) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ManipulationReqMsg.Unmarshal(m, b)
}
func (m *ManipulationReqMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ManipulationReqMsg.Marshal(b, m, deterministic)
}
func (m *ManipulationReqMsg) XXX_Merge(src proto.Message) {
xxx_messageInfo_ManipulationReqMsg.Merge(m, src)
}
func (m *ManipulationReqMsg) XXX_Size() int {
return xxx_messageInfo_ManipulationReqMsg.Size(m)
}
func (m *ManipulationReqMsg) XXX_DiscardUnknown() {
xxx_messageInfo_ManipulationReqMsg.DiscardUnknown(m)
}
var xxx_messageInfo_ManipulationReqMsg proto.InternalMessageInfo
func (m *ManipulationReqMsg) GetCollectionName() string {
if m != nil {
return m.CollectionName
}
return ""
}
func (m *ManipulationReqMsg) GetPartitionTag() string {
if m != nil {
return m.PartitionTag
}
return ""
}
func (m *ManipulationReqMsg) GetPrimaryKeys() []uint64 {
if m != nil {
return m.PrimaryKeys
}
return nil
}
func (m *ManipulationReqMsg) GetRowsData() []*RowData {
if m != nil {
return m.RowsData
}
return nil
}
func (m *ManipulationReqMsg) GetTimestamp() uint64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *ManipulationReqMsg) GetSegmentId() uint64 {
if m != nil {
return m.SegmentId
}
return 0
}
func (m *ManipulationReqMsg) GetChannelId() uint64 {
if m != nil {
return m.ChannelId
}
return 0
}
func (m *ManipulationReqMsg) GetReqType() ReqType {
if m != nil {
return m.ReqType
}
return ReqType_kCmd
}
func (m *ManipulationReqMsg) GetProxyId() int64 {
if m != nil {
return m.ProxyId
}
return 0
}
func (m *ManipulationReqMsg) GetExtraParams() []*KeyValuePair {
if m != nil {
return m.ExtraParams
}
return nil
}
type InsertOrDeleteMsg struct {
CollectionName string `protobuf:"bytes,1,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
RowsData *RowData `protobuf:"bytes,2,opt,name=rows_data,json=rowsData,proto3" json:"rows_data,omitempty"`
@@ -2749,7 +3057,7 @@ func (m *InsertOrDeleteMsg) Reset() { *m = InsertOrDeleteMsg{} }
func (m *InsertOrDeleteMsg) String() string { return proto.CompactTextString(m) }
func (*InsertOrDeleteMsg) ProtoMessage() {}
func (*InsertOrDeleteMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{43}
return fileDescriptor_0802b3a25fb57244, []int{45}
}
func (m *InsertOrDeleteMsg) XXX_Unmarshal(b []byte) error {
@@ -2859,7 +3167,7 @@ func (m *SearchMsg) Reset() { *m = SearchMsg{} }
func (m *SearchMsg) String() string { return proto.CompactTextString(m) }
func (*SearchMsg) ProtoMessage() {}
func (*SearchMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{44}
return fileDescriptor_0802b3a25fb57244, []int{46}
}
func (m *SearchMsg) XXX_Unmarshal(b []byte) error {
@@ -2945,7 +3253,7 @@ func (m *SearchMsg) GetDsl() string {
type TimeSyncMsg struct {
Peer_Id int64 `protobuf:"varint,1,opt,name=peer_Id,json=peerId,proto3" json:"peer_Id,omitempty"`
Timestamp uint64 `protobuf:"varint,2,opt,name=Timestamp,json=timestamp,proto3" json:"Timestamp,omitempty"`
Timestamp uint64 `protobuf:"varint,2,opt,name=Timestamp,proto3" json:"Timestamp,omitempty"`
SyncType SyncType `protobuf:"varint,3,opt,name=sync_type,json=syncType,proto3,enum=milvus.grpc.SyncType" json:"sync_type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -2956,7 +3264,7 @@ func (m *TimeSyncMsg) Reset() { *m = TimeSyncMsg{} }
func (m *TimeSyncMsg) String() string { return proto.CompactTextString(m) }
func (*TimeSyncMsg) ProtoMessage() {}
func (*TimeSyncMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{45}
return fileDescriptor_0802b3a25fb57244, []int{47}
}
func (m *TimeSyncMsg) XXX_Unmarshal(b []byte) error {
@ -3011,7 +3319,7 @@ func (m *Key2SegMsg) Reset() { *m = Key2SegMsg{} }
func (m *Key2SegMsg) String() string { return proto.CompactTextString(m) }
func (*Key2SegMsg) ProtoMessage() {}
func (*Key2SegMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_0802b3a25fb57244, []int{46}
return fileDescriptor_0802b3a25fb57244, []int{48}
}
func (m *Key2SegMsg) XXX_Unmarshal(b []byte) error {
@ -3059,6 +3367,7 @@ func init() {
proto.RegisterEnum("milvus.grpc.CompareOperator", CompareOperator_name, CompareOperator_value)
proto.RegisterEnum("milvus.grpc.Occur", Occur_name, Occur_value)
proto.RegisterEnum("milvus.grpc.OpType", OpType_name, OpType_value)
proto.RegisterEnum("milvus.grpc.ReqType", ReqType_name, ReqType_value)
proto.RegisterEnum("milvus.grpc.SyncType", SyncType_name, SyncType_value)
proto.RegisterType((*Status)(nil), "milvus.grpc.Status")
proto.RegisterType((*KeyValuePair)(nil), "milvus.grpc.KeyValuePair")
@ -3103,6 +3412,8 @@ func init() {
proto.RegisterType((*BooleanQuery)(nil), "milvus.grpc.BooleanQuery")
proto.RegisterType((*GeneralQuery)(nil), "milvus.grpc.GeneralQuery")
proto.RegisterType((*SearchParamPB)(nil), "milvus.grpc.SearchParamPB")
proto.RegisterType((*QueryReqMsg)(nil), "milvus.grpc.QueryReqMsg")
proto.RegisterType((*ManipulationReqMsg)(nil), "milvus.grpc.ManipulationReqMsg")
proto.RegisterType((*InsertOrDeleteMsg)(nil), "milvus.grpc.InsertOrDeleteMsg")
proto.RegisterType((*SearchMsg)(nil), "milvus.grpc.SearchMsg")
proto.RegisterType((*TimeSyncMsg)(nil), "milvus.grpc.TimeSyncMsg")
@ -3114,190 +3425,213 @@ func init() {
}
var fileDescriptor_0802b3a25fb57244 = []byte{
// 2916 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x3a, 0xcd, 0x72, 0xdb, 0xc8,
0xd1, 0x04, 0x49, 0x91, 0x44, 0x83, 0x92, 0xa0, 0xb1, 0x6c, 0xcb, 0xf6, 0xfa, 0xb3, 0x17, 0x5f,
0x25, 0xf1, 0x3a, 0x55, 0xb6, 0x97, 0xbb, 0xb1, 0x9d, 0x78, 0x77, 0xb3, 0xfc, 0x81, 0x24, 0xc4,
0x14, 0x29, 0x0f, 0x21, 0x7b, 0x7f, 0x2a, 0x85, 0x40, 0xc4, 0x98, 0x46, 0x96, 0x00, 0x18, 0x00,
0x94, 0xcc, 0x1c, 0x73, 0x4f, 0xaa, 0x72, 0xcc, 0x31, 0x79, 0x84, 0xe4, 0x0d, 0x72, 0xcc, 0x0b,
0xec, 0x61, 0xab, 0x72, 0x4c, 0xe5, 0x31, 0x52, 0xf3, 0x03, 0x12, 0xa0, 0x48, 0x59, 0x5c, 0x25,
0x27, 0x0d, 0x7b, 0xa6, 0x7b, 0xfa, 0x7f, 0xba, 0x1b, 0x82, 0x1f, 0x8c, 0xbe, 0x19, 0x3c, 0xf4,
0xec, 0x28, 0x26, 0xe1, 0xc3, 0x41, 0x38, 0xea, 0x3f, 0xf4, 0x48, 0x14, 0xd9, 0x03, 0x92, 0xfc,
0x7d, 0x30, 0x0a, 0x83, 0x38, 0x40, 0x8a, 0xe7, 0x0e, 0x4f, 0xc6, 0xd1, 0x03, 0x7a, 0x44, 0x7b,
0x05, 0xa5, 0x5e, 0x6c, 0xc7, 0xe3, 0x08, 0xfd, 0x04, 0x80, 0x84, 0x61, 0x10, 0x5a, 0xfd, 0xc0,
0x21, 0x3b, 0xd2, 0x5d, 0xe9, 0xde, 0x46, 0xed, 0xda, 0x83, 0xd4, 0xd9, 0x07, 0x3a, 0xdd, 0x6e,
0x06, 0x0e, 0xc1, 0x32, 0x49, 0x96, 0xe8, 0x1a, 0x94, 0x42, 0x62, 0x47, 0x81, 0xbf, 0x93, 0xbf,
0x2b, 0xdd, 0x93, 0xb1, 0xf8, 0xa5, 0x3d, 0x86, 0xea, 0x73, 0x32, 0x79, 0x69, 0x0f, 0xc7, 0xe4,
0xd0, 0x76, 0x43, 0xa4, 0x42, 0xe1, 0x1b, 0x32, 0x61, 0x74, 0x65, 0x4c, 0x97, 0x68, 0x1b, 0xd6,
0x4e, 0xe8, 0xb6, 0x40, 0xe4, 0x3f, 0xb4, 0x9f, 0xc2, 0x46, 0x33, 0x18, 0x0e, 0x49, 0x3f, 0x76,
0x03, 0xbf, 0x63, 0x7b, 0x04, 0xfd, 0x08, 0x36, 0xfb, 0x53, 0x88, 0xe5, 0xdb, 0x1e, 0x11, 0x54,
0x36, 0xfa, 0x99, 0x83, 0xda, 0x10, 0x50, 0x16, 0xb5, 0xed, 0x46, 0x31, 0xfa, 0x31, 0x94, 0x22,
0x26, 0x21, 0xc3, 0x52, 0x6a, 0x57, 0x32, 0x32, 0x71, 0xe1, 0xb1, 0x38, 0x82, 0x3e, 0x00, 0x75,
0xee, 0xae, 0x68, 0x27, 0x7f, 0xb7, 0x70, 0x4f, 0xc6, 0x9b, 0xd9, 0xcb, 0x22, 0xad, 0x07, 0xf2,
0xae, 0x4b, 0x86, 0xce, 0x4a, 0x3c, 0xa2, 0xdb, 0x00, 0xaf, 0x29, 0x16, 0x3f, 0xc3, 0x25, 0x97,
0x5f, 0x27, 0x74, 0xb4, 0x7f, 0x48, 0x50, 0x3e, 0xb0, 0x47, 0x23, 0xd7, 0x1f, 0xac, 0xc6, 0xf8,
0x02, 0x06, 0xf2, 0x0b, 0x19, 0xa0, 0x54, 0xfb, 0x6f, 0x88, 0x67, 0xef, 0x14, 0x16, 0x51, 0x65,
0x5b, 0x58, 0x1c, 0x41, 0x9f, 0x40, 0x95, 0xbc, 0x8d, 0x43, 0xdb, 0x1a, 0xd9, 0xa1, 0xed, 0x45,
0x3b, 0xc5, 0xbb, 0x85, 0x7b, 0x4a, 0xed, 0x46, 0x06, 0x25, 0x6d, 0x65, 0xac, 0xb0, 0xe3, 0x87,
0xec, 0xb4, 0x16, 0x81, 0x22, 0x64, 0x59, 0xdd, 0x10, 0x4f, 0xa0, 0xea, 0x71, 0x5c, 0x6b, 0xe8,
0x46, 0x31, 0x33, 0x82, 0x52, 0xdb, 0xce, 0xa0, 0x08, 0xe2, 0x58, 0xf1, 0x66, 0xb7, 0x68, 0xcf,
0x61, 0xe3, 0xd0, 0x0e, 0x63, 0x97, 0x0a, 0xcc, 0xf8, 0xb8, 0xb8, 0x6d, 0x54, 0x28, 0xc4, 0xf6,
0x40, 0xe8, 0x8d, 0x2e, 0xb5, 0x21, 0xac, 0x4f, 0x89, 0xad, 0x2e, 0xc3, 0x03, 0xb8, 0x32, 0x4a,
0xb0, 0xad, 0xd8, 0x1e, 0x58, 0x76, 0x18, 0xda, 0x13, 0xe1, 0x4f, 0x5b, 0xd3, 0x2d, 0xd3, 0x1e,
0xd4, 0xe9, 0x86, 0xf6, 0x02, 0x36, 0x5f, 0x92, 0x7e, 0x1c, 0x84, 0x38, 0x38, 0xc5, 0xa4, 0x1f,
0x84, 0x0e, 0x73, 0x97, 0x61, 0x60, 0xc7, 0x96, 0x63, 0xc7, 0xf6, 0x8e, 0x74, 0xb7, 0x70, 0x2f,
0x8f, 0x65, 0x06, 0x69, 0xd9, 0xb1, 0x8d, 0xee, 0x80, 0x72, 0xec, 0xfa, 0x76, 0x38, 0xe1, 0xfb,
0x94, 0xf3, 0x2a, 0x06, 0x0e, 0xa2, 0x07, 0xb4, 0x5f, 0x81, 0xac, 0xfb, 0xb1, 0x1b, 0x4f, 0x0c,
0x27, 0x5a, 0x8d, 0xf9, 0x1f, 0xc2, 0x26, 0x61, 0x98, 0x96, 0xeb, 0xa4, 0x18, 0x2f, 0xe0, 0x75,
0x22, 0x08, 0x72, 0xa6, 0x77, 0xa1, 0x2a, 0x98, 0xe6, 0x1c, 0x3f, 0x86, 0x72, 0xc8, 0x56, 0x11,
0x63, 0x57, 0xa9, 0xbd, 0x97, 0xb9, 0x65, 0x4e, 0x40, 0x9c, 0x1c, 0xd6, 0xbe, 0x06, 0x85, 0xef,
0x71, 0xa3, 0x21, 0x28, 0xfe, 0x9a, 0x26, 0x15, 0x6e, 0x29, 0xb6, 0x46, 0x4f, 0x01, 0xc2, 0xe0,
0xd4, 0xe2, 0x18, 0x4c, 0xd8, 0x79, 0x5f, 0x4c, 0x73, 0x82, 0xe5, 0x30, 0xb9, 0x45, 0x1b, 0x88,
0x58, 0x3d, 0x20, 0xb1, 0x3d, 0x17, 0x82, 0xd2, 0x5c, 0x08, 0xa2, 0x0f, 0xa0, 0x18, 0x4f, 0x46,
0x3c, 0x7c, 0x36, 0x6a, 0x57, 0x33, 0xf4, 0xa9, 0x4e, 0xcd, 0xc9, 0x88, 0x60, 0x76, 0x84, 0x3a,
0x8c, 0xe3, 0x7a, 0x2c, 0x90, 0x0a, 0x98, 0x2e, 0xb5, 0x3a, 0x94, 0x78, 0x08, 0xa1, 0x27, 0xa0,
0xf0, 0x5b, 0x3c, 0x12, 0xdb, 0x89, 0x2e, 0xb2, 0xf9, 0x74, 0xca, 0x12, 0xe6, 0x0c, 0xd1, 0x65,
0xa4, 0xdd, 0x86, 0x32, 0x0e, 0x4e, 0x99, 0x79, 0x11, 0x14, 0x8f, 0x87, 0xc1, 0x31, 0xe3, 0xb1,
0x8a, 0xd9, 0x5a, 0xfb, 0x73, 0x1e, 0x14, 0xc3, 0x8f, 0x48, 0x18, 0xaf, 0xe8, 0xdd, 0xb3, 0xc0,
0xcf, 0xbf, 0x3b, 0xf0, 0x3f, 0x04, 0xaa, 0xbd, 0x88, 0xbb, 0x55, 0x61, 0x41, 0xec, 0x09, 0x16,
0x71, 0x85, 0x1e, 0x63, 0xcc, 0x2e, 0x70, 0x98, 0xe2, 0x02, 0x87, 0x41, 0xff, 0x0f, 0xeb, 0x99,
0xa8, 0xd8, 0x59, 0x63, 0xec, 0x56, 0xd3, 0xf1, 0x70, 0x26, 0xf1, 0x94, 0x56, 0x4a, 0x3c, 0xff,
0x96, 0x40, 0xe9, 0x11, 0x3b, 0xec, 0xbf, 0x59, 0x51, 0x47, 0xcf, 0xa0, 0x7a, 0xc2, 0x5c, 0x88,
0xdf, 0x2b, 0xb2, 0xce, 0xce, 0x02, 0x1f, 0x63, 0x84, 0xb1, 0x72, 0x92, 0x72, 0x59, 0xea, 0x0d,
0xd1, 0x90, 0x79, 0x83, 0x8c, 0xe9, 0xf2, 0xac, 0xa8, 0x45, 0x16, 0xfa, 0xe7, 0x8b, 0xba, 0xb6,
0x92, 0xa8, 0xa7, 0xb0, 0xcd, 0x25, 0x35, 0xfc, 0x1e, 0x19, 0x78, 0xc4, 0x17, 0x6e, 0xa1, 0xc1,
0xfa, 0x6b, 0x77, 0x48, 0x66, 0xb6, 0x90, 0xd8, 0xd5, 0x0a, 0x05, 0x26, 0x96, 0x78, 0x06, 0xd5,
0x88, 0xe1, 0x4e, 0xa5, 0x95, 0xce, 0x48, 0x9b, 0x52, 0x23, 0x56, 0xa2, 0xd9, 0x0f, 0xed, 0x4f,
0x12, 0x54, 0x58, 0x6a, 0x71, 0xc9, 0x8a, 0x99, 0x45, 0x85, 0x82, 0xeb, 0x44, 0x22, 0x9b, 0xd0,
0x25, 0xba, 0x05, 0xf2, 0x89, 0x3d, 0x74, 0x1d, 0x2b, 0x0c, 0x4e, 0x99, 0xb7, 0x55, 0x70, 0x85,
0x01, 0x70, 0x70, 0x9a, 0x75, 0xc5, 0xe2, 0x45, 0x5c, 0x51, 0xfb, 0x6b, 0x1e, 0x94, 0x17, 0x63,
0x12, 0x4e, 0x30, 0x89, 0xc6, 0xc3, 0x15, 0xb3, 0xf6, 0x87, 0x50, 0x21, 0x42, 0x2e, 0xa1, 0x91,
0x6c, 0x0e, 0x48, 0x84, 0xc6, 0xd3, 0x63, 0xe8, 0x3a, 0x94, 0x69, 0x62, 0xf2, 0xc7, 0x49, 0x2e,
0x28, 0x85, 0xc1, 0x69, 0x67, 0xec, 0xd1, 0xe2, 0x28, 0xea, 0x07, 0x21, 0xe1, 0x2f, 0x67, 0x1e,
0x8b, 0x5f, 0xe8, 0x3d, 0x90, 0x1d, 0x37, 0x8a, 0x6d, 0xbf, 0x4f, 0xb8, 0xc1, 0xf3, 0x78, 0x06,
0xb8, 0x9c, 0xf3, 0xa3, 0x1b, 0x50, 0xf9, 0x0d, 0x95, 0xdd, 0x72, 0x9d, 0x9d, 0x32, 0xe3, 0xa6,
0xcc, 0x7e, 0x1b, 0x0e, 0xd5, 0x73, 0x7f, 0xe8, 0x12, 0x3f, 0xa6, 0x7b, 0x15, 0xb6, 0x57, 0xe1,
0x00, 0xc3, 0xd1, 0x7e, 0x09, 0x4a, 0x2f, 0x0e, 0xe9, 0x7b, 0x4a, 0x46, 0xc3, 0xc9, 0x6a, 0x3a,
0x7b, 0x1f, 0xaa, 0x11, 0xc3, 0xb5, 0x42, 0x8a, 0x2c, 0x9e, 0x50, 0x25, 0x9a, 0xd1, 0xd3, 0x5e,
0x81, 0xdc, 0x08, 0x82, 0xe1, 0xf7, 0x20, 0x7e, 0x1b, 0xe0, 0x38, 0x08, 0x86, 0x29, 0xd2, 0x15,
0x2c, 0x1f, 0x27, 0xb4, 0xb4, 0x28, 0x5d, 0xf5, 0xe1, 0xe0, 0xb4, 0x19, 0x8c, 0xfd, 0x15, 0x4d,
0xfe, 0x08, 0xb6, 0x53, 0xf9, 0x81, 0x9a, 0xb2, 0x4f, 0x89, 0xb0, 0xbb, 0x0a, 0x18, 0xf5, 0xcf,
0x90, 0xd7, 0x6e, 0x41, 0xb9, 0x19, 0x78, 0x9e, 0xed, 0x3b, 0xd4, 0x9d, 0xfb, 0x9e, 0x93, 0x14,
0xb6, 0x7d, 0xcf, 0xd1, 0xfe, 0x29, 0x01, 0x18, 0xbe, 0x43, 0xde, 0xf2, 0x50, 0xfc, 0xdf, 0xd4,
0x71, 0xd9, 0x57, 0xac, 0x30, 0xff, 0x8a, 0xdd, 0x06, 0x70, 0x29, 0x0b, 0x7c, 0xbb, 0xc8, 0xb7,
0x19, 0x84, 0x6d, 0x5f, 0x2e, 0xe9, 0x7c, 0x0e, 0xb0, 0x3b, 0x1c, 0x47, 0x22, 0xbb, 0xd6, 0xe0,
0xea, 0x1c, 0xcb, 0x99, 0x94, 0x73, 0x25, 0xcb, 0x38, 0xaf, 0x1a, 0x8e, 0xa0, 0xda, 0x0c, 0xbc,
0x91, 0xdd, 0x5f, 0xf5, 0x15, 0x7b, 0x0f, 0xe4, 0xf8, 0x4d, 0x48, 0xa2, 0x37, 0xc1, 0x90, 0x97,
0x00, 0x12, 0x9e, 0x01, 0xb4, 0x23, 0xd8, 0x6c, 0x91, 0x21, 0x89, 0x49, 0x63, 0x62, 0xb4, 0x56,
0xa4, 0x7c, 0x03, 0x2a, 0x73, 0x95, 0x4e, 0xd9, 0x15, 0x35, 0xce, 0x57, 0xe9, 0x9e, 0xc4, 0xf0,
0x5f, 0x07, 0xab, 0xd9, 0xf4, 0x16, 0xc8, 0xb4, 0x7e, 0xb1, 0x5c, 0xff, 0x75, 0x20, 0xac, 0x59,
0xa1, 0x00, 0x4a, 0x49, 0xfb, 0x1a, 0xb6, 0xf6, 0x48, 0x2c, 0x8a, 0xb4, 0x56, 0xb4, 0x22, 0xd3,
0xb7, 0x01, 0x22, 0x9e, 0xf6, 0x69, 0x48, 0x73, 0x7f, 0x95, 0x05, 0xc4, 0x70, 0xb4, 0x31, 0x6c,
0x24, 0xe5, 0x1f, 0x7f, 0x84, 0xff, 0x1b, 0xea, 0xa0, 0x55, 0xe7, 0xcc, 0xf5, 0x22, 0x96, 0xb0,
0x65, 0x51, 0xc2, 0xf0, 0xd6, 0xe8, 0x11, 0xa8, 0xfc, 0x95, 0x64, 0x15, 0x0e, 0x17, 0x89, 0xa5,
0x3c, 0x8f, 0xf8, 0x91, 0x2b, 0xaa, 0xba, 0x02, 0x9e, 0x01, 0xb4, 0x3f, 0x48, 0xa2, 0x42, 0xa3,
0xd5, 0x15, 0xfa, 0x18, 0x64, 0x9a, 0xed, 0x2d, 0x56, 0x87, 0x49, 0xe7, 0xd4, 0x61, 0xfb, 0x39,
0x5c, 0x71, 0xc4, 0x1a, 0x35, 0xce, 0x3c, 0xde, 0xd4, 0x32, 0xb7, 0x17, 0x3c, 0xde, 0x33, 0xb6,
0xf6, 0x73, 0x99, 0x37, 0xbc, 0x51, 0x16, 0x3d, 0xa9, 0xf6, 0xad, 0x04, 0x90, 0xe2, 0x7e, 0x03,
0xf2, 0x2e, 0x8f, 0xf1, 0x22, 0xce, 0xbb, 0x0e, 0xad, 0xcc, 0x52, 0xb1, 0xc9, 0xd6, 0xd3, 0xc2,
0xb1, 0xf0, 0xee, 0xc2, 0xf1, 0x13, 0xa8, 0xf2, 0xe8, 0xbc, 0x70, 0x5f, 0xe5, 0x4e, 0xf3, 0x49,
0x74, 0xc9, 0xe0, 0xdd, 0x83, 0xad, 0x94, 0x16, 0x44, 0xd5, 0x5e, 0x4b, 0x7a, 0xf1, 0x8b, 0xd4,
0xec, 0x42, 0x45, 0xdf, 0x49, 0x20, 0x9b, 0x24, 0xf4, 0xd8, 0x4b, 0xfb, 0xae, 0xaa, 0xfa, 0x16,
0xc8, 0xae, 0x1f, 0x5b, 0x49, 0xc3, 0x4f, 0xfd, 0xa9, 0xe2, 0xfa, 0x31, 0xe3, 0x91, 0x3e, 0x1f,
0x4e, 0x30, 0x3e, 0x1e, 0x12, 0xb1, 0x4f, 0x3d, 0x4a, 0xc2, 0x0a, 0x87, 0xf1, 0x23, 0xbc, 0x44,
0x18, 0x13, 0xf6, 0xc8, 0x16, 0xf9, 0xd3, 0xc5, 0x00, 0xf4, 0x99, 0xdd, 0x86, 0xb5, 0xe3, 0x20,
0x88, 0x62, 0x56, 0x4a, 0xe6, 0x31, 0xff, 0x71, 0xc9, 0x1a, 0xd2, 0x06, 0x85, 0x65, 0xa8, 0x90,
0xe8, 0x6f, 0x47, 0x21, 0x7a, 0x0a, 0x95, 0x60, 0x44, 0x42, 0x3b, 0x0e, 0x42, 0xe1, 0x91, 0x59,
0x1d, 0x89, 0xb3, 0x5d, 0x71, 0x06, 0x4f, 0x4f, 0xa3, 0x1d, 0x28, 0xb3, 0xb5, 0xef, 0x08, 0x6f,
0x49, 0x7e, 0x6a, 0x7f, 0x93, 0x00, 0xb0, 0xed, 0x0f, 0xc8, 0x85, 0x34, 0x58, 0x4b, 0xd3, 0x39,
0x5b, 0x96, 0xa6, 0x98, 0x9d, 0xde, 0x30, 0x53, 0x4c, 0xe1, 0x3c, 0xc5, 0xac, 0xd6, 0xd5, 0x7f,
0x2b, 0x25, 0x9d, 0xda, 0x85, 0xd8, 0xbe, 0x03, 0x0a, 0x2f, 0x47, 0x38, 0x23, 0x79, 0xc6, 0x08,
0x30, 0x50, 0x83, 0x71, 0x93, 0x6a, 0x18, 0x0b, 0x2b, 0x34, 0x8c, 0x34, 0x04, 0xe3, 0x60, 0xf4,
0x8d, 0x70, 0x06, 0xb6, 0xbe, 0x64, 0x64, 0xbc, 0x85, 0x2a, 0x2d, 0x51, 0x88, 0xed, 0x73, 0xc9,
0xee, 0xc1, 0x5a, 0xd0, 0xef, 0x8f, 0x13, 0x83, 0xa3, 0x0c, 0x99, 0x2e, 0xdd, 0xc1, 0xfc, 0x00,
0xfa, 0x0c, 0xd6, 0x07, 0xc4, 0x27, 0xa1, 0x3d, 0xb4, 0x98, 0x64, 0xc2, 0x42, 0xd9, 0x8b, 0xf7,
0xf8, 0x09, 0x5e, 0x98, 0x56, 0x07, 0xa9, 0x5f, 0xda, 0xef, 0xf3, 0x50, 0x4d, 0x6f, 0xa3, 0xcf,
0x61, 0xfd, 0x98, 0xb3, 0x22, 0x08, 0x4a, 0x0b, 0xba, 0xdd, 0x34, 0xb3, 0xfb, 0x39, 0x5c, 0x3d,
0x4e, 0x33, 0xff, 0x04, 0x20, 0x26, 0xa1, 0x37, 0xe5, 0x47, 0x3a, 0xd3, 0x7e, 0x4e, 0x63, 0x77,
0x3f, 0x87, 0xe5, 0x78, 0x1a, 0xc8, 0x3f, 0x03, 0x25, 0xa4, 0x4e, 0x29, 0x30, 0xf9, 0x94, 0xe8,
0x7a, 0xb6, 0xe2, 0x9e, 0x3a, 0xed, 0x7e, 0x0e, 0x43, 0x38, 0x73, 0xe1, 0x4f, 0xa7, 0x29, 0x98,
0x23, 0x17, 0x17, 0x74, 0x14, 0x29, 0xdf, 0x99, 0x65, 0x5f, 0xf6, 0x93, 0x66, 0x5f, 0x86, 0xa7,
0xfd, 0x4b, 0x82, 0xf5, 0x54, 0xe7, 0x71, 0xd8, 0xb8, 0xf8, 0xbb, 0xb5, 0xe2, 0xd0, 0xe5, 0xac,
0xe9, 0x0a, 0x0b, 0x34, 0xbd, 0xdc, 0x74, 0x97, 0x0c, 0xa6, 0xdf, 0x15, 0x60, 0x8b, 0x77, 0xf3,
0xdd, 0x90, 0x57, 0x2e, 0x07, 0xd1, 0xe0, 0xe2, 0xc2, 0x66, 0x7a, 0x23, 0x6e, 0xe3, 0x77, 0xb5,
0xe9, 0x2a, 0x14, 0xc6, 0xae, 0x93, 0xcc, 0x2c, 0xc6, 0xae, 0xb3, 0xa8, 0x4b, 0x3d, 0xdb, 0x90,
0xd3, 0xba, 0xcb, 0xf5, 0x48, 0x14, 0xdb, 0xde, 0x88, 0xa5, 0xd9, 0x22, 0x9e, 0x01, 0xe6, 0xca,
0x90, 0xd2, 0x5c, 0x19, 0x42, 0xb7, 0xfb, 0x6f, 0x6c, 0xdf, 0x27, 0xc3, 0x59, 0x53, 0x22, 0x0b,
0x88, 0x41, 0x19, 0xc8, 0x07, 0x23, 0xd6, 0x8f, 0x6c, 0xcc, 0x15, 0x52, 0xdd, 0x11, 0x7b, 0x34,
0xf3, 0xc1, 0x28, 0xdb, 0xbb, 0xc8, 0xd9, 0xde, 0xe5, 0x8c, 0x11, 0x60, 0x25, 0x23, 0xfc, 0x3d,
0x0f, 0x32, 0xf7, 0xb6, 0x95, 0x94, 0x9f, 0x4a, 0x5c, 0x5c, 0xf5, 0x17, 0x4c, 0x5c, 0x67, 0xf4,
0x5d, 0x58, 0x30, 0x15, 0x10, 0x66, 0x2a, 0xce, 0xcc, 0x74, 0xbe, 0x05, 0x32, 0xea, 0x29, 0xbd,
0x43, 0x3d, 0xe5, 0x95, 0x1a, 0xca, 0x64, 0x14, 0x57, 0x61, 0x6c, 0xf2, 0x51, 0x9c, 0x98, 0x75,
0xc8, 0xd3, 0x59, 0x87, 0xf6, 0x16, 0x14, 0xd3, 0xf5, 0x48, 0x6f, 0xe2, 0xf7, 0xa9, 0x16, 0xaf,
0x43, 0x79, 0x44, 0x48, 0x68, 0x19, 0x8e, 0x28, 0xf6, 0x4a, 0xf4, 0xa7, 0xc1, 0xc4, 0x30, 0xa7,
0x62, 0xe4, 0xe7, 0xc5, 0xa8, 0x81, 0x1c, 0x4d, 0xfc, 0xbe, 0xb5, 0xb4, 0x90, 0xa2, 0xf4, 0x99,
0x4f, 0x54, 0x22, 0xb1, 0xd2, 0xbe, 0x06, 0x78, 0x4e, 0x26, 0xb5, 0x1e, 0x19, 0xd0, 0x8b, 0x85,
0xe2, 0xa4, 0x25, 0x8a, 0xcb, 0x9f, 0xef, 0xba, 0x05, 0x56, 0x99, 0xcc, 0x5c, 0xf7, 0xfe, 0x5f,
0x8a, 0x20, 0x4f, 0xbf, 0x7b, 0x20, 0x05, 0xca, 0xbd, 0xa3, 0x66, 0x53, 0xef, 0xf5, 0xd4, 0x1c,
0xda, 0x06, 0xf5, 0xa8, 0xa3, 0x7f, 0x71, 0xa8, 0x37, 0x4d, 0xbd, 0x65, 0xe9, 0x18, 0x77, 0xb1,
0x2a, 0x21, 0x04, 0x1b, 0xcd, 0x6e, 0xa7, 0xa3, 0x37, 0x4d, 0x6b, 0xb7, 0x6e, 0xb4, 0xf5, 0x96,
0x9a, 0x47, 0x57, 0x61, 0xeb, 0x50, 0xc7, 0x07, 0x46, 0xaf, 0x67, 0x74, 0x3b, 0x56, 0x4b, 0xef,
0x18, 0x7a, 0x4b, 0x2d, 0xa0, 0x1b, 0x70, 0xb5, 0xd9, 0x6d, 0xb7, 0xf5, 0xa6, 0x49, 0xc1, 0x9d,
0xae, 0x69, 0xe9, 0x5f, 0x18, 0x3d, 0xb3, 0xa7, 0x16, 0x29, 0x6d, 0xa3, 0xdd, 0xd6, 0xf7, 0xea,
0x6d, 0xab, 0x8e, 0xf7, 0x8e, 0x0e, 0xf4, 0x8e, 0xa9, 0xae, 0x51, 0x3a, 0x09, 0xb4, 0x65, 0x1c,
0xe8, 0x1d, 0x4a, 0x4e, 0x2d, 0xa3, 0x6b, 0x80, 0x12, 0xb0, 0xd1, 0x69, 0xe9, 0x5f, 0x58, 0xe6,
0x97, 0x87, 0xba, 0x5a, 0x41, 0xb7, 0xe0, 0x7a, 0x02, 0x4f, 0xdf, 0x53, 0x3f, 0xd0, 0x55, 0x19,
0xa9, 0x50, 0x4d, 0x36, 0xcd, 0xee, 0xe1, 0x73, 0x15, 0xd2, 0xd4, 0x71, 0xf7, 0x15, 0xd6, 0x9b,
0x5d, 0xdc, 0x52, 0x95, 0x34, 0xf8, 0xa5, 0xde, 0x34, 0xbb, 0xd8, 0x32, 0x5a, 0x6a, 0x95, 0x32,
0x9f, 0x80, 0x7b, 0x7a, 0x1d, 0x37, 0xf7, 0x2d, 0xac, 0xf7, 0x8e, 0xda, 0xa6, 0xba, 0x4e, 0x55,
0xb0, 0x6b, 0xb4, 0x75, 0x26, 0xd1, 0x6e, 0xf7, 0xa8, 0xd3, 0x52, 0x37, 0xd0, 0x26, 0x28, 0x07,
0xba, 0x59, 0x4f, 0x74, 0xb2, 0x49, 0xef, 0x6f, 0xd6, 0x9b, 0xfb, 0x7a, 0x02, 0x51, 0xd1, 0x0e,
0x6c, 0x37, 0xeb, 0x1d, 0x8a, 0xd4, 0xc4, 0x7a, 0xdd, 0xd4, 0xad, 0xdd, 0x6e, 0xbb, 0xa5, 0x63,
0x75, 0x8b, 0x0a, 0x38, 0xb7, 0x63, 0xb4, 0x75, 0x15, 0xa5, 0x30, 0x5a, 0x7a, 0x5b, 0x9f, 0x61,
0x5c, 0x49, 0x61, 0x24, 0x3b, 0x14, 0x63, 0x9b, 0x0a, 0xd3, 0x38, 0x32, 0xda, 0x2d, 0xa1, 0x28,
0x6e, 0xb4, 0xab, 0x68, 0x0b, 0xd6, 0x13, 0x61, 0x3a, 0x6d, 0xa3, 0x67, 0xaa, 0xd7, 0xd0, 0x75,
0xb8, 0x92, 0x80, 0x0e, 0x74, 0x13, 0x1b, 0x4d, 0xae, 0xd5, 0xeb, 0xf4, 0x6c, 0xf7, 0xc8, 0xb4,
0xba, 0xbb, 0xd6, 0x81, 0x7e, 0xd0, 0xc5, 0x5f, 0xaa, 0x3b, 0xf7, 0xff, 0x28, 0x41, 0x25, 0xa9,
0xf0, 0x51, 0x05, 0x8a, 0x9d, 0x6e, 0x47, 0x57, 0x73, 0x74, 0xd5, 0xe8, 0x76, 0xdb, 0xaa, 0x44,
0x57, 0x46, 0xc7, 0x7c, 0xaa, 0xe6, 0x91, 0x0c, 0x6b, 0x46, 0xc7, 0xfc, 0xf0, 0xb1, 0x5a, 0x10,
0xcb, 0x8f, 0x6a, 0x6a, 0x51, 0x2c, 0x1f, 0x7f, 0xac, 0xae, 0xd1, 0xe5, 0x6e, 0xbb, 0x5b, 0x37,
0x55, 0x40, 0x00, 0xa5, 0x56, 0xf7, 0xa8, 0xd1, 0xd6, 0x55, 0x85, 0xae, 0x7b, 0x26, 0x36, 0x3a,
0x7b, 0xea, 0x36, 0xe5, 0x40, 0x58, 0xa2, 0x61, 0x74, 0xea, 0xf8, 0x4b, 0xd5, 0xa1, 0xda, 0x14,
0x20, 0x8e, 0x4c, 0xee, 0x37, 0x61, 0x73, 0xae, 0x26, 0x45, 0x25, 0xc8, 0xb7, 0x4d, 0x35, 0x87,
0xca, 0x50, 0x68, 0x9b, 0xba, 0x2a, 0x51, 0x80, 0xfe, 0x42, 0xcd, 0xd3, 0xbf, 0x7b, 0xa6, 0x5a,
0xa0, 0x1b, 0x7b, 0xa6, 0xae, 0x16, 0x29, 0xa0, 0xa3, 0xab, 0x6b, 0xf7, 0x9f, 0xc2, 0x1a, 0xab,
0x73, 0xa8, 0xe3, 0x1b, 0x9d, 0x97, 0xf5, 0xb6, 0xd1, 0xe2, 0x72, 0x1d, 0x1c, 0xf5, 0x4c, 0x55,
0x62, 0x5c, 0xed, 0x77, 0x8f, 0xda, 0xd4, 0xc9, 0xab, 0x50, 0xa1, 0x50, 0x6a, 0x75, 0xb5, 0x70,
0xff, 0x2e, 0x94, 0x78, 0xf2, 0xa6, 0x67, 0x8c, 0x4e, 0x4f, 0xc7, 0xf4, 0x66, 0x2a, 0x11, 0xb3,
0x87, 0x2a, 0xdd, 0xbf, 0x03, 0x95, 0x24, 0x98, 0x29, 0x45, 0xac, 0xd7, 0x29, 0x6d, 0x19, 0xd6,
0x5e, 0x61, 0x83, 0x1e, 0xa8, 0x7d, 0xb7, 0x0e, 0xeb, 0x07, 0x2c, 0xf4, 0x7b, 0x24, 0x3c, 0x71,
0xfb, 0x04, 0xfd, 0x1c, 0xd4, 0x66, 0x48, 0xec, 0x98, 0xcc, 0xba, 0x71, 0xb4, 0xf0, 0x93, 0xd0,
0xcd, 0x45, 0xfd, 0xb8, 0x96, 0x43, 0xbb, 0xb0, 0xbe, 0x6f, 0x47, 0x29, 0xec, 0x5b, 0x73, 0x35,
0x74, 0x3a, 0xc1, 0xdf, 0xbc, 0x76, 0xa6, 0xda, 0xe2, 0x13, 0xa7, 0x1c, 0x32, 0x00, 0xb5, 0x48,
0xd4, 0x0f, 0xdd, 0x63, 0x72, 0x51, 0x62, 0x0b, 0xf9, 0xd4, 0x72, 0xe8, 0x05, 0xb5, 0xd3, 0xd8,
0x8f, 0x2f, 0x4a, 0xe7, 0xce, 0x92, 0xcd, 0xe9, 0x68, 0x2a, 0x87, 0x7e, 0x01, 0x9b, 0xbd, 0x37,
0xf4, 0x67, 0xb2, 0x17, 0xcd, 0x69, 0x49, 0x8c, 0xae, 0x96, 0xd2, 0x4a, 0xbe, 0x9d, 0x6a, 0x39,
0x74, 0x08, 0x28, 0x4b, 0x8b, 0x8d, 0x3f, 0xce, 0xe5, 0x70, 0xd9, 0x26, 0x1b, 0x77, 0xe4, 0x50,
0x0b, 0x36, 0x5a, 0x61, 0x30, 0xba, 0xa8, 0xbc, 0x4b, 0x2c, 0xf9, 0x29, 0x28, 0xdc, 0x15, 0xd8,
0xa0, 0x0d, 0x65, 0xeb, 0xd3, 0xd9, 0xf0, 0x6d, 0x19, 0x7a, 0x13, 0xd6, 0x13, 0x03, 0xbe, 0x83,
0xc0, 0xb2, 0x0d, 0x2d, 0x87, 0x9e, 0x81, 0x4c, 0x25, 0xf9, 0x7e, 0x1c, 0xe8, 0xb0, 0xc9, 0x05,
0x98, 0x7e, 0x60, 0x9c, 0xd3, 0x43, 0xf6, 0x2b, 0xe6, 0x72, 0x32, 0xd5, 0x7d, 0x3b, 0xba, 0x20,
0x8d, 0xe5, 0x0e, 0xfd, 0x1c, 0x36, 0xa8, 0x99, 0xa7, 0xe7, 0xa3, 0xf3, 0x8d, 0x72, 0x73, 0xf1,
0x2d, 0xc2, 0x67, 0xa8, 0x72, 0xc3, 0x60, 0x74, 0x39, 0xc1, 0x3e, 0x81, 0x12, 0x2f, 0x8c, 0xd1,
0xce, 0x9c, 0x66, 0xa7, 0xdf, 0xbe, 0xe6, 0xe4, 0x99, 0x7e, 0xe8, 0x64, 0x6a, 0x59, 0x9f, 0x4e,
0xd5, 0x1a, 0x13, 0xa3, 0x35, 0xc7, 0x42, 0x76, 0x28, 0x76, 0x73, 0xf1, 0x80, 0x5f, 0xcb, 0xa1,
0x7d, 0xda, 0x96, 0xcd, 0x86, 0x73, 0xe8, 0xff, 0xe6, 0xba, 0x82, 0xb9, 0xb9, 0xdd, 0x39, 0x0c,
0x7d, 0x06, 0x25, 0x5e, 0x62, 0xa2, 0xa5, 0xdf, 0x57, 0x6e, 0x66, 0x77, 0x52, 0x1f, 0x30, 0x58,
0x1c, 0x6e, 0xce, 0x7d, 0xe7, 0x41, 0xef, 0x2f, 0x20, 0x94, 0xfd, 0x0a, 0x74, 0x2e, 0xc5, 0x27,
0x50, 0x68, 0x7a, 0xce, 0x92, 0xcc, 0x30, 0xc7, 0x64, 0x6a, 0x8e, 0x9f, 0x43, 0x75, 0x80, 0xd9,
0x90, 0x15, 0x65, 0x8b, 0xde, 0xb9, 0xe9, 0xeb, 0x32, 0xe3, 0xee, 0xc1, 0xd6, 0x61, 0x48, 0x86,
0x81, 0xed, 0x5c, 0x32, 0x0d, 0x3c, 0x81, 0x35, 0x36, 0x89, 0x9e, 0x0b, 0xbf, 0xd9, 0x74, 0x7a,
0x19, 0xe2, 0x33, 0x36, 0xc0, 0x1f, 0xd9, 0xfd, 0x18, 0xdd, 0x38, 0x3b, 0x47, 0x11, 0x63, 0xe9,
0x65, 0xc8, 0x0d, 0xa8, 0x08, 0xbb, 0x35, 0xd0, 0xcd, 0x65, 0xe6, 0x3c, 0x6c, 0x9c, 0xa7, 0xfe,
0x46, 0xed, 0xab, 0x47, 0x03, 0x37, 0x7e, 0x33, 0x3e, 0x7e, 0xd0, 0x0f, 0xbc, 0x87, 0xfd, 0xdf,
0x46, 0x8f, 0x1e, 0x3d, 0x79, 0x18, 0x8d, 0x4f, 0x86, 0xae, 0xf7, 0x70, 0xc9, 0xff, 0xf1, 0x1c,
0x97, 0xd8, 0x3f, 0xf0, 0x7c, 0xf4, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa7, 0x81, 0xdb, 0x7c,
0xe9, 0x23, 0x00, 0x00,
// 3284 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x3a, 0x4d, 0x73, 0x1b, 0xc9,
0x75, 0x18, 0x0c, 0x08, 0x60, 0xde, 0x00, 0x64, 0xb3, 0x45, 0x49, 0x94, 0xb4, 0x8a, 0xb4, 0xb3,
0xb5, 0x89, 0x56, 0xa9, 0x92, 0xb4, 0xdc, 0x8d, 0xa4, 0x44, 0xbb, 0x9b, 0x05, 0x81, 0x21, 0x39,
0x11, 0x08, 0x50, 0x0d, 0x50, 0xda, 0x8f, 0x4a, 0x21, 0x43, 0x4c, 0x0b, 0x9c, 0x25, 0x80, 0x81,
0x66, 0x06, 0xa2, 0x90, 0x63, 0xee, 0x49, 0x65, 0x93, 0x5c, 0x72, 0x71, 0x95, 0x7d, 0xb6, 0x5d,
0xae, 0xf2, 0x3f, 0xf0, 0xc9, 0x5e, 0x1f, 0x7c, 0xdc, 0xc3, 0x56, 0xf9, 0xe8, 0xf2, 0xcd, 0x17,
0xdb, 0x67, 0x57, 0x7f, 0x0c, 0x30, 0x03, 0x02, 0x14, 0x21, 0xda, 0x3e, 0xa1, 0xe7, 0x75, 0xf7,
0xeb, 0xf7, 0xdd, 0xef, 0xbd, 0x06, 0xbc, 0x3b, 0x38, 0xea, 0xdc, 0xed, 0xd9, 0x41, 0x48, 0xfd,
0xbb, 0x1d, 0x7f, 0xd0, 0xbe, 0xdb, 0xa3, 0x41, 0x60, 0x77, 0x68, 0xf4, 0x7b, 0x67, 0xe0, 0x7b,
0xa1, 0x87, 0xf5, 0x9e, 0xdb, 0x7d, 0x39, 0x0c, 0xee, 0xb0, 0x25, 0xc6, 0x33, 0xc8, 0x36, 0x42,
0x3b, 0x1c, 0x06, 0xf8, 0x1f, 0x00, 0xa8, 0xef, 0x7b, 0x7e, 0xab, 0xed, 0x39, 0x74, 0x5d, 0xb9,
0xa9, 0xdc, 0x5a, 0xde, 0xb8, 0x74, 0x27, 0xb6, 0xf6, 0x8e, 0xc9, 0xa6, 0xcb, 0x9e, 0x43, 0x89,
0x46, 0xa3, 0x21, 0xbe, 0x04, 0x59, 0x9f, 0xda, 0x81, 0xd7, 0x5f, 0x4f, 0xdf, 0x54, 0x6e, 0x69,
0x44, 0x7e, 0x19, 0xf7, 0xa1, 0xf0, 0x98, 0x8e, 0x9e, 0xda, 0xdd, 0x21, 0xdd, 0xb3, 0x5d, 0x1f,
0x23, 0x50, 0x8f, 0xe8, 0x88, 0xe3, 0xd5, 0x08, 0x1b, 0xe2, 0x35, 0x58, 0x7a, 0xc9, 0xa6, 0xe5,
0x46, 0xf1, 0x61, 0xfc, 0x23, 0x2c, 0x97, 0xbd, 0x6e, 0x97, 0xb6, 0x43, 0xd7, 0xeb, 0xd7, 0xec,
0x1e, 0xc5, 0x7f, 0x07, 0x2b, 0xed, 0x31, 0xa4, 0xd5, 0xb7, 0x7b, 0x54, 0x62, 0x59, 0x6e, 0x27,
0x16, 0x1a, 0x5d, 0xc0, 0xc9, 0xad, 0x55, 0x37, 0x08, 0xf1, 0xdf, 0x43, 0x36, 0xe0, 0x1c, 0xf2,
0x5d, 0xfa, 0xc6, 0x85, 0x04, 0x4f, 0x82, 0x79, 0x22, 0x97, 0xe0, 0xf7, 0x00, 0x4d, 0x9d, 0x15,
0xac, 0xa7, 0x6f, 0xaa, 0xb7, 0x34, 0xb2, 0x92, 0x3c, 0x2c, 0x30, 0x1a, 0xa0, 0x6d, 0xb9, 0xb4,
0xeb, 0x2c, 0x44, 0x23, 0xbe, 0x0e, 0xf0, 0x9c, 0xed, 0x12, 0x6b, 0x04, 0xe7, 0xda, 0xf3, 0x08,
0x8f, 0xf1, 0x8d, 0x02, 0xb9, 0x5d, 0x7b, 0x30, 0x70, 0xfb, 0x9d, 0xc5, 0x08, 0x9f, 0x41, 0x40,
0x7a, 0x26, 0x01, 0x0c, 0x6b, 0xfb, 0x90, 0xf6, 0xec, 0x75, 0x75, 0x16, 0x56, 0x3e, 0x45, 0xe4,
0x12, 0xfc, 0x11, 0x14, 0xe8, 0xab, 0xd0, 0xb7, 0x5b, 0x03, 0xdb, 0xb7, 0x7b, 0xc1, 0x7a, 0xe6,
0xa6, 0x7a, 0x4b, 0xdf, 0xb8, 0x92, 0xd8, 0x12, 0xd7, 0x32, 0xd1, 0xf9, 0xf2, 0x3d, 0xbe, 0xda,
0x08, 0x40, 0x97, 0xbc, 0x2c, 0xae, 0x88, 0x07, 0x50, 0xe8, 0x89, 0xbd, 0xad, 0xae, 0x1b, 0x84,
0x5c, 0x09, 0xfa, 0xc6, 0x5a, 0x62, 0x8b, 0x44, 0x4e, 0xf4, 0xde, 0xe4, 0x14, 0xe3, 0x31, 0x2c,
0xef, 0xd9, 0x7e, 0xe8, 0x32, 0x86, 0x39, 0x1d, 0x67, 0xd7, 0x0d, 0x02, 0x35, 0xb4, 0x3b, 0x52,
0x6e, 0x6c, 0x68, 0x74, 0xa1, 0x38, 0x46, 0xb6, 0x38, 0x0f, 0x77, 0xe0, 0xc2, 0x20, 0xda, 0xdd,
0x0a, 0xed, 0x4e, 0xcb, 0xf6, 0x7d, 0x7b, 0x24, 0xed, 0x69, 0x75, 0x3c, 0xd5, 0xb4, 0x3b, 0x25,
0x36, 0x61, 0x3c, 0x81, 0x95, 0xa7, 0xb4, 0x1d, 0x7a, 0x3e, 0xf1, 0x8e, 0x09, 0x6d, 0x7b, 0xbe,
0xc3, 0xcd, 0xa5, 0xeb, 0xd9, 0x61, 0xcb, 0xb1, 0x43, 0x7b, 0x5d, 0xb9, 0xa9, 0xde, 0x4a, 0x13,
0x8d, 0x43, 0x2a, 0x76, 0x68, 0xe3, 0x1b, 0xa0, 0x1f, 0xb8, 0x7d, 0xdb, 0x1f, 0x89, 0x79, 0x46,
0x79, 0x81, 0x80, 0x00, 0xb1, 0x05, 0xc6, 0xbf, 0x81, 0x66, 0xf6, 0x43, 0x37, 0x1c, 0x59, 0x4e,
0xb0, 0x18, 0xf1, 0x7f, 0x0b, 0x2b, 0x94, 0xef, 0x6c, 0xb9, 0x4e, 0x8c, 0x70, 0x95, 0x14, 0xa9,
0x44, 0x28, 0x88, 0xde, 0x82, 0x82, 0x24, 0x5a, 0x50, 0x7c, 0x1f, 0x72, 0x3e, 0x1f, 0x05, 0x9c,
0x5c, 0x7d, 0xe3, 0xad, 0xc4, 0x29, 0x53, 0x0c, 0x92, 0x68, 0xb1, 0xf1, 0x25, 0xe8, 0x62, 0x4e,
0x28, 0x0d, 0x43, 0xe6, 0x2b, 0x16, 0x54, 0x84, 0xa6, 0xf8, 0x18, 0x3f, 0x04, 0xf0, 0xbd, 0xe3,
0x96, 0xd8, 0xc1, 0x99, 0x9d, 0xb6, 0xc5, 0x38, 0x25, 0x44, 0xf3, 0xa3, 0x53, 0x8c, 0x8e, 0xf4,
0xd5, 0x5d, 0x1a, 0xda, 0x53, 0x2e, 0xa8, 0x4c, 0xb9, 0x20, 0x7e, 0x0f, 0x32, 0xe1, 0x68, 0x20,
0xdc, 0x67, 0x79, 0xe3, 0x62, 0x02, 0x3f, 0x93, 0x69, 0x73, 0x34, 0xa0, 0x84, 0x2f, 0x61, 0x06,
0xe3, 0xb8, 0x3d, 0xee, 0x48, 0x2a, 0x61, 0x43, 0xa3, 0x04, 0x59, 0xe1, 0x42, 0xf8, 0x01, 0xe8,
0xe2, 0x94, 0x1e, 0x0d, 0xed, 0x48, 0x16, 0xc9, 0x78, 0x3a, 0x26, 0x89, 0x08, 0x82, 0xd8, 0x30,
0x30, 0xae, 0x43, 0x8e, 0x78, 0xc7, 0x5c, 0xbd, 0x18, 0x32, 0x07, 0x5d, 0xef, 0x80, 0xd3, 0x58,
0x20, 0x7c, 0x6c, 0x7c, 0x3f, 0x0d, 0xba, 0xd5, 0x0f, 0xa8, 0x1f, 0x2e, 0x68, 0xdd, 0x13, 0xc7,
0x4f, 0xbf, 0xde, 0xf1, 0xdf, 0x07, 0x26, 0xbd, 0x40, 0x98, 0x95, 0x3a, 0xc3, 0xf7, 0x24, 0x89,
0x24, 0xcf, 0x96, 0x71, 0x62, 0x67, 0x18, 0x4c, 0x66, 0x86, 0xc1, 0xe0, 0x77, 0xa0, 0x98, 0xf0,
0x8a, 0xf5, 0x25, 0x4e, 0x6e, 0x21, 0xee, 0x0f, 0x27, 0x02, 0x4f, 0x76, 0xa1, 0xc0, 0xf3, 0x5b,
0x05, 0xf4, 0x06, 0xb5, 0xfd, 0xf6, 0xe1, 0x82, 0x32, 0x7a, 0x04, 0x85, 0x97, 0xdc, 0x84, 0xc4,
0xb9, 0x32, 0xea, 0xac, 0xcf, 0xb0, 0x31, 0x8e, 0x98, 0xe8, 0x2f, 0x63, 0x26, 0xcb, 0xac, 0x21,
0xe8, 0x72, 0x6b, 0xd0, 0x08, 0x1b, 0x9e, 0x64, 0x35, 0xc3, 0x5d, 0xff, 0x74, 0x56, 0x97, 0x16,
0x62, 0xf5, 0x18, 0xd6, 0x04, 0xa7, 0x56, 0xbf, 0x41, 0x3b, 0x3d, 0xda, 0x97, 0x66, 0x61, 0x40,
0xf1, 0xb9, 0xdb, 0xa5, 0x13, 0x5d, 0x28, 0xfc, 0x68, 0x9d, 0x01, 0x23, 0x4d, 0x3c, 0x82, 0x42,
0xc0, 0xf7, 0x8e, 0xb9, 0x55, 0x4e, 0x70, 0x1b, 0x13, 0x23, 0xd1, 0x83, 0xc9, 0x87, 0xf1, 0xff,
0x0a, 0xe4, 0x79, 0x68, 0x71, 0xe9, 0x82, 0x91, 0x05, 0x81, 0xea, 0x3a, 0x81, 0x8c, 0x26, 0x6c,
0x88, 0xaf, 0x81, 0xf6, 0xd2, 0xee, 0xba, 0x4e, 0xcb, 0xf7, 0x8e, 0xb9, 0xb5, 0xe5, 0x49, 0x9e,
0x03, 0x88, 0x77, 0x9c, 0x34, 0xc5, 0xcc, 0x59, 0x4c, 0xd1, 0xf8, 0x49, 0x1a, 0xf4, 0x27, 0x43,
0xea, 0x8f, 0x08, 0x0d, 0x86, 0xdd, 0x05, 0xa3, 0xf6, 0xfb, 0x90, 0xa7, 0x92, 0x2f, 0x29, 0x91,
0x64, 0x0c, 0x88, 0x98, 0x26, 0xe3, 0x65, 0xf8, 0x32, 0xe4, 0x58, 0x60, 0xea, 0x0f, 0xa3, 0x58,
0x90, 0xf5, 0xbd, 0xe3, 0xda, 0xb0, 0xc7, 0x92, 0xa3, 0xa0, 0xed, 0xf9, 0x54, 0xdc, 0x9c, 0x69,
0x22, 0xbf, 0xf0, 0x5b, 0xa0, 0x39, 0x6e, 0x10, 0xda, 0xfd, 0x36, 0x15, 0x0a, 0x4f, 0x93, 0x09,
0xe0, 0x7c, 0xc6, 0x8f, 0xaf, 0x40, 0xfe, 0x05, 0xe3, 0xbd, 0xe5, 0x3a, 0xeb, 0xb9, 0x9b, 0xca,
0xad, 0x0c, 0xc9, 0xf1, 0x6f, 0xcb, 0x61, 0x53, 0x03, 0xdf, 0x7b, 0xc5, 0xa7, 0xf2, 0x9c, 0xd0,
0x1c, 0xff, 0xb6, 0x1c, 0xe3, 0x5f, 0x41, 0x6f, 0x84, 0x3e, 0xbb, 0x4d, 0xe9, 0xa0, 0x3b, 0x5a,
0x4c, 0x62, 0x6f, 0x43, 0x21, 0xe0, 0x7b, 0x5b, 0x3e, 0xdb, 0x2c, 0x2f, 0x50, 0x3d, 0x98, 0xe0,
0x33, 0x9e, 0x81, 0xb6, 0xe9, 0x79, 0xdd, 0x37, 0x40, 0x7e, 0x1d, 0xe0, 0xc0, 0xf3, 0xba, 0x31,
0xd4, 0x79, 0xa2, 0x1d, 0x44, 0xb8, 0x8c, 0x20, 0x9e, 0xf3, 0x11, 0xef, 0xb8, 0xec, 0x0d, 0xfb,
0x0b, 0x2a, 0xfc, 0x1e, 0xac, 0xc5, 0xa2, 0x03, 0x53, 0x64, 0x9b, 0x21, 0xe1, 0x67, 0xa9, 0x04,
0xb7, 0x4f, 0xa0, 0x37, 0xae, 0x41, 0xae, 0xec, 0xf5, 0x7a, 0x76, 0xdf, 0x61, 0xc6, 0xdc, 0xee,
0x39, 0x51, 0x5a, 0xdb, 0xee, 0x39, 0xc6, 0xaf, 0x15, 0x00, 0xab, 0xef, 0xd0, 0x57, 0xc2, 0x11,
0xff, 0x32, 0x59, 0x5c, 0xf2, 0x0e, 0x53, 0xa7, 0xef, 0xb0, 0xeb, 0x00, 0x2e, 0x23, 0x41, 0x4c,
0x67, 0xc4, 0x34, 0x87, 0xf0, 0xe9, 0xf3, 0x85, 0x9c, 0x4f, 0x01, 0xb6, 0xba, 0xc3, 0x40, 0xc6,
0xd6, 0x0d, 0xb8, 0x38, 0x45, 0x72, 0x22, 0xe0, 0x5c, 0x48, 0x12, 0x2e, 0x72, 0x86, 0x7d, 0x28,
0x94, 0xbd, 0xde, 0xc0, 0x6e, 0x2f, 0x7a, 0x87, 0xbd, 0x05, 0x5a, 0x78, 0xe8, 0xd3, 0xe0, 0xd0,
0xeb, 0x8a, 0x04, 0x40, 0x21, 0x13, 0x80, 0xb1, 0x0f, 0x2b, 0x15, 0xda, 0xa5, 0x21, 0xdd, 0x1c,
0x59, 0x95, 0x05, 0x31, 0x5f, 0x81, 0xfc, 0x54, 0x9e, 0x93, 0x73, 0x65, 0x86, 0xf3, 0x45, 0xbc,
0x22, 0xb1, 0xfa, 0xcf, 0xbd, 0xc5, 0x74, 0x7a, 0x0d, 0x34, 0x96, 0xbd, 0xb4, 0xdc, 0xfe, 0x73,
0x4f, 0x6a, 0x33, 0xcf, 0x00, 0x0c, 0x93, 0xf1, 0x25, 0xac, 0x6e, 0xd3, 0x50, 0xa6, 0x68, 0x95,
0x60, 0x41, 0xa2, 0xaf, 0x03, 0x04, 0x22, 0xe8, 0x33, 0x8f, 0x16, 0xf6, 0xaa, 0x49, 0x88, 0xe5,
0x18, 0x43, 0x58, 0x8e, 0x92, 0x3f, 0x71, 0x05, 0xff, 0x39, 0xc4, 0xc1, 0x72, 0xce, 0x89, 0xe9,
0x05, 0x3c, 0x5c, 0x6b, 0x32, 0x81, 0x11, 0x85, 0xd1, 0x3d, 0x40, 0xe2, 0x8e, 0xe4, 0xf9, 0x8d,
0x60, 0x89, 0x07, 0xbc, 0x1e, 0xed, 0x07, 0xae, 0xcc, 0xe9, 0x54, 0x32, 0x01, 0x18, 0xff, 0xa5,
0xc8, 0xfc, 0x8c, 0xe5, 0x56, 0xf8, 0x43, 0xd0, 0x58, 0xac, 0x6f, 0xf1, 0x2c, 0x4c, 0x39, 0x25,
0x0b, 0xdb, 0x49, 0x91, 0xbc, 0x23, 0xc7, 0x78, 0xf3, 0xc4, 0xd5, 0xcd, 0x34, 0x73, 0x7d, 0xc6,
0xd5, 0x3d, 0x21, 0x6b, 0x27, 0x95, 0xb8, 0xc1, 0x37, 0x73, 0xb2, 0x22, 0x35, 0xbe, 0x55, 0x00,
0x62, 0xd4, 0x2f, 0x43, 0xda, 0x15, 0x3e, 0x9e, 0x21, 0x69, 0xd7, 0x61, 0x79, 0x59, 0xcc, 0x37,
0xf9, 0x78, 0x9c, 0x36, 0xaa, 0xaf, 0x4f, 0x1b, 0x3f, 0x82, 0x82, 0xf0, 0xce, 0x33, 0x57, 0x55,
0xee, 0x38, 0x9e, 0x04, 0xe7, 0x74, 0xde, 0x6d, 0x58, 0x8d, 0x49, 0x41, 0xe6, 0xec, 0x1b, 0x51,
0x25, 0x7e, 0x96, 0x8c, 0x5d, 0x8a, 0xe8, 0x3b, 0x05, 0xb4, 0x26, 0xf5, 0x7b, 0xfc, 0x9e, 0x7d,
0x5d, 0x4e, 0x7d, 0x0d, 0x34, 0xb7, 0x1f, 0xb6, 0xa2, 0x72, 0x9f, 0xd9, 0x53, 0xde, 0xed, 0x87,
0x9c, 0x46, 0x76, 0x7d, 0x38, 0xde, 0xf0, 0xa0, 0x4b, 0xe5, 0x3c, 0xb3, 0x28, 0x85, 0xe8, 0x02,
0x26, 0x96, 0x88, 0x04, 0x61, 0x48, 0xf9, 0x15, 0x9b, 0xe1, 0xe6, 0x93, 0xe7, 0x00, 0x76, 0xc9,
0xae, 0xc1, 0xd2, 0x81, 0xe7, 0x05, 0x21, 0x4f, 0x24, 0xd3, 0x44, 0x7c, 0x9c, 0x33, 0x83, 0xb4,
0x41, 0xe7, 0x11, 0xca, 0xa7, 0xe6, 0xab, 0x81, 0x8f, 0x1f, 0x42, 0xde, 0x1b, 0x50, 0xdf, 0x0e,
0x3d, 0x5f, 0x5a, 0x64, 0x52, 0x46, 0x72, 0x6d, 0x5d, 0xae, 0x21, 0xe3, 0xd5, 0x78, 0x1d, 0x72,
0x7c, 0xdc, 0x77, 0xa4, 0xb5, 0x44, 0x9f, 0xc6, 0x4f, 0x15, 0x00, 0x62, 0xf7, 0x3b, 0xf4, 0x4c,
0x12, 0xdc, 0x88, 0xe3, 0x39, 0x99, 0x94, 0xc6, 0x88, 0x1d, 0x9f, 0x30, 0x11, 0x8c, 0x7a, 0x9a,
0x60, 0x16, 0xab, 0xe9, 0xbf, 0x55, 0xa2, 0x3a, 0xed, 0x4c, 0x64, 0xdf, 0x00, 0x5d, 0x24, 0x23,
0x82, 0x90, 0x34, 0x27, 0x04, 0x38, 0x68, 0x93, 0x53, 0x13, 0x2b, 0x17, 0xd5, 0x05, 0xca, 0x45,
0xe6, 0x82, 0xa1, 0x37, 0x38, 0x92, 0xc6, 0xc0, 0xc7, 0xe7, 0xf4, 0x8c, 0x57, 0x50, 0x60, 0x29,
0x0a, 0xb5, 0xfb, 0x82, 0xb3, 0x5b, 0xb0, 0xe4, 0xb5, 0xdb, 0xc3, 0x48, 0xe1, 0x38, 0x81, 0xa6,
0xce, 0x66, 0x88, 0x58, 0x80, 0x3f, 0x81, 0x62, 0x87, 0xf6, 0xa9, 0x6f, 0x77, 0x5b, 0x9c, 0x33,
0xa9, 0xa1, 0xe4, 0xc1, 0xdb, 0x62, 0x85, 0x48, 0x4b, 0x0b, 0x9d, 0xd8, 0x97, 0xf1, 0x9f, 0x69,
0x28, 0xc4, 0xa7, 0xf1, 0xa7, 0x50, 0x3c, 0x10, 0xa4, 0x48, 0x84, 0xca, 0x8c, 0x5a, 0x37, 0x4e,
0xec, 0x4e, 0x8a, 0x14, 0x0e, 0xe2, 0xc4, 0x3f, 0x00, 0x08, 0xa9, 0xdf, 0x1b, 0xd3, 0xa3, 0x9c,
0x28, 0x3e, 0xc7, 0xbe, 0xbb, 0x93, 0x22, 0x5a, 0x38, 0x76, 0xe4, 0x7f, 0x02, 0xdd, 0x67, 0x46,
0x29, 0x77, 0x8a, 0x1e, 0xd1, 0xe5, 0x64, 0xbe, 0x3d, 0x36, 0xda, 0x9d, 0x14, 0x01, 0x7f, 0x62,
0xc2, 0x1f, 0x8f, 0x43, 0xb0, 0xd8, 0x9c, 0x99, 0x51, 0x4f, 0xc4, 0x6c, 0x67, 0x12, 0x7d, 0xf9,
0x27, 0x8b, 0xbe, 0x7c, 0x9f, 0xf1, 0x1b, 0x05, 0x8a, 0xb1, 0xba, 0x63, 0x6f, 0xf3, 0xec, 0xf7,
0xd6, 0x82, 0x2d, 0x97, 0x93, 0xaa, 0x53, 0x67, 0x48, 0x7a, 0xbe, 0xea, 0xce, 0xe9, 0x4c, 0xbf,
0x9b, 0xd4, 0x29, 0x2f, 0x76, 0x83, 0xce, 0x5f, 0xa9, 0x4e, 0x7d, 0x17, 0x96, 0x13, 0x32, 0x8a,
0xee, 0xf0, 0x62, 0x5c, 0x3c, 0x41, 0x54, 0xce, 0x66, 0x26, 0xe5, 0xec, 0xb9, 0xfc, 0x8b, 0xe7,
0x6e, 0x6e, 0x8f, 0x06, 0xa1, 0xdd, 0x1b, 0xac, 0x67, 0xf9, 0x5d, 0x3a, 0x01, 0x24, 0x4a, 0x93,
0x5c, 0xa2, 0x34, 0x49, 0x14, 0x34, 0xf9, 0x64, 0x41, 0x73, 0x17, 0xf2, 0x3e, 0x7d, 0x21, 0x32,
0x05, 0x8d, 0xbb, 0xe9, 0x54, 0x69, 0x48, 0x5f, 0xf0, 0x7b, 0x37, 0xe7, 0x8b, 0x81, 0xf1, 0x3d,
0x15, 0xf0, 0xae, 0xdd, 0x77, 0x07, 0xc3, 0xae, 0xcd, 0x53, 0xfa, 0x05, 0x05, 0x7f, 0xa2, 0xa2,
0x4f, 0xcf, 0x68, 0x5e, 0xbc, 0x0d, 0x85, 0x81, 0xef, 0xf6, 0x6c, 0x7f, 0xd4, 0x3a, 0xa2, 0x23,
0x21, 0xde, 0x0c, 0xd1, 0x25, 0xec, 0x31, 0x1d, 0x05, 0x6f, 0x50, 0xd4, 0x26, 0xe5, 0xb7, 0x34,
0x2d, 0xbf, 0x64, 0x2a, 0x28, 0xc5, 0x3b, 0x4e, 0x05, 0xd9, 0x74, 0xfb, 0xd0, 0xee, 0xf7, 0x69,
0x77, 0x52, 0x16, 0x6a, 0x12, 0x32, 0x25, 0xc7, 0xfc, 0x19, 0xe4, 0x98, 0x50, 0x97, 0x96, 0x54,
0xd7, 0xb4, 0x95, 0xc0, 0x42, 0x2e, 0xf1, 0x1f, 0x2a, 0xac, 0x8a, 0xf6, 0x56, 0xdd, 0x17, 0xc9,
0xfc, 0x42, 0xfa, 0x49, 0xc8, 0x55, 0x84, 0xbd, 0xd7, 0xc9, 0x15, 0x81, 0x3a, 0x74, 0x9d, 0xa8,
0x89, 0x37, 0x74, 0x9d, 0x59, 0x6d, 0x9b, 0x93, 0x4a, 0x5e, 0x54, 0x1d, 0xea, 0xe9, 0xea, 0x50,
0xe3, 0xea, 0x78, 0x07, 0xd2, 0xde, 0x40, 0x2a, 0x22, 0x59, 0x5b, 0xd4, 0x07, 0x5c, 0x0f, 0x69,
0x6f, 0xc0, 0x72, 0xa2, 0x76, 0xd7, 0x95, 0x27, 0x08, 0x1d, 0xe4, 0x05, 0xe0, 0xdc, 0x4a, 0xf8,
0x59, 0x1a, 0x34, 0x11, 0x80, 0x17, 0x12, 0x7e, 0xec, 0x2e, 0x17, 0xa2, 0x3f, 0xe3, 0x5d, 0x7e,
0x42, 0xde, 0xea, 0x8c, 0x36, 0x99, 0x54, 0x53, 0x66, 0xa2, 0xa6, 0xd3, 0x35, 0x90, 0x10, 0x4f,
0xf6, 0x35, 0xe2, 0xc9, 0x2d, 0x14, 0xc9, 0xa2, 0xde, 0x74, 0x9e, 0x93, 0x29, 0x7a, 0xd3, 0x32,
0x5a, 0x6a, 0xe3, 0x68, 0x69, 0xbc, 0x02, 0xbd, 0xe9, 0xf6, 0x68, 0x63, 0xd4, 0x6f, 0x33, 0x29,
0x5e, 0x86, 0xdc, 0x80, 0x52, 0xbf, 0x65, 0x39, 0xb2, 0xfe, 0xc9, 0xb2, 0x4f, 0x8b, 0xb3, 0xd1,
0x1c, 0xb3, 0x91, 0x16, 0x6c, 0x8c, 0x01, 0x78, 0x03, 0xb4, 0x60, 0xd4, 0x6f, 0xb7, 0xe6, 0xd6,
0x16, 0x0c, 0x3f, 0xb7, 0x89, 0x7c, 0x20, 0x47, 0xc6, 0x97, 0x00, 0x8f, 0xe9, 0x68, 0xa3, 0x41,
0x3b, 0xec, 0x60, 0x29, 0x38, 0x65, 0x8e, 0xe0, 0xd2, 0xa7, 0x9b, 0xae, 0xca, 0x93, 0xf5, 0x89,
0xe9, 0xde, 0xfe, 0x41, 0x06, 0xb4, 0xf1, 0x43, 0x20, 0xd6, 0x21, 0xd7, 0xd8, 0x2f, 0x97, 0xcd,
0x46, 0x03, 0xa5, 0xf0, 0x1a, 0xa0, 0xfd, 0x9a, 0xf9, 0xd9, 0x9e, 0x59, 0x6e, 0x9a, 0x95, 0x96,
0x49, 0x48, 0x9d, 0x20, 0x05, 0x63, 0x58, 0x2e, 0xd7, 0x6b, 0x35, 0xb3, 0xdc, 0x6c, 0x6d, 0x95,
0xac, 0xaa, 0x59, 0x41, 0x69, 0x7c, 0x11, 0x56, 0xf7, 0x4c, 0xb2, 0x6b, 0x35, 0x1a, 0x56, 0xbd,
0xd6, 0xaa, 0x98, 0x35, 0xcb, 0xac, 0x20, 0x15, 0x5f, 0x81, 0x8b, 0xe5, 0x7a, 0xb5, 0x6a, 0x96,
0x9b, 0x0c, 0x5c, 0xab, 0x37, 0x5b, 0xe6, 0x67, 0x56, 0xa3, 0xd9, 0x40, 0x19, 0x86, 0xdb, 0xaa,
0x56, 0xcd, 0xed, 0x52, 0xb5, 0x55, 0x22, 0xdb, 0xfb, 0xbb, 0x66, 0xad, 0x89, 0x96, 0x18, 0x9e,
0x08, 0x5a, 0xb1, 0x76, 0xcd, 0x1a, 0x43, 0x87, 0x72, 0xf8, 0x12, 0xe0, 0x08, 0x6c, 0xd5, 0x2a,
0xe6, 0x67, 0xad, 0xe6, 0xe7, 0x7b, 0x26, 0xca, 0xe3, 0x6b, 0x70, 0x39, 0x82, 0xc7, 0xcf, 0x29,
0xed, 0x9a, 0x48, 0xc3, 0x08, 0x0a, 0xd1, 0x64, 0xb3, 0xbe, 0xf7, 0x18, 0x41, 0x1c, 0x3b, 0xa9,
0x3f, 0x23, 0x66, 0xb9, 0x4e, 0x2a, 0x48, 0x8f, 0x83, 0x9f, 0x9a, 0xe5, 0x66, 0x9d, 0xb4, 0xac,
0x0a, 0x2a, 0x30, 0xe2, 0x23, 0x70, 0xc3, 0x2c, 0x91, 0xf2, 0x4e, 0x8b, 0x98, 0x8d, 0xfd, 0x6a,
0x13, 0x15, 0x99, 0x08, 0xb6, 0xac, 0xaa, 0xc9, 0x39, 0xda, 0xaa, 0xef, 0xd7, 0x2a, 0x68, 0x19,
0xaf, 0x80, 0xbe, 0x6b, 0x36, 0x4b, 0x91, 0x4c, 0x56, 0xd8, 0xf9, 0xe5, 0x52, 0x79, 0xc7, 0x8c,
0x20, 0x08, 0xaf, 0xc3, 0x5a, 0xb9, 0x54, 0x63, 0x9b, 0xca, 0xc4, 0x2c, 0x35, 0xcd, 0xd6, 0x56,
0xbd, 0x5a, 0x31, 0x09, 0x5a, 0x65, 0x0c, 0x4e, 0xcd, 0x58, 0x55, 0x13, 0xe1, 0xd8, 0x8e, 0x8a,
0x59, 0x35, 0x27, 0x3b, 0x2e, 0xc4, 0x76, 0x44, 0x33, 0x6c, 0xc7, 0x1a, 0x63, 0x66, 0x73, 0xdf,
0xaa, 0x56, 0xa4, 0xa0, 0x84, 0xd2, 0x2e, 0xe2, 0x55, 0x28, 0x46, 0xcc, 0xd4, 0xaa, 0x56, 0xa3,
0x89, 0x2e, 0xe1, 0xcb, 0x70, 0x21, 0x02, 0xed, 0x9a, 0x4d, 0x62, 0x95, 0x85, 0x54, 0x2f, 0xb3,
0xb5, 0xf5, 0xfd, 0x66, 0xab, 0xbe, 0xd5, 0xda, 0x35, 0x77, 0xeb, 0xe4, 0x73, 0xb4, 0x7e, 0xfb,
0x6b, 0x05, 0xf2, 0x51, 0xd1, 0x8b, 0xf3, 0x90, 0xa9, 0xd5, 0x6b, 0x26, 0x4a, 0xb1, 0xd1, 0x66,
0xbd, 0x5e, 0x45, 0x0a, 0x1b, 0x59, 0xb5, 0xe6, 0x43, 0x94, 0xc6, 0x1a, 0x2c, 0x59, 0xb5, 0xe6,
0xfb, 0xf7, 0x91, 0x2a, 0x87, 0x1f, 0x6c, 0xa0, 0x8c, 0x1c, 0xde, 0xff, 0x10, 0x2d, 0xb1, 0xe1,
0x56, 0xb5, 0x5e, 0x6a, 0x22, 0xc0, 0x00, 0xd9, 0x4a, 0x7d, 0x7f, 0xb3, 0x6a, 0x22, 0x9d, 0x8d,
0x1b, 0x4d, 0x62, 0xd5, 0xb6, 0xd1, 0x1a, 0xa3, 0x40, 0x6a, 0x62, 0xd3, 0xaa, 0x95, 0xc8, 0xe7,
0xc8, 0x61, 0xd2, 0x94, 0x20, 0xb1, 0x99, 0xde, 0x2e, 0xc3, 0xca, 0x54, 0x99, 0x86, 0xb3, 0x90,
0xae, 0x36, 0x51, 0x0a, 0xe7, 0x40, 0xad, 0x36, 0x4d, 0xa4, 0x30, 0x80, 0xf9, 0x04, 0xa5, 0xd9,
0xef, 0x76, 0x13, 0xa9, 0x6c, 0x62, 0xbb, 0x69, 0xa2, 0x0c, 0x03, 0xd4, 0x4c, 0xb4, 0x74, 0xfb,
0x21, 0x2c, 0xf1, 0xd4, 0x9f, 0x19, 0xbe, 0x55, 0x7b, 0x5a, 0xaa, 0x5a, 0x15, 0xc1, 0xd7, 0xee,
0x7e, 0xa3, 0x89, 0x14, 0x4e, 0xd5, 0x4e, 0x7d, 0xbf, 0xca, 0x8c, 0xbc, 0x00, 0x79, 0x06, 0x65,
0x5a, 0x47, 0xea, 0xed, 0x9b, 0x90, 0x15, 0xc1, 0x9b, 0xad, 0xb1, 0x6a, 0x0d, 0x93, 0xb0, 0x93,
0x19, 0x47, 0x5c, 0x1f, 0x48, 0xb9, 0xfd, 0x2b, 0x15, 0x72, 0xf2, 0xa2, 0x65, 0x18, 0x8f, 0xca,
0x3d, 0x07, 0xa5, 0x98, 0x82, 0x8e, 0xca, 0x3e, 0xb5, 0x43, 0x3a, 0x69, 0x42, 0x21, 0x07, 0x5f,
0x80, 0x95, 0xa3, 0x8a, 0xef, 0x0d, 0x62, 0x40, 0xca, 0xec, 0xec, 0x68, 0xc7, 0x0e, 0x62, 0xb0,
0xe7, 0xcc, 0x71, 0x8e, 0xaa, 0x6e, 0x10, 0x4e, 0x80, 0x01, 0xea, 0x30, 0x73, 0x38, 0xda, 0xa6,
0x61, 0xb2, 0xaf, 0x85, 0x0e, 0x99, 0x92, 0x93, 0xf0, 0x46, 0x68, 0x87, 0x01, 0x72, 0x39, 0x6a,
0xde, 0xfc, 0x8c, 0xda, 0xdf, 0xe8, 0x2b, 0x7c, 0x11, 0x90, 0x24, 0x6d, 0xfc, 0x48, 0x8a, 0x7e,
0xae, 0xe0, 0x0b, 0xb0, 0xcc, 0x49, 0x9b, 0x00, 0x7f, 0xc1, 0xa2, 0x40, 0x91, 0x91, 0x36, 0x81,
0x7d, 0xa3, 0xe0, 0x35, 0x58, 0xe1, 0xa4, 0x8d, 0x81, 0x01, 0xfa, 0xa5, 0x82, 0x57, 0xa1, 0x20,
0xb1, 0xf2, 0x2e, 0x2a, 0xfa, 0x61, 0x1a, 0xaf, 0x00, 0x70, 0x8c, 0x02, 0xf0, 0xa3, 0xb4, 0x38,
0x82, 0x06, 0x6d, 0xdf, 0x3d, 0x90, 0xab, 0x7e, 0xcc, 0xe4, 0x9d, 0x3b, 0x12, 0xa9, 0x03, 0xfa,
0x6f, 0x95, 0x2f, 0x19, 0xb7, 0xd6, 0x36, 0x47, 0x56, 0x05, 0x7d, 0xad, 0xe2, 0x4b, 0xb0, 0x7a,
0x24, 0xb2, 0x8a, 0x18, 0xfc, 0x7f, 0x54, 0xbe, 0x55, 0x5c, 0x78, 0xe8, 0x7f, 0x55, 0xce, 0x17,
0xa3, 0xcb, 0xaa, 0x8c, 0x1f, 0x55, 0xd0, 0xff, 0xa9, 0x82, 0x5c, 0xcf, 0x76, 0x62, 0xe2, 0xfd,
0xbd, 0x8a, 0x75, 0xc8, 0x1e, 0xf1, 0x76, 0x28, 0xfa, 0x83, 0x8a, 0x8b, 0x90, 0x3f, 0x92, 0x9d,
0x4d, 0xf4, 0x47, 0xf5, 0xf6, 0x0d, 0xc8, 0x47, 0xe1, 0x99, 0x69, 0x94, 0x98, 0x25, 0x66, 0x2d,
0x1a, 0x2c, 0x3d, 0x23, 0x16, 0x53, 0xf9, 0xc6, 0x77, 0x45, 0x28, 0xee, 0xf2, 0x60, 0xde, 0xa0,
0xfe, 0x4b, 0xb7, 0x4d, 0xf1, 0x3f, 0x03, 0x9a, 0xd6, 0x36, 0x9e, 0xf9, 0xea, 0x7d, 0x75, 0x56,
0xd3, 0xd1, 0x48, 0xe1, 0x2d, 0x28, 0x26, 0x4c, 0x00, 0x5f, 0x9b, 0x6a, 0x14, 0xc4, 0xaf, 0xec,
0xab, 0x97, 0x4e, 0x94, 0x94, 0xa2, 0xad, 0x9e, 0xc2, 0x16, 0xe0, 0x48, 0xc2, 0x67, 0x45, 0x36,
0x93, 0x4e, 0x23, 0x85, 0x9f, 0x30, 0xcf, 0x1b, 0xf6, 0xc3, 0xb3, 0xe2, 0xb9, 0x31, 0x67, 0x72,
0xdc, 0x7f, 0x4f, 0xe1, 0x7f, 0x81, 0x95, 0xc6, 0x21, 0xfb, 0x1c, 0x1b, 0xf5, 0x94, 0x94, 0x64,
0x7f, 0x7e, 0x2e, 0xae, 0xe8, 0xef, 0x21, 0x46, 0x0a, 0xef, 0x01, 0x4e, 0xe2, 0xe2, 0x3d, 0xde,
0x53, 0x29, 0x9c, 0x37, 0xc9, 0x7b, 0xba, 0x29, 0x5c, 0x81, 0xe5, 0xa4, 0x6f, 0x9e, 0x8e, 0x6d,
0x8e, 0x26, 0x3f, 0x06, 0x3d, 0xe6, 0x07, 0x38, 0x59, 0x84, 0x4f, 0x5e, 0x18, 0xe6, 0x6d, 0x2f,
0x43, 0x31, 0xe1, 0x22, 0xf3, 0x11, 0xcc, 0x9b, 0x30, 0x52, 0xf8, 0x11, 0x68, 0x63, 0xc7, 0x5b,
0x98, 0x02, 0x13, 0x56, 0xa6, 0xc2, 0xc3, 0x94, 0x1c, 0x92, 0x7f, 0xd4, 0x98, 0x8f, 0xa6, 0x10,
0x8f, 0x1c, 0xa7, 0xe3, 0x98, 0x6f, 0xd0, 0x8f, 0x61, 0x99, 0xa9, 0x79, 0x12, 0x6c, 0x4e, 0x57,
0xca, 0xd5, 0xd9, 0xa7, 0x48, 0x9b, 0x61, 0xc2, 0x8d, 0x87, 0xb8, 0x37, 0x62, 0xec, 0x23, 0xc8,
0x8a, 0x78, 0x85, 0xd7, 0xa7, 0x24, 0x3b, 0x7e, 0xde, 0x9f, 0xe2, 0x67, 0xfc, 0x5f, 0x0e, 0x2e,
0x96, 0x62, 0x22, 0xbe, 0x4d, 0x91, 0x90, 0xec, 0xfc, 0x5f, 0x9d, 0xfd, 0x86, 0x69, 0xa4, 0xf0,
0x0e, 0x14, 0xe2, 0x2f, 0x10, 0xf8, 0x6f, 0xa6, 0x5a, 0x1f, 0x53, 0x8f, 0x13, 0xa7, 0x10, 0xf4,
0x09, 0x64, 0x45, 0x0c, 0xc5, 0x73, 0x9f, 0x90, 0xaf, 0x26, 0x67, 0x62, 0x6f, 0xb4, 0xdc, 0x0f,
0x57, 0xa6, 0x9e, 0xb2, 0xf1, 0xdb, 0x33, 0x10, 0x25, 0x1f, 0xba, 0x4f, 0xc5, 0xf8, 0x00, 0xd4,
0x72, 0xcf, 0x99, 0x13, 0x19, 0xa6, 0x88, 0x8c, 0x3d, 0x56, 0xa6, 0x70, 0x09, 0x60, 0xf2, 0x92,
0x84, 0x93, 0x65, 0xcc, 0xd4, 0x13, 0xd3, 0x3c, 0xe5, 0x6e, 0xc3, 0xea, 0x9e, 0x4f, 0xbb, 0x89,
0xfb, 0xe2, 0x8d, 0xc2, 0xc0, 0x03, 0x58, 0xe2, 0xf7, 0xcb, 0x94, 0xfb, 0x4d, 0x9e, 0xe0, 0xe6,
0x6d, 0x7c, 0xc4, 0x5f, 0x29, 0xd9, 0x5d, 0x84, 0xaf, 0x9c, 0x6c, 0x16, 0xcb, 0xb7, 0xb7, 0x79,
0x9b, 0x37, 0x21, 0x2f, 0xf5, 0xb6, 0x89, 0xaf, 0xce, 0x53, 0xe7, 0xde, 0xe6, 0x69, 0xe2, 0xdf,
0xdc, 0xf8, 0xe2, 0x5e, 0xc7, 0x0d, 0x0f, 0x87, 0x07, 0x77, 0xda, 0x5e, 0xef, 0x6e, 0xfb, 0xdf,
0x83, 0x7b, 0xf7, 0x1e, 0xdc, 0x0d, 0x86, 0x2f, 0xbb, 0x6e, 0xef, 0xee, 0x9c, 0xbf, 0x2a, 0x1e,
0x64, 0xf9, 0x7f, 0x14, 0x3f, 0xf8, 0x53, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8f, 0x0a, 0x17, 0x37,
0xcc, 0x28, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.


@ -208,8 +208,8 @@ message QueryResult {
repeated float scores = 4;
repeated float distances = 5;
repeated KeyValuePair extra_params = 6;
int64 query_id = 7;
int64 client_id = 8;
uint64 query_id = 7;
int64 proxy_id = 8;
}
/**
@ -675,6 +675,69 @@ enum OpType {
DELETE = 1;
}
enum ReqType {
// general operations
kCmd = 0;
/* collection operations */
kCreateCollection = 100;
kDropCollection = 101;
kHasCollection = 102;
kListCollections = 103;
kGetCollectionInfo = 104;
kGetCollectionStats = 105;
kCountEntities = 106;
/* partition operations */
kCreatePartition = 200;
kDropPartition = 201;
kHasPartition = 202;
kListPartitions = 203;
/* index operations */
kCreateIndex = 300;
kDropIndex = 301;
kDescribeIndex = 302;
/* data operations */
kInsert = 400;
kGetEntityByID = 401;
kDeleteEntityByID = 402;
kSearch = 403;
kListIDInSegment = 404;
/* other operations */
kLoadCollection = 500;
kFlush = 501;
kCompact = 502;
}
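
The enum values are grouped by hundreds (1xx collection, 2xx partition, 3xx index, 4xx data, 5xx maintenance), so code can classify a request by value range alone. A hypothetical helper sketched against the generated Go enum, assuming the pb import alias used elsewhere in this commit (reqCategory itself is not part of this change):

// Hypothetical helper: classify a generated ReqType by its numeric range.
func reqCategory(t pb.ReqType) string {
    switch {
    case t == pb.ReqType_kCmd:
        return "general"
    case t >= 100 && t < 200:
        return "collection"
    case t >= 200 && t < 300:
        return "partition"
    case t >= 300 && t < 400:
        return "index"
    case t >= 400 && t < 500:
        return "data"
    default:
        return "other" // load/flush/compact and anything unrecognized
    }
}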
message QueryReqMsg {
string collection_name = 1;
repeated VectorParam vector_param = 2;
repeated string partition_tags = 3;
string dsl = 4;
repeated KeyValuePair extra_params = 5;
uint64 timestamp = 6;
int64 proxy_id = 7;
uint64 query_id = 8;
ReqType req_type = 9;
}
message ManipulationReqMsg {
string collection_name = 1;
string partition_tag = 2;
repeated uint64 primary_keys = 3;
repeated RowData rows_data = 4;
uint64 timestamp = 5;
uint64 segment_id = 6;
uint64 channel_id = 7;
ReqType req_type = 8;
int64 proxy_id = 9;
repeated KeyValuePair extra_params = 10;
}
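
On the Go side these messages round-trip through github.com/golang/protobuf/proto, the same marshaling path the proxy uses before publishing to Pulsar. A minimal, self-contained sketch with illustrative field values:

package main

import (
    "log"

    pb "github.com/czs007/suvlim/pkg/master/grpc/message"
    "github.com/golang/protobuf/proto"
)

func main() {
    req := &pb.ManipulationReqMsg{
        CollectionName: "demo",
        PartitionTag:   "default",
        PrimaryKeys:    []uint64{1, 2, 3},
        ReqType:        pb.ReqType_kInsert,
        ProxyId:        1,
    }
    buf, err := proto.Marshal(req) // wire bytes, as sent to Pulsar
    if err != nil {
        log.Fatalf("marshal failed: %v", err)
    }
    var out pb.ManipulationReqMsg
    if err := proto.Unmarshal(buf, &out); err != nil {
        log.Fatalf("unmarshal failed: %v", err)
    }
    log.Printf("round-trip ok, collection = %s", out.GetCollectionName())
}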
message InsertOrDeleteMsg {
string collection_name = 1;
RowData rows_data = 2;


@ -3,11 +3,13 @@ package informer
import (
"context"
"fmt"
"github.com/apache/pulsar-client-go/pulsar"
"github.com/czs007/suvlim/conf"
"github.com/czs007/suvlim/pkg/master/mock"
"log"
"strconv"
"time"
"github.com/apache/pulsar-client-go/pulsar"
"github.com/czs007/suvlim/pkg/master/mock"
)
func NewPulsarClient() PulsarClient {
@ -15,25 +17,11 @@ func NewPulsarClient() PulsarClient {
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
var client pulsar.Client
var err error
if conf.Config.Pulsar.Authentication {
client, err = pulsar.NewClient(pulsar.ClientOptions{
URL: pulsarAddr,
Authentication: pulsar.NewAuthenticationToken(conf.Config.Pulsar.Token),
//OperationTimeout: 30 * time.Second,
//ConnectionTimeout: 30 * time.Second,
})
} else {
client, err = pulsar.NewClient(pulsar.ClientOptions{
URL: pulsarAddr,
//OperationTimeout: 30 * time.Second,
//ConnectionTimeout: 30 * time.Second,
})
}
client, err := pulsar.NewClient(pulsar.ClientOptions{
URL: pulsarAddr,
OperationTimeout: 30 * time.Second,
ConnectionTimeout: 30 * time.Second,
})
if err != nil {
log.Fatalf("Could not instantiate Pulsar client: %v", err)
}
@ -65,10 +53,8 @@ func (pc PulsarClient) Listener(ssChan chan mock.SegmentStats) error {
if err != nil {
log.Println("SegmentUnMarshal Failed")
}
//fmt.Printf("Received message msgId: %#v -- content: '%s'\n",
// msg.ID(), m.SegementID)
fmt.Println("Received SegmentStats -- segmentID:", m.SegementID,
",memSize:", m.MemorySize, ",memRate:", m.MemoryRate, ",numRows:", m.Rows, ",status:", m.Status)
fmt.Printf("Received message msgId: %#v -- content: '%s'\n",
msg.ID(), m.SegementID)
ssChan <- m
consumer.Ack(msg)
}


@ -7,8 +7,6 @@ import (
)
func TestFakeCreateCollectionByGRPC(t *testing.T) {
t.Skip("to fix test")
reason, segmentID := FakeCreateCollectionByGRPC()
if reason != "" {
t.Error(reason)


@ -3,8 +3,10 @@ package mock
import (
"bytes"
"encoding/gob"
masterpb "github.com/czs007/suvlim/pkg/master/grpc/master"
"github.com/golang/protobuf/proto"
"time"
masterpb "github.com/czs007/suvlim/pkg/master/grpc/master"
)
type SegmentStats struct {
@ -57,7 +59,7 @@ type Segment struct {
Rows int64 `json:"rows"`
}
func NewSegment(id uint64, collectioID uint64, cName string, ptag string, chStart int, chEnd int, openTime uint64, closeTime uint64) Segment {
func NewSegment(id uint64, collectioID uint64, cName string, ptag string, chStart int, chEnd int, openTime time.Time, closeTime time.Time) Segment {
return Segment{
SegmentID: id,
CollectionID: collectioID,
@ -65,8 +67,8 @@ func NewSegment(id uint64, collectioID uint64, cName string, ptag string, chStar
PartitionTag: ptag,
ChannelStart: chStart,
ChannelEnd: chEnd,
OpenTimeStamp: openTime,
CloseTimeStamp: closeTime,
OpenTimeStamp: uint64(openTime.Unix()),
CloseTimeStamp: uint64(closeTime.Unix()),
}
}
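
NewSegment now takes time.Time values and stores them as Unix seconds; callers in this change pass time.Unix(1<<36-1, 0) as the close time, which reads as a far-future sentinel for a still-open segment. A small sketch under that assumption, placed in the same mock package:

package mock

import (
    "fmt"
    "time"
)

func ExampleNewSegment() {
    open := time.Now()
    never := time.Unix(1<<36-1, 0) // far-future sentinel used by callers in this commit

    seg := NewSegment(1, 10, "demo", "default", 0, 511, open, never)
    fmt.Println(seg.CloseTimeStamp == uint64(never.Unix())) // true: stored as Unix seconds
}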
func Segment2JSON(s Segment) (string, error) {


@ -7,8 +7,6 @@ import (
)
func TestSegmentMarshal(t *testing.T) {
t.Skip("to fix test")
s := SegmentStats{
SegementID: uint64(12315),
MemorySize: uint64(233113),


@ -46,13 +46,13 @@ func SegmentStatsController() {
defer close(ssChan)
ssClient := informer.NewPulsarClient()
// segmentCloseLog := make(map[uint64]uint64, 0)
segmentCloseLog := make(map[uint64]uint64, 0)
go ssClient.Listener(ssChan)
for {
select {
case ss := <-ssChan:
// ComputeCloseTime(&segmentCloseLog, ss, kvbase)
ComputeCloseTime(&segmentCloseLog, ss, kvbase)
UpdateSegmentStatus(ss, kvbase)
//case <-time.After(5 * time.Second):
// fmt.Println("timeout")
@ -61,19 +61,16 @@ func SegmentStatsController() {
}
}
func GetPhysicalTimeNow() uint64 {
return uint64(time.Now().UnixNano() / int64(time.Millisecond))
}
func ComputeCloseTime(segmentCloseLog *map[uint64]uint64, ss mock.SegmentStats, kvbase kv.Base) error {
segmentID := ss.SegementID
if _, ok := (*segmentCloseLog)[segmentID]; ok {
// This segment has been closed
log.Println("Segment", segmentID, "has been closed")
return nil
}
if int(ss.MemorySize) > int(conf.Config.Master.SegmentThreshole*0.8) {
currentTime := GetPhysicalTimeNow()
currentTime := time.Now()
memRate := int(ss.MemoryRate)
if memRate == 0 {
//memRate = 1
@ -83,54 +80,34 @@ func ComputeCloseTime(segmentCloseLog *map[uint64]uint64, ss mock.SegmentStats,
sec := float64(conf.Config.Master.SegmentThreshole*0.2) / float64(memRate)
data, err := kvbase.Load("segment/" + strconv.Itoa(int(ss.SegementID)))
if err != nil {
log.Println("Load segment failed")
return err
}
seg, err := mock.JSON2Segment(data)
if err != nil {
log.Println("JSON2Segment failed")
return err
}
seg.CloseTimeStamp = currentTime + uint64(sec * 1000)
// Reduce time gap between Proxy and Master
seg.CloseTimeStamp = seg.CloseTimeStamp + uint64(5 * 1000)
fmt.Println("Close segment = ", seg.SegmentID, ",Close time = ", seg.CloseTimeStamp)
segmentLogicTime := seg.CloseTimeStamp << 46 >> 46
seg.CloseTimeStamp = uint64(currentTime.Add(time.Duration(sec) * time.Second).Unix()) << 18 + segmentLogicTime
fmt.Println("memRate = ", memRate, ",sec = ", sec ,",Close time = ", seg.CloseTimeStamp)
updateData, err := mock.Segment2JSON(*seg)
if err != nil {
log.Println("Update segment, Segment2JSON failed")
return err
}
err = kvbase.Save("segment/"+strconv.Itoa(int(ss.SegementID)), updateData)
if err != nil {
log.Println("Save segment failed")
return err
}
kvbase.Save("segment/"+strconv.Itoa(int(ss.SegementID)), updateData)
(*segmentCloseLog)[segmentID] = seg.CloseTimeStamp
//create new segment
newSegID := id.New().Uint64()
newSeg := mock.NewSegment(newSegID, seg.CollectionID, seg.CollectionName, "default", seg.ChannelStart, seg.ChannelEnd, currentTime, 1 << 46 - 1)
newSeg := mock.NewSegment(newSegID, seg.CollectionID, seg.CollectionName, "default", seg.ChannelStart, seg.ChannelEnd, currentTime, time.Unix(1<<36-1, 0))
newSegData, err := mock.Segment2JSON(*&newSeg)
if err != nil {
log.Println("Create new segment, Segment2JSON failed")
return err
}
//save to kv store
err = kvbase.Save("segment/"+strconv.Itoa(int(newSegID)), newSegData)
if err != nil {
log.Println("Save segment failed")
return err
}
kvbase.Save("segment/"+strconv.Itoa(int(newSegID)), newSegData)
// update collection data
c, _ := kvbase.Load("collection/" + strconv.Itoa(int(seg.CollectionID)))
collection, err := mock.JSON2Collection(c)
if err != nil {
log.Println("JSON2Segment failed")
return err
}
segIDs := collection.SegmentIDs
@ -138,14 +115,9 @@ func ComputeCloseTime(segmentCloseLog *map[uint64]uint64, ss mock.SegmentStats,
collection.SegmentIDs = segIDs
cData, err := mock.Collection2JSON(*collection)
if err != nil {
log.Println("Collection2JSON failed")
return err
}
err = kvbase.Save("collection/"+strconv.Itoa(int(seg.CollectionID)), cData)
if err != nil {
log.Println("Save collection failed")
return err
}
kvbase.Save("segment/"+strconv.Itoa(int(seg.CollectionID)), cData)
}
return nil
}
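
The shifts in ComputeCloseTime imply a hybrid timestamp layout: physical Unix seconds in the upper 46 bits and an 18-bit logical counter in the lower bits (x << 46 >> 46 extracts the logical part; sec << 18 re-attaches the physical part). A pack/unpack sketch under that reading; the helper names are hypothetical:

package main

import "fmt"

// Assumed layout: [ 46 bits physical Unix seconds | 18 bits logical counter ]
const logicalBits = 18

func packTS(physicalSec, logical uint64) uint64 {
    return physicalSec<<logicalBits | logical
}

func unpackTS(ts uint64) (physicalSec, logical uint64) {
    // ts << (64-18) >> (64-18) mirrors the << 46 >> 46 trick in ComputeCloseTime.
    return ts >> logicalBits, ts << (64 - logicalBits) >> (64 - logicalBits)
}

func main() {
    ts := packTS(1602748342, 7)
    sec, logic := unpackTS(ts)
    fmt.Println(sec, logic) // 1602748342 7
}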
@ -175,7 +147,7 @@ func UpdateSegmentStatus(ss mock.SegmentStats, kvbase kv.Base) error {
if err != nil {
return err
}
err = kvbase.Save("segment/"+strconv.Itoa(int(seg.SegmentID)), segData)
err = kvbase.Save("segment/"+strconv.Itoa(int(seg.CollectionID)), segData)
if err != nil {
return err
}
@ -262,8 +234,8 @@ func CollectionController(ch chan *messagepb.Mapping) {
time.Now(), fieldMetas, []uint64{sID, s2ID},
[]string{"default"})
cm := mock.GrpcMarshal(&c)
s := mock.NewSegment(sID, cID, collection.CollectionName, "default", 0, 511, GetPhysicalTimeNow(), 1 << 46 - 1)
s2 := mock.NewSegment(s2ID, cID, collection.CollectionName, "default", 512, 1023, GetPhysicalTimeNow(), 1 << 46 - 1)
s := mock.NewSegment(sID, cID, collection.CollectionName, "default", 0, 511, time.Now(), time.Unix(1<<36-1, 0))
s2 := mock.NewSegment(s2ID, cID, collection.CollectionName, "default", 512, 1023, time.Now(), time.Unix(1<<36-1, 0))
collectionData, _ := mock.Collection2JSON(*cm)
segmentData, err := mock.Segment2JSON(s)
if err != nil {
@ -298,75 +270,37 @@ func WriteCollection2Datastore(collection *messagepb.Mapping) error {
})
defer cli.Close()
kvbase := kv.NewEtcdKVBase(cli, conf.Config.Etcd.Rootpath)
sID := id.New().Uint64()
cID := id.New().Uint64()
fieldMetas := []*messagepb.FieldMeta{}
if collection.Schema != nil {
fieldMetas = collection.Schema.FieldMetas
}
queryNodeNum := conf.Config.Master.QueryNodeNum
topicNum := conf.Config.Pulsar.TopicNum
var topicNumPerQueryNode int
if topicNum % queryNodeNum != 0 {
topicNumPerQueryNode = topicNum / queryNodeNum + 1
} else {
topicNumPerQueryNode = topicNum / queryNodeNum
}
fmt.Println("QueryNodeNum = ", queryNodeNum)
fmt.Println("TopicNum = ", topicNum)
fmt.Println("TopicNumPerQueryNode = ", topicNumPerQueryNode)
sIDs := make([]uint64, queryNodeNum)
for i := 0; i < queryNodeNum; i++ {
// For generating different ids
time.Sleep(1000 * time.Millisecond)
sIDs[i] = id.New().Uint64()
}
c := mock.NewCollection(cID, collection.CollectionName,
time.Now(), fieldMetas, sIDs,
time.Now(), fieldMetas, []uint64{sID},
[]string{"default"})
cm := mock.GrpcMarshal(&c)
s := mock.NewSegment(sID, cID, collection.CollectionName, "default", 0, conf.Config.Pulsar.TopicNum, time.Now(), time.Unix(1<<46-1, 0))
collectionData, err := mock.Collection2JSON(*cm)
if err != nil {
log.Fatal(err)
return err
}
segmentData, err := mock.Segment2JSON(s)
if err != nil {
log.Fatal(err)
return err
}
err = kvbase.Save("collection/"+strconv.FormatUint(cID, 10), collectionData)
if err != nil {
log.Fatal(err)
return err
}
for i := 0; i < queryNodeNum; i++ {
chStart := i * topicNumPerQueryNode
chEnd := (i + 1) * topicNumPerQueryNode
if chEnd > topicNum {
chEnd = topicNum - 1
}
s := mock.NewSegment(sIDs[i], cID, collection.CollectionName, "default", chStart, chEnd, GetPhysicalTimeNow(), 1 << 46 - 1)
segmentData, err := mock.Segment2JSON(s)
if err != nil {
log.Fatal(err)
return err
}
err = kvbase.Save("segment/"+strconv.FormatUint(sIDs[i], 10), segmentData)
if err != nil {
log.Fatal(err)
return err
}
err = kvbase.Save("segment/"+strconv.FormatUint(sID, 10), segmentData)
if err != nil {
log.Fatal(err)
return err
}
return nil
}


@ -1,7 +1,7 @@
syntax = "proto3";
package milvus.grpc;
option go_package="msgpb";
option go_package="master/grpc/message";
enum ErrorCode {
SUCCESS = 0;
@ -208,8 +208,8 @@ message QueryResult {
repeated float scores = 4;
repeated float distances = 5;
repeated KeyValuePair extra_params = 6;
int64 query_id = 7;
int64 client_id = 8;
uint64 query_id = 7;
int64 proxy_id = 8;
}
/**
@ -675,6 +675,68 @@ enum OpType {
DELETE = 1;
}
enum ReqType {
// general operations
kCmd = 0;
/* collection operations */
kCreateCollection = 100;
kDropCollection = 101;
kHasCollection = 102;
kListCollections = 103;
kGetCollectionInfo = 104;
kGetCollectionStats = 105;
kCountEntities = 106;
/* partition operations */
kCreatePartition = 200;
kDropPartition = 201;
kHasPartition = 202;
kListPartitions = 203;
/* index operations */
kCreateIndex = 300;
kDropIndex = 301;
kDescribeIndex = 302;
/* data operations */
kInsert = 400;
kGetEntityByID = 401;
kDeleteEntityByID = 402;
kSearch = 403;
kListIDInSegment = 404;
/* other operations */
kLoadCollection = 500;
kFlush = 501;
kCompact = 502;
}
message QueryReqMsg {
string collection_name = 1;
repeated VectorParam vector_param = 2;
repeated string partition_tags = 3;
string dsl = 4;
repeated KeyValuePair extra_params = 5;
uint64 timestamp = 6;
int64 proxy_id = 7;
uint64 query_id = 8;
ReqType req_type = 9;
}
message ManipulationReqMsg {
string collection_name = 1;
string partition_tag = 2;
repeated uint64 primary_keys = 3;
repeated RowData rows_data = 4;
uint64 timestamp = 5;
uint64 segment_id = 6;
uint64 channel_id = 7;
ReqType req_type = 8;
int64 proxy_id = 9;
repeated KeyValuePair extra_params = 10;
}
message InsertOrDeleteMsg {
string collection_name = 1;
RowData rows_data = 2;

proxy-go/main.go (new file)

@ -0,0 +1,17 @@
package main
import (
proxy "github.com/czs007/suvlim/proxy-go/proxy_node"
"log"
)
func main() {
cfg, err := proxy.ReadProxyOptionsFromConfig()
if err != nil {
log.Fatalf("read proxy options form config file , error = %v", err)
}
err = proxy.StartProxy(cfg)
if err != nil {
log.Fatalf("start proxy failed, error = %v", err)
}
}


@ -0,0 +1,130 @@
package proxy_node
import (
"github.com/apache/pulsar-client-go/pulsar"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"log"
"sync"
)
type manipulationReq struct {
pb.ManipulationReqMsg
wg sync.WaitGroup
proxy *proxyServer
}
// TsMsg interfaces
func (req *manipulationReq) Ts() Timestamp {
return Timestamp(req.Timestamp)
}
func (req *manipulationReq) SetTs(ts Timestamp) {
req.Timestamp = uint64(ts)
}
// BaseRequest interfaces
func (req *manipulationReq) Type() pb.ReqType {
return req.ReqType
}
func (req *manipulationReq) PreExecute() pb.Status {
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *manipulationReq) Execute() pb.Status {
req.proxy.reqSch.manipulationsChan <- req
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *manipulationReq) PostExecute() pb.Status { // send into pulsar
req.wg.Add(1)
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *manipulationReq) WaitToFinish() pb.Status { // wait until the message has been sent into pulsar
req.wg.Wait()
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (s *proxyServer) restartManipulationRoutine(bufSize int) error {
s.reqSch.manipulationsChan = make(chan *manipulationReq, bufSize)
pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: s.pulsarAddr})
if err != nil {
return err
}
readers := make([]pulsar.Producer, len(s.readerTopics))
for i, t := range s.readerTopics {
p, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: t})
if err != nil {
return err
}
readers[i] = p
}
deleter, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: s.deleteTopic})
if err != nil {
return err
}
go func() {
for {
select {
case <-s.ctx.Done():
deleter.Close()
for _, r := range readers {
r.Close()
}
pulsarClient.Close()
return
case ip := <-s.reqSch.manipulationsChan:
ts, st := s.getTimestamp(1)
if st.ErrorCode != pb.ErrorCode_SUCCESS {
log.Printf("get time stamp failed, error code = %d, msg = %s, drop inset rows = %d", st.ErrorCode, st.Reason, len(ip.RowsData))
continue
}
mq := pb.ManipulationReqMsg{
CollectionName: ip.CollectionName,
PartitionTag: ip.PartitionTag,
PrimaryKeys: ip.PrimaryKeys,
RowsData: ip.RowsData,
Timestamp: uint64(ts[0]),
SegmentId: ip.SegmentId,
ChannelId: ip.ChannelId,
ReqType: ip.ReqType,
ProxyId: ip.ProxyId,
ExtraParams: ip.ExtraParams,
}
mb, err := proto.Marshal(&mq)
if err != nil {
log.Printf("Marshal ManipulationReqMsg failed, error = %v", err)
continue
}
switch ip.ReqType {
case pb.ReqType_kInsert:
if _, err := readers[mq.ChannelId].Send(s.ctx, &pulsar.ProducerMessage{Payload: mb}); err != nil {
log.Printf("post into pulsar failed, error = %v", err)
}
case pb.ReqType_kDeleteEntityByID:
if _, err = deleter.Send(s.ctx, &pulsar.ProducerMessage{Payload: mb}); err != nil {
log.Printf("post into pulsar filed, error = %v", err)
}
default:
log.Printf("post unexpect ReqType = %d", ip.ReqType)
break
}
s.reqSch.m_timestamp_mux.Lock()
if s.reqSch.m_timestamp <= ts[0] {
s.reqSch.m_timestamp = ts[0]
} else {
log.Printf("there is some wrong with m_timestamp, it goes back, current = %d, previous = %d", ts[0], s.reqSch.m_timestamp)
}
s.reqSch.m_timestamp_mux.Unlock()
ip.wg.Done()
}
}
}()
return nil
}


@ -0,0 +1,175 @@
package proxy_node
import (
"context"
"fmt"
"github.com/apache/pulsar-client-go/pulsar"
"github.com/czs007/suvlim/conf"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
etcd "go.etcd.io/etcd/clientv3"
"strconv"
)
type BaseRequest interface {
Type() pb.ReqType
PreExecute() pb.Status
Execute() pb.Status
PostExecute() pb.Status
WaitToFinish() pb.Status
}
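// Canonical lifecycle (sketch, mirroring DeleteByID/Insert/Search below): a
// caller drives a BaseRequest through the four phases in order, then blocks:
//
//	if st := req.PreExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS { return st }
//	if st := req.Execute(); st.ErrorCode != pb.ErrorCode_SUCCESS { return st }     // enqueue into the scheduler
//	if st := req.PostExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS { return st } // register the pending wait
//	st := req.WaitToFinish()                                                       // block until processed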
type ProxyOptions struct {
//proxy server
address string //grpc server address
master_address string //master server address
collectionMetaRootPath string // etcd root path, read collection and segment metadata from etcd
pulsarAddr string // pulsar address for reader
readerTopicsPrefix string
numReadTopics int
deleteTopic string
queryTopic string
resultTopic string
resultGroup string
numReaderNode int
proxyId int64 //start from 1
etcdEndpoints []string
//timestamporacle
tsoRootPath string //etcd root path, store timestamp into this key
tsoSaveInterval uint64
//timetick
timeTickInterval uint64
timeTickTopic string
timeTickPeerId int64 //start from 1
// inner member
proxyServer *proxyServer
tso *timestampOracle
timeTick *timeTick
ctx context.Context
cancel context.CancelFunc
}
func ReadProxyOptionsFromConfig() (*ProxyOptions, error) {
etcdRootPath := conf.Config.Etcd.Rootpath
if etcdRootPath[len(etcdRootPath)-1] == '/' {
etcdRootPath = etcdRootPath[0 : len(etcdRootPath)-1]
}
return &ProxyOptions{
address: conf.Config.Proxy.Network.Address + ":" + strconv.Itoa(conf.Config.Proxy.Network.Port),
master_address: conf.Config.Master.Address + ":" + strconv.Itoa(int(conf.Config.Master.Port)),
collectionMetaRootPath: etcdRootPath,
pulsarAddr: "pulsar://" + conf.Config.Pulsar.Address + ":" + strconv.Itoa(int(conf.Config.Pulsar.Port)),
readerTopicsPrefix: conf.Config.Proxy.PulsarTopics.ReaderTopicPrefix,
numReadTopics: conf.Config.Proxy.PulsarTopics.NumReaderTopics,
deleteTopic: conf.Config.Proxy.PulsarTopics.DeleteTopic,
queryTopic: conf.Config.Proxy.PulsarTopics.QueryTopic,
resultTopic: conf.Config.Proxy.PulsarTopics.ResultTopic,
resultGroup: conf.Config.Proxy.PulsarTopics.ResultGroup,
numReaderNode: conf.Config.Proxy.NumReaderNodes,
proxyId: int64(conf.Config.Proxy.ProxyId),
etcdEndpoints: []string{conf.Config.Etcd.Address + ":" + strconv.Itoa(int(conf.Config.Etcd.Port))},
tsoRootPath: etcdRootPath,
tsoSaveInterval: uint64(conf.Config.Proxy.TosSaveInterval),
timeTickInterval: uint64(conf.Config.Proxy.TimeTickInterval),
timeTickTopic: conf.Config.Proxy.PulsarTopics.TimeTickTopic,
timeTickPeerId: int64(conf.Config.Proxy.ProxyId),
}, nil
}
func StartProxy(opt *ProxyOptions) error {
//global context
opt.ctx, opt.cancel = context.WithCancel(context.Background())
///////////////////// timestamporacle //////////////////////////
etcdTso, err := etcd.New(etcd.Config{Endpoints: opt.etcdEndpoints})
if err != nil {
return err
}
tso := &timestampOracle{
client: etcdTso,
ctx: opt.ctx,
rootPath: opt.tsoRootPath,
saveInterval: opt.tsoSaveInterval,
}
tso.Restart(opt.proxyId)
/////////////////// proxy server ///////////////////////////////
//readerTopics, send insert and delete message into these topics
readerTopics := make([]string, 0, opt.numReadTopics)
for i := 0; i < opt.numReadTopics; i++ {
readerTopics = append(readerTopics, opt.readerTopicsPrefix+strconv.Itoa(i))
}
etcdProxy, err := etcd.New(etcd.Config{Endpoints: opt.etcdEndpoints})
if err != nil {
return err
}
srv := &proxyServer{
address: opt.address,
masterAddress: opt.master_address,
rootPath: opt.collectionMetaRootPath,
pulsarAddr: opt.pulsarAddr,
readerTopics: readerTopics,
deleteTopic: opt.deleteTopic,
queryTopic: opt.queryTopic,
resultTopic: opt.resultTopic,
resultGroup: opt.resultGroup,
numReaderNode: opt.numReaderNode,
proxyId: opt.proxyId,
getTimestamp: tso.GetTimestamp,
client: etcdProxy,
ctx: opt.ctx,
}
//errChan := make(chan error, 1)
//go func() {
// err := startProxyServer(srv)
// errChan <- err
//}()
err = startProxyServer(srv)
if err != nil {
return err
}
//wait until grpc server has started
//if err := <-errChan; err != nil {
// return err
//}
////////////////////////// time tick /////////////////////////////////
ttClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: opt.pulsarAddr})
if err != nil {
return err
}
ttProducer, err := ttClient.CreateProducer(pulsar.ProducerOptions{Topic: opt.timeTickTopic})
if err != nil {
return err
}
tt := &timeTick{
interval: opt.timeTickInterval,
pulsarProducer: ttProducer,
peer_id: opt.timeTickPeerId,
ctx: opt.ctx,
areRequestsDelivered: func(ts Timestamp) bool { return srv.reqSch.AreRequestsDelivered(ts, 2) },
getTimestamp: func() (Timestamp, pb.Status) {
ts, st := tso.GetTimestamp(1)
return ts[0], st
},
}
s := tt.Restart()
if s.ErrorCode != pb.ErrorCode_SUCCESS {
return fmt.Errorf("%s", s.Reason)
}
opt.proxyServer = srv
opt.tso = tso
opt.timeTick = tt
srv.wg.Wait()
return nil
}

View File

@ -0,0 +1,435 @@
package proxy_node
import (
"context"
"encoding/binary"
"encoding/json"
"github.com/apache/pulsar-client-go/pulsar"
mpb "github.com/czs007/suvlim/pkg/master/grpc/master"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
etcd "go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
"sort"
"strconv"
"testing"
"time"
)
func TestProxyNode(t *testing.T) {
startTestMaster("localhost:11000", t)
testOpt := ProxyOptions{
address: "localhost:11001",
master_address: "localhost:11000",
collectionMetaRootPath: "/collections/meta",
pulsarAddr: "pulsar://localhost:6650",
readerTopicsPrefix: "reader-",
numReadTopics: 2,
deleteTopic: "deleteT",
queryTopic: "queryT",
resultTopic: "resultT",
resultGroup: "resultG",
numReaderNode: 2,
proxyId: 1,
etcdEndpoints: []string{"127.0.0.1:2379"},
tsoRootPath: "/tso",
tsoSaveInterval: 200,
timeTickInterval: 200,
timeTickTopic: "timetick",
timeTickPeerId: 1,
}
if err := StartProxy(&testOpt); err != nil {
t.Fatal(err)
}
startTime := uint64(time.Now().UnixNano()) / uint64(1e6)
t.Logf("start time stamp = %d", startTime)
etcdClient, err := etcd.New(etcd.Config{Endpoints: testOpt.etcdEndpoints})
assert.Nil(t, err)
//defer etcdClient.Close()
pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: testOpt.pulsarAddr})
assert.Nil(t, err)
defer pulsarClient.Close()
go func() {
time.Sleep(time.Second)
for {
ts, err := etcdClient.Get(testOpt.ctx, testOpt.tsoRootPath+tsoKeyPath)
assert.Nil(t, err)
if len(ts.Kvs) != 1 {
t.Fatalf("save tso into etcd falied")
}
value, err := strconv.ParseUint(string(ts.Kvs[0].Value), 10, 64)
assert.Nil(t, err)
curValue, st := testOpt.tso.GetTimestamp(1)
assert.Equalf(t, st.ErrorCode, pb.ErrorCode_SUCCESS, "%s", st.Reason)
curTime := ToPhysicalTime(uint64(curValue[0]))
t.Logf("current time stamp = %d, saved time stamp = %d", curTime, value)
assert.GreaterOrEqual(t, uint64(curValue[0]), value)
assert.GreaterOrEqual(t, value, startTime)
time.Sleep(time.Duration(testOpt.tsoSaveInterval) * time.Millisecond)
}
}()
tickConsumer, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: testOpt.timeTickTopic,
SubscriptionName: testOpt.timeTickTopic + "G",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer tickConsumer.Close()
reader, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topics: testOpt.proxyServer.readerTopics,
SubscriptionName: testOpt.readerTopicsPrefix + "G",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer reader.Close()
query, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: testOpt.queryTopic,
SubscriptionName: testOpt.queryTopic + "G",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer query.Close()
deleteC, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: testOpt.deleteTopic,
SubscriptionName: testOpt.deleteTopic + "G",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer deleteC.Close()
result, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: testOpt.resultTopic})
assert.Nil(t, err)
defer result.Close()
tick := time.Tick(500 * time.Millisecond)
// read pulsar channel until empty
func() {
cnt := 0
for {
select {
case <-tick:
cnt++
if cnt >= 3 {
return
}
case cm, ok := <-tickConsumer.Chan():
assert.Truef(t, ok, "time tick consumer topic has closed")
tickConsumer.AckID(cm.ID())
case cm, ok := <-reader.Chan():
assert.Truef(t, ok, "reader comsumer topic has closed")
reader.AckID(cm.ID())
case cm, ok := <-deleteC.Chan():
assert.Truef(t, ok, "delete topic has closed")
deleteC.AckID(cm.ID())
case cm, ok := <-query.Chan():
assert.Truef(t, ok, "query topic has closed")
query.AckID(cm.ID())
}
}
}()
go func() {
lastT := startTime
for {
cm, ok := <-tickConsumer.Chan()
assert.Truef(t, ok, "time tick consumer topic has closed")
tickConsumer.AckID(cm.ID())
var tsm pb.TimeSyncMsg
if err := proto.Unmarshal(cm.Payload(), &tsm); err != nil {
t.Fatal(err)
}
curT := ToPhysicalTime(tsm.Timestamp)
t.Logf("time tick = %d", curT)
assert.Greater(t, curT, lastT)
lastT = curT
}
}()
cm100 := mpb.Collection{
Id: 100,
Name: "cm100",
Schema: nil,
CreateTime: 0,
SegmentIds: []uint64{101, 102},
PartitionTags: nil,
Indexes: nil,
}
sm101 := mpb.Segment{
SegmentId: 101,
CollectionId: 100,
ChannelStart: 0,
ChannelEnd: 1,
Status: mpb.SegmentStatus_OPENED,
}
sm102 := mpb.Segment{
SegmentId: 102,
CollectionId: 100,
ChannelStart: 1,
ChannelEnd: 2,
Status: mpb.SegmentStatus_OPENED,
}
if cm100b, err := json.Marshal(&cm100); err != nil {
t.Fatal(err)
} else if _, err := etcdClient.Put(testOpt.ctx, testOpt.collectionMetaRootPath+"/"+keyCollectionPath+"/100", string(cm100b)); err != nil {
t.Fatal(err)
}
if sm101b, err := json.Marshal(&sm101); err != nil {
t.Fatal(err)
} else if _, err := etcdClient.Put(testOpt.ctx, testOpt.collectionMetaRootPath+"/"+keySegmentPath+"/101", string(sm101b)); err != nil {
t.Fatal(err)
}
if sm102b, err := json.Marshal(&sm102); err != nil {
t.Fatal(err)
} else if _, err := etcdClient.Put(testOpt.ctx, testOpt.collectionMetaRootPath+"/"+keySegmentPath+"/102", string(sm102b)); err != nil {
t.Fatal(err)
}
ctx1, _ := context.WithTimeout(testOpt.ctx, time.Second)
grpcConn, err := grpc.DialContext(ctx1, testOpt.address, grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer grpcConn.Close()
proxyClient := pb.NewMilvusServiceClient(grpcConn)
insertParm := pb.InsertParam{
CollectionName: "cm100",
Schema: nil,
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(10)},
{Blob: uint64ToBytes(11)},
{Blob: uint64ToBytes(12)},
{Blob: uint64ToBytes(13)},
{Blob: uint64ToBytes(14)},
{Blob: uint64ToBytes(15)},
},
EntityIdArray: []int64{10, 11, 12, 13, 14, 15},
PartitionTag: "",
ExtraParams: nil,
}
deleteParm := pb.DeleteByIDParam{
CollectionName: "cm100",
IdArray: []int64{20, 21},
}
searchParm := pb.SearchParam{
CollectionName: "cm100",
VectorParam: nil,
Dsl: "",
PartitionTag: nil,
ExtraParams: nil,
}
go func() {
cm, ok := <-query.Chan()
assert.Truef(t, ok, "query topic has closed")
query.AckID(cm.ID())
var qm pb.QueryReqMsg
if err := proto.Unmarshal(cm.Payload(), &qm); err != nil {
t.Fatal(err)
}
assert.Equal(t, qm.ProxyId, testOpt.proxyId)
assert.Equal(t, qm.CollectionName, "cm100")
t.Logf("query time stamp = %d", ToPhysicalTime(qm.Timestamp))
assert.Greater(t, ToPhysicalTime(qm.Timestamp), startTime)
r1 := pb.QueryResult{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Entities: &pb.Entities{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Ids: []int64{11, 13, 15},
ValidRow: []bool{true, true, true},
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(11)},
{Blob: uint64ToBytes(13)},
{Blob: uint64ToBytes(15)},
},
},
RowNum: 3,
Scores: []float32{11, 13, 15},
Distances: []float32{11, 13, 15},
ExtraParams: nil,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}
r2 := pb.QueryResult{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Entities: &pb.Entities{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Ids: []int64{12, 14, 16},
ValidRow: []bool{true, false, true},
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(12)},
{Blob: uint64ToBytes(14)},
{Blob: uint64ToBytes(16)},
},
},
RowNum: 3,
Scores: []float32{12, 14, 16},
Distances: []float32{12, 14, 16},
ExtraParams: nil,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}
if b1, err := proto.Marshal(&r1); err != nil {
t.Fatal(err)
} else if _, err := result.Send(testOpt.ctx, &pulsar.ProducerMessage{Payload: b1}); err != nil {
t.Fatal(err)
}
if b2, err := proto.Marshal(&r2); err != nil {
t.Fatal(err)
} else if _, err := result.Send(testOpt.ctx, &pulsar.ProducerMessage{Payload: b2}); err != nil {
t.Fatal(err)
}
}()
insertR, err := proxyClient.Insert(testOpt.ctx, &insertParm)
assert.Nil(t, err)
assert.Equalf(t, insertR.Status.ErrorCode, pb.ErrorCode_SUCCESS, "%s", insertR.Status.Reason)
assert.Equal(t, len(insertR.EntityIdArray), 6)
sort.Slice(insertR.EntityIdArray, func(i, j int) bool {
return insertR.EntityIdArray[i] < insertR.EntityIdArray[j]
})
for i := 0; i < len(insertR.EntityIdArray); i++ {
assert.Equal(t, insertR.EntityIdArray[i], int64(i+10))
}
var insertPrimaryKey []uint64
readerM1, ok := <-reader.Chan()
assert.True(t, ok)
reader.AckID(readerM1.ID())
var m1 pb.ManipulationReqMsg
if err := proto.UnmarshalMerge(readerM1.Payload(), &m1); err != nil {
t.Fatal(err)
}
assert.Equal(t, m1.CollectionName, "cm100")
assert.Equal(t, len(m1.PrimaryKeys), len(m1.RowsData))
t.Logf("reader time stamp = %d", ToPhysicalTime(m1.Timestamp))
assert.GreaterOrEqual(t, ToPhysicalTime(m1.Timestamp), startTime)
for i, k := range m1.PrimaryKeys {
insertPrimaryKey = append(insertPrimaryKey, k)
rowValue := binary.LittleEndian.Uint64(m1.RowsData[i].Blob)
t.Logf("insert primary key = %d, row data= %d", k, rowValue)
assert.Equal(t, k, rowValue)
}
readerM2, ok := <-reader.Chan()
assert.True(t, ok)
reader.AckID(readerM2.ID())
var m2 pb.ManipulationReqMsg
if err := proto.UnmarshalMerge(readerM2.Payload(), &m2); err != nil {
t.Fatal(err)
}
assert.Equal(t, m2.CollectionName, "cm100")
assert.Equal(t, len(m2.PrimaryKeys), len(m2.RowsData))
t.Logf("read time stamp = %d", ToPhysicalTime(m2.Timestamp))
assert.GreaterOrEqual(t, ToPhysicalTime(m2.Timestamp), startTime)
for i, k := range m2.PrimaryKeys {
insertPrimaryKey = append(insertPrimaryKey, k)
rowValue := binary.LittleEndian.Uint64(m2.RowsData[i].Blob)
t.Logf("insert primary key = %d, row data= %d", k, rowValue)
assert.Equal(t, k, rowValue)
}
sort.Slice(insertPrimaryKey, func(i, j int) bool {
return insertPrimaryKey[i] < insertPrimaryKey[j]
})
assert.Equal(t, len(insertPrimaryKey), 6)
for i := 0; i < len(insertPrimaryKey); i++ {
assert.Equal(t, insertPrimaryKey[i], uint64(i+10))
}
deleteR, err := proxyClient.DeleteByID(testOpt.ctx, &deleteParm)
assert.Nil(t, err)
assert.Equal(t, deleteR.ErrorCode, pb.ErrorCode_SUCCESS)
deleteM, ok := <-deleteC.Chan()
assert.True(t, ok)
deleteC.AckID(deleteM.ID())
var dm pb.ManipulationReqMsg
if err := proto.UnmarshalMerge(deleteM.Payload(), &dm); err != nil {
t.Fatal(err)
}
assert.Equal(t, dm.CollectionName, "cm100")
assert.Equal(t, len(dm.PrimaryKeys), 2)
t.Logf("delete time stamp = %d", ToPhysicalTime(dm.Timestamp))
assert.GreaterOrEqual(t, ToPhysicalTime(dm.Timestamp), startTime)
for i := 0; i < len(dm.PrimaryKeys); i++ {
assert.Equal(t, dm.PrimaryKeys[i], uint64(i+20))
}
searchR, err := proxyClient.Search(testOpt.ctx, &searchParm)
assert.Nil(t, err)
assert.Equal(t, searchR.Status.ErrorCode, pb.ErrorCode_SUCCESS)
assert.Equal(t, searchR.Entities.Status.ErrorCode, pb.ErrorCode_SUCCESS)
assert.Equal(t, len(searchR.Entities.Ids), 3)
assert.Equal(t, searchR.Entities.Ids, []int64{16, 15, 13})
assert.Equal(t, len(searchR.Entities.ValidRow), 3)
assert.Equal(t, searchR.Entities.ValidRow, []bool{true, true, true})
assert.Equal(t, len(searchR.Entities.RowsData), 3)
assert.Equal(t, searchR.Entities.RowsData, []*pb.RowData{
{Blob: uint64ToBytes(16)},
{Blob: uint64ToBytes(15)},
{Blob: uint64ToBytes(13)},
})
assert.Equal(t, len(searchR.Scores), 3)
assert.Equal(t, searchR.Scores, []float32{16, 15, 13})
assert.Equal(t, len(searchR.Distances), 3)
assert.Equal(t, searchR.Distances, []float32{16, 15, 13})
time.Sleep(time.Second)
}
func TestReadProxyOptionsFromConfig(t *testing.T) {
conf, err := ReadProxyOptionsFromConfig()
assert.Nil(t, err)
t.Log(conf.address)
t.Log(conf.master_address)
t.Log(conf.collectionMetaRootPath)
t.Log(conf.pulsarAddr)
t.Log(conf.readerTopicsPrefix)
t.Log(conf.numReadTopics)
t.Log(conf.deleteTopic)
t.Log(conf.queryTopic)
t.Log(conf.resultTopic)
t.Log(conf.resultGroup)
t.Log(conf.numReaderNode)
t.Log(conf.proxyId)
t.Log(conf.etcdEndpoints)
t.Log(conf.tsoRootPath)
t.Log(conf.tsoSaveInterval)
t.Log(conf.timeTickInterval)
t.Log(conf.timeTickTopic)
t.Log(conf.timeTickPeerId)
}

View File

@ -0,0 +1,252 @@
package proxy_node
import (
"fmt"
"github.com/apache/pulsar-client-go/pulsar"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"log"
"sort"
"sync"
)
type queryReq struct {
pb.QueryReqMsg
result []*pb.QueryResult
wg sync.WaitGroup
proxy *proxyServer
}
// BaseRequest interfaces
func (req *queryReq) Type() pb.ReqType {
return req.ReqType
}
func (req *queryReq) PreExecute() pb.Status {
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *queryReq) Execute() pb.Status {
req.proxy.reqSch.queryChan <- req
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *queryReq) PostExecute() pb.Status { // register the request so WaitToFinish can block on the results
req.wg.Add(1)
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (req *queryReq) WaitToFinish() pb.Status { // wait until results arrive from all reader nodes
req.wg.Wait()
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (s *proxyServer) restartQueryRoutine(buf_size int) error {
s.reqSch.queryChan = make(chan *queryReq, buf_size)
pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: s.pulsarAddr})
if err != nil {
return err
}
query, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: s.queryTopic})
if err != nil {
return err
}
result, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: s.resultTopic,
SubscriptionName: s.resultGroup,
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
if err != nil {
return err
}
resultMap := make(map[uint64]*queryReq)
go func() {
defer result.Close()
defer query.Close()
defer pulsarClient.Close()
for {
select {
case <-s.ctx.Done():
return
case qm := <-s.reqSch.queryChan:
ts, st := s.getTimestamp(1)
if st.ErrorCode != pb.ErrorCode_SUCCESS {
log.Printf("get time stamp failed, error code = %d, msg = %s", st.ErrorCode, st.Reason)
break
}
q := pb.QueryReqMsg{
CollectionName: qm.CollectionName,
VectorParam: qm.VectorParam,
PartitionTags: qm.PartitionTags,
Dsl: qm.Dsl,
ExtraParams: qm.ExtraParams,
Timestamp: uint64(ts[0]),
ProxyId: qm.ProxyId,
QueryId: qm.QueryId,
ReqType: qm.ReqType,
}
qb, err := proto.Marshal(&q)
if err != nil {
log.Printf("Marshal QueryReqMsg failed, error = %v", err)
continue
}
if _, err := query.Send(s.ctx, &pulsar.ProducerMessage{Payload: qb}); err != nil {
log.Printf("post into puslar failed, error = %v", err)
}
s.reqSch.q_timestamp_mux.Lock()
if s.reqSch.q_timestamp <= ts[0] {
s.reqSch.q_timestamp = ts[0]
} else {
log.Printf("there is some wrong with q_timestamp, it goes back, current = %d, previous = %d", ts[0], s.reqSch.q_timestamp)
}
s.reqSch.q_timestamp_mux.Unlock()
resultMap[qm.QueryId] = qm
//log.Printf("start search, query id = %d", qm.QueryId)
case cm, ok := <-result.Chan():
if !ok {
log.Printf("consumer of result topic has closed")
return
}
var rm pb.QueryResult
if err := proto.Unmarshal(cm.Message.Payload(), &rm); err != nil {
log.Printf("Unmarshal QueryReqMsg failed, error = %v", err)
break
}
if rm.ProxyId != s.proxyId {
break
}
qm, ok := resultMap[rm.QueryId]
if !ok {
log.Printf("unknown query id = %d", rm.QueryId)
break
}
qm.result = append(qm.result, &rm)
if len(qm.result) == s.numReaderNode {
qm.wg.Done()
delete(resultMap, rm.QueryId)
}
result.AckID(cm.ID())
}
}
}()
return nil
}
func (s *proxyServer) reduceResult(query *queryReq) *pb.QueryResult {
if s.numReaderNode == 1 {
return query.result[0]
}
var result []*pb.QueryResult
for _, r := range query.result {
if r.Status.ErrorCode == pb.ErrorCode_SUCCESS {
result = append(result, r)
}
}
if len(result) == 0 {
return query.result[0]
}
if len(result) == 1 {
return result[0]
}
var entities []*struct {
Ids int64
ValidRow bool
RowsData *pb.RowData
Scores float32
Distances float32
}
var rows int
result_err := func(msg string) *pb.QueryResult {
return &pb.QueryResult{
Status: &pb.Status{
ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR,
Reason: msg,
},
}
}
for _, r := range result {
if len(r.Entities.Ids) > rows {
rows = len(r.Entities.Ids)
}
if len(r.Entities.Ids) != len(r.Entities.ValidRow) {
return result_err(fmt.Sprintf("len(Entities.Ids)=%d, len(Entities.ValidRow)=%d", len(r.Entities.Ids), len(r.Entities.ValidRow)))
}
if len(r.Entities.Ids) != len(r.Entities.RowsData) {
return result_err(fmt.Sprintf("len(Entities.Ids)=%d, len(Entities.RowsData)=%d", len(r.Entities.Ids), len(r.Entities.RowsData)))
}
if len(r.Entities.Ids) != len(r.Scores) {
return result_err(fmt.Sprintf("len(Entities.Ids)=%d, len(Scores)=%d", len(r.Entities.Ids), len(r.Scores)))
}
if len(r.Entities.Ids) != len(r.Distances) {
return result_err(fmt.Sprintf("len(Entities.Ids)=%d, len(Distances)=%d", len(r.Entities.Ids), len(r.Distances)))
}
for i := 0; i < len(r.Entities.Ids); i++ {
entity := struct {
Ids int64
ValidRow bool
RowsData *pb.RowData
Scores float32
Distances float32
}{
Ids: r.Entities.Ids[i],
ValidRow: r.Entities.ValidRow[i],
RowsData: r.Entities.RowsData[i],
Scores: r.Scores[i],
Distances: r.Distances[i],
}
entities = append(entities, &entity)
}
}
sort.Slice(entities, func(i, j int) bool {
if entities[i].ValidRow {
if !entities[j].ValidRow {
return true
}
return entities[i].Scores > entities[j].Scores
}
return false
})
rIds := make([]int64, 0, rows)
rValidRow := make([]bool, 0, rows)
rRowsData := make([]*pb.RowData, 0, rows)
rScores := make([]float32, 0, rows)
rDistances := make([]float32, 0, rows)
for i := 0; i < rows; i++ {
rIds = append(rIds, entities[i].Ids)
rValidRow = append(rValidRow, entities[i].ValidRow)
rRowsData = append(rRowsData, entities[i].RowsData)
rScores = append(rScores, entities[i].Scores)
rDistances = append(rDistances, entities[i].Distances)
}
return &pb.QueryResult{
Status: &pb.Status{
ErrorCode: pb.ErrorCode_SUCCESS,
},
Entities: &pb.Entities{
Status: &pb.Status{
ErrorCode: pb.ErrorCode_SUCCESS,
},
Ids: rIds,
ValidRow: rValidRow,
RowsData: rRowsData,
},
RowNum: int64(rows),
Scores: rScores,
Distances: rDistances,
ExtraParams: result[0].ExtraParams,
QueryId: query.QueryId,
ProxyId: query.ProxyId,
}
}
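// Illustrative reduction (mirrors TestProxyNode): two reader nodes answer with
// ids {11, 13, 15} and {12, 14, 16}, each id doubling as its score; sorting the
// valid rows by descending score and keeping the top rows yields {16, 15, 13}.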

View File

@ -0,0 +1,372 @@
package proxy_node
import (
"context"
"fmt"
"github.com/apache/pulsar-client-go/pulsar"
"github.com/czs007/suvlim/conf"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"log"
"sort"
"strconv"
"sync"
)
const ReadStopFlagEnd int64 = 0
type ReaderTimeSync interface {
Start() error
Close()
TimeSync() <-chan TimeSyncMsg
ManipulationReqMsg() <-chan *pb.ManipulationReqMsg
IsManipulationReqMsgChanFull() bool
}
type TimeSyncMsg struct {
Timestamp uint64
NumRecorders int64
}
type ReaderTimeSyncOption func(*ReaderTimeSyncCfg)
type ReaderTimeSyncCfg struct {
pulsarAddr string
pulsarClient pulsar.Client
timeSyncConsumer pulsar.Consumer
readerConsumer pulsar.Consumer
readerProducer []pulsar.Producer
timesyncMsgChan chan TimeSyncMsg
manipulationReqMsgChan chan *pb.ManipulationReqMsg //output insert or delete msg
readStopFlagClientId int64
interval int64
proxyIdList []int64
readerQueueSize int
revTimesyncFromReader map[uint64]int
ctx context.Context
cancel context.CancelFunc
}
/*
Layout of a hybrid timestamp:
  physical time (ms)          logical counter
/-------46 bit-----------\/------18bit-----\
+-------------------------+================+
*/
func toMillisecond(ts *pb.TimeSyncMsg) int {
// extract the physical part (milliseconds) of a hybrid timestamp
return int(ts.GetTimestamp() >> 18)
}
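// Example (illustrative only): composing and decomposing a hybrid timestamp.
//
//	physical := uint64(1000)     // physical part, milliseconds
//	logical := uint64(7)         // logical counter within the interval
//	ts := physical<<18 | logical // compose
//	_ = ts >> 18                 // recovers 1000, as toMillisecond does
//	_ = ts & ((1 << 18) - 1)     // recovers 7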
func NewReaderTimeSync(
timeSyncTopic string,
timeSyncSubName string,
readTopics []string,
readSubName string,
proxyIdList []int64,
readStopFlagClientId int64,
opts ...ReaderTimeSyncOption,
) (ReaderTimeSync, error) {
//pulsarAddr := "pulsar://"
//pulsarAddr += conf.Config.Pulsar.Address
//pulsarAddr += ":"
//pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
//interval := int64(conf.Config.Timesync.Interval)
//check proxyIdList for duplicate ids
if len(proxyIdList) == 0 {
return nil, fmt.Errorf("proxy id list is empty")
}
if len(proxyIdList) > 1 {
sort.Slice(proxyIdList, func(i int, j int) bool { return proxyIdList[i] < proxyIdList[j] })
}
for i := 1; i < len(proxyIdList); i++ {
if proxyIdList[i] == proxyIdList[i-1] {
return nil, fmt.Errorf("there are two proxies have the same id = %d", proxyIdList[i])
}
}
r := &ReaderTimeSyncCfg{
//interval: interval,
proxyIdList: proxyIdList,
}
for _, opt := range opts {
opt(r)
}
if r.interval == 0 {
r.interval = int64(conf.Config.Timesync.Interval)
if r.interval == 0 {
return nil, fmt.Errorf("interval is unsetted")
}
}
if len(r.pulsarAddr) == 0 {
pulsarAddr := "pulsar://"
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
r.pulsarAddr = pulsarAddr
}
//check if read topic is empty
if len(readTopics) == 0 {
return nil, fmt.Errorf("read topic is empyt")
}
//set default value
if r.readerQueueSize == 0 {
r.readerQueueSize = 1024
}
if readStopFlagClientId >= ReadStopFlagEnd {
return nil, fmt.Errorf("read stop flag client id should less than %d", ReadStopFlagEnd)
}
r.readStopFlagClientId = readStopFlagClientId
r.timesyncMsgChan = make(chan TimeSyncMsg, len(readTopics)*r.readerQueueSize)
r.manipulationReqMsgChan = make(chan *pb.ManipulationReqMsg, len(readTopics)*r.readerQueueSize)
r.revTimesyncFromReader = make(map[uint64]int)
r.ctx, r.cancel = context.WithCancel(context.Background())
client, err := pulsar.NewClient(pulsar.ClientOptions{URL: r.pulsarAddr})
if err != nil {
return nil, fmt.Errorf("connect pulsar failed, %v", err)
}
r.pulsarClient = client
timeSyncChan := make(chan pulsar.ConsumerMessage, len(r.proxyIdList))
if r.timeSyncConsumer, err = r.pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: timeSyncTopic,
SubscriptionName: timeSyncSubName,
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
MessageChannel: timeSyncChan,
}); err != nil {
return nil, fmt.Errorf("failed to subscribe topic %s, error = %v", timeSyncTopic, err)
}
readerChan := make(chan pulsar.ConsumerMessage, len(readTopics)*r.readerQueueSize)
if r.readerConsumer, err = r.pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topics: readTopics,
SubscriptionName: readSubName,
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
MessageChannel: readerChan,
}); err != nil {
return nil, fmt.Errorf("failed to subscrive reader topics : %v, error = %v", readTopics, err)
}
r.readerProducer = make([]pulsar.Producer, 0, len(readTopics))
for i := 0; i < len(readTopics); i++ {
rp, err := r.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: readTopics[i]})
if err != nil {
return nil, fmt.Errorf("failed to create reader producer %s, error = %v", readTopics[i], err)
}
r.readerProducer = append(r.readerProducer, rp)
}
return r, nil
}
func (r *ReaderTimeSyncCfg) Close() {
r.cancel()
r.timeSyncConsumer.Close()
r.readerConsumer.Close()
for i := 0; i < len(r.readerProducer); i++ {
r.readerProducer[i].Close()
}
r.pulsarClient.Close()
}
func (r *ReaderTimeSyncCfg) Start() error {
go r.startReadTopics()
go r.startTimeSync()
return r.ctx.Err()
}
func (r *ReaderTimeSyncCfg) ManipulationReqMsg() <-chan *pb.ManipulationReqMsg {
return r.manipulationReqMsgChan
}
func (r *ReaderTimeSyncCfg) TimeSync() <-chan TimeSyncMsg {
return r.timesyncMsgChan
}
func (r *ReaderTimeSyncCfg) TimeSyncChanLen() int {
return len(r.timesyncMsgChan)
}
func (r *ReaderTimeSyncCfg) IsManipulationReqMsgChanFull() bool {
return len(r.manipulationReqMsgChan) == len(r.readerProducer)*r.readerQueueSize
}
func (r *ReaderTimeSyncCfg) alignTimeSync(ts []*pb.TimeSyncMsg) []*pb.TimeSyncMsg {
if len(r.proxyIdList) > 1 {
if len(ts) > 1 {
for i := 1; i < len(r.proxyIdList); i++ {
curIdx := len(ts) - 1 - i
preIdx := len(ts) - i
timeGap := toMillisecond(ts[curIdx]) - toMillisecond(ts[preIdx])
if int64(timeGap) >= (r.interval/2) || int64(timeGap) <= (-r.interval/2) {
ts = ts[preIdx:]
return ts
}
}
ts = ts[len(ts)-len(r.proxyIdList):]
sort.Slice(ts, func(i int, j int) bool { return ts[i].Peer_Id < ts[j].Peer_Id })
for i := 0; i < len(r.proxyIdList); i++ {
if ts[i].Peer_Id != r.proxyIdList[i] {
ts = ts[:0]
return ts
}
}
}
} else {
if len(ts) > 1 {
ts = ts[len(ts)-1:]
}
}
return ts
}
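// Illustrative behaviour (mirrors TestAlignTimeSync2): with proxyIdList = {1, 2, 3}
// and interval = 200ms, pending messages stamped 5ms, 150ms and 20ms are trimmed to
// just the newest one (peer 2 at 20ms), because the 130ms gap between the last two
// exceeds interval/2 = 100ms, so no aligned triple can be formed yet.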
func (r *ReaderTimeSyncCfg) readTimeSync(ctx context.Context, ts []*pb.TimeSyncMsg, n int) ([]*pb.TimeSyncMsg, error) {
for i := 0; i < n; i++ {
select {
case <-ctx.Done():
return nil, ctx.Err()
case cm, ok := <-r.timeSyncConsumer.Chan():
if !ok {
return nil, fmt.Errorf("timesync consumer closed")
}
msg := cm.Message
var tsm pb.TimeSyncMsg
if err := proto.Unmarshal(msg.Payload(), &tsm); err != nil {
return nil, err
}
ts = append(ts, &tsm)
r.timeSyncConsumer.AckID(msg.ID())
}
}
return ts, nil
}
func (r *ReaderTimeSyncCfg) sendEOFMsg(ctx context.Context, msg *pulsar.ProducerMessage, index int, wg *sync.WaitGroup) {
if _, err := r.readerProducer[index].Send(ctx, msg); err != nil {
//TODO, log error
log.Printf("Send timesync flag error %v", err)
}
wg.Done()
}
func (r *ReaderTimeSyncCfg) startTimeSync() {
tsm := make([]*pb.TimeSyncMsg, 0, len(r.proxyIdList)*2)
ctx, _ := context.WithCancel(r.ctx)
var err error
for {
//var start time.Time
for len(tsm) != len(r.proxyIdList) {
tsm = r.alignTimeSync(tsm)
tsm, err = r.readTimeSync(ctx, tsm, len(r.proxyIdList)-len(tsm))
if err != nil {
if ctx.Err() != nil {
return
} else {
//TODO, log error msg
log.Printf("read time sync error %v", err)
}
}
}
ts := tsm[0].Timestamp
for i := 1; i < len(tsm); i++ {
if tsm[i].Timestamp < ts {
ts = tsm[i].Timestamp
}
}
tsm = tsm[:0]
//send timestamp flag to reader channel
msg := pb.ManipulationReqMsg{Timestamp: ts, ProxyId: r.readStopFlagClientId}
payload, err := proto.Marshal(&msg)
if err != nil {
//TODO log error
log.Printf("Marshal timesync flag error %v", err)
} else {
wg := sync.WaitGroup{}
wg.Add(len(r.readerProducer))
for index := range r.readerProducer {
go r.sendEOFMsg(ctx, &pulsar.ProducerMessage{Payload: payload}, index, &wg)
}
wg.Wait()
}
}
}
func (r *ReaderTimeSyncCfg) isReadStopFlag(imsg *pb.ManipulationReqMsg) bool {
return imsg.ProxyId < ReadStopFlagEnd
}
func (r *ReaderTimeSyncCfg) startReadTopics() {
ctx, _ := context.WithCancel(r.ctx)
tsm := TimeSyncMsg{Timestamp: 0, NumRecorders: 0}
for {
select {
case <-ctx.Done():
return
case cm, ok := <-r.readerConsumer.Chan():
if !ok {
log.Printf("reader consumer closed")
return
}
msg := cm.Message
var imsg pb.ManipulationReqMsg
if err := proto.Unmarshal(msg.Payload(), &imsg); err != nil {
log.Printf("unmarshal ManipulationReqMsg error %v", err)
break
}
if r.isReadStopFlag(&imsg) { //timestamp flag
if imsg.ProxyId == r.readStopFlagClientId {
gval := r.revTimesyncFromReader[imsg.Timestamp]
gval++
if gval >= len(r.readerProducer) {
if imsg.Timestamp >= tsm.Timestamp {
tsm.Timestamp = imsg.Timestamp
r.timesyncMsgChan <- tsm
tsm.NumRecorders = 0
}
delete(r.revTimesyncFromReader, imsg.Timestamp)
} else {
r.revTimesyncFromReader[imsg.Timestamp] = gval
}
}
} else {
if r.IsManipulationReqMsgChanFull() {
log.Printf("WARN : Insert or delete chan is full ...")
}
tsm.NumRecorders++
r.manipulationReqMsgChan <- &imsg
}
r.readerConsumer.AckID(msg.ID())
}
}
}
func WithReaderQueueSize(size int) ReaderTimeSyncOption {
return func(r *ReaderTimeSyncCfg) {
r.readerQueueSize = size
}
}
func WithPulsarAddress(addr string) ReaderTimeSyncOption {
return func(r *ReaderTimeSyncCfg) {
r.pulsarAddr = addr
}
}
func WithInterval(interval int64) ReaderTimeSyncOption {
return func(r *ReaderTimeSyncCfg) {
r.interval = interval
}
}

View File

@ -0,0 +1,564 @@
package proxy_node
import (
"context"
"github.com/apache/pulsar-client-go/pulsar"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"log"
"sync"
"testing"
"time"
)
const (
timeSyncTopic = "rtimesync"
timeSyncTopic2 = "rtimesync2"
timeSyncTopic3 = "rtimesync3"
timeSyncSubName = "rtimesync-g"
timeSyncSubName1 = "rtimesync-g1"
timeSyncSubName2 = "rtimesync-g2"
timeSyncSubName3 = "rtimesync-g3"
readerTopic1 = "rreader1"
readerTopic12 = "rreader12"
readerTopic13 = "rreader13"
readerTopic2 = "rreader2"
readerTopic22 = "rreader22"
readerTopic23 = "rreader23"
readerTopic3 = "rreader3"
readerTopic32 = "rreader32"
readerTopic33 = "rreader33"
readerTopic4 = "rreader4"
readerTopic42 = "rreader42"
readerTopic43 = "rreader43"
readerSubName = "rreader-g"
readerSubName1 = "rreader-g1"
readerSubName2 = "rreader-g2"
readerSubName3 = "rreader-g3"
interval = 200
readStopFlag int64 = -1
readStopFlag1 int64 = -1
readStopFlag2 int64 = -2
readStopFlag3 int64 = -3
)
func TestAlignTimeSync(t *testing.T) {
r := &ReaderTimeSyncCfg{
proxyIdList: []int64{1, 2, 3},
interval: 200,
}
ts := []*pb.TimeSyncMsg{
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 3,
Timestamp: toTimestamp(15),
},
{
Peer_Id: 2,
Timestamp: toTimestamp(20),
},
}
r.alignTimeSync(ts)
assert.Equalf(t, len(r.proxyIdList), 3, "proxyIdList should be : 1 2 3")
for i := 0; i < len(r.proxyIdList); i++ {
assert.Equal(t, r.proxyIdList[i], ts[i].Peer_Id)
}
}
func TestAlignTimeSync2(t *testing.T) {
r := &ReaderTimeSyncCfg{
proxyIdList: []int64{1, 2, 3},
interval: 200,
}
ts := []*pb.TimeSyncMsg{
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 3,
Timestamp: toTimestamp(150),
},
{
Peer_Id: 2,
Timestamp: toTimestamp(20),
},
}
ts = r.alignTimeSync(ts)
assert.Equalf(t, len(r.proxyIdList), 3, "proxyIdList should be : 1 2 3")
assert.Equal(t, len(ts), 1)
assert.Equal(t, ts[0].Peer_Id, int64(2))
}
func TestAlignTimeSync3(t *testing.T) {
r := &ReaderTimeSyncCfg{
proxyIdList: []int64{1, 2, 3},
interval: 200,
}
ts := []*pb.TimeSyncMsg{
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 3,
Timestamp: toTimestamp(15),
},
{
Peer_Id: 2,
Timestamp: toTimestamp(20),
},
}
ts = r.alignTimeSync(ts)
assert.Equalf(t, len(r.proxyIdList), 3, "proxyIdList should be : 1 2 3")
for i := 0; i < len(r.proxyIdList); i++ {
assert.Equal(t, r.proxyIdList[i], ts[i].Peer_Id)
}
}
func TestAlignTimeSync4(t *testing.T) {
r := &ReaderTimeSyncCfg{
proxyIdList: []int64{1},
interval: 200,
}
ts := []*pb.TimeSyncMsg{
{
Peer_Id: 1,
Timestamp: toTimestamp(15),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(25),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(35),
},
}
ts = r.alignTimeSync(ts)
assert.Equalf(t, len(r.proxyIdList), 1, "proxyIdList should be : 1")
assert.Equal(t, len(ts), 1)
assert.Equal(t, getMillisecond(ts[0].Timestamp), uint64(35))
}
func TestAlignTimeSync5(t *testing.T) {
r := &ReaderTimeSyncCfg{
proxyIdList: []int64{1, 2, 3},
interval: 200,
}
ts := []*pb.TimeSyncMsg{
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 1,
Timestamp: toTimestamp(5),
},
{
Peer_Id: 3,
Timestamp: toTimestamp(15),
},
{
Peer_Id: 3,
Timestamp: toTimestamp(20),
},
}
ts = r.alignTimeSync(ts)
assert.Zero(t, len(ts))
}
func TestNewReaderTimeSync(t *testing.T) {
r, err := NewReaderTimeSync(
timeSyncTopic,
timeSyncSubName,
[]string{readerTopic1, readerTopic2, readerTopic3, readerTopic4},
readerSubName,
[]int64{2, 1},
readStopFlag,
WithPulsarAddress("pulsar://localhost:6650"),
WithInterval(interval),
WithReaderQueueSize(8),
)
assert.Nil(t, err)
rr := r.(*ReaderTimeSyncCfg)
assert.NotNil(t, rr.pulsarClient)
assert.NotNil(t, rr.timeSyncConsumer)
assert.NotNil(t, rr.readerConsumer)
assert.NotNil(t, rr.readerProducer)
assert.Equal(t, rr.interval, int64(interval))
assert.Equal(t, rr.readStopFlagClientId, int64(readStopFlag))
assert.Equal(t, rr.readerQueueSize, 8)
assert.Equal(t, len(rr.proxyIdList), 2)
assert.Equal(t, rr.proxyIdList[0], int64(1))
assert.Equal(t, rr.proxyIdList[1], int64(2))
r.Close()
}
func TestPulsarClient(t *testing.T) {
t.Skip("skip pulsar client")
client, err := pulsar.NewClient(pulsar.ClientOptions{URL: "pulsar://localhost:6650"})
assert.Nil(t, err)
ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
go startWriteTimeSync(1, timeSyncTopic, client, 2*time.Second, t)
go startWriteTimeSync(2, timeSyncTopic, client, 2*time.Second, t)
timeSyncChan := make(chan pulsar.ConsumerMessage)
consumer, err := client.Subscribe(pulsar.ConsumerOptions{
Topic: timeSyncTopic,
SubscriptionName: timeSyncSubName,
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
MessageChannel: timeSyncChan,
})
assert.Nil(t, err)
for {
select {
case cm := <-timeSyncChan:
msg := cm.Message
var tsm pb.TimeSyncMsg
if err := proto.Unmarshal(msg.Payload(), &tsm); err != nil {
log.Fatal(err)
}
consumer.AckID(msg.ID())
log.Printf("read time stamp, id = %d, time stamp = %d\n", tsm.Peer_Id, tsm.Timestamp)
case <-ctx.Done():
break
}
if ctx.Err() != nil {
break
}
}
}
func TestReaderTimesync(t *testing.T) {
r, err := NewReaderTimeSync(timeSyncTopic,
timeSyncSubName,
[]string{readerTopic1, readerTopic2, readerTopic3, readerTopic4},
readerSubName,
[]int64{2, 1},
readStopFlag,
WithPulsarAddress("pulsar://localhost:6650"),
WithInterval(interval),
WithReaderQueueSize(1024),
)
assert.Nil(t, err)
rr := r.(*ReaderTimeSyncCfg)
pt1, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: timeSyncTopic})
assert.Nil(t, err)
pt2, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: timeSyncTopic})
assert.Nil(t, err)
pr1, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic1})
assert.Nil(t, err)
pr2, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic2})
assert.Nil(t, err)
pr3, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic3})
assert.Nil(t, err)
pr4, err := rr.pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic4})
assert.Nil(t, err)
go startProxy(pt1, 1, pr1, 1, pr2, 2, 2*time.Second, t)
go startProxy(pt2, 2, pr3, 3, pr4, 4, 2*time.Second, t)
ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
r.Start()
var tsm1, tsm2 TimeSyncMsg
var totalRecords int64 = 0
for {
if ctx.Err() != nil {
break
}
select {
case <-ctx.Done():
tsm1.NumRecorders = 0
break
case tsm1 = <-r.TimeSync():
}
if tsm1.NumRecorders > 0 {
log.Printf("timestamp %d, num records = %d", getMillisecond(tsm1.Timestamp), tsm1.NumRecorders)
totalRecords += tsm1.NumRecorders
for i := int64(0); i < tsm1.NumRecorders; i++ {
im := <-r.ManipulationReqMsg()
//log.Printf("%d - %d", getMillisecond(im.Timestamp), getMillisecond(tsm2.Timestamp))
if im.Timestamp < tsm2.Timestamp {
t.Fatalf("time sync error , im.Timestamp = %d, tsm2.Timestamp = %d", im.Timestamp, tsm2.Timestamp)
}
}
tsm2 = tsm1
}
}
log.Printf("total recordes = %d", totalRecordes)
if totalRecordes != 800 {
t.Fatalf("total records should be 800")
}
r.Close()
pt1.Close()
pt2.Close()
pr1.Close()
pr2.Close()
pr3.Close()
pr4.Close()
}
func TestReaderTimesync2(t *testing.T) {
client, _ := pulsar.NewClient(pulsar.ClientOptions{URL: "pulsar://localhost:6650"})
pt1, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: timeSyncTopic2})
pt2, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: timeSyncTopic2})
pr1, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic12})
pr2, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic22})
pr3, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic32})
pr4, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic42})
go startProxy(pt1, 1, pr1, 1, pr2, 2, 2*time.Second, t)
go startProxy(pt2, 2, pr3, 3, pr4, 4, 2*time.Second, t)
r1, _ := NewReaderTimeSync(timeSyncTopic2,
timeSyncSubName1,
[]string{readerTopic12, readerTopic22, readerTopic32, readerTopic42},
readerSubName1,
[]int64{2, 1},
readStopFlag1,
WithPulsarAddress("pulsar://localhost:6650"),
WithInterval(interval),
WithReaderQueueSize(1024),
)
r2, _ := NewReaderTimeSync(timeSyncTopic2,
timeSyncSubName2,
[]string{readerTopic12, readerTopic22, readerTopic32, readerTopic42},
readerSubName2,
[]int64{2, 1},
readStopFlag2,
WithPulsarAddress("pulsar://localhost:6650"),
WithInterval(interval),
WithReaderQueueSize(1024),
)
ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
rt := []ReaderTimeSync{r1, r2}
var wg sync.WaitGroup
for _, r := range rt {
r := r
_ = r.Start()
wg.Add(1)
go func() {
var tsm1, tsm2 TimeSyncMsg
var totalRecords int64 = 0
work := false
defer wg.Done()
for {
if ctx.Err() != nil {
break
}
select {
case tsm1 = <-r.TimeSync():
work = true
default:
work = false
}
if work {
if tsm1.NumRecorders > 0 {
//log.Printf("timestamp %d, num records = %d", getMillisecond(tsm1.Timestamp), tsm1.NumRecorders)
totalRecords += tsm1.NumRecorders
for i := int64(0); i < tsm1.NumRecorders; i++ {
im := <-r.ManipulationReqMsg()
//log.Printf("%d - %d", getMillisecond(im.Timestamp), getMillisecond(tsm2.Timestamp))
assert.GreaterOrEqual(t, im.Timestamp, tsm2.Timestamp)
}
tsm2 = tsm1
}
}
}
log.Printf("total recordes = %d", totalRecordes)
assert.Equal(t, totalRecordes, int64(800))
}()
}
wg.Wait()
r1.Close()
r2.Close()
pt1.Close()
pt2.Close()
pr1.Close()
pr2.Close()
pr3.Close()
pr4.Close()
}
func TestReaderTimesync3(t *testing.T) {
client, _ := pulsar.NewClient(pulsar.ClientOptions{URL: "pulsar://localhost:6650"})
pt, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: timeSyncTopic3})
pr1, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic13})
pr2, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic23})
pr3, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic33})
pr4, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: readerTopic43})
defer func() {
pr1.Close()
pr2.Close()
pr3.Close()
pr4.Close()
pt.Close()
client.Close()
}()
go func() {
total := 2 * 1000 / 10
ticker := time.Tick(10 * time.Millisecond)
var timestamp uint64 = 0
prlist := []pulsar.Producer{pr1, pr2, pr3, pr4}
for i := 1; i <= total; i++ {
<-ticker
timestamp += 10
for idx, pr := range prlist {
msg := pb.ManipulationReqMsg{ProxyId: int64(idx + 1), Timestamp: toTimestamp(timestamp)}
mb, err := proto.Marshal(&msg)
assert.Nil(t, err)
if _, err := pr.Send(context.Background(), &pulsar.ProducerMessage{Payload: mb}); err != nil {
t.Fatal(err)
}
}
if i%20 == 0 {
tm := pb.TimeSyncMsg{Peer_Id: 1, Timestamp: toTimestamp(timestamp)}
tb, err := proto.Marshal(&tm)
assert.Nil(t, err)
if _, err := pt.Send(context.Background(), &pulsar.ProducerMessage{Payload: tb}); err != nil {
t.Fatal(err)
}
}
}
}()
r, err := NewReaderTimeSync(timeSyncTopic3,
timeSyncSubName3,
[]string{readerTopic13, readerTopic23, readerTopic33, readerTopic43},
readerSubName3,
[]int64{1},
readStopFlag3,
WithPulsarAddress("pulsar://localhost:6650"),
WithInterval(interval),
WithReaderQueueSize(1024))
assert.Nil(t, err)
defer r.Close()
ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
if err := r.Start(); err != nil {
t.Fatal(err)
}
var tsm1, tsm2 TimeSyncMsg
var totalRecords int64 = 0
for {
if ctx.Err() != nil {
break
}
select {
case <-ctx.Done():
tsm1.NumRecorders = 0
break
case tsm1 = <-r.TimeSync():
}
if tsm1.NumRecorders > 0 {
totalRecords += tsm1.NumRecorders
for i := int64(0); i < tsm1.NumRecorders; i++ {
im := <-r.ManipulationReqMsg()
assert.GreaterOrEqual(t, im.Timestamp, tsm2.Timestamp)
}
tsm2 = tsm1
}
}
log.Printf("total records = %d", totalRecords)
assert.Equal(t, totalRecords, int64(800))
}
func getMillisecond(ts uint64) uint64 {
return ts >> 18
}
func toTimestamp(ts uint64) uint64 {
return ts << 18
}
func startWriteTimeSync(id int64, topic string, client pulsar.Client, duration time.Duration, t *testing.T) {
p, _ := client.CreateProducer(pulsar.ProducerOptions{Topic: topic})
ticker := time.Tick(interval * time.Millisecond)
numSteps := int(duration / (interval * time.Millisecond))
var tm uint64 = 0
for i := 0; i < numSteps; i++ {
<-ticker
tm += interval
tsm := pb.TimeSyncMsg{Timestamp: toTimestamp(tm), Peer_Id: id}
tb, _ := proto.Marshal(&tsm)
if _, err := p.Send(context.Background(), &pulsar.ProducerMessage{Payload: tb}); err != nil {
t.Fatalf("send failed tsm id=%d, timestamp=%d, err=%v", tsm.Peer_Id, tsm.Timestamp, err)
} else {
//log.Printf("send tsm id=%d, timestamp=%d", tsm.Peer_Id, tsm.Timestamp)
}
}
}
func startProxy(pt pulsar.Producer, ptid int64, pr1 pulsar.Producer, prid1 int64, pr2 pulsar.Producer, prid2 int64, duration time.Duration, t *testing.T) {
total := int(duration / (10 * time.Millisecond))
ticker := time.Tick(10 * time.Millisecond)
var timestamp uint64 = 0
for i := 1; i <= total; i++ {
<-ticker
timestamp += 10
msg := pb.ManipulationReqMsg{ProxyId: int64(prid1), Timestamp: toTimestamp(timestamp)}
mb, err := proto.Marshal(&msg)
if err != nil {
t.Fatalf("marshal error %v", err)
}
if _, err := pr1.Send(context.Background(), &pulsar.ProducerMessage{Payload: mb}); err != nil {
t.Fatalf("send msg error %v", err)
}
msg.ProxyId = prid2
mb, err = proto.Marshal(&msg)
if err != nil {
t.Fatalf("marshal error %v", err)
}
if _, err := pr2.Send(context.Background(), &pulsar.ProducerMessage{Payload: mb}); err != nil {
t.Fatalf("send msg error %v", err)
}
//log.Printf("send msg id = [ %d %d ], timestamp = %d", prid1, prid2, timestamp)
if i%20 == 0 {
tm := pb.TimeSyncMsg{Peer_Id: ptid, Timestamp: toTimestamp(timestamp)}
tb, err := proto.Marshal(&tm)
if err != nil {
t.Fatalf("marshal error %v", err)
}
if _, err := pt.Send(context.Background(), &pulsar.ProducerMessage{Payload: tb}); err != nil {
t.Fatalf("send msg error %v", err)
}
//log.Printf("send timestamp id = %d, timestamp = %d", ptid, timestamp)
}
}
}

View File

@ -0,0 +1,56 @@
package proxy_node
import "sync"
type requestScheduler struct {
//definitions requestQueue
//manipulations requestQueue
manipulationsChan chan *manipulationReq // manipulation queue
m_timestamp Timestamp
m_timestamp_mux sync.Mutex
//queries requestQueue
queryChan chan *queryReq
q_timestamp Timestamp
q_timestamp_mux sync.Mutex
}
// @param selection
// bit_0 = 1: select definition queue
// bit_1 = 1: select manipulation queue
// bit_2 = 1: select query queue
// example: if selection = 3, then both definition and manipulation queues are selected
func (rs *requestScheduler) AreRequestsDelivered(ts Timestamp, selection uint32) bool {
r1 := func() bool {
if selection&uint32(2) == 0 {
return true
}
rs.m_timestamp_mux.Lock()
defer rs.m_timestamp_mux.Unlock()
if rs.m_timestamp >= ts {
return true
}
if len(rs.manipulationsChan) == 0 {
return true
}
return false
}()
r2 := func() bool {
if selection&uint32(4) == 0 {
return true
}
rs.q_timestamp_mux.Lock()
defer rs.q_timestamp_mux.Unlock()
if rs.q_timestamp >= ts {
return true
}
if len(rs.queryChan) == 0 {
return true
}
return false
}()
return r1 && r2
}
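// Usage sketch (see StartProxy): before emitting a time tick for ts, the proxy
// verifies that every manipulation request up to ts has been delivered:
//
//	delivered := srv.reqSch.AreRequestsDelivered(ts, 2) // bit_1 selects the manipulation queue
//	// pass 2|4 = 6 to additionally require the query queue to be drained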

View File

@ -0,0 +1,478 @@
package proxy_node
import (
"context"
"encoding/json"
"fmt"
mpb "github.com/czs007/suvlim/pkg/master/grpc/master"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
master "github.com/czs007/suvlim/pkg/master/mock"
"github.com/golang/protobuf/proto"
etcd "go.etcd.io/etcd/clientv3"
"go.uber.org/atomic"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"log"
"net"
"sync"
"time"
)
const (
keyCollectionPath = "collection"
keySegmentPath = "segment"
)
type proxyServer struct {
pb.UnimplementedMilvusServiceServer
address string
masterAddress string
rootPath string // etcd root path
pulsarAddr string // pulsar address for reader
readerTopics []string //reader topics
deleteTopic string
queryTopic string
resultTopic string
resultGroup string
numReaderNode int
proxyId int64
getTimestamp func(count uint32) ([]Timestamp, pb.Status)
client *etcd.Client
ctx context.Context
wg sync.WaitGroup
////////////////////////////////////////////////////////////////
masterConn *grpc.ClientConn
masterClient mpb.MasterClient
grpcServer *grpc.Server
reqSch *requestScheduler
///////////////////////////////////////////////////////////////
collectionList map[uint64]*mpb.Collection
nameCollectionId map[string]uint64
segmentList map[uint64]*mpb.Segment
collectionMux sync.Mutex
queryId atomic.Uint64
}
func (s *proxyServer) CreateCollection(ctx context.Context, req *pb.Mapping) (*pb.Status, error) {
log.Printf("create collection %s", req.CollectionName)
return s.masterClient.CreateCollection(ctx, req)
}
func (s *proxyServer) CountCollection(ctx context.Context, req *pb.CollectionName) (*pb.CollectionRowCount, error) {
s.collectionMux.Lock()
defer s.collectionMux.Unlock()
collection_id, ok := s.nameCollectionId[req.CollectionName]
if !ok {
return &pb.CollectionRowCount{
CollectionRowCount: 0,
Status: &pb.Status{
ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR,
Reason: fmt.Sprintf("unable to get collection %s", req.CollectionName),
},
}, nil
}
if info, ok := s.collectionList[collection_id]; ok {
count := int64(0)
for _, seg_id := range info.SegmentIds {
if seg, ok := s.segmentList[seg_id]; ok {
count += seg.Rows
}
}
return &pb.CollectionRowCount{
CollectionRowCount: count,
Status: &pb.Status{
ErrorCode: pb.ErrorCode_SUCCESS,
},
}, nil
}
return &pb.CollectionRowCount{
CollectionRowCount: 0,
Status: &pb.Status{
ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR,
Reason: fmt.Sprintf("unable to get collection %s", req.CollectionName),
},
}, nil
}
func (s *proxyServer) CreateIndex(ctx context.Context, req *pb.IndexParam) (*pb.Status, error) {
log.Printf("create index, collection name = %s, index name = %s, filed_name = %s", req.CollectionName, req.IndexName, req.FieldName)
return s.masterClient.CreateIndex(ctx, req)
}
func (s *proxyServer) DeleteByID(ctx context.Context, req *pb.DeleteByIDParam) (*pb.Status, error) {
log.Printf("delete entites, total = %d", len(req.IdArray))
pm := &manipulationReq{
ManipulationReqMsg: pb.ManipulationReqMsg{
CollectionName: req.CollectionName,
ReqType: pb.ReqType_kDeleteEntityByID,
ProxyId: s.proxyId,
},
proxy: s,
}
for _, id := range req.IdArray {
pm.PrimaryKeys = append(pm.PrimaryKeys, uint64(id))
}
if len(pm.PrimaryKeys) > 0 {
if st := pm.PreExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &st, nil
}
if st := pm.Execute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &st, nil
}
if st := pm.PostExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &st, nil
}
if st := pm.WaitToFinish(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &st, nil
}
}
return &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}, nil
}
func (s *proxyServer) Insert(ctx context.Context, req *pb.InsertParam) (*pb.EntityIds, error) {
log.Printf("Insert Entities, total = %d", len(req.RowsData))
ipm := make(map[uint32]*manipulationReq)
//TODO
if len(req.EntityIdArray) == 0 { //primary key is empty, set primary key by server
log.Printf("Set primary key")
}
if len(req.EntityIdArray) != len(req.RowsData) {
return &pb.EntityIds{
Status: &pb.Status{
ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR,
Reason: fmt.Sprintf("length of EntityIdArray not equal to lenght of RowsData"),
},
EntityIdArray: req.EntityIdArray,
}, nil
}
for i := 0; i < len(req.EntityIdArray); i++ {
key := uint64(req.EntityIdArray[i])
hash, err := Hash32_Uint64(key)
if err != nil {
return nil, status.Errorf(codes.Unknown, "hash failed on %d", key)
}
hash = hash % uint32(len(s.readerTopics))
ip, ok := ipm[hash]
if !ok {
segId, err := s.getSegmentId(int32(hash), req.CollectionName)
if err != nil {
return nil, err
}
ipm[hash] = &manipulationReq{
ManipulationReqMsg: pb.ManipulationReqMsg{
CollectionName: req.CollectionName,
PartitionTag: req.PartitionTag,
SegmentId: segId,
ChannelId: uint64(hash),
ReqType: pb.ReqType_kInsert,
ProxyId: s.proxyId,
ExtraParams: req.ExtraParams,
},
proxy: s,
}
ip = ipm[hash]
}
ip.PrimaryKeys = append(ip.PrimaryKeys, key)
ip.RowsData = append(ip.RowsData, req.RowsData[i])
}
for _, ip := range ipm {
if st := ip.PreExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS { //do nothing
return &pb.EntityIds{
Status: &st,
EntityIdArray: req.EntityIdArray,
}, nil
}
if st := ip.Execute(); st.ErrorCode != pb.ErrorCode_SUCCESS { // push into chan
return &pb.EntityIds{
Status: &st,
EntityIdArray: req.EntityIdArray,
}, nil
}
if st := ip.PostExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS { //post to pulsar
return &pb.EntityIds{
Status: &st,
EntityIdArray: req.EntityIdArray,
}, nil
}
}
for _, ip := range ipm {
if st := ip.WaitToFinish(); st.ErrorCode != pb.ErrorCode_SUCCESS {
log.Printf("Wait to finish failed, error code = %d", st.ErrorCode)
}
}
return &pb.EntityIds{
Status: &pb.Status{
ErrorCode: pb.ErrorCode_SUCCESS,
},
EntityIdArray: req.EntityIdArray,
}, nil
}
func (s *proxyServer) Search(ctx context.Context, req *pb.SearchParam) (*pb.QueryResult, error) {
qm := &queryReq{
QueryReqMsg: pb.QueryReqMsg{
CollectionName: req.CollectionName,
VectorParam: req.VectorParam,
PartitionTags: req.PartitionTag,
Dsl: req.Dsl,
ExtraParams: req.ExtraParams,
ProxyId: s.proxyId,
QueryId: s.queryId.Add(1),
ReqType: pb.ReqType_kSearch,
},
proxy: s,
}
log.Printf("search on collection %s, proxy id = %d, query id = %d", req.CollectionName, qm.ProxyId, qm.QueryId)
if st := qm.PreExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &pb.QueryResult{
Status: &st,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}, nil
}
if st := qm.Execute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &pb.QueryResult{
Status: &st,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}, nil
}
if st := qm.PostExecute(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &pb.QueryResult{
Status: &st,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}, nil
}
if st := qm.WaitToFinish(); st.ErrorCode != pb.ErrorCode_SUCCESS {
return &pb.QueryResult{
Status: &st,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}, nil
}
return s.reduceResult(qm), nil
}
// check if proxyServer is configured correctly
func (s *proxyServer) check() error {
if len(s.address) == 0 {
return fmt.Errorf("proxy address is unset")
}
if len(s.masterAddress) == 0 {
return fmt.Errorf("master address is unset")
}
if len(s.rootPath) == 0 {
return fmt.Errorf("root path for etcd is unset")
}
if len(s.pulsarAddr) == 0 {
return fmt.Errorf("pulsar address is unset")
}
if len(s.readerTopics) == 0 {
return fmt.Errorf("reader topics is unset")
}
if len(s.deleteTopic) == 0 {
return fmt.Errorf("delete topic is unset")
}
if len(s.queryTopic) == 0 {
return fmt.Errorf("query topic is unset")
}
if len(s.resultTopic) == 0 {
return fmt.Errorf("result topic is unset")
}
if len(s.resultGroup) == 0 {
return fmt.Errorf("result group is unset")
}
if s.numReaderNode <= 0 {
return fmt.Errorf("number of reader nodes is unset")
}
if s.proxyId <= 0 {
return fmt.Errorf("proxyId is unset")
}
log.Printf("proxy id = %d", s.proxyId)
if s.getTimestamp == nil {
return fmt.Errorf("getTimestamp is unset")
}
if s.client == nil {
return fmt.Errorf("etcd client is unset")
}
if s.ctx == nil {
return fmt.Errorf("context is unset")
}
return nil
}
func (s *proxyServer) getSegmentId(channelId int32, colName string) (uint64, error) {
s.collectionMux.Lock()
defer s.collectionMux.Unlock()
colId, ok := s.nameCollectionId[colName]
if !ok {
return 0, status.Errorf(codes.Unknown, "can't get collection id of %s", colName)
}
colInfo, ok := s.collectionList[colId]
if !ok {
return 0, status.Errorf(codes.Unknown, "can't get collection, name = %s, id = %d", colName, colId)
}
for _, segId := range colInfo.SegmentIds {
seg, ok := s.segmentList[segId]
if !ok {
return 0, status.Errorf(codes.Unknown, "can't get segment of %d", segId)
}
if seg.Status == mpb.SegmentStatus_OPENED {
if seg.ChannelStart <= channelId && channelId < seg.ChannelEnd {
return segId, nil
}
}
}
return 0, status.Errorf(codes.Unknown, "can't get segment id, channel id = %d", channelId)
}
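// Illustrative mapping (mirrors TestProxyNode's metadata): segment 101 covers
// channels [0, 1) and segment 102 covers [1, 2), so a row hashed to channel 0
// is routed to segment 101 and a row hashed to channel 1 to segment 102.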
func (s *proxyServer) connectMaster() error {
ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
conn, err := grpc.DialContext(ctx, s.masterAddress, grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
log.Printf("Connect to master failed, error= %v", err)
return err
}
log.Printf("Connected to master, master_addr=%s", s.masterAddress)
s.masterConn = conn
s.masterClient = mpb.NewMasterClient(conn)
return nil
}
func (s *proxyServer) Close() {
s.client.Close()
s.masterConn.Close()
s.grpcServer.Stop()
}
func (s *proxyServer) StartGrpcServer() error {
lis, err := net.Listen("tcp", s.address)
if err != nil {
return err
}
s.wg.Add(1)
go func() {
defer s.wg.Done()
server := grpc.NewServer()
pb.RegisterMilvusServiceServer(server, s)
err := server.Serve(lis)
if err != nil {
log.Fatalf("Proxy grpc server fatal error=%v", err)
}
}()
return nil
}
func (s *proxyServer) WatchEtcd() error {
s.collectionMux.Lock()
defer s.collectionMux.Unlock()
cos, err := s.client.Get(s.ctx, s.rootPath+"/"+keyCollectionPath, etcd.WithPrefix())
if err != nil {
return err
}
for _, cob := range cos.Kvs {
// TODO: simplify collection struct
var co mpb.Collection
var mco master.Collection
if err := json.Unmarshal(cob.Value, &mco); err != nil {
return err
}
proto.UnmarshalText(mco.GrpcMarshalString, &co)
s.nameCollectionId[co.Name] = co.Id
s.collectionList[co.Id] = &co
log.Printf("watch collection, name = %s, id = %d", co.Name, co.Id)
}
segs, err := s.client.Get(s.ctx, s.rootPath+"/"+keySegmentPath, etcd.WithPrefix())
if err != nil {
return err
}
for _, segb := range segs.Kvs {
var seg mpb.Segment
if err := json.Unmarshal(segb.Value, &seg); err != nil {
return err
}
s.segmentList[seg.SegmentId] = &seg
log.Printf("watch segment id = %d\n", seg.SegmentId)
}
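// resume watching from the revision just after the snapshot reads above,
// so no collection or segment update is missed or replayed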
cow := s.client.Watch(s.ctx, s.rootPath+"/"+keyCollectionPath, etcd.WithPrefix(), etcd.WithRev(cos.Header.Revision+1))
segw := s.client.Watch(s.ctx, s.rootPath+"/"+keySegmentPath, etcd.WithPrefix(), etcd.WithRev(segs.Header.Revision+1))
s.wg.Add(1)
go func() {
	defer s.wg.Done()
for {
select {
case <-s.ctx.Done():
return
case coe := <-cow:
func() {
s.collectionMux.Lock()
defer s.collectionMux.Unlock()
for _, e := range coe.Events {
var co mpb.Collection
var mco master.Collection
if err := json.Unmarshal(e.Kv.Value, &mco); err != nil {
log.Printf("unmarshal Collection failed, error = %v", err)
} else {
proto.UnmarshalText(mco.GrpcMarshalString, &co)
s.nameCollectionId[co.Name] = co.Id
s.collectionList[co.Id] = &co
log.Printf("watch collection, name = %s, id = %d", co.Name, co.Id)
}
}
}()
case sege := <-segw:
func() {
s.collectionMux.Lock()
defer s.collectionMux.Unlock()
for _, e := range sege.Events {
var seg mpb.Segment
if err := json.Unmarshal(e.Kv.Value, &seg); err != nil {
log.Printf("unmarshal Segment failed, error = %v", err)
} else {
s.segmentList[seg.SegmentId] = &seg
log.Printf("watch segment id = %d\n", seg.SegmentId)
}
}
}()
}
}
}()
return nil
}
func startProxyServer(srv *proxyServer) error {
if err := srv.check(); err != nil {
return err
}
srv.reqSch = &requestScheduler{}
if err := srv.restartManipulationRoutine(1024); err != nil {
return err
}
if err := srv.restartQueryRoutine(1024); err != nil {
return err
}
srv.nameCollectionId = make(map[string]uint64)
srv.collectionList = make(map[uint64]*mpb.Collection)
srv.segmentList = make(map[uint64]*mpb.Segment)
if err := srv.connectMaster(); err != nil {
return err
}
if err := srv.WatchEtcd(); err != nil {
return err
}
srv.queryId.Store(uint64(time.Now().UnixNano()))
return srv.StartGrpcServer()
}

View File

@ -0,0 +1,522 @@
package proxy_node
import (
"context"
"encoding/binary"
"encoding/json"
"github.com/apache/pulsar-client-go/pulsar"
mpb "github.com/czs007/suvlim/pkg/master/grpc/master"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
"net"
"sort"
"testing"
"time"
"unsafe"
)
type testMasterServer struct {
mpb.UnimplementedMasterServer
}
func (*testMasterServer) CreateCollection(ctx context.Context, req *pb.Mapping) (*pb.Status, error) {
return &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS, Reason: req.CollectionName}, nil
}
func (*testMasterServer) CreateIndex(ctx context.Context, req *pb.IndexParam) (*pb.Status, error) {
return &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS, Reason: req.IndexName}, nil
}
func startTestMaster(master_addr string, t *testing.T) *grpc.Server {
lis, err := net.Listen("tcp", master_addr)
assert.Nil(t, err)
s := grpc.NewServer()
mpb.RegisterMasterServer(s, &testMasterServer{})
go func() {
if err := s.Serve(lis); err != nil {
t.Fatal(err)
}
}()
return s
}
func startTestProxyServer(proxy_addr string, master_addr string, t *testing.T) *proxyServer {
client, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
assert.Nil(t, err)
ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
var timestamp uint64 = 1000
p := &proxyServer{
address: proxy_addr,
masterAddress: master_addr,
rootPath: "/proxy/root",
pulsarAddr: "pulsar://localhost:6650",
readerTopics: []string{"reader1", "reader2"},
deleteTopic: "deleteT",
queryTopic: "queryer",
resultTopic: "resulter",
resultGroup: "resultG",
numReaderNode: 2,
proxyId: 1,
getTimestamp: func(count uint32) ([]Timestamp, pb.Status) {
timestamp += 100
ts := make([]Timestamp, count) // named ts to avoid shadowing the test's *testing.T
for i := 0; i < int(count); i++ {
	ts[i] = Timestamp(timestamp)
}
return ts, pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
},
client: client,
ctx: ctx,
}
go func() {
if err := startProxyServer(p); err != nil {
t.Fatal(err)
}
}()
return p
}
func uint64ToBytes(v uint64) []byte {
b := make([]byte, unsafe.Sizeof(v))
binary.LittleEndian.PutUint64(b, v)
return b
}
func TestProxyServer_CreateCollectionAndIndex(t *testing.T) {
_ = startTestMaster("localhost:10000", t)
//defer ms.Stop()
time.Sleep(100 * time.Millisecond)
ps := startTestProxyServer("localhost:10001", "localhost:10000", t)
//defer ps.Close()
time.Sleep(100 * time.Millisecond)
ctx := ps.ctx
conn, err := grpc.DialContext(ctx, "localhost:10001", grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
cli := pb.NewMilvusServiceClient(conn)
st, err := cli.CreateCollection(ctx, &pb.Mapping{CollectionName: "testCollectionName"})
assert.Nil(t, err)
assert.Equalf(t, st.ErrorCode, pb.ErrorCode_SUCCESS, "CreateCollection failed")
assert.Equalf(t, st.Reason, "testCollectionName", "CreateCollection failed")
st, err = cli.CreateIndex(ctx, &pb.IndexParam{IndexName: "testIndexName"})
assert.Nil(t, err)
assert.Equalf(t, st.ErrorCode, pb.ErrorCode_SUCCESS, "CreateIndex failed")
assert.Equalf(t, st.Reason, "testIndexName", "CreateIndex failed")
}
func TestProxyServer_WatchEtcd(t *testing.T) {
client, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
assert.Nil(t, err)
defer client.Close()
ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
col1 := mpb.Collection{
Id: 1,
Name: "c1",
SegmentIds: []uint64{2, 3},
}
seg2 := mpb.Segment{
SegmentId: 2,
Rows: 10,
}
seg3 := mpb.Segment{
SegmentId: 3,
Rows: 10,
}
if cb1, err := json.Marshal(&col1); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keyCollectionPath+"/1", string(cb1)); err != nil {
t.Fatal(err)
}
if sb2, err := json.Marshal(&seg2); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keySegmentPath+"/2", string(sb2)); err != nil {
t.Fatal(err)
}
if sb3, err := json.Marshal(&seg3); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keySegmentPath+"/3", string(sb3)); err != nil {
t.Fatal(err)
}
_ = startTestMaster("localhost:10002", t)
//defer ms.Stop()
time.Sleep(100 * time.Millisecond)
ps := startTestProxyServer("localhost:10003", "localhost:10002", t)
//defer ps.Close()
time.Sleep(100 * time.Millisecond)
conn, err := grpc.DialContext(ps.ctx, "localhost:10003", grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
cli := pb.NewMilvusServiceClient(conn)
cr, err := cli.CountCollection(ps.ctx, &pb.CollectionName{CollectionName: "c1"})
assert.Nil(t, err)
assert.Equalf(t, cr.Status.ErrorCode, pb.ErrorCode_SUCCESS, "CountCollection failed : %s", cr.Status.Reason)
assert.Equalf(t, cr.CollectionRowCount, int64(20), "collection count expect to be 20, count = %d", cr.CollectionRowCount)
col4 := mpb.Collection{
Id: 4,
Name: "c4",
SegmentIds: []uint64{5},
}
seg5 := mpb.Segment{
SegmentId: 5,
Rows: 10,
}
if cb4, err := json.Marshal(&col4); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ps.ctx, "/proxy/root/"+keyCollectionPath+"/4", string(cb4)); err != nil {
t.Fatal(err)
}
if sb5, err := json.Marshal(&seg5); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ps.ctx, "/proxy/root/"+keySegmentPath+"/5", string(sb5)); err != nil {
t.Fatal(err)
}
cr, err = cli.CountCollection(ps.ctx, &pb.CollectionName{CollectionName: "c4"})
assert.Nil(t, err)
assert.Equalf(t, cr.Status.ErrorCode, pb.ErrorCode_SUCCESS, "CountCollection failed : %s", cr.Status.Reason)
assert.Equalf(t, cr.CollectionRowCount, int64(10), "collection count expect to be 10, count = %d", cr.CollectionRowCount)
}
func TestProxyServer_InsertAndDelete(t *testing.T) {
client, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
assert.Nil(t, err)
defer client.Close()
ctx, _ := context.WithTimeout(context.Background(), 2*time.Second)
col10 := mpb.Collection{
Id: 10,
Name: "col10",
Schema: nil,
CreateTime: 0,
SegmentIds: []uint64{11, 12},
PartitionTags: nil,
Indexes: nil,
}
seg11 := mpb.Segment{
SegmentId: 11,
CollectionId: 10,
ChannelStart: 0,
ChannelEnd: 1,
Status: mpb.SegmentStatus_OPENED,
}
seg12 := mpb.Segment{
SegmentId: 12,
CollectionId: 10,
ChannelStart: 1,
ChannelEnd: 2,
Status: mpb.SegmentStatus_OPENED,
}
if cb10, err := json.Marshal(&col10); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keyCollectionPath+"/10", string(cb10)); err != nil {
t.Fatal(err)
}
if sb11, err := json.Marshal(&seg11); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keySegmentPath+"/11", string(sb11)); err != nil {
t.Fatal(err)
}
if sb12, err := json.Marshal(&seg12); err != nil {
t.Fatal(err)
} else if _, err := client.Put(ctx, "/proxy/root/"+keySegmentPath+"/12", string(sb12)); err != nil {
t.Fatal(err)
}
_ = startTestMaster("localhost:10004", t)
time.Sleep(100 * time.Millisecond)
ps := startTestProxyServer("localhost:10005", "localhost:10004", t)
//defer ps.Close()
time.Sleep(100 * time.Millisecond)
conn, err := grpc.DialContext(ps.ctx, "localhost:10005", grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: ps.pulsarAddr})
assert.Nil(t, err)
defer pulsarClient.Close()
reader, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topics: ps.readerTopics,
SubscriptionName: "reader-group",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer reader.Close()
deleter, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: ps.deleteTopic,
SubscriptionName: "delete-group",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer deleter.Close()
pctx, _ := context.WithTimeout(ps.ctx, time.Second)
isbreak := false
for {
if isbreak {
break
}
select {
case <-pctx.Done():
isbreak = true
break
case cm, ok := <-reader.Chan():
if !ok {
t.Fatalf("reader closed")
}
reader.AckID(cm.ID())
break
case cm, ok := <-deleter.Chan():
assert.Truef(t, ok, "deleter closed")
deleter.AckID(cm.ID())
}
}
ip := pb.InsertParam{
CollectionName: "col10",
Schema: nil,
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(1)},
{Blob: uint64ToBytes(2)},
{Blob: uint64ToBytes(3)},
{Blob: uint64ToBytes(4)},
{Blob: uint64ToBytes(5)},
},
EntityIdArray: []int64{1, 2, 3, 4, 5},
PartitionTag: "",
ExtraParams: nil,
}
dp := pb.DeleteByIDParam{
CollectionName: "deleteCollection",
IdArray: []int64{1, 2, 3, 4, 5},
}
serverClient := pb.NewMilvusServiceClient(conn)
ir, err := serverClient.Insert(ps.ctx, &ip)
assert.Nil(t, err)
assert.Equalf(t, ir.Status.ErrorCode, pb.ErrorCode_SUCCESS, "Insert failed, error code = %d, reason = %s", ir.Status.ErrorCode, ir.Status.Reason)
assert.Equalf(t, len(ir.EntityIdArray), 5, "insert failed, len(ir.EntityIdArray) expect to be 5")
sort.Slice(ir.EntityIdArray, func(i int, j int) bool { return ir.EntityIdArray[i] < ir.EntityIdArray[j] })
for i := 0; i < 5; i++ {
assert.Equal(t, ir.EntityIdArray[i], int64(i+1))
}
dr, err := serverClient.DeleteByID(ps.ctx, &dp)
assert.Nil(t, err)
assert.Equalf(t, dr.ErrorCode, pb.ErrorCode_SUCCESS, "delete failed, error code = %d, reason = %s", dr.ErrorCode, dr.Reason)
var primaryKey []uint64
isbreak = false
for {
if isbreak {
break
}
select {
case <-ps.ctx.Done():
isbreak = true
break
case cm, ok := <-reader.Chan():
assert.Truef(t, ok, "reader closed")
msg := cm.Message
var m pb.ManipulationReqMsg
if err := proto.Unmarshal(msg.Payload(), &m); err != nil {
t.Fatal(err)
}
for i, k := range m.PrimaryKeys {
primaryKey = append(primaryKey, k)
rowValue := binary.LittleEndian.Uint64(m.RowsData[i].Blob)
t.Logf("primary key = %d, rowvalue =%d", k, rowValue)
assert.Equalf(t, k, rowValue, "key expect equal to row value")
}
reader.AckID(cm.ID())
break
case cm, ok := <-deleter.Chan():
assert.Truef(t, ok, "deleter closed")
var m pb.ManipulationReqMsg
if err := proto.Unmarshal(cm.Message.Payload(), &m); err != nil {
t.Fatal(err)
}
assert.Equalf(t, m.CollectionName, "deleteCollection", "delete failed, collection name = %s", m.CollectionName)
assert.Equalf(t, len(m.PrimaryKeys), 5, "delete failed,len(m.PrimaryKeys) = %d", len(m.PrimaryKeys))
for i, v := range m.PrimaryKeys {
assert.Equalf(t, v, uint64(i+1), "delete failed")
}
}
}
assert.Equalf(t, len(primaryKey), 5, "Receive from pulsar failed")
sort.Slice(primaryKey, func(i int, j int) bool { return primaryKey[i] < primaryKey[j] })
for i := 0; i < 5; i++ {
assert.Equalf(t, primaryKey[i], uint64(i+1), "insert failed")
}
t.Logf("m_timestamp = %d", ps.reqSch.m_timestamp)
assert.Equalf(t, ps.reqSch.m_timestamp, Timestamp(1300), "insert failed")
}
func TestProxyServer_Search(t *testing.T) {
_ = startTestMaster("localhost:10006", t)
time.Sleep(100 * time.Millisecond)
ps := startTestProxyServer("localhost:10007", "localhost:10006", t)
time.Sleep(100 * time.Millisecond)
conn, err := grpc.DialContext(ps.ctx, "localhost:10007", grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
pulsarClient, err := pulsar.NewClient(pulsar.ClientOptions{URL: ps.pulsarAddr})
assert.Nil(t, err)
defer pulsarClient.Close()
query, err := pulsarClient.Subscribe(pulsar.ConsumerOptions{
Topic: ps.queryTopic,
SubscriptionName: "query-group",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
defer query.Close()
result, err := pulsarClient.CreateProducer(pulsar.ProducerOptions{Topic: ps.resultTopic})
assert.Nil(t, err)
defer result.Close()
pctx, _ := context.WithTimeout(ps.ctx, time.Second)
func() {
for {
select {
case <-pctx.Done():
return
case cm, ok := <-query.Chan():
if !ok {
t.Fatal("query topic is closed")
}
query.AckID(cm.ID())
}
}
}()
go func() {
cm, ok := <-query.Chan()
assert.Truef(t, ok, "query topic is closed")
query.AckID(cm.ID())
var qm pb.QueryReqMsg
if err := proto.Unmarshal(cm.Payload(), &qm); err != nil {
t.Fatal(err)
}
if qm.ProxyId != ps.proxyId {
t.Fatalf("search failed, incorrect proxy id = %d", qm.ProxyId)
}
if qm.CollectionName != "collection_search" {
t.Fatalf("search failed, incorrect collection name = %s", qm.CollectionName)
}
r1 := pb.QueryResult{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Entities: &pb.Entities{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Ids: []int64{1, 3, 5},
ValidRow: []bool{true, true, true},
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(1)},
{Blob: uint64ToBytes(3)},
{Blob: uint64ToBytes(5)},
},
},
RowNum: 3,
Scores: []float32{1, 3, 5},
Distances: []float32{1, 3, 5},
ExtraParams: nil,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}
r2 := pb.QueryResult{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Entities: &pb.Entities{
Status: &pb.Status{ErrorCode: pb.ErrorCode_SUCCESS},
Ids: []int64{2, 4, 6},
ValidRow: []bool{true, false, true},
RowsData: []*pb.RowData{
{Blob: uint64ToBytes(2)},
{Blob: uint64ToBytes(4)},
{Blob: uint64ToBytes(6)},
},
},
RowNum: 3,
Scores: []float32{2, 4, 6},
Distances: []float32{2, 4, 6},
ExtraParams: nil,
QueryId: qm.QueryId,
ProxyId: qm.ProxyId,
}
b1, err := proto.Marshal(&r1)
assert.Nil(t, err)
b2, err := proto.Marshal(&r2)
assert.Nil(t, err)
if _, err := result.Send(ps.ctx, &pulsar.ProducerMessage{Payload: b1}); err != nil {
t.Fatal(err)
}
if _, err := result.Send(ps.ctx, &pulsar.ProducerMessage{Payload: b2}); err != nil {
t.Fatal(err)
}
}()
sm := pb.SearchParam{
CollectionName: "collection_search",
VectorParam: nil,
Dsl: "",
PartitionTag: nil,
ExtraParams: nil,
}
serverClient := pb.NewMilvusServiceClient(conn)
qr, err := serverClient.Search(ps.ctx, &sm)
assert.Nil(t, err)
assert.Equalf(t, qr.Status.ErrorCode, pb.ErrorCode_SUCCESS, "query failed")
assert.Equalf(t, qr.Entities.Status.ErrorCode, pb.ErrorCode_SUCCESS, "query failed")
assert.Equalf(t, len(qr.Entities.Ids), 3, "query failed")
assert.Equalf(t, qr.Entities.Ids, []int64{6, 5, 3}, "query failed")
assert.Equalf(t, len(qr.Entities.ValidRow), 3, "query failed")
assert.Equalf(t, qr.Entities.ValidRow, []bool{true, true, true}, "query failed")
assert.Equalf(t, len(qr.Entities.RowsData), 3, "query failed")
assert.Equalf(t, qr.Entities.RowsData, []*pb.RowData{
{Blob: uint64ToBytes(6)},
{Blob: uint64ToBytes(5)},
{Blob: uint64ToBytes(3)},
}, "query failed")
assert.Equalf(t, len(qr.Scores), 3, "query failed")
assert.Equalf(t, qr.Scores, []float32{6, 5, 3}, "query failed")
assert.Equalf(t, len(qr.Distances), 3, "query failed")
assert.Equalf(t, qr.Distances, []float32{6, 5, 3}, "query failed")
}

View File

@ -0,0 +1,120 @@
package proxy_node
import (
"context"
"fmt"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
etcd "go.etcd.io/etcd/clientv3"
"log"
"strconv"
"sync"
"time"
)
const (
tsoKeyPath string = "/timestampOracle"
)
type timestamp struct {
physical uint64 // 18-63 bits
logical uint64 // 8-17 bits
id uint64 // 0-7 bits
}
type Timestamp uint64
type timestampOracle struct {
client *etcd.Client // client of a reliable meta service, i.e. etcd client
ctx context.Context
rootPath string // this timestampOracle's working root path on the reliable kv service
saveInterval uint64
lastSavedTime uint64
tso timestamp // monotonically increasing m_timestamp
mux sync.Mutex
}
func ToTimeStamp(t *timestamp) Timestamp {
ts := (t.physical << 18) + (t.logical << 8) + (t.id & uint64(0xFF))
return Timestamp(ts)
}
func ToPhysicalTime(t uint64) uint64 {
return t >> 18
}
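// Illustrative sketch (not part of the original change): decompose a packed
// Timestamp back into its physical/logical/id parts, mirroring the
// 46/10/8-bit layout that ToTimeStamp above composes.
func decomposeTimestamp(ts Timestamp) timestamp {
	v := uint64(ts)
	return timestamp{
		physical: v >> 18,          // bits 18-63
		logical:  (v >> 8) & 0x3FF, // bits 8-17
		id:       v & 0xFF,         // bits 0-7
	}
}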
func (tso *timestampOracle) Restart(id int64) {
go func() {
tso.loadTimestamp()
tso.tso.id = uint64(id)
ticker := time.Tick(time.Duration(tso.saveInterval) * time.Millisecond)
for {
select {
case <-ticker:
_, s := tso.GetTimestamp(1)
if s.ErrorCode == pb.ErrorCode_SUCCESS {
_ = tso.saveTimestamp()
}
break
case <-tso.ctx.Done():
if err := tso.client.Close(); err != nil {
log.Printf("close etcd client error %v", err)
}
return
}
}
}()
}
func (tso *timestampOracle) GetTimestamp(count uint32) ([]Timestamp, pb.Status) {
physical := uint64(time.Now().UnixNano()) / uint64(1e6)
var ctso timestamp
tso.mux.Lock()
if tso.tso.physical < physical {
tso.tso.physical = physical
}
ctso = tso.tso
tso.mux.Unlock()
tt := make([]Timestamp, 0, count)
// TODO(shengjh): tso.tso.logical is never advanced here, so calls within the
// same millisecond can hand out duplicate timestamps.
for i := uint32(0); i < count; i++ {
ctso.logical = uint64(i)
tt = append(tt, ToTimeStamp(&ctso))
}
return tt, pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
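// Hedged sketch addressing the TODO above (illustration only, not part of the
// original change): one way to advance the shared logical counter under the
// lock so that successive calls never hand out overlapping timestamps. The
// method name getTimestampAdvancing is hypothetical.
func (tso *timestampOracle) getTimestampAdvancing(count uint32) ([]Timestamp, pb.Status) {
	physical := uint64(time.Now().UnixNano()) / uint64(1e6)
	tso.mux.Lock()
	if tso.tso.physical < physical {
		tso.tso.physical = physical
		tso.tso.logical = 0 // new millisecond, restart the logical counter
	}
	start := tso.tso.logical
	tso.tso.logical += uint64(count) // reserve count slots for this caller
	ctso := tso.tso
	tso.mux.Unlock()
	tt := make([]Timestamp, 0, count)
	for i := uint64(0); i < uint64(count); i++ {
		ctso.logical = start + i
		tt = append(tt, ToTimeStamp(&ctso))
	}
	return tt, pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}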
func (tso *timestampOracle) saveTimestamp() pb.Status {
tso.mux.Lock()
physical := tso.tso.physical
tso.mux.Unlock()
if _, err := tso.client.Put(tso.ctx, tso.rootPath+tsoKeyPath, strconv.FormatUint(physical, 10)); err != nil {
return pb.Status{ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR, Reason: fmt.Sprintf("put into etcd failed, error = %v", err)}
}
tso.mux.Lock()
tso.lastSavedTime = physical
tso.mux.Unlock()
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (tso *timestampOracle) loadTimestamp() pb.Status {
ts, err := tso.client.Get(tso.ctx, tso.rootPath+tsoKeyPath)
if err != nil {
return pb.Status{ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR, Reason: fmt.Sprintf("get from etcd failed, error = %v", err)}
}
if len(ts.Kvs) != 0 {
n, err := strconv.ParseUint(string(ts.Kvs[0].Value), 10, 64)
if err != nil {
return pb.Status{ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR, Reason: fmt.Sprintf("ParseUint failed, error = %v", err)}
}
tso.mux.Lock()
tso.tso.physical = n
tso.lastSavedTime = n
tso.mux.Unlock()
} else {
tso.mux.Lock()
tso.tso.physical = uint64(time.Now().UnixNano()) / uint64(1e6)
tso.lastSavedTime = tso.tso.physical
tso.mux.Unlock()
}
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}

View File

@ -0,0 +1,34 @@
package proxy_node
import (
"context"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/clientv3"
"testing"
"time"
)
func TestTimestampOracle(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
assert.Nil(t, err)
defer cli.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tso := timestampOracle{
client: cli,
ctx: ctx,
rootPath: "/proxy/tso",
saveInterval: 200,
}
tso.Restart(0)
time.Sleep(time.Second)
tso.loadTimestamp()
tso.mux.Lock()
assert.GreaterOrEqualf(t, tso.tso.physical, uint64(100), "physical error")
t.Log("physical = ", tso.tso.physical)
tso.mux.Unlock()
ts, _ := tso.GetTimestamp(1)
t.Log("Timestamp = ", ts[0])
}

View File

@ -0,0 +1,74 @@
package proxy_node
import (
"context"
"fmt"
"github.com/apache/pulsar-client-go/pulsar"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"log"
"time"
)
type timeTick struct {
lastTick Timestamp
currentTick Timestamp
interval uint64
pulsarProducer pulsar.Producer
peer_id int64
ctx context.Context
areRequestsDelivered func(ts Timestamp) bool
getTimestamp func() (Timestamp, pb.Status)
}
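// tick publishes a TimeSyncMsg only after every request stamped at or before
// currentTick has been delivered; until then it returns success and retries
// on the next interval.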
func (tt *timeTick) tick() pb.Status {
if tt.lastTick == tt.currentTick {
ts, s := tt.getTimestamp()
if s.ErrorCode != pb.ErrorCode_SUCCESS {
return s
}
tt.currentTick = ts
}
if !tt.areRequestsDelivered(tt.currentTick) {
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
tsm := pb.TimeSyncMsg{
Timestamp: uint64(tt.currentTick),
Peer_Id: tt.peer_id,
SyncType: pb.SyncType_READ,
}
payload, err := proto.Marshal(&tsm)
if err != nil {
return pb.Status{ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR, Reason: fmt.Sprintf("marshal TimeSync failed, error = %v", err)}
}
if _, err := tt.pulsarProducer.Send(tt.ctx, &pulsar.ProducerMessage{Payload: payload}); err != nil {
return pb.Status{ErrorCode: pb.ErrorCode_UNEXPECTED_ERROR, Reason: fmt.Sprintf("send into pulsar failed, error = %v", err)}
}
tt.lastTick = tt.currentTick
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}
func (tt *timeTick) Restart() pb.Status {
tt.lastTick = 0
ts, s := tt.getTimestamp()
if s.ErrorCode != pb.ErrorCode_SUCCESS {
return s
}
tt.currentTick = ts
tick := time.Tick(time.Millisecond * time.Duration(tt.interval))
go func() {
for {
select {
case <-tick:
if s := tt.tick(); s.ErrorCode != pb.ErrorCode_SUCCESS {
log.Printf("timeTick error ,status = %d", int(s.ErrorCode))
}
case <-tt.ctx.Done():
tt.pulsarProducer.Close()
return
}
}
}()
return pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
}

View File

@ -0,0 +1,85 @@
package proxy_node
import (
"context"
"github.com/apache/pulsar-client-go/pulsar"
pb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"testing"
"time"
)
func TestTimeTick(t *testing.T) {
client, err := pulsar.NewClient(pulsar.ClientOptions{URL: "pulsar://localhost:6650"})
assert.Nil(t, err)
producer, err := client.CreateProducer(pulsar.ProducerOptions{Topic: "timesync"})
assert.Nil(t, err)
consumer, err := client.Subscribe(pulsar.ConsumerOptions{
Topic: "timesync",
SubscriptionName: "timesync_group",
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
})
assert.Nil(t, err)
ctx, _ := context.WithTimeout(context.Background(), 4*time.Second)
var curTs Timestamp
curTs = 0
tt := timeTick{
interval: 200,
pulsarProducer: producer,
peer_id: 1,
ctx: ctx,
areRequestsDelivered: func(ts Timestamp) bool { return true },
getTimestamp: func() (Timestamp, pb.Status) {
curTs = curTs + 100
return curTs, pb.Status{ErrorCode: pb.ErrorCode_SUCCESS}
},
}
tt.Restart()
ctx2, _ := context.WithTimeout(context.Background(), time.Second*2)
isbreak := false
for {
if isbreak {
break
}
select {
case <-ctx2.Done():
isbreak = true
break
case cm, ok := <-consumer.Chan():
if !ok {
t.Fatalf("consumer closed")
}
consumer.AckID(cm.ID())
break
}
}
var lastTimestamp uint64 = 0
for {
select {
case <-ctx.Done():
return
case cm, ok := <-consumer.Chan():
if !ok {
return
}
msg := cm.Message
var tsm pb.TimeSyncMsg
if err := proto.Unmarshal(msg.Payload(), &tsm); err != nil {
return
}
if tsm.Timestamp <= lastTimestamp {
t.Fatalf("current = %d, last = %d", uint64(tsm.Timestamp), uint64(lastTimestamp))
}
t.Log("current = ", tsm.Timestamp)
lastTimestamp = tsm.Timestamp
}
}
}

View File

@ -0,0 +1,21 @@
package proxy_node
import (
"encoding/binary"
"github.com/spaolacci/murmur3"
"unsafe"
)
func Hash32_Bytes(b []byte) (uint32, error) {
h := murmur3.New32()
if _, err := h.Write(b); err != nil {
return 0, err
}
return h.Sum32() & 0x7fffffff, nil
}
func Hash32_Uint64(v uint64) (uint32, error) {
b := make([]byte, unsafe.Sizeof(v))
binary.LittleEndian.PutUint64(b, v)
return Hash32_Bytes(b)
}
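// Usage sketch (hedged, not part of the original change): mapping an entity
// id onto one of numChannels reader channels, similar to how the proxy hashes
// uids to pick a channel when routing manipulation requests. The function
// name and numChannels parameter are assumed for illustration.
func channelForId(id uint64, numChannels uint32) (uint32, error) {
	h, err := Hash32_Uint64(id)
	if err != nil {
		return 0, err
	}
	return h % numChannels, nil
}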

View File

@ -0,0 +1,30 @@
package proxy_node
import (
"github.com/stretchr/testify/assert"
"testing"
"unsafe"
)
func TestUint64(t *testing.T) {
var i int64 = -1
var u uint64 = uint64(i)
t.Log(i)
t.Log(u)
}
func TestHash32_Uint64(t *testing.T) {
var u uint64 = 0x12
h, err := Hash32_Uint64(u)
assert.Nil(t, err)
t.Log(h)
b := make([]byte, unsafe.Sizeof(u))
b[0] = 0x12
h2, err := Hash32_Bytes(b)
assert.Nil(t, err)
t.Log(h2)
assert.Equal(t, h, h2)
}

View File

@ -5,7 +5,7 @@ if [[ ! ${jobs+1} ]]; then
jobs=$(nproc)
fi
BUILD_OUTPUT_DIR="cmake_build_release"
BUILD_OUTPUT_DIR="cmake_build"
BUILD_TYPE="Release"
BUILD_UNITTEST="OFF"
INSTALL_PREFIX=$(pwd)/milvus

View File

@ -0,0 +1,45 @@
package main
import (
"context"
"fmt"
pb "github.com/czs007/suvlim/proxy/generate_entity_ids/proto"
"google.golang.org/grpc"
"log"
"time"
)
const (
address = "localhost:10087"
)
func getIds(length int64) {
con, err := grpc.Dial(address, grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
log.Fatalf("did not connect: %v", err)
}
defer con.Close()
c := pb.NewGreeterClient(con)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
r, err := c.GetEntityID(ctx, &pb.Request{Length: length})
if err != nil {
log.Fatalf("could not greet: %v", err)
}
fmt.Println("+++++++++++++++++++++++++++++++++++++")
fmt.Println(r.GetIds())
}
func main() {
go getIds(100)
go getIds(100)
time.Sleep(3 * time.Second)
}

View File

@ -0,0 +1,45 @@
package generate_entity_ids
import (
	"context"
	"fmt"
	"github.com/apache/pulsar/pulsar-client-go/pulsar"
	"log"
	"strconv"
	"time"
)
func CollectResult(clientNum int, topicName string) [][]byte {
client, err := pulsar.NewClient(pulsar.ClientOptions{
URL: "pulsar://localhost:6650",
})
if err != nil {
log.Fatal(err)
}
defer client.Close()
consumer, err := client.Subscribe(pulsar.ConsumerOptions{
Topic: topicName + "-partition-" + strconv.Itoa(clientNum), // string(int) would yield a rune, not a decimal
SubscriptionName: "subName",
})
if err != nil {
log.Fatal(err)
}
defer consumer.Close()
var result [][]byte
ctx, canc := context.WithTimeout(context.Background(), 5*time.Second)
msg, err := consumer.Receive(ctx)
if err != nil {
log.Fatal(err)
}
err = consumer.Ack(msg)
if err != nil {
log.Fatal(err)
}
result = append(result, msg.Payload())
fmt.Println("consumer receive the message successful!")
canc()
return result
}

View File

@ -0,0 +1,210 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: generate_id.proto
package generate_entity_ids
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// The request message containing the user's name.
type Request struct {
Length int64 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) {
return fileDescriptor_72f50f761a21563e, []int{0}
}
func (m *Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Request.Unmarshal(m, b)
}
func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Request.Marshal(b, m, deterministic)
}
func (m *Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_Request.Merge(m, src)
}
func (m *Request) XXX_Size() int {
return xxx_messageInfo_Request.Size(m)
}
func (m *Request) XXX_DiscardUnknown() {
xxx_messageInfo_Request.DiscardUnknown(m)
}
var xxx_messageInfo_Request proto.InternalMessageInfo
func (m *Request) GetLength() int64 {
if m != nil {
return m.Length
}
return 0
}
// The response message containing the greetings
type Reply struct {
Ids []int64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Reply) Reset() { *m = Reply{} }
func (m *Reply) String() string { return proto.CompactTextString(m) }
func (*Reply) ProtoMessage() {}
func (*Reply) Descriptor() ([]byte, []int) {
return fileDescriptor_72f50f761a21563e, []int{1}
}
func (m *Reply) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Reply.Unmarshal(m, b)
}
func (m *Reply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Reply.Marshal(b, m, deterministic)
}
func (m *Reply) XXX_Merge(src proto.Message) {
xxx_messageInfo_Reply.Merge(m, src)
}
func (m *Reply) XXX_Size() int {
return xxx_messageInfo_Reply.Size(m)
}
func (m *Reply) XXX_DiscardUnknown() {
xxx_messageInfo_Reply.DiscardUnknown(m)
}
var xxx_messageInfo_Reply proto.InternalMessageInfo
func (m *Reply) GetIds() []int64 {
if m != nil {
return m.Ids
}
return nil
}
func init() {
proto.RegisterType((*Request)(nil), "generate_entity_ids.Request")
proto.RegisterType((*Reply)(nil), "generate_entity_ids.Reply")
}
func init() { proto.RegisterFile("generate_id.proto", fileDescriptor_72f50f761a21563e) }
var fileDescriptor_72f50f761a21563e = []byte{
// 183 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x4f, 0xcd, 0x4b,
0x2d, 0x4a, 0x2c, 0x49, 0x8d, 0xcf, 0x4c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x86,
0x0b, 0xa5, 0xe6, 0x95, 0x64, 0x96, 0x54, 0xc6, 0x67, 0xa6, 0x14, 0x2b, 0x29, 0x72, 0xb1, 0x07,
0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x89, 0x71, 0xb1, 0xe5, 0xa4, 0xe6, 0xa5, 0x97, 0x64,
0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x41, 0x79, 0x4a, 0x92, 0x5c, 0xac, 0x41, 0xa9, 0x05,
0x39, 0x95, 0x42, 0x02, 0x5c, 0xcc, 0x99, 0x29, 0xc5, 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xcc, 0x41,
0x20, 0xa6, 0x51, 0x08, 0x17, 0xbb, 0x7b, 0x51, 0x6a, 0x6a, 0x49, 0x6a, 0x91, 0x90, 0x27, 0x17,
0xb7, 0x7b, 0x6a, 0x89, 0x2b, 0xd8, 0x64, 0x4f, 0x17, 0x21, 0x19, 0x3d, 0x2c, 0xb6, 0xe9, 0x41,
0xad, 0x92, 0x92, 0xc2, 0x21, 0x5b, 0x90, 0x53, 0xa9, 0xc4, 0xe0, 0x64, 0xce, 0x25, 0x94, 0x99,
0xaf, 0x97, 0x5e, 0x54, 0x90, 0x0c, 0x57, 0xe6, 0xe9, 0xe2, 0xc4, 0xe5, 0x0e, 0x67, 0x07, 0x30,
0x46, 0x61, 0xf3, 0x4c, 0x12, 0x1b, 0xd8, 0xa3, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xeb,
0x7c, 0x51, 0xda, 0xfd, 0x00, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// GreeterClient is the client API for Greeter service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type GreeterClient interface {
// Sends a greeting
GetEntityID(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Reply, error)
}
type greeterClient struct {
cc *grpc.ClientConn
}
func NewGreeterClient(cc *grpc.ClientConn) GreeterClient {
return &greeterClient{cc}
}
func (c *greeterClient) GetEntityID(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Reply, error) {
out := new(Reply)
err := c.cc.Invoke(ctx, "/generate_entity_ids.Greeter/GetEntityID", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// GreeterServer is the server API for Greeter service.
type GreeterServer interface {
// Sends a greeting
GetEntityID(context.Context, *Request) (*Reply, error)
}
// UnimplementedGreeterServer can be embedded to have forward compatible implementations.
type UnimplementedGreeterServer struct {
}
func (*UnimplementedGreeterServer) GetEntityID(ctx context.Context, req *Request) (*Reply, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetEntityID not implemented")
}
func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) {
s.RegisterService(&_Greeter_serviceDesc, srv)
}
func _Greeter_GetEntityID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Request)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GreeterServer).GetEntityID(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/generate_entity_ids.Greeter/GetEntityID",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GreeterServer).GetEntityID(ctx, req.(*Request))
}
return interceptor(ctx, in, info, handler)
}
var _Greeter_serviceDesc = grpc.ServiceDesc{
ServiceName: "generate_entity_ids.Greeter",
HandlerType: (*GreeterServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetEntityID",
Handler: _Greeter_GetEntityID_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "generate_id.proto",
}

View File

@ -0,0 +1,24 @@
syntax = "proto3";
option go_package = "generate_entity_ids";
option java_multiple_files = true;
option java_package = "io.grpc.generateID";
option java_outer_classname = "GenerateID";
package generate_entity_ids;
// The entity id generation service definition.
service Greeter {
// Returns a batch of unique entity ids
rpc GetEntityID (Request) returns (Reply) {}
}
// The request message carrying the number of ids wanted.
message Request {
int64 length = 1;
}
// The response message carrying the generated ids
message Reply {
repeated int64 ids = 1;
}

View File

@ -0,0 +1,63 @@
package main
import (
	"context"
	pb "github.com/czs007/suvlim/proxy/generate_entity_ids/proto"
	"google.golang.org/grpc"
	"log"
	"net"
	"sync"
)
var (
	currentID int64 = 0
	idMutex   sync.Mutex // guards currentID across concurrent RPCs
)
const (
port = ":10087"
)
type server struct {
pb.UnimplementedGreeterServer
}
func (s *server) GetEntityID(ctx context.Context, in *pb.Request) (*pb.Reply, error) {
	length := in.Length
	ids := make([]int64, 0, length)
	// allocate the whole batch under one lock; the original spawned a
	// goroutine per id and busy-waited on len(ids) without holding the
	// mutex, which raced with concurrent RPCs
	idMutex.Lock()
	for i := int64(0); i < length; i++ {
		ids = append(ids, currentID)
		currentID++
	}
	idMutex.Unlock()
	return &pb.Reply{Ids: ids}, nil
}
func main() {
	listen, err := net.Listen("tcp", port)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer()
	pb.RegisterGreeterServer(s, &server{})
	if err := s.Serve(listen); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}

View File

@ -81,12 +81,6 @@ ConfigMgr::ConfigMgr() {
/* pulsar */
{"pulsar.authentication", CreateBoolConfig("pulsar.authentication", false, &config.pulsar.authentication,
false, nullptr, nullptr)},
{"pulsar.user", CreateStringConfig("pulsar.user", false, &config.pulsar.user.value,
"user-default", nullptr, nullptr)},
{"pulsar.token", CreateStringConfig("pulsar.token", false, &config.pulsar.token.value,
"fake-token", nullptr, nullptr)},
{"pulsar.address", CreateStringConfig("pulsar.address", false, &config.pulsar.address.value,
"localhost", nullptr, nullptr)},
{"pulsar.port", CreateIntegerConfig("pulsar.port", false, 0, 65535, &config.pulsar.port.value,

View File

@ -74,9 +74,6 @@ struct ServerConfig {
} network;
struct Pulsar{
bool authentication{false};
String user{"user-default"};
String token{"fake-token"};
String address{"localhost"};
Integer port{6650};
Integer topicnum{1024};

View File

@ -7,8 +7,6 @@
#include <omp.h>
#include <numeric>
#include <algorithm>
#include <unistd.h>
#include "nlohmann/json.hpp"
#include "log/Log.h"
namespace milvus::message_client {
@ -28,15 +26,7 @@ Status MsgClientV2::Init(const std::string &insert_delete,
const std::string &search_by_id,
const std::string &search_result) {
//create pulsar client
std::shared_ptr<MsgClient> pulsar_client;
if (config.pulsar.authentication) {
pulsar::ClientConfiguration clientConfig;
clientConfig.setAuth(pulsar::AuthToken::createWithToken(config.pulsar.token.value));
pulsar_client = std::make_shared<MsgClient>(service_url_, clientConfig);
} else {
pulsar_client = std::make_shared<MsgClient>(service_url_);
}
auto pulsar_client = std::make_shared<MsgClient>(service_url_);
//create pulsar producer
ProducerConfiguration producerConfiguration;
producerConfiguration.setPartitionsRoutingMode(ProducerConfiguration::CustomPartition);
@ -76,21 +66,14 @@ Aggregation(std::vector<std::shared_ptr<grpc::QueryResult>> results, milvus::grp
}
std::vector<float> all_scores;
// Proxy get numQueries from row_num.
auto numQueries = results[0]->row_num();
auto topK = results[0]->distances_size() / numQueries;
// 2d array for multiple queries
std::vector<std::vector<float>> all_distance(numQueries);
std::vector<std::vector<int64_t>> all_entities_ids(numQueries);
std::vector<float> all_distance;
std::vector<int64_t> all_entities_ids;
std::vector<bool> all_valid_row;
std::vector<grpc::RowData> all_row_data;
std::vector<grpc::KeyValuePair> all_kv_pairs;
grpc::Status status;
// int row_num = 0;
int row_num = 0;
for (auto &result_per_node : results) {
if (result_per_node->status().error_code() != grpc::ErrorCode::SUCCESS) {
@ -98,66 +81,46 @@ Aggregation(std::vector<std::shared_ptr<grpc::QueryResult>> results, milvus::grp
// one_node_res->entities().status().error_code() != grpc::ErrorCode::SUCCESS) {
return Status(DB_ERROR, "QueryNode return wrong status!");
}
// assert(result_per_node->row_num() == numQueries);
for (int i = 0; i < numQueries; i++) {
for (int j = i * topK; j < (i + 1) * topK && j < result_per_node->distances_size(); j++) {
all_scores.push_back(result_per_node->scores()[j]);
all_distance[i].push_back(result_per_node->distances()[j]);
all_entities_ids[i].push_back(result_per_node->entities().ids(j));
}
for (int j = 0; j < result_per_node->distances_size(); j++) {
all_scores.push_back(result_per_node->scores()[j]);
all_distance.push_back(result_per_node->distances()[j]);
// all_kv_pairs.push_back(result_per_node->extra_params()[j]);
}
for (int k = 0; k < result_per_node->entities().ids_size(); ++k) {
all_entities_ids.push_back(result_per_node->entities().ids(k));
// all_valid_row.push_back(result_per_node->entities().valid_row(k));
// all_row_data.push_back(result_per_node->entities().rows_data(k));
}
if (result_per_node->row_num() > row_num) {
row_num = result_per_node->row_num();
}
// for (int j = 0; j < result_per_node->distances_size(); j++) {
// all_scores.push_back(result_per_node->scores()[j]);
// all_distance.push_back(result_per_node->distances()[j]);
//// all_kv_pairs.push_back(result_per_node->extra_params()[j]);
// }
// for (int k = 0; k < result_per_node->entities().ids_size(); ++k) {
// all_entities_ids.push_back(result_per_node->entities().ids(k));
//// all_valid_row.push_back(result_per_node->entities().valid_row(k));
//// all_row_data.push_back(result_per_node->entities().rows_data(k));
// }
// if (result_per_node->row_num() > row_num) {
// row_num = result_per_node->row_num();
// }
status = result_per_node->status();
}
std::vector<std::vector<int>> index_array;
for (int i = 0; i < numQueries; i++) {
auto &distance = all_distance[i];
std::vector<int> index(distance.size());
std::vector<int> index(all_distance.size());
iota(index.begin(), index.end(), 0);
std::stable_sort(index.begin(), index.end(),
[&distance](size_t i1, size_t i2) { return distance[i1] < distance[i2]; });
index_array.emplace_back(index);
}
iota(index.begin(), index.end(), 0);
std::stable_sort(index.begin(), index.end(),
[&all_distance](size_t i1, size_t i2) { return all_distance[i1] > all_distance[i2]; });
grpc::Entities result_entities;
for (int i = 0; i < numQueries; i++) {
for (int m = 0; m < topK; ++m) {
result->add_scores(all_scores[index_array[i][m]]);
result->add_distances(all_distance[i][index_array[i][m]]);
for (int m = 0; m < result->row_num(); ++m) {
result->add_scores(all_scores[index[m]]);
result->add_distances(all_distance[index[m]]);
// result->add_extra_params();
// result->mutable_extra_params(m)->CopyFrom(all_kv_pairs[index[m]]);
result_entities.add_ids(all_entities_ids[i][index_array[i][m]]);
result_entities.add_ids(all_entities_ids[index[m]]);
// result_entities.add_valid_row(all_valid_row[index[m]]);
// result_entities.add_rows_data();
// result_entities.mutable_rows_data(m)->CopyFrom(all_row_data[index[m]]);
}
}
result_entities.mutable_status()->CopyFrom(status);
result->set_row_num(numQueries);
result->set_row_num(row_num);
result->mutable_entities()->CopyFrom(result_entities);
result->set_query_id(results[0]->query_id());
// result->set_client_id(results[0]->client_id());
@ -204,11 +167,7 @@ Status MsgClientV2::SendMutMessage(const milvus::grpc::InsertParam &request,
const std::function<uint64_t(const std::string &collection_name,
uint64_t channel_id,
uint64_t timestamp)> &segment_id) {
const uint64_t num_records_log = 100 * 10000;
static uint64_t num_inserted = 0;
static uint64_t size_inserted = 0;
using stdclock = std::chrono::high_resolution_clock;
static stdclock::duration time_cost;
auto start = stdclock::now();
// may have retry policy?
auto row_count = request.rows_data_size();
@ -227,14 +186,11 @@ Status MsgClientV2::SendMutMessage(const milvus::grpc::InsertParam &request,
mut_msg.set_collection_name(request.collection_name());
mut_msg.set_partition_tag(request.partition_tag());
uint64_t uid = request.entity_id_array(i);
// auto channel_id = makeHash(&uid, sizeof(uint64_t)) % topic_num;
//TODO: correctness of this channel assignment has not been verified
auto channel_id = this_thread;
auto channel_id = makeHash(&uid, sizeof(uint64_t)) % topic_num;
try {
mut_msg.set_segment_id(segment_id(request.collection_name(), channel_id, timestamp));
mut_msg.mutable_rows_data()->CopyFrom(request.rows_data(i));
mut_msg.mutable_extra_params()->CopyFrom(request.extra_params());
mut_msg.set_channel_id(channel_id);
auto callback = [&stats, &msg_sended, this_thread](Result result, const pulsar::MessageId &messageId) {
msg_sended += 1;
@ -242,7 +198,7 @@ Status MsgClientV2::SendMutMessage(const milvus::grpc::InsertParam &request,
stats[this_thread] = Status(DB_ERROR, pulsar::strResult(result));
}
};
paralle_mut_producers_[channel_id]->sendAsync(mut_msg, callback);
paralle_mut_producers_[this_thread]->sendAsync(mut_msg, callback);
}
catch (const std::exception &e) {
msg_sended += 1;
@ -253,35 +209,10 @@ Status MsgClientV2::SendMutMessage(const milvus::grpc::InsertParam &request,
}
auto end = stdclock::now();
time_cost += (end - start);
num_inserted += row_count;
size_inserted += request.ByteSize();
if (num_inserted >= num_records_log) {
// char buff[128];
// auto r = getcwd(buff, 128);
auto path = std::string("/tmp");
std::ofstream file(path + "/proxy2pulsar.benchmark", std::fstream::app);
nlohmann::json json;
json["InsertTime"] = milvus::CommonUtil::TimeToString(start);
json["DurationInMilliseconds"] = std::chrono::duration_cast<std::chrono::milliseconds>(time_cost).count();
json["SizeInMB"] = size_inserted / 1024.0 / 1024.0;
json["ThroughputInMB"] = double(size_inserted) / std::chrono::duration_cast<std::chrono::milliseconds>(time_cost).count() * 1000 / 1024.0 / 1024;
json["NumRecords"] = num_inserted;
file << json.dump() << std::endl;
/*
file << "[" << milvus::CommonUtil::TimeToString(start) << "]"
<< " Insert " << num_inserted << " records, "
<< "size:" << size_inserted / 1024.0 / 1024.0 << "M, "
<< "cost" << std::chrono::duration_cast<std::chrono::milliseconds>(time_cost).count() / 1000.0 << "s, "
<< "throughput: "
<< double(size_inserted) / std::chrono::duration_cast<std::chrono::milliseconds>(time_cost).count() * 1000 / 1024.0
/ 1024
<< "M/s" << std::endl;
*/
time_cost = stdclock::duration(0);
num_inserted = 0;
size_inserted = 0;
}
auto data_size = request.ByteSize();
LOG_SERVER_INFO_ << "InsertReq Batch size:" << data_size / 1024.0 / 1024.0 << "M, "
<< "throughput: " << data_size / std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() * 1000 / 1024.0 / 1024
<< "M/s";
for (auto &stat : stats) {
if (!stat.ok()) {
@ -328,9 +259,7 @@ Status MsgClientV2::SendMutMessage(const milvus::grpc::DeleteByIDParam &request,
auto end = stdclock::now();
auto data_size = request.ByteSize();
LOG_SERVER_INFO_ << "InsertReq Batch size:" << data_size / 1024.0 / 1024.0 << "M, "
<< "throughput: "
<< data_size / std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() * 1000
/ 1024.0 / 1024
<< "throughput: " << data_size / std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() * 1000 / 1024.0 / 1024
<< "M/s";
for (auto &stat : stats) {

View File

@ -45,11 +45,15 @@ namespace message_client {
}
Result MsgProducer::send(milvus::grpc::InsertOrDeleteMsg &msg) {
int32_t channel_id = makeHash(std::to_string(msg.uid())) % 1024;
msg.set_channel_id(channel_id);
auto msg_str = msg.SerializeAsString();
return send(msg_str, msg.uid());
}
void MsgProducer::sendAsync(milvus::grpc::InsertOrDeleteMsg &msg, pulsar::SendCallback callback) {
int32_t channel_id = makeHash(std::to_string(msg.uid())) % 1024;
msg.set_channel_id(channel_id);
auto msg_str = msg.SerializeAsString();
return sendAsync(msg_str, msg.uid(), callback);
}

View File

@ -15,29 +15,13 @@ Status MessageWrapper::Init() {
(std::string{"pulsar://"} + config.pulsar.address() + ":" + std::to_string(config.pulsar.port()));
int client_id = config.proxy_id();
msg_client_ = std::make_shared<message_client::MsgClientV2>(client_id, pulsar_server_addr, config.pulsar.topicnum());
Status status;
if (config.pulsar.authentication) {
std::string insert_or_delete_topic_name = "InsertOrDelete-" + config.pulsar.user.value;
std::string search_topic_name = "Search-" + config.pulsar.user.value;
std::string search_by_id_topic_name = "SearchById-" + config.pulsar.user.value;
std::string search_result = "SearchResult-" + config.pulsar.user.value;
status = msg_client_->Init(insert_or_delete_topic_name,
search_topic_name,
"TimeSync",
search_by_id_topic_name,
search_result);
} else {
status = msg_client_->Init("InsertOrDelete", "Search", "TimeSync", "SearchById", "SearchResult");
}
// timeSync
time_sync_ = std::make_shared<timesync::TimeSync>(client_id, GetMessageTimeSyncTime, config.timesync.interval(), pulsar_server_addr, "TimeSync");
auto status = msg_client_->Init("InsertOrDelete", "Search", "TimeSync", "SearchById", "SearchResult");
if (!status.ok()){
return status;
}
// timeSync
time_sync_ = std::make_shared<timesync::TimeSync>(client_id, GetMessageTimeSyncTime, config.timesync.interval(), pulsar_server_addr, "TimeSync");
return status;
}
const std::shared_ptr<message_client::MsgClientV2> &MessageWrapper::MessageClient() {

View File

@ -72,7 +72,7 @@ Status MetaWrapper::Init() {
auto f = [&](const etcdserverpb::WatchResponse &res) {
UpdateMeta(res);
};
watcher_ = std::make_shared<milvus::master::Watcher>(etcd_addr, etcd_root_path_, f, true);
watcher_ = std::make_shared<milvus::master::Watcher>(etcd_addr, segment_path_, f, true);
return SyncMeta();
}
catch (const std::exception &e) {

View File

@ -23,9 +23,6 @@
#include <unordered_map>
#include <utility>
#include <vector>
#include <unistd.h>
#include "utils/CommonUtil.h"
#include "nlohmann/json.hpp"
#ifdef ENABLE_CPU_PROFILING
#include <gperftools/profiler.h>
@ -46,84 +43,15 @@ InsertReq::Create(const ContextPtr &context, const ::milvus::grpc::InsertParam *
Status
InsertReq::OnExecute() {
#ifndef BENCHMARK
#define BENCHMARK
#endif
#ifdef BENCHMARK
const uint64_t count_msg_num = 50000 * 10;
const double MB = 1024 * 1024;
using stdclock = std::chrono::high_resolution_clock;
static uint64_t inserted_count, inserted_size = 0;
static stdclock::time_point start, end;
const int interval = 2;
const int per_log_records = 10000 * 100;
static uint64_t ready_log_records = 0;
static int log_flag = 0;
static bool shouldBenchmark = false;
static std::stringstream log;
// char buff[128];
// auto r = getcwd(buff, 128);
auto path = std::string("/tmp");
std::ofstream file(path + "/proxy.benchmark", std::fstream::app);
#endif
LOG_SERVER_INFO_ << LogOut("[%s][%ld] ", "insert", 0) << "Execute InsertReq.";
auto &msg_client = MessageWrapper::GetInstance().MessageClient();
auto segment_id = [](const std::string &collection_name,
uint64_t channel_id,
uint64_t timestamp) {
return MetaWrapper::GetInstance().AskSegmentId(collection_name, channel_id, timestamp);
return MetaWrapper::GetInstance().AskSegmentId(collection_name, channel_id, timestamp);
};
#ifdef BENCHMARK
if (inserted_count >= count_msg_num && !shouldBenchmark) {
shouldBenchmark = true;
start = stdclock::now();
inserted_count = 0;
inserted_size = 0;
}
#endif
Status status;
status = msg_client->SendMutMessage(*insert_param_, timestamp_, segment_id);
#ifdef BENCHMARK
inserted_count += insert_param_->rows_data_size();
inserted_size += insert_param_->ByteSize();
if (shouldBenchmark) {
end = stdclock::now();
ready_log_records += inserted_count;
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() / 1000.0;
if (duration > interval) {
nlohmann::json json;
json["InsertTime"] = milvus::CommonUtil::TimeToString(start);
json["DurationInMilliseconds"] = duration * 1000;
json["SizeInMB"] = inserted_size / MB;
json["ThroughputInMB"] = double(inserted_size) / duration / MB;
json["NumRecords"] = inserted_count;
file << json.dump() << std::endl;
// log << "[" << milvus::CommonUtil::TimeToString(start) << "] "
// << "Insert "
// << inserted_count << " records, "
// << "size: " << inserted_size / MB << "MB, "
// << "cost: " << duration << "s, "
// << "throughput: "
// << double(inserted_size) / duration / MB
// << "M/s\n";
auto new_flag = ready_log_records / per_log_records;
if (new_flag != log_flag) {
log_flag = new_flag;
file << log.str();
file.flush();
log.str("");
}
inserted_size = 0;
inserted_count = 0;
start = stdclock::now();
}
}
#endif
return status;
}

View File

@ -25,17 +25,9 @@ TimeSync::TimeSync(int64_t id,
timestamp_(timestamp), interval_(interval), pulsar_addr_(pulsar_addr), time_sync_topic_(time_sync_topic) {
sync_msg_.set_peer_id(id);
auto timer = [&]() {
//create pulsar client
std::shared_ptr<milvus::message_client::MsgClient> pulsar_client;
if (config.pulsar.authentication) {
pulsar::ClientConfiguration clientConfig;
clientConfig.setAuth(pulsar::AuthToken::createWithToken(config.pulsar.token.value));
pulsar_client = std::make_shared<milvus::message_client::MsgClient>(this->pulsar_addr_, clientConfig);
} else {
pulsar_client = std::make_shared<milvus::message_client::MsgClient>(this->pulsar_addr_);
}
milvus::message_client::MsgProducer producer(pulsar_client, this->time_sync_topic_);
std::shared_ptr<milvus::message_client::MsgClient>
client = std::make_shared<milvus::message_client::MsgClient>(this->pulsar_addr_);
milvus::message_client::MsgProducer producer(client, this->time_sync_topic_);
for (;;) {
if (this->stop_) break;
@ -52,7 +44,7 @@ TimeSync::TimeSync(int64_t id,
if (rst != pulsar::ResultOk) {
//TODO, add log or throw exception
}
rst = pulsar_client->close();
rst = client->close();
if (rst != pulsar::ResultOk) {
//TODO, add log or throw exception
}

View File

@ -175,14 +175,6 @@ CommonUtil::TimeStrToTime(const std::string& time_str, time_t& time_integer, tm&
return true;
}
std::string CommonUtil::TimeToString(std::chrono::high_resolution_clock::time_point t) {
std::time_t tt = std::chrono::system_clock::to_time_t(t);
char buf[100] = {0};
std::strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", std::localtime(&tt));
return std::string(buf);
}
void
CommonUtil::ConvertTime(time_t time_integer, tm& time_struct) {
localtime_r(&time_integer, &time_struct);

View File

@ -15,7 +15,6 @@
#include <time.h>
#include <string>
#include <chrono>
namespace milvus {
@ -41,9 +40,6 @@ class CommonUtil {
TimeStrToTime(const std::string& time_str, time_t& time_integer, tm& time_struct,
const std::string& format = "%d-%d-%d %d:%d:%d");
static std::string
TimeToString(std::chrono::high_resolution_clock::time_point t);
static void
ConvertTime(time_t time_integer, tm& time_struct);
static void

View File

@ -64,7 +64,7 @@ add_custom_target(generate_suvlim_pb_grpc ALL DEPENDS protoc grpc_cpp_plugin)
add_custom_command(TARGET generate_suvlim_pb_grpc
POST_BUILD
COMMAND echo "${PROTOC_EXCUTABLE}"
# COMMAND bash "${PROTO_GEN_SCRIPTS_DIR}/generate_go.sh" -p "${PROTOC_EXCUTABLE}"
COMMAND bash "${PROTO_GEN_SCRIPTS_DIR}/generate_go.sh" -p "${PROTOC_EXCUTABLE}"
COMMAND bash "${PROTO_GEN_SCRIPTS_DIR}/generate_cpp.sh" -p "${PROTOC_EXCUTABLE}" -g "${GRPC_CPP_PLUGIN_EXCUTABLE}"
COMMAND ${PROTOC_EXCUTABLE} -I "${PROTO_PATH}/proto" --grpc_out "${PROTO_PATH}" --cpp_out "${PROTO_PATH}"
--plugin=protoc-gen-grpc="${GRPC_CPP_PLUGIN_EXCUTABLE}"

View File

@ -20,8 +20,9 @@ set( CONFIG_SRCS
)
set(unittest_srcs
ServerConfigTest.cpp
ConfigTypeTest1.cpp
# ConfigTypeTest2.cpp
ConfigTypeTest2.cpp
)

View File

@ -0,0 +1,19 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include <gtest/gtest.h>
#include "config/ServerConfig.h"
TEST(ServerConfigTest, parse_invalid_devices) {
auto devices = milvus::ParseGPUDevices("gpu0,gpu1");
ASSERT_EQ(devices.size(), 0);
}

View File

@ -8,8 +8,8 @@ set( GRPC_SERVICE_FILES
set(unittest_srcs
unittest_entry.cpp
consumer_test.cpp
# producer_test.cpp
# get_result_test.cpp
producer_test.cpp
get_result_test.cpp
test_pulsar.cpp)
@ -24,7 +24,6 @@ target_include_directories(test_pulsar PUBLIC ${PROJECT_BINARY_DIR}/thirdparty/a
target_link_libraries(test_pulsar
message_client_cpp
log
libboost_filesystem.a
libboost_system.a
libboost_serialization.a

View File

@ -1,23 +1,15 @@
#include <gtest/gtest.h>
#include "message_client/Consumer.h"
#include "message_client/Producer.h"
#include "grpc/message.pb.h"
TEST(CLIENT_CPP, CONSUMER) {
auto client = std::make_shared<milvus::message_client::MsgClient>("pulsar://localhost:6650");
milvus::message_client::MsgProducer producer(client, "test");
milvus::grpc::Status msg;
msg.set_error_code(::milvus::grpc::SUCCESS);
msg.set_reason("no reason");
std::string to_string = msg.SerializeAsString();
producer.send(to_string);
producer.close();
milvus::message_client::MsgConsumer consumer(client, "my_consumer");
consumer.subscribe("test");
auto res = consumer.receive(msg);
auto client= std::make_shared<milvus::message_client::MsgClient>("pulsar://localhost:6650");
milvus::message_client::MsgConsumer consumer(client, "my_consumer");
consumer.subscribe("test");
milvus::grpc::Status msg;
auto res = consumer.receive(msg);
// pb::TestData* data = (pb::TestData*)(msg.get());
std::cout << "Received: with payload reason" << msg.reason();
consumer.close();
client->close();
std::cout << "Received: with payload reason" << msg.reason();
consumer.close();
client->close();
}

View File

@ -20,7 +20,7 @@ TEST(CLIENT_CPP, PRODUCE_INSERT) {
int64_t offset = 1;
milvus::grpc::RowData data;
milvus::grpc::InsertOrDeleteMsg msg;
while (offset <= 1000) {
while (offset <= 100000) {
data.set_blob("a blob");
msg.set_collection_name("zilliz");
msg.set_partition_tag("milvus");
@ -33,8 +33,8 @@ TEST(CLIENT_CPP, PRODUCE_INSERT) {
std::string to_string = msg.SerializeAsString();
producer.send(to_string);
// if (offset % 20 == 0)
// usleep(200000);
if (offset % 20 == 0)
usleep(200000);
offset++;
}
// producer.close();

View File

@ -1,7 +1,4 @@
#include <gtest/gtest.h>
#include "easyloggingpp/easylogging++.h"
INITIALIZE_EASYLOGGINGPP
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);

View File

@ -1,28 +1,16 @@
package main
import (
"context"
"flag"
"fmt"
"github.com/czs007/suvlim/conf"
reader "github.com/czs007/suvlim/reader/read_node"
"strconv"
)
func main() {
ctx, _ := context.WithCancel(context.Background())
var yamlFile string
flag.StringVar(&yamlFile, "yaml", "", "yaml file")
flag.Parse()
// flag.Usage()
fmt.Println("yaml file: ", yamlFile)
conf.LoadConfig(yamlFile)
pulsarAddr := "pulsar://"
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
reader.StartQueryNode(ctx, pulsarAddr)
reader.StartQueryNode(pulsarAddr)
}

View File

@ -14,9 +14,6 @@ import (
)
type MessageClient struct {
// context
ctx context.Context
// timesync
timeSyncCfg *timesync.ReaderTimeSyncCfg
@ -25,12 +22,12 @@ type MessageClient struct {
key2SegChan chan *msgpb.Key2SegMsg
// pulsar
client pulsar.Client
client pulsar.Client
//searchResultProducer pulsar.Producer
searchResultProducers map[int64]pulsar.Producer
segmentsStatisticProducer pulsar.Producer
searchConsumer pulsar.Consumer
key2segConsumer pulsar.Consumer
searchConsumer pulsar.Consumer
key2segConsumer pulsar.Consumer
// batch messages
InsertOrDeleteMsg []*msgpb.InsertOrDeleteMsg
@ -45,7 +42,11 @@ type MessageClient struct {
}
func (mc *MessageClient) GetTimeNow() uint64 {
return mc.timestampBatchEnd
msg, ok := <-mc.timeSyncCfg.TimeSync()
if !ok {
fmt.Println("can't get data from timesync chan")
}
return msg.Timestamp
}
func (mc *MessageClient) TimeSyncStart() uint64 {
@ -82,45 +83,27 @@ func (mc *MessageClient) GetSearchChan() <-chan *msgpb.SearchMsg {
func (mc *MessageClient) receiveSearchMsg() {
for {
select {
case <-mc.ctx.Done():
return
default:
searchMsg := msgpb.SearchMsg{}
msg, err := mc.searchConsumer.Receive(mc.ctx)
if err != nil {
log.Println(err)
continue
}
err = proto.Unmarshal(msg.Payload(), &searchMsg)
if err != nil {
log.Fatal(err)
}
mc.searchChan <- &searchMsg
mc.searchConsumer.Ack(msg)
searchMsg := msgpb.SearchMsg{}
msg, err := mc.searchConsumer.Receive(context.Background())
if err != nil {
log.Fatal(err) // check the receive error before dereferencing msg
}
err = proto.Unmarshal(msg.Payload(), &searchMsg)
if err != nil {
log.Fatal(err)
}
mc.searchChan <- &searchMsg
mc.searchConsumer.Ack(msg)
}
}
func (mc *MessageClient) receiveKey2SegMsg() {
for {
select {
case <-mc.ctx.Done():
return
default:
key2SegMsg := msgpb.Key2SegMsg{}
msg, err := mc.key2segConsumer.Receive(mc.ctx)
if err != nil {
log.Println(err)
continue
}
err = proto.Unmarshal(msg.Payload(), &key2SegMsg)
if err != nil {
log.Fatal(err)
}
mc.key2SegChan <- &key2SegMsg
mc.key2segConsumer.Ack(msg)
key2SegMsg := msgpb.Key2SegMsg{}
msg, err := mc.key2segConsumer.Receive(context.Background())
if err != nil {
log.Fatal(err) // check the receive error before dereferencing msg
}
err = proto.Unmarshal(msg.Payload(), &key2SegMsg)
if err != nil {
log.Fatal(err)
}
mc.key2SegChan <- &key2SegMsg
mc.key2segConsumer.Ack(msg)
}
}
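Both receive loops share one shape: block on Consumer.Receive, unmarshal the protobuf payload, hand it off on a channel, then Ack. A minimal sketch of that pattern with cooperative shutdown through a context; the client import path and the plain []byte hand-off are illustrative assumptions:

import (
	"context"
	"log"

	"github.com/apache/pulsar-client-go/pulsar" // assumed import path for the client
)

// receiveLoop drains a Pulsar consumer until ctx is cancelled.
func receiveLoop(ctx context.Context, consumer pulsar.Consumer, out chan<- []byte) {
	for {
		msg, err := consumer.Receive(ctx) // returns when a message arrives or ctx ends
		if err != nil {
			if ctx.Err() != nil {
				return // parent cancelled: stop receiving
			}
			log.Println(err)
			continue
		}
		out <- msg.Payload()
		consumer.Ack(msg) // ack only after hand-off, so an unacked message can be redelivered
	}
}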
@ -159,20 +142,7 @@ func (mc *MessageClient) createConsumer(topicName string) pulsar.Consumer {
}
func (mc *MessageClient) createClient(url string) pulsar.Client {
if conf.Config.Pulsar.Authentication {
// create client with Authentication
client, err := pulsar.NewClient(pulsar.ClientOptions{
URL: url,
Authentication: pulsar.NewAuthenticationToken(conf.Config.Pulsar.Token),
})
if err != nil {
log.Fatal(err)
}
return client
}
// create client without Authentication
// create client
client, err := pulsar.NewClient(pulsar.ClientOptions{
URL: url,
})
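Client construction branches once on authentication: with a token configured, the ClientOptions carry an Authentication provider; otherwise only the URL is set. A condensed sketch (taking the token as a parameter is an illustrative stand-in for reading conf.Config.Pulsar.Token):

// newPulsarClient builds a client, attaching token auth only when configured.
func newPulsarClient(url, token string) (pulsar.Client, error) {
	opts := pulsar.ClientOptions{URL: url}
	if token != "" {
		opts.Authentication = pulsar.NewAuthenticationToken(token)
	}
	return pulsar.NewClient(opts)
}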
@ -183,10 +153,7 @@ func (mc *MessageClient) createClient(url string) pulsar.Client {
return client
}
func (mc *MessageClient) InitClient(ctx context.Context, url string) {
// init context
mc.ctx = ctx
func (mc *MessageClient) InitClient(url string) {
//create client
mc.client = mc.createClient(url)
mc.MessageClientID = conf.Config.Reader.ClientId
@ -194,23 +161,8 @@ func (mc *MessageClient) InitClient(ctx context.Context, url string) {
//create producer
mc.searchResultProducers = make(map[int64]pulsar.Producer)
proxyIdList := conf.Config.Master.ProxyIdList
searchResultTopicName := "SearchResult-"
searchTopicName := "Search"
key2SegTopicName := "Key2Seg"
timeSyncTopicName := "TimeSync"
insertOrDeleteTopicName := "InsertOrDelete-"
if conf.Config.Pulsar.Authentication {
searchResultTopicName = "SearchResult-" + conf.Config.Pulsar.User + "-"
searchTopicName = "Search-" + conf.Config.Pulsar.User
key2SegTopicName = "Key2Seg-" + conf.Config.Pulsar.User
// timeSyncTopicName = "TimeSync-" + conf.Config.Pulsar.User
insertOrDeleteTopicName = "InsertOrDelete-" + conf.Config.Pulsar.User + "-"
}
for _, key := range proxyIdList {
topic := searchResultTopicName
for _, key := range proxyIdList{
topic := "SearchResult-"
topic = topic + strconv.Itoa(int(key))
mc.searchResultProducers[key] = mc.creatProducer(topic)
}
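When authentication is on, every topic name is namespaced by the Pulsar user so tenants do not share streams: a proxy id list of [0, 1] and user "alice" (hypothetical values) yields "SearchResult-alice-0" and "SearchResult-alice-1". A sketch of that derivation:

import "strconv"

// resultTopics derives per-proxy result topic names, optionally
// namespaced by the Pulsar user (the helper name is illustrative).
func resultTopics(user string, proxyIDs []int64, authenticated bool) []string {
	base := "SearchResult-"
	if authenticated {
		base += user + "-"
	}
	topics := make([]string, 0, len(proxyIDs))
	for _, id := range proxyIDs {
		topics = append(topics, base+strconv.Itoa(int(id)))
	}
	return topics
}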
@ -219,8 +171,8 @@ func (mc *MessageClient) InitClient(ctx context.Context, url string) {
mc.segmentsStatisticProducer = mc.creatProducer(SegmentsStatisticTopicName)
//create consumer
mc.searchConsumer = mc.createConsumer(searchTopicName)
mc.key2segConsumer = mc.createConsumer(key2SegTopicName)
mc.searchConsumer = mc.createConsumer("Search")
mc.key2segConsumer = mc.createConsumer("Key2Seg")
// init channel
mc.searchChan = make(chan *msgpb.SearchMsg, conf.Config.Reader.SearchChanSize)
@ -230,19 +182,18 @@ func (mc *MessageClient) InitClient(ctx context.Context, url string) {
mc.Key2SegMsg = make([]*msgpb.Key2SegMsg, 0)
//init timesync
timeSyncTopic := timeSyncTopicName
timeSyncTopic := "TimeSync"
timeSyncSubName := "reader" + strconv.Itoa(mc.MessageClientID)
readTopics := make([]string, 0)
for i := conf.Config.Reader.TopicStart; i < conf.Config.Reader.TopicEnd; i++ {
str := insertOrDeleteTopicName
str := "ManipulationReqMsg-"
str = str + strconv.Itoa(i)
readTopics = append(readTopics, str)
}
readSubName := "reader" + strconv.Itoa(mc.MessageClientID)
readerQueueSize := timesync.WithReaderQueueSize(conf.Config.Reader.ReaderQueueSize)
timeSync, err := timesync.NewReaderTimeSync(ctx,
timeSyncTopic,
timeSync, err := timesync.NewReaderTimeSync(timeSyncTopic,
timeSyncSubName,
readTopics,
readSubName,
@ -253,7 +204,6 @@ func (mc *MessageClient) InitClient(ctx context.Context, url string) {
log.Fatal(err)
}
mc.timeSyncCfg = timeSync.(*timesync.ReaderTimeSyncCfg)
mc.timeSyncCfg.RoleType = timesync.Reader
mc.timestampBatchStart = 0
mc.timestampBatchEnd = 0
@ -261,26 +211,14 @@ func (mc *MessageClient) InitClient(ctx context.Context, url string) {
}
func (mc *MessageClient) Close() {
if mc.client != nil {
mc.client.Close()
}
mc.client.Close()
for key, _ := range mc.searchResultProducers {
if mc.searchResultProducers[key] != nil {
mc.searchResultProducers[key].Close()
}
}
if mc.segmentsStatisticProducer != nil {
mc.segmentsStatisticProducer.Close()
}
if mc.searchConsumer != nil {
mc.searchConsumer.Close()
}
if mc.key2segConsumer != nil {
mc.key2segConsumer.Close()
}
if mc.timeSyncCfg != nil {
mc.timeSyncCfg.Close()
mc.searchResultProducers[key].Close()
}
mc.segmentsStatisticProducer.Close()
mc.searchConsumer.Close()
mc.key2segConsumer.Close()
mc.timeSyncCfg.Close()
}
type MessageType int

View File

@ -1,7 +1,6 @@
package reader
import (
"context"
"encoding/binary"
"fmt"
msgPb "github.com/czs007/suvlim/pkg/master/grpc/message"
@ -13,8 +12,7 @@ import (
func TestIndex_BuildIndex(t *testing.T) {
// 1. Construct node, collection, partition and segment
ctx := context.Background()
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -76,5 +74,4 @@ func TestIndex_BuildIndex(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}

View File

@ -1,6 +1,7 @@
package reader
import (
"context"
"fmt"
"log"
"path"
@ -35,9 +36,9 @@ func GetSegmentObjId(key string) string {
func isCollectionObj(key string) bool {
prefix := path.Join(conf.Config.Etcd.Rootpath, CollectonPrefix) + "/"
prefix = strings.TrimSpace(prefix)
// println("prefix is :$", prefix)
println("prefix is :$", prefix)
index := strings.Index(key, prefix)
// println("index is :", index)
println("index is :", index)
return index == 0
}
@ -53,15 +54,8 @@ func isSegmentChannelRangeInQueryNodeChannelRange(segment *mock.Segment) bool {
log.Printf("Illegal segment channel range")
return false
}
var queryNodeChannelStart = conf.Config.Reader.TopicStart
var queryNodeChannelEnd = conf.Config.Reader.TopicEnd
if segment.ChannelStart >= queryNodeChannelStart && segment.ChannelEnd <= queryNodeChannelEnd {
return true
}
return false
// TODO: add query node channel range check
return true
}
func printCollectionStruct(obj *mock.Collection) {
@ -94,7 +88,7 @@ func (node *QueryNode) processCollectionCreate(id string, value string) {
println("error of json 2 collection")
println(err.Error())
}
//printCollectionStruct(collection)
printCollectionStruct(collection)
newCollection := node.NewCollection(collection.ID, collection.Name, collection.GrpcMarshalString)
for _, partitionTag := range collection.PartitionTags {
newCollection.NewPartition(partitionTag)
@ -108,11 +102,12 @@ func (node *QueryNode) processSegmentCreate(id string, value string) {
println("error of json 2 segment")
println(err.Error())
}
//printSegmentStruct(segment)
printSegmentStruct(segment)
if !isSegmentChannelRangeInQueryNodeChannelRange(segment) {
return
}
// TODO: fix this after channel range config finished
//if !isSegmentChannelRangeInQueryNodeChannelRange(segment) {
// return
//}
collection := node.GetCollectionByID(segment.CollectionID)
if collection != nil {
@ -130,7 +125,7 @@ func (node *QueryNode) processSegmentCreate(id string, value string) {
}
func (node *QueryNode) processCreate(key string, msg string) {
println("process create", key)
println("process create", key, ":", msg)
if isCollectionObj(key) {
objID := GetCollectionObjId(key)
node.processCollectionCreate(objID, msg)
@ -143,18 +138,19 @@ func (node *QueryNode) processCreate(key string, msg string) {
}
func (node *QueryNode) processSegmentModify(id string, value string) {
// println("Modify Segment: ", id)
println("Modify Segment: ", id)
segment, err := mock.JSON2Segment(value)
if err != nil {
println("error of json 2 segment")
println(err.Error())
}
// printSegmentStruct(segment)
printSegmentStruct(segment)
if !isSegmentChannelRangeInQueryNodeChannelRange(segment) {
return
}
// TODO: fix this after channel range config finished
//if !isSegmentChannelRangeInQueryNodeChannelRange(segment) {
// return
//}
seg, err := node.GetSegmentBySegmentID(int64(segment.SegmentID)) // todo change to uint64
if seg != nil {
@ -163,13 +159,13 @@ func (node *QueryNode) processSegmentModify(id string, value string) {
}
func (node *QueryNode) processCollectionModify(id string, value string) {
// println("Modify Collection: ", id)
println("Modify Collection: ", id)
collection, err := mock.JSON2Collection(value)
if err != nil {
println("error of json 2 collection")
println(err.Error())
}
// printCollectionStruct(collection)
printCollectionStruct(collection)
goCollection := node.GetCollectionByID(collection.ID)
if goCollection != nil {
@ -179,7 +175,7 @@ func (node *QueryNode) processCollectionModify(id string, value string) {
}
func (node *QueryNode) processModify(key string, msg string) {
// println("process modify")
println("process modify")
if isCollectionObj(key) {
objID := GetCollectionObjId(key)
node.processCollectionModify(objID, msg)
@ -218,7 +214,7 @@ func (node *QueryNode) processResp(resp clientv3.WatchResponse) error {
if err != nil {
return err
}
// println("processResp!!!!!\n")
println("processResp!!!!!\n")
for _, ev := range resp.Events {
if ev.IsCreate() {
@ -273,12 +269,12 @@ func (node *QueryNode) InitFromMeta() error {
return nil
}
func (node *QueryNode) RunMetaService(wg *sync.WaitGroup) {
func (node *QueryNode) RunMetaService(ctx context.Context, wg *sync.WaitGroup) {
//node.InitFromMeta()
metaChan := node.kvBase.WatchWithPrefix("")
for {
select {
case <-node.ctx.Done():
case <-ctx.Done():
wg.Done()
println("DONE!!!!!!")
return

View File

@ -14,13 +14,8 @@ package reader
import "C"
import (
"context"
"encoding/json"
"fmt"
"github.com/czs007/suvlim/conf"
msgPb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/czs007/suvlim/pkg/master/kv"
"github.com/czs007/suvlim/reader/message_client"
"github.com/stretchr/testify/assert"
"log"
"sort"
@ -28,6 +23,9 @@ import (
"sync/atomic"
"time"
msgPb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/czs007/suvlim/pkg/master/kv"
"github.com/czs007/suvlim/reader/message_client"
//"github.com/stretchr/testify/assert"
)
@ -70,27 +68,11 @@ type QueryInfo struct {
type MsgCounter struct {
InsertCounter int64
InsertTime time.Time
DeleteCounter int64
DeleteTime time.Time
SearchCounter int64
SearchTime time.Time
}
type InsertLog struct {
MsgLength int
DurationInMilliseconds int64
InsertTime time.Time
NumSince int64
Speed float64
}
type QueryNode struct {
// context
ctx context.Context
QueryNodeId uint64
Collections []*Collection
SegmentsMap map[int64]*Segment
@ -103,17 +85,16 @@ type QueryNode struct {
insertData InsertData
kvBase *kv.EtcdKVBase
msgCounter *MsgCounter
InsertLogs []InsertLog
}
func NewQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64) *QueryNode {
func NewQueryNode(queryNodeId uint64, timeSync uint64) *QueryNode {
mc := message_client.MessageClient{}
queryNodeTimeSync := &QueryNodeTime{
ReadTimeSyncMin: timeSync,
ReadTimeSyncMax: timeSync,
WriteTimeSync: timeSync,
ServiceTimeSync: timeSync,
ServiceTimeSync: timeSync,
TSOTimeSync: timeSync,
}
@ -133,7 +114,6 @@ func NewQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64) *Que
}
return &QueryNode{
ctx: ctx,
QueryNodeId: queryNodeId,
Collections: nil,
SegmentsMap: segmentsMap,
@ -145,20 +125,16 @@ func NewQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64) *Que
}
func (node *QueryNode) Close() {
if node.messageClient != nil {
node.messageClient.Close()
}
if node.kvBase != nil {
node.kvBase.Close()
}
node.messageClient.Close()
node.kvBase.Close()
}
func CreateQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64, mc *message_client.MessageClient) *QueryNode {
func CreateQueryNode(queryNodeId uint64, timeSync uint64, mc *message_client.MessageClient) *QueryNode {
queryNodeTimeSync := &QueryNodeTime{
ReadTimeSyncMin: timeSync,
ReadTimeSyncMax: timeSync,
WriteTimeSync: timeSync,
ServiceTimeSync: timeSync,
ServiceTimeSync: timeSync,
TSOTimeSync: timeSync,
}
@ -173,15 +149,11 @@ func CreateQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64, m
msgCounter := MsgCounter{
InsertCounter: 0,
InsertTime: time.Now(),
DeleteCounter: 0,
DeleteTime: time.Now(),
SearchCounter: 0,
SearchTime: time.Now(),
}
return &QueryNode{
ctx: ctx,
QueryNodeId: queryNodeId,
Collections: nil,
SegmentsMap: segmentsMap,
@ -189,7 +161,6 @@ func CreateQueryNode(ctx context.Context, queryNodeId uint64, timeSync uint64, m
queryNodeTimeSync: queryNodeTimeSync,
buffer: buffer,
msgCounter: &msgCounter,
InsertLogs: make([]InsertLog, 0),
}
}
@ -274,70 +245,69 @@ func (node *QueryNode) InitQueryNodeCollection() {
func (node *QueryNode) RunInsertDelete(wg *sync.WaitGroup) {
const Debug = true
const CountInsertMsgBaseline = 1000 * 1000
var BaselineCounter int64 = 0
const CountMsgNum = 1000 * 1000
if Debug {
var printFlag = true
var startTime = true
var start time.Time
for {
select {
case <-node.ctx.Done():
wg.Done()
return
default:
var msgLen = node.PrepareBatchMsg()
var timeRange = TimeRange{node.messageClient.TimeSyncStart(), node.messageClient.TimeSyncEnd()}
assert.NotEqual(nil, 0, timeRange.timestampMin)
assert.NotEqual(nil, 0, timeRange.timestampMax)
var msgLen = node.PrepareBatchMsg()
var timeRange = TimeRange{node.messageClient.TimeSyncStart(), node.messageClient.TimeSyncEnd()}
assert.NotEqual(nil, 0, timeRange.timestampMin)
assert.NotEqual(nil, 0, timeRange.timestampMax)
if node.msgCounter.InsertCounter/CountInsertMsgBaseline != BaselineCounter {
node.WriteQueryLog()
BaselineCounter = node.msgCounter.InsertCounter / CountInsertMsgBaseline
}
if msgLen[0] == 0 && len(node.buffer.InsertDeleteBuffer) <= 0 {
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
continue
}
node.QueryNodeDataInit()
node.MessagesPreprocess(node.messageClient.InsertOrDeleteMsg, timeRange)
//fmt.Println("MessagesPreprocess Done")
node.WriterDelete()
node.PreInsertAndDelete()
//fmt.Println("PreInsertAndDelete Done")
node.DoInsertAndDelete()
//fmt.Println("DoInsertAndDelete Done")
if msgLen[0] == 0 && len(node.buffer.InsertDeleteBuffer) <= 0 {
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
continue
}
if startTime {
fmt.Println("============> Start Test <============")
startTime = false
start = time.Now()
}
node.QueryNodeDataInit()
node.MessagesPreprocess(node.messageClient.InsertOrDeleteMsg, timeRange)
//fmt.Println("MessagesPreprocess Done")
node.WriterDelete()
node.PreInsertAndDelete()
//fmt.Println("PreInsertAndDelete Done")
node.DoInsertAndDelete()
//fmt.Println("DoInsertAndDelete Done")
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
// Test insert time
if printFlag && node.msgCounter.InsertCounter >= CountMsgNum {
printFlag = false
timeSince := time.Since(start)
fmt.Println("============> Do", node.msgCounter.InsertCounter, "Insert in", timeSince, "<============")
}
}
} else {
for {
select {
case <-node.ctx.Done():
wg.Done()
return
default:
var msgLen = node.PrepareBatchMsg()
var timeRange = TimeRange{node.messageClient.TimeSyncStart(), node.messageClient.TimeSyncEnd()}
assert.NotEqual(nil, 0, timeRange.timestampMin)
assert.NotEqual(nil, 0, timeRange.timestampMax)
}
if msgLen[0] == 0 && len(node.buffer.InsertDeleteBuffer) <= 0 {
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
continue
}
for {
var msgLen = node.PrepareBatchMsg()
var timeRange = TimeRange{node.messageClient.TimeSyncStart(), node.messageClient.TimeSyncEnd()}
assert.NotEqual(nil, 0, timeRange.timestampMin)
assert.NotEqual(nil, 0, timeRange.timestampMax)
node.QueryNodeDataInit()
node.MessagesPreprocess(node.messageClient.InsertOrDeleteMsg, timeRange)
//fmt.Println("MessagesPreprocess Done")
node.WriterDelete()
node.PreInsertAndDelete()
//fmt.Println("PreInsertAndDelete Done")
node.DoInsertAndDelete()
//fmt.Println("DoInsertAndDelete Done")
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
}
if msgLen[0] == 0 && len(node.buffer.InsertDeleteBuffer) <= 0 {
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
continue
}
node.QueryNodeDataInit()
node.MessagesPreprocess(node.messageClient.InsertOrDeleteMsg, timeRange)
//fmt.Println("MessagesPreprocess Done")
node.WriterDelete()
node.PreInsertAndDelete()
//fmt.Println("PreInsertAndDelete Done")
node.DoInsertAndDelete()
//fmt.Println("DoInsertAndDelete Done")
node.queryNodeTimeSync.UpdateSearchTimeSync(timeRange)
}
wg.Done()
}
@ -358,23 +328,20 @@ func (node *QueryNode) TestInsertDelete(timeRange TimeRange) {
func (node *QueryNode) RunSearch(wg *sync.WaitGroup) {
for {
select {
case <-node.ctx.Done():
wg.Done()
return
case msg := <-node.messageClient.GetSearchChan():
node.messageClient.SearchMsg = node.messageClient.SearchMsg[:0]
node.messageClient.SearchMsg = append(node.messageClient.SearchMsg, msg)
//for {
//if node.messageClient.SearchMsg[0].Timestamp < node.queryNodeTimeSync.ServiceTimeSync {
var status = node.Search(node.messageClient.SearchMsg)
fmt.Println("Do Search done")
if status.ErrorCode != 0 {
fmt.Println("Search Failed")
node.PublishFailedSearchResult()
fmt.Println("Do Search...")
for {
if node.messageClient.SearchMsg[0].Timestamp < node.queryNodeTimeSync.ServiceTimeSync {
var status = node.Search(node.messageClient.SearchMsg)
if status.ErrorCode != 0 {
fmt.Println("Search Failed")
node.PublishFailedSearchResult()
}
break
}
}
//break
//}
//}
default:
}
}
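RunSearch gates each request on the reader time sync: a search message is only served once ServiceTimeSync has advanced past the message's timestamp, so the node never answers before its data covers the query's point in time. A minimal polling sketch of that gate (the sleep is an illustrative way to avoid the hot spin):

import "time"

// waitServiceable blocks until the node's service time passes ts.
func waitServiceable(serviceTime func() uint64, ts uint64) {
	for serviceTime() < ts {
		time.Sleep(time.Millisecond) // back off instead of spinning
	}
}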
@ -517,9 +484,9 @@ func (node *QueryNode) PreInsertAndDelete() msgPb.Status {
func (node *QueryNode) DoInsertAndDelete() msgPb.Status {
var wg sync.WaitGroup
// Do insert
for segmentID := range node.insertData.insertRecords {
for segmentID, records := range node.insertData.insertRecords {
wg.Add(1)
go node.DoInsert(segmentID, &wg)
go node.DoInsert(segmentID, &records, &wg)
}
// Do delete
@ -529,15 +496,16 @@ func (node *QueryNode) DoInsertAndDelete() msgPb.Status {
}
wg.Add(1)
var deleteTimestamps = node.deleteData.deleteTimestamps[segmentID]
fmt.Println("Doing delete......")
go node.DoDelete(segmentID, &deleteIDs, &deleteTimestamps, &wg)
fmt.Println("Do delete done")
}
wg.Wait()
return msgPb.Status{ErrorCode: msgPb.ErrorCode_SUCCESS}
}
func (node *QueryNode) DoInsert(segmentID int64, wg *sync.WaitGroup) msgPb.Status {
func (node *QueryNode) DoInsert(segmentID int64, records *[][]byte, wg *sync.WaitGroup) msgPb.Status {
fmt.Println("Doing insert..., len = ", len(node.insertData.insertIDs[segmentID]))
var targetSegment, err = node.GetSegmentBySegmentID(segmentID)
if err != nil {
fmt.Println(err.Error())
@ -546,14 +514,10 @@ func (node *QueryNode) DoInsert(segmentID int64, wg *sync.WaitGroup) msgPb.Statu
ids := node.insertData.insertIDs[segmentID]
timestamps := node.insertData.insertTimestamps[segmentID]
records := node.insertData.insertRecords[segmentID]
offsets := node.insertData.insertOffset[segmentID]
err = targetSegment.SegmentInsert(offsets, &ids, &timestamps, &records)
fmt.Println("Do insert done, len = ", len(node.insertData.insertIDs[segmentID]))
node.QueryLog(len(ids))
node.msgCounter.InsertCounter += int64(len(ids))
err = targetSegment.SegmentInsert(offsets, &ids, &timestamps, records)
if err != nil {
fmt.Println(err.Error())
return msgPb.Status{ErrorCode: 1}
@ -592,7 +556,7 @@ func (node *QueryNode) QueryJson2Info(queryJson *string) *QueryInfo {
return nil
}
//fmt.Println(query)
fmt.Println(query)
return &query
}
@ -609,6 +573,8 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
// TODO: Do not receive batched search requests
for _, msg := range searchMessages {
var clientId = msg.ClientId
var resultsTmp = make([]SearchResultTmp, 0)
var searchTimestamp = msg.Timestamp
// ServiceTimeSync update by readerTimeSync, which is get from proxy.
@ -617,8 +583,8 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
// So the ServiceTimeSync is always less than searchTimestamp.
// Here, we manually make searchTimestamp's logic time minus `conf.Config.Timesync.Interval` milliseconds.
// Which means `searchTimestamp.logicTime = searchTimestamp.logicTime - conf.Config.Timesync.Interval`.
var logicTimestamp = searchTimestamp << 46 >> 46
searchTimestamp = (searchTimestamp>>18-uint64(conf.Config.Timesync.Interval+600))<<18 + logicTimestamp
// var logicTimestamp = searchTimestamp << 46 >> 46
// searchTimestamp = (searchTimestamp >> 18 - uint64(conf.Config.Timesync.Interval)) << 18 + logicTimestamp
var vector = msg.Records
// For now, only the first JSON is valid.
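The shifts above encode a hybrid timestamp: the upper 46 bits carry physical time and the lower 18 bits a logical counter, so << 46 >> 46 isolates the logical part and >> 18 the physical part. A small sketch of composing and backdating such a value (helper names are illustrative):

const logicalBits = 18
const logicalMask = (1 << logicalBits) - 1 // lower 18 bits

// composeTS packs a physical clock value with a logical counter.
func composeTS(physical, logical uint64) uint64 {
	return physical<<logicalBits | (logical & logicalMask)
}

// backdate moves the physical part back by delta while keeping the
// logical part, as the commented adjustment above does.
func backdate(ts, delta uint64) uint64 {
	logical := ts & logicalMask // same value as ts << 46 >> 46
	return (ts>>logicalBits-delta)<<logicalBits | logical
}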
@ -627,55 +593,31 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
// 1. Timestamp check
// TODO: return or wait? Or adding graceful time
if searchTimestamp > node.queryNodeTimeSync.ServiceTimeSync {
fmt.Println("Invalid query time, timestamp = ", searchTimestamp>>18, ", SearchTimeSync = ", node.queryNodeTimeSync.ServiceTimeSync>>18)
fmt.Println("Invalid query time, timestamp = ", searchTimestamp >> 18, ", SearchTimeSync = ", node.queryNodeTimeSync.ServiceTimeSync >> 18)
return msgPb.Status{ErrorCode: 1}
}
// 2. Get query information from query json
query := node.QueryJson2Info(&queryJson)
// 2d slice for receiving multiple queries' results
var resultsTmp = make([][]SearchResultTmp, query.NumQueries)
for i := 0; i < int(query.NumQueries); i++ {
resultsTmp[i] = make([]SearchResultTmp, 0)
}
// 3. Do search in all segments
for _, segment := range node.SegmentsMap {
if segment.GetRowCount() <= 0 {
// Skip empty segment
continue
}
//fmt.Println("Search in segment:", segment.SegmentId, ",segment rows:", segment.GetRowCount())
var res, err = segment.SegmentSearch(query, searchTimestamp, vector)
if err != nil {
fmt.Println(err.Error())
return msgPb.Status{ErrorCode: 1}
}
for i := 0; i < int(query.NumQueries); i++ {
for j := i * query.TopK; j < (i+1)*query.TopK; j++ {
resultsTmp[i] = append(resultsTmp[i], SearchResultTmp{
ResultId: res.ResultIds[j],
ResultDistance: res.ResultDistances[j],
})
}
for i := 0; i < len(res.ResultIds); i++ {
resultsTmp = append(resultsTmp, SearchResultTmp{ResultId: res.ResultIds[i], ResultDistance: res.ResultDistances[i]})
}
}
// 4. Reduce results
for _, rTmp := range resultsTmp {
sort.Slice(rTmp, func(i, j int) bool {
return rTmp[i].ResultDistance < rTmp[j].ResultDistance
})
}
for _, rTmp := range resultsTmp {
if len(rTmp) > query.TopK {
rTmp = rTmp[:query.TopK]
}
}
sort.Slice(resultsTmp, func(i, j int) bool {
return resultsTmp[i].ResultDistance < resultsTmp[j].ResultDistance
})
resultsTmp = resultsTmp[:query.TopK]
var entities = msgPb.Entities{
Ids: make([]int64, 0),
}
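A segment answers a batched request with NumQueries * TopK candidates laid out contiguously, query i owning slots [i*TopK, (i+1)*TopK); reducing across segments then means sorting each query's pool by ascending distance and keeping its best TopK. A sketch of that per-query reduction (types are illustrative; the truncation reassigns through the index so the shortened slice is kept):

import "sort"

type cand struct {
	ID       int64
	Distance float32
}

// reduceTopK keeps the best topK candidates per query, in place.
func reduceTopK(results [][]cand, topK int) {
	for i := range results {
		sort.Slice(results[i], func(a, b int) bool {
			return results[i][a].Distance < results[i][b].Distance // smaller is better
		})
		if len(results[i]) > topK {
			results[i] = results[i][:topK]
		}
	}
}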
@ -688,19 +630,15 @@ func (node *QueryNode) Search(searchMessages []*msgPb.SearchMsg) msgPb.Status {
QueryId: msg.Uid,
ClientId: clientId,
}
for _, rTmp := range resultsTmp {
for _, res := range rTmp {
results.Entities.Ids = append(results.Entities.Ids, res.ResultId)
results.Distances = append(results.Distances, res.ResultDistance)
results.Scores = append(results.Distances, float32(0))
}
for _, res := range resultsTmp {
results.Entities.Ids = append(results.Entities.Ids, res.ResultId)
results.Distances = append(results.Distances, res.ResultDistance)
results.Scores = append(results.Scores, float32(0))
}
// Send numQueries to RowNum.
results.RowNum = query.NumQueries
results.RowNum = int64(len(results.Distances))
// 5. publish result to pulsar
//fmt.Println(results.Entities.Ids)
//fmt.Println(results.Distances)
node.PublishSearchResult(&results)
}

View File

@ -7,12 +7,13 @@ import (
"sync"
)
func StartQueryNode(ctx context.Context, pulsarURL string) {
func StartQueryNode(pulsarURL string) {
mc := message_client.MessageClient{}
mc.InitClient(ctx, pulsarURL)
mc.InitClient(pulsarURL)
mc.ReceiveMessage()
qn := CreateQueryNode(ctx, 0, 0, &mc)
qn := CreateQueryNode(0, 0, &mc)
ctx := context.Background()
// Segments Services
go qn.SegmentManagementService()
@ -27,7 +28,7 @@ func StartQueryNode(ctx context.Context, pulsarURL string) {
}
wg.Add(3)
go qn.RunMetaService(&wg)
go qn.RunMetaService(ctx, &wg)
go qn.RunInsertDelete(&wg)
go qn.RunSearch(&wg)
wg.Wait()

View File

@ -1,25 +1,22 @@
package reader
import (
"context"
"github.com/czs007/suvlim/conf"
"strconv"
"testing"
"time"
)
const ctxTimeInMillisecond = 200
// NOTE: start pulsar before test
func TestReader_startQueryNode(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, _ := context.WithDeadline(context.Background(), d)
//pulsarURL := "pulsar://localhost:6650"
pulsarAddr := "pulsar://"
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
StartQueryNode(ctx, pulsarAddr)
println(pulsarAddr)
StartQueryNode(pulsarAddr)
//go StartQueryNode(pulsarAddr, 0)
//StartQueryNode(pulsarAddr, 1)
}

View File

@ -1,39 +1,21 @@
package reader
import (
"context"
"github.com/czs007/suvlim/conf"
"github.com/czs007/suvlim/reader/message_client"
"strconv"
"testing"
"time"
masterPb "github.com/czs007/suvlim/pkg/master/grpc/master"
msgPb "github.com/czs007/suvlim/pkg/master/grpc/message"
)
// NOTE: start pulsar before test
func TestResult_PublishSearchResult(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, _ := context.WithDeadline(context.Background(), d)
mc := message_client.MessageClient{}
pulsarAddr := "pulsar://"
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
mc.InitClient(ctx, pulsarAddr)
node := CreateQueryNode(ctx, 0, 0, &mc)
// Construct node, collection, partition and segment
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
node.SegmentsMap[0] = segment
// TODO: start pulsar server
const N = 10
var entityIDs = msgPb.Entities{
Ids: make([]int64, N),
@ -47,26 +29,11 @@ func TestResult_PublishSearchResult(t *testing.T) {
result.Distances = append(result.Distances, float32(i))
}
node.PublishSearchResult(&result)
node.Close()
}
// NOTE: start pulsar before test
func TestResult_PublishFailedSearchResult(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, _ := context.WithDeadline(context.Background(), d)
mc := message_client.MessageClient{}
pulsarAddr := "pulsar://"
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
mc.InitClient(ctx, pulsarAddr)
node := CreateQueryNode(ctx, 0, 0, &mc)
// Construct node, collection, partition and segment
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -74,27 +41,11 @@ func TestResult_PublishFailedSearchResult(t *testing.T) {
// TODO: start pulsar server
node.PublishFailedSearchResult()
node.Close()
}
// NOTE: start pulsar before test
func TestResult_PublicStatistic(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, _ := context.WithDeadline(context.Background(), d)
mc := message_client.MessageClient{}
pulsarAddr := "pulsar://"
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
mc.InitClient(ctx, pulsarAddr)
node := CreateQueryNode(ctx, 0, 0, &mc)
// Construct node, collection, partition and segment
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -115,6 +66,4 @@ func TestResult_PublicStatistic(t *testing.T) {
// TODO: start pulsar server
node.PublicStatistic(&statisticData)
node.Close()
}

View File

@ -16,7 +16,6 @@ import (
"fmt"
"github.com/czs007/suvlim/errors"
msgPb "github.com/czs007/suvlim/pkg/master/grpc/message"
"github.com/stretchr/testify/assert"
"strconv"
"unsafe"
)
@ -84,16 +83,15 @@ func (s *Segment) CloseSegment(collection* Collection) error {
}
// Build index after closing segment
//s.SegmentStatus = SegmentIndexing
//fmt.Println("Building index...")
//s.buildIndex(collection)
s.SegmentStatus = SegmentIndexing
fmt.Println("Building index...")
s.buildIndex(collection)
// TODO: remove redundant segment indexed status
// Change segment status to indexed
//s.SegmentStatus = SegmentIndexed
//fmt.Println("Segment closed and indexed")
s.SegmentStatus = SegmentIndexed
fmt.Println("Segment closed and indexed")
fmt.Println("Segment closed")
return nil
}
@ -144,13 +142,9 @@ func (s *Segment) SegmentInsert(offset int64, entityIDs *[]int64, timestamps *[]
var numOfRow = len(*entityIDs)
var sizeofPerRow = len((*records)[0])
assert.Equal(nil, numOfRow, len(*records))
var rawData = make([]byte, numOfRow * sizeofPerRow)
var copyOffset = 0
var rawData = make([]byte, numOfRow*sizeofPerRow)
for i := 0; i < len(*records); i++ {
copy(rawData[copyOffset:], (*records)[i])
copyOffset += sizeofPerRow
copy(rawData, (*records)[i])
}
var cOffset = C.long(offset)
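SegmentInsert flattens the per-row records into one contiguous byte buffer before crossing the cgo boundary, copying each row at its own offset; sizeofPerRow is taken from the first record, so fixed-size rows are assumed. A sketch of that packing:

// flattenRows packs fixed-size rows into one contiguous buffer,
// suitable for handing a single pointer across cgo.
func flattenRows(records [][]byte) []byte {
	if len(records) == 0 {
		return nil
	}
	rowSize := len(records[0]) // assumes every row has the same length
	raw := make([]byte, len(records)*rowSize)
	offset := 0
	for _, rec := range records {
		copy(raw[offset:], rec) // each row lands at its own offset
		offset += rowSize
	}
	return raw
}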
@ -218,8 +212,8 @@ func (s *Segment) SegmentSearch(query *QueryInfo, timestamp uint64, vectorRecord
field_name: C.CString(query.FieldName),
}
resultIds := make([]int64, int64(query.TopK) * query.NumQueries)
resultDistances := make([]float32, int64(query.TopK) * query.NumQueries)
resultIds := make([]int64, query.TopK)
resultDistances := make([]float32, query.TopK)
var cTimestamp = C.ulong(timestamp)
var cResultIds = (*C.long)(&resultIds[0])
@ -242,7 +236,7 @@ func (s *Segment) SegmentSearch(query *QueryInfo, timestamp uint64, vectorRecord
return nil, errors.New("Search failed, error code = " + strconv.Itoa(int(status)))
}
//fmt.Println("Search Result---- Ids =", resultIds, ", Distances =", resultDistances)
fmt.Println("Search Result---- Ids =", resultIds, ", Distances =", resultDistances)
return &SearchResult{ResultIds: resultIds, ResultDistances: resultDistances}, nil
}

View File

@ -19,10 +19,11 @@ func (node *QueryNode) SegmentsManagement() {
for _, partition := range collection.Partitions {
for _, segment := range partition.Segments {
if segment.SegmentStatus != SegmentOpened {
log.Println("Segment have been closed")
continue
}
// fmt.Println("timeNow = ", timeNow, "SegmentCloseTime = ", segment.SegmentCloseTime)
fmt.Println("timeNow = ", timeNow, "SegmentCloseTime = ", segment.SegmentCloseTime)
if timeNow >= segment.SegmentCloseTime {
go segment.CloseSegment(collection)
}
@ -35,13 +36,8 @@ func (node *QueryNode) SegmentManagementService() {
sleepMillisecondTime := 1000
fmt.Println("do segments management in ", strconv.Itoa(sleepMillisecondTime), "ms")
for {
select {
case <-node.ctx.Done():
return
default:
time.Sleep(time.Duration(sleepMillisecondTime) * time.Millisecond)
node.SegmentsManagement()
}
time.Sleep(time.Duration(sleepMillisecondTime) * time.Millisecond)
node.SegmentsManagement()
}
}
@ -96,12 +92,7 @@ func (node *QueryNode) SegmentStatisticService() {
sleepMillisecondTime := 1000
fmt.Println("do segments statistic in ", strconv.Itoa(sleepMillisecondTime), "ms")
for {
select {
case <-node.ctx.Done():
return
default:
time.Sleep(time.Duration(sleepMillisecondTime) * time.Millisecond)
node.SegmentStatistic(sleepMillisecondTime)
}
time.Sleep(time.Duration(sleepMillisecondTime) * time.Millisecond)
node.SegmentStatistic(sleepMillisecondTime)
}
}

View File

@ -1,93 +1,53 @@
package reader
import (
"context"
"github.com/czs007/suvlim/conf"
"github.com/czs007/suvlim/reader/message_client"
"strconv"
"testing"
"time"
)
func TestSegmentManagement_SegmentsManagement(t *testing.T) {
// Construct node, collection, partition and segment
ctx := context.Background()
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
node.SegmentsMap[0] = segment
// TODO: fix segment management
node.SegmentsManagement()
node.Close()
}
func TestSegmentManagement_SegmentService(t *testing.T) {
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, _ := context.WithDeadline(context.Background(), d)
// Construct node, collection, partition and segment
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
node.SegmentsMap[0] = segment
// TODO: fix segment service
node.SegmentManagementService()
node.Close()
}
// NOTE: start pulsar before test
func TestSegmentManagement_SegmentStatistic(t *testing.T) {
conf.LoadConfig("config.yaml")
ctx, _ := context.WithCancel(context.Background())
mc := message_client.MessageClient{}
pulsarAddr := "pulsar://"
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
mc.InitClient(ctx, pulsarAddr)
mc.ReceiveMessage()
node := CreateQueryNode(ctx, 0, 0, &mc)
// Construct node, collection, partition and segment
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
node.SegmentsMap[0] = segment
// TODO: start pulsar server
node.SegmentStatistic(1000)
node.Close()
}
// NOTE: start pulsar before test
func TestSegmentManagement_SegmentStatisticService(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, _ := context.WithDeadline(context.Background(), d)
mc := message_client.MessageClient{}
pulsarAddr := "pulsar://"
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
mc.InitClient(ctx, pulsarAddr)
mc.ReceiveMessage()
node := CreateQueryNode(ctx, 0, 0, &mc)
// Construct node, collection, partition and segment
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
node.SegmentsMap[0] = segment
// TODO: start pulsar server
node.SegmentStatisticService()
node.Close()
}

View File

@ -1,7 +1,6 @@
package reader
import (
"context"
"encoding/binary"
"fmt"
"math"
@ -13,8 +12,7 @@ import (
func TestSegment_ConstructorAndDestructor(t *testing.T) {
// 1. Construct node, collection, partition and segment
ctx := context.Background()
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -23,14 +21,11 @@ func TestSegment_ConstructorAndDestructor(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}
func TestSegment_SegmentInsert(t *testing.T) {
// 1. Construct node, collection, partition and segment
ctx := context.Background()
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -71,14 +66,11 @@ func TestSegment_SegmentInsert(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}
func TestSegment_SegmentDelete(t *testing.T) {
ctx := context.Background()
// 1. Construct node, collection, partition and segment
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -99,14 +91,11 @@ func TestSegment_SegmentDelete(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}
func TestSegment_SegmentSearch(t *testing.T) {
ctx := context.Background()
// 1. Construct node, collection, partition and segment
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -163,14 +152,11 @@ func TestSegment_SegmentSearch(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}
func TestSegment_SegmentPreInsert(t *testing.T) {
ctx := context.Background()
// 1. Construct node, collection, partition and segment
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -183,14 +169,11 @@ func TestSegment_SegmentPreInsert(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}
func TestSegment_SegmentPreDelete(t *testing.T) {
ctx := context.Background()
// 1. Construct node, collection, partition and segment
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -203,16 +186,13 @@ func TestSegment_SegmentPreDelete(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}
// Segment util functions test
////////////////////////////////////////////////////////////////////////////
func TestSegment_GetStatus(t *testing.T) {
ctx := context.Background()
// 1. Construct node, collection, partition and segment
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -225,14 +205,11 @@ func TestSegment_GetStatus(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}
func TestSegment_Close(t *testing.T) {
ctx := context.Background()
// 1. Construct node, collection, partition and segment
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -245,14 +222,11 @@ func TestSegment_Close(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}
func TestSegment_GetRowCount(t *testing.T) {
ctx := context.Background()
// 1. Construct node, collection, partition and segment
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -297,14 +271,11 @@ func TestSegment_GetRowCount(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}
func TestSegment_GetDeletedCount(t *testing.T) {
ctx := context.Background()
// 1. Construct node, collection, partition and segment
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -330,14 +301,11 @@ func TestSegment_GetDeletedCount(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}
func TestSegment_GetMemSize(t *testing.T) {
ctx := context.Background()
// 1. Construct node, collection, partition and segment
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -376,28 +344,22 @@ func TestSegment_GetMemSize(t *testing.T) {
// 6. Get memory usage in bytes
var memSize = segment.GetMemSize()
assert.Equal(t, memSize, uint64(2785280))
assert.Equal(t, memSize, uint64(1048714))
// 7. Destruct collection, partition and segment
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}
func TestSegment_RealSchemaTest(t *testing.T) {
ctx := context.Background()
// 1. Construct node, collection, partition and segment
//var schemaString = "id: 6873737669791618215\nname: \"collection0\"\nschema: \u003c\n " +
// "field_metas: \u003c\n field_name: \"age\"\n type: INT32\n dim: 1\n \u003e\n " +
// "field_metas: \u003c\n field_name: \"field_1\"\n type: VECTOR_FLOAT\n dim: 16\n \u003e\n" +
// "\u003e\ncreate_time: 1600416765\nsegment_ids: 6873737669791618215\npartition_tags: \"default\"\n"
var schemaString = "id: 6875229265736357360\nname: \"collection0\"\nschema: \u003c\n " +
"field_metas: \u003c\n field_name: \"field_3\"\n type: INT32\n dim: 1\n \u003e\n " +
"field_metas: \u003c\n field_name: \"field_vec\"\n type: VECTOR_FLOAT\n dim: 16\n " +
"\u003e\n\u003e\ncreate_time: 1600764055\nsegment_ids: 6875229265736357360\npartition_tags: \"default\"\n"
node := NewQueryNode(ctx, 0, 0)
var schemaString = "id: 6875229265736357360\nname: \"collection0\"\nschema: \u003c\n field_metas: \u003c\n field_name: \"field_3\"\n type: INT32\n \u003e\n field_metas: \u003c\n field_name: \"field_vec\"\n type: VECTOR_FLOAT\n \u003e\n\u003e\ncreate_time: 1600764055\nsegment_ids: 6875229265736357360\npartition_tags: \"default\"\n"
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", schemaString)
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -438,6 +400,4 @@ func TestSegment_RealSchemaTest(t *testing.T) {
partition.DeleteSegment(segment)
collection.DeletePartition(partition)
node.DeleteCollection(collection)
node.Close()
}

View File

@ -1,13 +1,8 @@
package reader
import (
"encoding/json"
"errors"
"fmt"
log "github.com/apache/pulsar/pulsar-client-go/logutil"
"os"
"strconv"
"time"
)
// Function `GetSegmentByEntityId` should return entityIDs, timestamps and segmentIDs
@ -73,54 +68,3 @@ func (c *Collection) GetPartitionByName(partitionName string) (partition *Partit
return nil
// TODO: remove from c.Partitions
}
func (node *QueryNode) QueryLog(length int) {
node.msgCounter.InsertCounter += int64(length)
timeNow := time.Now()
duration := timeNow.Sub(node.msgCounter.InsertTime)
speed := float64(length) / duration.Seconds()
insertLog := InsertLog{
MsgLength: length,
DurationInMilliseconds: duration.Milliseconds(),
InsertTime: timeNow,
NumSince: node.msgCounter.InsertCounter,
Speed: speed,
}
node.InsertLogs = append(node.InsertLogs, insertLog)
node.msgCounter.InsertTime = timeNow
}
func (node *QueryNode) WriteQueryLog() {
f, err := os.OpenFile("/tmp/query_node_insert.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Fatal(err)
}
// write logs
for _, insertLog := range node.InsertLogs {
insertLogJson, err := json.Marshal(&insertLog)
if err != nil {
log.Fatal(err)
}
writeString := string(insertLogJson) + "\n"
fmt.Println(writeString)
_, err2 := f.WriteString(writeString)
if err2 != nil {
log.Fatal(err2)
}
}
// reset InsertLogs buffer
node.InsertLogs = make([]InsertLog, 0)
err = f.Close()
if err != nil {
log.Fatal(err)
}
fmt.Println("write log done")
}
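WriteQueryLog appends one JSON object per InsertLog entry to /tmp/query_node_insert.txt, then resets the in-memory buffer. With the struct fields above, a logged line would look roughly like this (values are illustrative):

{"MsgLength":1000,"DurationInMilliseconds":42,"InsertTime":"2020-10-15T16:32:22+08:00","NumSince":100000,"Speed":23809.52}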

View File

@ -1,42 +1,18 @@
package reader
import (
"context"
"github.com/czs007/suvlim/conf"
"github.com/czs007/suvlim/reader/message_client"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// NOTE: start pulsar before test
func TestUtilFunctions_GetKey2Segments(t *testing.T) {
conf.LoadConfig("config.yaml")
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, _ := context.WithDeadline(context.Background(), d)
mc := message_client.MessageClient{}
pulsarAddr := "pulsar://"
pulsarAddr += conf.Config.Pulsar.Address
pulsarAddr += ":"
pulsarAddr += strconv.FormatInt(int64(conf.Config.Pulsar.Port), 10)
mc.InitClient(ctx, pulsarAddr)
mc.ReceiveMessage()
node := CreateQueryNode(ctx, 0, 0, &mc)
node.messageClient.PrepareKey2SegmentMsg()
var _, _, _ = node.GetKey2Segments()
node.Close()
// TODO: Add GetKey2Segments test
}
func TestUtilFunctions_GetCollectionByCollectionName(t *testing.T) {
ctx := context.Background()
// 1. Construct node, and collections
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var _ = node.NewCollection(0, "collection0", "")
// 2. Get collection by collectionName
@ -45,15 +21,12 @@ func TestUtilFunctions_GetCollectionByCollectionName(t *testing.T) {
assert.Equal(t, c0.CollectionName, "collection0")
c0 = node.GetCollectionByID(0)
assert.NotNil(t, c0)
assert.Equal(t, c0.CollectionID, uint64(0))
node.Close()
assert.Equal(t, c0.CollectionID, uint64(0))
}
func TestUtilFunctions_GetSegmentBySegmentID(t *testing.T) {
ctx := context.Background()
// 1. Construct node, collection, partition and segment
node := NewQueryNode(ctx, 0, 0)
node := NewQueryNode(0, 0)
var collection = node.NewCollection(0, "collection0", "")
var partition = collection.NewPartition("partition0")
var segment = partition.NewSegment(0)
@ -63,6 +36,4 @@ func TestUtilFunctions_GetSegmentBySegmentID(t *testing.T) {
var s0, err = node.GetSegmentBySegmentID(0)
assert.NoError(t, err)
assert.Equal(t, s0.SegmentId, int64(0))
node.Close()
}

View File

@ -62,8 +62,8 @@ function collect() {
# official go code ship with the crate, so we need to generate it manually.
cd ${ROOT_DIR}/proto
PB_FILES=("message.proto")
GRPC_FILES=("pdpb.proto" "metapb.proto")
PB_FILES=()
GRPC_FILES=("message.proto" "pdpb.proto" "metapb.proto")
ALL_FILES=("${PB_FILES[@]}")
ALL_FILES+=("${GRPC_FILES[@]}")

View File

@ -1,7 +1,7 @@
#!/bin/bash
BUILD_OUTPUT_DIR="cmake_build_release"
BUILD_TYPE="Release"
BUILD_OUTPUT_DIR="cmake_build"
BUILD_TYPE="Debug"
MAKE_CLEAN="OFF"
RUN_CPPLINT="OFF"

Some files were not shown because too many files have changed in this diff