Add parquet payload

Signed-off-by: neza2017 <yefu.chen@zilliz.com>
pull/4973/head^2
neza2017 2020-12-02 18:31:56 +08:00 committed by yefu.chen
parent 36cf8a8ea7
commit cec903da19
108 changed files with 4313 additions and 7569 deletions
.env

@@ -1,7 +1,7 @@
REPO=milvusdb/milvus-distributed-dev
ARCH=amd64
UBUNTU=18.04
DATE_VERSION=20201202-085131
DATE_VERSION=20201120-092740
LATEST_DATE_VERSION=latest
PULSAR_ADDRESS=pulsar://pulsar:6650
ETCD_ADDRESS=etcd:2379


@@ -6,12 +6,12 @@ on:
push:
# file paths to consider in the event. Optional; defaults to all.
paths:
- 'build/docker/env/**'
- 'build/docker/**'
- '.github/workflows/publish-builder.yaml'
pull_request:
# file paths to consider in the event. Optional; defaults to all.
paths:
- 'build/docker/env/**'
- 'build/docker/**'
- '.github/workflows/publish-builder.yaml'
jobs:


@@ -12,15 +12,12 @@ dir ('build/docker/deploy') {
try {
withCredentials([usernamePassword(credentialsId: "${env.DOCKER_CREDENTIALS_ID}", usernameVariable: 'DOCKER_USERNAME', passwordVariable: 'DOCKER_PASSWORD')]) {
sh 'docker login -u ${DOCKER_USERNAME} -p ${DOCKER_PASSWORD} ${DOKCER_REGISTRY_URL}'
sh 'docker pull ${SOURCE_REPO}/master:${SOURCE_TAG} || true'
sh 'docker-compose build --force-rm master'
sh 'docker-compose push master'
sh 'docker pull ${SOURCE_REPO}/proxy:${SOURCE_TAG} || true'
sh 'docker-compose build --force-rm proxy'
sh 'docker-compose push proxy'
sh 'docker pull ${SOURCE_REPO}/querynode:${SOURCE_TAG} || true'
sh 'docker-compose build --force-rm querynode'
sh 'docker-compose push querynode'


@@ -1,37 +0,0 @@
try {
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d etcd'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d pulsar'
dir ('build/docker/deploy') {
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} pull'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d'
}
dir ('build/docker/test') {
sh 'docker pull ${SOURCE_REPO}/pytest:${SOURCE_TAG} || true'
sh 'docker-compose build --force-rm regression'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run --rm regression'
try {
withCredentials([usernamePassword(credentialsId: "${env.DOCKER_CREDENTIALS_ID}", usernameVariable: 'DOCKER_USERNAME', passwordVariable: 'DOCKER_PASSWORD')]) {
sh 'docker login -u ${DOCKER_USERNAME} -p ${DOCKER_PASSWORD} ${DOKCER_REGISTRY_URL}'
sh 'docker-compose push regression'
}
} catch (exc) {
throw exc
} finally {
sh 'docker logout ${DOKCER_REGISTRY_URL}'
}
}
} catch(exc) {
throw exc
} finally {
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} rm -f -s -v pulsar'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} rm -f -s -v etcd'
dir ('build/docker/deploy') {
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} down --rmi all -v || true'
}
dir ('build/docker/test') {
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run --rm regression /bin/bash -c "rm -rf __pycache__ && rm -rf .pytest_cache"'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} down --rmi all -v || true'
}
}


@@ -35,7 +35,7 @@ fmt:
@echo "Running $@ check"
@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh cmd/
@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh internal/
@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh tests/go/
@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh test/
#TODO: Check code specifications by golangci-lint
lint:
@@ -43,13 +43,13 @@ lint:
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=3m --config ./.golangci.yml ./internal/...
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=3m --config ./.golangci.yml ./cmd/...
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=3m --config ./.golangci.yml ./tests/go/...
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=3m --config ./.golangci.yml ./test/...
ruleguard:
@echo "Running $@ check"
@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go ./internal/...
@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go ./cmd/...
@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go ./tests/go/...
@${GOPATH}/bin/ruleguard -rules ruleguard.rules.go ./test/...
verifiers: cppcheck fmt lint ruleguard


@@ -18,11 +18,6 @@ pipeline {
PACKAGE_ARTFACTORY_URL = "${JFROG_ARTFACTORY_URL}/${PROJECT_NAME}/package/${PACKAGE_NAME}"
DOCKER_CREDENTIALS_ID = "ba070c98-c8cc-4f7c-b657-897715f359fc"
DOKCER_REGISTRY_URL = "registry.zilliz.com"
SOURCE_REPO = "${DOKCER_REGISTRY_URL}/milvus-distributed"
TARGET_REPO = "${DOKCER_REGISTRY_URL}/milvus-distributed"
SOURCE_TAG = "${CHANGE_TARGET ? CHANGE_TARGET : SEMVER}-${LOWER_BUILD_TYPE}"
TARGET_TAG = "${SEMVER}-${LOWER_BUILD_TYPE}"
DOCKER_BUILDKIT = 1
}
stages {
stage ('Build and UnitTest') {
@@ -56,28 +51,18 @@ pipeline {
yamlFile "build/ci/jenkins/pod/docker-pod.yaml"
}
}
environment{
SOURCE_REPO = "${DOKCER_REGISTRY_URL}/milvus-distributed"
TARGET_REPO = "${DOKCER_REGISTRY_URL}/milvus-distributed"
SOURCE_TAG = "${CHANGE_TARGET ? CHANGE_TARGET : SEMVER}-${LOWER_BUILD_TYPE}"
TARGET_TAG = "${SEMVER}-${LOWER_BUILD_TYPE}"
DOCKER_BUILDKIT = 1
}
steps {
container('publish-images') {
MPLModule('Publish')
}
}
}
stage ('Dev Test') {
agent {
label "performance"
}
environment {
DOCKER_COMPOSE_PROJECT_NAME = "${PROJECT_NAME}-${SEMVER}-${env.BUILD_NUMBER}".replaceAll("\\.", "-").replaceAll("_", "-")
}
steps {
MPLModule('Python Regression')
}
post {
cleanup {
deleteDir() /* clean up our workspace */
}
}
}
}
}


@@ -13,6 +13,8 @@ services:
ETCD_ADDRESS: ${ETCD_ADDRESS}
networks:
- milvus
ports:
- "53100:53100"
proxy:
image: ${TARGET_REPO}/proxy:${TARGET_TAG}
@@ -24,6 +26,8 @@ services:
environment:
PULSAR_ADDRESS: ${PULSAR_ADDRESS}
MASTER_ADDRESS: ${MASTER_ADDRESS}
ports:
- "19530:19530"
networks:
- milvus


@@ -1,18 +0,0 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
FROM python:3.6.8-jessie
COPY ./tests/python/requirements.txt /requirements.txt
RUN python3 -m pip install -r /requirements.txt
CMD ["tail", "-f", "/dev/null"]


@@ -1,20 +0,0 @@
version: '3.5'
services:
regression:
image: ${TARGET_REPO}/pytest:${TARGET_TAG}
build:
context: ../../../
dockerfile: build/docker/test/Dockerfile
cache_from:
- ${SOURCE_REPO}/pytest:${SOURCE_TAG}
volumes:
- ../../..:/milvus-distributed:delegated
working_dir: "/milvus-distributed/tests/python"
command: >
/bin/bash -c "pytest --ip proxy"
networks:
- milvus
networks:
milvus:


@@ -34,6 +34,7 @@ msgChannel:
channelRange:
insert: [0, 1]
delete: [0, 1]
dataDefinition: [0,1]
k2s: [0, 1]
search: [0, 1]
searchResult: [0, 1]
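
The added dataDefinition entry gives data-definition messages their own channel range next to the existing groups. As a hedged illustration (both the half-open [begin, end) reading and the "<prefix>-<n>" channel naming are assumptions, not confirmed by this commit), expanding such a range into concrete channel names might look like:

```go
// channelNames expands a channel range into concrete channel names.
// The half-open [begin, end) reading and the "<prefix>-<n>" naming
// are assumptions made for illustration only.
func channelNames(prefix string, channelRange [2]int) []string {
	begin, end := channelRange[0], channelRange[1]
	names := make([]string, 0, end-begin)
	for i := begin; i < end; i++ {
		names = append(names, fmt.Sprintf("%s-%d", prefix, i))
	}
	return names
}

// channelNames("dataDefinition", [2]int{0, 1}) -> ["dataDefinition-0"]
```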


@@ -59,12 +59,16 @@ services:
etcd:
image: quay.io/coreos/etcd:v3.4.13
command: etcd -advertise-client-urls=http://127.0.0.1:2379 -listen-client-urls http://0.0.0.0:2379
ports:
- "2379:2379"
networks:
- milvus
pulsar:
image: apachepulsar/pulsar:2.6.1
command: bin/pulsar standalone
ports:
- "6650:6650"
networks:
- milvus


@@ -380,13 +380,18 @@ func (segMgr *SegmentManager) AssignSegmentID(segIDReq []*internalpb.SegIDReques
// "/msg_stream/insert"
message SysConfigRequest {
repeated string keys = 1;
repeated string key_prefixes = 2;
MsgType msg_type = 1;
int64 reqID = 2;
int64 proxyID = 3;
uint64 timestamp = 4;
repeated string keys = 5;
repeated string key_prefixes = 6;
}
message SysConfigResponse {
repeated string keys = 1;
repeated string values = 2;
common.Status status = 1;
repeated string keys = 2;
repeated string values = 3;
}
```
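
The revised SysConfigRequest leads with the common header fields (msg_type, reqID, proxyID, timestamp) before the lookup keys, and SysConfigResponse gains a status ahead of the key/value lists. A minimal sketch of filling the new request shape, using a local struct that mirrors the proto (the real type is generated, and the enum value below is a stand-in):

```go
// Local mirror of the revised SysConfigRequest layout, for illustration
// only; the actual Go type is generated from the proto above.
type sysConfigRequest struct {
	MsgType     int32
	ReqID       int64
	ProxyID     int64
	Timestamp   uint64
	Keys        []string
	KeyPrefixes []string
}

// buildSysConfigRequest fills the new header fields before the lookup keys.
func buildSysConfigRequest(reqID, proxyID int64, ts uint64) sysConfigRequest {
	return sysConfigRequest{
		MsgType:     1, // stand-in for the real MsgType enum value
		ReqID:       reqID,
		ProxyID:     proxyID,
		Timestamp:   ts,
		KeyPrefixes: []string{"master"}, // fetch everything under "master"
	}
}
```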
@@ -394,12 +399,11 @@ message SysConfigResponse {
```go
type SysConfig struct {
etcdKV *etcd
etcdPathPrefix string
kv *kv.EtcdKV
}
func (conf *SysConfig) InitFromFile(filePath string) (error)
func (conf *SysConfig) GetByPrefix(keyPrefix string) ([]string, error)
func (conf *SysConfig) GetByPrefix(keyPrefix string) (keys []string, values []string, err error)
func (conf *SysConfig) Get(keys []string) ([]string, error)
```
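
The reworked GetByPrefix returns parallel key and value slices instead of values alone, so callers can reconstruct whole configuration entries. A minimal sketch of that signature over an etcd-backed KV, assuming a LoadWithPrefix helper with the same parallel-slice shape:

```go
// Sketch only: this etcdKV interface is an assumed stand-in for the
// kv.EtcdKV type referenced in the struct above.
type etcdKV interface {
	LoadWithPrefix(prefix string) (keys []string, values []string, err error)
}

type SysConfig struct {
	kv etcdKV
}

// GetByPrefix returns every key under keyPrefix and its value as two
// parallel slices, matching the revised signature.
func (conf *SysConfig) GetByPrefix(keyPrefix string) ([]string, []string, error) {
	return conf.kv.LoadWithPrefix(keyPrefix)
}
```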


@@ -54,7 +54,7 @@ func (ia *IDAllocator) syncID() {
cancel()
if err != nil {
log.Println("syncID Failed!!!!!")
log.Panic("syncID Failed!!!!!")
return
}
ia.idStart = resp.GetID()


@@ -210,7 +210,7 @@ func (sa *SegIDAssigner) syncSegments() {
}
if err != nil {
log.Println("syncSemgnet Failed!!!!!")
log.Panic("syncID Failed!!!!!")
return
}
}


@@ -63,7 +63,7 @@ func (ta *TimestampAllocator) syncTs() {
cancel()
if err != nil {
log.Println("syncTimestamp Failed!!!!!")
log.Panic("syncID Failed!!!!!")
return
}
ta.lastTsBegin = resp.GetTimestamp()
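
The three hunks above all touch the same failure path: each allocator syncs a fresh block of IDs, segment assignments, or timestamps from the master, then serves requests locally out of that block until it is exhausted. A compressed sketch of the pattern, with the gRPC call reduced to a stand-in callback:

```go
// Compressed sketch of the allocator sync pattern seen in these hunks;
// fetch stands in for the gRPC request to the master service.
type idAllocator struct {
	idStart, idEnd int64
}

func (ia *idAllocator) syncID(fetch func(count uint32) (start int64, cnt uint32, err error)) error {
	start, cnt, err := fetch(1000) // block a batch of 1000 IDs from master
	if err != nil {
		// this is the line the hunks change: log.Println vs log.Panic
		return err
	}
	ia.idStart = start
	ia.idEnd = start + int64(cnt)
	return nil
}
```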


@@ -93,8 +93,8 @@ endif ()
set(INDEX_INCLUDE_DIRS ${INDEX_INCLUDE_DIRS} PARENT_SCOPE)
if (KNOWHERE_BUILD_TESTS)
#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DELPP_DISABLE_LOGS")
#add_subdirectory(unittest)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DELPP_DISABLE_LOGS")
add_subdirectory(unittest)
endif ()
config_summary()


@@ -143,7 +143,6 @@ endif ()
target_link_libraries(
knowhere
milvus_utils
${depend_libs}
)


@@ -76,6 +76,10 @@ class PartitionDescriptionDefaultTypeInternal {
public:
::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<PartitionDescription> _instance;
} _PartitionDescription_default_instance_;
class SysConfigResponseDefaultTypeInternal {
public:
::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<SysConfigResponse> _instance;
} _SysConfigResponse_default_instance_;
class HitsDefaultTypeInternal {
public:
::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<Hits> _instance;
@@ -311,7 +315,22 @@ static void InitDefaultsscc_info_StringResponse_service_5fmsg_2eproto() {
{{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_StringResponse_service_5fmsg_2eproto}, {
&scc_info_Status_common_2eproto.base,}};
static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_service_5fmsg_2eproto[15];
static void InitDefaultsscc_info_SysConfigResponse_service_5fmsg_2eproto() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
{
void* ptr = &::milvus::proto::service::_SysConfigResponse_default_instance_;
new (ptr) ::milvus::proto::service::SysConfigResponse();
::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
}
::milvus::proto::service::SysConfigResponse::InitAsDefaultInstance();
}
::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_SysConfigResponse_service_5fmsg_2eproto =
{{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_SysConfigResponse_service_5fmsg_2eproto}, {
&scc_info_Status_common_2eproto.base,}};
static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_service_5fmsg_2eproto[16];
static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_service_5fmsg_2eproto[1];
static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_service_5fmsg_2eproto = nullptr;
@@ -414,6 +433,14 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_service_5fmsg_2eproto::offsets
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::PartitionDescription, name_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::PartitionDescription, statistics_),
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, status_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, keys_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, values_),
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::Hits, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
@@ -443,8 +470,9 @@ static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOB
{ 73, -1, sizeof(::milvus::proto::service::IntegerRangeResponse)},
{ 81, -1, sizeof(::milvus::proto::service::CollectionDescription)},
{ 89, -1, sizeof(::milvus::proto::service::PartitionDescription)},
{ 97, -1, sizeof(::milvus::proto::service::Hits)},
{ 105, -1, sizeof(::milvus::proto::service::QueryResult)},
{ 97, -1, sizeof(::milvus::proto::service::SysConfigResponse)},
{ 105, -1, sizeof(::milvus::proto::service::Hits)},
{ 113, -1, sizeof(::milvus::proto::service::QueryResult)},
};
static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
@@ -461,6 +489,7 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] =
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_IntegerRangeResponse_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_CollectionDescription_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_PartitionDescription_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_SysConfigResponse_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_Hits_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_QueryResult_default_instance_),
};
@@ -499,20 +528,22 @@ const char descriptor_table_protodef_service_5fmsg_2eproto[] PROTOBUF_SECTION_VA
"\006status\030\001 \001(\0132\033.milvus.proto.common.Stat"
"us\0221\n\004name\030\002 \001(\0132#.milvus.proto.service."
"PartitionName\0225\n\nstatistics\030\003 \003(\0132!.milv"
"us.proto.common.KeyValuePair\"5\n\004Hits\022\013\n\003"
"IDs\030\001 \003(\003\022\020\n\010row_data\030\002 \003(\014\022\016\n\006scores\030\003 "
"\003(\002\"H\n\013QueryResult\022+\n\006status\030\001 \001(\0132\033.mil"
"vus.proto.common.Status\022\014\n\004hits\030\002 \003(\014*@\n"
"\017PlaceholderType\022\010\n\004NONE\020\000\022\021\n\rVECTOR_BIN"
"ARY\020d\022\020\n\014VECTOR_FLOAT\020eBCZAgithub.com/zi"
"lliztech/milvus-distributed/internal/pro"
"to/servicepbb\006proto3"
"us.proto.common.KeyValuePair\"^\n\021SysConfi"
"gResponse\022+\n\006status\030\001 \001(\0132\033.milvus.proto"
".common.Status\022\014\n\004keys\030\002 \003(\t\022\016\n\006values\030\003"
" \003(\t\"5\n\004Hits\022\013\n\003IDs\030\001 \003(\003\022\020\n\010row_data\030\002 "
"\003(\014\022\016\n\006scores\030\003 \003(\002\"H\n\013QueryResult\022+\n\006st"
"atus\030\001 \001(\0132\033.milvus.proto.common.Status\022"
"\014\n\004hits\030\002 \003(\014*@\n\017PlaceholderType\022\010\n\004NONE"
"\020\000\022\021\n\rVECTOR_BINARY\020d\022\020\n\014VECTOR_FLOAT\020eB"
"CZAgithub.com/zilliztech/milvus-distribu"
"ted/internal/proto/servicepbb\006proto3"
;
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_service_5fmsg_2eproto_deps[2] = {
&::descriptor_table_common_2eproto,
&::descriptor_table_schema_2eproto,
};
static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_service_5fmsg_2eproto_sccs[15] = {
static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_service_5fmsg_2eproto_sccs[16] = {
&scc_info_BoolResponse_service_5fmsg_2eproto.base,
&scc_info_CollectionDescription_service_5fmsg_2eproto.base,
&scc_info_CollectionName_service_5fmsg_2eproto.base,
@@ -528,14 +559,15 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_ser
&scc_info_RowBatch_service_5fmsg_2eproto.base,
&scc_info_StringListResponse_service_5fmsg_2eproto.base,
&scc_info_StringResponse_service_5fmsg_2eproto.base,
&scc_info_SysConfigResponse_service_5fmsg_2eproto.base,
};
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_service_5fmsg_2eproto_once;
static bool descriptor_table_service_5fmsg_2eproto_initialized = false;
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_service_5fmsg_2eproto = {
&descriptor_table_service_5fmsg_2eproto_initialized, descriptor_table_protodef_service_5fmsg_2eproto, "service_msg.proto", 1620,
&descriptor_table_service_5fmsg_2eproto_once, descriptor_table_service_5fmsg_2eproto_sccs, descriptor_table_service_5fmsg_2eproto_deps, 15, 2,
&descriptor_table_service_5fmsg_2eproto_initialized, descriptor_table_protodef_service_5fmsg_2eproto, "service_msg.proto", 1716,
&descriptor_table_service_5fmsg_2eproto_once, descriptor_table_service_5fmsg_2eproto_sccs, descriptor_table_service_5fmsg_2eproto_deps, 16, 2,
schemas, file_default_instances, TableStruct_service_5fmsg_2eproto::offsets,
file_level_metadata_service_5fmsg_2eproto, 15, file_level_enum_descriptors_service_5fmsg_2eproto, file_level_service_descriptors_service_5fmsg_2eproto,
file_level_metadata_service_5fmsg_2eproto, 16, file_level_enum_descriptors_service_5fmsg_2eproto, file_level_service_descriptors_service_5fmsg_2eproto,
};
// Force running AddDescriptors() at dynamic initialization time.
@@ -5159,6 +5191,398 @@ void PartitionDescription::InternalSwap(PartitionDescription* other) {
}
// ===================================================================
void SysConfigResponse::InitAsDefaultInstance() {
::milvus::proto::service::_SysConfigResponse_default_instance_._instance.get_mutable()->status_ = const_cast< ::milvus::proto::common::Status*>(
::milvus::proto::common::Status::internal_default_instance());
}
class SysConfigResponse::_Internal {
public:
static const ::milvus::proto::common::Status& status(const SysConfigResponse* msg);
};
const ::milvus::proto::common::Status&
SysConfigResponse::_Internal::status(const SysConfigResponse* msg) {
return *msg->status_;
}
void SysConfigResponse::clear_status() {
if (GetArenaNoVirtual() == nullptr && status_ != nullptr) {
delete status_;
}
status_ = nullptr;
}
SysConfigResponse::SysConfigResponse()
: ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
SharedCtor();
// @@protoc_insertion_point(constructor:milvus.proto.service.SysConfigResponse)
}
SysConfigResponse::SysConfigResponse(const SysConfigResponse& from)
: ::PROTOBUF_NAMESPACE_ID::Message(),
_internal_metadata_(nullptr),
keys_(from.keys_),
values_(from.values_) {
_internal_metadata_.MergeFrom(from._internal_metadata_);
if (from.has_status()) {
status_ = new ::milvus::proto::common::Status(*from.status_);
} else {
status_ = nullptr;
}
// @@protoc_insertion_point(copy_constructor:milvus.proto.service.SysConfigResponse)
}
void SysConfigResponse::SharedCtor() {
::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_SysConfigResponse_service_5fmsg_2eproto.base);
status_ = nullptr;
}
SysConfigResponse::~SysConfigResponse() {
// @@protoc_insertion_point(destructor:milvus.proto.service.SysConfigResponse)
SharedDtor();
}
void SysConfigResponse::SharedDtor() {
if (this != internal_default_instance()) delete status_;
}
void SysConfigResponse::SetCachedSize(int size) const {
_cached_size_.Set(size);
}
const SysConfigResponse& SysConfigResponse::default_instance() {
::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_SysConfigResponse_service_5fmsg_2eproto.base);
return *internal_default_instance();
}
void SysConfigResponse::Clear() {
// @@protoc_insertion_point(message_clear_start:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
keys_.Clear();
values_.Clear();
if (GetArenaNoVirtual() == nullptr && status_ != nullptr) {
delete status_;
}
status_ = nullptr;
_internal_metadata_.Clear();
}
#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
const char* SysConfigResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
while (!ctx->Done(&ptr)) {
::PROTOBUF_NAMESPACE_ID::uint32 tag;
ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
CHK_(ptr);
switch (tag >> 3) {
// .milvus.proto.common.Status status = 1;
case 1:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
ptr = ctx->ParseMessage(mutable_status(), ptr);
CHK_(ptr);
} else goto handle_unusual;
continue;
// repeated string keys = 2;
case 2:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
ptr -= 1;
do {
ptr += 1;
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_keys(), ptr, ctx, "milvus.proto.service.SysConfigResponse.keys");
CHK_(ptr);
if (!ctx->DataAvailable(ptr)) break;
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 18);
} else goto handle_unusual;
continue;
// repeated string values = 3;
case 3:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
ptr -= 1;
do {
ptr += 1;
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_values(), ptr, ctx, "milvus.proto.service.SysConfigResponse.values");
CHK_(ptr);
if (!ctx->DataAvailable(ptr)) break;
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 26);
} else goto handle_unusual;
continue;
default: {
handle_unusual:
if ((tag & 7) == 4 || tag == 0) {
ctx->SetLastTag(tag);
goto success;
}
ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
CHK_(ptr != nullptr);
continue;
}
} // switch
} // while
success:
return ptr;
failure:
ptr = nullptr;
goto success;
#undef CHK_
}
#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
bool SysConfigResponse::MergePartialFromCodedStream(
::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
::PROTOBUF_NAMESPACE_ID::uint32 tag;
// @@protoc_insertion_point(parse_start:milvus.proto.service.SysConfigResponse)
for (;;) {
::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
tag = p.first;
if (!p.second) goto handle_unusual;
switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
// .milvus.proto.common.Status status = 1;
case 1: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
input, mutable_status()));
} else {
goto handle_unusual;
}
break;
}
// repeated string keys = 2;
case 2: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
input, this->add_keys()));
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->keys(this->keys_size() - 1).data(),
static_cast<int>(this->keys(this->keys_size() - 1).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
"milvus.proto.service.SysConfigResponse.keys"));
} else {
goto handle_unusual;
}
break;
}
// repeated string values = 3;
case 3: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
input, this->add_values()));
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->values(this->values_size() - 1).data(),
static_cast<int>(this->values(this->values_size() - 1).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
"milvus.proto.service.SysConfigResponse.values"));
} else {
goto handle_unusual;
}
break;
}
default: {
handle_unusual:
if (tag == 0) {
goto success;
}
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
input, tag, _internal_metadata_.mutable_unknown_fields()));
break;
}
}
}
success:
// @@protoc_insertion_point(parse_success:milvus.proto.service.SysConfigResponse)
return true;
failure:
// @@protoc_insertion_point(parse_failure:milvus.proto.service.SysConfigResponse)
return false;
#undef DO_
}
#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
void SysConfigResponse::SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
// @@protoc_insertion_point(serialize_start:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
// .milvus.proto.common.Status status = 1;
if (this->has_status()) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
1, _Internal::status(this), output);
}
// repeated string keys = 2;
for (int i = 0, n = this->keys_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->keys(i).data(), static_cast<int>(this->keys(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.keys");
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
2, this->keys(i), output);
}
// repeated string values = 3;
for (int i = 0, n = this->values_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->values(i).data(), static_cast<int>(this->values(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.values");
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
3, this->values(i), output);
}
if (_internal_metadata_.have_unknown_fields()) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
_internal_metadata_.unknown_fields(), output);
}
// @@protoc_insertion_point(serialize_end:milvus.proto.service.SysConfigResponse)
}
::PROTOBUF_NAMESPACE_ID::uint8* SysConfigResponse::InternalSerializeWithCachedSizesToArray(
::PROTOBUF_NAMESPACE_ID::uint8* target) const {
// @@protoc_insertion_point(serialize_to_array_start:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
// .milvus.proto.common.Status status = 1;
if (this->has_status()) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
InternalWriteMessageToArray(
1, _Internal::status(this), target);
}
// repeated string keys = 2;
for (int i = 0, n = this->keys_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->keys(i).data(), static_cast<int>(this->keys(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.keys");
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
WriteStringToArray(2, this->keys(i), target);
}
// repeated string values = 3;
for (int i = 0, n = this->values_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->values(i).data(), static_cast<int>(this->values(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.values");
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
WriteStringToArray(3, this->values(i), target);
}
if (_internal_metadata_.have_unknown_fields()) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
_internal_metadata_.unknown_fields(), target);
}
// @@protoc_insertion_point(serialize_to_array_end:milvus.proto.service.SysConfigResponse)
return target;
}
size_t SysConfigResponse::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:milvus.proto.service.SysConfigResponse)
size_t total_size = 0;
if (_internal_metadata_.have_unknown_fields()) {
total_size +=
::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
_internal_metadata_.unknown_fields());
}
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
// repeated string keys = 2;
total_size += 1 *
::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->keys_size());
for (int i = 0, n = this->keys_size(); i < n; i++) {
total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
this->keys(i));
}
// repeated string values = 3;
total_size += 1 *
::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->values_size());
for (int i = 0, n = this->values_size(); i < n; i++) {
total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
this->values(i));
}
// .milvus.proto.common.Status status = 1;
if (this->has_status()) {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
*status_);
}
int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
SetCachedSize(cached_size);
return total_size;
}
void SysConfigResponse::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:milvus.proto.service.SysConfigResponse)
GOOGLE_DCHECK_NE(&from, this);
const SysConfigResponse* source =
::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<SysConfigResponse>(
&from);
if (source == nullptr) {
// @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
} else {
// @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.proto.service.SysConfigResponse)
MergeFrom(*source);
}
}
void SysConfigResponse::MergeFrom(const SysConfigResponse& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:milvus.proto.service.SysConfigResponse)
GOOGLE_DCHECK_NE(&from, this);
_internal_metadata_.MergeFrom(from._internal_metadata_);
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
keys_.MergeFrom(from.keys_);
values_.MergeFrom(from.values_);
if (from.has_status()) {
mutable_status()->::milvus::proto::common::Status::MergeFrom(from.status());
}
}
void SysConfigResponse::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:milvus.proto.service.SysConfigResponse)
if (&from == this) return;
Clear();
MergeFrom(from);
}
void SysConfigResponse::CopyFrom(const SysConfigResponse& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:milvus.proto.service.SysConfigResponse)
if (&from == this) return;
Clear();
MergeFrom(from);
}
bool SysConfigResponse::IsInitialized() const {
return true;
}
void SysConfigResponse::InternalSwap(SysConfigResponse* other) {
using std::swap;
_internal_metadata_.Swap(&other->_internal_metadata_);
keys_.InternalSwap(CastToBase(&other->keys_));
values_.InternalSwap(CastToBase(&other->values_));
swap(status_, other->status_);
}
::PROTOBUF_NAMESPACE_ID::Metadata SysConfigResponse::GetMetadata() const {
return GetMetadataStatic();
}
// ===================================================================
void Hits::InitAsDefaultInstance() {
@@ -5911,6 +6335,9 @@ template<> PROTOBUF_NOINLINE ::milvus::proto::service::CollectionDescription* Ar
template<> PROTOBUF_NOINLINE ::milvus::proto::service::PartitionDescription* Arena::CreateMaybeMessage< ::milvus::proto::service::PartitionDescription >(Arena* arena) {
return Arena::CreateInternal< ::milvus::proto::service::PartitionDescription >(arena);
}
template<> PROTOBUF_NOINLINE ::milvus::proto::service::SysConfigResponse* Arena::CreateMaybeMessage< ::milvus::proto::service::SysConfigResponse >(Arena* arena) {
return Arena::CreateInternal< ::milvus::proto::service::SysConfigResponse >(arena);
}
template<> PROTOBUF_NOINLINE ::milvus::proto::service::Hits* Arena::CreateMaybeMessage< ::milvus::proto::service::Hits >(Arena* arena) {
return Arena::CreateInternal< ::milvus::proto::service::Hits >(arena);
}


@@ -50,7 +50,7 @@ struct TableStruct_service_5fmsg_2eproto {
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::AuxillaryParseTableField aux[]
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[15]
static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[16]
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[];
static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[];
@@ -105,6 +105,9 @@ extern StringListResponseDefaultTypeInternal _StringListResponse_default_instanc
class StringResponse;
class StringResponseDefaultTypeInternal;
extern StringResponseDefaultTypeInternal _StringResponse_default_instance_;
class SysConfigResponse;
class SysConfigResponseDefaultTypeInternal;
extern SysConfigResponseDefaultTypeInternal _SysConfigResponse_default_instance_;
} // namespace service
} // namespace proto
} // namespace milvus
@@ -124,6 +127,7 @@ template<> ::milvus::proto::service::QueryResult* Arena::CreateMaybeMessage<::mi
template<> ::milvus::proto::service::RowBatch* Arena::CreateMaybeMessage<::milvus::proto::service::RowBatch>(Arena*);
template<> ::milvus::proto::service::StringListResponse* Arena::CreateMaybeMessage<::milvus::proto::service::StringListResponse>(Arena*);
template<> ::milvus::proto::service::StringResponse* Arena::CreateMaybeMessage<::milvus::proto::service::StringResponse>(Arena*);
template<> ::milvus::proto::service::SysConfigResponse* Arena::CreateMaybeMessage<::milvus::proto::service::SysConfigResponse>(Arena*);
PROTOBUF_NAMESPACE_CLOSE
namespace milvus {
namespace proto {
@@ -2154,6 +2158,178 @@ class PartitionDescription :
};
// -------------------------------------------------------------------
class SysConfigResponse :
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.service.SysConfigResponse) */ {
public:
SysConfigResponse();
virtual ~SysConfigResponse();
SysConfigResponse(const SysConfigResponse& from);
SysConfigResponse(SysConfigResponse&& from) noexcept
: SysConfigResponse() {
*this = ::std::move(from);
}
inline SysConfigResponse& operator=(const SysConfigResponse& from) {
CopyFrom(from);
return *this;
}
inline SysConfigResponse& operator=(SysConfigResponse&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
return GetDescriptor();
}
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
return GetMetadataStatic().descriptor;
}
static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
return GetMetadataStatic().reflection;
}
static const SysConfigResponse& default_instance();
static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY
static inline const SysConfigResponse* internal_default_instance() {
return reinterpret_cast<const SysConfigResponse*>(
&_SysConfigResponse_default_instance_);
}
static constexpr int kIndexInFileMessages =
13;
friend void swap(SysConfigResponse& a, SysConfigResponse& b) {
a.Swap(&b);
}
inline void Swap(SysConfigResponse* other) {
if (other == this) return;
InternalSwap(other);
}
// implements Message ----------------------------------------------
inline SysConfigResponse* New() const final {
return CreateMaybeMessage<SysConfigResponse>(nullptr);
}
SysConfigResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
return CreateMaybeMessage<SysConfigResponse>(arena);
}
void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
void CopyFrom(const SysConfigResponse& from);
void MergeFrom(const SysConfigResponse& from);
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
bool IsInitialized() const final;
size_t ByteSizeLong() const final;
#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
#else
bool MergePartialFromCodedStream(
::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final;
#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
void SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final;
::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray(
::PROTOBUF_NAMESPACE_ID::uint8* target) const final;
int GetCachedSize() const final { return _cached_size_.Get(); }
private:
inline void SharedCtor();
inline void SharedDtor();
void SetCachedSize(int size) const final;
void InternalSwap(SysConfigResponse* other);
friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
return "milvus.proto.service.SysConfigResponse";
}
private:
inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const {
return nullptr;
}
inline void* MaybeArenaPtr() const {
return nullptr;
}
public:
::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
private:
static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_service_5fmsg_2eproto);
return ::descriptor_table_service_5fmsg_2eproto.file_level_metadata[kIndexInFileMessages];
}
public:
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
enum : int {
kKeysFieldNumber = 2,
kValuesFieldNumber = 3,
kStatusFieldNumber = 1,
};
// repeated string keys = 2;
int keys_size() const;
void clear_keys();
const std::string& keys(int index) const;
std::string* mutable_keys(int index);
void set_keys(int index, const std::string& value);
void set_keys(int index, std::string&& value);
void set_keys(int index, const char* value);
void set_keys(int index, const char* value, size_t size);
std::string* add_keys();
void add_keys(const std::string& value);
void add_keys(std::string&& value);
void add_keys(const char* value);
void add_keys(const char* value, size_t size);
const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& keys() const;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_keys();
// repeated string values = 3;
int values_size() const;
void clear_values();
const std::string& values(int index) const;
std::string* mutable_values(int index);
void set_values(int index, const std::string& value);
void set_values(int index, std::string&& value);
void set_values(int index, const char* value);
void set_values(int index, const char* value, size_t size);
std::string* add_values();
void add_values(const std::string& value);
void add_values(std::string&& value);
void add_values(const char* value);
void add_values(const char* value, size_t size);
const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& values() const;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_values();
// .milvus.proto.common.Status status = 1;
bool has_status() const;
void clear_status();
const ::milvus::proto::common::Status& status() const;
::milvus::proto::common::Status* release_status();
::milvus::proto::common::Status* mutable_status();
void set_allocated_status(::milvus::proto::common::Status* status);
// @@protoc_insertion_point(class_scope:milvus.proto.service.SysConfigResponse)
private:
class _Internal;
::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> keys_;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> values_;
::milvus::proto::common::Status* status_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
friend struct ::TableStruct_service_5fmsg_2eproto;
};
// -------------------------------------------------------------------
class Hits :
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.service.Hits) */ {
public:
@@ -2196,7 +2372,7 @@ class Hits :
&_Hits_default_instance_);
}
static constexpr int kIndexInFileMessages =
13;
14;
friend void swap(Hits& a, Hits& b) {
a.Swap(&b);
@@ -2367,7 +2543,7 @@ class QueryResult :
&_QueryResult_default_instance_);
}
static constexpr int kIndexInFileMessages =
14;
15;
friend void swap(QueryResult& a, QueryResult& b) {
a.Swap(&b);
@@ -3880,6 +4056,185 @@ PartitionDescription::statistics() const {
// -------------------------------------------------------------------
// SysConfigResponse
// .milvus.proto.common.Status status = 1;
inline bool SysConfigResponse::has_status() const {
return this != internal_default_instance() && status_ != nullptr;
}
inline const ::milvus::proto::common::Status& SysConfigResponse::status() const {
const ::milvus::proto::common::Status* p = status_;
// @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.status)
return p != nullptr ? *p : *reinterpret_cast<const ::milvus::proto::common::Status*>(
&::milvus::proto::common::_Status_default_instance_);
}
inline ::milvus::proto::common::Status* SysConfigResponse::release_status() {
// @@protoc_insertion_point(field_release:milvus.proto.service.SysConfigResponse.status)
::milvus::proto::common::Status* temp = status_;
status_ = nullptr;
return temp;
}
inline ::milvus::proto::common::Status* SysConfigResponse::mutable_status() {
if (status_ == nullptr) {
auto* p = CreateMaybeMessage<::milvus::proto::common::Status>(GetArenaNoVirtual());
status_ = p;
}
// @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.status)
return status_;
}
inline void SysConfigResponse::set_allocated_status(::milvus::proto::common::Status* status) {
::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaNoVirtual();
if (message_arena == nullptr) {
delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(status_);
}
if (status) {
::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = nullptr;
if (message_arena != submessage_arena) {
status = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
message_arena, status, submessage_arena);
}
} else {
}
status_ = status;
// @@protoc_insertion_point(field_set_allocated:milvus.proto.service.SysConfigResponse.status)
}
// repeated string keys = 2;
inline int SysConfigResponse::keys_size() const {
return keys_.size();
}
inline void SysConfigResponse::clear_keys() {
keys_.Clear();
}
inline const std::string& SysConfigResponse::keys(int index) const {
// @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.keys)
return keys_.Get(index);
}
inline std::string* SysConfigResponse::mutable_keys(int index) {
// @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.keys)
return keys_.Mutable(index);
}
inline void SysConfigResponse::set_keys(int index, const std::string& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.keys)
keys_.Mutable(index)->assign(value);
}
inline void SysConfigResponse::set_keys(int index, std::string&& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.keys)
keys_.Mutable(index)->assign(std::move(value));
}
inline void SysConfigResponse::set_keys(int index, const char* value) {
GOOGLE_DCHECK(value != nullptr);
keys_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::set_keys(int index, const char* value, size_t size) {
keys_.Mutable(index)->assign(
reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:milvus.proto.service.SysConfigResponse.keys)
}
inline std::string* SysConfigResponse::add_keys() {
// @@protoc_insertion_point(field_add_mutable:milvus.proto.service.SysConfigResponse.keys)
return keys_.Add();
}
inline void SysConfigResponse::add_keys(const std::string& value) {
keys_.Add()->assign(value);
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::add_keys(std::string&& value) {
keys_.Add(std::move(value));
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::add_keys(const char* value) {
GOOGLE_DCHECK(value != nullptr);
keys_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::add_keys(const char* value, size_t size) {
keys_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:milvus.proto.service.SysConfigResponse.keys)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
SysConfigResponse::keys() const {
// @@protoc_insertion_point(field_list:milvus.proto.service.SysConfigResponse.keys)
return keys_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
SysConfigResponse::mutable_keys() {
// @@protoc_insertion_point(field_mutable_list:milvus.proto.service.SysConfigResponse.keys)
return &keys_;
}
// repeated string values = 3;
inline int SysConfigResponse::values_size() const {
return values_.size();
}
inline void SysConfigResponse::clear_values() {
values_.Clear();
}
inline const std::string& SysConfigResponse::values(int index) const {
// @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.values)
return values_.Get(index);
}
inline std::string* SysConfigResponse::mutable_values(int index) {
// @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.values)
return values_.Mutable(index);
}
inline void SysConfigResponse::set_values(int index, const std::string& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.values)
values_.Mutable(index)->assign(value);
}
inline void SysConfigResponse::set_values(int index, std::string&& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.values)
values_.Mutable(index)->assign(std::move(value));
}
inline void SysConfigResponse::set_values(int index, const char* value) {
GOOGLE_DCHECK(value != nullptr);
values_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::set_values(int index, const char* value, size_t size) {
values_.Mutable(index)->assign(
reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:milvus.proto.service.SysConfigResponse.values)
}
inline std::string* SysConfigResponse::add_values() {
// @@protoc_insertion_point(field_add_mutable:milvus.proto.service.SysConfigResponse.values)
return values_.Add();
}
inline void SysConfigResponse::add_values(const std::string& value) {
values_.Add()->assign(value);
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::add_values(std::string&& value) {
values_.Add(std::move(value));
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::add_values(const char* value) {
GOOGLE_DCHECK(value != nullptr);
values_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::add_values(const char* value, size_t size) {
values_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:milvus.proto.service.SysConfigResponse.values)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
SysConfigResponse::values() const {
// @@protoc_insertion_point(field_list:milvus.proto.service.SysConfigResponse.values)
return values_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
SysConfigResponse::mutable_values() {
// @@protoc_insertion_point(field_mutable_list:milvus.proto.service.SysConfigResponse.values)
return &values_;
}
// -------------------------------------------------------------------
// Hits
// repeated int64 IDs = 1;
@@ -4152,6 +4507,8 @@ QueryResult::mutable_hits() {
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)


@@ -12,4 +12,4 @@ set(MILVUS_QUERY_SRCS
BruteForceSearch.cpp
)
add_library(milvus_query ${MILVUS_QUERY_SRCS})
target_link_libraries(milvus_query milvus_proto milvus_utils)
target_link_libraries(milvus_query milvus_proto)


@@ -24,22 +24,18 @@ namespace milvus::query {
static std::unique_ptr<VectorPlanNode>
ParseVecNode(Plan* plan, const Json& out_body) {
Assert(out_body.is_object());
// TODO add binary info
auto vec_node = std::make_unique<FloatVectorANNS>();
Assert(out_body.size() == 1);
auto iter = out_body.begin();
std::string field_name = iter.key();
auto& vec_info = iter.value();
Assert(vec_info.is_object());
auto topK = vec_info["topk"];
AssertInfo(topK > 0, "topK must greater than 0");
AssertInfo(topK < 16384, "topK is too large");
vec_node->query_info_.topK_ = topK;
vec_node->query_info_.metric_type_ = vec_info.at("metric_type");
vec_node->query_info_.search_params_ = vec_info.at("params");
vec_node->query_info_.metric_type_ = vec_info["metric_type"];
vec_node->query_info_.search_params_ = vec_info["params"];
vec_node->query_info_.field_id_ = field_name;
vec_node->placeholder_tag_ = vec_info.at("query");
vec_node->placeholder_tag_ = vec_info["query"];
auto tag = vec_node->placeholder_tag_;
AssertInfo(!plan->tag2field_.count(tag), "duplicated placeholder tag");
plan->tag2field_.emplace(tag, field_name);
@@ -60,8 +56,6 @@ to_lower(const std::string& raw) {
return data;
}
template <class...>
constexpr std::false_type always_false{};
template <typename T>
std::unique_ptr<Expr>
ParseRangeNodeImpl(const Schema& schema, const std::string& field_name, const Json& body) {
@@ -69,19 +63,11 @@ ParseRangeNodeImpl(const Schema& schema, const std::string& field_name, const Js
auto data_type = schema[field_name].get_data_type();
expr->data_type_ = data_type;
expr->field_id_ = field_name;
Assert(body.is_object());
for (auto& item : body.items()) {
auto op_name = to_lower(item.key());
AssertInfo(RangeExpr::mapping_.count(op_name), "op(" + op_name + ") not found");
auto op = RangeExpr::mapping_.at(op_name);
if constexpr (std::is_integral_v<T>) {
Assert(item.value().is_number_integer());
} else if constexpr (std::is_floating_point_v<T>) {
Assert(item.value().is_number());
} else {
static_assert(always_false<T>, "unsupported type");
}
T value = item.value();
expr->conditions_.emplace_back(op, value);
}
@@ -97,10 +83,8 @@ ParseRangeNode(const Schema& schema, const Json& out_body) {
auto data_type = schema[field_name].get_data_type();
Assert(!field_is_vector(data_type));
switch (data_type) {
case DataType::BOOL: {
PanicInfo("bool is not supported in Range node");
// return ParseRangeNodeImpl<bool>(schema, field_name, body);
}
case DataType::BOOL:
return ParseRangeNodeImpl<bool>(schema, field_name, body);
case DataType::INT8:
return ParseRangeNodeImpl<int8_t>(schema, field_name, body);
case DataType::INT16:
@@ -125,17 +109,16 @@ CreatePlanImplNaive(const Schema& schema, const std::string& dsl_str) {
nlohmann::json vec_pack;
std::optional<std::unique_ptr<Expr>> predicate;
auto& bool_dsl = dsl.at("bool");
auto& bool_dsl = dsl["bool"];
if (bool_dsl.contains("must")) {
auto& packs = bool_dsl.at("must");
Assert(packs.is_array());
auto& packs = bool_dsl["must"];
for (auto& pack : packs) {
if (pack.contains("vector")) {
auto& out_body = pack.at("vector");
auto& out_body = pack["vector"];
plan->plan_node_ = ParseVecNode(plan.get(), out_body);
} else if (pack.contains("range")) {
AssertInfo(!predicate, "unsupported complex DSL");
auto& out_body = pack.at("range");
auto& out_body = pack["range"];
predicate = ParseRangeNode(schema, out_body);
} else {
PanicInfo("unsupported node");
@@ -143,7 +126,7 @@ CreatePlanImplNaive(const Schema& schema, const std::string& dsl_str) {
}
AssertInfo(plan->plan_node_, "vector node not found");
} else if (bool_dsl.contains("vector")) {
auto& out_body = bool_dsl.at("vector");
auto& out_body = bool_dsl["vector"];
plan->plan_node_ = ParseVecNode(plan.get(), out_body);
Assert(plan->plan_node_);
} else {
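
For reference, CreatePlanImplNaive consumes DSL of the shape below on either side of this change: a top-level bool node that either holds a vector search directly or a must array combining at most one range predicate with the vector search. The field names and parameter values are made-up examples:

```go
// Illustrative DSL for CreatePlanImplNaive; "age", "vec", and all
// parameter values here are invented for the example.
const exampleDSL = `{
  "bool": {
    "must": [
      {"range": {"age": {"GT": 20, "LT": 30}}},
      {"vector": {
        "vec": {
          "topk": 10,
          "metric_type": "L2",
          "params": {"nprobe": 16},
          "query": "$0"
        }
      }}
    ]
  }
}`
```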


@@ -17,7 +17,7 @@ add_library(milvus_segcore SHARED
)
target_link_libraries(milvus_segcore
tbb milvus_utils pthread knowhere log milvus_proto
tbb utils pthread knowhere log milvus_proto
dl backtrace
milvus_common
milvus_query


@@ -19,9 +19,6 @@ NewCollection(const char* collection_proto) {
auto collection = std::make_unique<milvus::segcore::Collection>(proto);
// TODO: delete print
std::cout << "create collection " << collection->get_collection_name() << std::endl;
return (void*)collection.release();
}


@@ -50,187 +50,97 @@ DeleteMarshaledHits(CMarshaledHits c_marshaled_hits) {
}
struct SearchResultPair {
uint64_t id_;
float distance_;
SearchResult* search_result_;
int64_t offset_;
int64_t index_;
int64_t segment_id_;
SearchResultPair(float distance, SearchResult* search_result, int64_t offset, int64_t index)
: distance_(distance), search_result_(search_result), offset_(offset), index_(index) {
SearchResultPair(uint64_t id, float distance, int64_t segment_id)
: id_(id), distance_(distance), segment_id_(segment_id) {
}
bool
operator<(const SearchResultPair& pair) const {
return (distance_ < pair.distance_);
}
void
reset_distance() {
distance_ = search_result_->result_distances_[offset_];
}
};
void
GetResultData(std::vector<std::vector<int64_t>>& search_records,
std::vector<SearchResult*>& search_results,
GetResultData(std::vector<SearchResult*>& search_results,
SearchResult& final_result,
int64_t query_offset,
bool* is_selected,
int64_t topk) {
auto num_segments = search_results.size();
AssertInfo(num_segments > 0, "num segment must greater than 0");
std::map<int, int> iter_loc_peer_result;
std::vector<SearchResultPair> result_pairs;
for (int j = 0; j < num_segments; ++j) {
auto id = search_results[j]->result_ids_[query_offset];
auto distance = search_results[j]->result_distances_[query_offset];
auto search_result = search_results[j];
AssertInfo(search_result != nullptr, "search result must not equal to nullptr");
result_pairs.push_back(SearchResultPair(distance, search_result, query_offset, j));
result_pairs.push_back(SearchResultPair(id, distance, j));
iter_loc_peer_result[j] = query_offset;
}
int64_t loc_offset = query_offset;
AssertInfo(topk > 0, "topK must greater than 0");
for (int i = 0; i < topk; ++i) {
result_pairs[0].reset_distance();
std::sort(result_pairs.begin(), result_pairs.end());
final_result.result_ids_.push_back(result_pairs[0].id_);
final_result.result_distances_.push_back(result_pairs[0].distance_);
for (int i = 1; i < topk; ++i) {
auto segment_id = result_pairs[0].segment_id_;
auto query_offset = ++(iter_loc_peer_result[segment_id]);
auto id = search_results[segment_id]->result_ids_[query_offset];
auto distance = search_results[segment_id]->result_distances_[query_offset];
result_pairs[0] = SearchResultPair(id, distance, segment_id);
std::sort(result_pairs.begin(), result_pairs.end());
auto& result_pair = result_pairs[0];
auto index = result_pair.index_;
is_selected[index] = true;
result_pair.search_result_->result_offsets_.push_back(loc_offset++);
search_records[index].push_back(result_pair.offset_++);
final_result.result_ids_.push_back(result_pairs[0].id_);
final_result.result_distances_.push_back(result_pairs[0].distance_);
}
}
void
ResetSearchResult(std::vector<std::vector<int64_t>>& search_records,
std::vector<SearchResult*>& search_results,
bool* is_selected) {
auto num_segments = search_results.size();
AssertInfo(num_segments > 0, "num segment must greater than 0");
for (int i = 0; i < num_segments; i++) {
if (is_selected[i] == false) {
continue;
}
auto search_result = search_results[i];
AssertInfo(search_result != nullptr, "search result must not equal to nullptr");
std::vector<float> result_distances;
std::vector<int64_t> internal_seg_offsets;
std::vector<int64_t> result_ids;
for (int j = 0; j < search_records[i].size(); j++) {
auto& offset = search_records[i][j];
auto distance = search_result->result_distances_[offset];
auto internal_seg_offset = search_result->internal_seg_offsets_[offset];
auto id = search_result->result_ids_[offset];
result_distances.push_back(distance);
internal_seg_offsets.push_back(internal_seg_offset);
result_ids.push_back(id);
}
search_result->result_distances_ = result_distances;
search_result->internal_seg_offsets_ = internal_seg_offsets;
search_result->result_ids_ = result_ids;
}
}
CStatus
ReduceQueryResults(CQueryResult* c_search_results, int64_t num_segments, bool* is_selected) {
CQueryResult
ReduceQueryResults(CQueryResult* query_results, int64_t num_segments) {
std::vector<SearchResult*> search_results;
for (int i = 0; i < num_segments; ++i) {
search_results.push_back((SearchResult*)c_search_results[i]);
search_results.push_back((SearchResult*)query_results[i]);
}
try {
auto topk = search_results[0]->topK_;
auto num_queries = search_results[0]->num_queries_;
std::vector<std::vector<int64_t>> search_records(num_segments);
auto topk = search_results[0]->topK_;
auto num_queries = search_results[0]->num_queries_;
auto final_result = std::make_unique<SearchResult>();
int64_t query_offset = 0;
for (int j = 0; j < num_queries; ++j) {
GetResultData(search_records, search_results, query_offset, is_selected, topk);
query_offset += topk;
}
ResetSearchResult(search_records, search_results, is_selected);
auto status = CStatus();
status.error_code = Success;
status.error_msg = "";
return status;
} catch (std::exception& e) {
auto status = CStatus();
status.error_code = UnexpectedException;
status.error_msg = strdup(e.what());
return status;
int64_t query_offset = 0;
for (int j = 0; j < num_queries; ++j) {
GetResultData(search_results, *final_result, query_offset, topk);
query_offset += topk;
}
return (CQueryResult)final_result.release();
}
CStatus
ReorganizeQueryResults(CMarshaledHits* c_marshaled_hits,
CMarshaledHits
ReorganizeQueryResults(CQueryResult c_query_result,
CPlan c_plan,
CPlaceholderGroup* c_placeholder_groups,
int64_t num_groups,
CQueryResult* c_search_results,
bool* is_selected,
int64_t num_segments,
CPlan c_plan) {
try {
auto marshaledHits = std::make_unique<MarshaledHits>(num_groups);
auto topk = GetTopK(c_plan);
std::vector<int64_t> num_queries_peer_group;
int64_t total_num_queries = 0;
for (int i = 0; i < num_groups; i++) {
auto num_queries = GetNumOfQueries(c_placeholder_groups[i]);
num_queries_peer_group.push_back(num_queries);
total_num_queries += num_queries;
}
std::vector<float> result_distances(total_num_queries * topk);
std::vector<int64_t> result_ids(total_num_queries * topk);
std::vector<std::vector<char>> row_datas(total_num_queries * topk);
int64_t count = 0;
for (int i = 0; i < num_segments; i++) {
if (is_selected[i] == false) {
continue;
int64_t num_groups) {
auto marshaledHits = std::make_unique<MarshaledHits>(num_groups);
auto search_result = (milvus::engine::QueryResult*)c_query_result;
auto& result_ids = search_result->result_ids_;
auto& result_distances = search_result->result_distances_;
auto topk = GetTopK(c_plan);
int64_t queries_offset = 0;
for (int i = 0; i < num_groups; i++) {
auto num_queries = GetNumOfQueries(c_placeholder_groups[i]);
MarshaledHitsPeerGroup& hits_peer_group = (*marshaledHits).marshaled_hits_[i];
for (int j = 0; j < num_queries; j++) {
auto index = topk * queries_offset++;
milvus::proto::service::Hits hits;
for (int k = index; k < index + topk; k++) {
hits.add_ids(result_ids[k]);
hits.add_scores(result_distances[k]);
}
auto search_result = (SearchResult*)c_search_results[i];
AssertInfo(search_result != nullptr, "search result must not be nullptr");
auto size = search_result->result_offsets_.size();
for (int j = 0; j < size; j++) {
auto loc = search_result->result_offsets_[j];
result_distances[loc] = search_result->result_distances_[j];
row_datas[loc] = search_result->row_data_[j];
result_ids[loc] = search_result->result_ids_[j];
}
count += size;
auto blob = hits.SerializeAsString();
hits_peer_group.hits_.push_back(blob);
hits_peer_group.blob_length_.push_back(blob.size());
}
AssertInfo(count == total_num_queries * topk, "the reduced result size is less than total_num_queries * topk");
int64_t fill_hit_offset = 0;
for (int i = 0; i < num_groups; i++) {
MarshaledHitsPeerGroup& hits_peer_group = (*marshaledHits).marshaled_hits_[i];
for (int j = 0; j < num_queries_peer_group[i]; j++) {
milvus::proto::service::Hits hits;
for (int k = 0; k < topk; k++, fill_hit_offset++) {
hits.add_ids(result_ids[fill_hit_offset]);
hits.add_scores(result_distances[fill_hit_offset]);
auto& row_data = row_datas[fill_hit_offset];
hits.add_row_data(row_data.data(), row_data.size());
}
auto blob = hits.SerializeAsString();
hits_peer_group.hits_.push_back(blob);
hits_peer_group.blob_length_.push_back(blob.size());
}
}
auto status = CStatus();
status.error_code = Success;
status.error_msg = "";
auto marshaled_res = (CMarshaledHits)marshaledHits.release();
*c_marshaled_hits = marshaled_res;
return status;
} catch (std::exception& e) {
auto status = CStatus();
status.error_code = UnexpectedException;
status.error_msg = strdup(e.what());
*c_marshaled_hits = nullptr;
return status;
}
return (CMarshaledHits)marshaledHits.release();
}
int64_t

View File

@ -25,17 +25,14 @@ DeleteMarshaledHits(CMarshaledHits c_marshaled_hits);
int
MergeInto(int64_t num_queries, int64_t topk, float* distances, int64_t* uids, float* new_distances, int64_t* new_uids);
CStatus
ReduceQueryResults(CQueryResult* query_results, int64_t num_segments, bool* is_selected);
CQueryResult
ReduceQueryResults(CQueryResult* query_results, int64_t num_segments);
CStatus
ReorganizeQueryResults(CMarshaledHits* c_marshaled_hits,
CMarshaledHits
ReorganizeQueryResults(CQueryResult query_result,
CPlan c_plan,
CPlaceholderGroup* c_placeholder_groups,
int64_t num_groups,
CQueryResult* c_search_results,
bool* is_selected,
int64_t num_segments,
CPlan c_plan);
int64_t num_groups);
int64_t
GetHitsBlobSize(CMarshaledHits c_marshaled_hits);

View File

@ -155,24 +155,6 @@ Search(CSegmentBase c_segment,
return status;
}
CStatus
FillTargetEntry(CSegmentBase c_segment, CPlan c_plan, CQueryResult c_result) {
auto segment = (milvus::segcore::SegmentBase*)c_segment;
auto plan = (milvus::query::Plan*)c_plan;
auto result = (milvus::engine::QueryResult*)c_result;
auto status = CStatus();
try {
auto res = segment->FillTargetEntry(plan, *result);
status.error_code = Success;
status.error_msg = "";
} catch (std::runtime_error& e) {
status.error_code = UnexpectedException;
status.error_msg = strdup(e.what());
}
return status;
}
//////////////////////////////////////////////////////////////////
int

View File

@ -61,9 +61,6 @@ Search(CSegmentBase c_segment,
int num_groups,
CQueryResult* result);
CStatus
FillTargetEntry(CSegmentBase c_segment, CPlan c_plan, CQueryResult result);
//////////////////////////////////////////////////////////////////
int

View File

@ -17,8 +17,8 @@ set(UTILS_FILES
EasyAssert.cpp
)
add_library( milvus_utils STATIC ${UTILS_FILES} )
add_library( utils STATIC ${UTILS_FILES} )
target_link_libraries(milvus_utils
target_link_libraries(utils
libboost_filesystem.a
libboost_system.a)

View File

@ -11,20 +11,11 @@
#include <iostream>
#include "EasyAssert.h"
// #define BOOST_STACKTRACE_USE_ADDR2LINE
#define BOOST_STACKTRACE_USE_BACKTRACE
#include <boost/stacktrace.hpp>
#include <sstream>
namespace milvus::impl {
std::string
EasyStackTrace() {
auto stack_info = boost::stacktrace::stacktrace();
std::ostringstream ss;
ss << stack_info;
return ss.str();
}
void
EasyAssertInfo(
bool value, std::string_view expr_str, std::string_view filename, int lineno, std::string_view extra_info) {
@ -35,15 +26,11 @@ EasyAssertInfo(
if (!extra_info.empty()) {
info += " => " + std::string(extra_info);
}
throw std::runtime_error(info + "\n" + EasyStackTrace());
auto stack_info = boost::stacktrace::stacktrace();
std::cout << stack_info;
throw std::runtime_error(info);
}
}
[[noreturn]] void
ThrowWithTrace(const std::exception& exception) {
auto err_msg = exception.what() + std::string("\n") + EasyStackTrace();
throw std::runtime_error(err_msg);
}
} // namespace milvus::impl

View File

@ -11,7 +11,6 @@
#pragma once
#include <string_view>
#include <exception>
#include <stdio.h>
#include <stdlib.h>
@ -21,11 +20,7 @@ namespace milvus::impl {
void
EasyAssertInfo(
bool value, std::string_view expr_str, std::string_view filename, int lineno, std::string_view extra_info);
[[noreturn]] void
ThrowWithTrace(const std::exception& exception);
} // namespace milvus::impl
}
#define AssertInfo(expr, info) milvus::impl::EasyAssertInfo(bool(expr), #expr, __FILE__, __LINE__, (info))
#define Assert(expr) AssertInfo((expr), "")

View File

@ -11,13 +11,6 @@
#pragma once
#include "utils/EasyAssert.h"
#define JSON_ASSERT(expr) Assert((expr))
// TODO: dispatch error by type
#define JSON_THROW_USER(e) milvus::impl::ThrowWithTrace((e))
#include "nlohmann/json.hpp"
namespace milvus {

View File

@ -24,6 +24,5 @@ target_link_libraries(all_tests
knowhere
log
pthread
milvus_utils
)
install (TARGETS all_tests DESTINATION unittest)

View File

@ -641,14 +641,8 @@ TEST(CApiTest, Reduce) {
results.push_back(res1);
results.push_back(res2);
bool is_selected[1] = {false};
status = ReduceQueryResults(results.data(), 1, is_selected);
assert(status.error_code == Success);
FillTargetEntry(segment, plan, res1);
void* reorganize_search_result = nullptr;
status = ReorganizeQueryResults(&reorganize_search_result, placeholderGroups.data(), 1, results.data(), is_selected,
1, plan);
assert(status.error_code == Success);
auto reduced_search_result = ReduceQueryResults(results.data(), 2);
auto reorganize_search_result = ReorganizeQueryResults(reduced_search_result, plan, placeholderGroups.data(), 1);
auto hits_blob_size = GetHitsBlobSize(reorganize_search_result);
assert(hits_blob_size > 0);
std::vector<char> hits_blob;
@ -666,6 +660,7 @@ TEST(CApiTest, Reduce) {
DeletePlaceholderGroup(placeholderGroup);
DeleteQueryResult(res1);
DeleteQueryResult(res2);
DeleteQueryResult(reduced_search_result);
DeleteMarshaledHits(reorganize_search_result);
DeleteCollection(collection);
DeleteSegment(segment);

View File

@ -107,83 +107,6 @@ TEST(Expr, Range) {
std::cout << out.dump(4);
}
TEST(Expr, InvalidRange) {
SUCCEED();
using namespace milvus;
using namespace milvus::query;
using namespace milvus::segcore;
std::string dsl_string = R"(
{
"bool": {
"must": [
{
"range": {
"age": {
"GT": 1,
"LT": "100"
}
}
},
{
"vector": {
"fakevec": {
"metric_type": "L2",
"params": {
"nprobe": 10
},
"query": "$0",
"topk": 10
}
}
}
]
}
})";
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("age", DataType::INT32);
ASSERT_ANY_THROW(CreatePlan(*schema, dsl_string));
}
TEST(Expr, InvalidDSL) {
SUCCEED();
using namespace milvus;
using namespace milvus::query;
using namespace milvus::segcore;
std::string dsl_string = R"(
{
"float": {
"must": [
{
"range": {
"age": {
"GT": 1,
"LT": 100
}
}
},
{
"vector": {
"fakevec": {
"metric_type": "L2",
"params": {
"nprobe": 10
},
"query": "$0",
"topk": 10
}
}
}
]
}
})";
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("age", DataType::INT32);
ASSERT_ANY_THROW(CreatePlan(*schema, dsl_string));
}
TEST(Expr, ShowExecutor) {
using namespace milvus::query;
using namespace milvus::segcore;

View File

@ -5,6 +5,7 @@ import (
"log"
"github.com/golang/protobuf/proto"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
@ -85,8 +86,23 @@ func (t *createCollectionTask) Execute() error {
// TODO: initial partition?
PartitionTags: make([]string, 0),
}
err = t.mt.AddCollection(&collection)
if err != nil {
return err
}
return t.mt.AddCollection(&collection)
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: t.req.Timestamp,
EndTimestamp: t.req.Timestamp,
HashValues: []uint32{0},
}
createCollectionMsg := &ms.CreateCollectionMsg{
BaseMsg: baseMsg,
CreateCollectionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, createCollectionMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@ -102,7 +118,7 @@ func (t *dropCollectionTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return Timestamp(t.req.Timestamp), nil
return t.req.Timestamp, nil
}
func (t *dropCollectionTask) Execute() error {
@ -118,7 +134,29 @@ func (t *dropCollectionTask) Execute() error {
collectionID := collectionMeta.ID
return t.mt.DeleteCollection(collectionID)
err = t.mt.DeleteCollection(collectionID)
if err != nil {
return err
}
ts, err := t.Ts()
if err != nil {
return err
}
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: ts,
EndTimestamp: ts,
HashValues: []uint32{0},
}
dropCollectionMsg := &ms.DropCollectionMsg{
BaseMsg: baseMsg,
DropCollectionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, dropCollectionMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@ -134,7 +172,7 @@ func (t *hasCollectionTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return Timestamp(t.req.Timestamp), nil
return t.req.Timestamp, nil
}
func (t *hasCollectionTask) Execute() error {
@ -147,8 +185,8 @@ func (t *hasCollectionTask) Execute() error {
if err == nil {
t.hasCollection = true
}
return nil
}
//////////////////////////////////////////////////////////////////////////
@ -181,6 +219,7 @@ func (t *describeCollectionTask) Execute() error {
t.description.Schema = collection.Schema
return nil
}
//////////////////////////////////////////////////////////////////////////

View File

@ -55,6 +55,7 @@ func TestMaster_CollectionTask(t *testing.T) {
// msgChannel
ProxyTimeTickChannelNames: []string{"proxy1", "proxy2"},
WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
DDChannelNames: []string{"dd1", "dd2"},
InsertChannelNames: []string{"dm0", "dm1"},
K2SChannelNames: []string{"k2s0", "k2s1"},
QueryNodeStatsChannelName: "statistic",

View File

@ -0,0 +1,79 @@
package master
import (
"log"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
type getSysConfigsTask struct {
baseTask
configkv *kv.EtcdKV
req *internalpb.SysConfigRequest
keys []string
values []string
}
func (t *getSysConfigsTask) Type() internalpb.MsgType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
}
func (t *getSysConfigsTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return t.req.Timestamp, nil
}
func (t *getSysConfigsTask) Execute() error {
if t.req == nil {
return errors.New("null request")
}
sc := &SysConfig{kv: t.configkv}
keyMap := make(map[string]bool)
// Load configs with prefix
for _, prefix := range t.req.KeyPrefixes {
prefixKeys, prefixVals, err := sc.GetByPrefix(prefix)
if err != nil {
return errors.Errorf("Load configs by prefix wrong: %s", err.Error())
}
t.keys = append(t.keys, prefixKeys...)
t.values = append(t.values, prefixVals...)
}
for _, key := range t.keys {
keyMap[key] = true
}
// Load specific configs
if len(t.req.Keys) > 0 {
// To clean up duplicated keys
cleanKeys := []string{}
for _, key := range t.req.Keys {
if v, ok := keyMap[key]; !ok || !v {
cleanKeys = append(cleanKeys, key)
keyMap[key] = true
continue
}
log.Println("[GetSysConfigs] Warning: duplicate key:", key)
}
v, err := sc.Get(cleanKeys)
if err != nil {
return errors.Errorf("Load configs wrong: %s", err.Error())
}
t.keys = append(t.keys, cleanKeys...)
t.values = append(t.values, v...)
}
return nil
}

View File

@ -0,0 +1,150 @@
package master
import (
"context"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
)
func TestMaster_ConfigTask(t *testing.T) {
Init()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{Params.EtcdAddress}})
require.Nil(t, err)
_, err = etcdCli.Delete(ctx, "/test/root", clientv3.WithPrefix())
require.Nil(t, err)
Params = ParamTable{
Address: Params.Address,
Port: Params.Port,
EtcdAddress: Params.EtcdAddress,
MetaRootPath: "/test/root",
PulsarAddress: Params.PulsarAddress,
ProxyIDList: []typeutil.UniqueID{1, 2},
WriteNodeIDList: []typeutil.UniqueID{3, 4},
TopicNum: 5,
QueryNodeNum: 3,
SoftTimeTickBarrierInterval: 300,
// segment
SegmentSize: 536870912 / 1024 / 1024,
SegmentSizeFactor: 0.75,
DefaultRecordSize: 1024,
MinSegIDAssignCnt: 1048576 / 1024,
MaxSegIDAssignCnt: Params.MaxSegIDAssignCnt,
SegIDAssignExpiration: 2000,
// msgChannel
ProxyTimeTickChannelNames: []string{"proxy1", "proxy2"},
WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
InsertChannelNames: []string{"dm0", "dm1"},
K2SChannelNames: []string{"k2s0", "k2s1"},
QueryNodeStatsChannelName: "statistic",
MsgChannelSubName: Params.MsgChannelSubName,
}
svr, err := CreateServer(ctx)
require.Nil(t, err)
err = svr.Run(10002)
defer svr.Close()
require.Nil(t, err)
conn, err := grpc.DialContext(ctx, "127.0.0.1:10002", grpc.WithInsecure(), grpc.WithBlock())
require.Nil(t, err)
defer conn.Close()
cli := masterpb.NewMasterClient(conn)
testKeys := []string{
"/etcd/address",
"/master/port",
"/master/proxyidlist",
"/master/segmentthresholdfactor",
"/pulsar/token",
"/reader/stopflag",
"/proxy/timezone",
"/proxy/network/address",
"/proxy/storage/path",
"/storage/accesskey",
}
testVals := []string{
"localhost",
"53100",
"[1 2]",
"0.75",
"eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY",
"-1",
"UTC+8",
"0.0.0.0",
"/var/lib/milvus",
"",
}
sc := SysConfig{kv: svr.kvBase}
sc.InitFromFile(".")
configRequest := &internalpb.SysConfigRequest{
MsgType: internalpb.MsgType_kGetSysConfigs,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Keys: testKeys,
KeyPrefixes: []string{},
}
response, err := cli.GetSysConfigs(ctx, configRequest)
assert.Nil(t, err)
assert.ElementsMatch(t, testKeys, response.Keys)
assert.ElementsMatch(t, testVals, response.Values)
assert.Equal(t, len(response.GetKeys()), len(response.GetValues()))
configRequest = &internalpb.SysConfigRequest{
MsgType: internalpb.MsgType_kGetSysConfigs,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Keys: []string{},
KeyPrefixes: []string{"/master"},
}
response, err = cli.GetSysConfigs(ctx, configRequest)
assert.Nil(t, err)
for i := range response.GetKeys() {
assert.True(t, strings.HasPrefix(response.GetKeys()[i], "/master"))
}
assert.Equal(t, len(response.GetKeys()), len(response.GetValues()))
t.Run("Test duplicate keys and key prefix", func(t *testing.T) {
configRequest.Keys = []string{}
configRequest.KeyPrefixes = []string{"/master"}
resp, err := cli.GetSysConfigs(ctx, configRequest)
require.Nil(t, err)
assert.Equal(t, len(resp.GetKeys()), len(resp.GetValues()))
assert.NotEqual(t, 0, len(resp.GetKeys()))
configRequest.Keys = []string{"/master/port"}
configRequest.KeyPrefixes = []string{"/master"}
respDup, err := cli.GetSysConfigs(ctx, configRequest)
require.Nil(t, err)
assert.Equal(t, len(respDup.GetKeys()), len(respDup.GetValues()))
assert.NotEqual(t, 0, len(respDup.GetKeys()))
assert.Equal(t, len(respDup.GetKeys()), len(resp.GetKeys()))
})
}
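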

View File

@ -0,0 +1,111 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
master: # 21
address: localhost
port: 53100
pulsarmoniterinterval: 1
pulsartopic: "monitor-topic"
proxyidlist: [1, 2]
proxyTimeSyncChannels: ["proxy1", "proxy2"]
proxyTimeSyncSubName: "proxy-topic"
softTimeTickBarrierInterval: 500
writeidlist: [3, 4]
writeTimeSyncChannels: ["write3", "write4"]
writeTimeSyncSubName: "write-topic"
dmTimeSyncChannels: ["dm5", "dm6"]
k2sTimeSyncChannels: ["k2s7", "k2s8"]
defaultSizePerRecord: 1024
minimumAssignSize: 1048576
segmentThreshold: 536870912
segmentExpireDuration: 2000
segmentThresholdFactor: 0.75
querynodenum: 1
writenodenum: 1
statsChannels: "statistic"
etcd: # 4
address: localhost
port: 2379
rootpath: by-dev
segthreshold: 10000
timesync: # 1
interval: 400
storage: # 5
driver: TIKV
address: localhost
port: 2379
accesskey:
secretkey:
pulsar: # 6
authentication: false
user: user-default
token: eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY
address: localhost
port: 6650
topicnum: 128
reader: # 7
clientid: 0
stopflag: -1
readerqueuesize: 10000
searchchansize: 10000
key2segchansize: 10000
topicstart: 0
topicend: 128
writer: # 8
clientid: 0
stopflag: -2
readerqueuesize: 10000
searchbyidchansize: 10000
parallelism: 100
topicstart: 0
topicend: 128
bucket: "zilliz-hz"
proxy: # 21
timezone: UTC+8
proxy_id: 1
numReaderNodes: 2
tsoSaveInterval: 200
timeTickInterval: 200
pulsarTopics:
readerTopicPrefix: "milvusReader"
numReaderTopics: 2
deleteTopic: "milvusDeleter"
queryTopic: "milvusQuery"
resultTopic: "milvusResult"
resultGroup: "milvusResultGroup"
timeTickTopic: "milvusTimeTick"
network:
address: 0.0.0.0
port: 19530
logs:
level: debug
trace.enable: true
path: /tmp/logs
max_log_file_size: 1024MB
log_rotate_num: 0
storage:
path: /var/lib/milvus
auto_flush_interval: 1

View File

@ -271,7 +271,7 @@ func (s *Master) HasPartition(ctx context.Context, in *internalpb.HasPartitionRe
return &servicepb.BoolResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: err.Error(),
Reason: "WaitToFinish failed",
},
Value: t.(*hasPartitionTask).hasPartition,
}, nil
@ -359,6 +359,43 @@ func (s *Master) ShowPartitions(ctx context.Context, in *internalpb.ShowPartitio
return t.(*showPartitionTask).stringListResponse, nil
}
func (s *Master) GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error) {
var t task = &getSysConfigsTask{
req: in,
configkv: s.kvBase,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
keys: []string{},
values: []string{},
}
response := &servicepb.SysConfigResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
},
}
var err = s.scheduler.Enqueue(t)
if err != nil {
response.Status.Reason = "Enqueue failed: " + err.Error()
return response, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
response.Status.Reason = "Get System Config failed: " + err.Error()
return response, nil
}
response.Keys = t.(*getSysConfigsTask).keys
response.Values = t.(*getSysConfigsTask).values
response.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
return response, nil
}
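For reference, the request shape the new endpoint consumes: a minimal sketch matching the fields exercised in the config task test above, assuming a dialed masterpb.MasterClient named cli (illustration only, not part of this change):

configReq := &internalpb.SysConfigRequest{
	MsgType:     internalpb.MsgType_kGetSysConfigs,
	ReqID:       1,
	Timestamp:   11,
	ProxyID:     1,
	Keys:        []string{"/master/port"},
	KeyPrefixes: []string{"/master"},
}
resp, err := cli.GetSysConfigs(ctx, configReq)
// resp.Keys and resp.Values are parallel slices; failures surface in
// resp.Status.Reason rather than as a non-nil transport error.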
//----------------------------------------Internal GRPC Service--------------------------------
func (s *Master) AllocTimestamp(ctx context.Context, request *internalpb.TsoRequest) (*internalpb.TsoResponse, error) {

View File

@ -10,7 +10,6 @@ import (
"sync/atomic"
"time"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
@ -123,6 +122,11 @@ func CreateServer(ctx context.Context) (*Master, error) {
}
tsMsgProducer.SetWriteNodeTtBarrier(writeTimeTickBarrier)
pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDDStream.SetPulsarClient(pulsarAddr)
pulsarDDStream.CreatePulsarProducers(Params.DDChannelNames)
tsMsgProducer.SetDDSyncStream(pulsarDDStream)
pulsarDMStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDMStream.SetPulsarClient(pulsarAddr)
pulsarDMStream.CreatePulsarProducers(Params.InsertChannelNames)
@ -161,7 +165,10 @@ func CreateServer(ctx context.Context) (*Master, error) {
return nil, err
}
m.scheduler = NewDDRequestScheduler(func() (UniqueID, error) { return m.idAllocator.AllocOne() })
m.scheduler = NewDDRequestScheduler(ctx)
m.scheduler.SetDDMsgStream(pulsarDDStream)
m.scheduler.SetIDAllocator(func() (UniqueID, error) { return m.idAllocator.AllocOne() })
m.segmentMgr = NewSegmentManager(metakv,
func() (UniqueID, error) { return m.idAllocator.AllocOne() },
func() (Timestamp, error) { return m.tsoAllocator.AllocOne() },
@ -248,6 +255,11 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
return err
}
s.serverLoopWg.Add(1)
if err := s.scheduler.Start(); err != nil {
return err
}
s.serverLoopWg.Add(1)
go s.grpcLoop(grpcPort)
@ -255,9 +267,6 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
return err
}
s.serverLoopWg.Add(1)
go s.tasksExecutionLoop()
s.serverLoopWg.Add(1)
go s.segmentStatisticsLoop()
@ -270,6 +279,8 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
func (s *Master) stopServerLoop() {
s.timesSyncMsgProducer.Close()
s.serverLoopWg.Done()
s.scheduler.Close()
s.serverLoopWg.Done()
if s.grpcServer != nil {
s.grpcServer.GracefulStop()
@ -337,33 +348,6 @@ func (s *Master) tsLoop() {
}
}
func (s *Master) tasksExecutionLoop() {
defer s.serverLoopWg.Done()
ctx, cancel := context.WithCancel(s.serverLoopCtx)
defer cancel()
for {
select {
case task := <-s.scheduler.reqQueue:
timeStamp, err := (task).Ts()
if err != nil {
log.Println(err)
} else {
if timeStamp < s.scheduler.scheduleTimeStamp {
task.Notify(errors.Errorf("input timestamp = %d, scheduler timestamp = %d", timeStamp, s.scheduler.scheduleTimeStamp))
} else {
s.scheduler.scheduleTimeStamp = timeStamp
err = task.Execute()
task.Notify(err)
}
}
case <-ctx.Done():
log.Print("server is closed, exit task execution loop")
return
}
}
}
func (s *Master) segmentStatisticsLoop() {
defer s.serverLoopWg.Done()
defer s.segmentStatusMsg.Close()

View File

@ -0,0 +1,246 @@
package master
import (
"context"
"log"
"math/rand"
"strconv"
"testing"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
"go.uber.org/zap"
"google.golang.org/grpc"
)
func receiveTimeTickMsg(stream *ms.MsgStream) bool {
for {
result := (*stream).Consume()
if len(result.Msgs) > 0 {
return true
}
}
}
func getTimeTickMsgPack(ttmsgs [][2]uint64) *ms.MsgPack {
msgPack := ms.MsgPack{}
for _, vi := range ttmsgs {
msgPack.Msgs = append(msgPack.Msgs, getTtMsg(internalPb.MsgType_kTimeTick, UniqueID(vi[0]), Timestamp(vi[1])))
}
return &msgPack
}
func TestMaster(t *testing.T) {
Init()
pulsarAddr := Params.PulsarAddress
// Creates server.
ctx, cancel := context.WithCancel(context.Background())
svr, err := CreateServer(ctx)
if err != nil {
log.Print("create server failed", zap.Error(err))
}
if err := svr.Run(int64(Params.Port)); err != nil {
log.Fatal("run server failed", zap.Error(err))
}
proxyTimeTickStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
proxyTimeTickStream.SetPulsarClient(pulsarAddr)
proxyTimeTickStream.CreatePulsarProducers(Params.ProxyTimeTickChannelNames)
proxyTimeTickStream.Start()
writeNodeStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
writeNodeStream.SetPulsarClient(pulsarAddr)
writeNodeStream.CreatePulsarProducers(Params.WriteNodeTimeTickChannelNames)
writeNodeStream.Start()
ddMs := ms.NewPulsarMsgStream(ctx, 1024)
ddMs.SetPulsarClient(pulsarAddr)
ddMs.CreatePulsarConsumers(Params.DDChannelNames, "DDStream", ms.NewUnmarshalDispatcher(), 1024)
ddMs.Start()
dMMs := ms.NewPulsarMsgStream(ctx, 1024)
dMMs.SetPulsarClient(pulsarAddr)
dMMs.CreatePulsarConsumers(Params.InsertChannelNames, "DMStream", ms.NewUnmarshalDispatcher(), 1024)
dMMs.Start()
k2sMs := ms.NewPulsarMsgStream(ctx, 1024)
k2sMs.SetPulsarClient(pulsarAddr)
k2sMs.CreatePulsarConsumers(Params.K2SChannelNames, "K2SStream", ms.NewUnmarshalDispatcher(), 1024)
k2sMs.Start()
ttsoftmsgs := [][2]uint64{
{0, 10},
}
msgSoftPackAddr := getTimeTickMsgPack(ttsoftmsgs)
proxyTimeTickStream.Produce(msgSoftPackAddr)
var dMMsgstream ms.MsgStream = dMMs
assert.True(t, receiveTimeTickMsg(&dMMsgstream))
var ddMsgstream ms.MsgStream = ddMs
assert.True(t, receiveTimeTickMsg(&ddMsgstream))
tthardmsgs := [][2]int{
{3, 10},
}
msghardPackAddr := getMsgPack(tthardmsgs)
writeNodeStream.Produce(msghardPackAddr)
var k2sMsgstream ms.MsgStream = k2sMs
assert.True(t, receiveTimeTickMsg(&k2sMsgstream))
conn, err := grpc.DialContext(ctx, "127.0.0.1:53100", grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
cli := masterpb.NewMasterClient(conn)
sch := schemapb.CollectionSchema{
Name: "name" + strconv.FormatUint(rand.Uint64(), 10),
Description: "test collection",
AutoID: false,
Fields: []*schemapb.FieldSchema{},
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
createCollectionReq := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
st, err := cli.CreateCollection(ctx, &createCollectionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var consumeMsg ms.MsgStream = ddMs
var createCollectionMsg *ms.CreateCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createCollectionMsg = v.(*ms.CreateCollectionMsg)
}
break
}
}
assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)
////////////////////////////CreatePartition////////////////////////
partitionName := "partitionName" + strconv.FormatUint(rand.Uint64(), 10)
createPartitionReq := internalpb.CreatePartitionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
st, err = cli.CreatePartition(ctx, &createPartitionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var createPartitionMsg *ms.CreatePartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createPartitionMsg = v.(*ms.CreatePartitionMsg)
}
break
}
}
assert.Equal(t, createPartitionReq.MsgType, createPartitionMsg.CreatePartitionRequest.MsgType)
assert.Equal(t, createPartitionReq.ReqID, createPartitionMsg.CreatePartitionRequest.ReqID)
assert.Equal(t, createPartitionReq.Timestamp, createPartitionMsg.CreatePartitionRequest.Timestamp)
assert.Equal(t, createPartitionReq.ProxyID, createPartitionMsg.CreatePartitionRequest.ProxyID)
assert.Equal(t, createPartitionReq.PartitionName.CollectionName, createPartitionMsg.CreatePartitionRequest.PartitionName.CollectionName)
assert.Equal(t, createPartitionReq.PartitionName.Tag, createPartitionMsg.CreatePartitionRequest.PartitionName.Tag)
////////////////////////////DropPartition////////////////////////
dropPartitionReq := internalpb.DropPartitionRequest{
MsgType: internalpb.MsgType_kDropPartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
st, err = cli.DropPartition(ctx, &dropPartitionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var dropPartitionMsg *ms.DropPartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropPartitionMsg = v.(*ms.DropPartitionMsg)
}
break
}
}
assert.Equal(t, dropPartitionReq.MsgType, dropPartitionMsg.DropPartitionRequest.MsgType)
assert.Equal(t, dropPartitionReq.ReqID, dropPartitionMsg.DropPartitionRequest.ReqID)
assert.Equal(t, dropPartitionReq.Timestamp, dropPartitionMsg.DropPartitionRequest.Timestamp)
assert.Equal(t, dropPartitionReq.ProxyID, dropPartitionMsg.DropPartitionRequest.ProxyID)
assert.Equal(t, dropPartitionReq.PartitionName.CollectionName, dropPartitionMsg.DropPartitionRequest.PartitionName.CollectionName)
////////////////////////////DropCollection////////////////////////
dropCollectionReq := internalpb.DropCollectionRequest{
MsgType: internalpb.MsgType_kDropCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
CollectionName: &servicepb.CollectionName{CollectionName: sch.Name},
}
st, err = cli.DropCollection(ctx, &dropCollectionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var dropCollectionMsg *ms.DropCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropCollectionMsg = v.(*ms.DropCollectionMsg)
}
break
}
}
assert.Equal(t, dropCollectionReq.MsgType, dropCollectionMsg.DropCollectionRequest.MsgType)
assert.Equal(t, dropCollectionReq.ReqID, dropCollectionMsg.DropCollectionRequest.ReqID)
assert.Equal(t, dropCollectionReq.Timestamp, dropCollectionMsg.DropCollectionRequest.Timestamp)
assert.Equal(t, dropCollectionReq.ProxyID, dropCollectionMsg.DropCollectionRequest.ProxyID)
assert.Equal(t, dropCollectionReq.CollectionName.CollectionName, dropCollectionMsg.DropCollectionRequest.CollectionName.CollectionName)
cancel()
svr.Close()
}

View File

@ -40,6 +40,7 @@ type ParamTable struct {
// msgChannel
ProxyTimeTickChannelNames []string
WriteNodeTimeTickChannelNames []string
DDChannelNames []string
InsertChannelNames []string
K2SChannelNames []string
QueryNodeStatsChannelName string
@ -97,6 +98,7 @@ func (p *ParamTable) Init() {
p.initProxyTimeTickChannelNames()
p.initWriteNodeTimeTickChannelNames()
p.initInsertChannelNames()
p.initDDChannelNames()
p.initK2SChannelNames()
p.initQueryNodeStatsChannelName()
p.initMsgChannelSubName()
@ -382,6 +384,27 @@ func (p *ParamTable) initWriteNodeTimeTickChannelNames() {
p.WriteNodeTimeTickChannelNames = channels
}
func (p *ParamTable) initDDChannelNames() {
ch, err := p.Load("msgChannel.chanNamePrefix.dataDefinition")
if err != nil {
log.Fatal(err)
}
id, err := p.Load("nodeID.queryNodeIDList")
if err != nil {
log.Panicf("load query node id list error, %s", err.Error())
}
ids := strings.Split(id, ",")
channels := make([]string, 0, len(ids))
for _, i := range ids {
_, err := strconv.ParseInt(i, 10, 64)
if err != nil {
log.Panicf("load query node id list error, %s", err.Error())
}
channels = append(channels, ch+"-"+i)
}
p.DDChannelNames = channels
}
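// Worked example (values assumed, not taken from the shipped config): with
// msgChannel.chanNamePrefix.dataDefinition = "dataDefinition" and
// nodeID.queryNodeIDList = "1,2", initDDChannelNames yields
// DDChannelNames = []string{"dataDefinition-1", "dataDefinition-2"}.
// Note that the number of DD channels is keyed off the query node ID list.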
func (p *ParamTable) initInsertChannelNames() {
ch, err := p.Load("msgChannel.chanNamePrefix.insert")
if err != nil {

View File

@ -4,6 +4,7 @@ import (
"errors"
"log"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
@ -66,7 +67,30 @@ func (t *createPartitionTask) Execute() error {
if err != nil {
return err
}
return t.mt.AddPartition(collectionMeta.ID, partitionName.Tag)
ts, err := t.Ts()
if err != nil {
return err
}
err = t.mt.AddPartition(collectionMeta.ID, partitionName.Tag)
if err != nil {
return err
}
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: ts,
EndTimestamp: ts,
HashValues: []uint32{0},
}
createPartitionMsg := &ms.CreatePartitionMsg{
BaseMsg: baseMsg,
CreatePartitionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, createPartitionMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@ -98,7 +122,29 @@ func (t *dropPartitionTask) Execute() error {
return err
}
return t.mt.DeletePartition(collectionMeta.ID, partitionName.Tag)
err = t.mt.DeletePartition(collectionMeta.ID, partitionName.Tag)
if err != nil {
return err
}
ts, err := t.Ts()
if err != nil {
return err
}
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: ts,
EndTimestamp: ts,
HashValues: []uint32{0},
}
dropPartitionMsg := &ms.DropPartitionMsg{
BaseMsg: baseMsg,
DropPartitionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, dropPartitionMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@ -132,6 +178,7 @@ func (t *hasPartitionTask) Execute() error {
t.hasPartition = t.mt.HasPartition(collectionMeta.ID, partitionName.Tag)
return nil
}
//////////////////////////////////////////////////////////////////////////
@ -168,6 +215,7 @@ func (t *describePartitionTask) Execute() error {
t.description = &description
return nil
}
//////////////////////////////////////////////////////////////////////////
@ -208,4 +256,5 @@ func (t *showPartitionTask) Execute() error {
t.stringListResponse = &stringListResponse
return nil
}

View File

@ -1,17 +1,36 @@
package master
import (
"context"
"log"
"github.com/zilliztech/milvus-distributed/internal/errors"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
)
//type ddRequestScheduler interface {}
//type ddReqFIFOScheduler struct {}
type ddRequestScheduler struct {
ctx context.Context
cancel context.CancelFunc
globalIDAllocator func() (UniqueID, error)
reqQueue chan task
scheduleTimeStamp Timestamp
ddMsgStream ms.MsgStream
}
func NewDDRequestScheduler(allocGlobalID func() (UniqueID, error)) *ddRequestScheduler {
func NewDDRequestScheduler(ctx context.Context) *ddRequestScheduler {
const channelSize = 1024
ctx2, cancel := context.WithCancel(ctx)
rs := ddRequestScheduler{
globalIDAllocator: allocGlobalID,
reqQueue: make(chan task, channelSize),
ctx: ctx2,
cancel: cancel,
reqQueue: make(chan task, channelSize),
}
return &rs
}
@ -20,3 +39,51 @@ func (rs *ddRequestScheduler) Enqueue(task task) error {
rs.reqQueue <- task
return nil
}
func (rs *ddRequestScheduler) SetIDAllocator(allocGlobalID func() (UniqueID, error)) {
rs.globalIDAllocator = allocGlobalID
}
func (rs *ddRequestScheduler) SetDDMsgStream(ddStream ms.MsgStream) {
rs.ddMsgStream = ddStream
}
func (rs *ddRequestScheduler) scheduleLoop() {
for {
select {
case task := <-rs.reqQueue:
err := rs.schedule(task)
if err != nil {
log.Println(err)
}
case <-rs.ctx.Done():
log.Print("server is closed, exit task execution loop")
return
}
}
}
func (rs *ddRequestScheduler) schedule(t task) error {
timeStamp, err := t.Ts()
if err != nil {
log.Println(err)
return err
}
if timeStamp < rs.scheduleTimeStamp {
t.Notify(errors.Errorf("input timestamp = %d, scheduler timestamp = %d", timeStamp, rs.scheduleTimeStamp))
} else {
rs.scheduleTimeStamp = timeStamp
err = t.Execute()
t.Notify(err)
}
return nil
}
func (rs *ddRequestScheduler) Start() error {
go rs.scheduleLoop()
return nil
}
func (rs *ddRequestScheduler) Close() {
rs.cancel()
}
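Putting the new lifecycle together: a minimal wiring sketch, assuming a Pulsar-backed ms.MsgStream (pulsarDDStream) and a global ID allocator (idAllocator) are already initialized, mirroring the scheduler tests below:

scheduler := NewDDRequestScheduler(ctx)
scheduler.SetDDMsgStream(pulsarDDStream)
scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
if err := scheduler.Start(); err != nil { // spawns scheduleLoop in a goroutine
	log.Fatal(err)
}
defer scheduler.Close() // cancels the loop's context

// Tasks flow through the same FIFO queue; schedule() runs Execute(), which
// now also broadcasts the DD message, then calls Notify() with its result.
// someDDTask stands for any task implementation above, e.g. a createCollectionTask.
if err := scheduler.Enqueue(someDDTask); err != nil {
	log.Println(err)
}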

View File

@ -0,0 +1,342 @@
package master
import (
"context"
"math/rand"
"strconv"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/kv"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"go.etcd.io/etcd/clientv3"
)
func TestMaster_Scheduler_Collection(t *testing.T) {
Init()
etcdAddress := Params.EtcdAddress
kvRootPath := Params.MetaRootPath
pulsarAddr := Params.PulsarAddress
producerChannels := []string{"ddstream"}
consumerChannels := []string{"ddstream"}
consumerSubName := "substream"
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
assert.Nil(t, err)
etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")
meta, err := NewMetaTable(etcdKV)
assert.Nil(t, err)
defer meta.client.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDDStream.SetPulsarClient(pulsarAddr)
pulsarDDStream.CreatePulsarProducers(producerChannels)
pulsarDDStream.Start()
defer pulsarDDStream.Close()
consumeMs := ms.NewPulsarMsgStream(ctx, 1024)
consumeMs.SetPulsarClient(pulsarAddr)
consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, ms.NewUnmarshalDispatcher(), 1024)
consumeMs.Start()
defer consumeMs.Close()
idAllocator := NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "gid"))
err = idAllocator.Initialize()
assert.Nil(t, err)
scheduler := NewDDRequestScheduler(ctx)
scheduler.SetDDMsgStream(pulsarDDStream)
scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
scheduler.Start()
defer scheduler.Close()
rand.Seed(time.Now().Unix())
sch := schemapb.CollectionSchema{
Name: "name" + strconv.FormatUint(rand.Uint64(), 10),
Description: "string",
AutoID: true,
Fields: nil,
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
////////////////////////////CreateCollection////////////////////////
createCollectionReq := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
var createCollectionTask task = &createCollectionTask{
req: &createCollectionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(createCollectionTask)
assert.Nil(t, err)
err = createCollectionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var consumeMsg ms.MsgStream = consumeMs
var createCollectionMsg *ms.CreateCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createCollectionMsg = v.(*ms.CreateCollectionMsg)
}
break
}
}
assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)
////////////////////////////DropCollection////////////////////////
dropCollectionReq := internalpb.DropCollectionRequest{
MsgType: internalpb.MsgType_kDropCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
CollectionName: &servicepb.CollectionName{CollectionName: sch.Name},
}
var dropCollectionTask task = &dropCollectionTask{
req: &dropCollectionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(dropCollectionTask)
assert.Nil(t, err)
err = dropCollectionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var dropCollectionMsg *ms.DropCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropCollectionMsg = v.(*ms.DropCollectionMsg)
}
break
}
}
assert.Equal(t, dropCollectionReq.MsgType, dropCollectionMsg.DropCollectionRequest.MsgType)
assert.Equal(t, dropCollectionReq.ReqID, dropCollectionMsg.DropCollectionRequest.ReqID)
assert.Equal(t, dropCollectionReq.Timestamp, dropCollectionMsg.DropCollectionRequest.Timestamp)
assert.Equal(t, dropCollectionReq.ProxyID, dropCollectionMsg.DropCollectionRequest.ProxyID)
assert.Equal(t, dropCollectionReq.CollectionName.CollectionName, dropCollectionMsg.DropCollectionRequest.CollectionName.CollectionName)
}
func TestMaster_Scheduler_Partition(t *testing.T) {
Init()
etcdAddress := Params.EtcdAddress
kvRootPath := Params.MetaRootPath
pulsarAddr := Params.PulsarAddress
producerChannels := []string{"ddstream"}
consumerChannels := []string{"ddstream"}
consumerSubName := "substream"
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
assert.Nil(t, err)
etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")
meta, err := NewMetaTable(etcdKV)
assert.Nil(t, err)
defer meta.client.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDDStream.SetPulsarClient(pulsarAddr)
pulsarDDStream.CreatePulsarProducers(producerChannels)
pulsarDDStream.Start()
defer pulsarDDStream.Close()
consumeMs := ms.NewPulsarMsgStream(ctx, 1024)
consumeMs.SetPulsarClient(pulsarAddr)
consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, ms.NewUnmarshalDispatcher(), 1024)
consumeMs.Start()
defer consumeMs.Close()
idAllocator := NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "gid"))
err = idAllocator.Initialize()
assert.Nil(t, err)
scheduler := NewDDRequestScheduler(ctx)
scheduler.SetDDMsgStream(pulsarDDStream)
scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
scheduler.Start()
defer scheduler.Close()
rand.Seed(time.Now().Unix())
sch := schemapb.CollectionSchema{
Name: "name" + strconv.FormatUint(rand.Uint64(), 10),
Description: "string",
AutoID: true,
Fields: nil,
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
////////////////////////////CreateCollection////////////////////////
createCollectionReq := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
var createCollectionTask task = &createCollectionTask{
req: &createCollectionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(createCollectionTask)
assert.Nil(t, err)
err = createCollectionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var consumeMsg ms.MsgStream = consumeMs
var createCollectionMsg *ms.CreateCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createCollectionMsg = v.(*ms.CreateCollectionMsg)
}
break
}
}
assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)
////////////////////////////CreatePartition////////////////////////
partitionName := "partitionName" + strconv.FormatUint(rand.Uint64(), 10)
createPartitionReq := internalpb.CreatePartitionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
var createPartitionTask task = &createPartitionTask{
req: &createPartitionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(createPartitionTask)
assert.Nil(t, err)
err = createPartitionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var createPartitionMsg *ms.CreatePartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createPartitionMsg = v.(*ms.CreatePartitionMsg)
}
break
}
}
assert.Equal(t, createPartitionReq.MsgType, createPartitionMsg.CreatePartitionRequest.MsgType)
assert.Equal(t, createPartitionReq.ReqID, createPartitionMsg.CreatePartitionRequest.ReqID)
assert.Equal(t, createPartitionReq.Timestamp, createPartitionMsg.CreatePartitionRequest.Timestamp)
assert.Equal(t, createPartitionReq.ProxyID, createPartitionMsg.CreatePartitionRequest.ProxyID)
assert.Equal(t, createPartitionReq.PartitionName.CollectionName, createPartitionMsg.CreatePartitionRequest.PartitionName.CollectionName)
assert.Equal(t, createPartitionReq.PartitionName.Tag, createPartitionMsg.CreatePartitionRequest.PartitionName.Tag)
////////////////////////////DropPartition////////////////////////
dropPartitionReq := internalpb.DropPartitionRequest{
MsgType: internalpb.MsgType_kDropPartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
var dropPartitionTask task = &dropPartitionTask{
req: &dropPartitionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(dropPartitionTask)
assert.Nil(t, err)
err = dropPartitionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var dropPartitionMsg *ms.DropPartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropPartitionMsg = v.(*ms.DropPartitionMsg)
}
break
}
}
assert.Equal(t, dropPartitionReq.MsgType, dropPartitionMsg.DropPartitionRequest.MsgType)
assert.Equal(t, dropPartitionReq.ReqID, dropPartitionMsg.DropPartitionRequest.ReqID)
assert.Equal(t, dropPartitionReq.Timestamp, dropPartitionMsg.DropPartitionRequest.Timestamp)
assert.Equal(t, dropPartitionReq.ProxyID, dropPartitionMsg.DropPartitionRequest.ProxyID)
assert.Equal(t, dropPartitionReq.PartitionName.CollectionName, dropPartitionMsg.DropPartitionRequest.PartitionName.CollectionName)
}

View File

@ -0,0 +1,117 @@
package master
import (
"fmt"
"log"
"os"
"path"
"path/filepath"
"strings"
"github.com/spf13/viper"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
)
type SysConfig struct {
kv *kv.EtcdKV
}
// InitFromFile loads every config file under filePath and stores the entries in etcd.
func (conf *SysConfig) InitFromFile(filePath string) error {
memConfigs, err := conf.getConfigFiles(filePath)
if err != nil {
return errors.Errorf("[Init SysConfig] %s\n", err.Error())
}
for _, memConfig := range memConfigs {
if err := conf.saveToEtcd(memConfig, "config"); err != nil {
return errors.Errorf("[Init SysConfig] %s\n", err.Error())
}
}
return nil
}
func (conf *SysConfig) GetByPrefix(keyPrefix string) (keys []string, values []string, err error) {
realPrefix := path.Join("config", strings.ToLower(keyPrefix))
keys, values, err = conf.kv.LoadWithPrefix(realPrefix)
if err != nil {
return nil, nil, err
}
for index := range keys {
keys[index] = strings.Replace(keys[index], conf.kv.GetPath("config"), "", 1)
}
log.Println("Loaded", len(keys), "pairs of configs with prefix", keyPrefix)
return keys, values, nil
}
// Get returns the config values for the given keys.
func (conf *SysConfig) Get(keys []string) ([]string, error) {
var keysToLoad []string
for i := range keys {
keysToLoad = append(keysToLoad, path.Join("config", strings.ToLower(keys[i])))
}
values, err := conf.kv.MultiLoad(keysToLoad)
if err != nil {
return nil, err
}
return values, nil
}
func (conf *SysConfig) getConfigFiles(filePath string) ([]*viper.Viper, error) {
var vipers []*viper.Viper
err := filepath.Walk(filePath,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// all names
if !info.IsDir() && filepath.Ext(path) == ".yaml" {
log.Println("Config files ", info.Name())
currentConf := viper.New()
currentConf.SetConfigFile(path)
if err := currentConf.ReadInConfig(); err != nil {
log.Panic("Config file error: ", err)
}
vipers = append(vipers, currentConf)
}
return nil
})
if err != nil {
return nil, err
}
if len(vipers) == 0 {
return nil, errors.Errorf("There are no config files in the path `%s`.\n", filePath)
}
return vipers, nil
}
func (conf *SysConfig) saveToEtcd(memConfig *viper.Viper, secondRootPath string) error {
configMaps := map[string]string{}
allKeys := memConfig.AllKeys()
for _, key := range allKeys {
etcdKey := strings.ReplaceAll(key, ".", "/")
etcdKey = path.Join(secondRootPath, etcdKey)
val := memConfig.Get(key)
if val == nil {
configMaps[etcdKey] = ""
continue
}
configMaps[etcdKey] = fmt.Sprintf("%v", val)
}
if err := conf.kv.MultiSave(configMaps); err != nil {
return err
}
return nil
}
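End to end, the intended call sequence is: load every *.yaml under a directory into etcd beneath the "config" prefix, then read values back by exact key or by prefix. A sketch assuming an *kv.EtcdKV named configKV, as in Test_SysConfig below:

sc := SysConfig{kv: configKV}
if err := sc.InitFromFile("../../configs"); err != nil { // walks the dir, saves each file
	log.Fatal(err)
}
vals, err := sc.Get([]string{"/master/port", "/etcd/address"}) // exact keys, lower-cased internally
if err != nil {
	log.Fatal(err)
}
keys, prefixVals, err := sc.GetByPrefix("/master") // all keys under /master, prefix stripped back off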

View File

@ -0,0 +1,209 @@
package master
import (
"context"
"fmt"
"log"
"path"
"strings"
"testing"
"time"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.etcd.io/etcd/clientv3"
)
func Test_SysConfig(t *testing.T) {
Init()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{Params.EtcdAddress},
DialTimeout: 5 * time.Second,
})
require.Nil(t, err)
_, err = cli.Delete(ctx, "/test/root", clientv3.WithPrefix())
require.Nil(t, err)
rootPath := "/test/root"
configKV := kv.NewEtcdKV(cli, rootPath)
defer configKV.Close()
sc := SysConfig{kv: configKV}
require.Equal(t, rootPath, sc.kv.GetPath("."))
t.Run("tests on contig_test.yaml", func(t *testing.T) {
err = sc.InitFromFile(".")
require.Nil(t, err)
testKeys := []string{
"/etcd/address",
"/master/port",
"/master/proxyidlist",
"/master/segmentthresholdfactor",
"/pulsar/token",
"/reader/stopflag",
"/proxy/timezone",
"/proxy/network/address",
"/proxy/storage/path",
"/storage/accesskey",
}
testVals := []string{
"localhost",
"53100",
"[1 2]",
"0.75",
"eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY",
"-1",
"UTC+8",
"0.0.0.0",
"/var/lib/milvus",
"",
}
vals, err := sc.Get(testKeys)
assert.Nil(t, err)
for i := range testVals {
assert.Equal(t, testVals[i], vals[i])
}
keys, vals, err := sc.GetByPrefix("/master")
assert.Nil(t, err)
for i := range keys {
assert.True(t, strings.HasPrefix(keys[i], "/master/"))
}
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, 21, len(keys))
// Test get all configs
keys, vals, err = sc.GetByPrefix("/")
assert.Nil(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, 73, len(vals))
// Test get configs with prefix not exist
keys, vals, err = sc.GetByPrefix("/config")
assert.Nil(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, 0, len(keys))
assert.Equal(t, 0, len(vals))
_, _, err = sc.GetByPrefix("//././../../../../../..//../")
assert.Nil(t, err)
_, _, err = sc.GetByPrefix("/master/./address")
assert.Nil(t, err)
_, _, err = sc.GetByPrefix(".")
assert.Nil(t, err)
_, _, err = sc.GetByPrefix("\\")
assert.Nil(t, err)
})
t.Run("getConfigFiles", func(t *testing.T) {
filePath := "../../configs"
vipers, err := sc.getConfigFiles(filePath)
assert.Nil(t, err)
assert.NotNil(t, vipers[0])
filePath = "/path/not/exists"
_, err = sc.getConfigFiles(filePath)
assert.NotNil(t, err)
log.Println(err)
})
t.Run("Test saveToEtcd Normal", func(t *testing.T) {
_, err = cli.Delete(ctx, "/test/root/config", clientv3.WithPrefix())
require.Nil(t, err)
v := viper.New()
v.Set("a.suba1", "v1")
v.Set("a.suba2", "v2")
v.Set("a.suba3.subsuba1", "v3")
v.Set("a.suba3.subsuba2", "v4")
secondRootPath := "config"
err := sc.saveToEtcd(v, secondRootPath)
assert.Nil(t, err)
value, err := sc.kv.Load(path.Join(secondRootPath, "a/suba1"))
assert.Nil(t, err)
assert.Equal(t, "v1", value)
value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba2"))
assert.Nil(t, err)
assert.Equal(t, "v2", value)
value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba3/subsuba1"))
assert.Nil(t, err)
assert.Equal(t, "v3", value)
value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba3/subsuba2"))
assert.Nil(t, err)
assert.Equal(t, "v4", value)
keys, values, err := sc.kv.LoadWithPrefix(path.Join(secondRootPath, "a"))
assert.Nil(t, err)
assert.Equal(t, 4, len(keys))
assert.Equal(t, 4, len(values))
assert.ElementsMatch(t, []string{
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba1"),
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba2"),
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba3/subsuba1"),
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba3/subsuba2"),
}, keys)
assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)
keys = []string{
"/a/suba1",
"/a/suba2",
"/a/suba3/subsuba1",
"/a/suba3/subsuba2",
}
values, err = sc.Get(keys)
assert.Nil(t, err)
assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)
keysAfter, values, err := sc.GetByPrefix("/a")
fmt.Println(keysAfter)
assert.Nil(t, err)
assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)
assert.ElementsMatch(t, keys, keysAfter)
})
t.Run("Test saveToEtcd Different value types", func(t *testing.T) {
v := viper.New()
v.Set("string", "string")
v.Set("number", 1)
v.Set("nil", nil)
v.Set("float", 1.2)
v.Set("intslice", []int{100, 200})
v.Set("stringslice", []string{"a", "b"})
v.Set("stringmapstring", map[string]string{"k1": "1", "k2": "2"})
secondRootPath := "test_save_to_etcd_different_value_types"
err := sc.saveToEtcd(v, secondRootPath)
require.Nil(t, err)
keys, values, err := sc.kv.LoadWithPrefix(path.Join("/", secondRootPath))
assert.Nil(t, err)
assert.Equal(t, 7, len(keys))
assert.Equal(t, 7, len(values))
assert.ElementsMatch(t, []string{
path.Join(sc.kv.GetPath(secondRootPath), "nil"),
path.Join(sc.kv.GetPath(secondRootPath), "string"),
path.Join(sc.kv.GetPath(secondRootPath), "number"),
path.Join(sc.kv.GetPath(secondRootPath), "float"),
path.Join(sc.kv.GetPath(secondRootPath), "intslice"),
path.Join(sc.kv.GetPath(secondRootPath), "stringslice"),
path.Join(sc.kv.GetPath(secondRootPath), "stringmapstring"),
}, keys)
assert.ElementsMatch(t, []string{"", "string", "1", "1.2", "[100 200]", "[a b]", "map[k1:1 k2:2]"}, values)
})
}
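The saveToEtcd tests above hinge on one convention: viper's dotted keys are flattened into slash-separated etcd paths under the second root. A minimal sketch of that flattening, assuming only the spf13/viper API (flattenKey is a hypothetical helper, not the committed method):

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

// flattenKey is a hypothetical helper: viper separates nested keys with ".",
// while the etcd layout asserted in the tests above uses "/".
func flattenKey(viperKey string) string {
	return strings.ReplaceAll(viperKey, ".", "/")
}

func main() {
	v := viper.New()
	v.Set("a.suba3.subsuba1", "v3")
	for _, k := range v.AllKeys() {
		// prints "a/suba3/subsuba1 = v3", matching the keys loaded back above
		fmt.Println(flattenKey(k), "=", v.GetString(k))
	}
}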


@@ -81,13 +81,18 @@ func receiveMsg(stream *ms.MsgStream) []uint64 {
func TestStream_PulsarMsgStream_TimeTick(t *testing.T) {
Init()
pulsarAddress := Params.PulsarAddress
producerChannels := []string{"proxyTtBarrier"}
consumerChannels := []string{"proxyTtBarrier"}
consumerSubName := "proxyTtBarrier"
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
proxyTtInputStream, proxyTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
producerChannels := []string{"proxyDMTtBarrier"}
consumerChannels := []string{"proxyDMTtBarrier"}
consumerSubName := "proxyDMTtBarrier"
proxyDMTtInputStream, proxyDMTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
producerChannels = []string{"proxyDDTtBarrier"}
consumerChannels = []string{"proxyDDTtBarrier"}
consumerSubName = "proxyDDTtBarrier"
proxyDDTtInputStream, proxyDDTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
producerChannels = []string{"writeNodeBarrier"}
consumerChannels = []string{"writeNodeBarrier"}
@@ -97,14 +102,20 @@ func TestStream_PulsarMsgStream_TimeTick(t *testing.T) {
timeSyncProducer, _ := NewTimeSyncMsgProducer(ctx)
timeSyncProducer.SetProxyTtBarrier(&TestTickBarrier{ctx: ctx})
timeSyncProducer.SetWriteNodeTtBarrier(&TestTickBarrier{ctx: ctx})
timeSyncProducer.SetDMSyncStream(*proxyTtInputStream)
timeSyncProducer.SetDMSyncStream(*proxyDMTtInputStream)
timeSyncProducer.SetDDSyncStream(*proxyDDTtInputStream)
timeSyncProducer.SetK2sSyncStream(*writeNodeInputStream)
(*proxyTtOutputStream).Start()
(*proxyDMTtOutputStream).Start()
(*proxyDDTtOutputStream).Start()
(*writeNodeOutputStream).Start()
timeSyncProducer.Start()
expected := []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
result1 := receiveMsg(proxyTtOutputStream)
result1 := receiveMsg(proxyDMTtOutputStream)
assert.Equal(t, expected, result1)
expected = []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
result1 = receiveMsg(proxyDDTtOutputStream)
assert.Equal(t, expected, result1)
expected = []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
result2 := receiveMsg(writeNodeOutputStream)
assert.Equal(t, expected, result2)


@@ -15,7 +15,8 @@ type timeSyncMsgProducer struct {
//hardTimeTickBarrier
writeNodeTtBarrier TimeTickBarrier
dmSyncStream ms.MsgStream // insert & delete
ddSyncStream ms.MsgStream // insert & delete
dmSyncStream ms.MsgStream
k2sSyncStream ms.MsgStream
ctx context.Context
@@ -34,6 +35,9 @@ func (syncMsgProducer *timeSyncMsgProducer) SetProxyTtBarrier(proxyTtBarrier Tim
func (syncMsgProducer *timeSyncMsgProducer) SetWriteNodeTtBarrier(writeNodeTtBarrier TimeTickBarrier) {
syncMsgProducer.writeNodeTtBarrier = writeNodeTtBarrier
}
func (syncMsgProducer *timeSyncMsgProducer) SetDDSyncStream(ddSync ms.MsgStream) {
syncMsgProducer.ddSyncStream = ddSync
}
func (syncMsgProducer *timeSyncMsgProducer) SetDMSyncStream(dmSync ms.MsgStream) {
syncMsgProducer.dmSyncStream = dmSync
@@ -43,7 +47,7 @@ func (syncMsgProducer *timeSyncMsgProducer) SetK2sSyncStream(k2sSync ms.MsgStrea
syncMsgProducer.k2sSyncStream = k2sSync
}
func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier, stream ms.MsgStream) error {
func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier, streams []ms.MsgStream) error {
for {
select {
case <-syncMsgProducer.ctx.Done():
@@ -72,7 +76,9 @@ func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier
TimeTickMsg: timeTickResult,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
err = stream.Broadcast(&msgPack)
for _, stream := range streams {
err = stream.Broadcast(&msgPack)
}
if err != nil {
return err
}
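One subtlety in the loop above: err is reassigned on every iteration, so only the last stream's Broadcast result reaches the error check below it. A fail-fast variant (a sketch, not the committed code; broadcastAll is a hypothetical helper over the ms.MsgStream interface already in scope):

// broadcastAll stops at the first failing stream instead of letting a
// later success mask an earlier Broadcast error.
func broadcastAll(streams []ms.MsgStream, pack *ms.MsgPack) error {
	for _, stream := range streams {
		if err := stream.Broadcast(pack); err != nil {
			return err
		}
	}
	return nil
}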
@@ -91,16 +97,17 @@ func (syncMsgProducer *timeSyncMsgProducer) Start() error {
return err
}
go syncMsgProducer.broadcastMsg(syncMsgProducer.proxyTtBarrier, syncMsgProducer.dmSyncStream)
go syncMsgProducer.broadcastMsg(syncMsgProducer.writeNodeTtBarrier, syncMsgProducer.k2sSyncStream)
go syncMsgProducer.broadcastMsg(syncMsgProducer.proxyTtBarrier, []ms.MsgStream{syncMsgProducer.dmSyncStream, syncMsgProducer.ddSyncStream})
go syncMsgProducer.broadcastMsg(syncMsgProducer.writeNodeTtBarrier, []ms.MsgStream{syncMsgProducer.k2sSyncStream})
return nil
}
func (syncMsgProducer *timeSyncMsgProducer) Close() {
syncMsgProducer.proxyTtBarrier.Close()
syncMsgProducer.writeNodeTtBarrier.Close()
syncMsgProducer.ddSyncStream.Close()
syncMsgProducer.dmSyncStream.Close()
syncMsgProducer.k2sSyncStream.Close()
syncMsgProducer.cancel()
syncMsgProducer.proxyTtBarrier.Close()
syncMsgProducer.writeNodeTtBarrier.Close()
}

internal/msgstream/msg.go (new file, 593 lines)

@@ -0,0 +1,593 @@
package msgstream
import (
"github.com/golang/protobuf/proto"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
type MsgType = internalPb.MsgType
type TsMsg interface {
BeginTs() Timestamp
EndTs() Timestamp
Type() MsgType
HashKeys() []uint32
Marshal(TsMsg) ([]byte, error)
Unmarshal([]byte) (TsMsg, error)
}
type BaseMsg struct {
BeginTimestamp Timestamp
EndTimestamp Timestamp
HashValues []uint32
}
func (bm *BaseMsg) BeginTs() Timestamp {
return bm.BeginTimestamp
}
func (bm *BaseMsg) EndTs() Timestamp {
return bm.EndTimestamp
}
func (bm *BaseMsg) HashKeys() []uint32 {
return bm.HashValues
}
/////////////////////////////////////////Insert//////////////////////////////////////////
type InsertMsg struct {
BaseMsg
internalPb.InsertRequest
}
func (it *InsertMsg) Type() MsgType {
return it.MsgType
}
func (it *InsertMsg) Marshal(input TsMsg) ([]byte, error) {
insertMsg := input.(*InsertMsg)
insertRequest := &insertMsg.InsertRequest
mb, err := proto.Marshal(insertRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (it *InsertMsg) Unmarshal(input []byte) (TsMsg, error) {
insertRequest := internalPb.InsertRequest{}
err := proto.Unmarshal(input, &insertRequest)
if err != nil {
return nil, err
}
insertMsg := &InsertMsg{InsertRequest: insertRequest}
for _, timestamp := range insertMsg.Timestamps {
insertMsg.BeginTimestamp = timestamp
insertMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range insertMsg.Timestamps {
if timestamp > insertMsg.EndTimestamp {
insertMsg.EndTimestamp = timestamp
}
if timestamp < insertMsg.BeginTimestamp {
insertMsg.BeginTimestamp = timestamp
}
}
return insertMsg, nil
}
/////////////////////////////////////////Delete//////////////////////////////////////////
type DeleteMsg struct {
BaseMsg
internalPb.DeleteRequest
}
func (dt *DeleteMsg) Type() MsgType {
return dt.MsgType
}
func (dt *DeleteMsg) Marshal(input TsMsg) ([]byte, error) {
deleteTask := input.(*DeleteMsg)
deleteRequest := &deleteTask.DeleteRequest
mb, err := proto.Marshal(deleteRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dt *DeleteMsg) Unmarshal(input []byte) (TsMsg, error) {
deleteRequest := internalPb.DeleteRequest{}
err := proto.Unmarshal(input, &deleteRequest)
if err != nil {
return nil, err
}
deleteMsg := &DeleteMsg{DeleteRequest: deleteRequest}
for _, timestamp := range deleteMsg.Timestamps {
deleteMsg.BeginTimestamp = timestamp
deleteMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range deleteMsg.Timestamps {
if timestamp > deleteMsg.EndTimestamp {
deleteMsg.EndTimestamp = timestamp
}
if timestamp < deleteMsg.BeginTimestamp {
deleteMsg.BeginTimestamp = timestamp
}
}
return deleteMsg, nil
}
/////////////////////////////////////////Search//////////////////////////////////////////
type SearchMsg struct {
BaseMsg
internalPb.SearchRequest
}
func (st *SearchMsg) Type() MsgType {
return st.MsgType
}
func (st *SearchMsg) Marshal(input TsMsg) ([]byte, error) {
searchTask := input.(*SearchMsg)
searchRequest := &searchTask.SearchRequest
mb, err := proto.Marshal(searchRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (st *SearchMsg) Unmarshal(input []byte) (TsMsg, error) {
searchRequest := internalPb.SearchRequest{}
err := proto.Unmarshal(input, &searchRequest)
if err != nil {
return nil, err
}
searchMsg := &SearchMsg{SearchRequest: searchRequest}
searchMsg.BeginTimestamp = searchMsg.Timestamp
searchMsg.EndTimestamp = searchMsg.Timestamp
return searchMsg, nil
}
/////////////////////////////////////////SearchResult//////////////////////////////////////////
type SearchResultMsg struct {
BaseMsg
internalPb.SearchResult
}
func (srt *SearchResultMsg) Type() MsgType {
return srt.MsgType
}
func (srt *SearchResultMsg) Marshal(input TsMsg) ([]byte, error) {
searchResultTask := input.(*SearchResultMsg)
searchResultRequest := &searchResultTask.SearchResult
mb, err := proto.Marshal(searchResultRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (srt *SearchResultMsg) Unmarshal(input []byte) (TsMsg, error) {
searchResultRequest := internalPb.SearchResult{}
err := proto.Unmarshal(input, &searchResultRequest)
if err != nil {
return nil, err
}
searchResultMsg := &SearchResultMsg{SearchResult: searchResultRequest}
searchResultMsg.BeginTimestamp = searchResultMsg.Timestamp
searchResultMsg.EndTimestamp = searchResultMsg.Timestamp
return searchResultMsg, nil
}
/////////////////////////////////////////TimeTick//////////////////////////////////////////
type TimeTickMsg struct {
BaseMsg
internalPb.TimeTickMsg
}
func (tst *TimeTickMsg) Type() MsgType {
return tst.MsgType
}
func (tst *TimeTickMsg) Marshal(input TsMsg) ([]byte, error) {
timeTickTask := input.(*TimeTickMsg)
timeTick := &timeTickTask.TimeTickMsg
mb, err := proto.Marshal(timeTick)
if err != nil {
return nil, err
}
return mb, nil
}
func (tst *TimeTickMsg) Unmarshal(input []byte) (TsMsg, error) {
timeTickMsg := internalPb.TimeTickMsg{}
err := proto.Unmarshal(input, &timeTickMsg)
if err != nil {
return nil, err
}
timeTick := &TimeTickMsg{TimeTickMsg: timeTickMsg}
timeTick.BeginTimestamp = timeTick.Timestamp
timeTick.EndTimestamp = timeTick.Timestamp
return timeTick, nil
}
/////////////////////////////////////////QueryNodeSegStats//////////////////////////////////////////
type QueryNodeSegStatsMsg struct {
BaseMsg
internalPb.QueryNodeSegStats
}
func (qs *QueryNodeSegStatsMsg) Type() MsgType {
return qs.MsgType
}
func (qs *QueryNodeSegStatsMsg) Marshal(input TsMsg) ([]byte, error) {
queryNodeSegStatsTask := input.(*QueryNodeSegStatsMsg)
queryNodeSegStats := &queryNodeSegStatsTask.QueryNodeSegStats
mb, err := proto.Marshal(queryNodeSegStats)
if err != nil {
return nil, err
}
return mb, nil
}
func (qs *QueryNodeSegStatsMsg) Unmarshal(input []byte) (TsMsg, error) {
queryNodeSegStats := internalPb.QueryNodeSegStats{}
err := proto.Unmarshal(input, &queryNodeSegStats)
if err != nil {
return nil, err
}
queryNodeSegStatsMsg := &QueryNodeSegStatsMsg{QueryNodeSegStats: queryNodeSegStats}
return queryNodeSegStatsMsg, nil
}
///////////////////////////////////////////Key2Seg//////////////////////////////////////////
//type Key2SegMsg struct {
// BaseMsg
// internalPb.Key2SegMsg
//}
//
//func (k2st *Key2SegMsg) Type() MsgType {
// return
//}
/////////////////////////////////////////CreateCollection//////////////////////////////////////////
type CreateCollectionMsg struct {
BaseMsg
internalPb.CreateCollectionRequest
}
func (cc *CreateCollectionMsg) Type() MsgType {
return cc.MsgType
}
func (cc *CreateCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
createCollectionMsg := input.(*CreateCollectionMsg)
createCollectionRequest := &createCollectionMsg.CreateCollectionRequest
mb, err := proto.Marshal(createCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (cc *CreateCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
createCollectionRequest := internalPb.CreateCollectionRequest{}
err := proto.Unmarshal(input, &createCollectionRequest)
if err != nil {
return nil, err
}
createCollectionMsg := &CreateCollectionMsg{CreateCollectionRequest: createCollectionRequest}
createCollectionMsg.BeginTimestamp = createCollectionMsg.Timestamp
createCollectionMsg.EndTimestamp = createCollectionMsg.Timestamp
return createCollectionMsg, nil
}
/////////////////////////////////////////DropCollection//////////////////////////////////////////
type DropCollectionMsg struct {
BaseMsg
internalPb.DropCollectionRequest
}
func (dc *DropCollectionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DropCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
dropCollectionMsg := input.(*DropCollectionMsg)
dropCollectionRequest := &dropCollectionMsg.DropCollectionRequest
mb, err := proto.Marshal(dropCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DropCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
dropCollectionRequest := internalPb.DropCollectionRequest{}
err := proto.Unmarshal(input, &dropCollectionRequest)
if err != nil {
return nil, err
}
dropCollectionMsg := &DropCollectionMsg{DropCollectionRequest: dropCollectionRequest}
dropCollectionMsg.BeginTimestamp = dropCollectionMsg.Timestamp
dropCollectionMsg.EndTimestamp = dropCollectionMsg.Timestamp
return dropCollectionMsg, nil
}
/////////////////////////////////////////HasCollection//////////////////////////////////////////
type HasCollectionMsg struct {
BaseMsg
internalPb.HasCollectionRequest
}
func (hc *HasCollectionMsg) Type() MsgType {
return hc.MsgType
}
func (hc *HasCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
hasCollectionMsg := input.(*HasCollectionMsg)
hasCollectionRequest := &hasCollectionMsg.HasCollectionRequest
mb, err := proto.Marshal(hasCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (hc *HasCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
hasCollectionRequest := internalPb.HasCollectionRequest{}
err := proto.Unmarshal(input, &hasCollectionRequest)
if err != nil {
return nil, err
}
hasCollectionMsg := &HasCollectionMsg{HasCollectionRequest: hasCollectionRequest}
hasCollectionMsg.BeginTimestamp = hasCollectionMsg.Timestamp
hasCollectionMsg.EndTimestamp = hasCollectionMsg.Timestamp
return hasCollectionMsg, nil
}
/////////////////////////////////////////DescribeCollection//////////////////////////////////////////
type DescribeCollectionMsg struct {
BaseMsg
internalPb.DescribeCollectionRequest
}
func (dc *DescribeCollectionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DescribeCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
describeCollectionMsg := input.(*DescribeCollectionMsg)
describeCollectionRequest := &describeCollectionMsg.DescribeCollectionRequest
mb, err := proto.Marshal(describeCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DescribeCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
describeCollectionRequest := internalPb.DescribeCollectionRequest{}
err := proto.Unmarshal(input, &describeCollectionRequest)
if err != nil {
return nil, err
}
describeCollectionMsg := &DescribeCollectionMsg{DescribeCollectionRequest: describeCollectionRequest}
describeCollectionMsg.BeginTimestamp = describeCollectionMsg.Timestamp
describeCollectionMsg.EndTimestamp = describeCollectionMsg.Timestamp
return describeCollectionMsg, nil
}
/////////////////////////////////////////ShowCollection//////////////////////////////////////////
type ShowCollectionMsg struct {
BaseMsg
internalPb.ShowCollectionRequest
}
func (sc *ShowCollectionMsg) Type() MsgType {
return sc.MsgType
}
func (sc *ShowCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
showCollectionMsg := input.(*ShowCollectionMsg)
showCollectionRequest := &showCollectionMsg.ShowCollectionRequest
mb, err := proto.Marshal(showCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (sc *ShowCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
showCollectionRequest := internalPb.ShowCollectionRequest{}
err := proto.Unmarshal(input, &showCollectionRequest)
if err != nil {
return nil, err
}
showCollectionMsg := &ShowCollectionMsg{ShowCollectionRequest: showCollectionRequest}
showCollectionMsg.BeginTimestamp = showCollectionMsg.Timestamp
showCollectionMsg.EndTimestamp = showCollectionMsg.Timestamp
return showCollectionMsg, nil
}
/////////////////////////////////////////CreatePartition//////////////////////////////////////////
type CreatePartitionMsg struct {
BaseMsg
internalPb.CreatePartitionRequest
}
func (cc *CreatePartitionMsg) Type() MsgType {
return cc.MsgType
}
func (cc *CreatePartitionMsg) Marshal(input TsMsg) ([]byte, error) {
createPartitionMsg := input.(*CreatePartitionMsg)
createPartitionRequest := &createPartitionMsg.CreatePartitionRequest
mb, err := proto.Marshal(createPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (cc *CreatePartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
createPartitionRequest := internalPb.CreatePartitionRequest{}
err := proto.Unmarshal(input, &createPartitionRequest)
if err != nil {
return nil, err
}
createPartitionMsg := &CreatePartitionMsg{CreatePartitionRequest: createPartitionRequest}
createPartitionMsg.BeginTimestamp = createPartitionMsg.Timestamp
createPartitionMsg.EndTimestamp = createPartitionMsg.Timestamp
return createPartitionMsg, nil
}
/////////////////////////////////////////DropPartition//////////////////////////////////////////
type DropPartitionMsg struct {
BaseMsg
internalPb.DropPartitionRequest
}
func (dc *DropPartitionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DropPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
dropPartitionMsg := input.(*DropPartitionMsg)
dropPartitionRequest := &dropPartitionMsg.DropPartitionRequest
mb, err := proto.Marshal(dropPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DropPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
dropPartitionRequest := internalPb.DropPartitionRequest{}
err := proto.Unmarshal(input, &dropPartitionRequest)
if err != nil {
return nil, err
}
dropPartitionMsg := &DropPartitionMsg{DropPartitionRequest: dropPartitionRequest}
dropPartitionMsg.BeginTimestamp = dropPartitionMsg.Timestamp
dropPartitionMsg.EndTimestamp = dropPartitionMsg.Timestamp
return dropPartitionMsg, nil
}
/////////////////////////////////////////HasPartition//////////////////////////////////////////
type HasPartitionMsg struct {
BaseMsg
internalPb.HasPartitionRequest
}
func (hc *HasPartitionMsg) Type() MsgType {
return hc.MsgType
}
func (hc *HasPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
hasPartitionMsg := input.(*HasPartitionMsg)
hasPartitionRequest := &hasPartitionMsg.HasPartitionRequest
mb, err := proto.Marshal(hasPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (hc *HasPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
hasPartitionRequest := internalPb.HasPartitionRequest{}
err := proto.Unmarshal(input, &hasPartitionRequest)
if err != nil {
return nil, err
}
hasPartitionMsg := &HasPartitionMsg{HasPartitionRequest: hasPartitionRequest}
hasPartitionMsg.BeginTimestamp = hasPartitionMsg.Timestamp
hasPartitionMsg.EndTimestamp = hasPartitionMsg.Timestamp
return hasPartitionMsg, nil
}
/////////////////////////////////////////DescribePartition//////////////////////////////////////////
type DescribePartitionMsg struct {
BaseMsg
internalPb.DescribePartitionRequest
}
func (dc *DescribePartitionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DescribePartitionMsg) Marshal(input TsMsg) ([]byte, error) {
describePartitionMsg := input.(*DescribePartitionMsg)
describePartitionRequest := &describePartitionMsg.DescribePartitionRequest
mb, err := proto.Marshal(describePartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DescribePartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
describePartitionRequest := internalPb.DescribePartitionRequest{}
err := proto.Unmarshal(input, &describePartitionRequest)
if err != nil {
return nil, err
}
describePartitionMsg := &DescribePartitionMsg{DescribePartitionRequest: describePartitionRequest}
describePartitionMsg.BeginTimestamp = describePartitionMsg.Timestamp
describePartitionMsg.EndTimestamp = describePartitionMsg.Timestamp
return describePartitionMsg, nil
}
/////////////////////////////////////////ShowPartition//////////////////////////////////////////
type ShowPartitionMsg struct {
BaseMsg
internalPb.ShowPartitionRequest
}
func (sc *ShowPartitionMsg) Type() MsgType {
return sc.MsgType
}
func (sc *ShowPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
showPartitionMsg := input.(*ShowPartitionMsg)
showPartitionRequest := &showPartitionMsg.ShowPartitionRequest
mb, err := proto.Marshal(showPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (sc *ShowPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
showPartitionRequest := internalPb.ShowPartitionRequest{}
err := proto.Unmarshal(input, &showPartitionRequest)
if err != nil {
return nil, err
}
showPartitionMsg := &ShowPartitionMsg{ShowPartitionRequest: showPartitionRequest}
showPartitionMsg.BeginTimestamp = showPartitionMsg.Timestamp
showPartitionMsg.EndTimestamp = showPartitionMsg.Timestamp
return showPartitionMsg, nil
}
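A minimal round-trip sketch for one of the types above, assuming only the fields this file already touches (MsgType and Timestamp on internalPb.TimeTickMsg); error handling is elided for brevity:

package msgstream

import (
	"fmt"

	internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

func ExampleTimeTickRoundTrip() {
	in := &TimeTickMsg{
		TimeTickMsg: internalPb.TimeTickMsg{
			MsgType:   internalPb.MsgType_kTimeTick,
			Timestamp: 42,
		},
	}
	raw, _ := in.Marshal(in)    // serializes the embedded protobuf
	out, _ := in.Unmarshal(raw) // restores it and derives Begin/End timestamps
	fmt.Println(out.BeginTs(), out.EndTs())
	// Output: 42 42
}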


@@ -5,7 +5,6 @@ import (
"log"
"reflect"
"sync"
"time"
"github.com/apache/pulsar-client-go/pulsar"
"github.com/golang/protobuf/proto"
@@ -70,22 +69,11 @@ func (ms *PulsarMsgStream) SetPulsarClient(address string) {
func (ms *PulsarMsgStream) CreatePulsarProducers(channels []string) {
for i := 0; i < len(channels); i++ {
fn := func() error {
pp, err := (*ms.client).CreateProducer(pulsar.ProducerOptions{Topic: channels[i]})
if err != nil {
return err
}
if pp == nil {
return errors.New("pulsar is not ready, producer is nil")
}
ms.producers = append(ms.producers, &pp)
return nil
}
err := Retry(10, time.Millisecond*200, fn)
pp, err := (*ms.client).CreateProducer(pulsar.ProducerOptions{Topic: channels[i]})
if err != nil {
errMsg := "Failed to create producer " + channels[i] + ", error = " + err.Error()
panic(errMsg)
log.Printf("Failed to create querynode producer %s, error = %v", channels[i], err)
}
ms.producers = append(ms.producers, &pp)
}
}
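Note that the rewritten loop above logs a creation failure but still appends pp, so ms.producers can end up holding the result of a failed CreateProducer call; the consumer loop in the next hunk has the same shape. A defensive sketch (not the committed code) would skip the append:

for i := 0; i < len(channels); i++ {
	pp, err := (*ms.client).CreateProducer(pulsar.ProducerOptions{Topic: channels[i]})
	if err != nil {
		log.Printf("Failed to create producer %s, error = %v", channels[i], err)
		continue // do not append a producer from a failed creation
	}
	ms.producers = append(ms.producers, &pp)
}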
@@ -95,29 +83,18 @@ func (ms *PulsarMsgStream) CreatePulsarConsumers(channels []string,
pulsarBufSize int64) {
ms.unmarshal = unmarshal
for i := 0; i < len(channels); i++ {
fn := func() error {
receiveChannel := make(chan pulsar.ConsumerMessage, pulsarBufSize)
pc, err := (*ms.client).Subscribe(pulsar.ConsumerOptions{
Topic: channels[i],
SubscriptionName: subName,
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
MessageChannel: receiveChannel,
})
if err != nil {
return err
}
if pc == nil {
return errors.New("pulsar is not ready, consumer is nil")
}
ms.consumers = append(ms.consumers, &pc)
return nil
}
err := Retry(10, time.Millisecond*200, fn)
receiveChannel := make(chan pulsar.ConsumerMessage, pulsarBufSize)
pc, err := (*ms.client).Subscribe(pulsar.ConsumerOptions{
Topic: channels[i],
SubscriptionName: subName,
Type: pulsar.KeyShared,
SubscriptionInitialPosition: pulsar.SubscriptionPositionEarliest,
MessageChannel: receiveChannel,
})
if err != nil {
errMsg := "Failed to create consumer " + channels[i] + ", error = " + err.Error()
panic(errMsg)
log.Printf("Failed to subscribe topic, error = %v", err)
}
ms.consumers = append(ms.consumers, &pc)
}
}


@@ -1,32 +0,0 @@
package msgstream
import (
"log"
"time"
)
// Reference: https://blog.cyeam.com/golang/2018/08/27/retry
func Retry(attempts int, sleep time.Duration, fn func() error) error {
if err := fn(); err != nil {
if s, ok := err.(InterruptError); ok {
return s.error
}
if attempts--; attempts > 0 {
log.Printf("retry func error: %s. attempts #%d after %s.", err.Error(), attempts, sleep)
time.Sleep(sleep)
return Retry(attempts, 2*sleep, fn)
}
return err
}
return nil
}
type InterruptError struct {
error
}
func NoRetryError(err error) InterruptError {
return InterruptError{err}
}
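For context on what this hunk removes: callers wrapped flaky setup steps in Retry to get capped, exponentially backed-off retries. A sketch of a typical call site under that assumption (connectPulsar is hypothetical):

// Retry up to 10 times, starting at 200ms and doubling the sleep each
// attempt, exactly as the deleted helper above implemented.
err := Retry(10, 200*time.Millisecond, func() error {
	return connectPulsar() // hypothetical flaky operation
})
if err != nil {
	log.Printf("giving up after retries: %v", err)
}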


@@ -1,263 +0,0 @@
package msgstream
import (
"github.com/golang/protobuf/proto"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
type MsgType = internalPb.MsgType
type TsMsg interface {
BeginTs() Timestamp
EndTs() Timestamp
Type() MsgType
HashKeys() []uint32
Marshal(TsMsg) ([]byte, error)
Unmarshal([]byte) (TsMsg, error)
}
type BaseMsg struct {
BeginTimestamp Timestamp
EndTimestamp Timestamp
HashValues []uint32
}
func (bm *BaseMsg) BeginTs() Timestamp {
return bm.BeginTimestamp
}
func (bm *BaseMsg) EndTs() Timestamp {
return bm.EndTimestamp
}
func (bm *BaseMsg) HashKeys() []uint32 {
return bm.HashValues
}
/////////////////////////////////////////Insert//////////////////////////////////////////
type InsertMsg struct {
BaseMsg
internalPb.InsertRequest
}
func (it *InsertMsg) Type() MsgType {
return it.MsgType
}
func (it *InsertMsg) Marshal(input TsMsg) ([]byte, error) {
insertMsg := input.(*InsertMsg)
insertRequest := &insertMsg.InsertRequest
mb, err := proto.Marshal(insertRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (it *InsertMsg) Unmarshal(input []byte) (TsMsg, error) {
insertRequest := internalPb.InsertRequest{}
err := proto.Unmarshal(input, &insertRequest)
if err != nil {
return nil, err
}
insertMsg := &InsertMsg{InsertRequest: insertRequest}
for _, timestamp := range insertMsg.Timestamps {
insertMsg.BeginTimestamp = timestamp
insertMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range insertMsg.Timestamps {
if timestamp > insertMsg.EndTimestamp {
insertMsg.EndTimestamp = timestamp
}
if timestamp < insertMsg.BeginTimestamp {
insertMsg.BeginTimestamp = timestamp
}
}
return insertMsg, nil
}
/////////////////////////////////////////Delete//////////////////////////////////////////
type DeleteMsg struct {
BaseMsg
internalPb.DeleteRequest
}
func (dt *DeleteMsg) Type() MsgType {
return dt.MsgType
}
func (dt *DeleteMsg) Marshal(input TsMsg) ([]byte, error) {
deleteTask := input.(*DeleteMsg)
deleteRequest := &deleteTask.DeleteRequest
mb, err := proto.Marshal(deleteRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dt *DeleteMsg) Unmarshal(input []byte) (TsMsg, error) {
deleteRequest := internalPb.DeleteRequest{}
err := proto.Unmarshal(input, &deleteRequest)
if err != nil {
return nil, err
}
deleteMsg := &DeleteMsg{DeleteRequest: deleteRequest}
for _, timestamp := range deleteMsg.Timestamps {
deleteMsg.BeginTimestamp = timestamp
deleteMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range deleteMsg.Timestamps {
if timestamp > deleteMsg.EndTimestamp {
deleteMsg.EndTimestamp = timestamp
}
if timestamp < deleteMsg.BeginTimestamp {
deleteMsg.BeginTimestamp = timestamp
}
}
return deleteMsg, nil
}
/////////////////////////////////////////Search//////////////////////////////////////////
type SearchMsg struct {
BaseMsg
internalPb.SearchRequest
}
func (st *SearchMsg) Type() MsgType {
return st.MsgType
}
func (st *SearchMsg) Marshal(input TsMsg) ([]byte, error) {
searchTask := input.(*SearchMsg)
searchRequest := &searchTask.SearchRequest
mb, err := proto.Marshal(searchRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (st *SearchMsg) Unmarshal(input []byte) (TsMsg, error) {
searchRequest := internalPb.SearchRequest{}
err := proto.Unmarshal(input, &searchRequest)
if err != nil {
return nil, err
}
searchMsg := &SearchMsg{SearchRequest: searchRequest}
searchMsg.BeginTimestamp = searchMsg.Timestamp
searchMsg.EndTimestamp = searchMsg.Timestamp
return searchMsg, nil
}
/////////////////////////////////////////SearchResult//////////////////////////////////////////
type SearchResultMsg struct {
BaseMsg
internalPb.SearchResult
}
func (srt *SearchResultMsg) Type() MsgType {
return srt.MsgType
}
func (srt *SearchResultMsg) Marshal(input TsMsg) ([]byte, error) {
searchResultTask := input.(*SearchResultMsg)
searchResultRequest := &searchResultTask.SearchResult
mb, err := proto.Marshal(searchResultRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (srt *SearchResultMsg) Unmarshal(input []byte) (TsMsg, error) {
searchResultRequest := internalPb.SearchResult{}
err := proto.Unmarshal(input, &searchResultRequest)
if err != nil {
return nil, err
}
searchResultMsg := &SearchResultMsg{SearchResult: searchResultRequest}
searchResultMsg.BeginTimestamp = searchResultMsg.Timestamp
searchResultMsg.EndTimestamp = searchResultMsg.Timestamp
return searchResultMsg, nil
}
/////////////////////////////////////////TimeTick//////////////////////////////////////////
type TimeTickMsg struct {
BaseMsg
internalPb.TimeTickMsg
}
func (tst *TimeTickMsg) Type() MsgType {
return tst.MsgType
}
func (tst *TimeTickMsg) Marshal(input TsMsg) ([]byte, error) {
timeTickTask := input.(*TimeTickMsg)
timeTick := &timeTickTask.TimeTickMsg
mb, err := proto.Marshal(timeTick)
if err != nil {
return nil, err
}
return mb, nil
}
func (tst *TimeTickMsg) Unmarshal(input []byte) (TsMsg, error) {
timeTickMsg := internalPb.TimeTickMsg{}
err := proto.Unmarshal(input, &timeTickMsg)
if err != nil {
return nil, err
}
timeTick := &TimeTickMsg{TimeTickMsg: timeTickMsg}
timeTick.BeginTimestamp = timeTick.Timestamp
timeTick.EndTimestamp = timeTick.Timestamp
return timeTick, nil
}
/////////////////////////////////////////QueryNodeSegStats//////////////////////////////////////////
type QueryNodeSegStatsMsg struct {
BaseMsg
internalPb.QueryNodeSegStats
}
func (qs *QueryNodeSegStatsMsg) Type() MsgType {
return qs.MsgType
}
func (qs *QueryNodeSegStatsMsg) Marshal(input TsMsg) ([]byte, error) {
queryNodeSegStatsTask := input.(*QueryNodeSegStatsMsg)
queryNodeSegStats := &queryNodeSegStatsTask.QueryNodeSegStats
mb, err := proto.Marshal(queryNodeSegStats)
if err != nil {
return nil, err
}
return mb, nil
}
func (qs *QueryNodeSegStatsMsg) Unmarshal(input []byte) (TsMsg, error) {
queryNodeSegStats := internalPb.QueryNodeSegStats{}
err := proto.Unmarshal(input, &queryNodeSegStats)
if err != nil {
return nil, err
}
queryNodeSegStatsMsg := &QueryNodeSegStatsMsg{QueryNodeSegStats: queryNodeSegStats}
return queryNodeSegStatsMsg, nil
}
///////////////////////////////////////////Key2Seg//////////////////////////////////////////
//type Key2SegMsg struct {
// BaseMsg
// internalPb.Key2SegMsg
//}
//
//func (k2st *Key2SegMsg) Type() MsgType {
// return
//}


@@ -30,6 +30,11 @@ func (dispatcher *UnmarshalDispatcher) addDefaultMsgTemplates() {
searchMsg := SearchMsg{}
searchResultMsg := SearchResultMsg{}
timeTickMsg := TimeTickMsg{}
createCollectionMsg := CreateCollectionMsg{}
dropCollectionMsg := DropCollectionMsg{}
createPartitionMsg := CreatePartitionMsg{}
dropPartitionMsg := DropPartitionMsg{}
queryNodeSegStatsMsg := QueryNodeSegStatsMsg{}
dispatcher.tempMap = make(map[internalPb.MsgType]UnmarshalFunc)
dispatcher.tempMap[internalPb.MsgType_kInsert] = insertMsg.Unmarshal
@@ -38,6 +43,11 @@ func (dispatcher *UnmarshalDispatcher) addDefaultMsgTemplates() {
dispatcher.tempMap[internalPb.MsgType_kSearchResult] = searchResultMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kTimeTick] = timeTickMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kQueryNodeSegStats] = queryNodeSegStatsMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kCreateCollection] = createCollectionMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kDropCollection] = dropCollectionMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kCreatePartition] = createPartitionMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kDropPartition] = dropPartitionMsg.Unmarshal
}
func NewUnmarshalDispatcher() *UnmarshalDispatcher {
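A sketch of how a consumer-side caller might use the dispatcher once the templates above are registered; direct tempMap access is shown only for illustration, and the real code may expose a lookup method instead:

// decode applies the unmarshal template registered for msgType. It assumes
// msgType was determined out of band (e.g. from a message header).
func decode(dispatcher *UnmarshalDispatcher, msgType internalPb.MsgType, payload []byte) (TsMsg, error) {
	unmarshalFunc, ok := dispatcher.tempMap[msgType]
	if !ok {
		return nil, fmt.Errorf("no unmarshal template for msg type %v", msgType)
	}
	return unmarshalFunc(payload)
}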


@@ -14,6 +14,7 @@ enum MsgType {
kHasCollection = 102;
kDescribeCollection = 103;
kShowCollections = 104;
kGetSysConfigs = 105;
/* Definition Requests: partition */
kCreatePartition = 200;
@@ -33,6 +34,7 @@
/* System Control */
kTimeTick = 1200;
kQueryNodeSegStats = 1201;
}
enum PeerRole {
@@ -223,6 +225,19 @@ message SearchRequest {
}
/**
* @brief Request of SysConfig
*/
message SysConfigRequest {
MsgType msg_type = 1;
int64 reqID = 2;
int64 proxyID = 3;
uint64 timestamp = 4;
repeated string keys = 5;
repeated string key_prefixes = 6;
}
message SearchResult {
MsgType msg_type = 1;
common.Status status = 2;
@@ -266,4 +281,4 @@ message QueryNodeSegStats {
MsgType msg_type = 1;
int64 peerID = 2;
repeated SegmentStats seg_stats = 3;
}
}
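A sketch of populating the new SysConfigRequest from Go, using the generated internalpb struct shown later in this commit; the field values are illustrative, and Timestamp would normally come from the TSO:

package main

import (
	"fmt"

	internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

func main() {
	req := &internalPb.SysConfigRequest{
		MsgType:     internalPb.MsgType_kGetSysConfigs,
		ReqID:       1,
		ProxyID:     1,
		Timestamp:   100, // illustrative; use a TSO-allocated timestamp
		Keys:        []string{"/master/port"},
		KeyPrefixes: []string{"/proxy"},
	}
	fmt.Println(req.String())
}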


@@ -32,6 +32,7 @@ const (
MsgType_kHasCollection MsgType = 102
MsgType_kDescribeCollection MsgType = 103
MsgType_kShowCollections MsgType = 104
MsgType_kGetSysConfigs MsgType = 105
// Definition Requests: partition
MsgType_kCreatePartition MsgType = 200
MsgType_kDropPartition MsgType = 201
@@ -56,6 +57,7 @@ var MsgType_name = map[int32]string{
102: "kHasCollection",
103: "kDescribeCollection",
104: "kShowCollections",
105: "kGetSysConfigs",
200: "kCreatePartition",
201: "kDropPartition",
202: "kHasPartition",
@@ -76,6 +78,7 @@ var MsgType_value = map[string]int32{
"kHasCollection": 102,
"kDescribeCollection": 103,
"kShowCollections": 104,
"kGetSysConfigs": 105,
"kCreatePartition": 200,
"kDropPartition": 201,
"kHasPartition": 202,
@@ -1579,6 +1582,87 @@ func (m *SearchRequest) GetQuery() *commonpb.Blob {
return nil
}
//*
// @brief Request of SysConfig
type SysConfigRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqID int64 `protobuf:"varint,2,opt,name=reqID,proto3" json:"reqID,omitempty"`
ProxyID int64 `protobuf:"varint,3,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
Keys []string `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
KeyPrefixes []string `protobuf:"bytes,6,rep,name=key_prefixes,json=keyPrefixes,proto3" json:"key_prefixes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SysConfigRequest) Reset() { *m = SysConfigRequest{} }
func (m *SysConfigRequest) String() string { return proto.CompactTextString(m) }
func (*SysConfigRequest) ProtoMessage() {}
func (*SysConfigRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{21}
}
func (m *SysConfigRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SysConfigRequest.Unmarshal(m, b)
}
func (m *SysConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SysConfigRequest.Marshal(b, m, deterministic)
}
func (m *SysConfigRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SysConfigRequest.Merge(m, src)
}
func (m *SysConfigRequest) XXX_Size() int {
return xxx_messageInfo_SysConfigRequest.Size(m)
}
func (m *SysConfigRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SysConfigRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SysConfigRequest proto.InternalMessageInfo
func (m *SysConfigRequest) GetMsgType() MsgType {
if m != nil {
return m.MsgType
}
return MsgType_kNone
}
func (m *SysConfigRequest) GetReqID() int64 {
if m != nil {
return m.ReqID
}
return 0
}
func (m *SysConfigRequest) GetProxyID() int64 {
if m != nil {
return m.ProxyID
}
return 0
}
func (m *SysConfigRequest) GetTimestamp() uint64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *SysConfigRequest) GetKeys() []string {
if m != nil {
return m.Keys
}
return nil
}
func (m *SysConfigRequest) GetKeyPrefixes() []string {
if m != nil {
return m.KeyPrefixes
}
return nil
}
type SearchResult struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
Status *commonpb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
@@ -1597,7 +1681,7 @@ func (m *SearchResult) Reset() { *m = SearchResult{} }
func (m *SearchResult) String() string { return proto.CompactTextString(m) }
func (*SearchResult) ProtoMessage() {}
func (*SearchResult) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{21}
return fileDescriptor_7eb37f6b80b23116, []int{22}
}
func (m *SearchResult) XXX_Unmarshal(b []byte) error {
@@ -1687,7 +1771,7 @@ func (m *TimeTickMsg) Reset() { *m = TimeTickMsg{} }
func (m *TimeTickMsg) String() string { return proto.CompactTextString(m) }
func (*TimeTickMsg) ProtoMessage() {}
func (*TimeTickMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{22}
return fileDescriptor_7eb37f6b80b23116, []int{23}
}
func (m *TimeTickMsg) XXX_Unmarshal(b []byte) error {
@@ -1744,7 +1828,7 @@ func (m *Key2Seg) Reset() { *m = Key2Seg{} }
func (m *Key2Seg) String() string { return proto.CompactTextString(m) }
func (*Key2Seg) ProtoMessage() {}
func (*Key2Seg) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{23}
return fileDescriptor_7eb37f6b80b23116, []int{24}
}
func (m *Key2Seg) XXX_Unmarshal(b []byte) error {
@@ -1812,7 +1896,7 @@ func (m *Key2SegMsg) Reset() { *m = Key2SegMsg{} }
func (m *Key2SegMsg) String() string { return proto.CompactTextString(m) }
func (*Key2SegMsg) ProtoMessage() {}
func (*Key2SegMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{24}
return fileDescriptor_7eb37f6b80b23116, []int{25}
}
func (m *Key2SegMsg) XXX_Unmarshal(b []byte) error {
@@ -1861,7 +1945,7 @@ func (m *SegmentStats) Reset() { *m = SegmentStats{} }
func (m *SegmentStats) String() string { return proto.CompactTextString(m) }
func (*SegmentStats) ProtoMessage() {}
func (*SegmentStats) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{25}
return fileDescriptor_7eb37f6b80b23116, []int{26}
}
func (m *SegmentStats) XXX_Unmarshal(b []byte) error {
@@ -1923,7 +2007,7 @@ func (m *QueryNodeSegStats) Reset() { *m = QueryNodeSegStats{} }
func (m *QueryNodeSegStats) String() string { return proto.CompactTextString(m) }
func (*QueryNodeSegStats) ProtoMessage() {}
func (*QueryNodeSegStats) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{26}
return fileDescriptor_7eb37f6b80b23116, []int{27}
}
func (m *QueryNodeSegStats) XXX_Unmarshal(b []byte) error {
@@ -1989,6 +2073,7 @@ func init() {
proto.RegisterType((*InsertRequest)(nil), "milvus.proto.internal.InsertRequest")
proto.RegisterType((*DeleteRequest)(nil), "milvus.proto.internal.DeleteRequest")
proto.RegisterType((*SearchRequest)(nil), "milvus.proto.internal.SearchRequest")
proto.RegisterType((*SysConfigRequest)(nil), "milvus.proto.internal.SysConfigRequest")
proto.RegisterType((*SearchResult)(nil), "milvus.proto.internal.SearchResult")
proto.RegisterType((*TimeTickMsg)(nil), "milvus.proto.internal.TimeTickMsg")
proto.RegisterType((*Key2Seg)(nil), "milvus.proto.internal.Key2Seg")
@@ -2000,94 +2085,98 @@ func init() {
func init() { proto.RegisterFile("internal_msg.proto", fileDescriptor_7eb37f6b80b23116) }
var fileDescriptor_7eb37f6b80b23116 = []byte{
// 1416 bytes of a gzipped FileDescriptorProto (old descriptor; hex bytes elided)
// 1474 bytes of a gzipped FileDescriptorProto (new descriptor; hex bytes elided)
}


@@ -89,6 +89,15 @@ service Master {
rpc ShowPartitions(internal.ShowPartitionRequest) returns (service.StringListResponse) {}
/**
* @brief This method is used to get system configs
*
* @param SysConfigRequest, keys or key_prefixes of the configs.
*
* @return SysConfigResponse
*/
rpc GetSysConfigs(internal.SysConfigRequest) returns (service.SysConfigResponse) {}
rpc AllocTimestamp(internal.TsoRequest) returns (internal.TsoResponse) {}
rpc AllocID(internal.IDRequest) returns (internal.IDResponse) {}
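For context, a minimal sketch of calling the new RPC from Go. It assumes a master reachable at localhost:53100, and the request's Keys/KeyPrefixes field names are inferred from the comment above rather than taken from this commit.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"

	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
)

func main() {
	// Address and prefix below are illustrative assumptions.
	conn, err := grpc.Dial("localhost:53100", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := masterpb.NewMasterClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	resp, err := client.GetSysConfigs(ctx, &internalpb.SysConfigRequest{
		KeyPrefixes: []string{"master."}, // field name inferred from the proto comment
	})
	if err != nil {
		log.Fatal(err)
	}
	for i, key := range resp.Keys {
		log.Printf("%s = %s", key, resp.Values[i])
	}
}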


@ -30,36 +30,38 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
func init() { proto.RegisterFile("master.proto", fileDescriptor_f9c348dec43a6705) }
var fileDescriptor_f9c348dec43a6705 = []byte{
// 458 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x41, 0x6f, 0xd3, 0x30,
0x14, 0xc7, 0x7b, 0x1a, 0x92, 0xd5, 0xb5, 0xcc, 0xdc, 0xca, 0x85, 0xf5, 0x04, 0x2d, 0x4b, 0x10,
0x7c, 0x01, 0xd6, 0xe5, 0xb0, 0x4a, 0x20, 0x4d, 0xeb, 0x2e, 0x80, 0xd0, 0x70, 0xb2, 0xa7, 0xf4,
0x81, 0x13, 0x07, 0xbf, 0x97, 0x21, 0xed, 0x23, 0xf1, 0x29, 0x51, 0x93, 0x26, 0xa9, 0x69, 0x5d,
0xca, 0x6e, 0xb5, 0xfd, 0xf3, 0xef, 0x5f, 0xbf, 0xf7, 0x14, 0xd1, 0xcf, 0x14, 0x31, 0xd8, 0xa0,
0xb0, 0x86, 0x8d, 0x7c, 0x96, 0xa1, 0xbe, 0x2f, 0xa9, 0x5e, 0x05, 0xf5, 0xd1, 0xa8, 0x9f, 0x98,
0x2c, 0x33, 0x79, 0xbd, 0x39, 0x92, 0x98, 0x33, 0xd8, 0x5c, 0xe9, 0xdb, 0x8c, 0xd2, 0xf5, 0xde,
0x09, 0x81, 0xbd, 0xc7, 0x04, 0xba, 0xad, 0xb7, 0xbf, 0x85, 0x38, 0xfa, 0x58, 0xdd, 0x97, 0x4a,
0x3c, 0xbd, 0xb0, 0xa0, 0x18, 0x2e, 0x8c, 0xd6, 0x90, 0x30, 0x9a, 0x5c, 0x06, 0x81, 0x93, 0xd4,
0x38, 0x83, 0xbf, 0xc1, 0x6b, 0xf8, 0x59, 0x02, 0xf1, 0xe8, 0xb9, 0xcb, 0xaf, 0xff, 0xd1, 0x82,
0x15, 0x97, 0x34, 0xee, 0xc9, 0xaf, 0x62, 0x10, 0x59, 0x53, 0x6c, 0x04, 0xbc, 0xf6, 0x04, 0xb8,
0xd8, 0x81, 0xfa, 0x58, 0x1c, 0x5f, 0x2a, 0xda, 0xb0, 0x4f, 0x3d, 0x76, 0x87, 0x6a, 0xe4, 0x63,
0x17, 0x5e, 0xd7, 0x2a, 0x98, 0x19, 0xa3, 0xaf, 0x81, 0x0a, 0x93, 0x13, 0x8c, 0x7b, 0xb2, 0x14,
0x32, 0x02, 0x4a, 0x2c, 0xc6, 0x9b, 0x75, 0x7a, 0xe3, 0x7b, 0xc6, 0x16, 0xda, 0xa4, 0x4d, 0x77,
0xa7, 0x75, 0x60, 0x7d, 0xb5, 0x58, 0xfd, 0x1c, 0xf7, 0xe4, 0x0f, 0x31, 0x5c, 0x2c, 0xcd, 0xaf,
0xee, 0x98, 0xbc, 0xa5, 0x73, 0xb9, 0x26, 0xef, 0xe5, 0xee, 0xbc, 0x05, 0x5b, 0xcc, 0xd3, 0x0f,
0x48, 0xbc, 0xf1, 0xc6, 0x5b, 0x31, 0xac, 0x1b, 0x7c, 0xa5, 0x2c, 0x63, 0xf5, 0xc0, 0xb3, 0xbd,
0x83, 0xd0, 0x72, 0x07, 0x36, 0xea, 0x8b, 0x38, 0x5e, 0x35, 0xb8, 0xd3, 0x4f, 0xf7, 0x8c, 0xc1,
0xff, 0xca, 0xbf, 0x89, 0xfe, 0xa5, 0xa2, 0xce, 0x3d, 0xf1, 0x0f, 0xc1, 0x96, 0xfa, 0xb0, 0x19,
0xb0, 0xe2, 0xa4, 0x69, 0x6c, 0x17, 0x13, 0xfe, 0x63, 0x04, 0xb6, 0xb2, 0x26, 0xbb, 0xb3, 0x5a,
0xce, 0x1d, 0x00, 0x14, 0x83, 0x55, 0x63, 0xdb, 0x53, 0xf2, 0xd6, 0xcc, 0xc1, 0x1e, 0xd3, 0xfe,
0x4f, 0x62, 0x70, 0xae, 0xb5, 0x49, 0x6e, 0x30, 0x03, 0x62, 0x95, 0x15, 0xf2, 0xd4, 0x13, 0x75,
0x43, 0xc6, 0x53, 0x39, 0x17, 0x69, 0xd5, 0x57, 0xe2, 0x49, 0xa5, 0x9e, 0x47, 0xf2, 0x85, 0xe7,
0xc2, 0x3c, 0x6a, 0x94, 0xa7, 0x7b, 0x88, 0xd6, 0xf8, 0x5d, 0x0c, 0xcf, 0x89, 0x30, 0xcd, 0x17,
0x90, 0x66, 0x90, 0xf3, 0x3c, 0x92, 0xaf, 0x3c, 0xf7, 0x5a, 0xae, 0x8b, 0x98, 0x1c, 0x82, 0x36,
0x59, 0xb3, 0xd9, 0xe7, 0xf7, 0x29, 0xf2, 0xb2, 0x8c, 0x57, 0x33, 0x17, 0x3e, 0xa0, 0xd6, 0xf8,
0xc0, 0x90, 0x2c, 0xc3, 0x5a, 0x72, 0x76, 0x87, 0xc4, 0x16, 0xe3, 0x92, 0xe1, 0x2e, 0x6c, 0x54,
0x61, 0x65, 0x0e, 0xeb, 0x6f, 0x74, 0x11, 0xc7, 0x47, 0xd5, 0xfa, 0xdd, 0x9f, 0x00, 0x00, 0x00,
0xff, 0xff, 0xa0, 0xb5, 0xeb, 0xf6, 0xd1, 0x05, 0x00, 0x00,
// 484 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x95, 0xdf, 0x6e, 0xd3, 0x30,
0x14, 0x87, 0x7b, 0x35, 0x24, 0xd3, 0x3f, 0xcc, 0xdc, 0x95, 0x1b, 0xd6, 0x9b, 0x41, 0xcb, 0x12,
0x04, 0x2f, 0xc0, 0xda, 0x48, 0xac, 0x12, 0x48, 0xd3, 0xb2, 0x1b, 0x40, 0x68, 0x24, 0xd9, 0x21,
0x35, 0x24, 0x76, 0xf0, 0x39, 0x19, 0xa2, 0x2f, 0xc1, 0x2b, 0xa3, 0x26, 0x75, 0x52, 0xd3, 0xba,
0x94, 0xdd, 0xd5, 0xf6, 0xe7, 0xdf, 0x57, 0x9f, 0x73, 0xa4, 0xb0, 0x6e, 0x1e, 0x21, 0x81, 0xf6,
0x0a, 0xad, 0x48, 0xf1, 0xc7, 0xb9, 0xc8, 0xee, 0x4a, 0xac, 0x57, 0x5e, 0x7d, 0x34, 0xec, 0x26,
0x2a, 0xcf, 0x95, 0xac, 0x37, 0x87, 0x5c, 0x48, 0x02, 0x2d, 0xa3, 0xec, 0x26, 0xc7, 0x74, 0xbd,
0x77, 0x8c, 0xa0, 0xef, 0x44, 0x02, 0xed, 0xd6, 0xab, 0xdf, 0x0f, 0xd9, 0xd1, 0xfb, 0xea, 0x3e,
0x8f, 0xd8, 0xa3, 0x99, 0x86, 0x88, 0x60, 0xa6, 0xb2, 0x0c, 0x12, 0x12, 0x4a, 0x72, 0xcf, 0xb3,
0x4c, 0x26, 0xd3, 0xfb, 0x1b, 0xbc, 0x82, 0x1f, 0x25, 0x20, 0x0d, 0x9f, 0xd8, 0xfc, 0xfa, 0x1f,
0x85, 0x14, 0x51, 0x89, 0xa3, 0x0e, 0xff, 0xcc, 0xfa, 0x81, 0x56, 0xc5, 0x86, 0xe0, 0x85, 0x43,
0x60, 0x63, 0x07, 0xc6, 0xc7, 0xac, 0x77, 0x11, 0xe1, 0x46, 0xfa, 0xc4, 0x91, 0x6e, 0x51, 0x26,
0x7c, 0x64, 0xc3, 0xeb, 0x5a, 0x79, 0x53, 0xa5, 0xb2, 0x2b, 0xc0, 0x42, 0x49, 0x84, 0x51, 0x87,
0x97, 0x8c, 0x07, 0x80, 0x89, 0x16, 0xf1, 0x66, 0x9d, 0x5e, 0xba, 0x9e, 0xb1, 0x85, 0x1a, 0xdb,
0x64, 0xb7, 0xad, 0x05, 0xeb, 0xab, 0xc5, 0xea, 0xe7, 0xa8, 0xc3, 0xbf, 0xb3, 0x41, 0xb8, 0x50,
0x3f, 0xdb, 0x63, 0x74, 0x96, 0xce, 0xe6, 0x8c, 0xef, 0xd9, 0x6e, 0x5f, 0x48, 0x5a, 0xc8, 0xf4,
0x9d, 0x40, 0xda, 0x78, 0xe3, 0x0d, 0x1b, 0xd4, 0x0d, 0xbe, 0x8c, 0x34, 0x89, 0xea, 0x81, 0x67,
0x7b, 0x07, 0xa1, 0xe1, 0x0e, 0x6c, 0xd4, 0x27, 0xd6, 0x5b, 0x35, 0xb8, 0x8d, 0x9f, 0xec, 0x19,
0x83, 0xff, 0x0d, 0xff, 0xc2, 0xba, 0x17, 0x11, 0xb6, 0xd9, 0x63, 0xf7, 0x10, 0x6c, 0x45, 0x1f,
0x36, 0x03, 0x9a, 0x1d, 0x9b, 0xc6, 0xb6, 0x1a, 0xff, 0x1f, 0x23, 0xb0, 0xe5, 0x1a, 0xef, 0x76,
0x35, 0x9c, 0x3d, 0x00, 0x82, 0xf5, 0x57, 0x8d, 0x6d, 0x4e, 0xd1, 0x59, 0x33, 0x0b, 0xbb, 0x4f,
0xfb, 0x13, 0xd6, 0x7b, 0x0b, 0x14, 0xfe, 0xc2, 0x99, 0x92, 0x5f, 0x45, 0x8a, 0xfc, 0xd4, 0x65,
0x32, 0x88, 0xb1, 0x9c, 0x3a, 0x2c, 0x2d, 0xd7, 0x48, 0x3e, 0xb0, 0xfe, 0x79, 0x96, 0xa9, 0xe4,
0x5a, 0xe4, 0x80, 0x14, 0xe5, 0x05, 0x3f, 0x71, 0x58, 0xae, 0x51, 0x39, 0xda, 0x63, 0x23, 0x4d,
0xf4, 0x25, 0x7b, 0x50, 0x45, 0xcf, 0x03, 0xfe, 0xd4, 0x71, 0x61, 0x1e, 0x98, 0xc8, 0x93, 0x3d,
0x44, 0x93, 0xf8, 0x8d, 0x0d, 0xce, 0x11, 0x45, 0x2a, 0x43, 0x48, 0x73, 0x90, 0x34, 0x0f, 0xf8,
0x73, 0xc7, 0xbd, 0x86, 0x6b, 0x15, 0xe3, 0x43, 0x50, 0xe3, 0x9a, 0x4e, 0x3f, 0xbe, 0x49, 0x05,
0x2d, 0xca, 0x78, 0x35, 0xd8, 0xfe, 0x52, 0x64, 0x99, 0x58, 0x12, 0x24, 0x0b, 0xbf, 0x0e, 0x39,
0xbb, 0x15, 0x48, 0x5a, 0xc4, 0x25, 0xc1, 0xad, 0x6f, 0xa2, 0xfc, 0x2a, 0xd9, 0xaf, 0x3f, 0x04,
0x45, 0x1c, 0x1f, 0x55, 0xeb, 0xd7, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x75, 0x7d, 0xec,
0x36, 0x06, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -134,6 +136,13 @@ type MasterClient interface {
//
// @return StringListResponse
ShowPartitions(ctx context.Context, in *internalpb.ShowPartitionRequest, opts ...grpc.CallOption) (*servicepb.StringListResponse, error)
//*
// @brief This method is used to get system configs
//
// @param SysConfigRequest, keys or key_prefixes of the configs.
//
// @return SysConfigResponse
GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest, opts ...grpc.CallOption) (*servicepb.SysConfigResponse, error)
AllocTimestamp(ctx context.Context, in *internalpb.TsoRequest, opts ...grpc.CallOption) (*internalpb.TsoResponse, error)
AllocID(ctx context.Context, in *internalpb.IDRequest, opts ...grpc.CallOption) (*internalpb.IDResponse, error)
AssignSegmentID(ctx context.Context, in *internalpb.AssignSegIDRequest, opts ...grpc.CallOption) (*internalpb.AssignSegIDResponse, error)
@ -237,6 +246,15 @@ func (c *masterClient) ShowPartitions(ctx context.Context, in *internalpb.ShowPa
return out, nil
}
func (c *masterClient) GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest, opts ...grpc.CallOption) (*servicepb.SysConfigResponse, error) {
out := new(servicepb.SysConfigResponse)
err := c.cc.Invoke(ctx, "/milvus.proto.master.Master/GetSysConfigs", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *masterClient) AllocTimestamp(ctx context.Context, in *internalpb.TsoRequest, opts ...grpc.CallOption) (*internalpb.TsoResponse, error) {
out := new(internalpb.TsoResponse)
err := c.cc.Invoke(ctx, "/milvus.proto.master.Master/AllocTimestamp", in, out, opts...)
@ -326,6 +344,13 @@ type MasterServer interface {
//
// @return StringListResponse
ShowPartitions(context.Context, *internalpb.ShowPartitionRequest) (*servicepb.StringListResponse, error)
//*
// @brief This method is used to get system configs
//
// @param SysConfigRequest, keys or key_prefixes of the configs.
//
// @return SysConfigResponse
GetSysConfigs(context.Context, *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error)
AllocTimestamp(context.Context, *internalpb.TsoRequest) (*internalpb.TsoResponse, error)
AllocID(context.Context, *internalpb.IDRequest) (*internalpb.IDResponse, error)
AssignSegmentID(context.Context, *internalpb.AssignSegIDRequest) (*internalpb.AssignSegIDResponse, error)
@ -365,6 +390,9 @@ func (*UnimplementedMasterServer) DescribePartition(ctx context.Context, req *in
func (*UnimplementedMasterServer) ShowPartitions(ctx context.Context, req *internalpb.ShowPartitionRequest) (*servicepb.StringListResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ShowPartitions not implemented")
}
func (*UnimplementedMasterServer) GetSysConfigs(ctx context.Context, req *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetSysConfigs not implemented")
}
func (*UnimplementedMasterServer) AllocTimestamp(ctx context.Context, req *internalpb.TsoRequest) (*internalpb.TsoResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method AllocTimestamp not implemented")
}
@ -559,6 +587,24 @@ func _Master_ShowPartitions_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler)
}
func _Master_GetSysConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(internalpb.SysConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MasterServer).GetSysConfigs(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.master.Master/GetSysConfigs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MasterServer).GetSysConfigs(ctx, req.(*internalpb.SysConfigRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Master_AllocTimestamp_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(internalpb.TsoRequest)
if err := dec(in); err != nil {
@ -657,6 +703,10 @@ var _Master_serviceDesc = grpc.ServiceDesc{
MethodName: "ShowPartitions",
Handler: _Master_ShowPartitions_Handler,
},
{
MethodName: "GetSysConfigs",
Handler: _Master_GetSysConfigs_Handler,
},
{
MethodName: "AllocTimestamp",
Handler: _Master_AllocTimestamp_Handler,


@ -135,6 +135,14 @@ message PartitionDescription {
repeated common.KeyValuePair statistics = 3;
}
/**
* @brief Response of GetSysConfig
*/
message SysConfigResponse {
common.Status status = 1;
repeated string keys = 2;
repeated string values = 3;
}
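Since the response carries parallel keys and values slices, a consumer will typically fold them into a map. A small runnable sketch; sysConfigsToMap is our illustrative helper, not repo code.

package main

import (
	"fmt"

	"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)

// sysConfigsToMap folds the parallel Keys/Values slices into a map,
// guarding against mismatched slice lengths.
func sysConfigsToMap(resp *servicepb.SysConfigResponse) map[string]string {
	configs := make(map[string]string, len(resp.Keys))
	for i, key := range resp.Keys {
		if i < len(resp.Values) {
			configs[key] = resp.Values[i]
		}
	}
	return configs
}

func main() {
	resp := &servicepb.SysConfigResponse{
		Keys:   []string{"master.port", "etcd.address"}, // illustrative keys
		Values: []string{"53100", "etcd:2379"},
	}
	fmt.Println(sysConfigsToMap(resp))
}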
/**
* @brief Entities hit by query


@ -737,6 +737,63 @@ func (m *PartitionDescription) GetStatistics() []*commonpb.KeyValuePair {
return nil
}
//*
// @brief Response of GetSysConfig
type SysConfigResponse struct {
Status *commonpb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
Keys []string `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"`
Values []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SysConfigResponse) Reset() { *m = SysConfigResponse{} }
func (m *SysConfigResponse) String() string { return proto.CompactTextString(m) }
func (*SysConfigResponse) ProtoMessage() {}
func (*SysConfigResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_b4b40b84dd2f74cb, []int{13}
}
func (m *SysConfigResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SysConfigResponse.Unmarshal(m, b)
}
func (m *SysConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SysConfigResponse.Marshal(b, m, deterministic)
}
func (m *SysConfigResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SysConfigResponse.Merge(m, src)
}
func (m *SysConfigResponse) XXX_Size() int {
return xxx_messageInfo_SysConfigResponse.Size(m)
}
func (m *SysConfigResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SysConfigResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SysConfigResponse proto.InternalMessageInfo
func (m *SysConfigResponse) GetStatus() *commonpb.Status {
if m != nil {
return m.Status
}
return nil
}
func (m *SysConfigResponse) GetKeys() []string {
if m != nil {
return m.Keys
}
return nil
}
func (m *SysConfigResponse) GetValues() []string {
if m != nil {
return m.Values
}
return nil
}
//*
// @brief Entities hit by query
type Hits struct {
@ -752,7 +809,7 @@ func (m *Hits) Reset() { *m = Hits{} }
func (m *Hits) String() string { return proto.CompactTextString(m) }
func (*Hits) ProtoMessage() {}
func (*Hits) Descriptor() ([]byte, []int) {
return fileDescriptor_b4b40b84dd2f74cb, []int{13}
return fileDescriptor_b4b40b84dd2f74cb, []int{14}
}
func (m *Hits) XXX_Unmarshal(b []byte) error {
@ -808,7 +865,7 @@ func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) {
return fileDescriptor_b4b40b84dd2f74cb, []int{14}
return fileDescriptor_b4b40b84dd2f74cb, []int{15}
}
func (m *QueryResult) XXX_Unmarshal(b []byte) error {
@ -858,6 +915,7 @@ func init() {
proto.RegisterType((*IntegerRangeResponse)(nil), "milvus.proto.service.IntegerRangeResponse")
proto.RegisterType((*CollectionDescription)(nil), "milvus.proto.service.CollectionDescription")
proto.RegisterType((*PartitionDescription)(nil), "milvus.proto.service.PartitionDescription")
proto.RegisterType((*SysConfigResponse)(nil), "milvus.proto.service.SysConfigResponse")
proto.RegisterType((*Hits)(nil), "milvus.proto.service.Hits")
proto.RegisterType((*QueryResult)(nil), "milvus.proto.service.QueryResult")
}
@ -865,52 +923,53 @@ func init() {
func init() { proto.RegisterFile("service_msg.proto", fileDescriptor_b4b40b84dd2f74cb) }
var fileDescriptor_b4b40b84dd2f74cb = []byte{
// 739 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xdd, 0x4e, 0xdb, 0x4a,
0x10, 0x3e, 0x8e, 0x43, 0x4e, 0x98, 0x38, 0x21, 0xec, 0xc9, 0x41, 0x06, 0x6e, 0x72, 0x8c, 0x38,
0x8d, 0x5a, 0x35, 0x91, 0xa0, 0x52, 0xc5, 0x45, 0xa5, 0x26, 0x40, 0x5b, 0x7e, 0x14, 0xe8, 0x12,
0x21, 0xd1, 0x4a, 0x8d, 0x36, 0xf6, 0xca, 0x5e, 0xd5, 0xf1, 0x5a, 0xde, 0x35, 0x51, 0x78, 0x90,
0xbe, 0x44, 0x1f, 0xa4, 0x77, 0x7d, 0xa6, 0xca, 0x6b, 0x93, 0x1f, 0x4a, 0x55, 0x0a, 0xdc, 0xcd,
0xcc, 0xee, 0xcc, 0x37, 0xbf, 0x1f, 0x2c, 0x0b, 0x1a, 0x5d, 0x32, 0x9b, 0xf6, 0x87, 0xc2, 0x6d,
0x86, 0x11, 0x97, 0x1c, 0xd5, 0x86, 0xcc, 0xbf, 0x8c, 0x45, 0xaa, 0x35, 0xb3, 0xf7, 0x35, 0xc3,
0xe6, 0xc3, 0x21, 0x0f, 0x52, 0xeb, 0x9a, 0x21, 0x6c, 0x8f, 0x0e, 0x49, 0xaa, 0x59, 0x3b, 0x50,
0xd9, 0xe5, 0xbe, 0x4f, 0x6d, 0xc9, 0x78, 0xd0, 0x25, 0x43, 0x8a, 0x9e, 0xc0, 0x92, 0x3d, 0xb1,
0xf4, 0x03, 0x32, 0xa4, 0xa6, 0x56, 0xd7, 0x1a, 0x8b, 0xb8, 0x62, 0xcf, 0x7d, 0xb4, 0x0e, 0xa1,
0x7c, 0x4a, 0x22, 0xc9, 0xfe, 0xd8, 0x13, 0x55, 0x41, 0x97, 0xc4, 0x35, 0x73, 0xea, 0x31, 0x11,
0xad, 0xaf, 0x1a, 0x14, 0x31, 0x1f, 0x75, 0x88, 0xb4, 0xbd, 0xbb, 0xc7, 0xd9, 0x80, 0x72, 0x78,
0x9d, 0x41, 0x7f, 0x1a, 0xd1, 0x98, 0x18, 0x7b, 0xc4, 0x45, 0x2f, 0xa0, 0x18, 0xf1, 0x51, 0xdf,
0x21, 0x92, 0x98, 0x7a, 0x5d, 0x6f, 0x94, 0xb6, 0x56, 0x9b, 0x73, 0x6d, 0xca, 0xba, 0xd3, 0xf1,
0xf9, 0x00, 0xff, 0x1d, 0xf1, 0xd1, 0x1e, 0x91, 0x04, 0xad, 0xc3, 0xa2, 0x47, 0x84, 0xd7, 0xff,
0x4c, 0xc7, 0xc2, 0xcc, 0xd7, 0xf5, 0x46, 0x19, 0x17, 0x13, 0xc3, 0x11, 0x1d, 0x0b, 0x6b, 0x04,
0xd5, 0x53, 0x9f, 0xd8, 0xd4, 0xe3, 0xbe, 0x43, 0xa3, 0x73, 0xe2, 0xc7, 0x93, 0x9a, 0xb4, 0x49,
0x4d, 0x68, 0x07, 0xf2, 0x72, 0x1c, 0x52, 0x95, 0x54, 0x65, 0x6b, 0xb3, 0x79, 0xdb, 0x6c, 0x9a,
0x33, 0x71, 0x7a, 0xe3, 0x90, 0x62, 0xe5, 0x82, 0x56, 0xa0, 0x70, 0x99, 0x44, 0x15, 0x2a, 0x63,
0x03, 0x67, 0x9a, 0xf5, 0x69, 0x0e, 0xf8, 0x6d, 0xc4, 0xe3, 0x10, 0x1d, 0x82, 0x11, 0x4e, 0x6d,
0xc2, 0xd4, 0x54, 0x8d, 0xff, 0xff, 0x16, 0x4e, 0xa5, 0x8d, 0xe7, 0x7c, 0xad, 0x2f, 0x1a, 0x2c,
0xbc, 0x8f, 0x69, 0x34, 0xbe, 0xfb, 0x0c, 0x36, 0xa1, 0x32, 0x37, 0x03, 0x61, 0xe6, 0xea, 0x7a,
0x63, 0x11, 0x97, 0x67, 0x87, 0x20, 0x92, 0xf6, 0x38, 0xc2, 0x37, 0xf5, 0xb4, 0x3d, 0x8e, 0xf0,
0xd1, 0x33, 0x58, 0x9e, 0xc1, 0xee, 0xbb, 0x49, 0x31, 0x66, 0xbe, 0xae, 0x35, 0x0c, 0x5c, 0x0d,
0x6f, 0x14, 0x69, 0x7d, 0x84, 0xca, 0x99, 0x8c, 0x58, 0xe0, 0x62, 0x2a, 0x42, 0x1e, 0x08, 0x8a,
0xb6, 0xa1, 0x20, 0x24, 0x91, 0xb1, 0x50, 0x79, 0x95, 0xb6, 0xd6, 0x6f, 0x1d, 0xea, 0x99, 0xfa,
0x82, 0xb3, 0xaf, 0xa8, 0x06, 0x0b, 0xaa, 0x93, 0xd9, 0xa2, 0xa4, 0x8a, 0x75, 0x01, 0x46, 0x87,
0x73, 0xff, 0x11, 0x43, 0x17, 0xaf, 0x43, 0x13, 0x40, 0x69, 0xde, 0xc7, 0x4c, 0xc8, 0x87, 0x01,
0x4c, 0x77, 0x22, 0x6d, 0xf0, 0xf5, 0x4e, 0x0c, 0xe0, 0x9f, 0x83, 0x40, 0x52, 0x97, 0x46, 0x8f,
0x8d, 0xa1, 0x4f, 0x30, 0x04, 0xd4, 0x32, 0x0c, 0x4c, 0x02, 0x97, 0x3e, 0xb8, 0x53, 0x03, 0xea,
0xb2, 0x40, 0x75, 0x4a, 0xc7, 0xa9, 0x92, 0x2c, 0x08, 0x0d, 0x1c, 0xb5, 0x20, 0x3a, 0x4e, 0x44,
0xeb, 0xbb, 0x06, 0xff, 0x4e, 0xb9, 0x69, 0x8f, 0x0a, 0x3b, 0x62, 0x61, 0x22, 0xde, 0x0f, 0xf6,
0x15, 0x14, 0x52, 0xe6, 0x53, 0xb8, 0xa5, 0x9f, 0x0e, 0x32, 0x65, 0xc5, 0x29, 0xe0, 0x99, 0x32,
0xe0, 0xcc, 0x09, 0xb5, 0x01, 0x92, 0x40, 0x4c, 0x48, 0x66, 0x8b, 0x8c, 0x48, 0xfe, 0xbb, 0x15,
0xf7, 0x88, 0x8e, 0xd5, 0x6d, 0x9d, 0x12, 0x16, 0xe1, 0x19, 0x27, 0xeb, 0x9b, 0x06, 0xb5, 0x09,
0x63, 0x3e, 0xb8, 0x9e, 0x97, 0x90, 0x57, 0x67, 0x99, 0x56, 0xb3, 0xf1, 0x8b, 0x7b, 0x9f, 0x25,
0x68, 0xac, 0x1c, 0x1e, 0xa3, 0x92, 0x23, 0xc8, 0xbf, 0x63, 0x52, 0x5d, 0xf5, 0xc1, 0x5e, 0x4a,
0x39, 0x3a, 0x4e, 0x44, 0xb4, 0x3a, 0xc3, 0xb6, 0x39, 0xc5, 0x5d, 0x13, 0x4a, 0x5d, 0x49, 0x06,
0xc0, 0xa3, 0x8c, 0xd4, 0x72, 0x38, 0xd3, 0xac, 0x73, 0x28, 0x29, 0xce, 0xc1, 0x54, 0xc4, 0xbe,
0xbc, 0x5f, 0x33, 0x10, 0xe4, 0x3d, 0x26, 0x45, 0x06, 0xa9, 0xe4, 0xa7, 0xaf, 0x61, 0xe9, 0x06,
0xbb, 0xa2, 0x22, 0xe4, 0xbb, 0x27, 0xdd, 0xfd, 0xea, 0x5f, 0x68, 0x19, 0xca, 0xe7, 0xfb, 0xbb,
0xbd, 0x13, 0xdc, 0xef, 0x1c, 0x74, 0xdb, 0xf8, 0xa2, 0xea, 0xa0, 0x2a, 0x18, 0x99, 0xe9, 0xcd,
0xf1, 0x49, 0xbb, 0x57, 0xa5, 0x9d, 0xdd, 0x0f, 0x6d, 0x97, 0x49, 0x2f, 0x1e, 0x24, 0xa8, 0xad,
0x2b, 0xe6, 0xfb, 0xec, 0x4a, 0x52, 0xdb, 0x6b, 0xa5, 0x19, 0x3d, 0x77, 0x98, 0x90, 0x11, 0x1b,
0xc4, 0x92, 0x3a, 0x2d, 0x16, 0x48, 0x1a, 0x05, 0xc4, 0x6f, 0xa9, 0x34, 0x5b, 0xd9, 0x00, 0xc2,
0xc1, 0xa0, 0xa0, 0x0c, 0xdb, 0x3f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xee, 0x08, 0x5d, 0xa4, 0xaf,
0x07, 0x00, 0x00,
// 762 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xdd, 0x4e, 0xe3, 0x46,
0x14, 0xae, 0xe3, 0x90, 0x86, 0x13, 0x27, 0x24, 0xd3, 0x14, 0x19, 0xb8, 0x49, 0x8d, 0x68, 0xa3,
0x56, 0x4d, 0x24, 0xa8, 0x54, 0x71, 0x51, 0xa9, 0x49, 0xa0, 0x2d, 0x3f, 0x0a, 0x74, 0x12, 0x21,
0xd1, 0x4a, 0x8d, 0x26, 0xf6, 0xd4, 0x1e, 0xd5, 0xf1, 0x58, 0x9e, 0x31, 0x51, 0x78, 0x90, 0xbe,
0xc4, 0x3e, 0xc8, 0xde, 0xed, 0x33, 0xad, 0x3c, 0x36, 0xf9, 0x61, 0x59, 0x2d, 0x4b, 0xb8, 0x3b,
0xe7, 0xcc, 0x9c, 0xf3, 0x9d, 0xdf, 0x0f, 0x6a, 0x82, 0x46, 0x77, 0xcc, 0xa6, 0xa3, 0x89, 0x70,
0x5b, 0x61, 0xc4, 0x25, 0x47, 0xf5, 0x09, 0xf3, 0xef, 0x62, 0x91, 0x6a, 0xad, 0xec, 0x7d, 0xd7,
0xb0, 0xf9, 0x64, 0xc2, 0x83, 0xd4, 0xba, 0x6b, 0x08, 0xdb, 0xa3, 0x13, 0x92, 0x6a, 0xd6, 0x31,
0x54, 0x7a, 0xdc, 0xf7, 0xa9, 0x2d, 0x19, 0x0f, 0xfa, 0x64, 0x42, 0xd1, 0x77, 0xb0, 0x65, 0xcf,
0x2d, 0xa3, 0x80, 0x4c, 0xa8, 0xa9, 0x35, 0xb4, 0xe6, 0x26, 0xae, 0xd8, 0x2b, 0x1f, 0xad, 0x73,
0x28, 0x5f, 0x93, 0x48, 0xb2, 0xcf, 0xf6, 0x44, 0x55, 0xd0, 0x25, 0x71, 0xcd, 0x9c, 0x7a, 0x4c,
0x44, 0xeb, 0x8d, 0x06, 0x45, 0xcc, 0xa7, 0x5d, 0x22, 0x6d, 0xef, 0xf9, 0x71, 0xf6, 0xa1, 0x1c,
0x3e, 0x64, 0x30, 0x5a, 0x44, 0x34, 0xe6, 0xc6, 0x21, 0x71, 0xd1, 0x4f, 0x50, 0x8c, 0xf8, 0x74,
0xe4, 0x10, 0x49, 0x4c, 0xbd, 0xa1, 0x37, 0x4b, 0x87, 0x3b, 0xad, 0x95, 0x36, 0x65, 0xdd, 0xe9,
0xfa, 0x7c, 0x8c, 0xbf, 0x8c, 0xf8, 0xf4, 0x84, 0x48, 0x82, 0xf6, 0x60, 0xd3, 0x23, 0xc2, 0x1b,
0xfd, 0x47, 0x67, 0xc2, 0xcc, 0x37, 0xf4, 0x66, 0x19, 0x17, 0x13, 0xc3, 0x05, 0x9d, 0x09, 0x6b,
0x0a, 0xd5, 0x6b, 0x9f, 0xd8, 0xd4, 0xe3, 0xbe, 0x43, 0xa3, 0x1b, 0xe2, 0xc7, 0xf3, 0x9a, 0xb4,
0x79, 0x4d, 0xe8, 0x18, 0xf2, 0x72, 0x16, 0x52, 0x95, 0x54, 0xe5, 0xf0, 0xa0, 0xf5, 0xd4, 0x6c,
0x5a, 0x4b, 0x71, 0x86, 0xb3, 0x90, 0x62, 0xe5, 0x82, 0xb6, 0xa1, 0x70, 0x97, 0x44, 0x15, 0x2a,
0x63, 0x03, 0x67, 0x9a, 0xf5, 0xcf, 0x0a, 0xf0, 0xef, 0x11, 0x8f, 0x43, 0x74, 0x0e, 0x46, 0xb8,
0xb0, 0x09, 0x53, 0x53, 0x35, 0x7e, 0xfb, 0x49, 0x38, 0x95, 0x36, 0x5e, 0xf1, 0xb5, 0xfe, 0xd7,
0x60, 0xe3, 0xcf, 0x98, 0x46, 0xb3, 0xe7, 0xcf, 0xe0, 0x00, 0x2a, 0x2b, 0x33, 0x10, 0x66, 0xae,
0xa1, 0x37, 0x37, 0x71, 0x79, 0x79, 0x08, 0x22, 0x69, 0x8f, 0x23, 0x7c, 0x53, 0x4f, 0xdb, 0xe3,
0x08, 0x1f, 0xfd, 0x00, 0xb5, 0x25, 0xec, 0x91, 0x9b, 0x14, 0x63, 0xe6, 0x1b, 0x5a, 0xd3, 0xc0,
0xd5, 0xf0, 0x51, 0x91, 0xd6, 0xdf, 0x50, 0x19, 0xc8, 0x88, 0x05, 0x2e, 0xa6, 0x22, 0xe4, 0x81,
0xa0, 0xe8, 0x08, 0x0a, 0x42, 0x12, 0x19, 0x0b, 0x95, 0x57, 0xe9, 0x70, 0xef, 0xc9, 0xa1, 0x0e,
0xd4, 0x17, 0x9c, 0x7d, 0x45, 0x75, 0xd8, 0x50, 0x9d, 0xcc, 0x16, 0x25, 0x55, 0xac, 0x5b, 0x30,
0xba, 0x9c, 0xfb, 0xaf, 0x18, 0xba, 0xf8, 0x10, 0x9a, 0x00, 0x4a, 0xf3, 0xbe, 0x64, 0x42, 0xae,
0x07, 0xb0, 0xd8, 0x89, 0xb4, 0xc1, 0x0f, 0x3b, 0x31, 0x86, 0xaf, 0xce, 0x02, 0x49, 0x5d, 0x1a,
0xbd, 0x36, 0x86, 0x3e, 0xc7, 0x10, 0x50, 0xcf, 0x30, 0x30, 0x09, 0x5c, 0xba, 0x76, 0xa7, 0xc6,
0xd4, 0x65, 0x81, 0xea, 0x94, 0x8e, 0x53, 0x25, 0x59, 0x10, 0x1a, 0x38, 0x6a, 0x41, 0x74, 0x9c,
0x88, 0xd6, 0x3b, 0x0d, 0xbe, 0x5e, 0x70, 0xd3, 0x09, 0x15, 0x76, 0xc4, 0xc2, 0x44, 0x7c, 0x19,
0xec, 0x2f, 0x50, 0x48, 0x99, 0x4f, 0xe1, 0x96, 0x3e, 0x38, 0xc8, 0x94, 0x15, 0x17, 0x80, 0x03,
0x65, 0xc0, 0x99, 0x13, 0xea, 0x00, 0x24, 0x81, 0x98, 0x90, 0xcc, 0x16, 0x19, 0x91, 0x7c, 0xf3,
0x24, 0xee, 0x05, 0x9d, 0xa9, 0xdb, 0xba, 0x26, 0x2c, 0xc2, 0x4b, 0x4e, 0xd6, 0x5b, 0x0d, 0xea,
0x73, 0xc6, 0x5c, 0xbb, 0x9e, 0x9f, 0x21, 0xaf, 0xce, 0x32, 0xad, 0x66, 0xff, 0x23, 0xf7, 0xbe,
0x4c, 0xd0, 0x58, 0x39, 0xbc, 0x46, 0x25, 0x12, 0x6a, 0x83, 0x99, 0xe8, 0xf1, 0xe0, 0x5f, 0xb6,
0xe6, 0x45, 0x22, 0xc8, 0x2b, 0x8a, 0x4d, 0x77, 0x5a, 0xc9, 0x8f, 0xd8, 0x6f, 0xb1, 0xe9, 0x17,
0x90, 0xff, 0x83, 0x49, 0xc5, 0x25, 0x67, 0x27, 0x29, 0xd1, 0xe9, 0x38, 0x11, 0xd1, 0xce, 0x12,
0xc7, 0xe7, 0x14, 0x63, 0xce, 0x89, 0x7c, 0x3b, 0x19, 0x3b, 0x8f, 0xb2, 0x60, 0x39, 0x9c, 0x69,
0xd6, 0x0d, 0x94, 0x14, 0xd3, 0x61, 0x2a, 0x62, 0x5f, 0xbe, 0x38, 0x79, 0x8f, 0x49, 0x91, 0x41,
0x2a, 0xf9, 0xfb, 0x5f, 0x61, 0xeb, 0x11, 0xa7, 0xa3, 0x22, 0xe4, 0xfb, 0x57, 0xfd, 0xd3, 0xea,
0x17, 0xa8, 0x06, 0xe5, 0x9b, 0xd3, 0xde, 0xf0, 0x0a, 0x8f, 0xba, 0x67, 0xfd, 0x0e, 0xbe, 0xad,
0x3a, 0xa8, 0x0a, 0x46, 0x66, 0xfa, 0xed, 0xf2, 0xaa, 0x33, 0xac, 0xd2, 0x6e, 0xef, 0xaf, 0x8e,
0xcb, 0xa4, 0x17, 0x8f, 0x13, 0xd4, 0xf6, 0x3d, 0xf3, 0x7d, 0x76, 0x2f, 0xa9, 0xed, 0xb5, 0xd3,
0x8c, 0x7e, 0x74, 0x98, 0x90, 0x11, 0x1b, 0xc7, 0x92, 0x3a, 0x6d, 0x16, 0x48, 0x1a, 0x05, 0xc4,
0x6f, 0xab, 0x34, 0xdb, 0xd9, 0xd8, 0xc3, 0xf1, 0xb8, 0xa0, 0x0c, 0x47, 0xef, 0x03, 0x00, 0x00,
0xff, 0xff, 0x8d, 0x5a, 0x44, 0x98, 0x25, 0x08, 0x00, 0x00,
}

View File

@ -18,7 +18,6 @@ const (
)
func (p *Proxy) Insert(ctx context.Context, in *servicepb.RowBatch) (*servicepb.IntegerRangeResponse, error) {
log.Println("insert into: ", in.CollectionName)
it := &InsertTask{
Condition: NewTaskCondition(ctx),
BaseInsertTask: BaseInsertTask{
@ -77,7 +76,6 @@ func (p *Proxy) Insert(ctx context.Context, in *servicepb.RowBatch) (*servicepb.
}
func (p *Proxy) CreateCollection(ctx context.Context, req *schemapb.CollectionSchema) (*commonpb.Status, error) {
log.Println("create collection: ", req)
cct := &CreateCollectionTask{
Condition: NewTaskCondition(ctx),
CreateCollectionRequest: internalpb.CreateCollectionRequest{
@ -119,7 +117,6 @@ func (p *Proxy) CreateCollection(ctx context.Context, req *schemapb.CollectionSc
}
func (p *Proxy) Search(ctx context.Context, req *servicepb.Query) (*servicepb.QueryResult, error) {
log.Println("search: ", req.CollectionName, req.Dsl)
qt := &QueryTask{
Condition: NewTaskCondition(ctx),
SearchRequest: internalpb.SearchRequest{
@ -167,7 +164,6 @@ func (p *Proxy) Search(ctx context.Context, req *servicepb.Query) (*servicepb.Qu
}
func (p *Proxy) DropCollection(ctx context.Context, req *servicepb.CollectionName) (*commonpb.Status, error) {
log.Println("drop collection: ", req)
dct := &DropCollectionTask{
Condition: NewTaskCondition(ctx),
DropCollectionRequest: internalpb.DropCollectionRequest{
@ -208,7 +204,6 @@ func (p *Proxy) DropCollection(ctx context.Context, req *servicepb.CollectionNam
}
func (p *Proxy) HasCollection(ctx context.Context, req *servicepb.CollectionName) (*servicepb.BoolResponse, error) {
log.Println("has collection: ", req)
hct := &HasCollectionTask{
Condition: NewTaskCondition(ctx),
HasCollectionRequest: internalpb.HasCollectionRequest{
@ -253,7 +248,6 @@ func (p *Proxy) HasCollection(ctx context.Context, req *servicepb.CollectionName
}
func (p *Proxy) DescribeCollection(ctx context.Context, req *servicepb.CollectionName) (*servicepb.CollectionDescription, error) {
log.Println("describe collection: ", req)
dct := &DescribeCollectionTask{
Condition: NewTaskCondition(ctx),
DescribeCollectionRequest: internalpb.DescribeCollectionRequest{
@ -298,7 +292,6 @@ func (p *Proxy) DescribeCollection(ctx context.Context, req *servicepb.Collectio
}
func (p *Proxy) ShowCollections(ctx context.Context, req *commonpb.Empty) (*servicepb.StringListResponse, error) {
log.Println("show collections")
sct := &ShowCollectionsTask{
Condition: NewTaskCondition(ctx),
ShowCollectionRequest: internalpb.ShowCollectionRequest{
@ -342,7 +335,6 @@ func (p *Proxy) ShowCollections(ctx context.Context, req *commonpb.Empty) (*serv
}
func (p *Proxy) CreatePartition(ctx context.Context, in *servicepb.PartitionName) (*commonpb.Status, error) {
log.Println("create partition", in)
cpt := &CreatePartitionTask{
Condition: NewTaskCondition(ctx),
CreatePartitionRequest: internalpb.CreatePartitionRequest{
@ -388,7 +380,6 @@ func (p *Proxy) CreatePartition(ctx context.Context, in *servicepb.PartitionName
}
func (p *Proxy) DropPartition(ctx context.Context, in *servicepb.PartitionName) (*commonpb.Status, error) {
log.Println("drop partition: ", in)
dpt := &DropPartitionTask{
Condition: NewTaskCondition(ctx),
DropPartitionRequest: internalpb.DropPartitionRequest{
@ -435,7 +426,6 @@ func (p *Proxy) DropPartition(ctx context.Context, in *servicepb.PartitionName)
}
func (p *Proxy) HasPartition(ctx context.Context, in *servicepb.PartitionName) (*servicepb.BoolResponse, error) {
log.Println("has partition: ", in)
hpt := &HasPartitionTask{
Condition: NewTaskCondition(ctx),
HasPartitionRequest: internalpb.HasPartitionRequest{
@ -488,7 +478,6 @@ func (p *Proxy) HasPartition(ctx context.Context, in *servicepb.PartitionName) (
}
func (p *Proxy) DescribePartition(ctx context.Context, in *servicepb.PartitionName) (*servicepb.PartitionDescription, error) {
log.Println("describe partition: ", in)
dpt := &DescribePartitionTask{
Condition: NewTaskCondition(ctx),
DescribePartitionRequest: internalpb.DescribePartitionRequest{
@ -543,7 +532,6 @@ func (p *Proxy) DescribePartition(ctx context.Context, in *servicepb.PartitionNa
}
func (p *Proxy) ShowPartitions(ctx context.Context, req *servicepb.CollectionName) (*servicepb.StringListResponse, error) {
log.Println("show partitions: ", req)
spt := &ShowPartitionsTask{
Condition: NewTaskCondition(ctx),
ShowPartitionRequest: internalpb.ShowPartitionRequest{


@ -4,26 +4,30 @@ import (
"context"
"sync"
"github.com/zilliztech/milvus-distributed/internal/allocator"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)
type Cache interface {
Hit(collectionName string) bool
Get(collectionName string) (*servicepb.CollectionDescription, error)
Sync(collectionName string) error
Update(collectionName string, desc *servicepb.CollectionDescription) error
Update(collectionName string) error
Remove(collectionName string) error
}
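A compile-time assertion, a common Go idiom rather than a line from this commit, makes it explicit that SimpleMetaCache below still satisfies the narrowed interface:

// Fails to compile if *SimpleMetaCache ever drifts from the Cache contract.
var _ Cache = (*SimpleMetaCache)(nil)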
var globalMetaCache Cache
type SimpleMetaCache struct {
mu sync.RWMutex
metas map[string]*servicepb.CollectionDescription // collection name to schema
ctx context.Context
proxyInstance *Proxy
mu sync.RWMutex
proxyID UniqueID
metas map[string]*servicepb.CollectionDescription // collection name to schema
masterClient masterpb.MasterClient
reqIDAllocator *allocator.IDAllocator
tsoAllocator *allocator.TimestampAllocator
ctx context.Context
}
func (metaCache *SimpleMetaCache) Hit(collectionName string) bool {
@ -43,34 +47,58 @@ func (metaCache *SimpleMetaCache) Get(collectionName string) (*servicepb.Collect
return schema, nil
}
func (metaCache *SimpleMetaCache) Sync(collectionName string) error {
dct := &DescribeCollectionTask{
Condition: NewTaskCondition(metaCache.ctx),
DescribeCollectionRequest: internalpb.DescribeCollectionRequest{
MsgType: internalpb.MsgType_kDescribeCollection,
CollectionName: &servicepb.CollectionName{
CollectionName: collectionName,
},
},
masterClient: metaCache.proxyInstance.masterClient,
func (metaCache *SimpleMetaCache) Update(collectionName string) error {
reqID, err := metaCache.reqIDAllocator.AllocOne()
if err != nil {
return err
}
ts, err := metaCache.tsoAllocator.AllocOne()
if err != nil {
return err
}
hasCollectionReq := &internalpb.HasCollectionRequest{
MsgType: internalpb.MsgType_kHasCollection,
ReqID: reqID,
Timestamp: ts,
ProxyID: metaCache.proxyID,
CollectionName: &servicepb.CollectionName{
CollectionName: collectionName,
},
}
has, err := metaCache.masterClient.HasCollection(metaCache.ctx, hasCollectionReq)
if err != nil {
return err
}
if !has.Value {
return errors.New("collection " + collectionName + " not exists")
}
var cancel func()
dct.ctx, cancel = context.WithTimeout(metaCache.ctx, reqTimeoutInterval)
defer cancel()
err := metaCache.proxyInstance.sched.DdQueue.Enqueue(dct)
reqID, err = metaCache.reqIDAllocator.AllocOne()
if err != nil {
return err
}
ts, err = metaCache.tsoAllocator.AllocOne()
if err != nil {
return err
}
req := &internalpb.DescribeCollectionRequest{
MsgType: internalpb.MsgType_kDescribeCollection,
ReqID: reqID,
Timestamp: ts,
ProxyID: metaCache.proxyID,
CollectionName: &servicepb.CollectionName{
CollectionName: collectionName,
},
}
resp, err := metaCache.masterClient.DescribeCollection(metaCache.ctx, req)
if err != nil {
return err
}
return dct.WaitToFinish()
}
func (metaCache *SimpleMetaCache) Update(collectionName string, desc *servicepb.CollectionDescription) error {
metaCache.mu.Lock()
defer metaCache.mu.Unlock()
metaCache.metas[collectionName] = resp
metaCache.metas[collectionName] = desc
return nil
}
@ -87,14 +115,23 @@ func (metaCache *SimpleMetaCache) Remove(collectionName string) error {
return nil
}
func newSimpleMetaCache(ctx context.Context, proxyInstance *Proxy) *SimpleMetaCache {
func newSimpleMetaCache(ctx context.Context,
mCli masterpb.MasterClient,
idAllocator *allocator.IDAllocator,
tsoAllocator *allocator.TimestampAllocator) *SimpleMetaCache {
return &SimpleMetaCache{
metas: make(map[string]*servicepb.CollectionDescription),
proxyInstance: proxyInstance,
ctx: ctx,
metas: make(map[string]*servicepb.CollectionDescription),
masterClient: mCli,
reqIDAllocator: idAllocator,
tsoAllocator: tsoAllocator,
proxyID: Params.ProxyID(),
ctx: ctx,
}
}
func initGlobalMetaCache(ctx context.Context, proxyInstance *Proxy) {
globalMetaCache = newSimpleMetaCache(ctx, proxyInstance)
func initGlobalMetaCache(ctx context.Context,
mCli masterpb.MasterClient,
idAllocator *allocator.IDAllocator,
tsoAllocator *allocator.TimestampAllocator) {
globalMetaCache = newSimpleMetaCache(ctx, mCli, idAllocator, tsoAllocator)
}
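Tasks consult the cache with a hit-or-update pattern. A minimal sketch in this package's context; descriptionFor is an illustrative wrapper, not a function in this commit.

// descriptionFor refreshes the cache on a miss, then reads from it.
func descriptionFor(name string) (*servicepb.CollectionDescription, error) {
	if !globalMetaCache.Hit(name) {
		if err := globalMetaCache.Update(name); err != nil {
			return nil, err
		}
	}
	return globalMetaCache.Get(name)
}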


@ -163,7 +163,7 @@ func (pt *ParamTable) convertRangeToSlice(rangeStr, sep string) []int {
panic(err)
}
var ret []int
for i := start; i < end; i++ {
for i := start; i <= end; i++ {
ret = append(ret, i)
}
return ret
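The switch from < to <= makes the parsed range inclusive of its upper bound. A runnable sketch of the behavior after this change:

package main

import "fmt"

// rangeToSlice mirrors the loop above: the upper bound is now included,
// so (0, 3) yields [0 1 2 3] rather than [0 1 2].
func rangeToSlice(start, end int) []int {
	var ret []int
	for i := start; i <= end; i++ {
		ret = append(ret, i)
	}
	return ret
}

func main() {
	fmt.Println(rangeToSlice(0, 3)) // [0 1 2 3]
}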


@ -109,7 +109,7 @@ func (p *Proxy) startProxy() error {
if err != nil {
return err
}
initGlobalMetaCache(p.proxyLoopCtx, p)
initGlobalMetaCache(p.proxyLoopCtx, p.masterClient, p.idAllocator, p.tsoAllocator)
p.manipulationMsgStream.Start()
p.queryMsgStream.Start()
p.sched.Start()


@ -119,7 +119,7 @@ func createCollection(t *testing.T, name string) {
Name: name,
Description: "no description",
AutoID: true,
Fields: make([]*schemapb.FieldSchema, 2),
Fields: make([]*schemapb.FieldSchema, 1),
}
fieldName := "Field1"
req.Fields[0] = &schemapb.FieldSchema{
@ -127,24 +127,6 @@ func createCollection(t *testing.T, name string) {
Description: "no description",
DataType: schemapb.DataType_INT32,
}
fieldName = "vec"
req.Fields[1] = &schemapb.FieldSchema{
Name: fieldName,
Description: "vector",
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "metric_type",
Value: "L2",
},
},
}
resp, err := proxyClient.CreateCollection(ctx, req)
assert.Nil(t, err)
msg := "Create Collection " + name + " should succeed!"
@ -157,7 +139,7 @@ func dropCollection(t *testing.T, name string) {
}
resp, err := proxyClient.DropCollection(ctx, req)
assert.Nil(t, err)
msg := "Drop Collection " + name + " should succeed! err :" + resp.Reason
msg := "Drop Collection " + name + " should succeed!"
assert.Equal(t, resp.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
}
@ -170,7 +152,6 @@ func TestProxy_CreateCollection(t *testing.T) {
go func(group *sync.WaitGroup) {
defer group.Done()
createCollection(t, collectionName)
dropCollection(t, collectionName)
}(&wg)
}
wg.Wait()
@ -184,11 +165,9 @@ func TestProxy_HasCollection(t *testing.T) {
wg.Add(1)
go func(group *sync.WaitGroup) {
defer group.Done()
createCollection(t, collectionName)
has := hasCollection(t, collectionName)
msg := "Should has Collection " + collectionName
assert.Equal(t, has, true, msg)
dropCollection(t, collectionName)
}(&wg)
}
wg.Wait()
@ -203,7 +182,6 @@ func TestProxy_DescribeCollection(t *testing.T) {
wg.Add(1)
go func(group *sync.WaitGroup) {
defer group.Done()
createCollection(t, collectionName)
has := hasCollection(t, collectionName)
if has {
resp, err := proxyClient.DescribeCollection(ctx, &servicepb.CollectionName{CollectionName: collectionName})
@ -213,7 +191,6 @@ func TestProxy_DescribeCollection(t *testing.T) {
msg := "Describe Collection " + strconv.Itoa(i) + " should succeed!"
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
t.Logf("Describe Collection %v: %v", i, resp)
dropCollection(t, collectionName)
}
}(&wg)
}
@ -229,7 +206,6 @@ func TestProxy_ShowCollections(t *testing.T) {
wg.Add(1)
go func(group *sync.WaitGroup) {
defer group.Done()
createCollection(t, collectionName)
has := hasCollection(t, collectionName)
if has {
resp, err := proxyClient.ShowCollections(ctx, &commonpb.Empty{})
@ -239,7 +215,6 @@ func TestProxy_ShowCollections(t *testing.T) {
msg := "Show collections " + strconv.Itoa(i) + " should succeed!"
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
t.Logf("Show collections %v: %v", i, resp)
dropCollection(t, collectionName)
}
}(&wg)
}
@ -271,7 +246,6 @@ func TestProxy_Insert(t *testing.T) {
}
msg := "Insert into Collection " + strconv.Itoa(i) + " should succeed!"
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
dropCollection(t, collectionName)
}
}(&wg)
}
@ -334,7 +308,6 @@ func TestProxy_Search(t *testing.T) {
queryWg.Add(1)
go func(group *sync.WaitGroup) {
defer group.Done()
//createCollection(t, collectionName)
has := hasCollection(t, collectionName)
if !has {
createCollection(t, collectionName)
@ -342,7 +315,6 @@ func TestProxy_Search(t *testing.T) {
resp, err := proxyClient.Search(ctx, req)
t.Logf("response of search collection %v: %v", i, resp)
assert.Nil(t, err)
dropCollection(t, collectionName)
}(&queryWg)
}
@ -356,9 +328,9 @@ func TestProxy_Search(t *testing.T) {
func TestProxy_AssignSegID(t *testing.T) {
collectionName := "CreateCollection1"
createCollection(t, collectionName)
testNum := 1
testNum := 4
for i := 0; i < testNum; i++ {
segID, err := proxyServer.segAssigner.GetSegmentID(collectionName, Params.defaultPartitionTag(), int32(i), 200000)
segID, err := proxyServer.segAssigner.GetSegmentID(collectionName, "default", int32(i), 200000)
assert.Nil(t, err)
fmt.Println("segID", segID)
}
@ -373,7 +345,6 @@ func TestProxy_DropCollection(t *testing.T) {
wg.Add(1)
go func(group *sync.WaitGroup) {
defer group.Done()
createCollection(t, collectionName)
has := hasCollection(t, collectionName)
if has {
dropCollection(t, collectionName)
@ -386,14 +357,27 @@ func TestProxy_DropCollection(t *testing.T) {
func TestProxy_PartitionGRPC(t *testing.T) {
var wg sync.WaitGroup
collName := "collPartTest"
createCollection(t, collName)
fieldName := "collPartTestF1"
collReq := &schemapb.CollectionSchema{
Name: collName,
Fields: []*schemapb.FieldSchema{
&schemapb.FieldSchema{
Name: fieldName,
Description: "",
DataType: schemapb.DataType_VECTOR_FLOAT,
},
},
}
st, err := proxyClient.CreateCollection(ctx, collReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
for i := 0; i < testNum; i++ {
wg.Add(1)
i := i
go func() {
defer wg.Done()
tag := fmt.Sprintf("partition_%d", i)
tag := fmt.Sprintf("partition-%d", i)
preq := &servicepb.PartitionName{
CollectionName: collName,
Tag: tag,
@ -429,7 +413,6 @@ func TestProxy_PartitionGRPC(t *testing.T) {
}()
}
wg.Wait()
dropCollection(t, collName)
}
func TestMain(m *testing.M) {


@ -91,7 +91,7 @@ func (it *InsertTask) PreExecute() error {
func (it *InsertTask) Execute() error {
collectionName := it.BaseInsertTask.CollectionName
if !globalMetaCache.Hit(collectionName) {
err := globalMetaCache.Sync(collectionName)
err := globalMetaCache.Update(collectionName)
if err != nil {
return err
}
@ -103,20 +103,17 @@ func (it *InsertTask) Execute() error {
autoID := description.Schema.AutoID
var rowIDBegin UniqueID
var rowIDEnd UniqueID
rowNums := len(it.BaseInsertTask.RowData)
rowIDBegin, rowIDEnd, _ = it.rowIDAllocator.Alloc(uint32(rowNums))
it.BaseInsertTask.RowIDs = make([]UniqueID, rowNums)
for i := rowIDBegin; i < rowIDEnd; i++ {
offset := i - rowIDBegin
it.BaseInsertTask.RowIDs[offset] = i
}
if autoID {
if autoID || true {
if it.HashValues == nil || len(it.HashValues) == 0 {
it.HashValues = make([]uint32, 0)
}
for _, rowID := range it.RowIDs {
hashValue, _ := typeutil.Hash32Int64(rowID)
rowNums := len(it.BaseInsertTask.RowData)
rowIDBegin, rowIDEnd, _ = it.rowIDAllocator.Alloc(uint32(rowNums))
it.BaseInsertTask.RowIDs = make([]UniqueID, rowNums)
for i := rowIDBegin; i < rowIDEnd; i++ {
offset := i - rowIDBegin
it.BaseInsertTask.RowIDs[offset] = i
hashValue, _ := typeutil.Hash32Int64(i)
it.HashValues = append(it.HashValues, hashValue)
}
}
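Each allocated row ID is hashed into a uint32 that routes the row to a channel. A sketch of that mapping in this package's context; hashRowIDs is our illustrative helper (the half-open range [begin, end) and typeutil.Hash32Int64 come from the code above).

// hashRowIDs hashes every ID in [begin, end) the same way the insert task does.
func hashRowIDs(begin, end int64) []uint32 {
	hashes := make([]uint32, 0, end-begin)
	for id := begin; id < end; id++ {
		h, _ := typeutil.Hash32Int64(id) // error ignored, as in the task code
		hashes = append(hashes, h)
	}
	return hashes
}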
@ -129,7 +126,6 @@ func (it *InsertTask) Execute() error {
}
msgPack.Msgs[0] = tsMsg
err = it.manipulationMsgStream.Produce(msgPack)
it.result = &servicepb.IntegerRangeResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
@ -356,7 +352,7 @@ func (qt *QueryTask) SetTs(ts Timestamp) {
func (qt *QueryTask) PreExecute() error {
collectionName := qt.query.CollectionName
if !globalMetaCache.Hit(collectionName) {
err := globalMetaCache.Sync(collectionName)
err := globalMetaCache.Update(collectionName)
if err != nil {
return err
}
@ -609,9 +605,14 @@ func (dct *DescribeCollectionTask) PreExecute() error {
}
func (dct *DescribeCollectionTask) Execute() error {
if !globalMetaCache.Hit(dct.CollectionName.CollectionName) {
err := globalMetaCache.Update(dct.CollectionName.CollectionName)
if err != nil {
return err
}
}
var err error
dct.result, err = dct.masterClient.DescribeCollection(dct.ctx, &dct.DescribeCollectionRequest)
globalMetaCache.Update(dct.CollectionName.CollectionName, dct.result)
dct.result, err = globalMetaCache.Get(dct.CollectionName.CollectionName)
return err
}


@ -27,7 +27,7 @@ func TestTimeTick_Start(t *testing.T) {
func TestTimeTick_Start2(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
masterAddr := Params.MasterAddress()
tsoAllocator, err := allocator.NewTimestampAllocator(ctx, masterAddr)
assert.Nil(t, err)


@ -9,6 +9,7 @@ import (
)
func TestValidateCollectionName(t *testing.T) {
Params.Init()
assert.Nil(t, ValidateCollectionName("abc"))
assert.Nil(t, ValidateCollectionName("_123abc"))
assert.Nil(t, ValidateCollectionName("abc123_$"))
@ -33,8 +34,8 @@ func TestValidateCollectionName(t *testing.T) {
}
func TestValidatePartitionTag(t *testing.T) {
Params.Init()
assert.Nil(t, ValidatePartitionTag("abc", true))
assert.Nil(t, ValidatePartitionTag("123abc", true))
assert.Nil(t, ValidatePartitionTag("_123abc", true))
assert.Nil(t, ValidatePartitionTag("abc123_$", true))
@ -43,6 +44,7 @@ func TestValidatePartitionTag(t *testing.T) {
longName[i] = 'a'
}
invalidNames := []string{
"123abc",
"$abc",
"_12 ac",
" ",
@ -60,6 +62,7 @@ func TestValidatePartitionTag(t *testing.T) {
}
func TestValidateFieldName(t *testing.T) {
Params.Init()
assert.Nil(t, ValidateFieldName("abc"))
assert.Nil(t, ValidateFieldName("_123abc"))
@ -83,6 +86,7 @@ func TestValidateFieldName(t *testing.T) {
}
func TestValidateDimension(t *testing.T) {
Params.Init()
assert.Nil(t, ValidateDimension(1, false))
assert.Nil(t, ValidateDimension(Params.MaxDimension(), false))
assert.Nil(t, ValidateDimension(8, true))
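Taken together, the partition-tag cases above imply that a tag must start with a letter or underscore. A runnable sketch of that rule as we read it; the regexp is our interpretation of the tests, not repo code.

package main

import (
	"fmt"
	"regexp"
)

// Our reading of the updated tests: first character is a letter or
// underscore, followed by letters, digits, underscores, or '$'.
var tagPattern = regexp.MustCompile(`^[A-Za-z_][A-Za-z0-9_$]*$`)

func main() {
	for _, tag := range []string{"abc", "_123abc", "abc123_$", "123abc", "$abc", "_12 ac"} {
		fmt.Printf("%q valid=%v\n", tag, tagPattern.MatchString(tag))
	}
}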


@ -12,7 +12,6 @@ package querynode
*/
import "C"
import (
"fmt"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"log"
"strconv"
@ -230,7 +229,6 @@ func (colReplica *collectionReplicaImpl) addPartitionsByCollectionMeta(colMeta *
if err != nil {
log.Println(err)
}
fmt.Println("add partition: ", tag)
}
return nil
@ -264,7 +262,6 @@ func (colReplica *collectionReplicaImpl) removePartitionsByCollectionMeta(colMet
if err != nil {
log.Println(err)
}
fmt.Println("delete partition: ", tag)
}
return nil


@ -145,7 +145,7 @@ func printSegmentStruct(obj *etcdpb.SegmentMeta) {
}
func (mService *metaService) processCollectionCreate(id string, value string) {
//println(fmt.Sprintf("Create Collection:$%s$", id))
println(fmt.Sprintf("Create Collection:$%s$", id))
col := mService.collectionUnmarshal(value)
if col != nil {
@ -163,7 +163,7 @@ func (mService *metaService) processCollectionCreate(id string, value string) {
}
func (mService *metaService) processSegmentCreate(id string, value string) {
//println("Create Segment: ", id)
println("Create Segment: ", id)
seg := mService.segmentUnmarshal(value)
if !isSegmentChannelRangeInQueryNodeChannelRange(seg) {
@ -182,7 +182,7 @@ func (mService *metaService) processSegmentCreate(id string, value string) {
}
func (mService *metaService) processCreate(key string, msg string) {
//println("process create", key)
println("process create", key)
if isCollectionObj(key) {
objID := GetCollectionObjID(key)
mService.processCollectionCreate(objID, msg)
@ -214,7 +214,7 @@ func (mService *metaService) processSegmentModify(id string, value string) {
}
func (mService *metaService) processCollectionModify(id string, value string) {
//println("Modify Collection: ", id)
println("Modify Collection: ", id)
col := mService.collectionUnmarshal(value)
if col != nil {
@ -242,7 +242,7 @@ func (mService *metaService) processModify(key string, msg string) {
}
func (mService *metaService) processSegmentDelete(id string) {
//println("Delete segment: ", id)
println("Delete segment: ", id)
var segmentID, err = strconv.ParseInt(id, 10, 64)
if err != nil {
@ -257,7 +257,7 @@ func (mService *metaService) processSegmentDelete(id string) {
}
func (mService *metaService) processCollectionDelete(id string) {
//println("Delete collection: ", id)
println("Delete collection: ", id)
var collectionID, err = strconv.ParseInt(id, 10, 64)
if err != nil {
@ -272,7 +272,7 @@ func (mService *metaService) processCollectionDelete(id string) {
}
func (mService *metaService) processDelete(key string) {
//println("process delete")
println("process delete")
if isCollectionObj(key) {
objID := GetCollectionObjID(key)


@ -29,7 +29,7 @@ func createPlan(col Collection, dsl string) (*Plan, error) {
if errorCode != 0 {
errorMsg := C.GoString(status.error_msg)
defer C.free(unsafe.Pointer(status.error_msg))
return nil, errors.New("Create plan failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
return nil, errors.New("Insert failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
}
var newPlan = &Plan{cPlan: cPlan}
@ -60,7 +60,7 @@ func parserPlaceholderGroup(plan *Plan, placeHolderBlob []byte) (*PlaceholderGro
if errorCode != 0 {
errorMsg := C.GoString(status.error_msg)
defer C.free(unsafe.Pointer(status.error_msg))
return nil, errors.New("Parser placeholder group failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
return nil, errors.New("Insert failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
}
var newPlaceholderGroup = &PlaceholderGroup{cPlaceholderGroup: cPlaceholderGroup}


@ -10,8 +10,6 @@ package querynode
*/
import "C"
import (
"errors"
"strconv"
"unsafe"
)
@ -23,66 +21,26 @@ type MarshaledHits struct {
cMarshaledHits C.CMarshaledHits
}
func reduceSearchResults(searchResults []*SearchResult, numSegments int64, inReduced []bool) error {
func reduceSearchResults(searchResults []*SearchResult, numSegments int64) *SearchResult {
cSearchResults := make([]C.CQueryResult, 0)
for _, res := range searchResults {
cSearchResults = append(cSearchResults, res.cQueryResult)
}
cSearchResultPtr := (*C.CQueryResult)(&cSearchResults[0])
cNumSegments := C.long(numSegments)
cInReduced := (*C.bool)(&inReduced[0])
status := C.ReduceQueryResults(cSearchResultPtr, cNumSegments, cInReduced)
errorCode := status.error_code
if errorCode != 0 {
errorMsg := C.GoString(status.error_msg)
defer C.free(unsafe.Pointer(status.error_msg))
return errors.New("reduceSearchResults failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
}
return nil
res := C.ReduceQueryResults(cSearchResultPtr, cNumSegments)
return &SearchResult{cQueryResult: res}
}
func fillTargetEntry(plan *Plan, searchResults []*SearchResult, matchedSegments []*Segment, inReduced []bool) error {
for i, value := range inReduced {
if value {
err := matchedSegments[i].fillTargetEntry(plan, searchResults[i])
if err != nil {
return err
}
}
}
return nil
}
func reorganizeQueryResults(plan *Plan, placeholderGroups []*PlaceholderGroup, searchResults []*SearchResult, numSegments int64, inReduced []bool) (*MarshaledHits, error) {
func (sr *SearchResult) reorganizeQueryResults(plan *Plan, placeholderGroups []*PlaceholderGroup) *MarshaledHits {
cPlaceholderGroups := make([]C.CPlaceholderGroup, 0)
for _, pg := range placeholderGroups {
cPlaceholderGroups = append(cPlaceholderGroups, (*pg).cPlaceholderGroup)
}
var cPlaceHolderGroupPtr = (*C.CPlaceholderGroup)(&cPlaceholderGroups[0])
var cNumGroup = (C.long)(len(placeholderGroups))
cSearchResults := make([]C.CQueryResult, 0)
for _, res := range searchResults {
cSearchResults = append(cSearchResults, res.cQueryResult)
}
cSearchResultPtr := (*C.CQueryResult)(&cSearchResults[0])
var cNumSegments = C.long(numSegments)
var cInReduced = (*C.bool)(&inReduced[0])
var cMarshaledHits C.CMarshaledHits
status := C.ReorganizeQueryResults(&cMarshaledHits, cPlaceHolderGroupPtr, cNumGroup, cSearchResultPtr, cInReduced, cNumSegments, plan.cPlan)
errorCode := status.error_code
if errorCode != 0 {
errorMsg := C.GoString(status.error_msg)
defer C.free(unsafe.Pointer(status.error_msg))
return nil, errors.New("reorganizeQueryResults failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
}
return &MarshaledHits{cMarshaledHits: cMarshaledHits}, nil
cNumGroup := (C.long)(len(placeholderGroups))
var cPlaceHolder = (*C.CPlaceholderGroup)(&cPlaceholderGroups[0])
res := C.ReorganizeQueryResults(sr.cQueryResult, plan.cPlan, cPlaceHolder, cNumGroup)
return &MarshaledHits{cMarshaledHits: res}
}
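After this change the post-search flow is: reduce across segments, reorganize per placeholder group, then read the marshaled hits blob and release the C-side results. A sketch in this package's context; collectHits is an illustrative wrapper, not a function in this commit.

// collectHits runs the reduce/reorganize pipeline and returns the raw hits blob.
func collectHits(plan *Plan, groups []*PlaceholderGroup, results []*SearchResult) ([]byte, error) {
	reduced := reduceSearchResults(results, int64(len(results)))
	defer deleteSearchResults([]*SearchResult{reduced})

	hits := reduced.reorganizeQueryResults(plan, groups)
	defer deleteMarshaledHits(hits)

	return hits.getHitsBlob()
}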
func (mh *MarshaledHits) getHitsBlobSize() int64 {


@ -107,21 +107,15 @@ func TestReduce_AllFunc(t *testing.T) {
placeholderGroups = append(placeholderGroups, holder)
searchResults := make([]*SearchResult, 0)
matchedSegment := make([]*Segment, 0)
searchResult, err := segment.segmentSearch(plan, placeholderGroups, []Timestamp{0})
assert.Nil(t, err)
searchResults = append(searchResults, searchResult)
matchedSegment = append(matchedSegment, segment)
testReduce := make([]bool, len(searchResults))
err = reduceSearchResults(searchResults, 1, testReduce)
assert.Nil(t, err)
err = fillTargetEntry(plan, searchResults, matchedSegment, testReduce)
assert.Nil(t, err)
reducedSearchResults := reduceSearchResults(searchResults, 1)
assert.NotNil(t, reducedSearchResults)
marshaledHits, err := reorganizeQueryResults(plan, placeholderGroups, searchResults, 1, testReduce)
marshaledHits := reducedSearchResults.reorganizeQueryResults(plan, placeholderGroups)
assert.NotNil(t, marshaledHits)
assert.Nil(t, err)
hitsBlob, err := marshaledHits.getHitsBlob()
assert.Nil(t, err)
@ -143,6 +137,7 @@ func TestReduce_AllFunc(t *testing.T) {
plan.delete()
holder.delete()
deleteSearchResults(searchResults)
deleteSearchResults([]*SearchResult{reducedSearchResults})
deleteMarshaledHits(marshaledHits)
deleteSegment(segment)
deleteCollection(collection)

View File

@ -139,7 +139,7 @@ func (ss *searchService) receiveSearchMsg() {
err := ss.search(msg)
if err != nil {
log.Println(err)
err = ss.publishFailedSearchResult(msg, err.Error())
err = ss.publishFailedSearchResult(msg)
if err != nil {
log.Println("publish FailedSearchResult failed, error message: ", err)
}
@ -191,7 +191,7 @@ func (ss *searchService) doUnsolvedMsgSearch() {
err := ss.search(msg)
if err != nil {
log.Println(err)
err = ss.publishFailedSearchResult(msg, err.Error())
err = ss.publishFailedSearchResult(msg)
if err != nil {
log.Println("publish FailedSearchResult failed, error message: ", err)
}
@ -238,7 +238,6 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
placeholderGroups = append(placeholderGroups, placeholderGroup)
searchResults := make([]*SearchResult, 0)
matchedSegments := make([]*Segment, 0)
for _, partitionTag := range partitionTags {
hasPartition := (*ss.replica).hasPartition(collectionID, partitionTag)
@ -258,7 +257,6 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
return err
}
searchResults = append(searchResults, searchResult)
matchedSegments = append(matchedSegments, segment)
}
}
@ -284,20 +282,8 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
return nil
}
inReduced := make([]bool, len(searchResults))
numSegment := int64(len(searchResults))
err = reduceSearchResults(searchResults, numSegment, inReduced)
if err != nil {
return err
}
err = fillTargetEntry(plan, searchResults, matchedSegments, inReduced)
if err != nil {
return err
}
marshaledHits, err := reorganizeQueryResults(plan, placeholderGroups, searchResults, numSegment, inReduced)
if err != nil {
return err
}
reducedSearchResult := reduceSearchResults(searchResults, int64(len(searchResults)))
marshaledHits := reducedSearchResult.reorganizeQueryResults(plan, placeholderGroups)
hitsBlob, err := marshaledHits.getHitsBlob()
if err != nil {
return err
@ -305,12 +291,12 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
var offset int64 = 0
for index := range placeholderGroups {
hitBlobSizePeerQuery, err := marshaledHits.hitBlobSizeInGroup(int64(index))
hitBolbSizePeerQuery, err := marshaledHits.hitBlobSizeInGroup(int64(index))
if err != nil {
return err
}
hits := make([][]byte, 0)
for _, len := range hitBlobSizePeerQuery {
for _, len := range hitBolbSizePeerQuery {
hits = append(hits, hitsBlob[offset:offset+len])
//test code to checkout marshaled hits
//marshaledHit := hitsBlob[offset:offset+len]
@ -343,6 +329,7 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
}
deleteSearchResults(searchResults)
deleteSearchResults([]*SearchResult{reducedSearchResult})
deleteMarshaledHits(marshaledHits)
plan.delete()
placeholderGroup.delete()
@ -359,7 +346,7 @@ func (ss *searchService) publishSearchResult(msg msgstream.TsMsg) error {
return nil
}
func (ss *searchService) publishFailedSearchResult(msg msgstream.TsMsg, errMsg string) error {
func (ss *searchService) publishFailedSearchResult(msg msgstream.TsMsg) error {
msgPack := msgstream.MsgPack{}
searchMsg, ok := msg.(*msgstream.SearchMsg)
if !ok {
@ -367,7 +354,7 @@ func (ss *searchService) publishFailedSearchResult(msg msgstream.TsMsg, errMsg s
}
var results = internalpb.SearchResult{
MsgType: internalpb.MsgType_kSearchResult,
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR, Reason: errMsg},
Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR},
ReqID: searchMsg.ReqID,
ProxyID: searchMsg.ProxyID,
QueryNodeID: searchMsg.ProxyID,


@ -253,242 +253,3 @@ func TestSearch_Search(t *testing.T) {
cancel()
node.Close()
}
func TestSearch_SearchMultiSegments(t *testing.T) {
Params.Init()
ctx, cancel := context.WithCancel(context.Background())
// init query node
pulsarURL, _ := Params.pulsarAddress()
node := NewQueryNode(ctx, 0)
// init meta
collectionName := "collection0"
fieldVec := schemapb.FieldSchema{
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
}
fieldInt := schemapb.FieldSchema{
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "1",
},
},
}
schema := schemapb.CollectionSchema{
Name: collectionName,
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}
collectionMeta := etcdpb.CollectionMeta{
ID: UniqueID(0),
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
collectionMetaBlob := proto.MarshalTextString(&collectionMeta)
assert.NotEqual(t, "", collectionMetaBlob)
var err = (*node.replica).addCollection(&collectionMeta, collectionMetaBlob)
assert.NoError(t, err)
collection, err := (*node.replica).getCollectionByName(collectionName)
assert.NoError(t, err)
assert.Equal(t, collection.meta.Schema.Name, "collection0")
assert.Equal(t, collection.meta.ID, UniqueID(0))
assert.Equal(t, (*node.replica).getCollectionNum(), 1)
err = (*node.replica).addPartition(collection.ID(), collectionMeta.PartitionTags[0])
assert.NoError(t, err)
segmentID := UniqueID(0)
err = (*node.replica).addSegment(segmentID, collectionMeta.PartitionTags[0], UniqueID(0))
assert.NoError(t, err)
// test data generate
const msgLength = 1024
const receiveBufSize = 1024
const DIM = 16
insertProducerChannels := Params.insertChannelNames()
searchProducerChannels := Params.searchChannelNames()
var vec = [DIM]float32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
// start search service
dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"L2\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
var searchRawData1 []byte
var searchRawData2 []byte
for i, ele := range vec {
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele+float32(i*2)))
searchRawData1 = append(searchRawData1, buf...)
}
for i, ele := range vec {
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele+float32(i*4)))
searchRawData2 = append(searchRawData2, buf...)
}
placeholderValue := servicepb.PlaceholderValue{
Tag: "$0",
Type: servicepb.PlaceholderType_VECTOR_FLOAT,
Values: [][]byte{searchRawData1, searchRawData2},
}
placeholderGroup := servicepb.PlaceholderGroup{
Placeholders: []*servicepb.PlaceholderValue{&placeholderValue},
}
placeGroupByte, err := proto.Marshal(&placeholderGroup)
if err != nil {
log.Print("marshal placeholderGroup failed")
}
query := servicepb.Query{
CollectionName: "collection0",
PartitionTags: []string{"default"},
Dsl: dslString,
PlaceholderGroup: placeGroupByte,
}
queryByte, err := proto.Marshal(&query)
if err != nil {
log.Print("marshal query failed")
}
blob := commonpb.Blob{
Value: queryByte,
}
searchMsg := &msgstream.SearchMsg{
BaseMsg: msgstream.BaseMsg{
HashValues: []uint32{0},
},
SearchRequest: internalpb.SearchRequest{
MsgType: internalpb.MsgType_kSearch,
ReqID: int64(1),
ProxyID: int64(1),
Timestamp: uint64(10 + 1000),
ResultChannelID: int64(0),
Query: &blob,
},
}
msgPackSearch := msgstream.MsgPack{}
msgPackSearch.Msgs = append(msgPackSearch.Msgs, searchMsg)
searchStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
searchStream.SetPulsarClient(pulsarURL)
searchStream.CreatePulsarProducers(searchProducerChannels)
searchStream.Start()
err = searchStream.Produce(&msgPackSearch)
assert.NoError(t, err)
node.searchService = newSearchService(node.ctx, node.replica)
go node.searchService.start()
// start insert
timeRange := TimeRange{
timestampMin: 0,
timestampMax: math.MaxUint64,
}
insertMessages := make([]msgstream.TsMsg, 0)
for i := 0; i < msgLength; i++ {
segmentID := 0
if i >= msgLength/2 {
segmentID = 1
}
var rawData []byte
for _, ele := range vec {
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele+float32(i*2)))
rawData = append(rawData, buf...)
}
bs := make([]byte, 4)
binary.LittleEndian.PutUint32(bs, 1)
rawData = append(rawData, bs...)
var msg msgstream.TsMsg = &msgstream.InsertMsg{
BaseMsg: msgstream.BaseMsg{
HashValues: []uint32{
uint32(i),
},
},
InsertRequest: internalpb.InsertRequest{
MsgType: internalpb.MsgType_kInsert,
ReqID: int64(i),
CollectionName: "collection0",
PartitionTag: "default",
SegmentID: int64(segmentID),
ChannelID: int64(0),
ProxyID: int64(0),
Timestamps: []uint64{uint64(i + 1000)},
RowIDs: []int64{int64(i)},
RowData: []*commonpb.Blob{
{Value: rawData},
},
},
}
insertMessages = append(insertMessages, msg)
}
msgPack := msgstream.MsgPack{
BeginTs: timeRange.timestampMin,
EndTs: timeRange.timestampMax,
Msgs: insertMessages,
}
// generate timeTick
timeTickMsgPack := msgstream.MsgPack{}
baseMsg := msgstream.BaseMsg{
BeginTimestamp: 0,
EndTimestamp: 0,
HashValues: []uint32{0},
}
timeTickResult := internalpb.TimeTickMsg{
MsgType: internalpb.MsgType_kTimeTick,
PeerID: UniqueID(0),
Timestamp: math.MaxUint64,
}
timeTickMsg := &msgstream.TimeTickMsg{
BaseMsg: baseMsg,
TimeTickMsg: timeTickResult,
}
timeTickMsgPack.Msgs = append(timeTickMsgPack.Msgs, timeTickMsg)
// pulsar produce
insertStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
insertStream.SetPulsarClient(pulsarURL)
insertStream.CreatePulsarProducers(insertProducerChannels)
insertStream.Start()
err = insertStream.Produce(&msgPack)
assert.NoError(t, err)
err = insertStream.Broadcast(&timeTickMsgPack)
assert.NoError(t, err)
// dataSync
node.dataSyncService = newDataSyncService(node.ctx, node.replica)
go node.dataSyncService.start()
time.Sleep(1 * time.Second)
cancel()
node.Close()
}

View File

@ -208,7 +208,7 @@ func (s *Segment) segmentSearch(plan *Plan,
var cTimestamp = (*C.ulong)(&timestamp[0])
var cPlaceHolder = (*C.CPlaceholderGroup)(&cPlaceholderGroups[0])
var cNumGroups = C.int(len(placeHolderGroups))
var cQueryResult = (*C.CQueryResult)(&searchResult.cQueryResult)
cQueryResult := (*C.CQueryResult)(&searchResult.cQueryResult)
var status = C.Search(s.segmentPtr, plan.cPlan, cPlaceHolder, cTimestamp, cNumGroups, cQueryResult)
errorCode := status.error_code
@ -221,18 +221,3 @@ func (s *Segment) segmentSearch(plan *Plan,
return &searchResult, nil
}
func (s *Segment) fillTargetEntry(plan *Plan,
result *SearchResult) error {
var status = C.FillTargetEntry(s.segmentPtr, plan.cPlan, result.cQueryResult)
errorCode := status.error_code
if errorCode != 0 {
errorMsg := C.GoString(status.error_msg)
defer C.free(unsafe.Pointer(status.error_msg))
return errors.New("FillTargetEntry failed, C runtime error detected, error code = " + strconv.Itoa(int(errorCode)) + ", error msg = " + errorMsg)
}
return nil
}

3
internal/storage/cwrapper/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
cmake-build-debug
.idea
cmake_build

View File

@ -0,0 +1,42 @@
cmake_minimum_required(VERSION 3.14...3.17 FATAL_ERROR)
project(wrapper)
set(CMAKE_CXX_STANDARD 17)
###################################################################################################
# - cmake modules ---------------------------------------------------------------------------------
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/" ${CMAKE_MODULE_PATH})
###################################################################################################
# - build arrow ------------------------------------------------------------------------------------
message(STATUS "BUILDING ARROW")
include(ConfigureArrow)
if(ARROW_FOUND)
message(STATUS "Apache Arrow found in ${ARROW_INCLUDE_DIR}")
else()
message(FATAL_ERROR "Apache Arrow not found, please check your settings.")
endif(ARROW_FOUND)
add_library(arrow STATIC IMPORTED ${ARROW_LIB})
add_library(parquet STATIC IMPORTED ${PARQUET_LIB})
add_library(thrift STATIC IMPORTED ${THRIFT_LIB})
add_library(utf8proc STATIC IMPORTED ${UTF8PROC_LIB})
if(ARROW_FOUND)
set_target_properties(arrow PROPERTIES IMPORTED_LOCATION ${ARROW_LIB})
set_target_properties(parquet PROPERTIES IMPORTED_LOCATION ${PARQUET_LIB})
set_target_properties(thrift PROPERTIES IMPORTED_LOCATION ${THRIFT_LIB})
set_target_properties(utf8proc PROPERTIES IMPORTED_LOCATION ${UTF8PROC_LIB})
endif(ARROW_FOUND)
###################################################################################################
include_directories(${ARROW_INCLUDE_DIR})
include_directories(${PROJECT_SOURCE_DIR})
add_library(wrapper ParquetWraper.cpp ParquetWraper.h ColumnType.h PayloadStream.h PayloadStream.cpp)
add_subdirectory(test)

View File

@ -0,0 +1,42 @@
#pragma once
enum ColumnType : int {
NONE = 0,
BOOL = 1,
INT8 = 2,
INT16 = 3,
INT32 = 4,
INT64 = 5,
FLOAT = 10,
DOUBLE = 11,
STRING = 20,
VECTOR_BINARY = 100,
VECTOR_FLOAT = 101
};
enum ErrorCode : int {
SUCCESS = 0,
UNEXPECTED_ERROR = 1,
CONNECT_FAILED = 2,
PERMISSION_DENIED = 3,
COLLECTION_NOT_EXISTS = 4,
ILLEGAL_ARGUMENT = 5,
ILLEGAL_DIMENSION = 7,
ILLEGAL_INDEX_TYPE = 8,
ILLEGAL_COLLECTION_NAME = 9,
ILLEGAL_TOPK = 10,
ILLEGAL_ROWRECORD = 11,
ILLEGAL_VECTOR_ID = 12,
ILLEGAL_SEARCH_RESULT = 13,
FILE_NOT_FOUND = 14,
META_FAILED = 15,
CACHE_FAILED = 16,
CANNOT_CREATE_FOLDER = 17,
CANNOT_CREATE_FILE = 18,
CANNOT_DELETE_FOLDER = 19,
CANNOT_DELETE_FILE = 20,
BUILD_INDEX_ERROR = 21,
ILLEGAL_NLIST = 22,
ILLEGAL_METRIC_TYPE = 23,
OUT_OF_MEMORY = 24,
DD_REQUEST_RACE = 1000
};

View File

@ -0,0 +1,188 @@
#include "ParquetWraper.h"
#include "PayloadStream.h"
#include "parquet/arrow/writer.h"
extern "C" CPayloadWriter NewPayloadWriter(int columnType) {
auto p = new wrapper::PayloadWriter;
p->builder = nullptr;
p->schema = nullptr;
p->output = nullptr;
p->rows = 0;
switch (static_cast<ColumnType>(columnType)) {
case ColumnType::BOOL : {
p->columnType = ColumnType::BOOL;
p->builder = std::make_shared<arrow::BooleanBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::boolean())});
break;
}
case ColumnType::INT8 : {
p->columnType = ColumnType::INT8;
p->builder = std::make_shared<arrow::Int8Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int8())});
break;
}
case ColumnType::INT16 : {
p->columnType = ColumnType::INT16;
p->builder = std::make_shared<arrow::Int16Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int16())});
break;
}
case ColumnType::INT32 : {
p->columnType = ColumnType::INT32;
p->builder = std::make_shared<arrow::Int32Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int32())});
break;
}
case ColumnType::INT64 : {
p->columnType = ColumnType::INT64;
p->builder = std::make_shared<arrow::Int64Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int64())});
break;
}
case ColumnType::FLOAT : {
p->columnType = ColumnType::FLOAT;
p->builder = std::make_shared<arrow::FloatBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::float32())});
break;
}
case ColumnType::DOUBLE : {
p->columnType = ColumnType::DOUBLE;
p->builder = std::make_shared<arrow::DoubleBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::float64())});
break;
}
case ColumnType::STRING : {
p->columnType = ColumnType::STRING;
p->builder = std::make_shared<arrow::StringBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::utf8())});
break;
}
case ColumnType::VECTOR_BINARY : {
p->columnType = ColumnType::VECTOR_BINARY;
//TODO, arrow array builder
break;
}
case ColumnType::VECTOR_FLOAT : {
p->columnType = ColumnType::VECTOR_FLOAT;
//TODO, arrow array builder
break;
}
default: {
delete p;
return nullptr;
}
}
return reinterpret_cast<void *>(p);
}
template<typename DT, typename BT>
CStatus AddValuesToPayload(CPayloadWriter payloadWriter, DT *values, int length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
if (length <= 0) return st;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
auto builder = std::dynamic_pointer_cast<BT>(p->builder);
if (builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = "incorrect data type";
return st;
}
if (p->output != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = "payload has finished";
return st;
}
auto ast = builder->AppendValues(values, values + length);
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = "add value into arrow array failed";
return st;
}
p->rows += length;
return st;
}
extern "C" CStatus AddBooleanToPayload(CPayloadWriter payloadWriter, bool *values, int length) {
return AddValuesToPayload<bool, arrow::BooleanBuilder>(payloadWriter, values, length);
}
extern "C" CStatus AddInt8ToPayload(CPayloadWriter payloadWriter, int8_t *values, int length) {
return AddValuesToPayload<int8_t, arrow::Int8Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddInt16ToPayload(CPayloadWriter payloadWriter, int16_t *values, int length) {
return AddValuesToPayload<int16_t, arrow::Int16Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddInt32ToPayload(CPayloadWriter payloadWriter, int32_t *values, int length) {
return AddValuesToPayload<int32_t, arrow::Int32Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddInt64ToPayload(CPayloadWriter payloadWriter, int64_t *values, int length) {
return AddValuesToPayload<int64_t, arrow::Int64Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddFloatToPayload(CPayloadWriter payloadWriter, float *values, int length) {
return AddValuesToPayload<float, arrow::FloatBuilder>(payloadWriter, values, length);
}
extern "C" CStatus AddDoubleToPayload(CPayloadWriter payloadWriter, double *values, int length) {
return AddValuesToPayload<double, arrow::DoubleBuilder>(payloadWriter, values, length);
}
extern "C" CStatus FinishPayload(CPayloadWriter payloadWriter) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
if (p->builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = "arrow builder is nullptr";
return st;
}
if (p->output == nullptr) {
std::shared_ptr<arrow::Array> array;
auto ast = p->builder->Finish(&array);
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = "failed to finish array builder";
return st;
}
auto table = arrow::Table::Make(p->schema, {array});
p->output = std::make_shared<wrapper::PayloadOutputStream>();
ast = parquet::arrow::WriteTable(*table, arrow::default_memory_pool(), p->output, 1024 * 1024);
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = "failed to write parquet buffer";
return st;
}
}
return st;
}
CBuffer GetPayloadBuffer(CPayloadWriter payloadWriter) {
CBuffer buf;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
if (p->output == nullptr) {
buf.length = 0;
buf.data = nullptr;
return buf;
}
auto &output = p->output->Buffer();
buf.length = static_cast<int>(output.size());
buf.data = (char *) (output.data());
return buf;
}
int GetPayloadNums(CPayloadWriter payloadWriter) {
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
return p->rows;
}
extern "C" CStatus ReleasePayload(CPayloadWriter handler) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(handler);
if (p != nullptr) delete p;
return st;
}

View File

@ -0,0 +1,40 @@
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#include <stdbool.h>
#include <stdint.h>
typedef void *CPayloadWriter;
typedef struct CBuffer {
char *data;
int length;
} CBuffer;
typedef struct CStatus {
int error_code;
const char *error_msg;
} CStatus;
CPayloadWriter NewPayloadWriter(int columnType);
CStatus AddBooleanToPayload(CPayloadWriter payloadWriter, bool *values, int length);
CStatus AddInt8ToPayload(CPayloadWriter payloadWriter, int8_t *values, int length);
CStatus AddInt16ToPayload(CPayloadWriter payloadWriter, int16_t *values, int length);
CStatus AddInt32ToPayload(CPayloadWriter payloadWriter, int32_t *values, int length);
CStatus AddInt64ToPayload(CPayloadWriter payloadWriter, int64_t *values, int length);
CStatus AddFloatToPayload(CPayloadWriter payloadWriter, float *values, int length);
CStatus AddDoubleToPayload(CPayloadWriter payloadWriter, double *values, int length);
CStatus AddBinaryVectorToPayload(CPayloadWriter payloadWriter, int8_t *values, int dimension, int length);
CStatus AddFloatVectorToPayload(CPayloadWriter payloadWriter, float *values, int dimension, int length);
CStatus FinishPayload(CPayloadWriter payloadWriter);
CBuffer GetPayloadBuffer(CPayloadWriter payloadWriter);
int GetPayloadNums(CPayloadWriter payloadWriter);
CStatus ReleasePayload(CPayloadWriter handler);
#ifdef __cplusplus
}
#endif
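Taken together, the header defines a write-once flow: create a writer for a column type, append typed values, call FinishPayload to serialize the column into an in-memory parquet buffer, copy the buffer out, then release the writer. A minimal caller sketch (hypothetical main, assuming the wrapper library and Arrow are linked):

#include <cstdio>
#include "ColumnType.h"
#include "ParquetWraper.h"

int main() {
  CPayloadWriter writer = NewPayloadWriter(ColumnType::INT64);
  int64_t values[] = {1, 2, 3, 4};
  // appends into the arrow builder held by the writer
  CStatus st = AddInt64ToPayload(writer, values, 4);
  if (st.error_code != 0) return 1;
  // serializes the accumulated column into a parquet byte buffer
  st = FinishPayload(writer);
  if (st.error_code != 0) return 1;
  // the buffer is owned by the writer; copy it before ReleasePayload
  CBuffer buf = GetPayloadBuffer(writer);
  printf("parquet payload: %d bytes, %d rows\n", buf.length, GetPayloadNums(writer));
  ReleasePayload(writer);
  return 0;
}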

View File

@ -0,0 +1,44 @@
#include "PayloadStream.h"
namespace wrapper {
PayloadOutputStream::PayloadOutputStream() {
buffer_.reserve(1024 * 1024);
closed_ = false;
}
PayloadOutputStream::~PayloadOutputStream() noexcept {
}
arrow::Status PayloadOutputStream::Close() {
closed_ = true;
return arrow::Status::OK();
}
arrow::Result<int64_t> PayloadOutputStream::Tell() const {
return arrow::Result<int64_t>(buffer_.size());
}
bool PayloadOutputStream::closed() const {
return closed_;
}
arrow::Status PayloadOutputStream::Write(const void *data, int64_t nbytes) {
if (nbytes <= 0) return arrow::Status::OK();
auto size = buffer_.size();
buffer_.resize(size + nbytes);
std::memcpy(buffer_.data() + size, data, nbytes);
return arrow::Status::OK();
}
arrow::Status PayloadOutputStream::Flush() {
return arrow::Status::OK();
}
const std::vector<uint8_t> &PayloadOutputStream::Buffer() const {
return buffer_;
}
}

View File

@ -0,0 +1,46 @@
#pragma once
#include <arrow/api.h>
#include <arrow/io/interfaces.h>
#include "ColumnType.h"
namespace wrapper {
class PayloadOutputStream;
struct PayloadWriter {
ColumnType columnType;
std::shared_ptr<arrow::ArrayBuilder> builder;
std::shared_ptr<arrow::Schema> schema;
std::shared_ptr<PayloadOutputStream> output;
int rows;
};
class PayloadOutputStream : public arrow::io::OutputStream {
public:
PayloadOutputStream();
~PayloadOutputStream();
arrow::Status Close() override;
arrow::Result<int64_t> Tell() const override;
bool closed() const override;
arrow::Status Write(const void *data, int64_t nbytes) override;
arrow::Status Flush() override;
public:
const std::vector<uint8_t> &Buffer() const;
private:
std::vector<uint8_t> buffer_;
bool closed_;
};
class PayloadInputStream : public arrow::io::RandomAccessFile {
public:
PayloadInputStream(const void *data, int64_t size);
~PayloadInputStream();
};
}
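PayloadInputStream is declared above with only a constructor and destructor; the pure-virtual members inherited from arrow::io::RandomAccessFile are not overridden yet, so the class cannot be instantiated as written. A sketch of what the missing read side could look like over an in-memory buffer (assumed shapes only, matching the Arrow 2.0 interfaces used elsewhere in this commit):

#include <algorithm>
#include <cstring>
#include <memory>
#include <arrow/api.h>
#include <arrow/io/interfaces.h>

// Hypothetical in-memory reader; not part of this commit.
class InMemoryInputStream : public arrow::io::RandomAccessFile {
 public:
  InMemoryInputStream(const void *data, int64_t size)
      : data_(static_cast<const uint8_t *>(data)), size_(size), tell_(0), closed_(false) {}
  arrow::Status Close() override { closed_ = true; return arrow::Status::OK(); }
  arrow::Result<int64_t> Tell() const override { return tell_; }
  bool closed() const override { return closed_; }
  arrow::Status Seek(int64_t position) override { tell_ = position; return arrow::Status::OK(); }
  arrow::Result<int64_t> Read(int64_t nbytes, void *out) override {
    const int64_t n = std::min(nbytes, size_ - tell_);
    std::memcpy(out, data_ + tell_, n);
    tell_ += n;
    return n;
  }
  arrow::Result<std::shared_ptr<arrow::Buffer>> Read(int64_t nbytes) override {
    const int64_t n = std::min(nbytes, size_ - tell_);
    auto buf = std::make_shared<arrow::Buffer>(data_ + tell_, n);  // zero-copy view
    tell_ += n;
    return buf;
  }
  arrow::Result<int64_t> GetSize() override { return size_; }
 private:
  const uint8_t *data_;
  int64_t size_;
  int64_t tell_;
  bool closed_;
};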

View File

@ -0,0 +1,95 @@
set(ARROW_ROOT ${CMAKE_BINARY_DIR}/arrow)
set(ARROW_CMAKE_ARGS " -DARROW_WITH_LZ4=OFF"
" -DARROW_WITH_ZSTD=OFF"
" -DARROW_WITH_BROTLI=OFF"
" -DARROW_WITH_SNAPPY=OFF"
" -DARROW_WITH_ZLIB=OFF"
" -DARROW_BUILD_STATIC=ON"
" -DARROW_BUILD_SHARED=OFF"
" -DARROW_BOOST_USE_SHARED=OFF"
" -DARROW_BUILD_TESTS=OFF"
" -DARROW_TEST_MEMCHECK=OFF"
" -DARROW_BUILD_BENCHMARKS=OFF"
" -DARROW_CUDA=OFF"
" -DARROW_JEMALLOC=OFF"
" -DARROW_PYTHON=OFF"
" -DARROW_BUILD_UTILITIES=OFF"
" -DARROW_PARQUET=ON"
" -DPARQUET_BUILD_SHARED=OFF"
" -DARROW_S3=OFF"
" -DCMAKE_VERBOSE_MAKEFILE=ON")
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/Templates/Arrow.CMakeLists.txt.cmake"
"${ARROW_ROOT}/CMakeLists.txt")
file(MAKE_DIRECTORY "${ARROW_ROOT}/build")
file(MAKE_DIRECTORY "${ARROW_ROOT}/install")
execute_process(
COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
RESULT_VARIABLE ARROW_CONFIG
WORKING_DIRECTORY ${ARROW_ROOT})
if(ARROW_CONFIG)
message(FATAL_ERROR "Configuring Arrow failed: " ${ARROW_CONFIG})
endif(ARROW_CONFIG)
set(PARALLEL_BUILD -j)
if($ENV{PARALLEL_LEVEL})
set(NUM_JOBS $ENV{PARALLEL_LEVEL})
set(PARALLEL_BUILD "${PARALLEL_BUILD}${NUM_JOBS}")
endif($ENV{PARALLEL_LEVEL})
if(${NUM_JOBS})
if(${NUM_JOBS} EQUAL 1)
message(STATUS "ARROW BUILD: Enabling Sequential CMake build")
elseif(${NUM_JOBS} GREATER 1)
message(STATUS "ARROW BUILD: Enabling Parallel CMake build with ${NUM_JOBS} jobs")
endif(${NUM_JOBS} EQUAL 1)
else()
message(STATUS "ARROW BUILD: Enabling Parallel CMake build with all threads")
endif(${NUM_JOBS})
execute_process(
COMMAND ${CMAKE_COMMAND} --build .. -- ${PARALLEL_BUILD}
RESULT_VARIABLE ARROW_BUILD
WORKING_DIRECTORY ${ARROW_ROOT}/build)
if(ARROW_BUILD)
message(FATAL_ERROR "Building Arrow failed: " ${ARROW_BUILD})
endif(ARROW_BUILD)
message(STATUS "Arrow installed here: " ${ARROW_ROOT}/install)
set(ARROW_LIBRARY_DIR "${ARROW_ROOT}/install/lib")
set(ARROW_INCLUDE_DIR "${ARROW_ROOT}/install/include")
find_library(ARROW_LIB arrow
NO_DEFAULT_PATH
HINTS "${ARROW_LIBRARY_DIR}")
message(STATUS "Arrow library: " ${ARROW_LIB})
find_library(PARQUET_LIB parquet
NO_DEFAULT_PATH
HINTS "${ARROW_LIBRARY_DIR}")
message(STATUS "Parquet library: " ${PARQUET_LIB})
find_library(THRIFT_LIB thrift
NO_DEFAULT_PATH
HINTS "${ARROW_ROOT}/build/thrift_ep-install/lib")
message(STATUS "Thirft library: " ${THRIFT_LIB})
find_library(UTF8PROC_LIB utf8proc
NO_DEFAULT_PATH
HINTS "${ARROW_ROOT}/build/utf8proc_ep-install/lib")
message(STATUS "utf8proc library: " ${UTF8PROC_LIB})
if(ARROW_LIB AND PARQUET_LIB AND THRIFT_LIB AND UTF8PROC_LIB)
set(ARROW_FOUND TRUE)
endif(ARROW_LIB AND PARQUET_LIB AND THRIFT_LIB AND UTF8PROC_LIB)
message(STATUS "FlatBuffers installed here: " ${FLATBUFFERS_ROOT})
set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_ROOT}/include")
set(FLATBUFFERS_LIBRARY_DIR "${FLATBUFFERS_ROOT}/lib")
add_definitions(-DARROW_METADATA_V4)

View File

@ -0,0 +1,30 @@
#=============================================================================
# Copyright (c) 2018-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
cmake_minimum_required(VERSION 3.14...3.17 FATAL_ERROR)
project(wrapper-Arrow)
include(ExternalProject)
ExternalProject_Add(Arrow
GIT_REPOSITORY https://github.com/apache/arrow.git
GIT_TAG apache-arrow-2.0.0
GIT_SHALLOW true
SOURCE_DIR "${ARROW_ROOT}/arrow"
SOURCE_SUBDIR "cpp"
BINARY_DIR "${ARROW_ROOT}/build"
INSTALL_DIR "${ARROW_ROOT}/install"
CMAKE_ARGS ${ARROW_CMAKE_ARGS} -DCMAKE_INSTALL_PREFIX=${ARROW_ROOT}/install)

View File

@ -0,0 +1,19 @@
add_executable(wrapper_test
ParquetWrapperTest.cpp)
include(FetchContent)
FetchContent_Declare(googletest
URL "https://github.com/google/googletest/archive/release-1.10.0.tar.gz")
set(BUILD_GMOCK OFF CACHE BOOL "")
set(INSTALL_GTEST OFF CACHE BOOL "")
FetchContent_MakeAvailable(googletest)
target_link_libraries(wrapper_test
gtest_main
wrapper
parquet arrow thrift utf8proc pthread
)
# Defines `gtest_discover_tests()`.
#include(GoogleTest)
#gtest_discover_tests(wrapper_test)

View File

@ -0,0 +1,138 @@
#include <gtest/gtest.h>
#include <fstream>
#include <arrow/api.h>
#include <arrow/io/api.h>
#include <parquet/arrow/reader.h>
#include "ParquetWraper.h"
#include "ColumnType.h"
static void WriteToFile(CBuffer cb) {
auto data_file = std::ofstream("/tmp/wrapper_test_data.dat", std::ios::binary);
data_file.write(cb.data, cb.length);
data_file.close();
}
static std::shared_ptr<arrow::Table> ReadFromFile() {
std::shared_ptr<arrow::io::ReadableFile> infile;
auto rst = arrow::io::ReadableFile::Open("/tmp/wrapper_test_data.dat");
if (!rst.ok()) return nullptr;
infile = *rst;
std::shared_ptr<arrow::Table> table;
std::unique_ptr<parquet::arrow::FileReader> reader;
auto st = parquet::arrow::OpenFile(infile, arrow::default_memory_pool(), &reader);
if (!st.ok()) return nullptr;
st = reader->ReadTable(&table);
if (!st.ok()) return nullptr;
return table;
}
TEST(wrapper, boolean) {
auto payload = NewPayloadWriter(ColumnType::BOOL);
bool data[] = {true, false, true, false};
auto st = AddBooleanToPayload(payload, data, 4);
ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
st = FinishPayload(payload);
ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
auto cb = GetPayloadBuffer(payload);
ASSERT_GT(cb.length, 0);
ASSERT_NE(cb.data, nullptr);
WriteToFile(cb);
auto nums = GetPayloadNums(payload);
ASSERT_EQ(nums, 4);
st = ReleasePayload(payload);
ASSERT_EQ(st.error_code, ErrorCode::SUCCESS);
auto table = ReadFromFile();
ASSERT_NE(table, nullptr);
auto chunks = table->column(0)->chunks();
ASSERT_EQ(chunks.size(), 1);
auto bool_array = std::dynamic_pointer_cast<arrow::BooleanArray>(chunks[0]);
ASSERT_NE(bool_array, nullptr);
ASSERT_EQ(bool_array->Value(0), true);
ASSERT_EQ(bool_array->Value(1), false);
ASSERT_EQ(bool_array->Value(2), true);
ASSERT_EQ(bool_array->Value(3), false);
}
#define NUMERIC_TEST(TEST_NAME, COLUMN_TYPE, DATA_TYPE, ADD_FUNC, ARRAY_TYPE) TEST(wrapper, TEST_NAME) { \
auto payload = NewPayloadWriter(COLUMN_TYPE); \
DATA_TYPE data[] = {-1, 1, -100, 100}; \
\
auto st = ADD_FUNC(payload, data, 4); \
ASSERT_EQ(st.error_code, ErrorCode::SUCCESS); \
st = FinishPayload(payload); \
ASSERT_EQ(st.error_code, ErrorCode::SUCCESS); \
auto cb = GetPayloadBuffer(payload); \
ASSERT_GT(cb.length, 0); \
ASSERT_NE(cb.data, nullptr); \
\
WriteToFile(cb); \
\
auto nums = GetPayloadNums(payload); \
ASSERT_EQ(nums, 4); \
st = ReleasePayload(payload); \
ASSERT_EQ(st.error_code, ErrorCode::SUCCESS); \
\
auto table = ReadFromFile(); \
ASSERT_NE(table, nullptr); \
\
auto chunks = table->column(0)->chunks(); \
ASSERT_EQ(chunks.size(), 1); \
\
auto num_array = std::dynamic_pointer_cast<ARRAY_TYPE>(chunks[0]); \
ASSERT_NE(num_array, nullptr); \
\
ASSERT_EQ(num_array->Value(0), -1); \
ASSERT_EQ(num_array->Value(1), 1); \
ASSERT_EQ(num_array->Value(2), -100); \
ASSERT_EQ(num_array->Value(3), 100); \
}
NUMERIC_TEST(int8, ColumnType::INT8, int8_t, AddInt8ToPayload, arrow::Int8Array)
NUMERIC_TEST(int16, ColumnType::INT16, int16_t, AddInt16ToPayload, arrow::Int16Array)
NUMERIC_TEST(int32, ColumnType::INT32, int32_t, AddInt32ToPayload, arrow::Int32Array)
NUMERIC_TEST(int64, ColumnType::INT64, int64_t, AddInt64ToPayload, arrow::Int64Array)
NUMERIC_TEST(float32, ColumnType::FLOAT, float, AddFloatToPayload, arrow::FloatArray)
NUMERIC_TEST(float64, ColumnType::DOUBLE, double, AddDoubleToPayload, arrow::DoubleArray)
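These tests round-trip through a file under /tmp; the same verification can be done entirely in memory with arrow::io::BufferReader, which is also what a completed PayloadInputStream would enable. A sketch (assuming Arrow 2.0; cb is the CBuffer returned by GetPayloadBuffer):

#include <arrow/api.h>
#include <arrow/io/memory.h>
#include <parquet/arrow/reader.h>
#include "ParquetWraper.h"

// Read the parquet payload straight from the writer's buffer, no temp file.
static std::shared_ptr<arrow::Table> ReadFromBuffer(const CBuffer &cb) {
  auto buffer = std::make_shared<arrow::Buffer>(
      reinterpret_cast<const uint8_t *>(cb.data), cb.length);
  auto infile = std::make_shared<arrow::io::BufferReader>(buffer);
  std::unique_ptr<parquet::arrow::FileReader> reader;
  auto st = parquet::arrow::OpenFile(infile, arrow::default_memory_pool(), &reader);
  if (!st.ok()) return nullptr;
  std::shared_ptr<arrow::Table> table;
  st = reader->ReadTable(&table);
  if (!st.ok()) return nullptr;
  return table;
}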

View File

@ -13,5 +13,5 @@ SCRIPTS_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
# ignore MinIO, S3 unit tests
MILVUS_DIR="${SCRIPTS_DIR}/../internal/"
echo $MILVUS_DIR
go test -cover "${MILVUS_DIR}/kv/..." "${MILVUS_DIR}/msgstream/..." "${MILVUS_DIR}/master/..." "${MILVUS_DIR}/querynode/..." "${MILVUS_DIR}/proxy/..." -failfast
#go test -cover "${MILVUS_DIR}/kv/..." "${MILVUS_DIR}/msgstream/..." "${MILVUS_DIR}/master/..." "${MILVUS_DIR}/querynode/..." -failfast
#go test -cover "${MILVUS_DIR}/kv/..." "${MILVUS_DIR}/msgstream/..." "${MILVUS_DIR}/master/..." "${MILVUS_DIR}/querynode/..." "${MILVUS_DIR}/proxy/..." -failfast
go test -cover "${MILVUS_DIR}/kv/..." "${MILVUS_DIR}/msgstream/..." "${MILVUS_DIR}/master/..." "${MILVUS_DIR}/querynode/..." -failfast

View File

@ -1,235 +0,0 @@
import socket
import pytest
from .utils import *
timeout = 60
dimension = 128
delete_timeout = 60
def pytest_addoption(parser):
parser.addoption("--ip", action="store", default="localhost")
parser.addoption("--service", action="store", default="")
parser.addoption("--port", action="store", default=19530)
parser.addoption("--http-port", action="store", default=19121)
parser.addoption("--handler", action="store", default="GRPC")
parser.addoption("--tag", action="store", default="all", help="only run tests matching the tag.")
parser.addoption('--dry-run', action='store_true', default=False)
def pytest_configure(config):
# register an additional marker
config.addinivalue_line(
"markers", "tag(name): mark test to run only matching the tag"
)
def pytest_runtest_setup(item):
tags = list()
for marker in item.iter_markers(name="tag"):
for tag in marker.args:
tags.append(tag)
if tags:
cmd_tag = item.config.getoption("--tag")
if cmd_tag != "all" and cmd_tag not in tags:
pytest.skip("test requires tag in {!r}".format(tags))
def pytest_runtestloop(session):
if session.config.getoption('--dry-run'):
total_passed = 0
total_skipped = 0
test_file_to_items = {}
for item in session.items:
file_name, test_class, test_func = item.nodeid.split("::")
if test_file_to_items.get(file_name) is not None:
test_file_to_items[file_name].append(item)
else:
test_file_to_items[file_name] = [item]
for k, items in test_file_to_items.items():
skip_case = []
should_pass_but_skipped = []
skipped_other_reason = []
level2_case = []
for item in items:
if "pytestmark" in item.keywords.keys():
markers = item.keywords["pytestmark"]
skip_case.extend([item.nodeid for marker in markers if marker.name == 'skip'])
should_pass_but_skipped.extend([item.nodeid for marker in markers if marker.name == 'skip' and len(marker.args) > 0 and marker.args[0] == "should pass"])
skipped_other_reason.extend([item.nodeid for marker in markers if marker.name == 'skip' and (len(marker.args) < 1 or marker.args[0] != "should pass")])
level2_case.extend([item.nodeid for marker in markers if marker.name == 'level' and marker.args[0] == 2])
print("")
print(f"[{k}]:")
print(f" Total : {len(items):13}")
print(f" Passed : {len(items) - len(skip_case):13}")
print(f" Skipped : {len(skip_case):13}")
print(f" - should pass: {len(should_pass_but_skipped):4}")
print(f" - not supported: {len(skipped_other_reason):4}")
print(f" Level2 : {len(level2_case):13}")
print(f" ---------------------------------------")
print(f" should pass but skipped: ")
print("")
for nodeid in should_pass_but_skipped:
name, test_class, test_func = nodeid.split("::")
print(f" {name:8}: {test_class}.{test_func}")
print("")
print(f"===============================================")
total_passed += len(items) - len(skip_case)
total_skipped += len(skip_case)
print("Total tests : ", len(session.items))
print("Total passed: ", total_passed)
print("Total skiped: ", total_skipped)
return True
def check_server_connection(request):
ip = request.config.getoption("--ip")
port = request.config.getoption("--port")
connected = True
if ip and (ip not in ['localhost', '127.0.0.1']):
try:
socket.getaddrinfo(ip, port, 0, 0, socket.IPPROTO_TCP)
except Exception as e:
print("Socket connnet failed: %s" % str(e))
connected = False
return connected
@pytest.fixture(scope="module")
def connect(request):
ip = request.config.getoption("--ip")
service_name = request.config.getoption("--service")
port = request.config.getoption("--port")
http_port = request.config.getoption("--http-port")
handler = request.config.getoption("--handler")
if handler == "HTTP":
port = http_port
try:
milvus = get_milvus(host=ip, port=port, handler=handler)
# reset_build_index_threshold(milvus)
except Exception as e:
logging.getLogger().error(str(e))
pytest.exit("Milvus server can not connected, exit pytest ...")
def fin():
try:
milvus.close()
pass
except Exception as e:
logging.getLogger().info(str(e))
request.addfinalizer(fin)
return milvus
@pytest.fixture(scope="module")
def dis_connect(request):
ip = request.config.getoption("--ip")
service_name = request.config.getoption("--service")
port = request.config.getoption("--port")
http_port = request.config.getoption("--http-port")
handler = request.config.getoption("--handler")
if handler == "HTTP":
port = http_port
milvus = get_milvus(host=ip, port=port, handler=handler)
milvus.close()
return milvus
@pytest.fixture(scope="module")
def args(request):
ip = request.config.getoption("--ip")
service_name = request.config.getoption("--service")
port = request.config.getoption("--port")
http_port = request.config.getoption("--http-port")
handler = request.config.getoption("--handler")
if handler == "HTTP":
port = http_port
args = {"ip": ip, "port": port, "handler": handler, "service_name": service_name}
return args
@pytest.fixture(scope="module")
def milvus(request):
ip = request.config.getoption("--ip")
port = request.config.getoption("--port")
http_port = request.config.getoption("--http-port")
handler = request.config.getoption("--handler")
if handler == "HTTP":
port = http_port
return get_milvus(host=ip, port=port, handler=handler)
@pytest.fixture(scope="function")
def collection(request, connect):
ori_collection_name = getattr(request.module, "collection_id", "test")
collection_name = gen_unique_str(ori_collection_name)
try:
default_fields = gen_default_fields()
connect.create_collection(collection_name, default_fields)
except Exception as e:
pytest.exit(str(e))
def teardown():
if connect.has_collection(collection_name):
connect.drop_collection(collection_name, timeout=delete_timeout)
request.addfinalizer(teardown)
assert connect.has_collection(collection_name)
return collection_name
# customised id
@pytest.fixture(scope="function")
def id_collection(request, connect):
ori_collection_name = getattr(request.module, "collection_id", "test")
collection_name = gen_unique_str(ori_collection_name)
try:
fields = gen_default_fields(auto_id=False)
connect.create_collection(collection_name, fields)
except Exception as e:
pytest.exit(str(e))
def teardown():
if connect.has_collection(collection_name):
connect.drop_collection(collection_name, timeout=delete_timeout)
request.addfinalizer(teardown)
assert connect.has_collection(collection_name)
return collection_name
@pytest.fixture(scope="function")
def binary_collection(request, connect):
ori_collection_name = getattr(request.module, "collection_id", "test")
collection_name = gen_unique_str(ori_collection_name)
try:
fields = gen_binary_default_fields()
connect.create_collection(collection_name, fields)
except Exception as e:
pytest.exit(str(e))
def teardown():
collection_names = connect.list_collections()
if connect.has_collection(collection_name):
connect.drop_collection(collection_name, timeout=delete_timeout)
request.addfinalizer(teardown)
assert connect.has_collection(collection_name)
return collection_name
# customised id
@pytest.fixture(scope="function")
def binary_id_collection(request, connect):
ori_collection_name = getattr(request.module, "collection_id", "test")
collection_name = gen_unique_str(ori_collection_name)
try:
fields = gen_binary_default_fields(auto_id=False)
connect.create_collection(collection_name, fields)
except Exception as e:
pytest.exit(str(e))
def teardown():
if connect.has_collection(collection_name):
connect.drop_collection(collection_name, timeout=delete_timeout)
request.addfinalizer(teardown)
assert connect.has_collection(collection_name)
return collection_name

View File

@ -1,22 +0,0 @@
from . import utils
default_fields = utils.gen_default_fields()
default_binary_fields = utils.gen_binary_default_fields()
default_entity = utils.gen_entities(1)
default_raw_binary_vector, default_binary_entity = utils.gen_binary_entities(1)
default_entity_row = utils.gen_entities_rows(1)
default_raw_binary_vector_row, default_binary_entity_row = utils.gen_binary_entities_rows(1)
default_entities = utils.gen_entities(utils.default_nb)
default_raw_binary_vectors, default_binary_entities = utils.gen_binary_entities(utils.default_nb)
default_entities_new = utils.gen_entities_new(utils.default_nb)
default_raw_binary_vectors_new, default_binary_entities_new = utils.gen_binary_entities_new(utils.default_nb)
default_entities_rows = utils.gen_entities_rows(utils.default_nb)
default_raw_binary_vectors_rows, default_binary_entities_rows = utils.gen_binary_entities_rows(utils.default_nb)

View File

@ -1,127 +0,0 @@
# STL imports
import random
import string
import time
import datetime
import struct
import sys
import uuid
from functools import wraps
sys.path.append('..')
# Third party imports
import numpy as np
import faker
from faker.providers import BaseProvider
# local application imports
from milvus.client.types import IndexType, MetricType, DataType
# grpc
from milvus.client.grpc_handler import Prepare as gPrepare
from milvus.grpc_gen import milvus_pb2
def gen_vectors(num, dim):
return [[random.random() for _ in range(dim)] for _ in range(num)]
def gen_single_vector(dim):
return [[random.random() for _ in range(dim)]]
def gen_vector(nb, d, seed=np.random.RandomState(1234)):
xb = seed.rand(nb, d).astype("float32")
return xb.tolist()
def gen_unique_str(str=None):
prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
return prefix if str is None else str + "_" + prefix
def get_current_day():
return time.strftime('%Y-%m-%d', time.localtime())
def get_last_day(day):
tmp = datetime.datetime.now() - datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def get_next_day(day):
tmp = datetime.datetime.now() + datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def gen_long_str(num):
string = ''
for _ in range(num):
char = random.choice('tomorrow')
string += char
return string
def gen_one_binary(topk):
ids = [random.randrange(10000000, 99999999) for _ in range(topk)]
distances = [random.random() for _ in range(topk)]
return milvus_pb2.TopKQueryResult(struct.pack(str(topk) + 'l', *ids), struct.pack(str(topk) + 'd', *distances))
def gen_nq_binaries(nq, topk):
return [gen_one_binary(topk) for _ in range(nq)]
def fake_query_bin_result(nq, topk):
return gen_nq_binaries(nq, topk)
class FakerProvider(BaseProvider):
def collection_name(self):
return 'collection_names' + str(uuid.uuid4()).replace('-', '_')
def normal_field_name(self):
return 'normal_field_names' + str(uuid.uuid4()).replace('-', '_')
def vector_field_name(self):
return 'vector_field_names' + str(uuid.uuid4()).replace('-', '_')
def name(self):
return 'name' + str(random.randint(1000, 9999))
def dim(self):
return random.randint(0, 999)
fake = faker.Faker()
fake.add_provider(FakerProvider)
def collection_name_factory():
return fake.collection_name()
def collection_schema_factory():
param = {
"fields": [
{"name": fake.normal_field_name(),"type": DataType.INT32},
{"name": fake.vector_field_name(),"type": DataType.FLOAT_VECTOR, "params": {"dim": random.randint(1, 999)}},
],
"auto_id": True,
}
return param
def records_factory(dimension, nq):
return [[random.random() for _ in range(dimension)] for _ in range(nq)]
def time_it(func):
@wraps(func)
def inner(*args, **kwrgs):
pref = time.perf_counter()
result = func(*args, **kwrgs)
delt = time.perf_counter() - pref
print(f"[{func.__name__}][{delt:.4}s]")
return result
return inner

View File

@ -1,19 +0,0 @@
[pytest]
log_format = [%(asctime)s-%(levelname)s-%(name)s]: %(message)s (%(filename)s:%(lineno)s)
log_date_format = %Y-%m-%d %H:%M:%S
# cli arguments. `-x`: stop testing when the first error occurs
addopts = -x
testpaths = .
log_cli = true
log_level = 10
timeout = 360
markers =
level: test level
serial
; level = 1

View File

@ -1,8 +0,0 @@
grpcio==1.26.0
grpcio-tools==1.26.0
numpy==1.18.1
pytest==5.3.4
pytest-cov==2.8.1
pytest-timeout==1.3.4
pymilvus-distributed==0.0.3
sklearn==0.0

File diff suppressed because it is too large

View File

@ -1,314 +0,0 @@
import pytest
from .utils import *
from .constants import *
uid = "create_collection"
class TestCreateCollection:
"""
******************************************************************
The following cases are used to test `create_collection` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_segment_row_limits()
)
def get_segment_row_limit(self, request):
yield request.param
def test_create_collection_fields(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields
method: create collection with diff fields: metric/field_type/...
expected: no exception raised
'''
filter_field = get_filter_field
# logging.getLogger().info(filter_field)
vector_field = get_vector_field
collection_name = gen_unique_str(uid)
fields = {
"fields": [filter_field, vector_field],
}
# logging.getLogger().info(fields)
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
def test_create_collection_fields_create_index(self, connect, get_filter_field, get_vector_field):
'''
target: test create normal collection with different fields
method: create collection with diff fields: metric/field_type/...
expected: no exception raised
'''
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = gen_unique_str(uid)
fields = {
"fields": [filter_field, vector_field],
}
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
@pytest.mark.skip("no segment_row_limit")
def test_create_collection_segment_row_limit(self, connect):
'''
target: test create normal collection with different fields
method: create collection with diff segment_row_limit
expected: no exception raised
'''
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
# fields["segment_row_limit"] = get_segment_row_limit
connect.create_collection(collection_name, fields)
assert connect.has_collection(collection_name)
@pytest.mark.skip("no flush")
def _test_create_collection_auto_flush_disabled(self, connect):
'''
target: test create normal collection, with large auto_flush_interval
method: create collection with correct params
expected: create status return ok
'''
disable_flush(connect)
collection_name = gen_unique_str(uid)
try:
connect.create_collection(collection_name, default_fields)
finally:
enable_flush(connect)
def test_create_collection_after_insert(self, connect, collection):
'''
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
'''
# pdb.set_trace()
connect.bulk_insert(collection, default_entity)
with pytest.raises(Exception) as e:
connect.create_collection(collection, default_fields)
def test_create_collection_after_insert_flush(self, connect, collection):
'''
target: test insert vector, then create collection again
method: insert vector and create collection
expected: error raised
'''
connect.bulk_insert(collection, default_entity)
connect.flush([collection])
with pytest.raises(Exception) as e:
connect.create_collection(collection, default_fields)
def test_create_collection_without_connection(self, dis_connect):
'''
target: test create collection, without connection
method: create collection with correct params, with a disconnected instance
expected: error raised
'''
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
dis_connect.create_collection(collection_name, default_fields)
def test_create_collection_existed(self, connect):
'''
target: test create collection but the collection name have already existed
method: create collection with the same collection_name
expected: error raised
'''
collection_name = gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, default_fields)
def test_create_after_drop_collection(self, connect, collection):
'''
target: create with the same collection name after collection dropped
method: delete, then create
expected: create success
'''
connect.drop_collection(collection)
time.sleep(2)
connect.create_collection(collection, default_fields)
@pytest.mark.level(2)
def test_create_collection_multithread(self, connect):
'''
target: test create collection with multithread
method: create collection using multithread,
expected: collections are created
'''
threads_num = 8
threads = []
collection_names = []
def create():
collection_name = gen_unique_str(uid)
collection_names.append(collection_name)
connect.create_collection(collection_name, default_fields)
for i in range(threads_num):
t = threading.Thread(target=create, args=())
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
for item in collection_names:
assert item in connect.list_collections()
connect.drop_collection(item)
class TestCreateCollectionInvalid(object):
"""
Test creating collections with invalid params
"""
@pytest.fixture(
scope="function",
params=gen_invalid_metric_types()
)
def get_metric_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_segment_row_limit(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_ints()
)
def get_dim(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_invalid_string(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_field_types()
)
def get_field_type(self, request):
yield request.param
@pytest.mark.level(2)
@pytest.mark.skip("no segment row limit")
def test_create_collection_with_invalid_segment_row_limit(self, connect, get_segment_row_limit):
collection_name = gen_unique_str()
fields = copy.deepcopy(default_fields)
fields["segment_row_limit"] = get_segment_row_limit
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.level(2)
def test_create_collection_with_invalid_dimension(self, connect, get_dim):
dimension = get_dim
collection_name = gen_unique_str()
fields = copy.deepcopy(default_fields)
fields["fields"][-1]["params"]["dim"] = dimension
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.level(2)
def test_create_collection_with_invalid_collectionname(self, connect, get_invalid_string):
collection_name = get_invalid_string
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, default_fields)
@pytest.mark.level(2)
def test_create_collection_with_empty_collectionname(self, connect):
collection_name = ''
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, default_fields)
@pytest.mark.level(2)
def test_create_collection_with_none_collectionname(self, connect):
collection_name = None
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, default_fields)
def test_create_collection_None(self, connect):
'''
target: test create collection but the collection name is None
method: create collection, param collection_name is None
expected: create raise error
'''
with pytest.raises(Exception) as e:
connect.create_collection(None, default_fields)
def test_create_collection_no_dimension(self, connect):
'''
target: test create collection with no dimension params
method: create collection with the dim param removed
expected: error raised
'''
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
fields["fields"][-1]["params"].pop("dim")
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.skip("no segment row limit")
def test_create_collection_no_segment_row_limit(self, connect):
'''
target: test create collection with no segment_row_limit params
method: create collection with correct params
expected: default_server_segment_row_limit is used
'''
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
fields.pop("segment_row_limit")
connect.create_collection(collection_name, fields)
res = connect.get_collection_info(collection_name)
# logging.getLogger().info(res)
assert res["segment_row_limit"] == default_server_segment_row_limit
def test_create_collection_limit_fields(self, connect):
collection_name = gen_unique_str(uid)
limit_num = 64
fields = copy.deepcopy(default_fields)
for i in range(limit_num):
field_name = gen_unique_str("field_name")
field = {"name": field_name, "type": DataType.INT64}
fields["fields"].append(field)
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
@pytest.mark.level(2)
def test_create_collection_invalid_field_name(self, connect, get_invalid_string):
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
field_name = get_invalid_string
field = {"name": field_name, "type": DataType.INT64}
fields["fields"].append(field)
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)
def test_create_collection_invalid_field_type(self, connect, get_field_type):
collection_name = gen_unique_str(uid)
fields = copy.deepcopy(default_fields)
field_type = get_field_type
field = {"name": "test_field", "type": field_type}
fields["fields"].append(field)
with pytest.raises(Exception) as e:
connect.create_collection(collection_name, fields)

Some files were not shown because too many files have changed in this diff