Add support for metric type in schema, enable binary vector, fix segfault

Signed-off-by: FluorineDog <guilin.gou@zilliz.com>
pull/4973/head^2
FluorineDog 2020-12-05 06:46:01 +08:00 committed by yefu.chen
parent acdf8cc9e1
commit 6412ebc0d4
109 changed files with 6236 additions and 1004 deletions

.env

@ -3,6 +3,7 @@ ARCH=amd64
UBUNTU=18.04
DATE_VERSION=20201202-085131
LATEST_DATE_VERSION=latest
MINIO_ADDRESS=minio:9000
PULSAR_ADDRESS=pulsar://pulsar:6650
ETCD_ADDRESS=etcd:2379
MASTER_ADDRESS=localhost:53100


@ -51,7 +51,7 @@ jobs:
- name: Start Service
shell: bash
run: |
docker-compose up -d pulsar etcd
docker-compose up -d pulsar etcd minio
- name: Build and UnitTest
env:
CHECK_BUILDER: "1"

.gitignore

@ -55,3 +55,4 @@ cmake_build/
.DS_Store
*.swp
cwrapper_build


@ -1,9 +1,9 @@
timeout(time: 5, unit: 'MINUTES') {
timeout(time: 10, unit: 'MINUTES') {
dir ("scripts") {
sh '. ./before-install.sh && unset http_proxy && unset https_proxy && ./check_cache.sh -l $CCACHE_ARTFACTORY_URL --cache_dir=\$CCACHE_DIR -f ccache-\$OS_NAME-\$BUILD_ENV_IMAGE_ID.tar.gz || echo \"ccache files not found!\"'
}
sh '. ./scripts/before-install.sh && make check-proto-product && make verifiers && make install'
sh '. ./scripts/before-install.sh && make install'
dir ("scripts") {
withCredentials([usernamePassword(credentialsId: "${env.JFROG_CREDENTIALS_ID}", usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD')]) {


@ -4,7 +4,10 @@ try {
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d pulsar'
dir ('build/docker/deploy') {
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} pull'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d master'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d proxy'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run -e QUERY_NODE_ID=1 -d querynode'
sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run -e QUERY_NODE_ID=2 -d querynode'
}
dir ('build/docker/test') {


@ -65,9 +65,11 @@ build-go:
build-cpp:
@(env bash $(PWD)/scripts/core_build.sh)
@(env bash $(PWD)/scripts/cwrapper_build.sh -t Release)
build-cpp-with-unittest:
@(env bash $(PWD)/scripts/core_build.sh -u)
@(env bash $(PWD)/scripts/cwrapper_build.sh -t Release)
# Runs the tests.
unittest: test-cpp test-go


@ -32,8 +32,9 @@ msgChannel:
# default channel range [0, 1)
channelRange:
insert: [0, 1]
insert: [0, 2]
delete: [0, 1]
dataDefinition: [0,1]
k2s: [0, 1]
search: [0, 1]
searchResult: [0, 1]


@ -12,7 +12,7 @@
nodeID: # will be deprecated after v0.2
proxyIDList: [0]
queryNodeIDList: [1]
queryNodeIDList: [1, 2]
writeNodeIDList: [3]
etcd:
@ -23,6 +23,13 @@ etcd:
kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath
segThreshold: 10000
minio:
address: localhost
port: 9000
accessKeyID: minioadmin
secretAccessKey: minioadmin
useSSL: false
pulsar:
address: localhost
port: 6650


@ -20,6 +20,22 @@ services:
networks:
- milvus
minio:
image: minio/minio:RELEASE.2020-12-03T00-03-10Z
ports:
- "9000:9000"
environment:
MINIO_ACCESS_KEY: minioadmin
MINIO_SECRET_KEY: minioadmin
command: minio server /minio_data
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
networks:
- milvus
networks:
milvus:


@ -21,6 +21,7 @@ services:
PULSAR_ADDRESS: ${PULSAR_ADDRESS}
ETCD_ADDRESS: ${ETCD_ADDRESS}
MASTER_ADDRESS: ${MASTER_ADDRESS}
MINIO_ADDRESS: ${MINIO_ADDRESS}
volumes: &ubuntu-volumes
- .:/go/src/github.com/zilliztech/milvus-distributed:delegated
- ${DOCKER_VOLUME_DIRECTORY:-.docker}/${ARCH}-ubuntu${UBUNTU}-cache:/ccache:delegated
@ -45,6 +46,7 @@ services:
PULSAR_ADDRESS: ${PULSAR_ADDRESS}
ETCD_ADDRESS: ${ETCD_ADDRESS}
MASTER_ADDRESS: ${MASTER_ADDRESS}
MINIO_ADDRESS: ${MINIO_ADDRESS}
volumes:
- .:/go/src/github.com/zilliztech/milvus-distributed:delegated
- ${DOCKER_VOLUME_DIRECTORY:-.docker}/${ARCH}-ubuntu${UBUNTU}-gdbserver-home:/home/debugger:delegated
@ -68,5 +70,21 @@ services:
networks:
- milvus
minio:
image: minio/minio:RELEASE.2020-12-03T00-03-10Z
ports:
- "9000:9000"
environment:
MINIO_ACCESS_KEY: minioadmin
MINIO_SECRET_KEY: minioadmin
command: minio server /minio_data
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
interval: 30s
timeout: 20s
retries: 3
networks:
- milvus
networks:
milvus:


@ -184,7 +184,7 @@ Note that *tenantId*, *proxyId*, *collectionId*, *segmentId* are unique strings
```go
type metaTable struct {
kv kv.Base // client of a reliable kv service, i.e. etcd client
kv kv.TxnBase // client of a reliable kv service, i.e. etcd client
tenantId2Meta map[UniqueId]TenantMeta // tenant id to tenant meta
proxyId2Meta map[UniqueId]ProxyMeta // proxy id to proxy meta
collId2Meta map[UniqueId]CollectionMeta // collection id to collection meta
@ -216,7 +216,7 @@ func (meta *metaTable) GetSegmentById(segId UniqueId)(*SegmentMeta, error)
func (meta *metaTable) DeleteSegment(segId UniqueId) error
func (meta *metaTable) CloseSegment(segId UniqueId, closeTs Timestamp, num_rows int64) error
func NewMetaTable(kv kv.Base) (*metaTable,error)
func NewMetaTable(kv kv.TxnBase) (*metaTable,error)
```
*metaTable* maintains meta both in memory and in *etcdKV*, and keeps the two consistent. All its member functions may be called concurrently.
@ -380,13 +380,18 @@ func (segMgr *SegmentManager) AssignSegmentID(segIDReq []*internalpb.SegIDReques
// "/msg_stream/insert"
message SysConfigRequest {
repeated string keys = 1;
repeated string key_prefixes = 2;
MsgType msg_type = 1;
int64 reqID = 2;
int64 proxyID = 3;
uint64 timestamp = 4;
repeated string keys = 5;
repeated string key_prefixes = 6;
}
message SysConfigResponse {
repeated string keys = 1;
repeated string values = 2;
common.Status status = 1;
repeated string keys = 2;
repeated string values = 3;
}
```
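For a sense of how the updated response is meant to be filled, here is a minimal client-side sketch against the generated C++ message (the header name and the config keys are illustrative, not actual Milvus entries):

```cpp
#include "service_msg.pb.h"  // generated header, exact path assumed

// Sketch only: populate a SysConfigResponse with parallel key/value lists.
milvus::proto::service::SysConfigResponse MakeExampleResponse() {
    milvus::proto::service::SysConfigResponse resp;
    resp.mutable_status();                       // default-constructed Status
    resp.add_keys("etcd.address");               // illustrative key
    resp.add_values("localhost:2379");           // value i corresponds to key i
    resp.add_keys("pulsar.address");
    resp.add_values("pulsar://localhost:6650");
    return resp;
}
```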
@ -394,12 +399,11 @@ message SysConfigResponse {
```go
type SysConfig struct {
etcdKV *etcd
etcdPathPrefix string
kv *kv.EtcdKV
}
func (conf *SysConfig) InitFromFile(filePath string) (error)
func (conf *SysConfig) GetByPrefix(keyPrefix string) ([]string, error)
func (conf *SysConfig) GetByPrefix(keyPrefix string) (keys []string, values []string, err error)
func (conf *SysConfig) Get(keys []string) ([]string, error)
```


@ -1,8 +1,9 @@
set(COMMON_SRC
Schema.cpp
)
set(COMMON_SRC
Schema.cpp
Types.cpp
)
add_library(milvus_common
${COMMON_SRC}
)
add_library(milvus_common
${COMMON_SRC}
)
target_link_libraries(milvus_common milvus_proto)


@ -10,18 +10,13 @@
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
#include "utils/Types.h"
#include "common/Types.h"
#include "utils/Status.h"
#include "utils/EasyAssert.h"
#include <string>
#include <stdexcept>
namespace milvus {
using Timestamp = uint64_t; // TODO: use TiKV-like timestamp
using engine::DataType;
using engine::FieldElementType;
inline int
field_sizeof(DataType data_type, int dim = 1) {
switch (data_type) {
@ -89,7 +84,13 @@ field_is_vector(DataType datatype) {
struct FieldMeta {
public:
FieldMeta(std::string_view name, DataType type, int dim = 1) : name_(name), type_(type), dim_(dim) {
FieldMeta(std::string_view name, DataType type) : name_(name), type_(type) {
Assert(!is_vector());
}
FieldMeta(std::string_view name, DataType type, int64_t dim, MetricType metric_type)
: name_(name), type_(type), vector_info_(VectorInfo{dim, metric_type}) {
Assert(is_vector());
}
bool
@ -98,14 +99,11 @@ struct FieldMeta {
return type_ == DataType::VECTOR_BINARY || type_ == DataType::VECTOR_FLOAT;
}
void
set_dim(int dim) {
dim_ = dim;
}
int
int64_t
get_dim() const {
return dim_;
Assert(is_vector());
Assert(vector_info_.has_value());
return vector_info_->dim_;
}
const std::string&
@ -120,12 +118,20 @@ struct FieldMeta {
int
get_sizeof() const {
return field_sizeof(type_, dim_);
if (is_vector()) {
return field_sizeof(type_, get_dim());
} else {
return field_sizeof(type_, 1);
}
}
private:
struct VectorInfo {
int64_t dim_;
MetricType metric_type_;
};
std::string name_;
DataType type_ = DataType::NONE;
int dim_ = 1;
std::optional<VectorInfo> vector_info_;
};
} // namespace milvus
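
For reference, a small sketch of how the reshaped FieldMeta is constructed under the two overloads (the include path and the INT64 scalar type are assumptions based on the surrounding engine code):

```cpp
#include "common/FieldMeta.h"  // path assumed

// Sketch: scalars use the 2-arg constructor; vector fields must carry dim and
// metric type, which land in the optional VectorInfo member.
void FieldMetaSketch() {
    milvus::FieldMeta age("age", milvus::DataType::INT64);
    milvus::FieldMeta vec("vec", milvus::DataType::VECTOR_BINARY,
                          /*dim=*/128, milvus::MetricType::METRIC_Jaccard);
    auto dim = vec.get_dim();       // 128; asserts that the field is a vector
    auto bytes = vec.get_sizeof();  // expected dim / 8 bytes per row for binary vectors
    (void)age; (void)dim; (void)bytes;
}
```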


@ -11,35 +11,50 @@
#include "common/Schema.h"
#include <google/protobuf/text_format.h>
#include <boost/lexical_cast.hpp>
namespace milvus {
using std::string;
static std::map<string, string>
RepeatedKeyValToMap(const google::protobuf::RepeatedPtrField<proto::common::KeyValuePair>& kvs) {
std::map<string, string> mapping;
for (auto& kv : kvs) {
AssertInfo(!mapping.count(kv.key()), "repeat key(" + kv.key() + ") in protobuf");
mapping.emplace(kv.key(), kv.value());
}
return mapping;
}
std::shared_ptr<Schema>
Schema::ParseFrom(const milvus::proto::schema::CollectionSchema& schema_proto) {
auto schema = std::make_shared<Schema>();
schema->set_auto_id(schema_proto.autoid());
for (const milvus::proto::schema::FieldSchema& child : schema_proto.fields()) {
const auto& type_params = child.type_params();
int64_t dim = -1;
auto data_type = DataType(child.data_type());
for (const auto& type_param : type_params) {
if (type_param.key() == "dim") {
dim = strtoll(type_param.value().c_str(), nullptr, 10);
}
}
if (field_is_vector(data_type)) {
AssertInfo(dim != -1, "dim not found");
} else {
AssertInfo(dim == 1 || dim == -1, "Invalid dim field. Should be 1 or not exists");
dim = 1;
}
if (child.is_primary_key()) {
AssertInfo(!schema->primary_key_offset_opt_.has_value(), "repetitive primary key");
schema->primary_key_offset_opt_ = schema->size();
}
schema->AddField(child.name(), data_type, dim);
if (field_is_vector(data_type)) {
auto type_map = RepeatedKeyValToMap(child.type_params());
auto index_map = RepeatedKeyValToMap(child.index_params());
if (!index_map.count("metric_type")) {
auto default_metric_type =
data_type == DataType::VECTOR_FLOAT ? std::string("L2") : std::string("JACCARD");
index_map["metric_type"] = default_metric_type;
}
AssertInfo(type_map.count("dim"), "dim not found");
auto dim = boost::lexical_cast<int64_t>(type_map.at("dim"));
AssertInfo(index_map.count("metric_type"), "metric_type not found");
auto metric_type = GetMetricType(index_map.at("metric_type"));
schema->AddField(child.name(), data_type, dim, metric_type);
} else {
schema->AddField(child.name(), data_type);
}
}
return schema;
}
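
A hand-built equivalent of what ParseFrom produces for a two-field schema, using the new AddField overloads (field names and dim are illustrative):

```cpp
#include "common/Schema.h"  // path assumed

std::shared_ptr<milvus::Schema> BuildExampleSchema() {
    auto schema = std::make_shared<milvus::Schema>();
    schema->set_auto_id(true);
    // Scalar field: no dim / metric type.
    schema->AddField("age", milvus::DataType::INT64);
    // Binary vector field: dim would come from type_params["dim"]; when
    // index_params has no "metric_type", ParseFrom defaults it to Jaccard.
    schema->AddField("vec", milvus::DataType::VECTOR_BINARY,
                     /*dim=*/128, milvus::MetricType::METRIC_Jaccard);
    return schema;
}
```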


@ -24,19 +24,15 @@ namespace milvus {
class Schema {
public:
void
AddField(std::string_view field_name, DataType data_type, int dim = 1) {
auto field_meta = FieldMeta(field_name, data_type, dim);
AddField(std::string_view field_name, DataType data_type) {
auto field_meta = FieldMeta(field_name, data_type);
this->AddField(std::move(field_meta));
}
void
AddField(FieldMeta field_meta) {
auto offset = fields_.size();
fields_.emplace_back(field_meta);
offsets_.emplace(field_meta.get_name(), offset);
auto field_sizeof = field_meta.get_sizeof();
sizeof_infos_.push_back(field_sizeof);
total_sizeof_ += field_sizeof;
AddField(std::string_view field_name, DataType data_type, int64_t dim, MetricType metric_type) {
auto field_meta = FieldMeta(field_name, data_type, dim, metric_type);
this->AddField(std::move(field_meta));
}
void
@ -44,17 +40,6 @@ class Schema {
is_auto_id_ = is_auto_id;
}
auto
begin() {
return fields_.begin();
}
auto
end() {
return fields_.end();
}
public:
bool
get_is_auto_id() const {
return is_auto_id_;
@ -123,11 +108,20 @@ class Schema {
static std::shared_ptr<Schema>
ParseFrom(const milvus::proto::schema::CollectionSchema& schema_proto);
void
AddField(FieldMeta&& field_meta) {
auto offset = fields_.size();
fields_.emplace_back(field_meta);
offsets_.emplace(field_meta.get_name(), offset);
auto field_sizeof = field_meta.get_sizeof();
sizeof_infos_.push_back(std::move(field_sizeof));
total_sizeof_ += field_sizeof;
}
private:
// this is where data holds
std::vector<FieldMeta> fields_;
private:
// a mapping for random access
std::unordered_map<std::string, int> offsets_;
std::vector<int> sizeof_infos_;


@ -0,0 +1,45 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
//
// Created by mike on 12/3/20.
//
#include "common/Types.h"
#include <knowhere/index/vector_index/helpers/IndexParameter.h>
#include "utils/EasyAssert.h"
#include <boost/bimap.hpp>
#include <boost/algorithm/string/case_conv.hpp>
namespace milvus {
using boost::algorithm::to_lower_copy;
namespace Metric = knowhere::Metric;
static auto map = [] {
boost::bimap<std::string, MetricType> mapping;
using pos = boost::bimap<std::string, MetricType>::value_type;
mapping.insert(pos(to_lower_copy(std::string(Metric::L2)), MetricType::METRIC_L2));
mapping.insert(pos(to_lower_copy(std::string(Metric::IP)), MetricType::METRIC_INNER_PRODUCT));
mapping.insert(pos(to_lower_copy(std::string(Metric::JACCARD)), MetricType::METRIC_Jaccard));
mapping.insert(pos(to_lower_copy(std::string(Metric::TANIMOTO)), MetricType::METRIC_Tanimoto));
mapping.insert(pos(to_lower_copy(std::string(Metric::HAMMING)), MetricType::METRIC_Hamming));
mapping.insert(pos(to_lower_copy(std::string(Metric::SUBSTRUCTURE)), MetricType::METRIC_Substructure));
mapping.insert(pos(to_lower_copy(std::string(Metric::SUPERSTRUCTURE)), MetricType::METRIC_Superstructure));
return mapping;
}();
MetricType
GetMetricType(const std::string& type_name) {
auto real_name = to_lower_copy(type_name);
AssertInfo(map.left.count(real_name), "metric type not found: " + type_name);
return map.left.at(real_name);
}
} // namespace milvus
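
The mapping is case-insensitive on lookup; a quick sketch of the expected behaviour:

```cpp
#include "common/Types.h"  // path assumed
#include <cassert>

void MetricLookupSketch() {
    // Input is lowered before the bimap lookup, so case does not matter.
    assert(milvus::GetMetricType("JACCARD") == milvus::MetricType::METRIC_Jaccard);
    assert(milvus::GetMetricType("jaccard") == milvus::MetricType::METRIC_Jaccard);
    assert(milvus::GetMetricType("L2") == milvus::MetricType::METRIC_L2);
    // An unknown name such as "cosine" would fail AssertInfo("metric type not found: ...").
}
```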


@ -0,0 +1,27 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
#include "utils/Types.h"
#include <faiss/MetricType.h>
#include <string>
namespace milvus {
using Timestamp = uint64_t; // TODO: use TiKV-like timestamp
using engine::DataType;
using engine::FieldElementType;
using engine::QueryResult;
using MetricType = faiss::MetricType;
faiss::MetricType
GetMetricType(const std::string& type);
} // namespace milvus


@ -76,6 +76,10 @@ class PartitionDescriptionDefaultTypeInternal {
public:
::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<PartitionDescription> _instance;
} _PartitionDescription_default_instance_;
class SysConfigResponseDefaultTypeInternal {
public:
::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<SysConfigResponse> _instance;
} _SysConfigResponse_default_instance_;
class HitsDefaultTypeInternal {
public:
::PROTOBUF_NAMESPACE_ID::internal::ExplicitlyConstructed<Hits> _instance;
@ -311,7 +315,22 @@ static void InitDefaultsscc_info_StringResponse_service_5fmsg_2eproto() {
{{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_StringResponse_service_5fmsg_2eproto}, {
&scc_info_Status_common_2eproto.base,}};
static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_service_5fmsg_2eproto[15];
static void InitDefaultsscc_info_SysConfigResponse_service_5fmsg_2eproto() {
GOOGLE_PROTOBUF_VERIFY_VERSION;
{
void* ptr = &::milvus::proto::service::_SysConfigResponse_default_instance_;
new (ptr) ::milvus::proto::service::SysConfigResponse();
::PROTOBUF_NAMESPACE_ID::internal::OnShutdownDestroyMessage(ptr);
}
::milvus::proto::service::SysConfigResponse::InitAsDefaultInstance();
}
::PROTOBUF_NAMESPACE_ID::internal::SCCInfo<1> scc_info_SysConfigResponse_service_5fmsg_2eproto =
{{ATOMIC_VAR_INIT(::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase::kUninitialized), 1, InitDefaultsscc_info_SysConfigResponse_service_5fmsg_2eproto}, {
&scc_info_Status_common_2eproto.base,}};
static ::PROTOBUF_NAMESPACE_ID::Metadata file_level_metadata_service_5fmsg_2eproto[16];
static const ::PROTOBUF_NAMESPACE_ID::EnumDescriptor* file_level_enum_descriptors_service_5fmsg_2eproto[1];
static constexpr ::PROTOBUF_NAMESPACE_ID::ServiceDescriptor const** file_level_service_descriptors_service_5fmsg_2eproto = nullptr;
@ -414,6 +433,14 @@ const ::PROTOBUF_NAMESPACE_ID::uint32 TableStruct_service_5fmsg_2eproto::offsets
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::PartitionDescription, name_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::PartitionDescription, statistics_),
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
~0u, // no _weak_field_map_
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, status_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, keys_),
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::SysConfigResponse, values_),
~0u, // no _has_bits_
PROTOBUF_FIELD_OFFSET(::milvus::proto::service::Hits, _internal_metadata_),
~0u, // no _extensions_
~0u, // no _oneof_case_
@ -443,8 +470,9 @@ static const ::PROTOBUF_NAMESPACE_ID::internal::MigrationSchema schemas[] PROTOB
{ 73, -1, sizeof(::milvus::proto::service::IntegerRangeResponse)},
{ 81, -1, sizeof(::milvus::proto::service::CollectionDescription)},
{ 89, -1, sizeof(::milvus::proto::service::PartitionDescription)},
{ 97, -1, sizeof(::milvus::proto::service::Hits)},
{ 105, -1, sizeof(::milvus::proto::service::QueryResult)},
{ 97, -1, sizeof(::milvus::proto::service::SysConfigResponse)},
{ 105, -1, sizeof(::milvus::proto::service::Hits)},
{ 113, -1, sizeof(::milvus::proto::service::QueryResult)},
};
static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] = {
@ -461,6 +489,7 @@ static ::PROTOBUF_NAMESPACE_ID::Message const * const file_default_instances[] =
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_IntegerRangeResponse_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_CollectionDescription_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_PartitionDescription_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_SysConfigResponse_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_Hits_default_instance_),
reinterpret_cast<const ::PROTOBUF_NAMESPACE_ID::Message*>(&::milvus::proto::service::_QueryResult_default_instance_),
};
@ -499,20 +528,22 @@ const char descriptor_table_protodef_service_5fmsg_2eproto[] PROTOBUF_SECTION_VA
"\006status\030\001 \001(\0132\033.milvus.proto.common.Stat"
"us\0221\n\004name\030\002 \001(\0132#.milvus.proto.service."
"PartitionName\0225\n\nstatistics\030\003 \003(\0132!.milv"
"us.proto.common.KeyValuePair\"5\n\004Hits\022\013\n\003"
"IDs\030\001 \003(\003\022\020\n\010row_data\030\002 \003(\014\022\016\n\006scores\030\003 "
"\003(\002\"H\n\013QueryResult\022+\n\006status\030\001 \001(\0132\033.mil"
"vus.proto.common.Status\022\014\n\004hits\030\002 \003(\014*@\n"
"\017PlaceholderType\022\010\n\004NONE\020\000\022\021\n\rVECTOR_BIN"
"ARY\020d\022\020\n\014VECTOR_FLOAT\020eBCZAgithub.com/zi"
"lliztech/milvus-distributed/internal/pro"
"to/servicepbb\006proto3"
"us.proto.common.KeyValuePair\"^\n\021SysConfi"
"gResponse\022+\n\006status\030\001 \001(\0132\033.milvus.proto"
".common.Status\022\014\n\004keys\030\002 \003(\t\022\016\n\006values\030\003"
" \003(\t\"5\n\004Hits\022\013\n\003IDs\030\001 \003(\003\022\020\n\010row_data\030\002 "
"\003(\014\022\016\n\006scores\030\003 \003(\002\"H\n\013QueryResult\022+\n\006st"
"atus\030\001 \001(\0132\033.milvus.proto.common.Status\022"
"\014\n\004hits\030\002 \003(\014*@\n\017PlaceholderType\022\010\n\004NONE"
"\020\000\022\021\n\rVECTOR_BINARY\020d\022\020\n\014VECTOR_FLOAT\020eB"
"CZAgithub.com/zilliztech/milvus-distribu"
"ted/internal/proto/servicepbb\006proto3"
;
static const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable*const descriptor_table_service_5fmsg_2eproto_deps[2] = {
&::descriptor_table_common_2eproto,
&::descriptor_table_schema_2eproto,
};
static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_service_5fmsg_2eproto_sccs[15] = {
static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_service_5fmsg_2eproto_sccs[16] = {
&scc_info_BoolResponse_service_5fmsg_2eproto.base,
&scc_info_CollectionDescription_service_5fmsg_2eproto.base,
&scc_info_CollectionName_service_5fmsg_2eproto.base,
@ -528,14 +559,15 @@ static ::PROTOBUF_NAMESPACE_ID::internal::SCCInfoBase*const descriptor_table_ser
&scc_info_RowBatch_service_5fmsg_2eproto.base,
&scc_info_StringListResponse_service_5fmsg_2eproto.base,
&scc_info_StringResponse_service_5fmsg_2eproto.base,
&scc_info_SysConfigResponse_service_5fmsg_2eproto.base,
};
static ::PROTOBUF_NAMESPACE_ID::internal::once_flag descriptor_table_service_5fmsg_2eproto_once;
static bool descriptor_table_service_5fmsg_2eproto_initialized = false;
const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_service_5fmsg_2eproto = {
&descriptor_table_service_5fmsg_2eproto_initialized, descriptor_table_protodef_service_5fmsg_2eproto, "service_msg.proto", 1620,
&descriptor_table_service_5fmsg_2eproto_once, descriptor_table_service_5fmsg_2eproto_sccs, descriptor_table_service_5fmsg_2eproto_deps, 15, 2,
&descriptor_table_service_5fmsg_2eproto_initialized, descriptor_table_protodef_service_5fmsg_2eproto, "service_msg.proto", 1716,
&descriptor_table_service_5fmsg_2eproto_once, descriptor_table_service_5fmsg_2eproto_sccs, descriptor_table_service_5fmsg_2eproto_deps, 16, 2,
schemas, file_default_instances, TableStruct_service_5fmsg_2eproto::offsets,
file_level_metadata_service_5fmsg_2eproto, 15, file_level_enum_descriptors_service_5fmsg_2eproto, file_level_service_descriptors_service_5fmsg_2eproto,
file_level_metadata_service_5fmsg_2eproto, 16, file_level_enum_descriptors_service_5fmsg_2eproto, file_level_service_descriptors_service_5fmsg_2eproto,
};
// Force running AddDescriptors() at dynamic initialization time.
@ -5159,6 +5191,398 @@ void PartitionDescription::InternalSwap(PartitionDescription* other) {
}
// ===================================================================
void SysConfigResponse::InitAsDefaultInstance() {
::milvus::proto::service::_SysConfigResponse_default_instance_._instance.get_mutable()->status_ = const_cast< ::milvus::proto::common::Status*>(
::milvus::proto::common::Status::internal_default_instance());
}
class SysConfigResponse::_Internal {
public:
static const ::milvus::proto::common::Status& status(const SysConfigResponse* msg);
};
const ::milvus::proto::common::Status&
SysConfigResponse::_Internal::status(const SysConfigResponse* msg) {
return *msg->status_;
}
void SysConfigResponse::clear_status() {
if (GetArenaNoVirtual() == nullptr && status_ != nullptr) {
delete status_;
}
status_ = nullptr;
}
SysConfigResponse::SysConfigResponse()
: ::PROTOBUF_NAMESPACE_ID::Message(), _internal_metadata_(nullptr) {
SharedCtor();
// @@protoc_insertion_point(constructor:milvus.proto.service.SysConfigResponse)
}
SysConfigResponse::SysConfigResponse(const SysConfigResponse& from)
: ::PROTOBUF_NAMESPACE_ID::Message(),
_internal_metadata_(nullptr),
keys_(from.keys_),
values_(from.values_) {
_internal_metadata_.MergeFrom(from._internal_metadata_);
if (from.has_status()) {
status_ = new ::milvus::proto::common::Status(*from.status_);
} else {
status_ = nullptr;
}
// @@protoc_insertion_point(copy_constructor:milvus.proto.service.SysConfigResponse)
}
void SysConfigResponse::SharedCtor() {
::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&scc_info_SysConfigResponse_service_5fmsg_2eproto.base);
status_ = nullptr;
}
SysConfigResponse::~SysConfigResponse() {
// @@protoc_insertion_point(destructor:milvus.proto.service.SysConfigResponse)
SharedDtor();
}
void SysConfigResponse::SharedDtor() {
if (this != internal_default_instance()) delete status_;
}
void SysConfigResponse::SetCachedSize(int size) const {
_cached_size_.Set(size);
}
const SysConfigResponse& SysConfigResponse::default_instance() {
::PROTOBUF_NAMESPACE_ID::internal::InitSCC(&::scc_info_SysConfigResponse_service_5fmsg_2eproto.base);
return *internal_default_instance();
}
void SysConfigResponse::Clear() {
// @@protoc_insertion_point(message_clear_start:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
keys_.Clear();
values_.Clear();
if (GetArenaNoVirtual() == nullptr && status_ != nullptr) {
delete status_;
}
status_ = nullptr;
_internal_metadata_.Clear();
}
#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
const char* SysConfigResponse::_InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) {
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
while (!ctx->Done(&ptr)) {
::PROTOBUF_NAMESPACE_ID::uint32 tag;
ptr = ::PROTOBUF_NAMESPACE_ID::internal::ReadTag(ptr, &tag);
CHK_(ptr);
switch (tag >> 3) {
// .milvus.proto.common.Status status = 1;
case 1:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 10)) {
ptr = ctx->ParseMessage(mutable_status(), ptr);
CHK_(ptr);
} else goto handle_unusual;
continue;
// repeated string keys = 2;
case 2:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 18)) {
ptr -= 1;
do {
ptr += 1;
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_keys(), ptr, ctx, "milvus.proto.service.SysConfigResponse.keys");
CHK_(ptr);
if (!ctx->DataAvailable(ptr)) break;
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 18);
} else goto handle_unusual;
continue;
// repeated string values = 3;
case 3:
if (PROTOBUF_PREDICT_TRUE(static_cast<::PROTOBUF_NAMESPACE_ID::uint8>(tag) == 26)) {
ptr -= 1;
do {
ptr += 1;
ptr = ::PROTOBUF_NAMESPACE_ID::internal::InlineGreedyStringParserUTF8(add_values(), ptr, ctx, "milvus.proto.service.SysConfigResponse.values");
CHK_(ptr);
if (!ctx->DataAvailable(ptr)) break;
} while (::PROTOBUF_NAMESPACE_ID::internal::UnalignedLoad<::PROTOBUF_NAMESPACE_ID::uint8>(ptr) == 26);
} else goto handle_unusual;
continue;
default: {
handle_unusual:
if ((tag & 7) == 4 || tag == 0) {
ctx->SetLastTag(tag);
goto success;
}
ptr = UnknownFieldParse(tag, &_internal_metadata_, ptr, ctx);
CHK_(ptr != nullptr);
continue;
}
} // switch
} // while
success:
return ptr;
failure:
ptr = nullptr;
goto success;
#undef CHK_
}
#else // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
bool SysConfigResponse::MergePartialFromCodedStream(
::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) {
#define DO_(EXPRESSION) if (!PROTOBUF_PREDICT_TRUE(EXPRESSION)) goto failure
::PROTOBUF_NAMESPACE_ID::uint32 tag;
// @@protoc_insertion_point(parse_start:milvus.proto.service.SysConfigResponse)
for (;;) {
::std::pair<::PROTOBUF_NAMESPACE_ID::uint32, bool> p = input->ReadTagWithCutoffNoLastTag(127u);
tag = p.first;
if (!p.second) goto handle_unusual;
switch (::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::GetTagFieldNumber(tag)) {
// .milvus.proto.common.Status status = 1;
case 1: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (10 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadMessage(
input, mutable_status()));
} else {
goto handle_unusual;
}
break;
}
// repeated string keys = 2;
case 2: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (18 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
input, this->add_keys()));
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->keys(this->keys_size() - 1).data(),
static_cast<int>(this->keys(this->keys_size() - 1).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
"milvus.proto.service.SysConfigResponse.keys"));
} else {
goto handle_unusual;
}
break;
}
// repeated string values = 3;
case 3: {
if (static_cast< ::PROTOBUF_NAMESPACE_ID::uint8>(tag) == (26 & 0xFF)) {
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::ReadString(
input, this->add_values()));
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->values(this->values_size() - 1).data(),
static_cast<int>(this->values(this->values_size() - 1).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::PARSE,
"milvus.proto.service.SysConfigResponse.values"));
} else {
goto handle_unusual;
}
break;
}
default: {
handle_unusual:
if (tag == 0) {
goto success;
}
DO_(::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SkipField(
input, tag, _internal_metadata_.mutable_unknown_fields()));
break;
}
}
}
success:
// @@protoc_insertion_point(parse_success:milvus.proto.service.SysConfigResponse)
return true;
failure:
// @@protoc_insertion_point(parse_failure:milvus.proto.service.SysConfigResponse)
return false;
#undef DO_
}
#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
void SysConfigResponse::SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const {
// @@protoc_insertion_point(serialize_start:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
// .milvus.proto.common.Status status = 1;
if (this->has_status()) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteMessageMaybeToArray(
1, _Internal::status(this), output);
}
// repeated string keys = 2;
for (int i = 0, n = this->keys_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->keys(i).data(), static_cast<int>(this->keys(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.keys");
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
2, this->keys(i), output);
}
// repeated string values = 3;
for (int i = 0, n = this->values_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->values(i).data(), static_cast<int>(this->values(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.values");
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::WriteString(
3, this->values(i), output);
}
if (_internal_metadata_.have_unknown_fields()) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFields(
_internal_metadata_.unknown_fields(), output);
}
// @@protoc_insertion_point(serialize_end:milvus.proto.service.SysConfigResponse)
}
::PROTOBUF_NAMESPACE_ID::uint8* SysConfigResponse::InternalSerializeWithCachedSizesToArray(
::PROTOBUF_NAMESPACE_ID::uint8* target) const {
// @@protoc_insertion_point(serialize_to_array_start:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
// .milvus.proto.common.Status status = 1;
if (this->has_status()) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
InternalWriteMessageToArray(
1, _Internal::status(this), target);
}
// repeated string keys = 2;
for (int i = 0, n = this->keys_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->keys(i).data(), static_cast<int>(this->keys(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.keys");
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
WriteStringToArray(2, this->keys(i), target);
}
// repeated string values = 3;
for (int i = 0, n = this->values_size(); i < n; i++) {
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
this->values(i).data(), static_cast<int>(this->values(i).length()),
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
"milvus.proto.service.SysConfigResponse.values");
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::
WriteStringToArray(3, this->values(i), target);
}
if (_internal_metadata_.have_unknown_fields()) {
target = ::PROTOBUF_NAMESPACE_ID::internal::WireFormat::SerializeUnknownFieldsToArray(
_internal_metadata_.unknown_fields(), target);
}
// @@protoc_insertion_point(serialize_to_array_end:milvus.proto.service.SysConfigResponse)
return target;
}
size_t SysConfigResponse::ByteSizeLong() const {
// @@protoc_insertion_point(message_byte_size_start:milvus.proto.service.SysConfigResponse)
size_t total_size = 0;
if (_internal_metadata_.have_unknown_fields()) {
total_size +=
::PROTOBUF_NAMESPACE_ID::internal::WireFormat::ComputeUnknownFieldsSize(
_internal_metadata_.unknown_fields());
}
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
// Prevent compiler warnings about cached_has_bits being unused
(void) cached_has_bits;
// repeated string keys = 2;
total_size += 1 *
::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->keys_size());
for (int i = 0, n = this->keys_size(); i < n; i++) {
total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
this->keys(i));
}
// repeated string values = 3;
total_size += 1 *
::PROTOBUF_NAMESPACE_ID::internal::FromIntSize(this->values_size());
for (int i = 0, n = this->values_size(); i < n; i++) {
total_size += ::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
this->values(i));
}
// .milvus.proto.common.Status status = 1;
if (this->has_status()) {
total_size += 1 +
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::MessageSize(
*status_);
}
int cached_size = ::PROTOBUF_NAMESPACE_ID::internal::ToCachedSize(total_size);
SetCachedSize(cached_size);
return total_size;
}
void SysConfigResponse::MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_merge_from_start:milvus.proto.service.SysConfigResponse)
GOOGLE_DCHECK_NE(&from, this);
const SysConfigResponse* source =
::PROTOBUF_NAMESPACE_ID::DynamicCastToGenerated<SysConfigResponse>(
&from);
if (source == nullptr) {
// @@protoc_insertion_point(generalized_merge_from_cast_fail:milvus.proto.service.SysConfigResponse)
::PROTOBUF_NAMESPACE_ID::internal::ReflectionOps::Merge(from, this);
} else {
// @@protoc_insertion_point(generalized_merge_from_cast_success:milvus.proto.service.SysConfigResponse)
MergeFrom(*source);
}
}
void SysConfigResponse::MergeFrom(const SysConfigResponse& from) {
// @@protoc_insertion_point(class_specific_merge_from_start:milvus.proto.service.SysConfigResponse)
GOOGLE_DCHECK_NE(&from, this);
_internal_metadata_.MergeFrom(from._internal_metadata_);
::PROTOBUF_NAMESPACE_ID::uint32 cached_has_bits = 0;
(void) cached_has_bits;
keys_.MergeFrom(from.keys_);
values_.MergeFrom(from.values_);
if (from.has_status()) {
mutable_status()->::milvus::proto::common::Status::MergeFrom(from.status());
}
}
void SysConfigResponse::CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) {
// @@protoc_insertion_point(generalized_copy_from_start:milvus.proto.service.SysConfigResponse)
if (&from == this) return;
Clear();
MergeFrom(from);
}
void SysConfigResponse::CopyFrom(const SysConfigResponse& from) {
// @@protoc_insertion_point(class_specific_copy_from_start:milvus.proto.service.SysConfigResponse)
if (&from == this) return;
Clear();
MergeFrom(from);
}
bool SysConfigResponse::IsInitialized() const {
return true;
}
void SysConfigResponse::InternalSwap(SysConfigResponse* other) {
using std::swap;
_internal_metadata_.Swap(&other->_internal_metadata_);
keys_.InternalSwap(CastToBase(&other->keys_));
values_.InternalSwap(CastToBase(&other->values_));
swap(status_, other->status_);
}
::PROTOBUF_NAMESPACE_ID::Metadata SysConfigResponse::GetMetadata() const {
return GetMetadataStatic();
}
// ===================================================================
void Hits::InitAsDefaultInstance() {
@ -5911,6 +6335,9 @@ template<> PROTOBUF_NOINLINE ::milvus::proto::service::CollectionDescription* Ar
template<> PROTOBUF_NOINLINE ::milvus::proto::service::PartitionDescription* Arena::CreateMaybeMessage< ::milvus::proto::service::PartitionDescription >(Arena* arena) {
return Arena::CreateInternal< ::milvus::proto::service::PartitionDescription >(arena);
}
template<> PROTOBUF_NOINLINE ::milvus::proto::service::SysConfigResponse* Arena::CreateMaybeMessage< ::milvus::proto::service::SysConfigResponse >(Arena* arena) {
return Arena::CreateInternal< ::milvus::proto::service::SysConfigResponse >(arena);
}
template<> PROTOBUF_NOINLINE ::milvus::proto::service::Hits* Arena::CreateMaybeMessage< ::milvus::proto::service::Hits >(Arena* arena) {
return Arena::CreateInternal< ::milvus::proto::service::Hits >(arena);
}


@ -50,7 +50,7 @@ struct TableStruct_service_5fmsg_2eproto {
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::AuxillaryParseTableField aux[]
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[15]
static const ::PROTOBUF_NAMESPACE_ID::internal::ParseTable schema[16]
PROTOBUF_SECTION_VARIABLE(protodesc_cold);
static const ::PROTOBUF_NAMESPACE_ID::internal::FieldMetadata field_metadata[];
static const ::PROTOBUF_NAMESPACE_ID::internal::SerializationTable serialization_table[];
@ -105,6 +105,9 @@ extern StringListResponseDefaultTypeInternal _StringListResponse_default_instanc
class StringResponse;
class StringResponseDefaultTypeInternal;
extern StringResponseDefaultTypeInternal _StringResponse_default_instance_;
class SysConfigResponse;
class SysConfigResponseDefaultTypeInternal;
extern SysConfigResponseDefaultTypeInternal _SysConfigResponse_default_instance_;
} // namespace service
} // namespace proto
} // namespace milvus
@ -124,6 +127,7 @@ template<> ::milvus::proto::service::QueryResult* Arena::CreateMaybeMessage<::mi
template<> ::milvus::proto::service::RowBatch* Arena::CreateMaybeMessage<::milvus::proto::service::RowBatch>(Arena*);
template<> ::milvus::proto::service::StringListResponse* Arena::CreateMaybeMessage<::milvus::proto::service::StringListResponse>(Arena*);
template<> ::milvus::proto::service::StringResponse* Arena::CreateMaybeMessage<::milvus::proto::service::StringResponse>(Arena*);
template<> ::milvus::proto::service::SysConfigResponse* Arena::CreateMaybeMessage<::milvus::proto::service::SysConfigResponse>(Arena*);
PROTOBUF_NAMESPACE_CLOSE
namespace milvus {
namespace proto {
@ -2154,6 +2158,178 @@ class PartitionDescription :
};
// -------------------------------------------------------------------
class SysConfigResponse :
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.service.SysConfigResponse) */ {
public:
SysConfigResponse();
virtual ~SysConfigResponse();
SysConfigResponse(const SysConfigResponse& from);
SysConfigResponse(SysConfigResponse&& from) noexcept
: SysConfigResponse() {
*this = ::std::move(from);
}
inline SysConfigResponse& operator=(const SysConfigResponse& from) {
CopyFrom(from);
return *this;
}
inline SysConfigResponse& operator=(SysConfigResponse&& from) noexcept {
if (GetArenaNoVirtual() == from.GetArenaNoVirtual()) {
if (this != &from) InternalSwap(&from);
} else {
CopyFrom(from);
}
return *this;
}
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
return GetDescriptor();
}
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
return GetMetadataStatic().descriptor;
}
static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
return GetMetadataStatic().reflection;
}
static const SysConfigResponse& default_instance();
static void InitAsDefaultInstance(); // FOR INTERNAL USE ONLY
static inline const SysConfigResponse* internal_default_instance() {
return reinterpret_cast<const SysConfigResponse*>(
&_SysConfigResponse_default_instance_);
}
static constexpr int kIndexInFileMessages =
13;
friend void swap(SysConfigResponse& a, SysConfigResponse& b) {
a.Swap(&b);
}
inline void Swap(SysConfigResponse* other) {
if (other == this) return;
InternalSwap(other);
}
// implements Message ----------------------------------------------
inline SysConfigResponse* New() const final {
return CreateMaybeMessage<SysConfigResponse>(nullptr);
}
SysConfigResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena) const final {
return CreateMaybeMessage<SysConfigResponse>(arena);
}
void CopyFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
void MergeFrom(const ::PROTOBUF_NAMESPACE_ID::Message& from) final;
void CopyFrom(const SysConfigResponse& from);
void MergeFrom(const SysConfigResponse& from);
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
bool IsInitialized() const final;
size_t ByteSizeLong() const final;
#if GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
#else
bool MergePartialFromCodedStream(
::PROTOBUF_NAMESPACE_ID::io::CodedInputStream* input) final;
#endif // GOOGLE_PROTOBUF_ENABLE_EXPERIMENTAL_PARSER
void SerializeWithCachedSizes(
::PROTOBUF_NAMESPACE_ID::io::CodedOutputStream* output) const final;
::PROTOBUF_NAMESPACE_ID::uint8* InternalSerializeWithCachedSizesToArray(
::PROTOBUF_NAMESPACE_ID::uint8* target) const final;
int GetCachedSize() const final { return _cached_size_.Get(); }
private:
inline void SharedCtor();
inline void SharedDtor();
void SetCachedSize(int size) const final;
void InternalSwap(SysConfigResponse* other);
friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
return "milvus.proto.service.SysConfigResponse";
}
private:
inline ::PROTOBUF_NAMESPACE_ID::Arena* GetArenaNoVirtual() const {
return nullptr;
}
inline void* MaybeArenaPtr() const {
return nullptr;
}
public:
::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
private:
static ::PROTOBUF_NAMESPACE_ID::Metadata GetMetadataStatic() {
::PROTOBUF_NAMESPACE_ID::internal::AssignDescriptors(&::descriptor_table_service_5fmsg_2eproto);
return ::descriptor_table_service_5fmsg_2eproto.file_level_metadata[kIndexInFileMessages];
}
public:
// nested types ----------------------------------------------------
// accessors -------------------------------------------------------
enum : int {
kKeysFieldNumber = 2,
kValuesFieldNumber = 3,
kStatusFieldNumber = 1,
};
// repeated string keys = 2;
int keys_size() const;
void clear_keys();
const std::string& keys(int index) const;
std::string* mutable_keys(int index);
void set_keys(int index, const std::string& value);
void set_keys(int index, std::string&& value);
void set_keys(int index, const char* value);
void set_keys(int index, const char* value, size_t size);
std::string* add_keys();
void add_keys(const std::string& value);
void add_keys(std::string&& value);
void add_keys(const char* value);
void add_keys(const char* value, size_t size);
const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& keys() const;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_keys();
// repeated string values = 3;
int values_size() const;
void clear_values();
const std::string& values(int index) const;
std::string* mutable_values(int index);
void set_values(int index, const std::string& value);
void set_values(int index, std::string&& value);
void set_values(int index, const char* value);
void set_values(int index, const char* value, size_t size);
std::string* add_values();
void add_values(const std::string& value);
void add_values(std::string&& value);
void add_values(const char* value);
void add_values(const char* value, size_t size);
const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>& values() const;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>* mutable_values();
// .milvus.proto.common.Status status = 1;
bool has_status() const;
void clear_status();
const ::milvus::proto::common::Status& status() const;
::milvus::proto::common::Status* release_status();
::milvus::proto::common::Status* mutable_status();
void set_allocated_status(::milvus::proto::common::Status* status);
// @@protoc_insertion_point(class_scope:milvus.proto.service.SysConfigResponse)
private:
class _Internal;
::PROTOBUF_NAMESPACE_ID::internal::InternalMetadataWithArena _internal_metadata_;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> keys_;
::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string> values_;
::milvus::proto::common::Status* status_;
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
friend struct ::TableStruct_service_5fmsg_2eproto;
};
// -------------------------------------------------------------------
class Hits :
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:milvus.proto.service.Hits) */ {
public:
@ -2196,7 +2372,7 @@ class Hits :
&_Hits_default_instance_);
}
static constexpr int kIndexInFileMessages =
13;
14;
friend void swap(Hits& a, Hits& b) {
a.Swap(&b);
@ -2367,7 +2543,7 @@ class QueryResult :
&_QueryResult_default_instance_);
}
static constexpr int kIndexInFileMessages =
14;
15;
friend void swap(QueryResult& a, QueryResult& b) {
a.Swap(&b);
@ -3880,6 +4056,185 @@ PartitionDescription::statistics() const {
// -------------------------------------------------------------------
// SysConfigResponse
// .milvus.proto.common.Status status = 1;
inline bool SysConfigResponse::has_status() const {
return this != internal_default_instance() && status_ != nullptr;
}
inline const ::milvus::proto::common::Status& SysConfigResponse::status() const {
const ::milvus::proto::common::Status* p = status_;
// @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.status)
return p != nullptr ? *p : *reinterpret_cast<const ::milvus::proto::common::Status*>(
&::milvus::proto::common::_Status_default_instance_);
}
inline ::milvus::proto::common::Status* SysConfigResponse::release_status() {
// @@protoc_insertion_point(field_release:milvus.proto.service.SysConfigResponse.status)
::milvus::proto::common::Status* temp = status_;
status_ = nullptr;
return temp;
}
inline ::milvus::proto::common::Status* SysConfigResponse::mutable_status() {
if (status_ == nullptr) {
auto* p = CreateMaybeMessage<::milvus::proto::common::Status>(GetArenaNoVirtual());
status_ = p;
}
// @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.status)
return status_;
}
inline void SysConfigResponse::set_allocated_status(::milvus::proto::common::Status* status) {
::PROTOBUF_NAMESPACE_ID::Arena* message_arena = GetArenaNoVirtual();
if (message_arena == nullptr) {
delete reinterpret_cast< ::PROTOBUF_NAMESPACE_ID::MessageLite*>(status_);
}
if (status) {
::PROTOBUF_NAMESPACE_ID::Arena* submessage_arena = nullptr;
if (message_arena != submessage_arena) {
status = ::PROTOBUF_NAMESPACE_ID::internal::GetOwnedMessage(
message_arena, status, submessage_arena);
}
} else {
}
status_ = status;
// @@protoc_insertion_point(field_set_allocated:milvus.proto.service.SysConfigResponse.status)
}
// repeated string keys = 2;
inline int SysConfigResponse::keys_size() const {
return keys_.size();
}
inline void SysConfigResponse::clear_keys() {
keys_.Clear();
}
inline const std::string& SysConfigResponse::keys(int index) const {
// @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.keys)
return keys_.Get(index);
}
inline std::string* SysConfigResponse::mutable_keys(int index) {
// @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.keys)
return keys_.Mutable(index);
}
inline void SysConfigResponse::set_keys(int index, const std::string& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.keys)
keys_.Mutable(index)->assign(value);
}
inline void SysConfigResponse::set_keys(int index, std::string&& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.keys)
keys_.Mutable(index)->assign(std::move(value));
}
inline void SysConfigResponse::set_keys(int index, const char* value) {
GOOGLE_DCHECK(value != nullptr);
keys_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::set_keys(int index, const char* value, size_t size) {
keys_.Mutable(index)->assign(
reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:milvus.proto.service.SysConfigResponse.keys)
}
inline std::string* SysConfigResponse::add_keys() {
// @@protoc_insertion_point(field_add_mutable:milvus.proto.service.SysConfigResponse.keys)
return keys_.Add();
}
inline void SysConfigResponse::add_keys(const std::string& value) {
keys_.Add()->assign(value);
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::add_keys(std::string&& value) {
keys_.Add(std::move(value));
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::add_keys(const char* value) {
GOOGLE_DCHECK(value != nullptr);
keys_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:milvus.proto.service.SysConfigResponse.keys)
}
inline void SysConfigResponse::add_keys(const char* value, size_t size) {
keys_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:milvus.proto.service.SysConfigResponse.keys)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
SysConfigResponse::keys() const {
// @@protoc_insertion_point(field_list:milvus.proto.service.SysConfigResponse.keys)
return keys_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
SysConfigResponse::mutable_keys() {
// @@protoc_insertion_point(field_mutable_list:milvus.proto.service.SysConfigResponse.keys)
return &keys_;
}
// repeated string values = 3;
inline int SysConfigResponse::values_size() const {
return values_.size();
}
inline void SysConfigResponse::clear_values() {
values_.Clear();
}
inline const std::string& SysConfigResponse::values(int index) const {
// @@protoc_insertion_point(field_get:milvus.proto.service.SysConfigResponse.values)
return values_.Get(index);
}
inline std::string* SysConfigResponse::mutable_values(int index) {
// @@protoc_insertion_point(field_mutable:milvus.proto.service.SysConfigResponse.values)
return values_.Mutable(index);
}
inline void SysConfigResponse::set_values(int index, const std::string& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.values)
values_.Mutable(index)->assign(value);
}
inline void SysConfigResponse::set_values(int index, std::string&& value) {
// @@protoc_insertion_point(field_set:milvus.proto.service.SysConfigResponse.values)
values_.Mutable(index)->assign(std::move(value));
}
inline void SysConfigResponse::set_values(int index, const char* value) {
GOOGLE_DCHECK(value != nullptr);
values_.Mutable(index)->assign(value);
// @@protoc_insertion_point(field_set_char:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::set_values(int index, const char* value, size_t size) {
values_.Mutable(index)->assign(
reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_set_pointer:milvus.proto.service.SysConfigResponse.values)
}
inline std::string* SysConfigResponse::add_values() {
// @@protoc_insertion_point(field_add_mutable:milvus.proto.service.SysConfigResponse.values)
return values_.Add();
}
inline void SysConfigResponse::add_values(const std::string& value) {
values_.Add()->assign(value);
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::add_values(std::string&& value) {
values_.Add(std::move(value));
// @@protoc_insertion_point(field_add:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::add_values(const char* value) {
GOOGLE_DCHECK(value != nullptr);
values_.Add()->assign(value);
// @@protoc_insertion_point(field_add_char:milvus.proto.service.SysConfigResponse.values)
}
inline void SysConfigResponse::add_values(const char* value, size_t size) {
values_.Add()->assign(reinterpret_cast<const char*>(value), size);
// @@protoc_insertion_point(field_add_pointer:milvus.proto.service.SysConfigResponse.values)
}
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
SysConfigResponse::values() const {
// @@protoc_insertion_point(field_list:milvus.proto.service.SysConfigResponse.values)
return values_;
}
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
SysConfigResponse::mutable_values() {
// @@protoc_insertion_point(field_mutable_list:milvus.proto.service.SysConfigResponse.values)
return &values_;
}
// -------------------------------------------------------------------
// Hits
// repeated int64 IDs = 1;
@ -4152,6 +4507,8 @@ QueryResult::mutable_hits() {
// -------------------------------------------------------------------
// -------------------------------------------------------------------
// @@protoc_insertion_point(namespace_scope)


@ -11,20 +11,66 @@
#include "BruteForceSearch.h"
#include <vector>
#include <common/Types.h>
#include <boost/dynamic_bitset.hpp>
#include <queue>
namespace milvus::query {
void
BinarySearchBruteForce(faiss::MetricType metric_type,
int64_t code_size,
const uint8_t* binary_chunk,
int64_t chunk_size,
int64_t topk,
int64_t num_queries,
const uint8_t* query_data,
float* result_distances,
idx_t* result_labels,
faiss::ConcurrentBitsetPtr bitset) {
BinarySearchBruteForceNaive(MetricType metric_type,
int64_t code_size,
const uint8_t* binary_chunk,
int64_t chunk_size,
int64_t topk,
int64_t num_queries,
const uint8_t* query_data,
float* result_distances,
idx_t* result_labels,
faiss::ConcurrentBitsetPtr bitset) {
// This is a naive implementation, ready to be optimized.
Assert(metric_type == faiss::METRIC_Jaccard);
Assert(code_size % 4 == 0);
using T = std::tuple<float, int>;
for (int64_t q = 0; q < num_queries; ++q) {
auto query_ptr = query_data + code_size * q;
auto query = boost::dynamic_bitset(query_ptr, query_ptr + code_size);
std::vector<T> max_heap(topk + 1, std::make_tuple(std::numeric_limits<float>::max(), -1));
for (int64_t i = 0; i < chunk_size; ++i) {
auto element_ptr = binary_chunk + code_size * i;
auto element = boost::dynamic_bitset(element_ptr, element_ptr + code_size);
auto the_and = (query & element).count();
auto the_or = (query | element).count();
auto distance = the_or ? (float)(the_or - the_and) / the_or : 0;
if (distance < std::get<0>(max_heap[0])) {
max_heap[topk] = std::make_tuple(distance, i);
std::push_heap(max_heap.begin(), max_heap.end());
std::pop_heap(max_heap.begin(), max_heap.end());
}
}
std::sort(max_heap.begin(), max_heap.end());
for (int k = 0; k < topk; ++k) {
auto info = max_heap[k];
result_distances[k + q * topk] = std::get<0>(info);
result_labels[k + q * topk] = std::get<1>(info);
}
}
}
void
BinarySearchBruteForceFast(MetricType metric_type,
int64_t code_size,
const uint8_t* binary_chunk,
int64_t chunk_size,
int64_t topk,
int64_t num_queries,
const uint8_t* query_data,
float* result_distances,
idx_t* result_labels,
faiss::ConcurrentBitsetPtr bitset) {
const idx_t block_size = segcore::DefaultElementPerChunk;
bool use_heap = true;
@ -83,6 +129,21 @@ BinarySearchBruteForce(faiss::MetricType metric_type,
for (int i = 0; i < num_queries; ++i) {
result_distances[i] = static_cast<float>(int_distances[i]);
}
} else {
PanicInfo("Unsupported metric type");
}
}
void
BinarySearchBruteForce(const dataset::BinaryQueryDataset& query_dataset,
const uint8_t* binary_chunk,
int64_t chunk_size,
float* result_distances,
idx_t* result_labels,
faiss::ConcurrentBitsetPtr bitset) {
// TODO: refactor the internal function
BinarySearchBruteForceFast(query_dataset.metric_type, query_dataset.code_size, binary_chunk, chunk_size,
query_dataset.topk, query_dataset.num_queries, query_dataset.query_data,
result_distances, result_labels, bitset);
}
} // namespace milvus::query
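
To make the naive path's distance concrete, a self-contained sketch of the same Jaccard formula on a single-byte code (values chosen only for illustration):

```cpp
#include <bitset>
#include <cstdint>

// Same formula as BinarySearchBruteForceNaive:
//   jaccard_distance = (|a OR b| - |a AND b|) / |a OR b|
inline float JaccardDistanceSketch(uint8_t a, uint8_t b) {
    auto the_and = std::bitset<8>(a & b).count();  // shared set bits
    auto the_or = std::bitset<8>(a | b).count();   // union of set bits
    return the_or ? static_cast<float>(the_or - the_and) / the_or : 0.0f;
}
// Example: a = 0b10110010, b = 0b10010011 -> AND has 3 bits, OR has 5 bits,
// so the distance is (5 - 3) / 5 = 0.4.
```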


@ -15,15 +15,25 @@
#include "common/Schema.h"
namespace milvus::query {
using MetricType = faiss::MetricType;
namespace dataset {
struct BinaryQueryDataset {
MetricType metric_type;
int64_t num_queries;
int64_t topk;
int64_t code_size;
const uint8_t* query_data;
};
} // namespace dataset
void
BinarySearchBruteForce(faiss::MetricType metric_type,
int64_t code_size,
BinarySearchBruteForce(const dataset::BinaryQueryDataset& query_dataset,
const uint8_t* binary_chunk,
int64_t chunk_size,
int64_t topk,
int64_t num_queries,
const uint8_t* query_data,
float* result_distances,
idx_t* result_labels,
faiss::ConcurrentBitsetPtr bitset = nullptr);
} // namespace milvus::query
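For readers of this header, a minimal calling sketch follows (illustration only, not part of the patch). It assumes idx_t is a 64-bit integer label type and that both output buffers are sized topk * num_queries; SearchBinaryChunkJaccard and its parameter names are hypothetical, while the struct fields and the call shape mirror the declaration above.

#include <limits>
#include <vector>
#include "query/BruteForceSearch.h"

// Sketch: search nq binary query vectors of `dim` bits against one raw chunk.
void
SearchBinaryChunkJaccard(const uint8_t* chunk, int64_t chunk_rows,
                         const uint8_t* queries, int64_t nq, int64_t dim, int64_t topk,
                         std::vector<float>& distances, std::vector<int64_t>& labels) {
    const int64_t code_size = dim / 8;  // bytes per binary vector
    distances.assign(topk * nq, std::numeric_limits<float>::max());
    labels.assign(topk * nq, -1);
    milvus::query::dataset::BinaryQueryDataset query_dataset{
        faiss::MetricType::METRIC_Jaccard, nq, topk, code_size, queries};
    // bitset is left at its nullptr default, so no rows are filtered out
    milvus::query::BinarySearchBruteForce(query_dataset, chunk, chunk_rows,
                                          distances.data(), labels.data());
}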

View File

@ -26,15 +26,25 @@ static std::unique_ptr<VectorPlanNode>
ParseVecNode(Plan* plan, const Json& out_body) {
Assert(out_body.is_object());
// TODO add binary info
auto vec_node = std::make_unique<FloatVectorANNS>();
Assert(out_body.size() == 1);
auto iter = out_body.begin();
std::string field_name = iter.key();
auto& vec_info = iter.value();
Assert(vec_info.is_object());
auto topK = vec_info["topk"];
AssertInfo(topK > 0, "topK must be greater than 0");
AssertInfo(topK < 16384, "topK is too large");
auto field_meta = plan->schema_.operator[](field_name);
auto vec_node = [&]() -> std::unique_ptr<VectorPlanNode> {
auto data_type = field_meta.get_data_type();
if (data_type == DataType::VECTOR_FLOAT) {
return std::make_unique<FloatVectorANNS>();
} else {
return std::make_unique<BinaryVectorANNS>();
}
}();
vec_node->query_info_.topK_ = topK;
vec_node->query_info_.metric_type_ = vec_info.at("metric_type");
vec_node->query_info_.search_params_ = vec_info.at("params");

View File

@ -16,6 +16,7 @@
#include <faiss/utils/distances.h>
#include "utils/tools.h"
#include "query/BruteForceSearch.h"
namespace milvus::query {
using segcore::DefaultElementPerChunk;
@ -41,7 +42,7 @@ QueryBruteForceImpl(const segcore::SegmentSmallIndex& segment,
int64_t num_queries,
Timestamp timestamp,
std::optional<const BitmapSimple*> bitmaps_opt,
segcore::QueryResult& results) {
QueryResult& results) {
auto& schema = segment.get_schema();
auto& indexing_record = segment.get_indexing_record();
auto& record = segment.get_insert_record();
@ -131,7 +132,92 @@ QueryBruteForceImpl(const segcore::SegmentSmallIndex& segment,
}
results.result_ids_ = std::move(final_uids);
// TODO: deprecated code end
return Status::OK();
}
Status
BinaryQueryBruteForceImpl(const segcore::SegmentSmallIndex& segment,
const query::QueryInfo& info,
const uint8_t* query_data,
int64_t num_queries,
Timestamp timestamp,
std::optional<const BitmapSimple*> bitmaps_opt,
QueryResult& results) {
auto& schema = segment.get_schema();
auto& indexing_record = segment.get_indexing_record();
auto& record = segment.get_insert_record();
// step 1: binary search to find the barrier of the snapshot
auto ins_barrier = get_barrier(record, timestamp);
auto max_chunk = upper_div(ins_barrier, DefaultElementPerChunk);
auto metric_type = GetMetricType(info.metric_type_);
// auto del_barrier = get_barrier(deleted_record_, timestamp);
#if 0
auto bitmap_holder = get_deleted_bitmap(del_barrier, timestamp, ins_barrier);
Assert(bitmap_holder);
auto bitmap = bitmap_holder->bitmap_ptr;
#endif
// step 2.1: get meta
// step 2.2: get which vector field to search
auto vecfield_offset_opt = schema.get_offset(info.field_id_);
Assert(vecfield_offset_opt.has_value());
auto vecfield_offset = vecfield_offset_opt.value();
auto& field = schema[vecfield_offset];
Assert(field.get_data_type() == DataType::VECTOR_BINARY);
auto dim = field.get_dim();
auto code_size = dim / 8;
auto topK = info.topK_;
auto total_count = topK * num_queries;
// step 3: small indexing search
std::vector<int64_t> final_uids(total_count, -1);
std::vector<float> final_dis(total_count, std::numeric_limits<float>::max());
query::dataset::BinaryQueryDataset query_dataset{metric_type, num_queries, topK, code_size, query_data};
using segcore::BinaryVector;
auto vec_ptr = record.get_entity<BinaryVector>(vecfield_offset);
auto max_indexed_id = 0;
// step 4: brute force search where small indexing is unavailable
for (int chunk_id = max_indexed_id; chunk_id < max_chunk; ++chunk_id) {
std::vector<int64_t> buf_uids(total_count, -1);
std::vector<float> buf_dis(total_count, std::numeric_limits<float>::max());
auto& chunk = vec_ptr->get_chunk(chunk_id);
auto nsize =
chunk_id != max_chunk - 1 ? DefaultElementPerChunk : ins_barrier - chunk_id * DefaultElementPerChunk;
auto bitmap_view = create_bitmap_view(bitmaps_opt, chunk_id);
BinarySearchBruteForce(query_dataset, chunk.data(), nsize, buf_dis.data(), buf_uids.data(), bitmap_view);
// convert chunk uid to segment uid
for (auto& x : buf_uids) {
if (x != -1) {
x += chunk_id * DefaultElementPerChunk;
}
}
segcore::merge_into(num_queries, topK, final_dis.data(), final_uids.data(), buf_dis.data(), buf_uids.data());
}
results.result_distances_ = std::move(final_dis);
results.internal_seg_offsets_ = std::move(final_uids);
results.topK_ = topK;
results.num_queries_ = num_queries;
// TODO: deprecated code begin
final_uids = results.internal_seg_offsets_;
for (auto& id : final_uids) {
if (id == -1) {
continue;
}
id = record.uids_[id];
}
results.result_ids_ = std::move(final_uids);
// TODO: deprecated code end
return Status::OK();
}
} // namespace milvus::query
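The chunked brute-force loop above can be read with one piece of arithmetic in mind. A binary field of dim bits stores code_size = dim / 8 bytes per row, and rows are split into chunks of C = DefaultElementPerChunk entries, so with an insert barrier of B visible rows:

max\_chunk = upper\_div(B, C) = \lceil B / C \rceil, \qquad nsize_i = \begin{cases} C & i < max\_chunk - 1 \\ B - (max\_chunk - 1)\,C & i = max\_chunk - 1 \end{cases}

Each per-chunk search returns chunk-local offsets, which are shifted by i \cdot C before merge_into folds them into the running final_dis / final_uids buffers; merge_into is assumed here to keep, per query, the topK smallest distances of its two inputs.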

View File

@ -27,5 +27,14 @@ QueryBruteForceImpl(const segcore::SegmentSmallIndex& segment,
int64_t num_queries,
Timestamp timestamp,
std::optional<const BitmapSimple*> bitmap_opt,
segcore::QueryResult& results);
QueryResult& results);
Status
BinaryQueryBruteForceImpl(const segcore::SegmentSmallIndex& segment,
const query::QueryInfo& info,
const uint8_t* query_data,
int64_t num_queries,
Timestamp timestamp,
std::optional<const BitmapSimple*> bitmaps_opt,
QueryResult& results);
} // namespace milvus::query

View File

@ -18,7 +18,7 @@
#include <unordered_map>
#include <vector>
#include "utils/Types.h"
#include "common/Types.h"
#include "utils/Json.h"
namespace milvus {

View File

@ -28,7 +28,7 @@ class ExecPlanNodeVisitor : PlanNodeVisitor {
visit(BinaryVectorANNS& node) override;
public:
using RetType = segcore::QueryResult;
using RetType = QueryResult;
ExecPlanNodeVisitor(segcore::SegmentBase& segment, Timestamp timestamp, const PlaceholderGroup& placeholder_group)
: segment_(segment), timestamp_(timestamp), placeholder_group_(placeholder_group) {
}

View File

@ -26,7 +26,7 @@ namespace impl {
// WILL BE USED BY GENERATOR UNDER suvlim/core_gen/
class ExecPlanNodeVisitor : PlanNodeVisitor {
public:
using RetType = segcore::QueryResult;
using RetType = QueryResult;
ExecPlanNodeVisitor(segcore::SegmentBase& segment, Timestamp timestamp, const PlaceholderGroup& placeholder_group)
: segment_(segment), timestamp_(timestamp), placeholder_group_(placeholder_group) {
}
@ -75,7 +75,22 @@ ExecPlanNodeVisitor::visit(FloatVectorANNS& node) {
void
ExecPlanNodeVisitor::visit(BinaryVectorANNS& node) {
// TODO
// TODO: optimize here, remove the dynamic cast
assert(!ret_.has_value());
auto segment = dynamic_cast<segcore::SegmentSmallIndex*>(&segment_);
AssertInfo(segment, "only SegmentSmallIndex is supported");
RetType ret;
auto& ph = placeholder_group_.at(0);
auto src_data = ph.get_blob<uint8_t>();
auto num_queries = ph.num_of_queries_;
if (node.predicate_.has_value()) {
auto bitmap = ExecExprVisitor(*segment).call_child(*node.predicate_.value());
auto ptr = &bitmap;
BinaryQueryBruteForceImpl(*segment, node.query_info_, src_data, num_queries, timestamp_, ptr, ret);
} else {
BinaryQueryBruteForceImpl(*segment, node.query_info_, src_data, num_queries, timestamp_, std::nullopt, ret);
}
ret_ = ret;
}
} // namespace milvus::query

View File

@ -73,7 +73,24 @@ ShowPlanNodeVisitor::visit(FloatVectorANNS& node) {
void
ShowPlanNodeVisitor::visit(BinaryVectorANNS& node) {
// TODO
assert(!ret_);
auto& info = node.query_info_;
Json json_body{
{"node_type", "BinaryVectorANNS"}, //
{"metric_type", info.metric_type_}, //
{"field_id_", info.field_id_}, //
{"topK", info.topK_}, //
{"search_params", info.search_params_}, //
{"placeholder_tag", node.placeholder_tag_}, //
};
if (node.predicate_.has_value()) {
ShowExprVisitor expr_show;
Assert(node.predicate_.value());
json_body["predicate"] = expr_show.call_child(node.predicate_->operator*());
} else {
json_body["predicate"] = "None";
}
ret_ = json_body;
}
} // namespace milvus::query

View File

@ -123,9 +123,10 @@ Collection::CreateIndex(std::string& index_config) {
void
Collection::parse() {
if (collection_proto_.empty()) {
// TODO: remove hard-coded schema once unittests are ready
std::cout << "WARN: Use default schema" << std::endl;
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
schema->AddField("age", DataType::INT32);
schema_ = schema;
return;

View File

@ -226,8 +226,14 @@ class ConcurrentVector : public ConcurrentVectorImpl<Type, true> {
using ConcurrentVectorImpl<Type, true>::ConcurrentVectorImpl;
};
class FloatVector {};
class BinaryVector {};
class VectorTrait {};
class FloatVector : public VectorTrait {
using embedded_type = float;
};
class BinaryVector : public VectorTrait {
using embedded_type = uint8_t;
};
template <>
class ConcurrentVector<FloatVector> : public ConcurrentVectorImpl<float, false> {

View File

@ -85,8 +85,6 @@ IndexingRecord::UpdateResourceAck(int64_t chunk_ack, const InsertRecord& record)
template <typename T>
void
ScalarIndexingEntry<T>::BuildIndexRange(int64_t ack_beg, int64_t ack_end, const VectorBase* vec_base) {
auto dim = field_meta_.get_dim();
auto source = dynamic_cast<const ConcurrentVector<T>*>(vec_base);
Assert(source);
auto chunk_size = source->chunk_size();

View File

@ -24,7 +24,7 @@ namespace milvus {
namespace segcore {
// using engine::DataChunk;
// using engine::DataChunkPtr;
using engine::QueryResult;
using QueryResult = milvus::QueryResult;
struct RowBasedRawData {
void* raw_data; // schema
int sizeof_per_row; // alignment

View File

@ -42,7 +42,7 @@ DeleteSegment(CSegmentBase segment) {
void
DeleteQueryResult(CQueryResult query_result) {
auto res = (milvus::segcore::QueryResult*)query_result;
auto res = (milvus::QueryResult*)query_result;
delete res;
}
@ -134,7 +134,7 @@ Search(CSegmentBase c_segment,
placeholder_groups.push_back((const milvus::query::PlaceholderGroup*)c_placeholder_groups[i]);
}
auto query_result = std::make_unique<milvus::segcore::QueryResult>();
auto query_result = std::make_unique<milvus::QueryResult>();
auto status = CStatus();
try {

View File

@ -42,8 +42,11 @@ EasyAssertInfo(
[[noreturn]] void
ThrowWithTrace(const std::exception& exception) {
if (typeid(exception) == typeid(WrappedRuntimError)) {
throw exception;
}
auto err_msg = exception.what() + std::string("\n") + EasyStackTrace();
throw std::runtime_error(err_msg);
throw WrappedRuntimError(err_msg);
}
} // namespace milvus::impl

View File

@ -11,6 +11,7 @@
#pragma once
#include <string_view>
#include <stdexcept>
#include <exception>
#include <stdio.h>
#include <stdlib.h>
@ -22,6 +23,10 @@ void
EasyAssertInfo(
bool value, std::string_view expr_str, std::string_view filename, int lineno, std::string_view extra_info);
class WrappedRuntimError : public std::runtime_error {
using std::runtime_error::runtime_error;
};
[[noreturn]] void
ThrowWithTrace(const std::exception& exception);

View File

@ -21,7 +21,7 @@ TEST(Binary, Insert) {
int64_t num_queries = 10;
int64_t topK = 5;
auto schema = std::make_shared<Schema>();
schema->AddField("vecbin", DataType::VECTOR_BINARY, 128);
schema->AddField("vecbin", DataType::VECTOR_BINARY, 128, MetricType::METRIC_Jaccard);
schema->AddField("age", DataType::INT64);
auto dataset = DataGen(schema, N, 10);
auto segment = CreateSegment(schema);

View File

@ -98,7 +98,49 @@ TEST(Expr, Range) {
}
})";
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
schema->AddField("age", DataType::INT32);
auto plan = CreatePlan(*schema, dsl_string);
ShowPlanNodeVisitor shower;
Assert(plan->tag2field_.at("$0") == "fakevec");
auto out = shower.call_child(*plan->plan_node_);
std::cout << out.dump(4);
}
TEST(Expr, RangeBinary) {
SUCCEED();
using namespace milvus;
using namespace milvus::query;
using namespace milvus::segcore;
std::string dsl_string = R"(
{
"bool": {
"must": [
{
"range": {
"age": {
"GT": 1,
"LT": 100
}
}
},
{
"vector": {
"fakevec": {
"metric_type": "Jaccard",
"params": {
"nprobe": 10
},
"query": "$0",
"topk": 10
}
}
}
]
}
})";
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_BINARY, 512, MetricType::METRIC_Jaccard);
schema->AddField("age", DataType::INT32);
auto plan = CreatePlan(*schema, dsl_string);
ShowPlanNodeVisitor shower;
@ -140,7 +182,7 @@ TEST(Expr, InvalidRange) {
}
})";
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
schema->AddField("age", DataType::INT32);
ASSERT_ANY_THROW(CreatePlan(*schema, dsl_string));
}
@ -179,7 +221,7 @@ TEST(Expr, InvalidDSL) {
})";
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
schema->AddField("age", DataType::INT32);
ASSERT_ANY_THROW(CreatePlan(*schema, dsl_string));
}
@ -189,7 +231,7 @@ TEST(Expr, ShowExecutor) {
using namespace milvus::segcore;
auto node = std::make_unique<FloatVectorANNS>();
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
int64_t num_queries = 100L;
auto raw_data = DataGen(schema, num_queries);
auto& info = node->query_info_;
@ -248,7 +290,7 @@ TEST(Expr, TestRange) {
}
})";
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
schema->AddField("age", DataType::INT32);
auto seg = CreateSegment(schema);

View File

@ -235,14 +235,14 @@ TEST(Indexing, IVFFlatNM) {
}
}
TEST(Indexing, DISABLED_BinaryBruteForce) {
TEST(Indexing, BinaryBruteForce) {
int64_t N = 100000;
int64_t num_queries = 10;
int64_t topk = 5;
int64_t dim = 64;
int64_t dim = 512;
auto result_count = topk * num_queries;
auto schema = std::make_shared<Schema>();
schema->AddField("vecbin", DataType::VECTOR_BINARY, dim);
schema->AddField("vecbin", DataType::VECTOR_BINARY, dim, MetricType::METRIC_Jaccard);
schema->AddField("age", DataType::INT64);
auto dataset = DataGen(schema, N, 10);
vector<float> distances(result_count);
@ -250,8 +250,16 @@ TEST(Indexing, DISABLED_BinaryBruteForce) {
auto bin_vec = dataset.get_col<uint8_t>(0);
auto line_sizeof = schema->operator[](0).get_sizeof();
auto query_data = 1024 * line_sizeof + bin_vec.data();
query::BinarySearchBruteForce(faiss::MetricType::METRIC_Jaccard, line_sizeof, bin_vec.data(), N, topk, num_queries,
query_data, distances.data(), ids.data());
query::dataset::BinaryQueryDataset query_dataset{
faiss::MetricType::METRIC_Jaccard, //
num_queries, //
topk, //
line_sizeof, //
query_data //
};
query::BinarySearchBruteForce(query_dataset, bin_vec.data(), N, distances.data(), ids.data());
QueryResult qr;
qr.num_queries_ = num_queries;
qr.topK_ = topk;
@ -264,76 +272,78 @@ TEST(Indexing, DISABLED_BinaryBruteForce) {
[
[
"1024->0.000000",
"86966->0.395349",
"24843->0.404762",
"13806->0.416667",
"44313->0.421053"
"43190->0.578804",
"5255->0.586207",
"23247->0.586486",
"4936->0.588889"
],
[
"1025->0.000000",
"14226->0.348837",
"1488->0.365854",
"47337->0.377778",
"20913->0.377778"
"15147->0.562162",
"49910->0.564304",
"67435->0.567867",
"38292->0.569921"
],
[
"1026->0.000000",
"81882->0.386364",
"9215->0.409091",
"95024->0.409091",
"54987->0.414634"
"15332->0.569061",
"56391->0.572559",
"17187->0.572603",
"26988->0.573771"
],
[
"1027->0.000000",
"68981->0.394737",
"75528->0.404762",
"68794->0.405405",
"21975->0.425000"
"4502->0.559585",
"25879->0.566234",
"66937->0.566489",
"21228->0.566845"
],
[
"1028->0.000000",
"90290->0.375000",
"34309->0.394737",
"58559->0.400000",
"33865->0.400000"
"38490->0.578804",
"12946->0.581717",
"31677->0.582173",
"94474->0.583569"
],
[
"1029->0.000000",
"62722->0.388889",
"89070->0.394737",
"18528->0.414634",
"94971->0.421053"
"59011->0.551630",
"82575->0.555263",
"42914->0.561828",
"23705->0.564171"
],
[
"1030->0.000000",
"67402->0.333333",
"3988->0.347826",
"86376->0.354167",
"84381->0.361702"
"39782->0.579946",
"65553->0.589947",
"82154->0.590028",
"13374->0.590164"
],
[
"1031->0.000000",
"81569->0.325581",
"12715->0.347826",
"40332->0.363636",
"21037->0.372093"
"47826->0.582873",
"72669->0.587432",
"334->0.588076",
"80652->0.589333"
],
[
"1032->0.000000",
"60536->0.428571",
"93293->0.432432",
"70969->0.435897",
"64048->0.450000"
"31968->0.573034",
"63545->0.575758",
"76913->0.575916",
"6286->0.576000"
],
[
"1033->0.000000",
"99022->0.394737",
"11763->0.405405",
"50073->0.428571",
"97118->0.428571"
"95635->0.570248",
"93439->0.574866",
"6709->0.578534",
"6367->0.579634"
]
]
]
)");
ASSERT_EQ(json, ref);
auto json_str = json.dump(2);
auto ref_str = ref.dump(2);
ASSERT_EQ(json_str, ref_str);
}
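The reference values above follow from how the queries are chosen: line_sizeof is dim / 8 = 512 / 8 = 64 bytes, and query_data = bin_vec.data() + 1024 * line_sizeof points at row 1024 of the generated column, so query i is the dataset's own row 1024 + i. That is why every result list begins with an exact self-match ("1024->0.000000", "1025->0.000000", ...) while the remaining neighbours carry nonzero Jaccard distances.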

View File

@ -72,7 +72,7 @@ TEST(Query, ShowExecutor) {
using namespace milvus;
auto node = std::make_unique<FloatVectorANNS>();
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
int64_t num_queries = 100L;
auto raw_data = DataGen(schema, num_queries);
auto& info = node->query_info_;
@ -98,7 +98,7 @@ TEST(Query, DSL) {
"must": [
{
"vector": {
"Vec": {
"fakevec": {
"metric_type": "L2",
"params": {
"nprobe": 10
@ -113,7 +113,7 @@ TEST(Query, DSL) {
})";
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
auto plan = CreatePlan(*schema, dsl_string);
auto res = shower.call_child(*plan->plan_node_);
@ -123,7 +123,7 @@ TEST(Query, DSL) {
{
"bool": {
"vector": {
"Vec": {
"fakevec": {
"metric_type": "L2",
"params": {
"nprobe": 10
@ -159,7 +159,7 @@ TEST(Query, ParsePlaceholderGroup) {
})";
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
auto plan = CreatePlan(*schema, dsl_string);
int64_t num_queries = 100000;
int dim = 16;
@ -172,7 +172,7 @@ TEST(Query, ExecWithPredicate) {
using namespace milvus::query;
using namespace milvus::segcore;
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
schema->AddField("age", DataType::FLOAT);
std::string dsl = R"({
"bool": {
@ -217,8 +217,8 @@ TEST(Query, ExecWithPredicate) {
int topk = 5;
Json json = QueryResultToJson(qr);
auto ref = Json::parse(R"([
auto ref = Json::parse(R"(
[
[
[
"980486->3.149221",
@ -257,15 +257,14 @@ TEST(Query, ExecWithPredicate) {
]
]
])");
ASSERT_EQ(json, ref);
ASSERT_EQ(json.dump(2), ref.dump(2));
}
TEST(Query, ExecWihtoutPredicate) {
TEST(Query, ExecWithoutPredicate) {
using namespace milvus::query;
using namespace milvus::segcore;
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
schema->AddField("age", DataType::FLOAT);
std::string dsl = R"({
"bool": {
@ -301,18 +300,49 @@ TEST(Query, ExecWihtoutPredicate) {
segment->Search(plan.get(), ph_group_arr.data(), &time, 1, qr);
std::vector<std::vector<std::string>> results;
int topk = 5;
for (int q = 0; q < num_queries; ++q) {
std::vector<std::string> result;
for (int k = 0; k < topk; ++k) {
int index = q * topk + k;
result.emplace_back(std::to_string(qr.result_ids_[index]) + "->" +
std::to_string(qr.result_distances_[index]));
}
results.emplace_back(std::move(result));
}
Json json{results};
std::cout << json.dump(2);
auto json = QueryResultToJson(qr);
auto ref = Json::parse(R"(
[
[
[
"980486->3.149221",
"318367->3.661235",
"302798->4.553688",
"321424->4.757450",
"565529->5.083780"
],
[
"233390->7.931535",
"238958->8.109344",
"230645->8.439169",
"901939->8.658772",
"380328->8.731251"
],
[
"749862->3.398494",
"701321->3.632437",
"897246->3.749835",
"750683->3.897577",
"105995->4.073595"
],
[
"138274->3.454446",
"124548->3.783290",
"840855->4.782170",
"936719->5.026924",
"709627->5.063170"
],
[
"810401->3.926393",
"46575->4.054171",
"201740->4.274491",
"669040->4.399628",
"231500->4.831223"
]
]
]
)");
ASSERT_EQ(json.dump(2), ref.dump(2));
}
TEST(Query, FillSegment) {
@ -331,6 +361,9 @@ TEST(Query, FillSegment) {
auto param = field->add_type_params();
param->set_key("dim");
param->set_value("16");
auto iparam = field->add_index_params();
iparam->set_key("metric_type");
iparam->set_value("L2");
}
{
@ -392,3 +425,57 @@ TEST(Query, FillSegment) {
++std_index;
}
}
TEST(Query, ExecWithPredicateBinary) {
using namespace milvus::query;
using namespace milvus::segcore;
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_BINARY, 512, MetricType::METRIC_Jaccard);
schema->AddField("age", DataType::FLOAT);
std::string dsl = R"({
"bool": {
"must": [
{
"range": {
"age": {
"GE": -1,
"LT": 1
}
}
},
{
"vector": {
"fakevec": {
"metric_type": "Jaccard",
"params": {
"nprobe": 10
},
"query": "$0",
"topk": 5
}
}
}
]
}
})";
int64_t N = 1000 * 1000;
auto dataset = DataGen(schema, N);
auto segment = std::make_unique<SegmentSmallIndex>(schema);
segment->PreInsert(N);
segment->Insert(0, N, dataset.row_ids_.data(), dataset.timestamps_.data(), dataset.raw_);
auto vec_ptr = dataset.get_col<uint8_t>(0);
auto plan = CreatePlan(*schema, dsl);
auto num_queries = 5;
auto ph_group_raw = CreateBinaryPlaceholderGroupFromBlob(num_queries, 512, vec_ptr.data() + 1024 * 512 / 8);
auto ph_group = ParsePlaceholderGroup(plan.get(), ph_group_raw.SerializeAsString());
QueryResult qr;
Timestamp time = 1000000;
std::vector<const PlaceholderGroup*> ph_group_arr = {ph_group.get()};
segment->Search(plan.get(), ph_group_arr.data(), &time, 1, qr);
int topk = 5;
Json json = QueryResultToJson(qr);
std::cout << json.dump(2);
// ASSERT_EQ(json.dump(2), ref.dump(2));
}

View File

@ -63,7 +63,7 @@ TEST(SegmentCoreTest, NormalDistributionTest) {
using namespace milvus::segcore;
using namespace milvus::engine;
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
schema->AddField("age", DataType::INT32);
int N = 1000 * 1000;
auto [raw_data, timestamps, uids] = generate_data(N);
@ -76,7 +76,7 @@ TEST(SegmentCoreTest, MockTest) {
using namespace milvus::segcore;
using namespace milvus::engine;
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
schema->AddField("age", DataType::INT32);
std::vector<char> raw_data;
std::vector<Timestamp> timestamps;
@ -116,7 +116,7 @@ TEST(SegmentCoreTest, SmallIndex) {
using namespace milvus::segcore;
using namespace milvus::engine;
auto schema = std::make_shared<Schema>();
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16);
schema->AddField("fakevec", DataType::VECTOR_FLOAT, 16, MetricType::METRIC_L2);
schema->AddField("age", DataType::INT32);
int N = 1024 * 1024;
auto data = DataGen(schema, N);

View File

@ -167,7 +167,7 @@ CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::VECTOR_FLOAT);
value->set_type(ser::PlaceholderType::VECTOR_BINARY);
std::default_random_engine e(seed);
for (int i = 0; i < num_queries; ++i) {
std::vector<uint8_t> vec;
@ -175,7 +175,27 @@ CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42
vec.push_back(e());
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size());
}
return raw_group;
}
inline auto
CreateBinaryPlaceholderGroupFromBlob(int64_t num_queries, int64_t dim, const uint8_t* ptr) {
assert(dim % 8 == 0);
namespace ser = milvus::proto::service;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::VECTOR_BINARY);
for (int i = 0; i < num_queries; ++i) {
std::vector<uint8_t> vec;
for (int d = 0; d < dim / 8; ++d) {
vec.push_back(*ptr);
++ptr;
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size());
}
return raw_group;
}

View File

@ -1,4 +1,4 @@
package kv
package etcdkv
import (
"context"

View File

@ -1,11 +1,11 @@
package kv_test
package etcdkv_test
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/kv"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
"go.etcd.io/etcd/clientv3"
)
@ -28,7 +28,7 @@ func TestEtcdKV_Load(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
rootPath := "/etcd/test/root"
etcdKV := kv.NewEtcdKV(cli, rootPath)
etcdKV := etcdkv.NewEtcdKV(cli, rootPath)
defer etcdKV.Close()
defer etcdKV.RemoveWithPrefix("")
@ -86,7 +86,7 @@ func TestEtcdKV_MultiSave(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
rootPath := "/etcd/test/root"
etcdKV := kv.NewEtcdKV(cli, rootPath)
etcdKV := etcdkv.NewEtcdKV(cli, rootPath)
defer etcdKV.Close()
defer etcdKV.RemoveWithPrefix("")
@ -117,7 +117,7 @@ func TestEtcdKV_Remove(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
rootPath := "/etcd/test/root"
etcdKV := kv.NewEtcdKV(cli, rootPath)
etcdKV := etcdkv.NewEtcdKV(cli, rootPath)
defer etcdKV.Close()
defer etcdKV.RemoveWithPrefix("")
@ -188,7 +188,7 @@ func TestEtcdKV_MultiSaveAndRemove(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
rootPath := "/etcd/test/root"
etcdKV := kv.NewEtcdKV(cli, rootPath)
etcdKV := etcdkv.NewEtcdKV(cli, rootPath)
defer etcdKV.Close()
defer etcdKV.RemoveWithPrefix("")

View File

@ -8,6 +8,11 @@ type Base interface {
MultiSave(kvs map[string]string) error
Remove(key string) error
MultiRemove(keys []string) error
MultiSaveAndRemove(saves map[string]string, removals []string) error
Close()
}
type TxnBase interface {
Base
MultiSaveAndRemove(saves map[string]string, removals []string) error
}

View File

@ -1,4 +1,4 @@
package kv
package memkv
import (
"sync"

View File

@ -0,0 +1,149 @@
package miniokv
import (
"context"
"io"
"log"
"strings"
"github.com/minio/minio-go/v7"
)
type MinIOKV struct {
ctx context.Context
minioClient *minio.Client
bucketName string
}
// NewMinIOKV creates a new MinIO kv.
func NewMinIOKV(ctx context.Context, client *minio.Client, bucketName string) (*MinIOKV, error) {
bucketExists, err := client.BucketExists(ctx, bucketName)
if err != nil {
return nil, err
}
if !bucketExists {
err = client.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{})
if err != nil {
return nil, err
}
}
return &MinIOKV{
ctx: ctx,
minioClient: client,
bucketName: bucketName,
}, nil
}
func (kv *MinIOKV) LoadWithPrefix(key string) ([]string, []string, error) {
objects := kv.minioClient.ListObjects(kv.ctx, kv.bucketName, minio.ListObjectsOptions{Prefix: key})
var objectsKeys []string
var objectsValues []string
for object := range objects {
objectsKeys = append(objectsKeys, object.Key)
}
objectsValues, err := kv.MultiLoad(objectsKeys)
if err != nil {
log.Printf("cannot load value with prefix:%s", key)
}
return objectsKeys, objectsValues, nil
}
func (kv *MinIOKV) Load(key string) (string, error) {
object, err := kv.minioClient.GetObject(kv.ctx, kv.bucketName, key, minio.GetObjectOptions{})
if err != nil {
return "", err
}
buf := new(strings.Builder)
_, err = io.Copy(buf, object)
if err != nil && err != io.EOF {
return "", err
}
return buf.String(), nil
}
func (kv *MinIOKV) MultiLoad(keys []string) ([]string, error) {
var resultErr error
var objectsValues []string
for _, key := range keys {
objectValue, err := kv.Load(key)
if err != nil {
if resultErr == nil {
resultErr = err
}
}
objectsValues = append(objectsValues, objectValue)
}
return objectsValues, resultErr
}
func (kv *MinIOKV) Save(key, value string) error {
reader := strings.NewReader(value)
_, err := kv.minioClient.PutObject(kv.ctx, kv.bucketName, key, reader, int64(len(value)), minio.PutObjectOptions{})
if err != nil {
return err
}
return err
}
func (kv *MinIOKV) MultiSave(kvs map[string]string) error {
var resultErr error
for key, value := range kvs {
err := kv.Save(key, value)
if err != nil {
if resultErr == nil {
resultErr = err
}
}
}
return resultErr
}
func (kv *MinIOKV) RemoveWithPrefix(prefix string) error {
objectsCh := make(chan minio.ObjectInfo)
go func() {
defer close(objectsCh)
for object := range kv.minioClient.ListObjects(kv.ctx, kv.bucketName, minio.ListObjectsOptions{Prefix: prefix}) {
objectsCh <- object
}
}()
for rErr := range kv.minioClient.RemoveObjects(kv.ctx, kv.bucketName, objectsCh, minio.RemoveObjectsOptions{GovernanceBypass: true}) {
if rErr.Err != nil {
return rErr.Err
}
}
return nil
}
func (kv *MinIOKV) Remove(key string) error {
err := kv.minioClient.RemoveObject(kv.ctx, kv.bucketName, string(key), minio.RemoveObjectOptions{})
return err
}
func (kv *MinIOKV) MultiRemove(keys []string) error {
var resultErr error
for _, key := range keys {
err := kv.Remove(key)
if err != nil {
if resultErr == nil {
resultErr = err
}
}
}
return resultErr
}
func (kv *MinIOKV) Close() {
}

View File

@ -0,0 +1,195 @@
package miniokv_test
import (
"context"
"strconv"
"testing"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
"github.com/stretchr/testify/assert"
)
var Params paramtable.BaseTable
func TestMinIOKV_Load(t *testing.T) {
Params.Init()
endPoint, _ := Params.Load("_MinioAddress")
accessKeyID, _ := Params.Load("minio.accessKeyID")
secretAccessKey, _ := Params.Load("minio.secretAccessKey")
useSSLStr, _ := Params.Load("minio.useSSL")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
useSSL, _ := strconv.ParseBool(useSSLStr)
minioClient, err := minio.New(endPoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
Secure: useSSL,
})
assert.Nil(t, err)
bucketName := "fantastic-tech-test"
MinIOKV, err := miniokv.NewMinIOKV(ctx, minioClient, bucketName)
assert.Nil(t, err)
defer MinIOKV.RemoveWithPrefix("")
err = MinIOKV.Save("abc", "123")
assert.Nil(t, err)
err = MinIOKV.Save("abcd", "1234")
assert.Nil(t, err)
val, err := MinIOKV.Load("abc")
assert.Nil(t, err)
assert.Equal(t, val, "123")
keys, vals, err := MinIOKV.LoadWithPrefix("abc")
assert.Nil(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, len(keys), 2)
assert.Equal(t, vals[0], "123")
assert.Equal(t, vals[1], "1234")
err = MinIOKV.Save("key_1", "123")
assert.Nil(t, err)
err = MinIOKV.Save("key_2", "456")
assert.Nil(t, err)
err = MinIOKV.Save("key_3", "789")
assert.Nil(t, err)
keys = []string{"key_1", "key_100"}
vals, err = MinIOKV.MultiLoad(keys)
assert.NotNil(t, err)
assert.Equal(t, len(vals), len(keys))
assert.Equal(t, vals[0], "123")
assert.Empty(t, vals[1])
keys = []string{"key_1", "key_2"}
vals, err = MinIOKV.MultiLoad(keys)
assert.Nil(t, err)
assert.Equal(t, len(vals), len(keys))
assert.Equal(t, vals[0], "123")
assert.Equal(t, vals[1], "456")
}
func TestMinIOKV_MultiSave(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Params.Init()
endPoint, _ := Params.Load("_MinioAddress")
accessKeyID, _ := Params.Load("minio.accessKeyID")
secretAccessKey, _ := Params.Load("minio.secretAccessKey")
useSSLStr, _ := Params.Load("minio.useSSL")
useSSL, _ := strconv.ParseBool(useSSLStr)
minioClient, err := minio.New(endPoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
Secure: useSSL,
})
assert.Nil(t, err)
bucketName := "fantastic-tech-test"
MinIOKV, err := miniokv.NewMinIOKV(ctx, minioClient, bucketName)
assert.Nil(t, err)
defer MinIOKV.RemoveWithPrefix("")
err = MinIOKV.Save("key_1", "111")
assert.Nil(t, err)
kvs := map[string]string{
"key_1": "123",
"key_2": "456",
}
err = MinIOKV.MultiSave(kvs)
assert.Nil(t, err)
val, err := MinIOKV.Load("key_1")
assert.Nil(t, err)
assert.Equal(t, val, "123")
}
func TestMinIOKV_Remove(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Params.Init()
endPoint, _ := Params.Load("_MinioAddress")
accessKeyID, _ := Params.Load("minio.accessKeyID")
secretAccessKey, _ := Params.Load("minio.secretAccessKey")
useSSLStr, _ := Params.Load("minio.useSSL")
useSSL, _ := strconv.ParseBool(useSSLStr)
minioClient, err := minio.New(endPoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
Secure: useSSL,
})
assert.Nil(t, err)
bucketName := "fantastic-tech-test"
MinIOKV, err := miniokv.NewMinIOKV(ctx, minioClient, bucketName)
assert.Nil(t, err)
defer MinIOKV.RemoveWithPrefix("")
err = MinIOKV.Save("key_1", "123")
assert.Nil(t, err)
err = MinIOKV.Save("key_2", "456")
assert.Nil(t, err)
val, err := MinIOKV.Load("key_1")
assert.Nil(t, err)
assert.Equal(t, val, "123")
// delete "key_1"
err = MinIOKV.Remove("key_1")
assert.Nil(t, err)
val, err = MinIOKV.Load("key_1")
assert.Error(t, err)
assert.Empty(t, val)
val, err = MinIOKV.Load("key_2")
assert.Nil(t, err)
assert.Equal(t, val, "456")
keys, vals, err := MinIOKV.LoadWithPrefix("key")
assert.Nil(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, len(keys), 1)
assert.Equal(t, vals[0], "456")
// MultiRemove
err = MinIOKV.Save("key_1", "111")
assert.Nil(t, err)
kvs := map[string]string{
"key_1": "123",
"key_2": "456",
"key_3": "789",
"key_4": "012",
}
err = MinIOKV.MultiSave(kvs)
assert.Nil(t, err)
val, err = MinIOKV.Load("key_1")
assert.Nil(t, err)
assert.Equal(t, val, "123")
val, err = MinIOKV.Load("key_3")
assert.Nil(t, err)
assert.Equal(t, val, "789")
keys = []string{"key_1", "key_2", "key_3"}
err = MinIOKV.MultiRemove(keys)
assert.Nil(t, err)
val, err = MinIOKV.Load("key_1")
assert.Error(t, err)
assert.Empty(t, val)
}

View File

@ -5,6 +5,7 @@ import (
"log"
"github.com/golang/protobuf/proto"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
@ -85,8 +86,23 @@ func (t *createCollectionTask) Execute() error {
// TODO: initial partition?
PartitionTags: make([]string, 0),
}
err = t.mt.AddCollection(&collection)
if err != nil {
return err
}
return t.mt.AddCollection(&collection)
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: t.req.Timestamp,
EndTimestamp: t.req.Timestamp,
HashValues: []uint32{0},
}
timeTickMsg := &ms.CreateCollectionMsg{
BaseMsg: baseMsg,
CreateCollectionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@ -102,7 +118,7 @@ func (t *dropCollectionTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return Timestamp(t.req.Timestamp), nil
return t.req.Timestamp, nil
}
func (t *dropCollectionTask) Execute() error {
@ -118,7 +134,29 @@ func (t *dropCollectionTask) Execute() error {
collectionID := collectionMeta.ID
return t.mt.DeleteCollection(collectionID)
err = t.mt.DeleteCollection(collectionID)
if err != nil {
return err
}
ts, err := t.Ts()
if err != nil {
return err
}
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: ts,
EndTimestamp: ts,
HashValues: []uint32{0},
}
timeTickMsg := &ms.DropCollectionMsg{
BaseMsg: baseMsg,
DropCollectionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@ -134,7 +172,7 @@ func (t *hasCollectionTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return Timestamp(t.req.Timestamp), nil
return t.req.Timestamp, nil
}
func (t *hasCollectionTask) Execute() error {
@ -147,8 +185,8 @@ func (t *hasCollectionTask) Execute() error {
if err == nil {
t.hasCollection = true
}
return nil
}
//////////////////////////////////////////////////////////////////////////
@ -181,6 +219,7 @@ func (t *describeCollectionTask) Execute() error {
t.description.Schema = collection.Schema
return nil
}
//////////////////////////////////////////////////////////////////////////

View File

@ -19,6 +19,7 @@ import (
func TestMaster_CollectionTask(t *testing.T) {
Init()
refreshMasterAddress()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
@ -55,6 +56,7 @@ func TestMaster_CollectionTask(t *testing.T) {
// msgChannel
ProxyTimeTickChannelNames: []string{"proxy1", "proxy2"},
WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
DDChannelNames: []string{"dd1", "dd2"},
InsertChannelNames: []string{"dm0", "dm1"},
K2SChannelNames: []string{"k2s0", "k2s1"},
QueryNodeStatsChannelName: "statistic",
@ -63,10 +65,10 @@ func TestMaster_CollectionTask(t *testing.T) {
svr, err := CreateServer(ctx)
assert.Nil(t, err)
err = svr.Run(10002)
err = svr.Run(int64(Params.Port))
assert.Nil(t, err)
conn, err := grpc.DialContext(ctx, "127.0.0.1:10002", grpc.WithInsecure(), grpc.WithBlock())
conn, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()

View File

@ -0,0 +1,79 @@
package master
import (
"log"
"github.com/zilliztech/milvus-distributed/internal/errors"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
type getSysConfigsTask struct {
baseTask
configkv *etcdkv.EtcdKV
req *internalpb.SysConfigRequest
keys []string
values []string
}
func (t *getSysConfigsTask) Type() internalpb.MsgType {
if t.req == nil {
log.Printf("null request")
return 0
}
return t.req.MsgType
}
func (t *getSysConfigsTask) Ts() (Timestamp, error) {
if t.req == nil {
return 0, errors.New("null request")
}
return t.req.Timestamp, nil
}
func (t *getSysConfigsTask) Execute() error {
if t.req == nil {
return errors.New("null request")
}
sc := &SysConfig{kv: t.configkv}
keyMap := make(map[string]bool)
// Load configs with prefix
for _, prefix := range t.req.KeyPrefixes {
prefixKeys, prefixVals, err := sc.GetByPrefix(prefix)
if err != nil {
return errors.Errorf("Load configs by prefix wrong: %s", err.Error())
}
t.keys = append(t.keys, prefixKeys...)
t.values = append(t.values, prefixVals...)
}
for _, key := range t.keys {
keyMap[key] = true
}
// Load specific configs
if len(t.req.Keys) > 0 {
// To clean up duplicated keys
cleanKeys := []string{}
for _, key := range t.req.Keys {
if v, ok := keyMap[key]; (!ok) || (ok && !v) {
cleanKeys = append(cleanKeys, key)
keyMap[key] = true
continue
}
log.Println("[GetSysConfigs] Warning: duplicate key:", key)
}
v, err := sc.Get(cleanKeys)
if err != nil {
return errors.Errorf("Load configs wrong: %s", err.Error())
}
t.keys = append(t.keys, cleanKeys...)
t.values = append(t.values, v...)
}
return nil
}

View File

@ -0,0 +1,151 @@
package master
import (
"context"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.etcd.io/etcd/clientv3"
"google.golang.org/grpc"
)
func TestMaster_ConfigTask(t *testing.T) {
Init()
refreshMasterAddress()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{Params.EtcdAddress}})
require.Nil(t, err)
_, err = etcdCli.Delete(ctx, "/test/root", clientv3.WithPrefix())
require.Nil(t, err)
Params = ParamTable{
Address: Params.Address,
Port: Params.Port,
EtcdAddress: Params.EtcdAddress,
MetaRootPath: "/test/root",
PulsarAddress: Params.PulsarAddress,
ProxyIDList: []typeutil.UniqueID{1, 2},
WriteNodeIDList: []typeutil.UniqueID{3, 4},
TopicNum: 5,
QueryNodeNum: 3,
SoftTimeTickBarrierInterval: 300,
// segment
SegmentSize: 536870912 / 1024 / 1024,
SegmentSizeFactor: 0.75,
DefaultRecordSize: 1024,
MinSegIDAssignCnt: 1048576 / 1024,
MaxSegIDAssignCnt: Params.MaxSegIDAssignCnt,
SegIDAssignExpiration: 2000,
// msgChannel
ProxyTimeTickChannelNames: []string{"proxy1", "proxy2"},
WriteNodeTimeTickChannelNames: []string{"write3", "write4"},
InsertChannelNames: []string{"dm0", "dm1"},
K2SChannelNames: []string{"k2s0", "k2s1"},
QueryNodeStatsChannelName: "statistic",
MsgChannelSubName: Params.MsgChannelSubName,
}
svr, err := CreateServer(ctx)
require.Nil(t, err)
err = svr.Run(int64(Params.Port))
defer svr.Close()
require.Nil(t, err)
conn, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
require.Nil(t, err)
defer conn.Close()
cli := masterpb.NewMasterClient(conn)
testKeys := []string{
"/etcd/address",
"/master/port",
"/master/proxyidlist",
"/master/segmentthresholdfactor",
"/pulsar/token",
"/reader/stopflag",
"/proxy/timezone",
"/proxy/network/address",
"/proxy/storage/path",
"/storage/accesskey",
}
testVals := []string{
"localhost",
"53100",
"[1 2]",
"0.75",
"eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY",
"-1",
"UTC+8",
"0.0.0.0",
"/var/lib/milvus",
"",
}
sc := SysConfig{kv: svr.kvBase}
sc.InitFromFile(".")
configRequest := &internalpb.SysConfigRequest{
MsgType: internalpb.MsgType_kGetSysConfigs,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Keys: testKeys,
KeyPrefixes: []string{},
}
response, err := cli.GetSysConfigs(ctx, configRequest)
assert.Nil(t, err)
assert.ElementsMatch(t, testKeys, response.Keys)
assert.ElementsMatch(t, testVals, response.Values)
assert.Equal(t, len(response.GetKeys()), len(response.GetValues()))
configRequest = &internalpb.SysConfigRequest{
MsgType: internalpb.MsgType_kGetSysConfigs,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Keys: []string{},
KeyPrefixes: []string{"/master"},
}
response, err = cli.GetSysConfigs(ctx, configRequest)
assert.Nil(t, err)
for i := range response.GetKeys() {
assert.True(t, strings.HasPrefix(response.GetKeys()[i], "/master"))
}
assert.Equal(t, len(response.GetKeys()), len(response.GetValues()))
t.Run("Test duplicate keys and key prefix", func(t *testing.T) {
configRequest.Keys = []string{}
configRequest.KeyPrefixes = []string{"/master"}
resp, err := cli.GetSysConfigs(ctx, configRequest)
require.Nil(t, err)
assert.Equal(t, len(resp.GetKeys()), len(resp.GetValues()))
assert.NotEqual(t, 0, len(resp.GetKeys()))
configRequest.Keys = []string{"/master/port"}
configRequest.KeyPrefixes = []string{"/master"}
respDup, err := cli.GetSysConfigs(ctx, configRequest)
require.Nil(t, err)
assert.Equal(t, len(respDup.GetKeys()), len(respDup.GetValues()))
assert.NotEqual(t, 0, len(respDup.GetKeys()))
assert.Equal(t, len(respDup.GetKeys()), len(resp.GetKeys()))
})
}

View File

@ -0,0 +1,111 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
master: # 21
address: localhost
port: 53100
pulsarmoniterinterval: 1
pulsartopic: "monitor-topic"
proxyidlist: [1, 2]
proxyTimeSyncChannels: ["proxy1", "proxy2"]
proxyTimeSyncSubName: "proxy-topic"
softTimeTickBarrierInterval: 500
writeidlist: [3, 4]
writeTimeSyncChannels: ["write3", "write4"]
writeTimeSyncSubName: "write-topic"
dmTimeSyncChannels: ["dm5", "dm6"]
k2sTimeSyncChannels: ["k2s7", "k2s8"]
defaultSizePerRecord: 1024
minimumAssignSize: 1048576
segmentThreshold: 536870912
segmentExpireDuration: 2000
segmentThresholdFactor: 0.75
querynodenum: 1
writenodenum: 1
statsChannels: "statistic"
etcd: # 4
address: localhost
port: 2379
rootpath: by-dev
segthreshold: 10000
timesync: # 1
interval: 400
storage: # 5
driver: TIKV
address: localhost
port: 2379
accesskey:
secretkey:
pulsar: # 6
authentication: false
user: user-default
token: eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY
address: localhost
port: 6650
topicnum: 128
reader: # 7
clientid: 0
stopflag: -1
readerqueuesize: 10000
searchchansize: 10000
key2segchansize: 10000
topicstart: 0
topicend: 128
writer: # 8
clientid: 0
stopflag: -2
readerqueuesize: 10000
searchbyidchansize: 10000
parallelism: 100
topicstart: 0
topicend: 128
bucket: "zilliz-hz"
proxy: # 21
timezone: UTC+8
proxy_id: 1
numReaderNodes: 2
tsoSaveInterval: 200
timeTickInterval: 200
pulsarTopics:
readerTopicPrefix: "milvusReader"
numReaderTopics: 2
deleteTopic: "milvusDeleter"
queryTopic: "milvusQuery"
resultTopic: "milvusResult"
resultGroup: "milvusResultGroup"
timeTickTopic: "milvusTimeTick"
network:
address: 0.0.0.0
port: 19530
logs:
level: debug
trace.enable: true
path: /tmp/logs
max_log_file_size: 1024MB
log_rotate_num: 0
storage:
path: /var/lib/milvus
auto_flush_interval: 1
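For reference, the testKeys / testVals pairs asserted in TestMaster_ConfigTask earlier appear to be read from this file via SysConfig.InitFromFile: /etcd/address → localhost, /master/port → 53100, /master/proxyidlist → [1 2], /master/segmentthresholdfactor → 0.75, /pulsar/token → the JWT above, /reader/stopflag → -1, /proxy/timezone → UTC+8, /proxy/network/address → 0.0.0.0, /proxy/storage/path → /var/lib/milvus, and /storage/accesskey → "" (empty).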

View File

@ -36,7 +36,7 @@ type GlobalTSOAllocator struct {
}
// NewGlobalTSOAllocator creates a new global TSO allocator.
func NewGlobalTSOAllocator(key string, kvBase kv.Base) *GlobalTSOAllocator {
func NewGlobalTSOAllocator(key string, kvBase kv.TxnBase) *GlobalTSOAllocator {
var saveInterval = 3 * time.Second
return &GlobalTSOAllocator{
tso: &timestampOracle{

View File

@ -1,7 +1,6 @@
package master
import (
"os"
"testing"
"time"
@ -12,16 +11,6 @@ import (
var gTestTsoAllocator Allocator
var gTestIDAllocator *GlobalIDAllocator
func TestMain(m *testing.M) {
Params.Init()
etcdAddr := Params.EtcdAddress
gTestTsoAllocator = NewGlobalTSOAllocator("timestamp", tsoutil.NewTSOKVBase([]string{etcdAddr}, "/test/root/kv", "tso"))
gTestIDAllocator = NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddr}, "/test/root/kv", "gid"))
exitCode := m.Run()
os.Exit(exitCode)
}
func TestGlobalTSOAllocator_Initialize(t *testing.T) {
err := gTestTsoAllocator.Initialize()
assert.Nil(t, err)

View File

@ -359,6 +359,43 @@ func (s *Master) ShowPartitions(ctx context.Context, in *internalpb.ShowPartitio
return t.(*showPartitionTask).stringListResponse, nil
}
func (s *Master) GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error) {
var t task = &getSysConfigsTask{
req: in,
configkv: s.kvBase,
baseTask: baseTask{
sch: s.scheduler,
mt: s.metaTable,
cv: make(chan error),
},
keys: []string{},
values: []string{},
}
response := &servicepb.SysConfigResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
},
}
var err = s.scheduler.Enqueue(t)
if err != nil {
response.Status.Reason = "Enqueue failed: " + err.Error()
return response, nil
}
err = t.WaitToFinish(ctx)
if err != nil {
response.Status.Reason = "Get System Config failed: " + err.Error()
return response, nil
}
response.Keys = t.(*getSysConfigsTask).keys
response.Values = t.(*getSysConfigsTask).values
response.Status.ErrorCode = commonpb.ErrorCode_SUCCESS
return response, nil
}
//----------------------------------------Internal GRPC Service--------------------------------
func (s *Master) AllocTimestamp(ctx context.Context, request *internalpb.TsoRequest) (*internalpb.TsoResponse, error) {

View File

@ -17,6 +17,7 @@ import (
func TestMaster_CreateCollection(t *testing.T) {
Init()
refreshMasterAddress()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
@ -62,10 +63,10 @@ func TestMaster_CreateCollection(t *testing.T) {
svr, err := CreateServer(ctx)
assert.Nil(t, err)
err = svr.Run(10001)
err = svr.Run(int64(Params.Port))
assert.Nil(t, err)
conn, err := grpc.DialContext(ctx, "127.0.0.1:10001", grpc.WithInsecure(), grpc.WithBlock())
conn, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()

View File

@ -9,7 +9,7 @@ type GlobalIDAllocator struct {
allocator Allocator
}
func NewGlobalIDAllocator(key string, base kv.Base) *GlobalIDAllocator {
func NewGlobalIDAllocator(key string, base kv.TxnBase) *GlobalIDAllocator {
return &GlobalIDAllocator{
allocator: NewGlobalTSOAllocator(key, base),
}

View File

@ -10,8 +10,7 @@ import (
"sync/atomic"
"time"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
@ -43,7 +42,7 @@ type Master struct {
grpcServer *grpc.Server
grpcErr chan error
kvBase *kv.EtcdKV
kvBase *etcdkv.EtcdKV
scheduler *ddRequestScheduler
metaTable *metaTable
timesSyncMsgProducer *timeSyncMsgProducer
@ -64,12 +63,12 @@ type Master struct {
tsoAllocator *GlobalTSOAllocator
}
func newKVBase(kvRoot string, etcdAddr []string) *kv.EtcdKV {
func newKVBase(kvRoot string, etcdAddr []string) *etcdkv.EtcdKV {
cli, _ := clientv3.New(clientv3.Config{
Endpoints: etcdAddr,
DialTimeout: 5 * time.Second,
})
kvBase := kv.NewEtcdKV(cli, kvRoot)
kvBase := etcdkv.NewEtcdKV(cli, kvRoot)
return kvBase
}
@ -90,8 +89,8 @@ func CreateServer(ctx context.Context) (*Master, error) {
if err != nil {
return nil, err
}
etcdkv := kv.NewEtcdKV(etcdClient, metaRootPath)
metakv, err := NewMetaTable(etcdkv)
etcdKV := etcdkv.NewEtcdKV(etcdClient, metaRootPath)
metakv, err := NewMetaTable(etcdKV)
if err != nil {
return nil, err
}
@ -123,6 +122,11 @@ func CreateServer(ctx context.Context) (*Master, error) {
}
tsMsgProducer.SetWriteNodeTtBarrier(writeTimeTickBarrier)
pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDDStream.SetPulsarClient(pulsarAddr)
pulsarDDStream.CreatePulsarProducers(Params.DDChannelNames)
tsMsgProducer.SetDDSyncStream(pulsarDDStream)
pulsarDMStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDMStream.SetPulsarClient(pulsarAddr)
pulsarDMStream.CreatePulsarProducers(Params.InsertChannelNames)
@ -161,7 +165,10 @@ func CreateServer(ctx context.Context) (*Master, error) {
return nil, err
}
m.scheduler = NewDDRequestScheduler(func() (UniqueID, error) { return m.idAllocator.AllocOne() })
m.scheduler = NewDDRequestScheduler(ctx)
m.scheduler.SetDDMsgStream(pulsarDDStream)
m.scheduler.SetIDAllocator(func() (UniqueID, error) { return m.idAllocator.AllocOne() })
m.segmentMgr = NewSegmentManager(metakv,
func() (UniqueID, error) { return m.idAllocator.AllocOne() },
func() (Timestamp, error) { return m.tsoAllocator.AllocOne() },
@ -248,6 +255,11 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
return err
}
s.serverLoopWg.Add(1)
if err := s.scheduler.Start(); err != nil {
return err
}
s.serverLoopWg.Add(1)
go s.grpcLoop(grpcPort)
@ -255,9 +267,6 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
return err
}
s.serverLoopWg.Add(1)
go s.tasksExecutionLoop()
s.serverLoopWg.Add(1)
go s.segmentStatisticsLoop()
@ -270,6 +279,8 @@ func (s *Master) startServerLoop(ctx context.Context, grpcPort int64) error {
func (s *Master) stopServerLoop() {
s.timesSyncMsgProducer.Close()
s.serverLoopWg.Done()
s.scheduler.Close()
s.serverLoopWg.Done()
if s.grpcServer != nil {
s.grpcServer.GracefulStop()
@ -337,33 +348,6 @@ func (s *Master) tsLoop() {
}
}
func (s *Master) tasksExecutionLoop() {
defer s.serverLoopWg.Done()
ctx, cancel := context.WithCancel(s.serverLoopCtx)
defer cancel()
for {
select {
case task := <-s.scheduler.reqQueue:
timeStamp, err := (task).Ts()
if err != nil {
log.Println(err)
} else {
if timeStamp < s.scheduler.scheduleTimeStamp {
task.Notify(errors.Errorf("input timestamp = %d, schduler timestamp = %d", timeStamp, s.scheduler.scheduleTimeStamp))
} else {
s.scheduler.scheduleTimeStamp = timeStamp
err = task.Execute()
task.Notify(err)
}
}
case <-ctx.Done():
log.Print("server is closed, exit task execution loop")
return
}
}
}
func (s *Master) segmentStatisticsLoop() {
defer s.serverLoopWg.Done()
defer s.segmentStatusMsg.Close()

View File

@ -0,0 +1,297 @@
package master
import (
"context"
"log"
"math/rand"
"os"
"strconv"
"testing"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
"go.uber.org/zap"
"google.golang.org/grpc"
)
var testPORT = 53200
func genMasterTestPort() int64 {
testPORT++
return int64(testPORT)
}
func refreshMasterAddress() {
masterPort := genMasterTestPort()
Params.Port = int(masterPort)
masterAddr := makeMasterAddress(masterPort)
Params.Address = masterAddr
}
func makeMasterAddress(port int64) string {
masterAddr := "127.0.0.1:" + strconv.FormatInt(port, 10)
return masterAddr
}
func makeNewChannalNames(names []string, suffix string) []string {
var ret []string
for _, name := range names {
ret = append(ret, name+suffix)
}
return ret
}
func refreshChannelNames() {
suffix := "_test" + strconv.FormatInt(rand.Int63n(100), 10)
Params.DDChannelNames = makeNewChannalNames(Params.DDChannelNames, suffix)
Params.WriteNodeTimeTickChannelNames = makeNewChannalNames(Params.WriteNodeTimeTickChannelNames, suffix)
Params.InsertChannelNames = makeNewChannalNames(Params.InsertChannelNames, suffix)
Params.K2SChannelNames = makeNewChannalNames(Params.K2SChannelNames, suffix)
Params.ProxyTimeTickChannelNames = makeNewChannalNames(Params.ProxyTimeTickChannelNames, suffix)
}
func receiveTimeTickMsg(stream *ms.MsgStream) bool {
for {
result := (*stream).Consume()
if len(result.Msgs) > 0 {
return true
}
}
}
func getTimeTickMsgPack(ttmsgs [][2]uint64) *ms.MsgPack {
msgPack := ms.MsgPack{}
for _, vi := range ttmsgs {
msgPack.Msgs = append(msgPack.Msgs, getTtMsg(internalPb.MsgType_kTimeTick, UniqueID(vi[0]), Timestamp(vi[1])))
}
return &msgPack
}
func TestMain(m *testing.M) {
Init()
refreshMasterAddress()
refreshChannelNames()
etcdAddr := Params.EtcdAddress
gTestTsoAllocator = NewGlobalTSOAllocator("timestamp", tsoutil.NewTSOKVBase([]string{etcdAddr}, "/test/root/kv", "tso"))
gTestIDAllocator = NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddr}, "/test/root/kv", "gid"))
exitCode := m.Run()
os.Exit(exitCode)
}
func TestMaster(t *testing.T) {
Init()
refreshMasterAddress()
pulsarAddr := Params.PulsarAddress
Params.ProxyIDList = []UniqueID{0}
//Param
// Creates server.
ctx, cancel := context.WithCancel(context.Background())
svr, err := CreateServer(ctx)
if err != nil {
log.Print("create server failed", zap.Error(err))
}
if err := svr.Run(int64(Params.Port)); err != nil {
log.Fatal("run server failed", zap.Error(err))
}
proxyTimeTickStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
proxyTimeTickStream.SetPulsarClient(pulsarAddr)
proxyTimeTickStream.CreatePulsarProducers(Params.ProxyTimeTickChannelNames)
proxyTimeTickStream.Start()
writeNodeStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
writeNodeStream.SetPulsarClient(pulsarAddr)
writeNodeStream.CreatePulsarProducers(Params.WriteNodeTimeTickChannelNames)
writeNodeStream.Start()
ddMs := ms.NewPulsarMsgStream(ctx, 1024)
ddMs.SetPulsarClient(pulsarAddr)
ddMs.CreatePulsarConsumers(Params.DDChannelNames, "DDStream", ms.NewUnmarshalDispatcher(), 1024)
ddMs.Start()
dMMs := ms.NewPulsarMsgStream(ctx, 1024)
dMMs.SetPulsarClient(pulsarAddr)
dMMs.CreatePulsarConsumers(Params.InsertChannelNames, "DMStream", ms.NewUnmarshalDispatcher(), 1024)
dMMs.Start()
k2sMs := ms.NewPulsarMsgStream(ctx, 1024)
k2sMs.SetPulsarClient(pulsarAddr)
k2sMs.CreatePulsarConsumers(Params.K2SChannelNames, "K2SStream", ms.NewUnmarshalDispatcher(), 1024)
k2sMs.Start()
ttsoftmsgs := [][2]uint64{
{0, 10},
}
msgSoftPackAddr := getTimeTickMsgPack(ttsoftmsgs)
proxyTimeTickStream.Produce(msgSoftPackAddr)
var dMMsgstream ms.MsgStream = dMMs
assert.True(t, receiveTimeTickMsg(&dMMsgstream))
var ddMsgstream ms.MsgStream = ddMs
assert.True(t, receiveTimeTickMsg(&ddMsgstream))
tthardmsgs := [][2]int{
{3, 10},
}
msghardPackAddr := getMsgPack(tthardmsgs)
writeNodeStream.Produce(msghardPackAddr)
var k2sMsgstream ms.MsgStream = k2sMs
assert.True(t, receiveTimeTickMsg(&k2sMsgstream))
conn, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()
cli := masterpb.NewMasterClient(conn)
sch := schemapb.CollectionSchema{
Name: "name" + strconv.FormatUint(rand.Uint64(), 10),
Description: "test collection",
AutoID: false,
Fields: []*schemapb.FieldSchema{},
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
createCollectionReq := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
st, err := cli.CreateCollection(ctx, &createCollectionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var consumeMsg ms.MsgStream = ddMs
var createCollectionMsg *ms.CreateCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createCollectionMsg = v.(*ms.CreateCollectionMsg)
}
break
}
}
assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)
////////////////////////////CreatePartition////////////////////////
partitionName := "partitionName" + strconv.FormatUint(rand.Uint64(), 10)
createPartitionReq := internalpb.CreatePartitionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
st, err = cli.CreatePartition(ctx, &createPartitionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var createPartitionMsg *ms.CreatePartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createPartitionMsg = v.(*ms.CreatePartitionMsg)
}
break
}
}
assert.Equal(t, createPartitionReq.MsgType, createPartitionMsg.CreatePartitionRequest.MsgType)
assert.Equal(t, createPartitionReq.ReqID, createPartitionMsg.CreatePartitionRequest.ReqID)
assert.Equal(t, createPartitionReq.Timestamp, createPartitionMsg.CreatePartitionRequest.Timestamp)
assert.Equal(t, createPartitionReq.ProxyID, createPartitionMsg.CreatePartitionRequest.ProxyID)
assert.Equal(t, createPartitionReq.PartitionName.CollectionName, createPartitionMsg.CreatePartitionRequest.PartitionName.CollectionName)
assert.Equal(t, createPartitionReq.PartitionName.Tag, createPartitionMsg.CreatePartitionRequest.PartitionName.Tag)
////////////////////////////DropPartition////////////////////////
dropPartitionReq := internalpb.DropPartitionRequest{
MsgType: internalpb.MsgType_kDropPartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
st, err = cli.DropPartition(ctx, &dropPartitionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var dropPartitionMsg *ms.DropPartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropPartitionMsg = v.(*ms.DropPartitionMsg)
}
break
}
}
assert.Equal(t, dropPartitionReq.MsgType, dropPartitionMsg.DropPartitionRequest.MsgType)
assert.Equal(t, dropPartitionReq.ReqID, dropPartitionMsg.DropPartitionRequest.ReqID)
assert.Equal(t, dropPartitionReq.Timestamp, dropPartitionMsg.DropPartitionRequest.Timestamp)
assert.Equal(t, dropPartitionReq.ProxyID, dropPartitionMsg.DropPartitionRequest.ProxyID)
assert.Equal(t, dropPartitionReq.PartitionName.CollectionName, dropPartitionMsg.DropPartitionRequest.PartitionName.CollectionName)
////////////////////////////DropCollection////////////////////////
dropCollectionReq := internalpb.DropCollectionRequest{
MsgType: internalpb.MsgType_kDropCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
CollectionName: &servicepb.CollectionName{CollectionName: sch.Name},
}
st, err = cli.DropCollection(ctx, &dropCollectionReq)
assert.Nil(t, err)
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
var dropCollectionMsg *ms.DropCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropCollectionMsg = v.(*ms.DropCollectionMsg)
}
break
}
}
assert.Equal(t, dropCollectionReq.MsgType, dropCollectionMsg.DropCollectionRequest.MsgType)
assert.Equal(t, dropCollectionReq.ReqID, dropCollectionMsg.DropCollectionRequest.ReqID)
assert.Equal(t, dropCollectionReq.Timestamp, dropCollectionMsg.DropCollectionRequest.Timestamp)
assert.Equal(t, dropCollectionReq.ProxyID, dropCollectionMsg.DropCollectionRequest.ProxyID)
assert.Equal(t, dropCollectionReq.CollectionName.CollectionName, dropCollectionMsg.DropCollectionRequest.CollectionName.CollectionName)
cancel()
svr.Close()
}


@ -11,7 +11,7 @@ import (
)
type metaTable struct {
client *kv.EtcdKV // client of a reliable kv service, i.e. etcd client
client kv.TxnBase // client of a reliable transactional kv service, e.g. etcd
tenantID2Meta map[UniqueID]pb.TenantMeta // tenant id to tenant meta
proxyID2Meta map[UniqueID]pb.ProxyMeta // proxy id to proxy meta
collID2Meta map[UniqueID]pb.CollectionMeta // collection id to collection meta
@ -23,7 +23,7 @@ type metaTable struct {
ddLock sync.RWMutex
}
func NewMetaTable(kv *kv.EtcdKV) (*metaTable, error) {
func NewMetaTable(kv kv.TxnBase) (*metaTable, error) {
mt := &metaTable{
client: kv,
tenantLock: sync.RWMutex{},


@ -7,7 +7,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/kv"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"go.etcd.io/etcd/clientv3"
@ -19,7 +19,7 @@ func TestMetaTable_Collection(t *testing.T) {
etcdAddr := Params.EtcdAddress
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
assert.Nil(t, err)
@ -157,7 +157,7 @@ func TestMetaTable_DeletePartition(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
assert.Nil(t, err)
@ -252,7 +252,7 @@ func TestMetaTable_Segment(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
assert.Nil(t, err)
@ -333,7 +333,7 @@ func TestMetaTable_UpdateSegment(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
assert.Nil(t, err)
@ -379,7 +379,7 @@ func TestMetaTable_AddPartition_Limit(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
etcdKV := kv.NewEtcdKV(cli, "/etcd/test/root")
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
_, err = cli.Delete(context.TODO(), "/etcd/test/root", clientv3.WithPrefix())
assert.Nil(t, err)


@ -40,6 +40,7 @@ type ParamTable struct {
// msgChannel
ProxyTimeTickChannelNames []string
WriteNodeTimeTickChannelNames []string
DDChannelNames []string
InsertChannelNames []string
K2SChannelNames []string
QueryNodeStatsChannelName string
@ -86,6 +87,7 @@ func (p *ParamTable) Init() {
p.initProxyTimeTickChannelNames()
p.initWriteNodeTimeTickChannelNames()
p.initInsertChannelNames()
p.initDDChannelNames()
p.initK2SChannelNames()
p.initQueryNodeStatsChannelName()
p.initMsgChannelSubName()
@ -102,15 +104,7 @@ func (p *ParamTable) initAddress() {
}
func (p *ParamTable) initPort() {
masterPort, err := p.Load("master.port")
if err != nil {
panic(err)
}
port, err := strconv.Atoi(masterPort)
if err != nil {
panic(err)
}
p.Port = port
p.Port = p.ParseInt("master.port")
}
func (p *ParamTable) initEtcdAddress() {
@ -154,117 +148,40 @@ func (p *ParamTable) initKvRootPath() {
}
func (p *ParamTable) initTopicNum() {
insertChannelRange, err := p.Load("msgChannel.channelRange.insert")
iRangeStr, err := p.Load("msgChannel.channelRange.insert")
if err != nil {
panic(err)
}
channelRange := strings.Split(insertChannelRange, ",")
if len(channelRange) != 2 {
panic("Illegal channel range num")
}
channelBegin, err := strconv.Atoi(channelRange[0])
if err != nil {
panic(err)
}
channelEnd, err := strconv.Atoi(channelRange[1])
if err != nil {
panic(err)
}
if channelBegin < 0 || channelEnd < 0 {
panic("Illegal channel range value")
}
if channelBegin > channelEnd {
panic("Illegal channel range value")
}
p.TopicNum = channelEnd
rangeSlice := paramtable.ConvertRangeToIntRange(iRangeStr, ",")
p.TopicNum = rangeSlice[1] - rangeSlice[0]
}
func (p *ParamTable) initSegmentSize() {
threshold, err := p.Load("master.segment.size")
if err != nil {
panic(err)
}
segmentThreshold, err := strconv.ParseFloat(threshold, 64)
if err != nil {
panic(err)
}
p.SegmentSize = segmentThreshold
p.SegmentSize = p.ParseFloat("master.segment.size")
}
func (p *ParamTable) initSegmentSizeFactor() {
segFactor, err := p.Load("master.segment.sizeFactor")
if err != nil {
panic(err)
}
factor, err := strconv.ParseFloat(segFactor, 64)
if err != nil {
panic(err)
}
p.SegmentSizeFactor = factor
p.SegmentSizeFactor = p.ParseFloat("master.segment.sizeFactor")
}
func (p *ParamTable) initDefaultRecordSize() {
size, err := p.Load("master.segment.defaultSizePerRecord")
if err != nil {
panic(err)
}
res, err := strconv.ParseInt(size, 10, 64)
if err != nil {
panic(err)
}
p.DefaultRecordSize = res
p.DefaultRecordSize = p.ParseInt64("master.segment.defaultSizePerRecord")
}
func (p *ParamTable) initMinSegIDAssignCnt() {
size, err := p.Load("master.segment.minIDAssignCnt")
if err != nil {
panic(err)
}
res, err := strconv.ParseInt(size, 10, 64)
if err != nil {
panic(err)
}
p.MinSegIDAssignCnt = res
p.MinSegIDAssignCnt = p.ParseInt64("master.segment.minIDAssignCnt")
}
func (p *ParamTable) initMaxSegIDAssignCnt() {
size, err := p.Load("master.segment.maxIDAssignCnt")
if err != nil {
panic(err)
}
res, err := strconv.ParseInt(size, 10, 64)
if err != nil {
panic(err)
}
p.MaxSegIDAssignCnt = res
p.MaxSegIDAssignCnt = p.ParseInt64("master.segment.maxIDAssignCnt")
}
func (p *ParamTable) initSegIDAssignExpiration() {
duration, err := p.Load("master.segment.IDAssignExpiration")
if err != nil {
panic(err)
}
res, err := strconv.ParseInt(duration, 10, 64)
if err != nil {
panic(err)
}
p.SegIDAssignExpiration = res
p.SegIDAssignExpiration = p.ParseInt64("master.segment.IDAssignExpiration")
}
func (p *ParamTable) initQueryNodeNum() {
id, err := p.Load("nodeID.queryNodeIDList")
if err != nil {
panic(err)
}
ids := strings.Split(id, ",")
for _, i := range ids {
_, err := strconv.ParseInt(i, 10, 64)
if err != nil {
log.Panicf("load proxy id list error, %s", err.Error())
}
}
p.QueryNodeNum = len(ids)
p.QueryNodeNum = len(p.QueryNodeIDList())
}
func (p *ParamTable) initQueryNodeStatsChannelName() {
@ -276,20 +193,7 @@ func (p *ParamTable) initQueryNodeStatsChannelName() {
}
func (p *ParamTable) initProxyIDList() {
id, err := p.Load("nodeID.proxyIDList")
if err != nil {
log.Panicf("load proxy id list error, %s", err.Error())
}
ids := strings.Split(id, ",")
idList := make([]typeutil.UniqueID, 0, len(ids))
for _, i := range ids {
v, err := strconv.ParseInt(i, 10, 64)
if err != nil {
log.Panicf("load proxy id list error, %s", err.Error())
}
idList = append(idList, typeutil.UniqueID(v))
}
p.ProxyIDList = idList
p.ProxyIDList = p.BaseTable.ProxyIDList()
}
func (p *ParamTable) initProxyTimeTickChannelNames() {
@ -334,20 +238,7 @@ func (p *ParamTable) initSoftTimeTickBarrierInterval() {
}
func (p *ParamTable) initWriteNodeIDList() {
id, err := p.Load("nodeID.writeNodeIDList")
if err != nil {
log.Panic(err)
}
ids := strings.Split(id, ",")
idlist := make([]typeutil.UniqueID, 0, len(ids))
for _, i := range ids {
v, err := strconv.ParseInt(i, 10, 64)
if err != nil {
log.Panicf("load proxy id list error, %s", err.Error())
}
idlist = append(idlist, typeutil.UniqueID(v))
}
p.WriteNodeIDList = idlist
p.WriteNodeIDList = p.BaseTable.WriteNodeIDList()
}
func (p *ParamTable) initWriteNodeTimeTickChannelNames() {
@ -371,61 +262,58 @@ func (p *ParamTable) initWriteNodeTimeTickChannelNames() {
p.WriteNodeTimeTickChannelNames = channels
}
func (p *ParamTable) initDDChannelNames() {
prefix, err := p.Load("msgChannel.chanNamePrefix.dataDefinition")
if err != nil {
panic(err)
}
prefix += "-"
iRangeStr, err := p.Load("msgChannel.channelRange.dataDefinition")
if err != nil {
panic(err)
}
channelIDs := paramtable.ConvertRangeToIntSlice(iRangeStr, ",")
var ret []string
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
p.DDChannelNames = ret
}
func (p *ParamTable) initInsertChannelNames() {
ch, err := p.Load("msgChannel.chanNamePrefix.insert")
if err != nil {
log.Fatal(err)
}
channelRange, err := p.Load("msgChannel.channelRange.insert")
prefix, err := p.Load("msgChannel.chanNamePrefix.insert")
if err != nil {
panic(err)
}
chanRange := strings.Split(channelRange, ",")
if len(chanRange) != 2 {
panic("Illegal channel range num")
}
channelBegin, err := strconv.Atoi(chanRange[0])
prefix += "-"
iRangeStr, err := p.Load("msgChannel.channelRange.insert")
if err != nil {
panic(err)
}
channelEnd, err := strconv.Atoi(chanRange[1])
if err != nil {
panic(err)
channelIDs := paramtable.ConvertRangeToIntSlice(iRangeStr, ",")
var ret []string
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
if channelBegin < 0 || channelEnd < 0 {
panic("Illegal channel range value")
}
if channelBegin > channelEnd {
panic("Illegal channel range value")
}
channels := make([]string, channelEnd-channelBegin)
for i := 0; i < channelEnd-channelBegin; i++ {
channels[i] = ch + "-" + strconv.Itoa(channelBegin+i)
}
p.InsertChannelNames = channels
p.InsertChannelNames = ret
}
func (p *ParamTable) initK2SChannelNames() {
ch, err := p.Load("msgChannel.chanNamePrefix.k2s")
prefix, err := p.Load("msgChannel.chanNamePrefix.k2s")
if err != nil {
log.Fatal(err)
panic(err)
}
id, err := p.Load("nodeID.writeNodeIDList")
prefix += "-"
iRangeStr, err := p.Load("msgChannel.channelRange.k2s")
if err != nil {
log.Panicf("load write node id list error, %s", err.Error())
panic(err)
}
ids := strings.Split(id, ",")
channels := make([]string, 0, len(ids))
for _, i := range ids {
_, err := strconv.ParseInt(i, 10, 64)
if err != nil {
log.Panicf("load write node id list error, %s", err.Error())
}
channels = append(channels, ch+"-"+i)
channelIDs := paramtable.ConvertRangeToIntSlice(iRangeStr, ",")
var ret []string
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
p.K2SChannelNames = channels
p.K2SChannelNames = ret
}
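The three channel-name initializers above (DD, insert, and K2S) now share one pattern: load a name prefix, load a "begin, end" range string, and expand it into channel names through the paramtable helpers. As a minimal sketch of what ConvertRangeToIntRange and ConvertRangeToIntSlice are assumed to do for an input such as "0, 2" (illustration only, with lower-cased names; the real implementations live in the paramtable package):

package paramtablesketch

import (
	"strconv"
	"strings"
)

// convertRangeToIntRange mimics the assumed behavior of paramtable.ConvertRangeToIntRange:
// "0, 2" -> []int{0, 2}, i.e. the parsed begin/end pair.
func convertRangeToIntRange(rangeStr, sep string) []int {
	parts := strings.Split(rangeStr, sep)
	begin, _ := strconv.Atoi(strings.TrimSpace(parts[0]))
	end, _ := strconv.Atoi(strings.TrimSpace(parts[1]))
	return []int{begin, end}
}

// convertRangeToIntSlice mimics the assumed behavior of paramtable.ConvertRangeToIntSlice:
// "0, 2" -> []int{0, 1}, i.e. every channel ID in [begin, end).
func convertRangeToIntSlice(rangeStr, sep string) []int {
	r := convertRangeToIntRange(rangeStr, sep)
	ids := make([]int, 0, r[1]-r[0])
	for i := r[0]; i < r[1]; i++ {
		ids = append(ids, i)
	}
	return ids
}

Under this reading, initTopicNum computes rangeSlice[1]-rangeSlice[0] topics, and each initializer appends prefix+strconv.Itoa(ID) for every ID in the slice (the prefix already carries the trailing dash).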
func (p *ParamTable) initMaxPartitionNum() {


@ -12,132 +12,111 @@ func TestParamTable_Init(t *testing.T) {
}
func TestParamTable_Address(t *testing.T) {
Params.Init()
address := Params.Address
assert.Equal(t, address, "localhost")
}
func TestParamTable_Port(t *testing.T) {
Params.Init()
port := Params.Port
assert.Equal(t, port, 53100)
}
func TestParamTable_MetaRootPath(t *testing.T) {
Params.Init()
path := Params.MetaRootPath
assert.Equal(t, path, "by-dev/meta")
}
func TestParamTable_KVRootPath(t *testing.T) {
Params.Init()
path := Params.KvRootPath
assert.Equal(t, path, "by-dev/kv")
}
func TestParamTable_TopicNum(t *testing.T) {
Params.Init()
num := Params.TopicNum
fmt.Println("TopicNum:", num)
}
func TestParamTable_SegmentSize(t *testing.T) {
Params.Init()
size := Params.SegmentSize
assert.Equal(t, size, float64(512))
}
func TestParamTable_SegmentSizeFactor(t *testing.T) {
Params.Init()
factor := Params.SegmentSizeFactor
assert.Equal(t, factor, 0.75)
}
func TestParamTable_DefaultRecordSize(t *testing.T) {
Params.Init()
size := Params.DefaultRecordSize
assert.Equal(t, size, int64(1024))
}
func TestParamTable_MinSegIDAssignCnt(t *testing.T) {
Params.Init()
cnt := Params.MinSegIDAssignCnt
assert.Equal(t, cnt, int64(1024))
}
func TestParamTable_MaxSegIDAssignCnt(t *testing.T) {
Params.Init()
cnt := Params.MaxSegIDAssignCnt
assert.Equal(t, cnt, int64(16384))
}
func TestParamTable_SegIDAssignExpiration(t *testing.T) {
Params.Init()
expiration := Params.SegIDAssignExpiration
assert.Equal(t, expiration, int64(2000))
}
func TestParamTable_QueryNodeNum(t *testing.T) {
Params.Init()
num := Params.QueryNodeNum
fmt.Println("QueryNodeNum", num)
}
func TestParamTable_QueryNodeStatsChannelName(t *testing.T) {
Params.Init()
name := Params.QueryNodeStatsChannelName
assert.Equal(t, name, "query-node-stats")
}
func TestParamTable_ProxyIDList(t *testing.T) {
Params.Init()
ids := Params.ProxyIDList
assert.Equal(t, len(ids), 1)
assert.Equal(t, ids[0], int64(0))
}
func TestParamTable_ProxyTimeTickChannelNames(t *testing.T) {
Params.Init()
names := Params.ProxyTimeTickChannelNames
assert.Equal(t, len(names), 1)
assert.Equal(t, names[0], "proxyTimeTick-0")
}
func TestParamTable_MsgChannelSubName(t *testing.T) {
Params.Init()
name := Params.MsgChannelSubName
assert.Equal(t, name, "master")
}
func TestParamTable_SoftTimeTickBarrierInterval(t *testing.T) {
Params.Init()
interval := Params.SoftTimeTickBarrierInterval
assert.Equal(t, interval, Timestamp(0x7d00000))
}
func TestParamTable_WriteNodeIDList(t *testing.T) {
Params.Init()
ids := Params.WriteNodeIDList
assert.Equal(t, len(ids), 1)
assert.Equal(t, ids[0], int64(3))
}
func TestParamTable_WriteNodeTimeTickChannelNames(t *testing.T) {
Params.Init()
names := Params.WriteNodeTimeTickChannelNames
assert.Equal(t, len(names), 1)
assert.Equal(t, names[0], "writeNodeTimeTick-3")
}
func TestParamTable_InsertChannelNames(t *testing.T) {
Params.Init()
names := Params.InsertChannelNames
assert.Equal(t, Params.TopicNum, len(names))
}
func TestParamTable_K2SChannelNames(t *testing.T) {
Params.Init()
names := Params.K2SChannelNames
assert.Equal(t, len(names), 1)
assert.Equal(t, names[0], "k2s-3")
assert.Equal(t, names[0], "k2s-0")
}


@ -4,6 +4,7 @@ import (
"errors"
"log"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
@ -66,7 +67,30 @@ func (t *createPartitionTask) Execute() error {
if err != nil {
return err
}
return t.mt.AddPartition(collectionMeta.ID, partitionName.Tag)
ts, err := t.Ts()
if err != nil {
return err
}
err = t.mt.AddPartition(collectionMeta.ID, partitionName.Tag)
if err != nil {
return err
}
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: ts,
EndTimestamp: ts,
HashValues: []uint32{0},
}
timeTickMsg := &ms.CreatePartitionMsg{
BaseMsg: baseMsg,
CreatePartitionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@ -98,7 +122,29 @@ func (t *dropPartitionTask) Execute() error {
return err
}
return t.mt.DeletePartition(collectionMeta.ID, partitionName.Tag)
err = t.mt.DeletePartition(collectionMeta.ID, partitionName.Tag)
if err != nil {
return err
}
ts, err := t.Ts()
if err != nil {
return err
}
msgPack := ms.MsgPack{}
baseMsg := ms.BaseMsg{
BeginTimestamp: ts,
EndTimestamp: ts,
HashValues: []uint32{0},
}
timeTickMsg := &ms.DropPartitionMsg{
BaseMsg: baseMsg,
DropPartitionRequest: *t.req,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
return t.sch.ddMsgStream.Broadcast(&msgPack)
}
//////////////////////////////////////////////////////////////////////////
@ -132,6 +178,7 @@ func (t *hasPartitionTask) Execute() error {
t.hasPartition = t.mt.HasPartition(collectionMeta.ID, partitionName.Tag)
return nil
}
//////////////////////////////////////////////////////////////////////////
@ -168,6 +215,7 @@ func (t *describePartitionTask) Execute() error {
t.description = &description
return nil
}
//////////////////////////////////////////////////////////////////////////
@ -208,4 +256,5 @@ func (t *showPartitionTask) Execute() error {
t.stringListResponse = &stringListResponse
return nil
}


@ -2,8 +2,6 @@ package master
import (
"context"
"math/rand"
"strconv"
"testing"
"github.com/golang/protobuf/proto"
@ -20,6 +18,7 @@ import (
func TestMaster_Partition(t *testing.T) {
Init()
refreshMasterAddress()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
@ -66,13 +65,12 @@ func TestMaster_Partition(t *testing.T) {
DefaultPartitionTag: "_default",
}
port := 10000 + rand.Intn(1000)
svr, err := CreateServer(ctx)
assert.Nil(t, err)
err = svr.Run(int64(port))
err = svr.Run(int64(Params.Port))
assert.Nil(t, err)
conn, err := grpc.DialContext(ctx, "127.0.0.1:"+strconv.Itoa(port), grpc.WithInsecure(), grpc.WithBlock())
conn, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer conn.Close()


@ -1,17 +1,36 @@
package master
import (
"context"
"log"
"github.com/zilliztech/milvus-distributed/internal/errors"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
)
//type ddRequestScheduler interface {}
//type ddReqFIFOScheduler struct {}
type ddRequestScheduler struct {
ctx context.Context
cancel context.CancelFunc
globalIDAllocator func() (UniqueID, error)
reqQueue chan task
scheduleTimeStamp Timestamp
ddMsgStream ms.MsgStream
}
func NewDDRequestScheduler(allocGlobalID func() (UniqueID, error)) *ddRequestScheduler {
func NewDDRequestScheduler(ctx context.Context) *ddRequestScheduler {
const channelSize = 1024
ctx2, cancel := context.WithCancel(ctx)
rs := ddRequestScheduler{
globalIDAllocator: allocGlobalID,
reqQueue: make(chan task, channelSize),
ctx: ctx2,
cancel: cancel,
reqQueue: make(chan task, channelSize),
}
return &rs
}
@ -20,3 +39,51 @@ func (rs *ddRequestScheduler) Enqueue(task task) error {
rs.reqQueue <- task
return nil
}
func (rs *ddRequestScheduler) SetIDAllocator(allocGlobalID func() (UniqueID, error)) {
rs.globalIDAllocator = allocGlobalID
}
func (rs *ddRequestScheduler) SetDDMsgStream(ddStream ms.MsgStream) {
rs.ddMsgStream = ddStream
}
func (rs *ddRequestScheduler) scheduleLoop() {
for {
select {
case task := <-rs.reqQueue:
err := rs.schedule(task)
if err != nil {
log.Println(err)
}
case <-rs.ctx.Done():
log.Print("server is closed, exit task execution loop")
return
}
}
}
func (rs *ddRequestScheduler) schedule(t task) error {
timeStamp, err := t.Ts()
if err != nil {
log.Println(err)
return err
}
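// Reject a task whose timestamp is older than the last scheduled one; otherwise advance the watermark and execute it in order.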
if timeStamp < rs.scheduleTimeStamp {
t.Notify(errors.Errorf("input timestamp = %d, scheduler timestamp = %d", timeStamp, rs.scheduleTimeStamp))
} else {
rs.scheduleTimeStamp = timeStamp
err = t.Execute()
t.Notify(err)
}
return nil
}
func (rs *ddRequestScheduler) Start() error {
go rs.scheduleLoop()
return nil
}
func (rs *ddRequestScheduler) Close() {
rs.cancel()
}


@ -0,0 +1,342 @@
package master
import (
"context"
"math/rand"
"strconv"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
ms "github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"go.etcd.io/etcd/clientv3"
)
func TestMaster_Scheduler_Collection(t *testing.T) {
Init()
etcdAddress := Params.EtcdAddress
kvRootPath := Params.MetaRootPath
pulsarAddr := Params.PulsarAddress
producerChannels := []string{"ddstream"}
consumerChannels := []string{"ddstream"}
consumerSubName := "substream"
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
assert.Nil(t, err)
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
meta, err := NewMetaTable(etcdKV)
assert.Nil(t, err)
defer meta.client.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDDStream.SetPulsarClient(pulsarAddr)
pulsarDDStream.CreatePulsarProducers(producerChannels)
pulsarDDStream.Start()
defer pulsarDDStream.Close()
consumeMs := ms.NewPulsarMsgStream(ctx, 1024)
consumeMs.SetPulsarClient(pulsarAddr)
consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, ms.NewUnmarshalDispatcher(), 1024)
consumeMs.Start()
defer consumeMs.Close()
idAllocator := NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "gid"))
err = idAllocator.Initialize()
assert.Nil(t, err)
scheduler := NewDDRequestScheduler(ctx)
scheduler.SetDDMsgStream(pulsarDDStream)
scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
scheduler.Start()
defer scheduler.Close()
rand.Seed(time.Now().Unix())
sch := schemapb.CollectionSchema{
Name: "name" + strconv.FormatUint(rand.Uint64(), 10),
Description: "string",
AutoID: true,
Fields: nil,
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
////////////////////////////CreateCollection////////////////////////
createCollectionReq := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
var createCollectionTask task = &createCollectionTask{
req: &createCollectionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(createCollectionTask)
assert.Nil(t, err)
err = createCollectionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var consumeMsg ms.MsgStream = consumeMs
var createCollectionMsg *ms.CreateCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createCollectionMsg = v.(*ms.CreateCollectionMsg)
}
break
}
}
assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)
////////////////////////////DropCollection////////////////////////
dropCollectionReq := internalpb.DropCollectionRequest{
MsgType: internalpb.MsgType_kDropCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
CollectionName: &servicepb.CollectionName{CollectionName: sch.Name},
}
var dropCollectionTask task = &dropCollectionTask{
req: &dropCollectionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(dropCollectionTask)
assert.Nil(t, err)
err = dropCollectionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var dropCollectionMsg *ms.DropCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropCollectionMsg = v.(*ms.DropCollectionMsg)
}
break
}
}
assert.Equal(t, dropCollectionReq.MsgType, dropCollectionMsg.DropCollectionRequest.MsgType)
assert.Equal(t, dropCollectionReq.ReqID, dropCollectionMsg.DropCollectionRequest.ReqID)
assert.Equal(t, dropCollectionReq.Timestamp, dropCollectionMsg.DropCollectionRequest.Timestamp)
assert.Equal(t, dropCollectionReq.ProxyID, dropCollectionMsg.DropCollectionRequest.ProxyID)
assert.Equal(t, dropCollectionReq.CollectionName.CollectionName, dropCollectionMsg.DropCollectionRequest.CollectionName.CollectionName)
}
func TestMaster_Scheduler_Partition(t *testing.T) {
Init()
etcdAddress := Params.EtcdAddress
kvRootPath := Params.MetaRootPath
pulsarAddr := Params.PulsarAddress
producerChannels := []string{"ddstream"}
consumerChannels := []string{"ddstream"}
consumerSubName := "substream"
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
assert.Nil(t, err)
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
meta, err := NewMetaTable(etcdKV)
assert.Nil(t, err)
defer meta.client.Close()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
pulsarDDStream := ms.NewPulsarMsgStream(ctx, 1024) //input stream
pulsarDDStream.SetPulsarClient(pulsarAddr)
pulsarDDStream.CreatePulsarProducers(producerChannels)
pulsarDDStream.Start()
defer pulsarDDStream.Close()
consumeMs := ms.NewPulsarMsgStream(ctx, 1024)
consumeMs.SetPulsarClient(pulsarAddr)
consumeMs.CreatePulsarConsumers(consumerChannels, consumerSubName, ms.NewUnmarshalDispatcher(), 1024)
consumeMs.Start()
defer consumeMs.Close()
idAllocator := NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{etcdAddress}, kvRootPath, "gid"))
err = idAllocator.Initialize()
assert.Nil(t, err)
scheduler := NewDDRequestScheduler(ctx)
scheduler.SetDDMsgStream(pulsarDDStream)
scheduler.SetIDAllocator(func() (UniqueID, error) { return idAllocator.AllocOne() })
scheduler.Start()
defer scheduler.Close()
rand.Seed(time.Now().Unix())
sch := schemapb.CollectionSchema{
Name: "name" + strconv.FormatUint(rand.Uint64(), 10),
Description: "string",
AutoID: true,
Fields: nil,
}
schemaBytes, err := proto.Marshal(&sch)
assert.Nil(t, err)
////////////////////////////CreateCollection////////////////////////
createCollectionReq := internalpb.CreateCollectionRequest{
MsgType: internalpb.MsgType_kCreateCollection,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
Schema: &commonpb.Blob{Value: schemaBytes},
}
var createCollectionTask task = &createCollectionTask{
req: &createCollectionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(createCollectionTask)
assert.Nil(t, err)
err = createCollectionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var consumeMsg ms.MsgStream = consumeMs
var createCollectionMsg *ms.CreateCollectionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createCollectionMsg = v.(*ms.CreateCollectionMsg)
}
break
}
}
assert.Equal(t, createCollectionReq.MsgType, createCollectionMsg.CreateCollectionRequest.MsgType)
assert.Equal(t, createCollectionReq.ReqID, createCollectionMsg.CreateCollectionRequest.ReqID)
assert.Equal(t, createCollectionReq.Timestamp, createCollectionMsg.CreateCollectionRequest.Timestamp)
assert.Equal(t, createCollectionReq.ProxyID, createCollectionMsg.CreateCollectionRequest.ProxyID)
assert.Equal(t, createCollectionReq.Schema.Value, createCollectionMsg.CreateCollectionRequest.Schema.Value)
////////////////////////////CreatePartition////////////////////////
partitionName := "partitionName" + strconv.FormatUint(rand.Uint64(), 10)
createPartitionReq := internalpb.CreatePartitionRequest{
MsgType: internalpb.MsgType_kCreatePartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
var createPartitionTask task = &createPartitionTask{
req: &createPartitionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(createPartitionTask)
assert.Nil(t, err)
err = createPartitionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var createPartitionMsg *ms.CreatePartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
createPartitionMsg = v.(*ms.CreatePartitionMsg)
}
break
}
}
assert.Equal(t, createPartitionReq.MsgType, createPartitionMsg.CreatePartitionRequest.MsgType)
assert.Equal(t, createPartitionReq.ReqID, createPartitionMsg.CreatePartitionRequest.ReqID)
assert.Equal(t, createPartitionReq.Timestamp, createPartitionMsg.CreatePartitionRequest.Timestamp)
assert.Equal(t, createPartitionReq.ProxyID, createPartitionMsg.CreatePartitionRequest.ProxyID)
assert.Equal(t, createPartitionReq.PartitionName.CollectionName, createPartitionMsg.CreatePartitionRequest.PartitionName.CollectionName)
assert.Equal(t, createPartitionReq.PartitionName.Tag, createPartitionMsg.CreatePartitionRequest.PartitionName.Tag)
////////////////////////////DropPartition////////////////////////
dropPartitionReq := internalpb.DropPartitionRequest{
MsgType: internalpb.MsgType_kDropPartition,
ReqID: 1,
Timestamp: 11,
ProxyID: 1,
PartitionName: &servicepb.PartitionName{
CollectionName: sch.Name,
Tag: partitionName,
},
}
var dropPartitionTask task = &dropPartitionTask{
req: &dropPartitionReq,
baseTask: baseTask{
sch: scheduler,
mt: meta,
cv: make(chan error),
},
}
err = scheduler.Enqueue(dropPartitionTask)
assert.Nil(t, err)
err = dropPartitionTask.WaitToFinish(ctx)
assert.Nil(t, err)
var dropPartitionMsg *ms.DropPartitionMsg
for {
result := consumeMsg.Consume()
if len(result.Msgs) > 0 {
msgs := result.Msgs
for _, v := range msgs {
dropPartitionMsg = v.(*ms.DropPartitionMsg)
}
break
}
}
assert.Equal(t, dropPartitionReq.MsgType, dropPartitionMsg.DropPartitionRequest.MsgType)
assert.Equal(t, dropPartitionReq.ReqID, dropPartitionMsg.DropPartitionRequest.ReqID)
assert.Equal(t, dropPartitionReq.Timestamp, dropPartitionMsg.DropPartitionRequest.Timestamp)
assert.Equal(t, dropPartitionReq.ProxyID, dropPartitionMsg.DropPartitionRequest.ProxyID)
assert.Equal(t, dropPartitionReq.PartitionName.CollectionName, dropPartitionMsg.DropPartitionRequest.PartitionName.CollectionName)
}


@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
@ -29,12 +30,12 @@ var segMgr *SegmentManager
var collName = "coll_segmgr_test"
var collID = int64(1001)
var partitionTag = "test"
var kvBase *kv.EtcdKV
var kvBase kv.TxnBase
var master *Master
var masterCancelFunc context.CancelFunc
func setup() {
Params.Init()
Init()
etcdAddress := Params.EtcdAddress
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
@ -48,7 +49,7 @@ func setup() {
if err != nil {
panic(err)
}
kvBase = kv.NewEtcdKV(cli, rootPath)
kvBase = etcdkv.NewEtcdKV(cli, rootPath)
tmpMt, err := NewMetaTable(kvBase)
if err != nil {
panic(err)
@ -217,7 +218,8 @@ func TestSegmentManager_SegmentStats(t *testing.T) {
}
func startupMaster() {
Params.Init()
Init()
refreshMasterAddress()
etcdAddress := Params.EtcdAddress
rootPath := "/test/root"
ctx, cancel := context.WithCancel(context.TODO())
@ -230,7 +232,6 @@ func startupMaster() {
if err != nil {
panic(err)
}
Params = ParamTable{
Address: Params.Address,
Port: Params.Port,
@ -271,7 +272,7 @@ func startupMaster() {
if err != nil {
panic(err)
}
err = master.Run(10013)
err = master.Run(int64(Params.Port))
if err != nil {
panic(err)
@ -288,7 +289,7 @@ func TestSegmentManager_RPC(t *testing.T) {
defer shutdownMaster()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
dialContext, err := grpc.DialContext(ctx, "127.0.0.1:10013", grpc.WithInsecure(), grpc.WithBlock())
dialContext, err := grpc.DialContext(ctx, Params.Address, grpc.WithInsecure(), grpc.WithBlock())
assert.Nil(t, err)
defer dialContext.Close()
client := masterpb.NewMasterClient(dialContext)


@ -0,0 +1,117 @@
package master
import (
"fmt"
"log"
"os"
"path"
"path/filepath"
"strings"
"github.com/spf13/viper"
"github.com/zilliztech/milvus-distributed/internal/errors"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
)
type SysConfig struct {
kv *etcdkv.EtcdKV
}
// Initialize Configs from config files, and store them in Etcd.
func (conf *SysConfig) InitFromFile(filePath string) error {
memConfigs, err := conf.getConfigFiles(filePath)
if err != nil {
return errors.Errorf("[Init SysConfig] %s\n", err.Error())
}
for _, memConfig := range memConfigs {
if err := conf.saveToEtcd(memConfig, "config"); err != nil {
return errors.Errorf("[Init SysConfig] %s\n", err.Error())
}
}
return nil
}
func (conf *SysConfig) GetByPrefix(keyPrefix string) (keys []string, values []string, err error) {
realPrefix := path.Join("config", strings.ToLower(keyPrefix))
keys, values, err = conf.kv.LoadWithPrefix(realPrefix)
if err != nil {
return nil, nil, err
}
for index := range keys {
keys[index] = strings.Replace(keys[index], conf.kv.GetPath("config"), "", 1)
}
log.Println("Loaded", len(keys), "pairs of configs with prefix", keyPrefix)
return keys, values, err
}
// Get specific configs for keys.
func (conf *SysConfig) Get(keys []string) ([]string, error) {
var keysToLoad []string
for i := range keys {
keysToLoad = append(keysToLoad, path.Join("config", strings.ToLower(keys[i])))
}
values, err := conf.kv.MultiLoad(keysToLoad)
if err != nil {
return nil, err
}
return values, nil
}
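For orientation, a minimal usage sketch of the SysConfig API shown above; the etcd root path, config directory, and keys are placeholders picked for illustration, not values mandated by this change:

// Sketch only: assumes this sits next to sys_config.go in package master and that
// clientv3, etcdkv, and log are imported as in the surrounding files.
func exampleSysConfigUsage(cli *clientv3.Client) error {
	configKV := etcdkv.NewEtcdKV(cli, "/test/root") // etcd root path is a placeholder
	defer configKV.Close()
	sc := SysConfig{kv: configKV}
	// Walk the directory and push every *.yaml key/value pair into etcd under "config/".
	if err := sc.InitFromFile("../../configs"); err != nil {
		return err
	}
	// Read one key back, then everything under a prefix.
	vals, err := sc.Get([]string{"/master/port"})
	if err != nil {
		return err
	}
	log.Println("master port:", vals[0])
	keys, _, err := sc.GetByPrefix("/master")
	if err != nil {
		return err
	}
	log.Println("loaded", len(keys), "keys under /master")
	return nil
}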
func (conf *SysConfig) getConfigFiles(filePath string) ([]*viper.Viper, error) {
var vipers []*viper.Viper
err := filepath.Walk(filePath,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// collect every .yaml config file under the given path
if !info.IsDir() && filepath.Ext(path) == ".yaml" {
log.Println("Config files ", info.Name())
currentConf := viper.New()
currentConf.SetConfigFile(path)
if err := currentConf.ReadInConfig(); err != nil {
log.Panic("Config file error: ", err)
}
vipers = append(vipers, currentConf)
}
return nil
})
if err != nil {
return nil, err
}
if len(vipers) == 0 {
return nil, errors.Errorf("There are no config files in the path `%s`.\n", filePath)
}
return vipers, nil
}
func (conf *SysConfig) saveToEtcd(memConfig *viper.Viper, secondRootPath string) error {
configMaps := map[string]string{}
allKeys := memConfig.AllKeys()
for _, key := range allKeys {
etcdKey := strings.ReplaceAll(key, ".", "/")
etcdKey = path.Join(secondRootPath, etcdKey)
val := memConfig.Get(key)
if val == nil {
configMaps[etcdKey] = ""
continue
}
configMaps[etcdKey] = fmt.Sprintf("%v", val)
}
if err := conf.kv.MultiSave(configMaps); err != nil {
return err
}
return nil
}


@ -0,0 +1,209 @@
package master
import (
"context"
"fmt"
"log"
"path"
"strings"
"testing"
"time"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/spf13/viper"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.etcd.io/etcd/clientv3"
)
func Test_SysConfig(t *testing.T) {
Init()
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{Params.EtcdAddress},
DialTimeout: 5 * time.Second,
})
require.Nil(t, err)
_, err = cli.Delete(ctx, "/test/root", clientv3.WithPrefix())
require.Nil(t, err)
rootPath := "/test/root"
configKV := etcdkv.NewEtcdKV(cli, rootPath)
defer configKV.Close()
sc := SysConfig{kv: configKV}
require.Equal(t, rootPath, sc.kv.GetPath("."))
t.Run("tests on contig_test.yaml", func(t *testing.T) {
err = sc.InitFromFile(".")
require.Nil(t, err)
testKeys := []string{
"/etcd/address",
"/master/port",
"/master/proxyidlist",
"/master/segmentthresholdfactor",
"/pulsar/token",
"/reader/stopflag",
"/proxy/timezone",
"/proxy/network/address",
"/proxy/storage/path",
"/storage/accesskey",
}
testVals := []string{
"localhost",
"53100",
"[1 2]",
"0.75",
"eyJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJKb2UifQ.ipevRNuRP6HflG8cFKnmUPtypruRC4fb1DWtoLL62SY",
"-1",
"UTC+8",
"0.0.0.0",
"/var/lib/milvus",
"",
}
vals, err := sc.Get(testKeys)
assert.Nil(t, err)
for i := range testVals {
assert.Equal(t, testVals[i], vals[i])
}
keys, vals, err := sc.GetByPrefix("/master")
assert.Nil(t, err)
for i := range keys {
assert.True(t, strings.HasPrefix(keys[i], "/master/"))
}
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, 21, len(keys))
// Test get all configs
keys, vals, err = sc.GetByPrefix("/")
assert.Nil(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, 73, len(vals))
// Test get configs with prefix not exist
keys, vals, err = sc.GetByPrefix("/config")
assert.Nil(t, err)
assert.Equal(t, len(keys), len(vals))
assert.Equal(t, 0, len(keys))
assert.Equal(t, 0, len(vals))
_, _, err = sc.GetByPrefix("//././../../../../../..//../")
assert.Nil(t, err)
_, _, err = sc.GetByPrefix("/master/./address")
assert.Nil(t, err)
_, _, err = sc.GetByPrefix(".")
assert.Nil(t, err)
_, _, err = sc.GetByPrefix("\\")
assert.Nil(t, err)
})
t.Run("getConfigFiles", func(t *testing.T) {
filePath := "../../configs"
vipers, err := sc.getConfigFiles(filePath)
assert.Nil(t, err)
assert.NotNil(t, vipers[0])
filePath = "/path/not/exists"
_, err = sc.getConfigFiles(filePath)
assert.NotNil(t, err)
log.Println(err)
})
t.Run("Test saveToEtcd Normal", func(t *testing.T) {
_, err = cli.Delete(ctx, "/test/root/config", clientv3.WithPrefix())
require.Nil(t, err)
v := viper.New()
v.Set("a.suba1", "v1")
v.Set("a.suba2", "v2")
v.Set("a.suba3.subsuba1", "v3")
v.Set("a.suba3.subsuba2", "v4")
secondRootPath := "config"
err := sc.saveToEtcd(v, secondRootPath)
assert.Nil(t, err)
value, err := sc.kv.Load(path.Join(secondRootPath, "a/suba1"))
assert.Nil(t, err)
assert.Equal(t, "v1", value)
value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba2"))
assert.Nil(t, err)
assert.Equal(t, "v2", value)
value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba3/subsuba1"))
assert.Nil(t, err)
assert.Equal(t, "v3", value)
value, err = sc.kv.Load(path.Join(secondRootPath, "a/suba3/subsuba2"))
assert.Nil(t, err)
assert.Equal(t, "v4", value)
keys, values, err := sc.kv.LoadWithPrefix(path.Join(secondRootPath, "a"))
assert.Nil(t, err)
assert.Equal(t, 4, len(keys))
assert.Equal(t, 4, len(values))
assert.ElementsMatch(t, []string{
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba1"),
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba2"),
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba3/subsuba1"),
path.Join(sc.kv.GetPath(secondRootPath), "/a/suba3/subsuba2"),
}, keys)
assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)
keys = []string{
"/a/suba1",
"/a/suba2",
"/a/suba3/subsuba1",
"/a/suba3/subsuba2",
}
values, err = sc.Get(keys)
assert.Nil(t, err)
assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)
keysAfter, values, err := sc.GetByPrefix("/a")
fmt.Println(keysAfter)
assert.Nil(t, err)
assert.ElementsMatch(t, []string{"v1", "v2", "v3", "v4"}, values)
assert.ElementsMatch(t, keys, keysAfter)
})
t.Run("Test saveToEtcd Different value types", func(t *testing.T) {
v := viper.New()
v.Set("string", "string")
v.Set("number", 1)
v.Set("nil", nil)
v.Set("float", 1.2)
v.Set("intslice", []int{100, 200})
v.Set("stringslice", []string{"a", "b"})
v.Set("stringmapstring", map[string]string{"k1": "1", "k2": "2"})
secondRootPath := "test_save_to_etcd_different_value_types"
err := sc.saveToEtcd(v, secondRootPath)
require.Nil(t, err)
keys, values, err := sc.kv.LoadWithPrefix(path.Join("/", secondRootPath))
assert.Nil(t, err)
assert.Equal(t, 7, len(keys))
assert.Equal(t, 7, len(values))
assert.ElementsMatch(t, []string{
path.Join(sc.kv.GetPath(secondRootPath), "nil"),
path.Join(sc.kv.GetPath(secondRootPath), "string"),
path.Join(sc.kv.GetPath(secondRootPath), "number"),
path.Join(sc.kv.GetPath(secondRootPath), "float"),
path.Join(sc.kv.GetPath(secondRootPath), "intslice"),
path.Join(sc.kv.GetPath(secondRootPath), "stringslice"),
path.Join(sc.kv.GetPath(secondRootPath), "stringmapstring"),
}, keys)
assert.ElementsMatch(t, []string{"", "string", "1", "1.2", "[100 200]", "[a b]", "map[k1:1 k2:2]"}, values)
})
}


@ -81,13 +81,18 @@ func receiveMsg(stream *ms.MsgStream) []uint64 {
func TestStream_PulsarMsgStream_TimeTick(t *testing.T) {
Init()
pulsarAddress := Params.PulsarAddress
producerChannels := []string{"proxyTtBarrier"}
consumerChannels := []string{"proxyTtBarrier"}
consumerSubName := "proxyTtBarrier"
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
proxyTtInputStream, proxyTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
producerChannels := []string{"proxyDMTtBarrier"}
consumerChannels := []string{"proxyDMTtBarrier"}
consumerSubName := "proxyDMTtBarrier"
proxyDMTtInputStream, proxyDMTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
producerChannels = []string{"proxyDDTtBarrier"}
consumerChannels = []string{"proxyDDTtBarrier"}
consumerSubName = "proxyDDTtBarrier"
proxyDDTtInputStream, proxyDDTtOutputStream := initTestPulsarStream(ctx, pulsarAddress, producerChannels, consumerChannels, consumerSubName)
producerChannels = []string{"writeNodeBarrier"}
consumerChannels = []string{"writeNodeBarrier"}
@ -97,14 +102,20 @@ func TestStream_PulsarMsgStream_TimeTick(t *testing.T) {
timeSyncProducer, _ := NewTimeSyncMsgProducer(ctx)
timeSyncProducer.SetProxyTtBarrier(&TestTickBarrier{ctx: ctx})
timeSyncProducer.SetWriteNodeTtBarrier(&TestTickBarrier{ctx: ctx})
timeSyncProducer.SetDMSyncStream(*proxyTtInputStream)
timeSyncProducer.SetDMSyncStream(*proxyDMTtInputStream)
timeSyncProducer.SetDDSyncStream(*proxyDDTtInputStream)
timeSyncProducer.SetK2sSyncStream(*writeNodeInputStream)
(*proxyTtOutputStream).Start()
(*proxyDMTtOutputStream).Start()
(*proxyDDTtOutputStream).Start()
(*writeNodeOutputStream).Start()
timeSyncProducer.Start()
expected := []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
result1 := receiveMsg(proxyTtOutputStream)
result1 := receiveMsg(proxyDMTtOutputStream)
assert.Equal(t, expected, result1)
expected = []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
result1 = receiveMsg(proxyDDTtOutputStream)
assert.Equal(t, expected, result1)
expected = []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
result2 := receiveMsg(writeNodeOutputStream)
assert.Equal(t, expected, result2)


@ -15,7 +15,8 @@ type timeSyncMsgProducer struct {
//hardTimeTickBarrier
writeNodeTtBarrier TimeTickBarrier
dmSyncStream ms.MsgStream // insert & delete
ddSyncStream ms.MsgStream // data definition: create/drop collection and partition
dmSyncStream ms.MsgStream
k2sSyncStream ms.MsgStream
ctx context.Context
@ -34,6 +35,9 @@ func (syncMsgProducer *timeSyncMsgProducer) SetProxyTtBarrier(proxyTtBarrier Tim
func (syncMsgProducer *timeSyncMsgProducer) SetWriteNodeTtBarrier(writeNodeTtBarrier TimeTickBarrier) {
syncMsgProducer.writeNodeTtBarrier = writeNodeTtBarrier
}
func (syncMsgProducer *timeSyncMsgProducer) SetDDSyncStream(ddSync ms.MsgStream) {
syncMsgProducer.ddSyncStream = ddSync
}
func (syncMsgProducer *timeSyncMsgProducer) SetDMSyncStream(dmSync ms.MsgStream) {
syncMsgProducer.dmSyncStream = dmSync
@ -43,7 +47,7 @@ func (syncMsgProducer *timeSyncMsgProducer) SetK2sSyncStream(k2sSync ms.MsgStrea
syncMsgProducer.k2sSyncStream = k2sSync
}
func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier, stream ms.MsgStream) error {
func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier, streams []ms.MsgStream) error {
for {
select {
case <-syncMsgProducer.ctx.Done():
@ -72,7 +76,9 @@ func (syncMsgProducer *timeSyncMsgProducer) broadcastMsg(barrier TimeTickBarrier
TimeTickMsg: timeTickResult,
}
msgPack.Msgs = append(msgPack.Msgs, timeTickMsg)
err = stream.Broadcast(&msgPack)
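// Fan the same time tick out to every downstream stream; the dm and dd streams now share the proxy barrier.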
for _, stream := range streams {
err = stream.Broadcast(&msgPack)
if err != nil {
return err
}
}
@ -91,16 +97,17 @@ func (syncMsgProducer *timeSyncMsgProducer) Start() error {
return err
}
go syncMsgProducer.broadcastMsg(syncMsgProducer.proxyTtBarrier, syncMsgProducer.dmSyncStream)
go syncMsgProducer.broadcastMsg(syncMsgProducer.writeNodeTtBarrier, syncMsgProducer.k2sSyncStream)
go syncMsgProducer.broadcastMsg(syncMsgProducer.proxyTtBarrier, []ms.MsgStream{syncMsgProducer.dmSyncStream, syncMsgProducer.ddSyncStream})
go syncMsgProducer.broadcastMsg(syncMsgProducer.writeNodeTtBarrier, []ms.MsgStream{syncMsgProducer.k2sSyncStream})
return nil
}
func (syncMsgProducer *timeSyncMsgProducer) Close() {
syncMsgProducer.proxyTtBarrier.Close()
syncMsgProducer.writeNodeTtBarrier.Close()
syncMsgProducer.ddSyncStream.Close()
syncMsgProducer.dmSyncStream.Close()
syncMsgProducer.k2sSyncStream.Close()
syncMsgProducer.cancel()
syncMsgProducer.proxyTtBarrier.Close()
syncMsgProducer.writeNodeTtBarrier.Close()
}


@ -47,7 +47,7 @@ type atomicObject struct {
// timestampOracle is used to maintain the logic of tso.
type timestampOracle struct {
key string
kvBase kv.Base
kvBase kv.TxnBase
// TODO: remove saveInterval
saveInterval time.Duration

internal/msgstream/msg.go

@ -0,0 +1,593 @@
package msgstream
import (
"github.com/golang/protobuf/proto"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
type MsgType = internalPb.MsgType
type TsMsg interface {
BeginTs() Timestamp
EndTs() Timestamp
Type() MsgType
HashKeys() []uint32
Marshal(TsMsg) ([]byte, error)
Unmarshal([]byte) (TsMsg, error)
}
type BaseMsg struct {
BeginTimestamp Timestamp
EndTimestamp Timestamp
HashValues []uint32
}
func (bm *BaseMsg) BeginTs() Timestamp {
return bm.BeginTimestamp
}
func (bm *BaseMsg) EndTs() Timestamp {
return bm.EndTimestamp
}
func (bm *BaseMsg) HashKeys() []uint32 {
return bm.HashValues
}
/////////////////////////////////////////Insert//////////////////////////////////////////
type InsertMsg struct {
BaseMsg
internalPb.InsertRequest
}
func (it *InsertMsg) Type() MsgType {
return it.MsgType
}
func (it *InsertMsg) Marshal(input TsMsg) ([]byte, error) {
insertMsg := input.(*InsertMsg)
insertRequest := &insertMsg.InsertRequest
mb, err := proto.Marshal(insertRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (it *InsertMsg) Unmarshal(input []byte) (TsMsg, error) {
insertRequest := internalPb.InsertRequest{}
err := proto.Unmarshal(input, &insertRequest)
if err != nil {
return nil, err
}
insertMsg := &InsertMsg{InsertRequest: insertRequest}
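// Derive BeginTimestamp/EndTimestamp as the minimum and maximum of the per-row timestamps carried by the request.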
for _, timestamp := range insertMsg.Timestamps {
insertMsg.BeginTimestamp = timestamp
insertMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range insertMsg.Timestamps {
if timestamp > insertMsg.EndTimestamp {
insertMsg.EndTimestamp = timestamp
}
if timestamp < insertMsg.BeginTimestamp {
insertMsg.BeginTimestamp = timestamp
}
}
return insertMsg, nil
}
/////////////////////////////////////////Delete//////////////////////////////////////////
type DeleteMsg struct {
BaseMsg
internalPb.DeleteRequest
}
func (dt *DeleteMsg) Type() MsgType {
return dt.MsgType
}
func (dt *DeleteMsg) Marshal(input TsMsg) ([]byte, error) {
deleteTask := input.(*DeleteMsg)
deleteRequest := &deleteTask.DeleteRequest
mb, err := proto.Marshal(deleteRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dt *DeleteMsg) Unmarshal(input []byte) (TsMsg, error) {
deleteRequest := internalPb.DeleteRequest{}
err := proto.Unmarshal(input, &deleteRequest)
if err != nil {
return nil, err
}
deleteMsg := &DeleteMsg{DeleteRequest: deleteRequest}
for _, timestamp := range deleteMsg.Timestamps {
deleteMsg.BeginTimestamp = timestamp
deleteMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range deleteMsg.Timestamps {
if timestamp > deleteMsg.EndTimestamp {
deleteMsg.EndTimestamp = timestamp
}
if timestamp < deleteMsg.BeginTimestamp {
deleteMsg.BeginTimestamp = timestamp
}
}
return deleteMsg, nil
}
/////////////////////////////////////////Search//////////////////////////////////////////
type SearchMsg struct {
BaseMsg
internalPb.SearchRequest
}
func (st *SearchMsg) Type() MsgType {
return st.MsgType
}
func (st *SearchMsg) Marshal(input TsMsg) ([]byte, error) {
searchTask := input.(*SearchMsg)
searchRequest := &searchTask.SearchRequest
mb, err := proto.Marshal(searchRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (st *SearchMsg) Unmarshal(input []byte) (TsMsg, error) {
searchRequest := internalPb.SearchRequest{}
err := proto.Unmarshal(input, &searchRequest)
if err != nil {
return nil, err
}
searchMsg := &SearchMsg{SearchRequest: searchRequest}
searchMsg.BeginTimestamp = searchMsg.Timestamp
searchMsg.EndTimestamp = searchMsg.Timestamp
return searchMsg, nil
}
/////////////////////////////////////////SearchResult//////////////////////////////////////////
type SearchResultMsg struct {
BaseMsg
internalPb.SearchResult
}
func (srt *SearchResultMsg) Type() MsgType {
return srt.MsgType
}
func (srt *SearchResultMsg) Marshal(input TsMsg) ([]byte, error) {
searchResultTask := input.(*SearchResultMsg)
searchResultRequest := &searchResultTask.SearchResult
mb, err := proto.Marshal(searchResultRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (srt *SearchResultMsg) Unmarshal(input []byte) (TsMsg, error) {
searchResultRequest := internalPb.SearchResult{}
err := proto.Unmarshal(input, &searchResultRequest)
if err != nil {
return nil, err
}
searchResultMsg := &SearchResultMsg{SearchResult: searchResultRequest}
searchResultMsg.BeginTimestamp = searchResultMsg.Timestamp
searchResultMsg.EndTimestamp = searchResultMsg.Timestamp
return searchResultMsg, nil
}
/////////////////////////////////////////TimeTick//////////////////////////////////////////
type TimeTickMsg struct {
BaseMsg
internalPb.TimeTickMsg
}
func (tst *TimeTickMsg) Type() MsgType {
return tst.MsgType
}
func (tst *TimeTickMsg) Marshal(input TsMsg) ([]byte, error) {
timeTickTask := input.(*TimeTickMsg)
timeTick := &timeTickTask.TimeTickMsg
mb, err := proto.Marshal(timeTick)
if err != nil {
return nil, err
}
return mb, nil
}
func (tst *TimeTickMsg) Unmarshal(input []byte) (TsMsg, error) {
timeTickMsg := internalPb.TimeTickMsg{}
err := proto.Unmarshal(input, &timeTickMsg)
if err != nil {
return nil, err
}
timeTick := &TimeTickMsg{TimeTickMsg: timeTickMsg}
timeTick.BeginTimestamp = timeTick.Timestamp
timeTick.EndTimestamp = timeTick.Timestamp
return timeTick, nil
}
/////////////////////////////////////////QueryNodeSegStats//////////////////////////////////////////
type QueryNodeSegStatsMsg struct {
BaseMsg
internalPb.QueryNodeSegStats
}
func (qs *QueryNodeSegStatsMsg) Type() MsgType {
return qs.MsgType
}
func (qs *QueryNodeSegStatsMsg) Marshal(input TsMsg) ([]byte, error) {
queryNodeSegStatsTask := input.(*QueryNodeSegStatsMsg)
queryNodeSegStats := &queryNodeSegStatsTask.QueryNodeSegStats
mb, err := proto.Marshal(queryNodeSegStats)
if err != nil {
return nil, err
}
return mb, nil
}
func (qs *QueryNodeSegStatsMsg) Unmarshal(input []byte) (TsMsg, error) {
queryNodeSegStats := internalPb.QueryNodeSegStats{}
err := proto.Unmarshal(input, &queryNodeSegStats)
if err != nil {
return nil, err
}
queryNodeSegStatsMsg := &QueryNodeSegStatsMsg{QueryNodeSegStats: queryNodeSegStats}
return queryNodeSegStatsMsg, nil
}
///////////////////////////////////////////Key2Seg//////////////////////////////////////////
//type Key2SegMsg struct {
// BaseMsg
// internalPb.Key2SegMsg
//}
//
//func (k2st *Key2SegMsg) Type() MsgType {
// return
//}
/////////////////////////////////////////CreateCollection//////////////////////////////////////////
type CreateCollectionMsg struct {
BaseMsg
internalPb.CreateCollectionRequest
}
func (cc *CreateCollectionMsg) Type() MsgType {
return cc.MsgType
}
func (cc *CreateCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
createCollectionMsg := input.(*CreateCollectionMsg)
createCollectionRequest := &createCollectionMsg.CreateCollectionRequest
mb, err := proto.Marshal(createCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (cc *CreateCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
createCollectionRequest := internalPb.CreateCollectionRequest{}
err := proto.Unmarshal(input, &createCollectionRequest)
if err != nil {
return nil, err
}
createCollectionMsg := &CreateCollectionMsg{CreateCollectionRequest: createCollectionRequest}
createCollectionMsg.BeginTimestamp = createCollectionMsg.Timestamp
createCollectionMsg.EndTimestamp = createCollectionMsg.Timestamp
return createCollectionMsg, nil
}
/////////////////////////////////////////DropCollection//////////////////////////////////////////
type DropCollectionMsg struct {
BaseMsg
internalPb.DropCollectionRequest
}
func (dc *DropCollectionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DropCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
dropCollectionMsg := input.(*DropCollectionMsg)
dropCollectionRequest := &dropCollectionMsg.DropCollectionRequest
mb, err := proto.Marshal(dropCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DropCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
dropCollectionRequest := internalPb.DropCollectionRequest{}
err := proto.Unmarshal(input, &dropCollectionRequest)
if err != nil {
return nil, err
}
dropCollectionMsg := &DropCollectionMsg{DropCollectionRequest: dropCollectionRequest}
dropCollectionMsg.BeginTimestamp = dropCollectionMsg.Timestamp
dropCollectionMsg.EndTimestamp = dropCollectionMsg.Timestamp
return dropCollectionMsg, nil
}
/////////////////////////////////////////HasCollection//////////////////////////////////////////
type HasCollectionMsg struct {
BaseMsg
internalPb.HasCollectionRequest
}
func (hc *HasCollectionMsg) Type() MsgType {
return hc.MsgType
}
func (hc *HasCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
hasCollectionMsg := input.(*HasCollectionMsg)
hasCollectionRequest := &hasCollectionMsg.HasCollectionRequest
mb, err := proto.Marshal(hasCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (hc *HasCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
hasCollectionRequest := internalPb.HasCollectionRequest{}
err := proto.Unmarshal(input, &hasCollectionRequest)
if err != nil {
return nil, err
}
hasCollectionMsg := &HasCollectionMsg{HasCollectionRequest: hasCollectionRequest}
hasCollectionMsg.BeginTimestamp = hasCollectionMsg.Timestamp
hasCollectionMsg.EndTimestamp = hasCollectionMsg.Timestamp
return hasCollectionMsg, nil
}
/////////////////////////////////////////DescribeCollection//////////////////////////////////////////
type DescribeCollectionMsg struct {
BaseMsg
internalPb.DescribeCollectionRequest
}
func (dc *DescribeCollectionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DescribeCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
describeCollectionMsg := input.(*DescribeCollectionMsg)
describeCollectionRequest := &describeCollectionMsg.DescribeCollectionRequest
mb, err := proto.Marshal(describeCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DescribeCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
describeCollectionRequest := internalPb.DescribeCollectionRequest{}
err := proto.Unmarshal(input, &describeCollectionRequest)
if err != nil {
return nil, err
}
describeCollectionMsg := &DescribeCollectionMsg{DescribeCollectionRequest: describeCollectionRequest}
describeCollectionMsg.BeginTimestamp = describeCollectionMsg.Timestamp
describeCollectionMsg.EndTimestamp = describeCollectionMsg.Timestamp
return describeCollectionMsg, nil
}
/////////////////////////////////////////ShowCollection//////////////////////////////////////////
type ShowCollectionMsg struct {
BaseMsg
internalPb.ShowCollectionRequest
}
func (sc *ShowCollectionMsg) Type() MsgType {
return sc.MsgType
}
func (sc *ShowCollectionMsg) Marshal(input TsMsg) ([]byte, error) {
showCollectionMsg := input.(*ShowCollectionMsg)
showCollectionRequest := &showCollectionMsg.ShowCollectionRequest
mb, err := proto.Marshal(showCollectionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (sc *ShowCollectionMsg) Unmarshal(input []byte) (TsMsg, error) {
showCollectionRequest := internalPb.ShowCollectionRequest{}
err := proto.Unmarshal(input, &showCollectionRequest)
if err != nil {
return nil, err
}
showCollectionMsg := &ShowCollectionMsg{ShowCollectionRequest: showCollectionRequest}
showCollectionMsg.BeginTimestamp = showCollectionMsg.Timestamp
showCollectionMsg.EndTimestamp = showCollectionMsg.Timestamp
return showCollectionMsg, nil
}
/////////////////////////////////////////CreatePartition//////////////////////////////////////////
type CreatePartitionMsg struct {
BaseMsg
internalPb.CreatePartitionRequest
}
func (cc *CreatePartitionMsg) Type() MsgType {
return cc.MsgType
}
func (cc *CreatePartitionMsg) Marshal(input TsMsg) ([]byte, error) {
createPartitionMsg := input.(*CreatePartitionMsg)
createPartitionRequest := &createPartitionMsg.CreatePartitionRequest
mb, err := proto.Marshal(createPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (cc *CreatePartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
createPartitionRequest := internalPb.CreatePartitionRequest{}
err := proto.Unmarshal(input, &createPartitionRequest)
if err != nil {
return nil, err
}
createPartitionMsg := &CreatePartitionMsg{CreatePartitionRequest: createPartitionRequest}
createPartitionMsg.BeginTimestamp = createPartitionMsg.Timestamp
createPartitionMsg.EndTimestamp = createPartitionMsg.Timestamp
return createPartitionMsg, nil
}
/////////////////////////////////////////DropPartition//////////////////////////////////////////
type DropPartitionMsg struct {
BaseMsg
internalPb.DropPartitionRequest
}
func (dc *DropPartitionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DropPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
dropPartitionMsg := input.(*DropPartitionMsg)
dropPartitionRequest := &dropPartitionMsg.DropPartitionRequest
mb, err := proto.Marshal(dropPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DropPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
dropPartitionRequest := internalPb.DropPartitionRequest{}
err := proto.Unmarshal(input, &dropPartitionRequest)
if err != nil {
return nil, err
}
dropPartitionMsg := &DropPartitionMsg{DropPartitionRequest: dropPartitionRequest}
dropPartitionMsg.BeginTimestamp = dropPartitionMsg.Timestamp
dropPartitionMsg.EndTimestamp = dropPartitionMsg.Timestamp
return dropPartitionMsg, nil
}
/////////////////////////////////////////HasPartition//////////////////////////////////////////
type HasPartitionMsg struct {
BaseMsg
internalPb.HasPartitionRequest
}
func (hc *HasPartitionMsg) Type() MsgType {
return hc.MsgType
}
func (hc *HasPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
hasPartitionMsg := input.(*HasPartitionMsg)
hasPartitionRequest := &hasPartitionMsg.HasPartitionRequest
mb, err := proto.Marshal(hasPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (hc *HasPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
hasPartitionRequest := internalPb.HasPartitionRequest{}
err := proto.Unmarshal(input, &hasPartitionRequest)
if err != nil {
return nil, err
}
hasPartitionMsg := &HasPartitionMsg{HasPartitionRequest: hasPartitionRequest}
hasPartitionMsg.BeginTimestamp = hasPartitionMsg.Timestamp
hasPartitionMsg.EndTimestamp = hasPartitionMsg.Timestamp
return hasPartitionMsg, nil
}
/////////////////////////////////////////DescribePartition//////////////////////////////////////////
type DescribePartitionMsg struct {
BaseMsg
internalPb.DescribePartitionRequest
}
func (dc *DescribePartitionMsg) Type() MsgType {
return dc.MsgType
}
func (dc *DescribePartitionMsg) Marshal(input TsMsg) ([]byte, error) {
describePartitionMsg := input.(*DescribePartitionMsg)
describePartitionRequest := &describePartitionMsg.DescribePartitionRequest
mb, err := proto.Marshal(describePartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dc *DescribePartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
describePartitionRequest := internalPb.DescribePartitionRequest{}
err := proto.Unmarshal(input, &describePartitionRequest)
if err != nil {
return nil, err
}
describePartitionMsg := &DescribePartitionMsg{DescribePartitionRequest: describePartitionRequest}
describePartitionMsg.BeginTimestamp = describePartitionMsg.Timestamp
describePartitionMsg.EndTimestamp = describePartitionMsg.Timestamp
return describePartitionMsg, nil
}
/////////////////////////////////////////ShowPartition//////////////////////////////////////////
type ShowPartitionMsg struct {
BaseMsg
internalPb.ShowPartitionRequest
}
func (sc *ShowPartitionMsg) Type() MsgType {
return sc.MsgType
}
func (sc *ShowPartitionMsg) Marshal(input TsMsg) ([]byte, error) {
showPartitionMsg := input.(*ShowPartitionMsg)
showPartitionRequest := &showPartitionMsg.ShowPartitionRequest
mb, err := proto.Marshal(showPartitionRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (sc *ShowPartitionMsg) Unmarshal(input []byte) (TsMsg, error) {
showPartitionRequest := internalPb.ShowPartitionRequest{}
err := proto.Unmarshal(input, &showPartitionRequest)
if err != nil {
return nil, err
}
showPartitionMsg := &ShowPartitionMsg{ShowPartitionRequest: showPartitionRequest}
showPartitionMsg.BeginTimestamp = showPartitionMsg.Timestamp
showPartitionMsg.EndTimestamp = showPartitionMsg.Timestamp
return showPartitionMsg, nil
}
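
Example (illustration only, not part of this change): a minimal round trip through one of the new DDL message types. It assumes the msgstream package path internal/msgstream in this repository; the field values are made up, and only fields used above (MsgType, Timestamp) appear.

package main

import (
    "fmt"
    "log"

    "github.com/zilliztech/milvus-distributed/internal/msgstream"
    internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

func main() {
    // Unmarshal copies the request Timestamp into BeginTimestamp/EndTimestamp,
    // so BeginTs() and EndTs() both report 42 after the round trip.
    msg := &msgstream.CreateCollectionMsg{
        CreateCollectionRequest: internalPb.CreateCollectionRequest{
            MsgType:   internalPb.MsgType_kCreateCollection,
            Timestamp: 42,
        },
    }

    raw, err := msg.Marshal(msg) // serialize through the TsMsg interface
    if err != nil {
        log.Fatal(err)
    }
    out, err := msg.Unmarshal(raw) // decode the payload back into a TsMsg
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(out.Type(), out.BeginTs(), out.EndTs())
}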


@ -1,263 +0,0 @@
package msgstream
import (
"github.com/golang/protobuf/proto"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)
type MsgType = internalPb.MsgType
type TsMsg interface {
BeginTs() Timestamp
EndTs() Timestamp
Type() MsgType
HashKeys() []uint32
Marshal(TsMsg) ([]byte, error)
Unmarshal([]byte) (TsMsg, error)
}
type BaseMsg struct {
BeginTimestamp Timestamp
EndTimestamp Timestamp
HashValues []uint32
}
func (bm *BaseMsg) BeginTs() Timestamp {
return bm.BeginTimestamp
}
func (bm *BaseMsg) EndTs() Timestamp {
return bm.EndTimestamp
}
func (bm *BaseMsg) HashKeys() []uint32 {
return bm.HashValues
}
/////////////////////////////////////////Insert//////////////////////////////////////////
type InsertMsg struct {
BaseMsg
internalPb.InsertRequest
}
func (it *InsertMsg) Type() MsgType {
return it.MsgType
}
func (it *InsertMsg) Marshal(input TsMsg) ([]byte, error) {
insertMsg := input.(*InsertMsg)
insertRequest := &insertMsg.InsertRequest
mb, err := proto.Marshal(insertRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (it *InsertMsg) Unmarshal(input []byte) (TsMsg, error) {
insertRequest := internalPb.InsertRequest{}
err := proto.Unmarshal(input, &insertRequest)
if err != nil {
return nil, err
}
insertMsg := &InsertMsg{InsertRequest: insertRequest}
for _, timestamp := range insertMsg.Timestamps {
insertMsg.BeginTimestamp = timestamp
insertMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range insertMsg.Timestamps {
if timestamp > insertMsg.EndTimestamp {
insertMsg.EndTimestamp = timestamp
}
if timestamp < insertMsg.BeginTimestamp {
insertMsg.BeginTimestamp = timestamp
}
}
return insertMsg, nil
}
/////////////////////////////////////////Delete//////////////////////////////////////////
type DeleteMsg struct {
BaseMsg
internalPb.DeleteRequest
}
func (dt *DeleteMsg) Type() MsgType {
return dt.MsgType
}
func (dt *DeleteMsg) Marshal(input TsMsg) ([]byte, error) {
deleteTask := input.(*DeleteMsg)
deleteRequest := &deleteTask.DeleteRequest
mb, err := proto.Marshal(deleteRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (dt *DeleteMsg) Unmarshal(input []byte) (TsMsg, error) {
deleteRequest := internalPb.DeleteRequest{}
err := proto.Unmarshal(input, &deleteRequest)
if err != nil {
return nil, err
}
deleteMsg := &DeleteMsg{DeleteRequest: deleteRequest}
for _, timestamp := range deleteMsg.Timestamps {
deleteMsg.BeginTimestamp = timestamp
deleteMsg.EndTimestamp = timestamp
break
}
for _, timestamp := range deleteMsg.Timestamps {
if timestamp > deleteMsg.EndTimestamp {
deleteMsg.EndTimestamp = timestamp
}
if timestamp < deleteMsg.BeginTimestamp {
deleteMsg.BeginTimestamp = timestamp
}
}
return deleteMsg, nil
}
/////////////////////////////////////////Search//////////////////////////////////////////
type SearchMsg struct {
BaseMsg
internalPb.SearchRequest
}
func (st *SearchMsg) Type() MsgType {
return st.MsgType
}
func (st *SearchMsg) Marshal(input TsMsg) ([]byte, error) {
searchTask := input.(*SearchMsg)
searchRequest := &searchTask.SearchRequest
mb, err := proto.Marshal(searchRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (st *SearchMsg) Unmarshal(input []byte) (TsMsg, error) {
searchRequest := internalPb.SearchRequest{}
err := proto.Unmarshal(input, &searchRequest)
if err != nil {
return nil, err
}
searchMsg := &SearchMsg{SearchRequest: searchRequest}
searchMsg.BeginTimestamp = searchMsg.Timestamp
searchMsg.EndTimestamp = searchMsg.Timestamp
return searchMsg, nil
}
/////////////////////////////////////////SearchResult//////////////////////////////////////////
type SearchResultMsg struct {
BaseMsg
internalPb.SearchResult
}
func (srt *SearchResultMsg) Type() MsgType {
return srt.MsgType
}
func (srt *SearchResultMsg) Marshal(input TsMsg) ([]byte, error) {
searchResultTask := input.(*SearchResultMsg)
searchResultRequest := &searchResultTask.SearchResult
mb, err := proto.Marshal(searchResultRequest)
if err != nil {
return nil, err
}
return mb, nil
}
func (srt *SearchResultMsg) Unmarshal(input []byte) (TsMsg, error) {
searchResultRequest := internalPb.SearchResult{}
err := proto.Unmarshal(input, &searchResultRequest)
if err != nil {
return nil, err
}
searchResultMsg := &SearchResultMsg{SearchResult: searchResultRequest}
searchResultMsg.BeginTimestamp = searchResultMsg.Timestamp
searchResultMsg.EndTimestamp = searchResultMsg.Timestamp
return searchResultMsg, nil
}
/////////////////////////////////////////TimeTick//////////////////////////////////////////
type TimeTickMsg struct {
BaseMsg
internalPb.TimeTickMsg
}
func (tst *TimeTickMsg) Type() MsgType {
return tst.MsgType
}
func (tst *TimeTickMsg) Marshal(input TsMsg) ([]byte, error) {
timeTickTask := input.(*TimeTickMsg)
timeTick := &timeTickTask.TimeTickMsg
mb, err := proto.Marshal(timeTick)
if err != nil {
return nil, err
}
return mb, nil
}
func (tst *TimeTickMsg) Unmarshal(input []byte) (TsMsg, error) {
timeTickMsg := internalPb.TimeTickMsg{}
err := proto.Unmarshal(input, &timeTickMsg)
if err != nil {
return nil, err
}
timeTick := &TimeTickMsg{TimeTickMsg: timeTickMsg}
timeTick.BeginTimestamp = timeTick.Timestamp
timeTick.EndTimestamp = timeTick.Timestamp
return timeTick, nil
}
/////////////////////////////////////////QueryNodeSegStats//////////////////////////////////////////
type QueryNodeSegStatsMsg struct {
BaseMsg
internalPb.QueryNodeSegStats
}
func (qs *QueryNodeSegStatsMsg) Type() MsgType {
return qs.MsgType
}
func (qs *QueryNodeSegStatsMsg) Marshal(input TsMsg) ([]byte, error) {
queryNodeSegStatsTask := input.(*QueryNodeSegStatsMsg)
queryNodeSegStats := &queryNodeSegStatsTask.QueryNodeSegStats
mb, err := proto.Marshal(queryNodeSegStats)
if err != nil {
return nil, err
}
return mb, nil
}
func (qs *QueryNodeSegStatsMsg) Unmarshal(input []byte) (TsMsg, error) {
queryNodeSegStats := internalPb.QueryNodeSegStats{}
err := proto.Unmarshal(input, &queryNodeSegStats)
if err != nil {
return nil, err
}
queryNodeSegStatsMsg := &QueryNodeSegStatsMsg{QueryNodeSegStats: queryNodeSegStats}
return queryNodeSegStatsMsg, nil
}
///////////////////////////////////////////Key2Seg//////////////////////////////////////////
//type Key2SegMsg struct {
// BaseMsg
// internalPb.Key2SegMsg
//}
//
//func (k2st *Key2SegMsg) Type() MsgType {
// return
//}


@ -30,6 +30,11 @@ func (dispatcher *UnmarshalDispatcher) addDefaultMsgTemplates() {
searchMsg := SearchMsg{}
searchResultMsg := SearchResultMsg{}
timeTickMsg := TimeTickMsg{}
createCollectionMsg := CreateCollectionMsg{}
dropCollectionMsg := DropCollectionMsg{}
createPartitionMsg := CreatePartitionMsg{}
dropPartitionMsg := DropPartitionMsg{}
queryNodeSegStatsMsg := QueryNodeSegStatsMsg{}
dispatcher.tempMap = make(map[internalPb.MsgType]UnmarshalFunc)
dispatcher.tempMap[internalPb.MsgType_kInsert] = insertMsg.Unmarshal
@ -38,6 +43,11 @@ func (dispatcher *UnmarshalDispatcher) addDefaultMsgTemplates() {
dispatcher.tempMap[internalPb.MsgType_kSearchResult] = searchResultMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kTimeTick] = timeTickMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kQueryNodeSegStats] = queryNodeSegStatsMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kCreateCollection] = createCollectionMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kDropCollection] = dropCollectionMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kCreatePartition] = createPartitionMsg.Unmarshal
dispatcher.tempMap[internalPb.MsgType_kDropPartition] = dropPartitionMsg.Unmarshal
}
func NewUnmarshalDispatcher() *UnmarshalDispatcher {
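
Illustration only: the registrations above let a consumer pick the right Unmarshal by the MsgType carried in the payload. The dispatcher's public lookup method is not shown in this hunk, so the map below merely mirrors the pattern; package paths are assumed.

package main

import (
    "fmt"
    "log"

    "github.com/golang/protobuf/proto"
    "github.com/zilliztech/milvus-distributed/internal/msgstream"
    internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

func main() {
    // Register one Unmarshal per MsgType, as addDefaultMsgTemplates does.
    createCollectionMsg := msgstream.CreateCollectionMsg{}
    dropPartitionMsg := msgstream.DropPartitionMsg{}
    table := map[internalPb.MsgType]func([]byte) (msgstream.TsMsg, error){
        internalPb.MsgType_kCreateCollection: createCollectionMsg.Unmarshal,
        internalPb.MsgType_kDropPartition:    dropPartitionMsg.Unmarshal,
    }

    // A raw payload as it would arrive from the message stream.
    payload, err := proto.Marshal(&internalPb.CreateCollectionRequest{
        MsgType:   internalPb.MsgType_kCreateCollection,
        Timestamp: 7,
    })
    if err != nil {
        log.Fatal(err)
    }

    // Dispatch on the message type and decode back into a TsMsg.
    tsMsg, err := table[internalPb.MsgType_kCreateCollection](payload)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(tsMsg.Type(), tsMsg.BeginTs())
}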


@ -14,6 +14,7 @@ enum MsgType {
kHasCollection = 102;
kDescribeCollection = 103;
kShowCollections = 104;
kGetSysConfigs = 105;
/* Definition Requests: partition */
kCreatePartition = 200;
@ -33,6 +34,7 @@ enum MsgType {
/* System Control */
kTimeTick = 1200;
kQueryNodeSegStats = 1201;
}
enum PeerRole {
@ -223,6 +225,19 @@ message SearchRequest {
}
/**
 * @brief Request of GetSysConfigs
*/
message SysConfigRequest {
MsgType msg_type = 1;
int64 reqID = 2;
int64 proxyID = 3;
uint64 timestamp = 4;
repeated string keys = 5;
repeated string key_prefixes = 6;
}
message SearchResult {
MsgType msg_type = 1;
common.Status status = 2;
@ -266,4 +281,4 @@ message QueryNodeSegStats {
MsgType msg_type = 1;
int64 peerID = 2;
repeated SegmentStats seg_stats = 3;
}
}
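
Illustration only: SysConfigRequest carries two lookup modes, exact keys and key prefixes. A Go sketch of building such a request (the key strings are hypothetical):

package main

import (
    "fmt"

    internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

func main() {
    req := &internalPb.SysConfigRequest{
        MsgType:     internalPb.MsgType_kGetSysConfigs,
        ReqID:       1,
        ProxyID:     0,
        Timestamp:   0,
        Keys:        []string{"master.port"}, // fetch these keys exactly (hypothetical key)
        KeyPrefixes: []string{"pulsar."},     // plus every key under this prefix (hypothetical)
    }
    fmt.Println(req.GetKeys(), req.GetKeyPrefixes())
}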


@ -32,6 +32,7 @@ const (
MsgType_kHasCollection MsgType = 102
MsgType_kDescribeCollection MsgType = 103
MsgType_kShowCollections MsgType = 104
MsgType_kGetSysConfigs MsgType = 105
// Definition Requests: partition
MsgType_kCreatePartition MsgType = 200
MsgType_kDropPartition MsgType = 201
@ -56,6 +57,7 @@ var MsgType_name = map[int32]string{
102: "kHasCollection",
103: "kDescribeCollection",
104: "kShowCollections",
105: "kGetSysConfigs",
200: "kCreatePartition",
201: "kDropPartition",
202: "kHasPartition",
@ -76,6 +78,7 @@ var MsgType_value = map[string]int32{
"kHasCollection": 102,
"kDescribeCollection": 103,
"kShowCollections": 104,
"kGetSysConfigs": 105,
"kCreatePartition": 200,
"kDropPartition": 201,
"kHasPartition": 202,
@ -1579,6 +1582,87 @@ func (m *SearchRequest) GetQuery() *commonpb.Blob {
return nil
}
//*
// @brief Request of GetSysConfigs
type SysConfigRequest struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
ReqID int64 `protobuf:"varint,2,opt,name=reqID,proto3" json:"reqID,omitempty"`
ProxyID int64 `protobuf:"varint,3,opt,name=proxyID,proto3" json:"proxyID,omitempty"`
Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
Keys []string `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
KeyPrefixes []string `protobuf:"bytes,6,rep,name=key_prefixes,json=keyPrefixes,proto3" json:"key_prefixes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SysConfigRequest) Reset() { *m = SysConfigRequest{} }
func (m *SysConfigRequest) String() string { return proto.CompactTextString(m) }
func (*SysConfigRequest) ProtoMessage() {}
func (*SysConfigRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{21}
}
func (m *SysConfigRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SysConfigRequest.Unmarshal(m, b)
}
func (m *SysConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SysConfigRequest.Marshal(b, m, deterministic)
}
func (m *SysConfigRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SysConfigRequest.Merge(m, src)
}
func (m *SysConfigRequest) XXX_Size() int {
return xxx_messageInfo_SysConfigRequest.Size(m)
}
func (m *SysConfigRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SysConfigRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SysConfigRequest proto.InternalMessageInfo
func (m *SysConfigRequest) GetMsgType() MsgType {
if m != nil {
return m.MsgType
}
return MsgType_kNone
}
func (m *SysConfigRequest) GetReqID() int64 {
if m != nil {
return m.ReqID
}
return 0
}
func (m *SysConfigRequest) GetProxyID() int64 {
if m != nil {
return m.ProxyID
}
return 0
}
func (m *SysConfigRequest) GetTimestamp() uint64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *SysConfigRequest) GetKeys() []string {
if m != nil {
return m.Keys
}
return nil
}
func (m *SysConfigRequest) GetKeyPrefixes() []string {
if m != nil {
return m.KeyPrefixes
}
return nil
}
type SearchResult struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
Status *commonpb.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
@ -1597,7 +1681,7 @@ func (m *SearchResult) Reset() { *m = SearchResult{} }
func (m *SearchResult) String() string { return proto.CompactTextString(m) }
func (*SearchResult) ProtoMessage() {}
func (*SearchResult) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{21}
return fileDescriptor_7eb37f6b80b23116, []int{22}
}
func (m *SearchResult) XXX_Unmarshal(b []byte) error {
@ -1687,7 +1771,7 @@ func (m *TimeTickMsg) Reset() { *m = TimeTickMsg{} }
func (m *TimeTickMsg) String() string { return proto.CompactTextString(m) }
func (*TimeTickMsg) ProtoMessage() {}
func (*TimeTickMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{22}
return fileDescriptor_7eb37f6b80b23116, []int{23}
}
func (m *TimeTickMsg) XXX_Unmarshal(b []byte) error {
@ -1744,7 +1828,7 @@ func (m *Key2Seg) Reset() { *m = Key2Seg{} }
func (m *Key2Seg) String() string { return proto.CompactTextString(m) }
func (*Key2Seg) ProtoMessage() {}
func (*Key2Seg) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{23}
return fileDescriptor_7eb37f6b80b23116, []int{24}
}
func (m *Key2Seg) XXX_Unmarshal(b []byte) error {
@ -1812,7 +1896,7 @@ func (m *Key2SegMsg) Reset() { *m = Key2SegMsg{} }
func (m *Key2SegMsg) String() string { return proto.CompactTextString(m) }
func (*Key2SegMsg) ProtoMessage() {}
func (*Key2SegMsg) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{24}
return fileDescriptor_7eb37f6b80b23116, []int{25}
}
func (m *Key2SegMsg) XXX_Unmarshal(b []byte) error {
@ -1861,7 +1945,7 @@ func (m *SegmentStats) Reset() { *m = SegmentStats{} }
func (m *SegmentStats) String() string { return proto.CompactTextString(m) }
func (*SegmentStats) ProtoMessage() {}
func (*SegmentStats) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{25}
return fileDescriptor_7eb37f6b80b23116, []int{26}
}
func (m *SegmentStats) XXX_Unmarshal(b []byte) error {
@ -1923,7 +2007,7 @@ func (m *QueryNodeSegStats) Reset() { *m = QueryNodeSegStats{} }
func (m *QueryNodeSegStats) String() string { return proto.CompactTextString(m) }
func (*QueryNodeSegStats) ProtoMessage() {}
func (*QueryNodeSegStats) Descriptor() ([]byte, []int) {
return fileDescriptor_7eb37f6b80b23116, []int{26}
return fileDescriptor_7eb37f6b80b23116, []int{27}
}
func (m *QueryNodeSegStats) XXX_Unmarshal(b []byte) error {
@ -1989,6 +2073,7 @@ func init() {
proto.RegisterType((*InsertRequest)(nil), "milvus.proto.internal.InsertRequest")
proto.RegisterType((*DeleteRequest)(nil), "milvus.proto.internal.DeleteRequest")
proto.RegisterType((*SearchRequest)(nil), "milvus.proto.internal.SearchRequest")
proto.RegisterType((*SysConfigRequest)(nil), "milvus.proto.internal.SysConfigRequest")
proto.RegisterType((*SearchResult)(nil), "milvus.proto.internal.SearchResult")
proto.RegisterType((*TimeTickMsg)(nil), "milvus.proto.internal.TimeTickMsg")
proto.RegisterType((*Key2Seg)(nil), "milvus.proto.internal.Key2Seg")
@ -2000,94 +2085,98 @@ func init() {
func init() { proto.RegisterFile("internal_msg.proto", fileDescriptor_7eb37f6b80b23116) }
var fileDescriptor_7eb37f6b80b23116 = []byte{
// 1416 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0x4b, 0x6f, 0x1c, 0xc5,
0x13, 0x4f, 0xef, 0xac, 0xf7, 0x51, 0x6b, 0xaf, 0xc7, 0x6d, 0x3b, 0xd9, 0x24, 0x7f, 0x25, 0xce,
0xe4, 0x2f, 0x62, 0x82, 0xb0, 0x85, 0xc3, 0x81, 0xdc, 0x20, 0xde, 0x43, 0x96, 0xc8, 0x51, 0x18,
0x5b, 0x20, 0xa1, 0x48, 0xa3, 0xf1, 0x6e, 0x65, 0x77, 0x34, 0x4f, 0x77, 0xcf, 0xda, 0x59, 0x1f,
0x38, 0xe5, 0x03, 0xc0, 0x81, 0x03, 0x07, 0x24, 0x8e, 0x9c, 0x22, 0xf8, 0x16, 0xbc, 0xae, 0x1c,
0xf8, 0x0a, 0x20, 0x88, 0x04, 0xe1, 0x8e, 0xba, 0x7b, 0x1e, 0x3b, 0x7e, 0x46, 0x4a, 0x0c, 0x96,
0x7c, 0x9b, 0xaa, 0xe9, 0xe9, 0xaa, 0xfa, 0xfd, 0xaa, 0x6b, 0xaa, 0x1a, 0xa8, 0x13, 0xc4, 0xc8,
0x02, 0xdb, 0xb3, 0x7c, 0xde, 0x5f, 0x8a, 0x58, 0x18, 0x87, 0x74, 0xde, 0x77, 0xbc, 0xed, 0x21,
0x57, 0xd2, 0x52, 0xba, 0xe0, 0xd2, 0x64, 0x37, 0xf4, 0xfd, 0x30, 0x50, 0xea, 0x4b, 0x33, 0x1c,
0xd9, 0xb6, 0xd3, 0xc5, 0xfc, 0x3b, 0x23, 0x80, 0x7a, 0xa7, 0x6d, 0xe2, 0xd6, 0x10, 0x79, 0x4c,
0xcf, 0x43, 0x25, 0x42, 0x64, 0x9d, 0x76, 0x8b, 0x2c, 0x90, 0x45, 0xcd, 0x4c, 0x24, 0x7a, 0x0b,
0xca, 0x2c, 0xf4, 0xb0, 0x55, 0x5a, 0x20, 0x8b, 0xcd, 0x95, 0xab, 0x4b, 0x07, 0xda, 0x5a, 0x7a,
0x80, 0xc8, 0xcc, 0xd0, 0x43, 0x53, 0x2e, 0xa6, 0x73, 0x30, 0xd1, 0x0d, 0x87, 0x41, 0xdc, 0xd2,
0x16, 0xc8, 0xe2, 0x94, 0xa9, 0x04, 0xa3, 0x0f, 0x20, 0xec, 0xf1, 0x28, 0x0c, 0x38, 0xd2, 0x5b,
0x50, 0xe1, 0xb1, 0x1d, 0x0f, 0xb9, 0x34, 0xd8, 0x58, 0xb9, 0x5c, 0xdc, 0x3a, 0x71, 0x7e, 0x5d,
0x2e, 0x31, 0x93, 0xa5, 0xb4, 0x09, 0xa5, 0x4e, 0x5b, 0xfa, 0xa2, 0x99, 0xa5, 0x4e, 0xfb, 0x10,
0x43, 0x21, 0xc0, 0x06, 0x0f, 0xff, 0xc5, 0xc8, 0xb6, 0xa1, 0x21, 0x0d, 0xbe, 0x4c, 0x68, 0xff,
0x83, 0x7a, 0xec, 0xf8, 0xc8, 0x63, 0xdb, 0x8f, 0xa4, 0x4f, 0x65, 0x33, 0x57, 0x1c, 0x62, 0xf7,
0x09, 0x81, 0xc9, 0x75, 0xec, 0xe7, 0x2c, 0x66, 0xcb, 0xc8, 0xd8, 0x32, 0xb1, 0x75, 0x77, 0x60,
0x07, 0x01, 0x7a, 0x09, 0x78, 0x13, 0x66, 0xae, 0xa0, 0x97, 0xa1, 0xde, 0x0d, 0x3d, 0xcf, 0x0a,
0x6c, 0x1f, 0xe5, 0xf6, 0x75, 0xb3, 0x26, 0x14, 0xf7, 0x6d, 0x1f, 0xe9, 0x75, 0x98, 0x8a, 0x6c,
0x16, 0x3b, 0xb1, 0x13, 0x06, 0x56, 0x6c, 0xf7, 0x5b, 0x65, 0xb9, 0x60, 0x32, 0x53, 0x6e, 0xd8,
0x7d, 0xe3, 0x29, 0x01, 0xfa, 0x1e, 0xe7, 0x4e, 0x3f, 0x28, 0x38, 0xf3, 0x4a, 0x81, 0xbf, 0x07,
0xd3, 0x11, 0x32, 0x2b, 0x71, 0xdb, 0x62, 0xb8, 0xd5, 0xd2, 0x16, 0xb4, 0xc5, 0xc6, 0xca, 0xf5,
0x43, 0xbe, 0x1f, 0x77, 0xc5, 0x9c, 0x8a, 0x90, 0xad, 0xaa, 0x4f, 0x4d, 0xdc, 0x32, 0xbe, 0x24,
0x30, 0x2d, 0xdf, 0x2b, 0xaf, 0x7d, 0x0c, 0x24, 0x74, 0x5c, 0xa8, 0x12, 0x67, 0x95, 0x70, 0x0c,
0x74, 0x07, 0xb2, 0x52, 0x04, 0xb4, 0x7c, 0x1c, 0xa0, 0x13, 0x07, 0x00, 0xfa, 0x8c, 0xc0, 0x6c,
0x01, 0xd0, 0x93, 0x4b, 0xac, 0x1b, 0x30, 0x8d, 0x8f, 0x23, 0x87, 0xa1, 0xd5, 0x1b, 0x32, 0x5b,
0x38, 0x20, 0x83, 0x29, 0x9b, 0x4d, 0xa5, 0x6e, 0x27, 0x5a, 0xfa, 0x10, 0xce, 0x8f, 0x13, 0x60,
0x67, 0xc8, 0xb5, 0xca, 0x92, 0x87, 0xd7, 0x8e, 0xe2, 0x21, 0xc7, 0xd9, 0x9c, 0xcb, 0xa9, 0xc8,
0xb5, 0xc6, 0xcf, 0x04, 0x2e, 0xac, 0x32, 0xb4, 0x63, 0x5c, 0x0d, 0x3d, 0x0f, 0xbb, 0xc2, 0x64,
0x9a, 0x47, 0xb7, 0xa1, 0xe6, 0xf3, 0xbe, 0x15, 0x8f, 0x22, 0x94, 0x71, 0x37, 0x57, 0xae, 0x1c,
0x62, 0x6b, 0x8d, 0xf7, 0x37, 0x46, 0x11, 0x9a, 0x55, 0x5f, 0x3d, 0x08, 0x82, 0x18, 0x6e, 0x65,
0x25, 0x43, 0x09, 0x45, 0x44, 0xb4, 0xbd, 0x88, 0xb4, 0xa0, 0x1a, 0xb1, 0xf0, 0xf1, 0xa8, 0xd3,
0x96, 0xe4, 0x69, 0x66, 0x2a, 0xd2, 0xb7, 0xa0, 0xc2, 0xbb, 0x03, 0xf4, 0x6d, 0x49, 0x5a, 0x63,
0xe5, 0xe2, 0x81, 0xf0, 0xdf, 0xf1, 0xc2, 0x4d, 0x33, 0x59, 0x28, 0x98, 0x9c, 0x6f, 0xb3, 0x30,
0x3a, 0xc5, 0x51, 0xad, 0xc1, 0x74, 0x37, 0xf3, 0x4e, 0x25, 0xad, 0x0a, 0xef, 0xff, 0x45, 0x7f,
0x92, 0x1f, 0xc8, 0x52, 0x1e, 0x8a, 0x48, 0x68, 0xb3, 0xd9, 0x2d, 0xc8, 0xc6, 0x1f, 0x04, 0xe6,
0xee, 0xda, 0xfc, 0xec, 0x04, 0xfc, 0x17, 0x81, 0x8b, 0x6d, 0xe4, 0x5d, 0xe6, 0x6c, 0xe2, 0xd9,
0x89, 0xfa, 0x2b, 0x02, 0xf3, 0xeb, 0x83, 0x70, 0xe7, 0xf4, 0x46, 0x6c, 0xfc, 0x4e, 0xe0, 0xbc,
0xaa, 0x29, 0x0f, 0xd2, 0xe2, 0x7a, 0xea, 0x58, 0x79, 0x1f, 0x9a, 0xf9, 0xef, 0x60, 0x8c, 0x94,
0xeb, 0x07, 0x93, 0x92, 0x05, 0x22, 0x39, 0xc9, 0xff, 0x24, 0x92, 0x92, 0xdf, 0x08, 0xcc, 0x89,
0x5a, 0x73, 0x36, 0xa2, 0xfd, 0x95, 0xc0, 0xec, 0x5d, 0x9b, 0x9f, 0x8d, 0x60, 0x9f, 0x11, 0x68,
0xa5, 0x35, 0xe6, 0x6c, 0x44, 0x2c, 0x7e, 0x23, 0xa2, 0xbe, 0x9c, 0xde, 0x68, 0x5f, 0x71, 0x41,
0xfd, 0xb3, 0x04, 0x53, 0x9d, 0x80, 0x23, 0x8b, 0x4f, 0x2c, 0xd2, 0x1b, 0xfb, 0x3d, 0x56, 0xfd,
0xfe, 0x1e, 0x5f, 0x5e, 0xa8, 0xeb, 0x17, 0xb8, 0x71, 0xec, 0x8b, 0xee, 0xad, 0xd3, 0x96, 0x91,
0x6b, 0x66, 0xae, 0x28, 0x36, 0xce, 0x15, 0xf5, 0x36, 0x6f, 0x9c, 0xc7, 0x50, 0xad, 0x16, 0x51,
0xbd, 0x02, 0x90, 0x81, 0xcf, 0x5b, 0xb5, 0x05, 0x6d, 0xb1, 0x6c, 0x8e, 0x69, 0xc4, 0x50, 0xc1,
0xc2, 0x9d, 0x4e, 0x9b, 0xb7, 0xea, 0x0b, 0x9a, 0x18, 0x2a, 0x94, 0x44, 0xdf, 0x86, 0x1a, 0x0b,
0x77, 0xac, 0x9e, 0x1d, 0xdb, 0x2d, 0x90, 0x0d, 0xe9, 0x11, 0xdd, 0x59, 0x95, 0x85, 0x3b, 0x6d,
0x3b, 0xb6, 0x8d, 0x27, 0x25, 0x98, 0x6a, 0xa3, 0x87, 0x31, 0xfe, 0xf7, 0xa0, 0x17, 0x10, 0x2b,
0x1f, 0x81, 0xd8, 0xc4, 0x51, 0x88, 0x55, 0xf6, 0x21, 0x76, 0x0d, 0x26, 0x23, 0xe6, 0xf8, 0x36,
0x1b, 0x59, 0x2e, 0x8e, 0x78, 0xab, 0x2a, 0x71, 0x6b, 0x24, 0xba, 0x7b, 0x38, 0xe2, 0xc6, 0x73,
0x02, 0x53, 0xeb, 0x68, 0xb3, 0xee, 0xe0, 0xc4, 0x60, 0x18, 0xf3, 0x5f, 0x2b, 0xfa, 0x5f, 0x38,
0x7f, 0xe5, 0xbd, 0xe7, 0xef, 0x75, 0xd0, 0x19, 0xf2, 0xa1, 0x17, 0x5b, 0x39, 0x38, 0x0a, 0x80,
0x69, 0xa5, 0x5f, 0xcd, 0x20, 0x5a, 0x86, 0x89, 0xad, 0x21, 0xb2, 0x91, 0x4c, 0xb7, 0x23, 0xf9,
0x57, 0xeb, 0x8c, 0xa7, 0x25, 0x31, 0x3e, 0xab, 0xb0, 0xc5, 0x56, 0x2f, 0x13, 0x75, 0x3e, 0x9a,
0x95, 0x5e, 0x7c, 0x34, 0xcb, 0xa0, 0xd2, 0x0e, 0x81, 0x6a, 0x4f, 0xc9, 0xb9, 0x06, 0x93, 0xd2,
0x73, 0x2b, 0x08, 0x7b, 0x98, 0x01, 0xd1, 0x90, 0xba, 0xfb, 0x52, 0x55, 0x44, 0xb3, 0xf2, 0x22,
0x68, 0x56, 0x0f, 0x46, 0x93, 0x42, 0x79, 0xe0, 0xc4, 0xea, 0x08, 0x4e, 0x9a, 0xf2, 0xd9, 0xf8,
0x04, 0x1a, 0x1b, 0x8e, 0x8f, 0x1b, 0x4e, 0xd7, 0x5d, 0xe3, 0xfd, 0x97, 0x81, 0x2b, 0xbf, 0x1b,
0x28, 0x15, 0xee, 0x06, 0x8e, 0x2c, 0xc6, 0xc6, 0x17, 0x04, 0xaa, 0xf7, 0x70, 0xb4, 0xb2, 0x8e,
0x7d, 0x89, 0x9d, 0x38, 0xfa, 0xe9, 0xbc, 0x2e, 0x05, 0x7a, 0x15, 0x1a, 0x63, 0xc9, 0x9e, 0x6c,
0x0e, 0x79, 0xae, 0x1f, 0x53, 0xed, 0x2f, 0x42, 0xcd, 0xe1, 0xd6, 0xb6, 0xed, 0x39, 0x3d, 0x89,
0x7d, 0xcd, 0xac, 0x3a, 0xfc, 0x43, 0x21, 0x8a, 0x63, 0x96, 0x55, 0x37, 0xde, 0x9a, 0x90, 0x87,
0x68, 0x4c, 0x63, 0x3c, 0x04, 0x48, 0x5c, 0x13, 0xd0, 0x64, 0xcc, 0x92, 0x71, 0x66, 0xdf, 0x81,
0xaa, 0x8b, 0xa3, 0x15, 0x8e, 0xfd, 0x56, 0x49, 0xd6, 0xa8, 0xc3, 0xf0, 0x4a, 0x76, 0x32, 0xd3,
0xe5, 0xc6, 0xe7, 0xea, 0xa6, 0x47, 0x18, 0x13, 0x39, 0xc4, 0x8b, 0xd5, 0x97, 0xec, 0xad, 0xbe,
0x57, 0xa1, 0xe1, 0xa3, 0x1f, 0xb2, 0x91, 0xc5, 0x9d, 0x5d, 0x4c, 0x61, 0x50, 0xaa, 0x75, 0x67,
0x17, 0x45, 0xa0, 0xc1, 0xd0, 0xb7, 0x58, 0xb8, 0xc3, 0xd3, 0xf3, 0x18, 0x0c, 0x7d, 0x33, 0xdc,
0xe1, 0xf4, 0x0d, 0x98, 0x61, 0xd8, 0xc5, 0x20, 0xf6, 0x46, 0x96, 0x1f, 0xf6, 0x9c, 0x47, 0x0e,
0xa6, 0x60, 0xe8, 0xe9, 0x8b, 0xb5, 0x44, 0x6f, 0x7c, 0x4d, 0x60, 0xe6, 0x83, 0x34, 0xfd, 0xd6,
0xb1, 0xaf, 0x9c, 0x3b, 0x81, 0xc4, 0x78, 0x57, 0xc6, 0x6b, 0x89, 0x83, 0xc3, 0x8f, 0xbf, 0xf9,
0xc9, 0x70, 0x32, 0x6b, 0x3c, 0x71, 0xea, 0xe6, 0x2f, 0x25, 0xa8, 0x26, 0xe6, 0x68, 0x1d, 0x26,
0xdc, 0xfb, 0x61, 0x80, 0xfa, 0x39, 0x3a, 0x0f, 0x33, 0xee, 0xde, 0x9b, 0x07, 0xbd, 0x47, 0x67,
0x61, 0xda, 0x2d, 0x0e, 0xee, 0x3a, 0x52, 0x0a, 0x4d, 0xb7, 0x30, 0xdb, 0xea, 0x8f, 0xe8, 0x05,
0x98, 0x75, 0xf7, 0x8f, 0x7f, 0xba, 0x48, 0x01, 0xdd, 0x2d, 0x4e, 0x48, 0x5c, 0x1f, 0xd0, 0x79,
0xd0, 0xdd, 0x3d, 0x43, 0x89, 0xfe, 0x1d, 0xa1, 0xb3, 0xd0, 0x74, 0x0b, 0xbd, 0xbb, 0xfe, 0x3d,
0xa1, 0x14, 0xa6, 0xdc, 0xf1, 0x16, 0x57, 0xff, 0x81, 0xd0, 0x0b, 0x40, 0xdd, 0x7d, 0x9d, 0xa0,
0xfe, 0x23, 0xa1, 0x73, 0x30, 0xed, 0x16, 0x1a, 0x26, 0xae, 0xff, 0x44, 0xe8, 0x24, 0x54, 0x5d,
0xd5, 0x55, 0xe8, 0x9f, 0x6a, 0x52, 0x52, 0xbf, 0x3b, 0xfd, 0x33, 0x25, 0xa9, 0xf2, 0xa7, 0x3f,
0xd7, 0xa4, 0xb1, 0xf1, 0x62, 0xa8, 0xff, 0xad, 0xd1, 0x26, 0xd4, 0xdd, 0xf4, 0xc0, 0xeb, 0xdf,
0xd4, 0xa5, 0xf1, 0x7d, 0x6c, 0xeb, 0xdf, 0xd6, 0x6f, 0xde, 0x86, 0x5a, 0x7a, 0x5f, 0x47, 0x01,
0x2a, 0x6b, 0x36, 0x8f, 0x91, 0xe9, 0xe7, 0xc4, 0xb3, 0x89, 0x76, 0x0f, 0x99, 0x4e, 0xc4, 0xf3,
0x47, 0xcc, 0x11, 0xfa, 0x92, 0xc0, 0xff, 0x81, 0xa8, 0x69, 0xba, 0x76, 0xa7, 0xfd, 0xf1, 0x9d,
0xbe, 0x13, 0x0f, 0x86, 0x9b, 0xa2, 0x46, 0x2e, 0xef, 0x3a, 0x9e, 0xe7, 0xec, 0xc6, 0xd8, 0x1d,
0x2c, 0x2b, 0x72, 0xdf, 0xec, 0x39, 0x3c, 0x66, 0xce, 0xe6, 0x30, 0xc6, 0xde, 0x72, 0x4a, 0xf1,
0xb2, 0x64, 0x3c, 0x13, 0xa3, 0xcd, 0xcd, 0x8a, 0xd4, 0xdc, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff,
0x56, 0x57, 0x32, 0x28, 0x20, 0x17, 0x00, 0x00,
// 1474 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x58, 0x4b, 0x6f, 0x1b, 0x47,
0x12, 0xf6, 0x70, 0x28, 0x3e, 0x8a, 0x14, 0x35, 0x6a, 0x49, 0x36, 0x6d, 0x2f, 0x6c, 0x79, 0xbc,
0x58, 0x6b, 0xbd, 0x58, 0x09, 0x2b, 0xef, 0x61, 0x7d, 0xdb, 0xb5, 0x08, 0xac, 0xb9, 0x86, 0x0c,
0xed, 0x50, 0x48, 0x80, 0xc0, 0xc0, 0x60, 0x44, 0x96, 0x86, 0x83, 0x79, 0xaa, 0x7b, 0x28, 0x99,
0x3a, 0xe4, 0xe4, 0x1f, 0x90, 0x1c, 0x72, 0xc8, 0x21, 0x40, 0x8e, 0x39, 0x19, 0xc9, 0xbf, 0xc8,
0xeb, 0x14, 0x20, 0x7f, 0x22, 0x81, 0x63, 0x20, 0x71, 0xee, 0x41, 0x77, 0xcf, 0x83, 0xa3, 0xa7,
0x01, 0x5b, 0x89, 0x00, 0xdd, 0xba, 0x6a, 0x7a, 0xba, 0xaa, 0xbe, 0xaf, 0xba, 0xba, 0xba, 0x81,
0x38, 0x41, 0x8c, 0x34, 0xb0, 0x3c, 0xd3, 0x67, 0xf6, 0x72, 0x44, 0xc3, 0x38, 0x24, 0x0b, 0xbe,
0xe3, 0xed, 0x8e, 0x98, 0x94, 0x96, 0xd3, 0x09, 0xd7, 0x9a, 0xfd, 0xd0, 0xf7, 0xc3, 0x40, 0xaa,
0xaf, 0xcd, 0x32, 0xa4, 0xbb, 0x4e, 0x1f, 0xf3, 0xff, 0xf4, 0x00, 0xea, 0xdd, 0x8e, 0x81, 0x3b,
0x23, 0x64, 0x31, 0xb9, 0x0c, 0x95, 0x08, 0x91, 0x76, 0x3b, 0x6d, 0x65, 0x51, 0x59, 0x52, 0x8d,
0x44, 0x22, 0xf7, 0xa0, 0x4c, 0x43, 0x0f, 0xdb, 0xa5, 0x45, 0x65, 0xa9, 0xb5, 0x7a, 0x73, 0xf9,
0x48, 0x5b, 0xcb, 0x1b, 0x88, 0xd4, 0x08, 0x3d, 0x34, 0xc4, 0x64, 0x32, 0x0f, 0x53, 0xfd, 0x70,
0x14, 0xc4, 0x6d, 0x75, 0x51, 0x59, 0x9a, 0x36, 0xa4, 0xa0, 0xdb, 0x00, 0xdc, 0x1e, 0x8b, 0xc2,
0x80, 0x21, 0xb9, 0x07, 0x15, 0x16, 0x5b, 0xf1, 0x88, 0x09, 0x83, 0x8d, 0xd5, 0xeb, 0xc5, 0xa5,
0x13, 0xe7, 0x7b, 0x62, 0x8a, 0x91, 0x4c, 0x25, 0x2d, 0x28, 0x75, 0x3b, 0xc2, 0x17, 0xd5, 0x28,
0x75, 0x3b, 0xc7, 0x18, 0x0a, 0x01, 0x36, 0x59, 0xf8, 0x3b, 0x46, 0xb6, 0x0b, 0x0d, 0x61, 0xf0,
0x4d, 0x42, 0xfb, 0x13, 0xd4, 0x63, 0xc7, 0x47, 0x16, 0x5b, 0x7e, 0x24, 0x7c, 0x2a, 0x1b, 0xb9,
0xe2, 0x18, 0xbb, 0xcf, 0x14, 0x68, 0xf6, 0xd0, 0xce, 0x59, 0xcc, 0xa6, 0x29, 0x13, 0xd3, 0xf8,
0xd2, 0xfd, 0xa1, 0x15, 0x04, 0xe8, 0x25, 0xe0, 0x4d, 0x19, 0xb9, 0x82, 0x5c, 0x87, 0x7a, 0x3f,
0xf4, 0x3c, 0x33, 0xb0, 0x7c, 0x14, 0xcb, 0xd7, 0x8d, 0x1a, 0x57, 0x3c, 0xb6, 0x7c, 0x24, 0xb7,
0x61, 0x3a, 0xb2, 0x68, 0xec, 0xc4, 0x4e, 0x18, 0x98, 0xb1, 0x65, 0xb7, 0xcb, 0x62, 0x42, 0x33,
0x53, 0x6e, 0x5a, 0xb6, 0xfe, 0x5c, 0x01, 0xf2, 0x1f, 0xc6, 0x1c, 0x3b, 0x28, 0x38, 0xf3, 0x56,
0x81, 0x7f, 0x04, 0x33, 0x11, 0x52, 0x33, 0x71, 0xdb, 0xa4, 0xb8, 0xd3, 0x56, 0x17, 0xd5, 0xa5,
0xc6, 0xea, 0xed, 0x63, 0xfe, 0x9f, 0x74, 0xc5, 0x98, 0x8e, 0x90, 0xae, 0xc9, 0x5f, 0x0d, 0xdc,
0xd1, 0x3f, 0x51, 0x60, 0x46, 0x7c, 0x97, 0x5e, 0xfb, 0x18, 0x08, 0xe8, 0x18, 0x57, 0x25, 0xce,
0x4a, 0xe1, 0x14, 0xe8, 0x8e, 0x64, 0xa5, 0x08, 0x68, 0xf9, 0x34, 0x40, 0xa7, 0x8e, 0x00, 0xf4,
0xa5, 0x02, 0x73, 0x05, 0x40, 0xcf, 0x2e, 0xb1, 0xee, 0xc0, 0x0c, 0x3e, 0x8d, 0x1c, 0x8a, 0xe6,
0x60, 0x44, 0x2d, 0xee, 0x80, 0x08, 0xa6, 0x6c, 0xb4, 0xa4, 0xba, 0x93, 0x68, 0xc9, 0x13, 0xb8,
0x3c, 0x49, 0x80, 0x95, 0x21, 0xd7, 0x2e, 0x0b, 0x1e, 0xfe, 0x72, 0x12, 0x0f, 0x39, 0xce, 0xc6,
0x7c, 0x4e, 0x45, 0xae, 0xd5, 0xbf, 0x57, 0xe0, 0xca, 0x1a, 0x45, 0x2b, 0xc6, 0xb5, 0xd0, 0xf3,
0xb0, 0xcf, 0x4d, 0xa6, 0x79, 0x74, 0x1f, 0x6a, 0x3e, 0xb3, 0xcd, 0x78, 0x1c, 0xa1, 0x88, 0xbb,
0xb5, 0x7a, 0xe3, 0x18, 0x5b, 0xeb, 0xcc, 0xde, 0x1c, 0x47, 0x68, 0x54, 0x7d, 0x39, 0xe0, 0x04,
0x51, 0xdc, 0xc9, 0x4a, 0x86, 0x14, 0x8a, 0x88, 0xa8, 0x07, 0x11, 0x69, 0x43, 0x35, 0xa2, 0xe1,
0xd3, 0x71, 0xb7, 0x23, 0xc8, 0x53, 0x8d, 0x54, 0x24, 0xff, 0x80, 0x0a, 0xeb, 0x0f, 0xd1, 0xb7,
0x04, 0x69, 0x8d, 0xd5, 0xab, 0x47, 0xc2, 0xff, 0xc0, 0x0b, 0xb7, 0x8c, 0x64, 0x22, 0x67, 0x72,
0xa1, 0x43, 0xc3, 0xe8, 0x1c, 0x47, 0xb5, 0x0e, 0x33, 0xfd, 0xcc, 0x3b, 0x99, 0xb4, 0x32, 0xbc,
0x3f, 0x17, 0xfd, 0x49, 0x0e, 0x90, 0xe5, 0x3c, 0x14, 0x9e, 0xd0, 0x46, 0xab, 0x5f, 0x90, 0xf5,
0x9f, 0x14, 0x98, 0x7f, 0x68, 0xb1, 0x8b, 0x13, 0xf0, 0x2f, 0x0a, 0x5c, 0xed, 0x20, 0xeb, 0x53,
0x67, 0x0b, 0x2f, 0x4e, 0xd4, 0x9f, 0x2a, 0xb0, 0xd0, 0x1b, 0x86, 0x7b, 0xe7, 0x37, 0x62, 0xfd,
0x85, 0x02, 0x97, 0x65, 0x4d, 0xd9, 0x48, 0x8b, 0xeb, 0xb9, 0x63, 0xe5, 0x7f, 0xd0, 0xca, 0x8f,
0x83, 0x09, 0x52, 0x6e, 0x1f, 0x4d, 0x4a, 0x16, 0x88, 0xe0, 0x24, 0x3f, 0x49, 0x04, 0x25, 0x3f,
0x2a, 0x30, 0xcf, 0x6b, 0xcd, 0xc5, 0x88, 0xf6, 0x07, 0x05, 0xe6, 0x1e, 0x5a, 0xec, 0x62, 0x04,
0xfb, 0x52, 0x81, 0x76, 0x5a, 0x63, 0x2e, 0x46, 0xc4, 0xfc, 0x18, 0xe1, 0xf5, 0xe5, 0xfc, 0x46,
0xfb, 0x96, 0x0b, 0xea, 0xcf, 0x25, 0x98, 0xee, 0x06, 0x0c, 0x69, 0x7c, 0x66, 0x91, 0xde, 0x39,
0xec, 0xb1, 0xec, 0xf7, 0x0f, 0xf8, 0xf2, 0x5a, 0x5d, 0x3f, 0xc7, 0x8d, 0xa1, 0xcd, 0xbb, 0xb7,
0x6e, 0x47, 0x44, 0xae, 0x1a, 0xb9, 0xa2, 0xd8, 0x38, 0x57, 0xe4, 0xd7, 0xbc, 0x71, 0x9e, 0x40,
0xb5, 0x5a, 0x44, 0xf5, 0x06, 0x40, 0x06, 0x3e, 0x6b, 0xd7, 0x16, 0xd5, 0xa5, 0xb2, 0x31, 0xa1,
0xe1, 0x97, 0x0a, 0x1a, 0xee, 0x75, 0x3b, 0xac, 0x5d, 0x5f, 0x54, 0xf9, 0xa5, 0x42, 0x4a, 0xe4,
0x9f, 0x50, 0xa3, 0xe1, 0x9e, 0x39, 0xb0, 0x62, 0xab, 0x0d, 0xa2, 0x21, 0x3d, 0xa1, 0x3b, 0xab,
0xd2, 0x70, 0xaf, 0x63, 0xc5, 0x96, 0xfe, 0xac, 0x04, 0xd3, 0x1d, 0xf4, 0x30, 0xc6, 0x3f, 0x1e,
0xf4, 0x02, 0x62, 0xe5, 0x13, 0x10, 0x9b, 0x3a, 0x09, 0xb1, 0xca, 0x21, 0xc4, 0x6e, 0x41, 0x33,
0xa2, 0x8e, 0x6f, 0xd1, 0xb1, 0xe9, 0xe2, 0x98, 0xb5, 0xab, 0x02, 0xb7, 0x46, 0xa2, 0x7b, 0x84,
0x63, 0xa6, 0xbf, 0x52, 0x60, 0xba, 0x87, 0x16, 0xed, 0x0f, 0xcf, 0x0c, 0x86, 0x09, 0xff, 0xd5,
0xa2, 0xff, 0x85, 0xfd, 0x57, 0x3e, 0xb8, 0xff, 0xfe, 0x0a, 0x1a, 0x45, 0x36, 0xf2, 0x62, 0x33,
0x07, 0x47, 0x02, 0x30, 0x23, 0xf5, 0x6b, 0x19, 0x44, 0x2b, 0x30, 0xb5, 0x33, 0x42, 0x3a, 0x16,
0xe9, 0x76, 0x22, 0xff, 0x72, 0x9e, 0xfe, 0x9d, 0x02, 0x5a, 0x6f, 0xcc, 0xd6, 0xc2, 0x60, 0xdb,
0xb1, 0xcf, 0x5d, 0xe4, 0x04, 0xca, 0x82, 0xaf, 0xa9, 0x45, 0x75, 0xa9, 0x6e, 0x88, 0x31, 0xe7,
0xd2, 0xc5, 0xb1, 0x19, 0x51, 0xdc, 0x76, 0x9e, 0xa2, 0x64, 0xbb, 0x6e, 0x34, 0x5c, 0x1c, 0x6f,
0x24, 0x2a, 0xfd, 0x79, 0x09, 0x9a, 0x29, 0x97, 0x1c, 0x9f, 0x37, 0x09, 0x28, 0xbf, 0x6f, 0x96,
0x5e, 0xff, 0xbe, 0x99, 0xa1, 0xa0, 0x1e, 0x83, 0xc2, 0x81, 0x3a, 0x7a, 0x0b, 0x9a, 0x82, 0x0e,
0x33, 0x08, 0x07, 0x98, 0xb1, 0xdb, 0x10, 0xba, 0xc7, 0x42, 0x55, 0x04, 0xaa, 0xf2, 0x3a, 0x29,
0x52, 0x3d, 0x3a, 0x45, 0x08, 0x94, 0x87, 0x4e, 0x2c, 0xeb, 0x4a, 0xd3, 0x10, 0x63, 0xfd, 0x7d,
0x68, 0x6c, 0x3a, 0x3e, 0x6e, 0x3a, 0x7d, 0x77, 0x9d, 0xd9, 0x6f, 0x02, 0x57, 0xfe, 0xe0, 0x51,
0x2a, 0x3c, 0x78, 0x9c, 0x78, 0xc2, 0xe8, 0x1f, 0x2b, 0x50, 0x7d, 0x84, 0xe3, 0xd5, 0x1e, 0xda,
0x02, 0x3b, 0x5e, 0xcf, 0xd2, 0x47, 0x08, 0x21, 0x90, 0x9b, 0xd0, 0x98, 0xd8, 0xc1, 0xc9, 0xe2,
0x90, 0x6f, 0xe0, 0x53, 0x8e, 0xb0, 0xab, 0x50, 0x73, 0x98, 0xb9, 0x6b, 0x79, 0xce, 0x40, 0x60,
0x5f, 0x33, 0xaa, 0x0e, 0x7b, 0x87, 0x8b, 0xbc, 0x76, 0x64, 0x25, 0x5b, 0x66, 0x9a, 0x6a, 0x4c,
0x68, 0xf4, 0x27, 0x00, 0x89, 0x6b, 0x1c, 0x9a, 0x8c, 0x59, 0x65, 0x92, 0xd9, 0x7f, 0x41, 0xd5,
0xc5, 0xf1, 0x2a, 0x43, 0xbb, 0x5d, 0x12, 0x85, 0xf7, 0x38, 0xbc, 0x92, 0x95, 0x8c, 0x74, 0xba,
0xfe, 0x91, 0x7c, 0xbe, 0xe2, 0xc6, 0x78, 0x0e, 0xb1, 0xe2, 0x91, 0xa2, 0x1c, 0x3c, 0x52, 0x6e,
0x42, 0xc3, 0x47, 0x3f, 0xa4, 0x63, 0x93, 0x39, 0xfb, 0x98, 0xc2, 0x20, 0x55, 0x3d, 0x67, 0x1f,
0x79, 0xa0, 0xc1, 0xc8, 0x37, 0x69, 0xb8, 0xc7, 0xd2, 0xad, 0x16, 0x8c, 0x7c, 0x23, 0xdc, 0x63,
0xe4, 0x6f, 0x30, 0x4b, 0xb1, 0x8f, 0x41, 0xec, 0x8d, 0x4d, 0x3f, 0x1c, 0x38, 0xdb, 0x0e, 0xa6,
0x60, 0x68, 0xe9, 0x87, 0xf5, 0x44, 0xaf, 0x7f, 0xa6, 0xc0, 0xec, 0xff, 0xd3, 0xf4, 0xeb, 0xa1,
0x2d, 0x9d, 0x3b, 0x83, 0xc4, 0xf8, 0xb7, 0x88, 0xd7, 0xe4, 0x1b, 0x87, 0x9d, 0xfe, 0x9c, 0x95,
0xe1, 0x64, 0xd4, 0x58, 0xe2, 0xd4, 0xdd, 0x17, 0x25, 0xa8, 0x26, 0xe6, 0x48, 0x1d, 0xa6, 0xdc,
0xc7, 0x61, 0x80, 0xda, 0x25, 0xb2, 0x00, 0xb3, 0xee, 0xc1, 0xe7, 0x14, 0x6d, 0x40, 0xe6, 0x60,
0xc6, 0x2d, 0xbe, 0x46, 0x68, 0x48, 0x08, 0xb4, 0xdc, 0xc2, 0x85, 0x5d, 0xdb, 0x26, 0x57, 0x60,
0xce, 0x3d, 0x7c, 0xa7, 0xd5, 0x78, 0x0a, 0x68, 0x6e, 0xf1, 0xda, 0xc7, 0xb4, 0xa1, 0x58, 0xe2,
0xbf, 0x18, 0x67, 0xb5, 0x94, 0x69, 0x0e, 0x59, 0x00, 0xcd, 0x3d, 0x70, 0xfb, 0xd2, 0xbe, 0x54,
0xc8, 0x1c, 0xb4, 0xdc, 0xc2, 0x25, 0x45, 0xfb, 0x4a, 0x21, 0x04, 0xa6, 0xdd, 0xc9, 0x5e, 0x5e,
0xfb, 0x5a, 0x21, 0x57, 0x80, 0xb8, 0x87, 0x5a, 0x5e, 0xed, 0x1b, 0x85, 0xcc, 0xc3, 0x8c, 0x5b,
0xe8, 0x0c, 0x99, 0xf6, 0xad, 0x42, 0x9a, 0x50, 0x75, 0x65, 0xfb, 0xa4, 0x7d, 0xa0, 0x0a, 0x49,
0x9e, 0xeb, 0xda, 0x87, 0x52, 0x92, 0x25, 0x51, 0x7b, 0xa5, 0x0a, 0x63, 0x93, 0x05, 0x52, 0xfb,
0x55, 0x25, 0x2d, 0xa8, 0xbb, 0x69, 0x11, 0xd0, 0x3e, 0xaf, 0x0b, 0xe3, 0x87, 0x32, 0x40, 0xfb,
0xa2, 0x7e, 0xf7, 0x3e, 0xd4, 0xd2, 0x87, 0x49, 0x02, 0x50, 0x59, 0xb7, 0x58, 0x8c, 0x54, 0xbb,
0xc4, 0xc7, 0x06, 0x5a, 0x03, 0xa4, 0x9a, 0xc2, 0xc7, 0xef, 0x52, 0x87, 0xeb, 0x4b, 0x9c, 0x93,
0x0d, 0x5e, 0xe7, 0x34, 0xf5, 0x41, 0xe7, 0xbd, 0x07, 0xb6, 0x13, 0x0f, 0x47, 0x5b, 0xbc, 0x6e,
0xae, 0xec, 0x3b, 0x9e, 0xe7, 0xec, 0xc7, 0xd8, 0x1f, 0xae, 0x48, 0xc2, 0xff, 0x3e, 0x70, 0x58,
0x4c, 0x9d, 0xad, 0x51, 0x8c, 0x83, 0x95, 0x94, 0xf6, 0x15, 0x91, 0x05, 0x99, 0x18, 0x6d, 0x6d,
0x55, 0x84, 0xe6, 0xde, 0x6f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x9e, 0x9f, 0x3d, 0x09, 0x18,
0x00, 0x00,
}
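
A small aside on the SysConfigRequest getters added in this file (illustration only): each one guards against a nil receiver, so callers can read a possibly missing request without panicking.

package main

import (
    "fmt"

    internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
)

func main() {
    var req *internalPb.SysConfigRequest // nil on purpose
    // Each generated getter checks m != nil and falls back to the zero value.
    fmt.Println(req.GetTimestamp(), len(req.GetKeys()), len(req.GetKeyPrefixes()))
}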


@ -89,6 +89,15 @@ service Master {
rpc ShowPartitions(internal.ShowPartitionRequest) returns (service.StringListResponse) {}
/**
* @brief This method is used to get system configs
*
* @param SysConfigRequest, keys or key_prefixes of the configs.
*
* @return SysConfigResponse
*/
rpc GetSysConfigs(internal.SysConfigRequest) returns (service.SysConfigResponse) {}
rpc AllocTimestamp(internal.TsoRequest) returns (internal.TsoResponse) {}
rpc AllocID(internal.IDRequest) returns (internal.IDResponse) {}
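
Client-side illustration (not part of this change): dialing the master and calling the new RPC. Only the GetSysConfigs signature and the SysConfigRequest/SysConfigResponse fields come from this diff; the import paths, the prefix string, and the address (the default MASTER_ADDRESS) are assumptions.

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "google.golang.org/grpc"

    internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
    masterPb "github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
)

func main() {
    conn, err := grpc.Dial("localhost:53100", grpc.WithInsecure(), grpc.WithBlock())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := masterPb.NewMasterClient(conn)
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    resp, err := client.GetSysConfigs(ctx, &internalPb.SysConfigRequest{
        MsgType:     internalPb.MsgType_kGetSysConfigs,
        KeyPrefixes: []string{"etcd."}, // hypothetical prefix
    })
    if err != nil {
        log.Fatal(err)
    }
    values := resp.GetValues()
    for i, key := range resp.GetKeys() {
        if i < len(values) {
            fmt.Println(key, "=", values[i])
        }
    }
}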


@ -30,36 +30,38 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
func init() { proto.RegisterFile("master.proto", fileDescriptor_f9c348dec43a6705) }
var fileDescriptor_f9c348dec43a6705 = []byte{
// 458 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x41, 0x6f, 0xd3, 0x30,
0x14, 0xc7, 0x7b, 0x1a, 0x92, 0xd5, 0xb5, 0xcc, 0xdc, 0xca, 0x85, 0xf5, 0x04, 0x2d, 0x4b, 0x10,
0x7c, 0x01, 0xd6, 0xe5, 0xb0, 0x4a, 0x20, 0x4d, 0xeb, 0x2e, 0x80, 0xd0, 0x70, 0xb2, 0xa7, 0xf4,
0x81, 0x13, 0x07, 0xbf, 0x97, 0x21, 0xed, 0x23, 0xf1, 0x29, 0x51, 0x93, 0x26, 0xa9, 0x69, 0x5d,
0xca, 0x6e, 0xb5, 0xfd, 0xf3, 0xef, 0x5f, 0xbf, 0xf7, 0x14, 0xd1, 0xcf, 0x14, 0x31, 0xd8, 0xa0,
0xb0, 0x86, 0x8d, 0x7c, 0x96, 0xa1, 0xbe, 0x2f, 0xa9, 0x5e, 0x05, 0xf5, 0xd1, 0xa8, 0x9f, 0x98,
0x2c, 0x33, 0x79, 0xbd, 0x39, 0x92, 0x98, 0x33, 0xd8, 0x5c, 0xe9, 0xdb, 0x8c, 0xd2, 0xf5, 0xde,
0x09, 0x81, 0xbd, 0xc7, 0x04, 0xba, 0xad, 0xb7, 0xbf, 0x85, 0x38, 0xfa, 0x58, 0xdd, 0x97, 0x4a,
0x3c, 0xbd, 0xb0, 0xa0, 0x18, 0x2e, 0x8c, 0xd6, 0x90, 0x30, 0x9a, 0x5c, 0x06, 0x81, 0x93, 0xd4,
0x38, 0x83, 0xbf, 0xc1, 0x6b, 0xf8, 0x59, 0x02, 0xf1, 0xe8, 0xb9, 0xcb, 0xaf, 0xff, 0xd1, 0x82,
0x15, 0x97, 0x34, 0xee, 0xc9, 0xaf, 0x62, 0x10, 0x59, 0x53, 0x6c, 0x04, 0xbc, 0xf6, 0x04, 0xb8,
0xd8, 0x81, 0xfa, 0x58, 0x1c, 0x5f, 0x2a, 0xda, 0xb0, 0x4f, 0x3d, 0x76, 0x87, 0x6a, 0xe4, 0x63,
0x17, 0x5e, 0xd7, 0x2a, 0x98, 0x19, 0xa3, 0xaf, 0x81, 0x0a, 0x93, 0x13, 0x8c, 0x7b, 0xb2, 0x14,
0x32, 0x02, 0x4a, 0x2c, 0xc6, 0x9b, 0x75, 0x7a, 0xe3, 0x7b, 0xc6, 0x16, 0xda, 0xa4, 0x4d, 0x77,
0xa7, 0x75, 0x60, 0x7d, 0xb5, 0x58, 0xfd, 0x1c, 0xf7, 0xe4, 0x0f, 0x31, 0x5c, 0x2c, 0xcd, 0xaf,
0xee, 0x98, 0xbc, 0xa5, 0x73, 0xb9, 0x26, 0xef, 0xe5, 0xee, 0xbc, 0x05, 0x5b, 0xcc, 0xd3, 0x0f,
0x48, 0xbc, 0xf1, 0xc6, 0x5b, 0x31, 0xac, 0x1b, 0x7c, 0xa5, 0x2c, 0x63, 0xf5, 0xc0, 0xb3, 0xbd,
0x83, 0xd0, 0x72, 0x07, 0x36, 0xea, 0x8b, 0x38, 0x5e, 0x35, 0xb8, 0xd3, 0x4f, 0xf7, 0x8c, 0xc1,
0xff, 0xca, 0xbf, 0x89, 0xfe, 0xa5, 0xa2, 0xce, 0x3d, 0xf1, 0x0f, 0xc1, 0x96, 0xfa, 0xb0, 0x19,
0xb0, 0xe2, 0xa4, 0x69, 0x6c, 0x17, 0x13, 0xfe, 0x63, 0x04, 0xb6, 0xb2, 0x26, 0xbb, 0xb3, 0x5a,
0xce, 0x1d, 0x00, 0x14, 0x83, 0x55, 0x63, 0xdb, 0x53, 0xf2, 0xd6, 0xcc, 0xc1, 0x1e, 0xd3, 0xfe,
0x4f, 0x62, 0x70, 0xae, 0xb5, 0x49, 0x6e, 0x30, 0x03, 0x62, 0x95, 0x15, 0xf2, 0xd4, 0x13, 0x75,
0x43, 0xc6, 0x53, 0x39, 0x17, 0x69, 0xd5, 0x57, 0xe2, 0x49, 0xa5, 0x9e, 0x47, 0xf2, 0x85, 0xe7,
0xc2, 0x3c, 0x6a, 0x94, 0xa7, 0x7b, 0x88, 0xd6, 0xf8, 0x5d, 0x0c, 0xcf, 0x89, 0x30, 0xcd, 0x17,
0x90, 0x66, 0x90, 0xf3, 0x3c, 0x92, 0xaf, 0x3c, 0xf7, 0x5a, 0xae, 0x8b, 0x98, 0x1c, 0x82, 0x36,
0x59, 0xb3, 0xd9, 0xe7, 0xf7, 0x29, 0xf2, 0xb2, 0x8c, 0x57, 0x33, 0x17, 0x3e, 0xa0, 0xd6, 0xf8,
0xc0, 0x90, 0x2c, 0xc3, 0x5a, 0x72, 0x76, 0x87, 0xc4, 0x16, 0xe3, 0x92, 0xe1, 0x2e, 0x6c, 0x54,
0x61, 0x65, 0x0e, 0xeb, 0x6f, 0x74, 0x11, 0xc7, 0x47, 0xd5, 0xfa, 0xdd, 0x9f, 0x00, 0x00, 0x00,
0xff, 0xff, 0xa0, 0xb5, 0xeb, 0xf6, 0xd1, 0x05, 0x00, 0x00,
// 484 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x95, 0xdf, 0x6e, 0xd3, 0x30,
0x14, 0x87, 0x7b, 0x35, 0x24, 0xd3, 0x3f, 0xcc, 0xdc, 0x95, 0x1b, 0xd6, 0x9b, 0x41, 0xcb, 0x12,
0x04, 0x2f, 0xc0, 0xda, 0x48, 0xac, 0x12, 0x48, 0xd3, 0xb2, 0x1b, 0x40, 0x68, 0x24, 0xd9, 0x21,
0x35, 0x24, 0x76, 0xf0, 0x39, 0x19, 0xa2, 0x2f, 0xc1, 0x2b, 0xa3, 0x26, 0x75, 0x52, 0xd3, 0xba,
0x94, 0xdd, 0xd5, 0xf6, 0xe7, 0xdf, 0x57, 0x9f, 0x73, 0xa4, 0xb0, 0x6e, 0x1e, 0x21, 0x81, 0xf6,
0x0a, 0xad, 0x48, 0xf1, 0xc7, 0xb9, 0xc8, 0xee, 0x4a, 0xac, 0x57, 0x5e, 0x7d, 0x34, 0xec, 0x26,
0x2a, 0xcf, 0x95, 0xac, 0x37, 0x87, 0x5c, 0x48, 0x02, 0x2d, 0xa3, 0xec, 0x26, 0xc7, 0x74, 0xbd,
0x77, 0x8c, 0xa0, 0xef, 0x44, 0x02, 0xed, 0xd6, 0xab, 0xdf, 0x0f, 0xd9, 0xd1, 0xfb, 0xea, 0x3e,
0x8f, 0xd8, 0xa3, 0x99, 0x86, 0x88, 0x60, 0xa6, 0xb2, 0x0c, 0x12, 0x12, 0x4a, 0x72, 0xcf, 0xb3,
0x4c, 0x26, 0xd3, 0xfb, 0x1b, 0xbc, 0x82, 0x1f, 0x25, 0x20, 0x0d, 0x9f, 0xd8, 0xfc, 0xfa, 0x1f,
0x85, 0x14, 0x51, 0x89, 0xa3, 0x0e, 0xff, 0xcc, 0xfa, 0x81, 0x56, 0xc5, 0x86, 0xe0, 0x85, 0x43,
0x60, 0x63, 0x07, 0xc6, 0xc7, 0xac, 0x77, 0x11, 0xe1, 0x46, 0xfa, 0xc4, 0x91, 0x6e, 0x51, 0x26,
0x7c, 0x64, 0xc3, 0xeb, 0x5a, 0x79, 0x53, 0xa5, 0xb2, 0x2b, 0xc0, 0x42, 0x49, 0x84, 0x51, 0x87,
0x97, 0x8c, 0x07, 0x80, 0x89, 0x16, 0xf1, 0x66, 0x9d, 0x5e, 0xba, 0x9e, 0xb1, 0x85, 0x1a, 0xdb,
0x64, 0xb7, 0xad, 0x05, 0xeb, 0xab, 0xc5, 0xea, 0xe7, 0xa8, 0xc3, 0xbf, 0xb3, 0x41, 0xb8, 0x50,
0x3f, 0xdb, 0x63, 0x74, 0x96, 0xce, 0xe6, 0x8c, 0xef, 0xd9, 0x6e, 0x5f, 0x48, 0x5a, 0xc8, 0xf4,
0x9d, 0x40, 0xda, 0x78, 0xe3, 0x0d, 0x1b, 0xd4, 0x0d, 0xbe, 0x8c, 0x34, 0x89, 0xea, 0x81, 0x67,
0x7b, 0x07, 0xa1, 0xe1, 0x0e, 0x6c, 0xd4, 0x27, 0xd6, 0x5b, 0x35, 0xb8, 0x8d, 0x9f, 0xec, 0x19,
0x83, 0xff, 0x0d, 0xff, 0xc2, 0xba, 0x17, 0x11, 0xb6, 0xd9, 0x63, 0xf7, 0x10, 0x6c, 0x45, 0x1f,
0x36, 0x03, 0x9a, 0x1d, 0x9b, 0xc6, 0xb6, 0x1a, 0xff, 0x1f, 0x23, 0xb0, 0xe5, 0x1a, 0xef, 0x76,
0x35, 0x9c, 0x3d, 0x00, 0x82, 0xf5, 0x57, 0x8d, 0x6d, 0x4e, 0xd1, 0x59, 0x33, 0x0b, 0xbb, 0x4f,
0xfb, 0x13, 0xd6, 0x7b, 0x0b, 0x14, 0xfe, 0xc2, 0x99, 0x92, 0x5f, 0x45, 0x8a, 0xfc, 0xd4, 0x65,
0x32, 0x88, 0xb1, 0x9c, 0x3a, 0x2c, 0x2d, 0xd7, 0x48, 0x3e, 0xb0, 0xfe, 0x79, 0x96, 0xa9, 0xe4,
0x5a, 0xe4, 0x80, 0x14, 0xe5, 0x05, 0x3f, 0x71, 0x58, 0xae, 0x51, 0x39, 0xda, 0x63, 0x23, 0x4d,
0xf4, 0x25, 0x7b, 0x50, 0x45, 0xcf, 0x03, 0xfe, 0xd4, 0x71, 0x61, 0x1e, 0x98, 0xc8, 0x93, 0x3d,
0x44, 0x93, 0xf8, 0x8d, 0x0d, 0xce, 0x11, 0x45, 0x2a, 0x43, 0x48, 0x73, 0x90, 0x34, 0x0f, 0xf8,
0x73, 0xc7, 0xbd, 0x86, 0x6b, 0x15, 0xe3, 0x43, 0x50, 0xe3, 0x9a, 0x4e, 0x3f, 0xbe, 0x49, 0x05,
0x2d, 0xca, 0x78, 0x35, 0xd8, 0xfe, 0x52, 0x64, 0x99, 0x58, 0x12, 0x24, 0x0b, 0xbf, 0x0e, 0x39,
0xbb, 0x15, 0x48, 0x5a, 0xc4, 0x25, 0xc1, 0xad, 0x6f, 0xa2, 0xfc, 0x2a, 0xd9, 0xaf, 0x3f, 0x04,
0x45, 0x1c, 0x1f, 0x55, 0xeb, 0xd7, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x75, 0x7d, 0xec,
0x36, 0x06, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -134,6 +136,13 @@ type MasterClient interface {
//
// @return StringListResponse
ShowPartitions(ctx context.Context, in *internalpb.ShowPartitionRequest, opts ...grpc.CallOption) (*servicepb.StringListResponse, error)
//*
// @brief This method is used to get system configs
//
// @param SysConfigRequest, keys or key_prefixes of the configs.
//
// @return SysConfigResponse
GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest, opts ...grpc.CallOption) (*servicepb.SysConfigResponse, error)
AllocTimestamp(ctx context.Context, in *internalpb.TsoRequest, opts ...grpc.CallOption) (*internalpb.TsoResponse, error)
AllocID(ctx context.Context, in *internalpb.IDRequest, opts ...grpc.CallOption) (*internalpb.IDResponse, error)
AssignSegmentID(ctx context.Context, in *internalpb.AssignSegIDRequest, opts ...grpc.CallOption) (*internalpb.AssignSegIDResponse, error)
@ -237,6 +246,15 @@ func (c *masterClient) ShowPartitions(ctx context.Context, in *internalpb.ShowPa
return out, nil
}
func (c *masterClient) GetSysConfigs(ctx context.Context, in *internalpb.SysConfigRequest, opts ...grpc.CallOption) (*servicepb.SysConfigResponse, error) {
out := new(servicepb.SysConfigResponse)
err := c.cc.Invoke(ctx, "/milvus.proto.master.Master/GetSysConfigs", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *masterClient) AllocTimestamp(ctx context.Context, in *internalpb.TsoRequest, opts ...grpc.CallOption) (*internalpb.TsoResponse, error) {
out := new(internalpb.TsoResponse)
err := c.cc.Invoke(ctx, "/milvus.proto.master.Master/AllocTimestamp", in, out, opts...)
@ -326,6 +344,13 @@ type MasterServer interface {
//
// @return StringListResponse
ShowPartitions(context.Context, *internalpb.ShowPartitionRequest) (*servicepb.StringListResponse, error)
//*
// @brief This method is used to get system configs
//
// @param SysConfigRequest, keys or key_prefixes of the configs.
//
// @return SysConfigResponse
GetSysConfigs(context.Context, *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error)
AllocTimestamp(context.Context, *internalpb.TsoRequest) (*internalpb.TsoResponse, error)
AllocID(context.Context, *internalpb.IDRequest) (*internalpb.IDResponse, error)
AssignSegmentID(context.Context, *internalpb.AssignSegIDRequest) (*internalpb.AssignSegIDResponse, error)
@ -365,6 +390,9 @@ func (*UnimplementedMasterServer) DescribePartition(ctx context.Context, req *in
func (*UnimplementedMasterServer) ShowPartitions(ctx context.Context, req *internalpb.ShowPartitionRequest) (*servicepb.StringListResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ShowPartitions not implemented")
}
func (*UnimplementedMasterServer) GetSysConfigs(ctx context.Context, req *internalpb.SysConfigRequest) (*servicepb.SysConfigResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetSysConfigs not implemented")
}
func (*UnimplementedMasterServer) AllocTimestamp(ctx context.Context, req *internalpb.TsoRequest) (*internalpb.TsoResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method AllocTimestamp not implemented")
}
@ -559,6 +587,24 @@ func _Master_ShowPartitions_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler)
}
func _Master_GetSysConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(internalpb.SysConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MasterServer).GetSysConfigs(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.master.Master/GetSysConfigs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MasterServer).GetSysConfigs(ctx, req.(*internalpb.SysConfigRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Master_AllocTimestamp_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(internalpb.TsoRequest)
if err := dec(in); err != nil {
@ -657,6 +703,10 @@ var _Master_serviceDesc = grpc.ServiceDesc{
MethodName: "ShowPartitions",
Handler: _Master_ShowPartitions_Handler,
},
{
MethodName: "GetSysConfigs",
Handler: _Master_GetSysConfigs_Handler,
},
{
MethodName: "AllocTimestamp",
Handler: _Master_AllocTimestamp_Handler,
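
Server-side illustration (not part of this change): a sketch of overriding the new method while keeping the generated stubs for everything else. The import paths, the in-memory map, and the RegisterMasterServer name (standard protoc-gen-go convention) are assumptions; the method signature and the SysConfigResponse fields come from this diff.

package main

import (
    "context"
    "log"
    "net"
    "strings"

    "google.golang.org/grpc"

    commonPb "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
    internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
    masterPb "github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
    servicePb "github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)

// sysConfigServer overrides only GetSysConfigs; every other RPC falls back to
// the UnimplementedMasterServer stubs generated above.
type sysConfigServer struct {
    masterPb.UnimplementedMasterServer
    configs map[string]string // hypothetical in-memory config store
}

func (s *sysConfigServer) GetSysConfigs(ctx context.Context, req *internalPb.SysConfigRequest) (*servicePb.SysConfigResponse, error) {
    resp := &servicePb.SysConfigResponse{Status: &commonPb.Status{}}
    for _, k := range req.GetKeys() { // exact-key lookups
        if v, ok := s.configs[k]; ok {
            resp.Keys = append(resp.Keys, k)
            resp.Values = append(resp.Values, v)
        }
    }
    for _, prefix := range req.GetKeyPrefixes() { // prefix lookups
        for k, v := range s.configs {
            if strings.HasPrefix(k, prefix) {
                resp.Keys = append(resp.Keys, k)
                resp.Values = append(resp.Values, v)
            }
        }
    }
    return resp, nil
}

func main() {
    lis, err := net.Listen("tcp", ":53100")
    if err != nil {
        log.Fatal(err)
    }
    s := grpc.NewServer()
    masterPb.RegisterMasterServer(s, &sysConfigServer{
        configs: map[string]string{"etcd.address": "localhost:2379"}, // hypothetical entry
    })
    log.Fatal(s.Serve(lis))
}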


@ -135,6 +135,14 @@ message PartitionDescription {
repeated common.KeyValuePair statistics = 3;
}
/**
 * @brief Response of GetSysConfigs
*/
message SysConfigResponse {
common.Status status = 1;
repeated string keys = 2;
repeated string values = 3;
}
/**
* @brief Entities hit by query


@ -737,6 +737,63 @@ func (m *PartitionDescription) GetStatistics() []*commonpb.KeyValuePair {
return nil
}
//*
// @brief Response of GetSysConfigs
type SysConfigResponse struct {
Status *commonpb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
Keys []string `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"`
Values []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SysConfigResponse) Reset() { *m = SysConfigResponse{} }
func (m *SysConfigResponse) String() string { return proto.CompactTextString(m) }
func (*SysConfigResponse) ProtoMessage() {}
func (*SysConfigResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_b4b40b84dd2f74cb, []int{13}
}
func (m *SysConfigResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SysConfigResponse.Unmarshal(m, b)
}
func (m *SysConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SysConfigResponse.Marshal(b, m, deterministic)
}
func (m *SysConfigResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SysConfigResponse.Merge(m, src)
}
func (m *SysConfigResponse) XXX_Size() int {
return xxx_messageInfo_SysConfigResponse.Size(m)
}
func (m *SysConfigResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SysConfigResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SysConfigResponse proto.InternalMessageInfo
func (m *SysConfigResponse) GetStatus() *commonpb.Status {
if m != nil {
return m.Status
}
return nil
}
func (m *SysConfigResponse) GetKeys() []string {
if m != nil {
return m.Keys
}
return nil
}
func (m *SysConfigResponse) GetValues() []string {
if m != nil {
return m.Values
}
return nil
}
//*
// @brief Entities hit by query
type Hits struct {
@ -752,7 +809,7 @@ func (m *Hits) Reset() { *m = Hits{} }
func (m *Hits) String() string { return proto.CompactTextString(m) }
func (*Hits) ProtoMessage() {}
func (*Hits) Descriptor() ([]byte, []int) {
return fileDescriptor_b4b40b84dd2f74cb, []int{13}
return fileDescriptor_b4b40b84dd2f74cb, []int{14}
}
func (m *Hits) XXX_Unmarshal(b []byte) error {
@ -808,7 +865,7 @@ func (m *QueryResult) Reset() { *m = QueryResult{} }
func (m *QueryResult) String() string { return proto.CompactTextString(m) }
func (*QueryResult) ProtoMessage() {}
func (*QueryResult) Descriptor() ([]byte, []int) {
return fileDescriptor_b4b40b84dd2f74cb, []int{14}
return fileDescriptor_b4b40b84dd2f74cb, []int{15}
}
func (m *QueryResult) XXX_Unmarshal(b []byte) error {
@ -858,6 +915,7 @@ func init() {
proto.RegisterType((*IntegerRangeResponse)(nil), "milvus.proto.service.IntegerRangeResponse")
proto.RegisterType((*CollectionDescription)(nil), "milvus.proto.service.CollectionDescription")
proto.RegisterType((*PartitionDescription)(nil), "milvus.proto.service.PartitionDescription")
proto.RegisterType((*SysConfigResponse)(nil), "milvus.proto.service.SysConfigResponse")
proto.RegisterType((*Hits)(nil), "milvus.proto.service.Hits")
proto.RegisterType((*QueryResult)(nil), "milvus.proto.service.QueryResult")
}
@ -865,52 +923,53 @@ func init() {
func init() { proto.RegisterFile("service_msg.proto", fileDescriptor_b4b40b84dd2f74cb) }
var fileDescriptor_b4b40b84dd2f74cb = []byte{
// 739 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xdd, 0x4e, 0xdb, 0x4a,
0x10, 0x3e, 0x8e, 0x43, 0x4e, 0x98, 0x38, 0x21, 0xec, 0xc9, 0x41, 0x06, 0x6e, 0x72, 0x8c, 0x38,
0x8d, 0x5a, 0x35, 0x91, 0xa0, 0x52, 0xc5, 0x45, 0xa5, 0x26, 0x40, 0x5b, 0x7e, 0x14, 0xe8, 0x12,
0x21, 0xd1, 0x4a, 0x8d, 0x36, 0xf6, 0xca, 0x5e, 0xd5, 0xf1, 0x5a, 0xde, 0x35, 0x51, 0x78, 0x90,
0xbe, 0x44, 0x1f, 0xa4, 0x77, 0x7d, 0xa6, 0xca, 0x6b, 0x93, 0x1f, 0x4a, 0x55, 0x0a, 0xdc, 0xcd,
0xcc, 0xee, 0xcc, 0x37, 0xbf, 0x1f, 0x2c, 0x0b, 0x1a, 0x5d, 0x32, 0x9b, 0xf6, 0x87, 0xc2, 0x6d,
0x86, 0x11, 0x97, 0x1c, 0xd5, 0x86, 0xcc, 0xbf, 0x8c, 0x45, 0xaa, 0x35, 0xb3, 0xf7, 0x35, 0xc3,
0xe6, 0xc3, 0x21, 0x0f, 0x52, 0xeb, 0x9a, 0x21, 0x6c, 0x8f, 0x0e, 0x49, 0xaa, 0x59, 0x3b, 0x50,
0xd9, 0xe5, 0xbe, 0x4f, 0x6d, 0xc9, 0x78, 0xd0, 0x25, 0x43, 0x8a, 0x9e, 0xc0, 0x92, 0x3d, 0xb1,
0xf4, 0x03, 0x32, 0xa4, 0xa6, 0x56, 0xd7, 0x1a, 0x8b, 0xb8, 0x62, 0xcf, 0x7d, 0xb4, 0x0e, 0xa1,
0x7c, 0x4a, 0x22, 0xc9, 0xfe, 0xd8, 0x13, 0x55, 0x41, 0x97, 0xc4, 0x35, 0x73, 0xea, 0x31, 0x11,
0xad, 0xaf, 0x1a, 0x14, 0x31, 0x1f, 0x75, 0x88, 0xb4, 0xbd, 0xbb, 0xc7, 0xd9, 0x80, 0x72, 0x78,
0x9d, 0x41, 0x7f, 0x1a, 0xd1, 0x98, 0x18, 0x7b, 0xc4, 0x45, 0x2f, 0xa0, 0x18, 0xf1, 0x51, 0xdf,
0x21, 0x92, 0x98, 0x7a, 0x5d, 0x6f, 0x94, 0xb6, 0x56, 0x9b, 0x73, 0x6d, 0xca, 0xba, 0xd3, 0xf1,
0xf9, 0x00, 0xff, 0x1d, 0xf1, 0xd1, 0x1e, 0x91, 0x04, 0xad, 0xc3, 0xa2, 0x47, 0x84, 0xd7, 0xff,
0x4c, 0xc7, 0xc2, 0xcc, 0xd7, 0xf5, 0x46, 0x19, 0x17, 0x13, 0xc3, 0x11, 0x1d, 0x0b, 0x6b, 0x04,
0xd5, 0x53, 0x9f, 0xd8, 0xd4, 0xe3, 0xbe, 0x43, 0xa3, 0x73, 0xe2, 0xc7, 0x93, 0x9a, 0xb4, 0x49,
0x4d, 0x68, 0x07, 0xf2, 0x72, 0x1c, 0x52, 0x95, 0x54, 0x65, 0x6b, 0xb3, 0x79, 0xdb, 0x6c, 0x9a,
0x33, 0x71, 0x7a, 0xe3, 0x90, 0x62, 0xe5, 0x82, 0x56, 0xa0, 0x70, 0x99, 0x44, 0x15, 0x2a, 0x63,
0x03, 0x67, 0x9a, 0xf5, 0x69, 0x0e, 0xf8, 0x6d, 0xc4, 0xe3, 0x10, 0x1d, 0x82, 0x11, 0x4e, 0x6d,
0xc2, 0xd4, 0x54, 0x8d, 0xff, 0xff, 0x16, 0x4e, 0xa5, 0x8d, 0xe7, 0x7c, 0xad, 0x2f, 0x1a, 0x2c,
0xbc, 0x8f, 0x69, 0x34, 0xbe, 0xfb, 0x0c, 0x36, 0xa1, 0x32, 0x37, 0x03, 0x61, 0xe6, 0xea, 0x7a,
0x63, 0x11, 0x97, 0x67, 0x87, 0x20, 0x92, 0xf6, 0x38, 0xc2, 0x37, 0xf5, 0xb4, 0x3d, 0x8e, 0xf0,
0xd1, 0x33, 0x58, 0x9e, 0xc1, 0xee, 0xbb, 0x49, 0x31, 0x66, 0xbe, 0xae, 0x35, 0x0c, 0x5c, 0x0d,
0x6f, 0x14, 0x69, 0x7d, 0x84, 0xca, 0x99, 0x8c, 0x58, 0xe0, 0x62, 0x2a, 0x42, 0x1e, 0x08, 0x8a,
0xb6, 0xa1, 0x20, 0x24, 0x91, 0xb1, 0x50, 0x79, 0x95, 0xb6, 0xd6, 0x6f, 0x1d, 0xea, 0x99, 0xfa,
0x82, 0xb3, 0xaf, 0xa8, 0x06, 0x0b, 0xaa, 0x93, 0xd9, 0xa2, 0xa4, 0x8a, 0x75, 0x01, 0x46, 0x87,
0x73, 0xff, 0x11, 0x43, 0x17, 0xaf, 0x43, 0x13, 0x40, 0x69, 0xde, 0xc7, 0x4c, 0xc8, 0x87, 0x01,
0x4c, 0x77, 0x22, 0x6d, 0xf0, 0xf5, 0x4e, 0x0c, 0xe0, 0x9f, 0x83, 0x40, 0x52, 0x97, 0x46, 0x8f,
0x8d, 0xa1, 0x4f, 0x30, 0x04, 0xd4, 0x32, 0x0c, 0x4c, 0x02, 0x97, 0x3e, 0xb8, 0x53, 0x03, 0xea,
0xb2, 0x40, 0x75, 0x4a, 0xc7, 0xa9, 0x92, 0x2c, 0x08, 0x0d, 0x1c, 0xb5, 0x20, 0x3a, 0x4e, 0x44,
0xeb, 0xbb, 0x06, 0xff, 0x4e, 0xb9, 0x69, 0x8f, 0x0a, 0x3b, 0x62, 0x61, 0x22, 0xde, 0x0f, 0xf6,
0x15, 0x14, 0x52, 0xe6, 0x53, 0xb8, 0xa5, 0x9f, 0x0e, 0x32, 0x65, 0xc5, 0x29, 0xe0, 0x99, 0x32,
0xe0, 0xcc, 0x09, 0xb5, 0x01, 0x92, 0x40, 0x4c, 0x48, 0x66, 0x8b, 0x8c, 0x48, 0xfe, 0xbb, 0x15,
0xf7, 0x88, 0x8e, 0xd5, 0x6d, 0x9d, 0x12, 0x16, 0xe1, 0x19, 0x27, 0xeb, 0x9b, 0x06, 0xb5, 0x09,
0x63, 0x3e, 0xb8, 0x9e, 0x97, 0x90, 0x57, 0x67, 0x99, 0x56, 0xb3, 0xf1, 0x8b, 0x7b, 0x9f, 0x25,
0x68, 0xac, 0x1c, 0x1e, 0xa3, 0x92, 0x23, 0xc8, 0xbf, 0x63, 0x52, 0x5d, 0xf5, 0xc1, 0x5e, 0x4a,
0x39, 0x3a, 0x4e, 0x44, 0xb4, 0x3a, 0xc3, 0xb6, 0x39, 0xc5, 0x5d, 0x13, 0x4a, 0x5d, 0x49, 0x06,
0xc0, 0xa3, 0x8c, 0xd4, 0x72, 0x38, 0xd3, 0xac, 0x73, 0x28, 0x29, 0xce, 0xc1, 0x54, 0xc4, 0xbe,
0xbc, 0x5f, 0x33, 0x10, 0xe4, 0x3d, 0x26, 0x45, 0x06, 0xa9, 0xe4, 0xa7, 0xaf, 0x61, 0xe9, 0x06,
0xbb, 0xa2, 0x22, 0xe4, 0xbb, 0x27, 0xdd, 0xfd, 0xea, 0x5f, 0x68, 0x19, 0xca, 0xe7, 0xfb, 0xbb,
0xbd, 0x13, 0xdc, 0xef, 0x1c, 0x74, 0xdb, 0xf8, 0xa2, 0xea, 0xa0, 0x2a, 0x18, 0x99, 0xe9, 0xcd,
0xf1, 0x49, 0xbb, 0x57, 0xa5, 0x9d, 0xdd, 0x0f, 0x6d, 0x97, 0x49, 0x2f, 0x1e, 0x24, 0xa8, 0xad,
0x2b, 0xe6, 0xfb, 0xec, 0x4a, 0x52, 0xdb, 0x6b, 0xa5, 0x19, 0x3d, 0x77, 0x98, 0x90, 0x11, 0x1b,
0xc4, 0x92, 0x3a, 0x2d, 0x16, 0x48, 0x1a, 0x05, 0xc4, 0x6f, 0xa9, 0x34, 0x5b, 0xd9, 0x00, 0xc2,
0xc1, 0xa0, 0xa0, 0x0c, 0xdb, 0x3f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xee, 0x08, 0x5d, 0xa4, 0xaf,
0x07, 0x00, 0x00,
// 762 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xdd, 0x4e, 0xe3, 0x46,
0x14, 0xae, 0xe3, 0x90, 0x86, 0x13, 0x27, 0x24, 0xd3, 0x14, 0x19, 0xb8, 0x49, 0x8d, 0x68, 0xa3,
0x56, 0x4d, 0x24, 0xa8, 0x54, 0x71, 0x51, 0xa9, 0x49, 0xa0, 0x2d, 0x3f, 0x0a, 0x74, 0x12, 0x21,
0xd1, 0x4a, 0x8d, 0x26, 0xf6, 0xd4, 0x1e, 0xd5, 0xf1, 0x58, 0x9e, 0x31, 0x51, 0x78, 0x90, 0xbe,
0xc4, 0x3e, 0xc8, 0xde, 0xed, 0x33, 0xad, 0x3c, 0x36, 0xf9, 0x61, 0x59, 0x2d, 0x4b, 0xb8, 0x3b,
0xe7, 0xcc, 0x9c, 0xf3, 0x9d, 0xdf, 0x0f, 0x6a, 0x82, 0x46, 0x77, 0xcc, 0xa6, 0xa3, 0x89, 0x70,
0x5b, 0x61, 0xc4, 0x25, 0x47, 0xf5, 0x09, 0xf3, 0xef, 0x62, 0x91, 0x6a, 0xad, 0xec, 0x7d, 0xd7,
0xb0, 0xf9, 0x64, 0xc2, 0x83, 0xd4, 0xba, 0x6b, 0x08, 0xdb, 0xa3, 0x13, 0x92, 0x6a, 0xd6, 0x31,
0x54, 0x7a, 0xdc, 0xf7, 0xa9, 0x2d, 0x19, 0x0f, 0xfa, 0x64, 0x42, 0xd1, 0x77, 0xb0, 0x65, 0xcf,
0x2d, 0xa3, 0x80, 0x4c, 0xa8, 0xa9, 0x35, 0xb4, 0xe6, 0x26, 0xae, 0xd8, 0x2b, 0x1f, 0xad, 0x73,
0x28, 0x5f, 0x93, 0x48, 0xb2, 0xcf, 0xf6, 0x44, 0x55, 0xd0, 0x25, 0x71, 0xcd, 0x9c, 0x7a, 0x4c,
0x44, 0xeb, 0x8d, 0x06, 0x45, 0xcc, 0xa7, 0x5d, 0x22, 0x6d, 0xef, 0xf9, 0x71, 0xf6, 0xa1, 0x1c,
0x3e, 0x64, 0x30, 0x5a, 0x44, 0x34, 0xe6, 0xc6, 0x21, 0x71, 0xd1, 0x4f, 0x50, 0x8c, 0xf8, 0x74,
0xe4, 0x10, 0x49, 0x4c, 0xbd, 0xa1, 0x37, 0x4b, 0x87, 0x3b, 0xad, 0x95, 0x36, 0x65, 0xdd, 0xe9,
0xfa, 0x7c, 0x8c, 0xbf, 0x8c, 0xf8, 0xf4, 0x84, 0x48, 0x82, 0xf6, 0x60, 0xd3, 0x23, 0xc2, 0x1b,
0xfd, 0x47, 0x67, 0xc2, 0xcc, 0x37, 0xf4, 0x66, 0x19, 0x17, 0x13, 0xc3, 0x05, 0x9d, 0x09, 0x6b,
0x0a, 0xd5, 0x6b, 0x9f, 0xd8, 0xd4, 0xe3, 0xbe, 0x43, 0xa3, 0x1b, 0xe2, 0xc7, 0xf3, 0x9a, 0xb4,
0x79, 0x4d, 0xe8, 0x18, 0xf2, 0x72, 0x16, 0x52, 0x95, 0x54, 0xe5, 0xf0, 0xa0, 0xf5, 0xd4, 0x6c,
0x5a, 0x4b, 0x71, 0x86, 0xb3, 0x90, 0x62, 0xe5, 0x82, 0xb6, 0xa1, 0x70, 0x97, 0x44, 0x15, 0x2a,
0x63, 0x03, 0x67, 0x9a, 0xf5, 0xcf, 0x0a, 0xf0, 0xef, 0x11, 0x8f, 0x43, 0x74, 0x0e, 0x46, 0xb8,
0xb0, 0x09, 0x53, 0x53, 0x35, 0x7e, 0xfb, 0x49, 0x38, 0x95, 0x36, 0x5e, 0xf1, 0xb5, 0xfe, 0xd7,
0x60, 0xe3, 0xcf, 0x98, 0x46, 0xb3, 0xe7, 0xcf, 0xe0, 0x00, 0x2a, 0x2b, 0x33, 0x10, 0x66, 0xae,
0xa1, 0x37, 0x37, 0x71, 0x79, 0x79, 0x08, 0x22, 0x69, 0x8f, 0x23, 0x7c, 0x53, 0x4f, 0xdb, 0xe3,
0x08, 0x1f, 0xfd, 0x00, 0xb5, 0x25, 0xec, 0x91, 0x9b, 0x14, 0x63, 0xe6, 0x1b, 0x5a, 0xd3, 0xc0,
0xd5, 0xf0, 0x51, 0x91, 0xd6, 0xdf, 0x50, 0x19, 0xc8, 0x88, 0x05, 0x2e, 0xa6, 0x22, 0xe4, 0x81,
0xa0, 0xe8, 0x08, 0x0a, 0x42, 0x12, 0x19, 0x0b, 0x95, 0x57, 0xe9, 0x70, 0xef, 0xc9, 0xa1, 0x0e,
0xd4, 0x17, 0x9c, 0x7d, 0x45, 0x75, 0xd8, 0x50, 0x9d, 0xcc, 0x16, 0x25, 0x55, 0xac, 0x5b, 0x30,
0xba, 0x9c, 0xfb, 0xaf, 0x18, 0xba, 0xf8, 0x10, 0x9a, 0x00, 0x4a, 0xf3, 0xbe, 0x64, 0x42, 0xae,
0x07, 0xb0, 0xd8, 0x89, 0xb4, 0xc1, 0x0f, 0x3b, 0x31, 0x86, 0xaf, 0xce, 0x02, 0x49, 0x5d, 0x1a,
0xbd, 0x36, 0x86, 0x3e, 0xc7, 0x10, 0x50, 0xcf, 0x30, 0x30, 0x09, 0x5c, 0xba, 0x76, 0xa7, 0xc6,
0xd4, 0x65, 0x81, 0xea, 0x94, 0x8e, 0x53, 0x25, 0x59, 0x10, 0x1a, 0x38, 0x6a, 0x41, 0x74, 0x9c,
0x88, 0xd6, 0x3b, 0x0d, 0xbe, 0x5e, 0x70, 0xd3, 0x09, 0x15, 0x76, 0xc4, 0xc2, 0x44, 0x7c, 0x19,
0xec, 0x2f, 0x50, 0x48, 0x99, 0x4f, 0xe1, 0x96, 0x3e, 0x38, 0xc8, 0x94, 0x15, 0x17, 0x80, 0x03,
0x65, 0xc0, 0x99, 0x13, 0xea, 0x00, 0x24, 0x81, 0x98, 0x90, 0xcc, 0x16, 0x19, 0x91, 0x7c, 0xf3,
0x24, 0xee, 0x05, 0x9d, 0xa9, 0xdb, 0xba, 0x26, 0x2c, 0xc2, 0x4b, 0x4e, 0xd6, 0x5b, 0x0d, 0xea,
0x73, 0xc6, 0x5c, 0xbb, 0x9e, 0x9f, 0x21, 0xaf, 0xce, 0x32, 0xad, 0x66, 0xff, 0x23, 0xf7, 0xbe,
0x4c, 0xd0, 0x58, 0x39, 0xbc, 0x46, 0x25, 0x12, 0x6a, 0x83, 0x99, 0xe8, 0xf1, 0xe0, 0x5f, 0xb6,
0xe6, 0x45, 0x22, 0xc8, 0x2b, 0x8a, 0x4d, 0x77, 0x5a, 0xc9, 0x8f, 0xd8, 0x6f, 0xb1, 0xe9, 0x17,
0x90, 0xff, 0x83, 0x49, 0xc5, 0x25, 0x67, 0x27, 0x29, 0xd1, 0xe9, 0x38, 0x11, 0xd1, 0xce, 0x12,
0xc7, 0xe7, 0x14, 0x63, 0xce, 0x89, 0x7c, 0x3b, 0x19, 0x3b, 0x8f, 0xb2, 0x60, 0x39, 0x9c, 0x69,
0xd6, 0x0d, 0x94, 0x14, 0xd3, 0x61, 0x2a, 0x62, 0x5f, 0xbe, 0x38, 0x79, 0x8f, 0x49, 0x91, 0x41,
0x2a, 0xf9, 0xfb, 0x5f, 0x61, 0xeb, 0x11, 0xa7, 0xa3, 0x22, 0xe4, 0xfb, 0x57, 0xfd, 0xd3, 0xea,
0x17, 0xa8, 0x06, 0xe5, 0x9b, 0xd3, 0xde, 0xf0, 0x0a, 0x8f, 0xba, 0x67, 0xfd, 0x0e, 0xbe, 0xad,
0x3a, 0xa8, 0x0a, 0x46, 0x66, 0xfa, 0xed, 0xf2, 0xaa, 0x33, 0xac, 0xd2, 0x6e, 0xef, 0xaf, 0x8e,
0xcb, 0xa4, 0x17, 0x8f, 0x13, 0xd4, 0xf6, 0x3d, 0xf3, 0x7d, 0x76, 0x2f, 0xa9, 0xed, 0xb5, 0xd3,
0x8c, 0x7e, 0x74, 0x98, 0x90, 0x11, 0x1b, 0xc7, 0x92, 0x3a, 0x6d, 0x16, 0x48, 0x1a, 0x05, 0xc4,
0x6f, 0xab, 0x34, 0xdb, 0xd9, 0xd8, 0xc3, 0xf1, 0xb8, 0xa0, 0x0c, 0x47, 0xef, 0x03, 0x00, 0x00,
0xff, 0xff, 0x8d, 0x5a, 0x44, 0x98, 0x25, 0x08, 0x00, 0x00,
}

View File

@ -86,23 +86,6 @@ func (pt *ParamTable) ProxyNum() int {
return len(ret)
}
func (pt *ParamTable) ProxyIDList() []UniqueID {
proxyIDStr, err := pt.Load("nodeID.proxyIDList")
if err != nil {
panic(err)
}
var ret []UniqueID
proxyIDs := strings.Split(proxyIDStr, ",")
for _, i := range proxyIDs {
v, err := strconv.Atoi(i)
if err != nil {
log.Panicf("load proxy id list error, %s", err.Error())
}
ret = append(ret, UniqueID(v))
}
return ret
}
func (pt *ParamTable) queryNodeNum() int {
return len(pt.queryNodeIDList())
}
@ -237,8 +220,6 @@ func (pt *ParamTable) SearchChannelNames() []string {
ret = append(ret, prefix+strconv.Itoa(ID))
}
return ret
//prefix += "-0"
//return []string{prefix}
}
func (pt *ParamTable) SearchResultChannelNames() []string {

View File

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"log"
"math/rand"
"os"
"strconv"
"strings"
@ -34,8 +35,26 @@ var masterServer *master.Master
var testNum = 10
func makeNewChannelNames(names []string, suffix string) []string {
var ret []string
for _, name := range names {
ret = append(ret, name+suffix)
}
return ret
}
func refreshChannelNames() {
suffix := "_test" + strconv.FormatInt(rand.Int63n(100), 10)
master.Params.DDChannelNames = makeNewChannelNames(master.Params.DDChannelNames, suffix)
master.Params.WriteNodeTimeTickChannelNames = makeNewChannelNames(master.Params.WriteNodeTimeTickChannelNames, suffix)
master.Params.InsertChannelNames = makeNewChannelNames(master.Params.InsertChannelNames, suffix)
master.Params.K2SChannelNames = makeNewChannelNames(master.Params.K2SChannelNames, suffix)
master.Params.ProxyTimeTickChannelNames = makeNewChannelNames(master.Params.ProxyTimeTickChannelNames, suffix)
}
func startMaster(ctx context.Context) {
master.Init()
refreshChannelNames()
etcdAddr := master.Params.EtcdAddress
metaRootPath := master.Params.MetaRootPath

View File

@ -12,7 +12,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/kv"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/mvcc/mvccpb"
@ -25,7 +25,7 @@ const (
type metaService struct {
ctx context.Context
kvBase *kv.EtcdKV
kvBase *etcdkv.EtcdKV
replica collectionReplica
}
@ -40,7 +40,7 @@ func newMetaService(ctx context.Context, replica collectionReplica) *metaService
return &metaService{
ctx: ctx,
kvBase: kv.NewEtcdKV(cli, MetaRootPath),
kvBase: etcdkv.NewEtcdKV(cli, MetaRootPath),
replica: replica,
}
}

View File

@ -4,7 +4,6 @@ import (
"log"
"os"
"strconv"
"strings"
"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
)
@ -24,7 +23,7 @@ func (p *ParamTable) Init() {
queryNodeIDStr := os.Getenv("QUERY_NODE_ID")
if queryNodeIDStr == "" {
queryNodeIDList := p.queryNodeIDList()
queryNodeIDList := p.QueryNodeIDList()
if len(queryNodeIDList) <= 0 {
queryNodeIDStr = "0"
} else {
@ -42,23 +41,6 @@ func (p *ParamTable) pulsarAddress() (string, error) {
return url, nil
}
func (p *ParamTable) queryNodeIDList() []UniqueID {
queryNodeIDStr, err := p.Load("nodeID.queryNodeIDList")
if err != nil {
panic(err)
}
var ret []UniqueID
queryNodeIDs := strings.Split(queryNodeIDStr, ",")
for _, i := range queryNodeIDs {
v, err := strconv.Atoi(i)
if err != nil {
log.Panicf("load proxy id list error, %s", err.Error())
}
ret = append(ret, UniqueID(v))
}
return ret
}
func (p *ParamTable) QueryNodeID() UniqueID {
queryNodeID, err := p.Load("_queryNodeID")
if err != nil {
@ -232,7 +214,7 @@ func (p *ParamTable) statsChannelName() string {
func (p *ParamTable) sliceIndex() int {
queryNodeID := p.QueryNodeID()
queryNodeIDList := p.queryNodeIDList()
queryNodeIDList := p.QueryNodeIDList()
for i := 0; i < len(queryNodeIDList); i++ {
if queryNodeID == queryNodeIDList[i] {
return i
@ -242,5 +224,5 @@ func (p *ParamTable) sliceIndex() int {
}
func (p *ParamTable) queryNodeNum() int {
return len(p.queryNodeIDList())
return len(p.QueryNodeIDList())
}

View File

@ -18,7 +18,7 @@ func TestParamTable_PulsarAddress(t *testing.T) {
func TestParamTable_QueryNodeID(t *testing.T) {
id := Params.QueryNodeID()
assert.Contains(t, Params.queryNodeIDList(), id)
assert.Contains(t, Params.QueryNodeIDList(), id)
}
func TestParamTable_insertChannelRange(t *testing.T) {
@ -89,11 +89,7 @@ func TestParamTable_searchChannelNames(t *testing.T) {
func TestParamTable_searchResultChannelNames(t *testing.T) {
names := Params.searchResultChannelNames()
num := Params.queryNodeNum()
assert.Equal(t, num, len(names))
for i := 0; i < num; i++ {
assert.Equal(t, fmt.Sprintf("searchResult-%d", i), names[i])
}
assert.NotNil(t, names)
}
func TestParamTable_msgChannelSubName(t *testing.T) {

View File

@ -1,6 +1,7 @@
package querynode
import (
"context"
"encoding/binary"
"log"
"math"
@ -12,15 +13,12 @@ import (
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
//"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb"
//"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)
func TestSearch_Search(t *testing.T) {
node := newQueryNode()
node := NewQueryNode(context.Background(), 0)
initTestMeta(t, node, "collection0", 0, 0)
pulsarURL, _ := Params.pulsarAddress()
@ -191,7 +189,7 @@ func TestSearch_Search(t *testing.T) {
}
func TestSearch_SearchMultiSegments(t *testing.T) {
node := newQueryNode()
node := NewQueryNode(context.Background(), 0)
initTestMeta(t, node, "collection0", 0, 0)
pulsarURL, _ := Params.pulsarAddress()

4
internal/storage/cwrapper/.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
output
cmake-build-debug
.idea
cmake_build

View File

@ -0,0 +1,63 @@
cmake_minimum_required(VERSION 3.14...3.17 FATAL_ERROR)
project(wrapper)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
if (NOT GIT_ARROW_REPO)
set(GIT_ARROW_REPO "https://github.com/apache/arrow.git")
endif ()
message(STATUS "Arrow Repo:" ${GIT_ARROW_REPO})
if (NOT GIT_ARROW_TAG)
set(GIT_ARROW_TAG "apache-arrow-2.0.0")
endif ()
message(STATUS "Arrow Tag:" ${GIT_ARROW_TAG})
###################################################################################################
# - cmake modules ---------------------------------------------------------------------------------
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/" ${CMAKE_MODULE_PATH})
###################################################################################################
# - build arrow ------------------------------------------------------------------------------------
message(STATUS "BUILDING ARROW")
include(ConfigureArrow)
if (ARROW_FOUND)
message(STATUS "Apache Arrow found in ${ARROW_INCLUDE_DIR}")
else ()
message(FATAL_ERROR "Apache Arrow not found, please check your settings.")
endif (ARROW_FOUND)
add_library(arrow STATIC IMPORTED ${ARROW_LIB})
add_library(parquet STATIC IMPORTED ${PARQUET_LIB})
add_library(thrift STATIC IMPORTED ${THRIFT_LIB})
add_library(utf8proc STATIC IMPORTED ${UTF8PROC_LIB})
if (ARROW_FOUND)
set_target_properties(arrow PROPERTIES IMPORTED_LOCATION ${ARROW_LIB})
set_target_properties(parquet PROPERTIES IMPORTED_LOCATION ${PARQUET_LIB})
set_target_properties(thrift PROPERTIES IMPORTED_LOCATION ${THRIFT_LIB})
set_target_properties(utf8proc PROPERTIES IMPORTED_LOCATION ${UTF8PROC_LIB})
endif (ARROW_FOUND)
###################################################################################################
include_directories(${ARROW_INCLUDE_DIR})
include_directories(${PROJECT_SOURCE_DIR})
add_library(wrapper STATIC)
target_sources(wrapper PUBLIC ParquetWrapper.cpp
PayloadStream.cpp)
target_link_libraries(wrapper PUBLIC parquet arrow thrift utf8proc pthread)
if(NOT CMAKE_INSTALL_PREFIX)
set(CMAKE_INSTALL_PREFIX ${CMAKE_CURRENT_BINARY_DIR})
endif()
install(TARGETS wrapper DESTINATION ${CMAKE_INSTALL_PREFIX})
install(FILES ${ARROW_LIB} ${PARQUET_LIB} ${THRIFT_LIB} ${UTF8PROC_LIB} DESTINATION ${CMAKE_INSTALL_PREFIX})
add_subdirectory(test)

View File

@ -0,0 +1,42 @@
#pragma once
enum ColumnType : int {
NONE = 0,
BOOL = 1,
INT8 = 2,
INT16 = 3,
INT32 = 4,
INT64 = 5,
FLOAT = 10,
DOUBLE = 11,
STRING = 20,
VECTOR_BINARY = 100,
VECTOR_FLOAT = 101
};
enum ErrorCode : int {
SUCCESS = 0,
UNEXPECTED_ERROR = 1,
CONNECT_FAILED = 2,
PERMISSION_DENIED = 3,
COLLECTION_NOT_EXISTS = 4,
ILLEGAL_ARGUMENT = 5,
ILLEGAL_DIMENSION = 7,
ILLEGAL_INDEX_TYPE = 8,
ILLEGAL_COLLECTION_NAME = 9,
ILLEGAL_TOPK = 10,
ILLEGAL_ROWRECORD = 11,
ILLEGAL_VECTOR_ID = 12,
ILLEGAL_SEARCH_RESULT = 13,
FILE_NOT_FOUND = 14,
META_FAILED = 15,
CACHE_FAILED = 16,
CANNOT_CREATE_FOLDER = 17,
CANNOT_CREATE_FILE = 18,
CANNOT_DELETE_FOLDER = 19,
CANNOT_DELETE_FILE = 20,
BUILD_INDEX_ERROR = 21,
ILLEGAL_NLIST = 22,
ILLEGAL_METRIC_TYPE = 23,
OUT_OF_MEMORY = 24,
DD_REQUEST_RACE = 1000
};
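The ColumnType and ErrorCode values above are plain C enums so they can cross the cgo boundary as ints. Below is a minimal sketch of how a caller might check a CStatus built from these codes; CStatus itself is declared in ParquetWrapper.h further below, and the helper name CheckStatus, as well as the assumption that a non-null error_msg is malloc'ed and owned by the caller, are illustrative rather than part of this commit.

#include <cstdio>
#include <cstdlib>

#include "ColumnType.h"
#include "ParquetWrapper.h"

// Hypothetical helper: report and release a CStatus produced by the payload C API.
static bool CheckStatus(const CStatus &st) {
  if (st.error_code == static_cast<int>(ErrorCode::SUCCESS)) {
    return true;
  }
  if (st.error_msg != nullptr) {
    std::fprintf(stderr, "payload error %d: %s\n", st.error_code, st.error_msg);
    // ErrorMsg() in ParquetWrapper.cpp allocates the message with malloc,
    // so the receiver is assumed to free it.
    std::free(const_cast<char *>(st.error_msg));
  }
  return false;
}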

View File

@ -0,0 +1,497 @@
#include "ParquetWrapper.h"
#include "PayloadStream.h"
static const char *ErrorMsg(const std::string &msg) {
if (msg.empty()) return nullptr;
auto ret = (char *) malloc(msg.size() + 1);
std::memcpy(ret, msg.c_str(), msg.size());
ret[msg.size()] = '\0';
return ret;
}
extern "C" CPayloadWriter NewPayloadWriter(int columnType) {
auto p = new wrapper::PayloadWriter;
p->builder = nullptr;
p->schema = nullptr;
p->output = nullptr;
p->dimension = wrapper::EMPTY_DIMENSION;
p->rows = 0;
switch (static_cast<ColumnType>(columnType)) {
case ColumnType::BOOL : {
p->columnType = ColumnType::BOOL;
p->builder = std::make_shared<arrow::BooleanBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::boolean())});
break;
}
case ColumnType::INT8 : {
p->columnType = ColumnType::INT8;
p->builder = std::make_shared<arrow::Int8Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int8())});
break;
}
case ColumnType::INT16 : {
p->columnType = ColumnType::INT16;
p->builder = std::make_shared<arrow::Int16Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int16())});
break;
}
case ColumnType::INT32 : {
p->columnType = ColumnType::INT32;
p->builder = std::make_shared<arrow::Int32Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int32())});
break;
}
case ColumnType::INT64 : {
p->columnType = ColumnType::INT64;
p->builder = std::make_shared<arrow::Int64Builder>();
p->schema = arrow::schema({arrow::field("val", arrow::int64())});
break;
}
case ColumnType::FLOAT : {
p->columnType = ColumnType::FLOAT;
p->builder = std::make_shared<arrow::FloatBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::float32())});
break;
}
case ColumnType::DOUBLE : {
p->columnType = ColumnType::DOUBLE;
p->builder = std::make_shared<arrow::DoubleBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::float64())});
break;
}
case ColumnType::STRING : {
p->columnType = ColumnType::STRING;
p->builder = std::make_shared<arrow::StringBuilder>();
p->schema = arrow::schema({arrow::field("val", arrow::utf8())});
break;
}
case ColumnType::VECTOR_BINARY : {
p->columnType = ColumnType::VECTOR_BINARY;
p->dimension = wrapper::EMPTY_DIMENSION;
break;
}
case ColumnType::VECTOR_FLOAT : {
p->columnType = ColumnType::VECTOR_FLOAT;
p->dimension = wrapper::EMPTY_DIMENSION;
break;
}
default: {
delete p;
return nullptr;
}
}
return reinterpret_cast<CPayloadWriter>(p);
}
template<typename DT, typename BT>
CStatus AddValuesToPayload(CPayloadWriter payloadWriter, DT *values, int length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
if (length <= 0) return st;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
auto builder = std::dynamic_pointer_cast<BT>(p->builder);
if (builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
if (p->output != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("payload has finished");
return st;
}
auto ast = builder->AppendValues(values, values + length);
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
p->rows += length;
return st;
}
extern "C" CStatus AddBooleanToPayload(CPayloadWriter payloadWriter, bool *values, int length) {
return AddValuesToPayload<bool, arrow::BooleanBuilder>(payloadWriter, values, length);
}
extern "C" CStatus AddInt8ToPayload(CPayloadWriter payloadWriter, int8_t *values, int length) {
return AddValuesToPayload<int8_t, arrow::Int8Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddInt16ToPayload(CPayloadWriter payloadWriter, int16_t *values, int length) {
return AddValuesToPayload<int16_t, arrow::Int16Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddInt32ToPayload(CPayloadWriter payloadWriter, int32_t *values, int length) {
return AddValuesToPayload<int32_t, arrow::Int32Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddInt64ToPayload(CPayloadWriter payloadWriter, int64_t *values, int length) {
return AddValuesToPayload<int64_t, arrow::Int64Builder>(payloadWriter, values, length);
}
extern "C" CStatus AddFloatToPayload(CPayloadWriter payloadWriter, float *values, int length) {
return AddValuesToPayload<float, arrow::FloatBuilder>(payloadWriter, values, length);
}
extern "C" CStatus AddDoubleToPayload(CPayloadWriter payloadWriter, double *values, int length) {
return AddValuesToPayload<double, arrow::DoubleBuilder>(payloadWriter, values, length);
}
extern "C" CStatus AddOneStringToPayload(CPayloadWriter payloadWriter, char *cstr, int str_size) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
auto builder = std::dynamic_pointer_cast<arrow::StringBuilder>(p->builder);
if (builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
if (p->output != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("payload has finished");
return st;
}
arrow::Status ast;
if (cstr == nullptr || str_size < 0) {
ast = builder->AppendNull();
} else {
ast = builder->Append(cstr, str_size);
}
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
p->rows++;
return st;
}
extern "C" CStatus AddBinaryVectorToPayload(CPayloadWriter payloadWriter, uint8_t *values, int dimension, int length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
if (length <= 0) return st;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
if (p->dimension == wrapper::EMPTY_DIMENSION) {
if ((dimension % 8) || (dimension <= 0)) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect dimension value");
return st;
}
if (p->builder != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
p->builder = std::make_shared<arrow::FixedSizeBinaryBuilder>(arrow::fixed_size_binary(dimension / 8));
p->schema = arrow::schema({arrow::field("val", arrow::fixed_size_binary(dimension / 8))});
p->dimension = dimension;
} else if (p->dimension != dimension) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("dimension changed");
return st;
}
auto builder = std::dynamic_pointer_cast<arrow::FixedSizeBinaryBuilder>(p->builder);
if (builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
if (p->output != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("payload has finished");
return st;
}
auto ast = builder->AppendValues(values, (dimension / 8) * length);
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
p->rows += length;
return st;
}
extern "C" CStatus AddFloatVectorToPayload(CPayloadWriter payloadWriter, float *values, int dimension, int length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
if (length <= 0) return st;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
if (p->dimension == wrapper::EMPTY_DIMENSION) {
if (p->builder != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
p->builder = std::make_shared<arrow::FixedSizeBinaryBuilder>(
arrow::fixed_size_binary(dimension * sizeof(float)));
p->schema = arrow::schema({arrow::field("val", arrow::fixed_size_binary(dimension * sizeof(float)))});
p->dimension = dimension;
} else if (p->dimension != dimension) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("dimension changed");
return st;
}
auto builder = std::dynamic_pointer_cast<arrow::FixedSizeBinaryBuilder>(p->builder);
if (builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
if (p->output != nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("payload has finished");
return st;
}
auto ast = builder->AppendValues(reinterpret_cast<const uint8_t *>(values), dimension * length * sizeof(float));
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
p->rows += length;
return st;
}
extern "C" CStatus FinishPayloadWriter(CPayloadWriter payloadWriter) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
if (p->builder == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("arrow builder is nullptr");
return st;
}
if (p->output == nullptr) {
std::shared_ptr<arrow::Array> array;
auto ast = p->builder->Finish(&array);
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
auto table = arrow::Table::Make(p->schema, {array});
p->output = std::make_shared<wrapper::PayloadOutputStream>();
ast = parquet::arrow::WriteTable(*table, arrow::default_memory_pool(), p->output, 1024 * 1024 * 1024);
if (!ast.ok()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg(ast.message());
return st;
}
}
return st;
}
CBuffer GetPayloadBufferFromWriter(CPayloadWriter payloadWriter) {
CBuffer buf;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
if (p->output == nullptr) {
// writer not finished yet: return an empty buffer instead of dereferencing a null output stream
buf.length = 0;
buf.data = nullptr;
return buf;
}
auto &output = p->output->Buffer();
buf.length = static_cast<int>(output.size());
buf.data = (char *) (output.data());
return buf;
}
int GetPayloadLengthFromWriter(CPayloadWriter payloadWriter) {
auto p = reinterpret_cast<wrapper::PayloadWriter *>(payloadWriter);
return p->rows;
}
extern "C" CStatus ReleasePayloadWriter(CPayloadWriter handler) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadWriter *>(handler);
if (p != nullptr) delete p;
return st;
}
extern "C" CPayloadReader NewPayloadReader(int columnType, uint8_t *buffer, int64_t buf_size) {
auto p = new wrapper::PayloadReader;
p->bValues = nullptr;
p->input = std::make_shared<wrapper::PayloadInputStream>(buffer, buf_size);
auto st = parquet::arrow::OpenFile(p->input, arrow::default_memory_pool(), &p->reader);
if (!st.ok()) {
delete p;
return nullptr;
}
st = p->reader->ReadTable(&p->table);
if (!st.ok()) {
delete p;
return nullptr;
}
p->column = p->table->column(0);
assert(p->column != nullptr);
assert(p->column->chunks().size() == 1);
p->array = p->column->chunk(0);
switch (columnType) {
case ColumnType::BOOL :
case ColumnType::INT8 :
case ColumnType::INT16 :
case ColumnType::INT32 :
case ColumnType::INT64 :
case ColumnType::FLOAT :
case ColumnType::DOUBLE :
case ColumnType::STRING :
case ColumnType::VECTOR_BINARY :
case ColumnType::VECTOR_FLOAT : {
break;
}
default: {
delete p;
return nullptr;
}
}
return reinterpret_cast<CPayloadReader>(p);
}
extern "C" CStatus GetBoolFromPayload(CPayloadReader payloadReader, bool **values, int *length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
if (p->bValues == nullptr) {
auto array = std::dynamic_pointer_cast<arrow::BooleanArray>(p->array);
if (array == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
int len = array->length();
p->bValues = new bool[len];
for (int i = 0; i < len; i++) {
p->bValues[i] = array->Value(i);
}
}
*values = p->bValues;
*length = p->array->length();
return st;
}
template<typename DT, typename AT>
CStatus GetValuesFromPayload(CPayloadReader payloadReader, DT **values, int *length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
auto array = std::dynamic_pointer_cast<AT>(p->array);
if (array == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("incorrect data type");
return st;
}
*length = array->length();
*values = (DT *) array->raw_values();
return st;
}
extern "C" CStatus GetInt8FromPayload(CPayloadReader payloadReader, int8_t **values, int *length) {
return GetValuesFromPayload<int8_t, arrow::Int8Array>(payloadReader, values, length);
}
extern "C" CStatus GetInt16FromPayload(CPayloadReader payloadReader, int16_t **values, int *length) {
return GetValuesFromPayload<int16_t, arrow::Int16Array>(payloadReader, values, length);
}
extern "C" CStatus GetInt32FromPayload(CPayloadReader payloadReader, int32_t **values, int *length) {
return GetValuesFromPayload<int32_t, arrow::Int32Array>(payloadReader, values, length);
}
extern "C" CStatus GetInt64FromPayload(CPayloadReader payloadReader, int64_t **values, int *length) {
return GetValuesFromPayload<int64_t, arrow::Int64Array>(payloadReader, values, length);
}
extern "C" CStatus GetFloatFromPayload(CPayloadReader payloadReader, float **values, int *length) {
return GetValuesFromPayload<float, arrow::FloatArray>(payloadReader, values, length);
}
extern "C" CStatus GetDoubleFromPayload(CPayloadReader payloadReader, double **values, int *length) {
return GetValuesFromPayload<double, arrow::DoubleArray>(payloadReader, values, length);
}
extern "C" CStatus GetOneStringFromPayload(CPayloadReader payloadReader, int idx, char **cstr, int *str_size) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
auto array = std::dynamic_pointer_cast<arrow::StringArray>(p->array);
if (array == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("Incorrect data type");
return st;
}
if (idx >= array->length()) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("memory overflow");
return st;
}
arrow::StringArray::offset_type length;
*cstr = (char *) array->GetValue(idx, &length);
*str_size = length;
return st;
}
extern "C" CStatus GetBinaryVectorFromPayload(CPayloadReader payloadReader,
uint8_t **values,
int *dimension,
int *length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
auto array = std::dynamic_pointer_cast<arrow::FixedSizeBinaryArray>(p->array);
if (array == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("Incorrect data type");
return st;
}
*dimension = array->byte_width() * 8;
*length = array->length() / array->byte_width();
*values = (uint8_t *) array->raw_values();
return st;
}
extern "C" CStatus GetFloatVectorFromPayload(CPayloadReader payloadReader,
float **values,
int *dimension,
int *length) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
auto array = std::dynamic_pointer_cast<arrow::FixedSizeBinaryArray>(p->array);
if (array == nullptr) {
st.error_code = static_cast<int>(ErrorCode::UNEXPECTED_ERROR);
st.error_msg = ErrorMsg("Incorrect data type");
return st;
}
*dimension = array->byte_width() / sizeof(float);
*length = array->length() / array->byte_width();
*values = (float *) array->raw_values();
return st;
}
extern "C" int GetPayloadLengthFromReader(CPayloadReader payloadReader) {
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
if (p->array == nullptr) return 0;
auto ba = std::dynamic_pointer_cast<arrow::FixedSizeBinaryArray>(p->array);
if (ba == nullptr) {
return p->array->length();
} else {
return ba->length() / ba->byte_width();
}
}
extern "C" CStatus ReleasePayloadReader(CPayloadReader payloadReader) {
CStatus st;
st.error_code = static_cast<int>(ErrorCode::SUCCESS);
st.error_msg = nullptr;
auto p = reinterpret_cast<wrapper::PayloadReader *>(payloadReader);
delete[] p->bValues;
delete p;
return st;
}
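A note on the binary-vector path above: AddBinaryVectorToPayload only accepts a dimension that is a positive multiple of 8 and stores each row as dimension / 8 bytes of fixed-size binary. The following sketch appends two 16-dimensional binary vectors; the function name and byte values are made up for illustration, and it assumes ColumnType.h and ParquetWrapper.h are on the include path.

#include <cstdint>

#include "ColumnType.h"
#include "ParquetWrapper.h"

// Illustrative only: two 16-dimensional binary vectors, i.e. 16 / 8 = 2 bytes per row.
void AppendBinaryVectorsSketch() {
  CPayloadWriter writer = NewPayloadWriter(ColumnType::VECTOR_BINARY);
  uint8_t rows[4] = {0xAB, 0xCD,   // row 0
                     0x12, 0x34};  // row 1
  CStatus st = AddBinaryVectorToPayload(writer, rows, /*dimension=*/16, /*length=*/2);
  // A later call with a different dimension would fail with "dimension changed".
  if (st.error_code == static_cast<int>(ErrorCode::SUCCESS)) {
    FinishPayloadWriter(writer);
  }
  ReleasePayloadWriter(writer);
}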

View File

@ -0,0 +1,58 @@
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
typedef void *CPayloadWriter;
typedef struct CBuffer {
char *data;
int length;
} CBuffer;
typedef struct CStatus {
int error_code;
const char *error_msg;
} CStatus;
CPayloadWriter NewPayloadWriter(int columnType);
//CStatus AddBooleanToPayload(CPayloadWriter payloadWriter, bool *values, int length);
CStatus AddInt8ToPayload(CPayloadWriter payloadWriter, int8_t *values, int length);
CStatus AddInt16ToPayload(CPayloadWriter payloadWriter, int16_t *values, int length);
CStatus AddInt32ToPayload(CPayloadWriter payloadWriter, int32_t *values, int length);
CStatus AddInt64ToPayload(CPayloadWriter payloadWriter, int64_t *values, int length);
CStatus AddFloatToPayload(CPayloadWriter payloadWriter, float *values, int length);
CStatus AddDoubleToPayload(CPayloadWriter payloadWriter, double *values, int length);
CStatus AddOneStringToPayload(CPayloadWriter payloadWriter, char *cstr, int str_size);
CStatus AddBinaryVectorToPayload(CPayloadWriter payloadWriter, uint8_t *values, int dimension, int length);
CStatus AddFloatVectorToPayload(CPayloadWriter payloadWriter, float *values, int dimension, int length);
CStatus FinishPayloadWriter(CPayloadWriter payloadWriter);
CBuffer GetPayloadBufferFromWriter(CPayloadWriter payloadWriter);
int GetPayloadLengthFromWriter(CPayloadWriter payloadWriter);
CStatus ReleasePayloadWriter(CPayloadWriter handler);
//============= payload reader ======================
typedef void *CPayloadReader;
CPayloadReader NewPayloadReader(int columnType, uint8_t *buffer, int64_t buf_size);
//CStatus GetBoolFromPayload(CPayloadReader payloadReader, bool **values, int *length);
CStatus GetInt8FromPayload(CPayloadReader payloadReader, int8_t **values, int *length);
CStatus GetInt16FromPayload(CPayloadReader payloadReader, int16_t **values, int *length);
CStatus GetInt32FromPayload(CPayloadReader payloadReader, int32_t **values, int *length);
CStatus GetInt64FromPayload(CPayloadReader payloadReader, int64_t **values, int *length);
CStatus GetFloatFromPayload(CPayloadReader payloadReader, float **values, int *length);
CStatus GetDoubleFromPayload(CPayloadReader payloadReader, double **values, int *length);
CStatus GetOneStringFromPayload(CPayloadReader payloadReader, int idx, char **cstr, int *str_size);
CStatus GetBinaryVectorFromPayload(CPayloadReader payloadReader, uint8_t **values, int *dimension, int *length);
CStatus GetFloatVectorFromPayload(CPayloadReader payloadReader, float **values, int *dimension, int *length);
int GetPayloadLengthFromReader(CPayloadReader payloadReader);
CStatus ReleasePayloadReader(CPayloadReader payloadReader);
#ifdef __cplusplus
}
#endif
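For reference, a minimal end-to-end sketch of the call sequence this header implies: write an INT64 column, finish it, then re-open the resulting parquet bytes with a reader. The main function and literal values are illustrative; note that the CBuffer returned by GetPayloadBufferFromWriter points into the writer's output stream, so the writer is released only after reading is done.

#include <cassert>
#include <cstdint>

#include "ColumnType.h"
#include "ParquetWrapper.h"

int main() {
  CPayloadWriter writer = NewPayloadWriter(ColumnType::INT64);
  int64_t values[3] = {1, 2, 3};
  CStatus st = AddInt64ToPayload(writer, values, 3);
  assert(st.error_code == static_cast<int>(ErrorCode::SUCCESS));
  st = FinishPayloadWriter(writer);
  assert(st.error_code == static_cast<int>(ErrorCode::SUCCESS));

  CBuffer buf = GetPayloadBufferFromWriter(writer);
  CPayloadReader reader = NewPayloadReader(ColumnType::INT64,
                                           reinterpret_cast<uint8_t *>(buf.data),
                                           buf.length);
  int64_t *out = nullptr;
  int out_len = 0;
  st = GetInt64FromPayload(reader, &out, &out_len);
  assert(st.error_code == static_cast<int>(ErrorCode::SUCCESS) && out_len == 3);

  ReleasePayloadReader(reader);
  ReleasePayloadWriter(writer);  // released last: buf.data points into the writer
  return 0;
}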

View File

@ -0,0 +1,91 @@
#include "PayloadStream.h"
namespace wrapper {
PayloadOutputStream::PayloadOutputStream() {
buffer_.reserve(1024 * 1024);
closed_ = false;
}
PayloadOutputStream::~PayloadOutputStream() noexcept {
}
arrow::Status PayloadOutputStream::Close() {
closed_ = true;
return arrow::Status::OK();
}
arrow::Result<int64_t> PayloadOutputStream::Tell() const {
return arrow::Result<int64_t>(buffer_.size());
}
bool PayloadOutputStream::closed() const {
return closed_;
}
arrow::Status PayloadOutputStream::Write(const void *data, int64_t nbytes) {
if (nbytes <= 0) return arrow::Status::OK();
auto size = buffer_.size();
buffer_.resize(size + nbytes);
std::memcpy(buffer_.data() + size, data, nbytes);
return arrow::Status::OK();
}
arrow::Status PayloadOutputStream::Flush() {
return arrow::Status::OK();
}
const std::vector<uint8_t> &PayloadOutputStream::Buffer() const {
return buffer_;
}
PayloadInputStream::PayloadInputStream(const uint8_t *data, int64_t size) :
data_(data), size_(size), tell_(0), closed_(false) {
}
PayloadInputStream::~PayloadInputStream() noexcept {
}
arrow::Status PayloadInputStream::Close() {
closed_ = true;
return arrow::Status::OK();
}
bool PayloadInputStream::closed() const {
return closed_;
}
arrow::Result<int64_t> PayloadInputStream::Tell() const {
return arrow::Result<int64_t>(tell_);
}
arrow::Status PayloadInputStream::Seek(int64_t position) {
if (position < 0 || position >= size_) return arrow::Status::IOError("invalid position");
tell_ = position;
return arrow::Status::OK();
}
arrow::Result<int64_t> PayloadInputStream::Read(int64_t nbytes, void *out) {
auto remain = size_ - tell_;
if (nbytes > remain) nbytes = remain;
std::memcpy(out, data_ + tell_, nbytes);
tell_ += nbytes;
return arrow::Result<int64_t>(nbytes);
}
arrow::Result<std::shared_ptr<arrow::Buffer>> PayloadInputStream::Read(int64_t nbytes) {
auto remain = size_ - tell_;
if (nbytes > remain) nbytes = remain;
auto buf = std::make_shared<arrow::Buffer>(data_ + tell_, nbytes);
tell_ += nbytes;
return arrow::Result<std::shared_ptr<arrow::Buffer>>(buf);
}
arrow::Result<int64_t> PayloadInputStream::GetSize() {
return arrow::Result<int64_t>(size_);
}
}

View File

@ -0,0 +1,75 @@
#pragma once
#include <arrow/api.h>
#include <arrow/io/interfaces.h>
#include <parquet/arrow/writer.h>
#include <parquet/arrow/reader.h>
#include "ColumnType.h"
namespace wrapper {
class PayloadOutputStream;
class PayloadInputStream;
constexpr int EMPTY_DIMENSION = -1;
struct PayloadWriter {
ColumnType columnType;
int dimension; // binary vector, float vector
std::shared_ptr<arrow::ArrayBuilder> builder;
std::shared_ptr<arrow::Schema> schema;
std::shared_ptr<PayloadOutputStream> output;
int rows;
};
struct PayloadReader {
ColumnType column_type;
std::shared_ptr<PayloadInputStream> input;
std::unique_ptr<parquet::arrow::FileReader> reader;
std::shared_ptr<arrow::Table> table;
std::shared_ptr<arrow::ChunkedArray> column;
std::shared_ptr<arrow::Array> array;
bool *bValues;
};
class PayloadOutputStream : public arrow::io::OutputStream {
public:
PayloadOutputStream();
~PayloadOutputStream();
arrow::Status Close() override;
arrow::Result<int64_t> Tell() const override;
bool closed() const override;
arrow::Status Write(const void *data, int64_t nbytes) override;
arrow::Status Flush() override;
public:
const std::vector<uint8_t> &Buffer() const;
private:
std::vector<uint8_t> buffer_;
bool closed_;
};
class PayloadInputStream : public arrow::io::RandomAccessFile {
public:
PayloadInputStream(const uint8_t *data, int64_t size);
~PayloadInputStream();
arrow::Status Close() override;
arrow::Result<int64_t> Tell() const override;
bool closed() const override;
arrow::Status Seek(int64_t position) override;
arrow::Result<int64_t> Read(int64_t nbytes, void *out) override;
arrow::Result<std::shared_ptr<arrow::Buffer>> Read(int64_t nbytes) override;
arrow::Result<int64_t> GetSize() override;
private:
const uint8_t *data_;
const int64_t size_;
int64_t tell_;
bool closed_;
};
}

View File

@ -0,0 +1,58 @@
#!/bin/bash
SOURCE=${BASH_SOURCE[0]}
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR=$( cd -P "$( dirname "$SOURCE" )" && pwd )
SOURCE=$(readlink "$SOURCE")
[[ $SOURCE != /* ]] && SOURCE=$DIR/$SOURCE # if $SOURCE was a relative symlink, resolve it relative to the directory of the symlink
done
DIR=$( cd -P "$( dirname "$SOURCE" )" && pwd )
# echo $DIR
CMAKE_BUILD=${DIR}/cmake_build
OUTPUT_LIB=${DIR}/output
if [ ! -d ${CMAKE_BUILD} ];then
mkdir ${CMAKE_BUILD}
fi
if [ -d ${OUTPUT_LIB} ];then
rm -rf ${OUTPUT_LIB}
fi
mkdir ${OUTPUT_LIB}
BUILD_TYPE="Debug"
GIT_ARROW_REPO="https://github.com/apache/arrow.git"
GIT_ARROW_TAG="apache-arrow-2.0.0"
while getopts "a:b:t:h" arg; do
case $arg in
t)
BUILD_TYPE=$OPTARG # BUILD_TYPE
;;
a)
GIT_ARROW_REPO=$OPTARG
;;
b)
GIT_ARROW_TAG=$OPTARG
;;
h) # help
echo "-t: build type(default: Debug)
-a: arrow repo(default: https://github.com/apache/arrow.git)
-b: arrow tag(default: apache-arrow-2.0.0)
-h: help
"
exit 0
;;
?)
echo "ERROR! unknown argument"
exit 1
;;
esac
done
echo "BUILD_TYPE: " $BUILD_TYPE
echo "GIT_ARROW_REPO: " $GIT_ARROW_REPO
echo "GIT_ARROW_TAG: " $GIT_ARROW_TAG
pushd ${CMAKE_BUILD}
cmake -DCMAKE_INSTALL_PREFIX=${OUTPUT_LIB} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DGIT_ARROW_REPO=${GIT_ARROW_REPO} -DGIT_ARROW_TAG=${GIT_ARROW_TAG} .. && make && make install

View File

@ -0,0 +1,98 @@
set(ARROW_ROOT ${CMAKE_BINARY_DIR}/arrow)
set(ARROW_CMAKE_ARGS " -DARROW_WITH_LZ4=OFF"
" -DARROW_WITH_ZSTD=OFF"
" -DARROW_WITH_BROTLI=OFF"
" -DARROW_WITH_SNAPPY=OFF"
" -DARROW_WITH_ZLIB=OFF"
" -DARROW_BUILD_STATIC=ON"
" -DARROW_BUILD_SHARED=OFF"
" -DARROW_BOOST_USE_SHARED=OFF"
" -DARROW_BUILD_TESTS=OFF"
" -DARROW_TEST_MEMCHECK=OFF"
" -DARROW_BUILD_BENCHMARKS=OFF"
" -DARROW_CUDA=OFF"
" -DARROW_JEMALLOC=OFF"
" -DARROW_PYTHON=OFF"
" -DARROW_BUILD_UTILITIES=OFF"
" -DARROW_PARQUET=ON"
" -DPARQUET_BUILD_SHARED=OFF"
" -DARROW_S3=OFF"
" -DCMAKE_VERBOSE_MAKEFILE=ON")
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/cmake/Templates/Arrow.CMakeLists.txt.cmake"
"${ARROW_ROOT}/CMakeLists.txt")
file(MAKE_DIRECTORY "${ARROW_ROOT}/build")
file(MAKE_DIRECTORY "${ARROW_ROOT}/install")
execute_process(
COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
RESULT_VARIABLE ARROW_CONFIG
WORKING_DIRECTORY ${ARROW_ROOT})
if(ARROW_CONFIG)
message(FATAL_ERROR "Configuring Arrow failed: " ${ARROW_CONFIG})
endif(ARROW_CONFIG)
#set(PARALLEL_BUILD -j)
#if($ENV{PARALLEL_LEVEL})
# set(NUM_JOBS $ENV{PARALLEL_LEVEL})
# set(PARALLEL_BUILD "${PARALLEL_BUILD}${NUM_JOBS}")
#endif($ENV{PARALLEL_LEVEL})
set(NUM_JOBS 4)
set(PARALLEL_BUILD "-j${NUM_JOBS}")
if(${NUM_JOBS})
if(${NUM_JOBS} EQUAL 1)
message(STATUS "ARROW BUILD: Enabling Sequential CMake build")
elseif(${NUM_JOBS} GREATER 1)
message(STATUS "ARROW BUILD: Enabling Parallel CMake build with ${NUM_JOBS} jobs")
endif(${NUM_JOBS} EQUAL 1)
else()
message(STATUS "ARROW BUILD: Enabling Parallel CMake build with all threads")
endif(${NUM_JOBS})
execute_process(
COMMAND ${CMAKE_COMMAND} --build .. -- ${PARALLEL_BUILD}
RESULT_VARIABLE ARROW_BUILD
WORKING_DIRECTORY ${ARROW_ROOT}/build)
if(ARROW_BUILD)
message(FATAL_ERROR "Building Arrow failed: " ${ARROW_BUILD})
endif(ARROW_BUILD)
message(STATUS "Arrow installed here: " ${ARROW_ROOT}/install)
set(ARROW_LIBRARY_DIR "${ARROW_ROOT}/install/lib")
set(ARROW_INCLUDE_DIR "${ARROW_ROOT}/install/include")
find_library(ARROW_LIB arrow
NO_DEFAULT_PATH
HINTS "${ARROW_LIBRARY_DIR}")
message(STATUS "Arrow library: " ${ARROW_LIB})
find_library(PARQUET_LIB parquet
NO_DEFAULT_PATH
HINTS "${ARROW_LIBRARY_DIR}")
message(STATUS "Parquet library: " ${PARQUET_LIB})
find_library(THRIFT_LIB thrift
NO_DEFAULT_PATH
HINTS "${ARROW_ROOT}/build/thrift_ep-install/lib")
message(STATUS "Thirft library: " ${THRIFT_LIB})
find_library(UTF8PROC_LIB utf8proc
NO_DEFAULT_PATH
HINTS "${ARROW_ROOT}/build/utf8proc_ep-install/lib")
message(STATUS "utf8proc library: " ${UTF8PROC_LIB})
if(ARROW_LIB AND PARQUET_LIB AND THRIFT_LIB AND UTF8PROC_LIB)
set(ARROW_FOUND TRUE)
endif(ARROW_LIB AND PARQUET_LIB AND THRIFT_LIB AND UTF8PROC_LIB)
# message(STATUS "FlatBuffers installed here: " ${FLATBUFFERS_ROOT})
# set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_ROOT}/include")
# set(FLATBUFFERS_LIBRARY_DIR "${FLATBUFFERS_ROOT}/lib")
add_definitions(-DARROW_METADATA_V4)

View File

@ -0,0 +1,30 @@
#=============================================================================
# Copyright (c) 2018-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
cmake_minimum_required(VERSION 3.14...3.17 FATAL_ERROR)
project(wrapper-Arrow)
include(ExternalProject)
ExternalProject_Add(Arrow
GIT_REPOSITORY ${GIT_ARROW_REPO}
GIT_TAG ${GIT_ARROW_TAG}
GIT_SHALLOW true
SOURCE_DIR "${ARROW_ROOT}/arrow"
SOURCE_SUBDIR "cpp"
BINARY_DIR "${ARROW_ROOT}/build"
INSTALL_DIR "${ARROW_ROOT}/install"
CMAKE_ARGS ${ARROW_CMAKE_ARGS} -DCMAKE_INSTALL_PREFIX=${ARROW_ROOT}/install)

View File

@ -0,0 +1,21 @@
add_executable(wrapper_test
ParquetWrapperTest.cpp)
include(FetchContent)
FetchContent_Declare(googletest
URL "https://github.com/google/googletest/archive/release-1.10.0.tar.gz")
set(BUILD_GMOCK OFF CACHE BOOL "")
set(INSTALL_GTEST OFF CACHE BOOL "")
FetchContent_MakeAvailable(googletest)
target_link_libraries(wrapper_test
gtest_main
wrapper
parquet arrow thrift utf8proc pthread
)
install(TARGETS wrapper_test DESTINATION ${CMAKE_INSTALL_PREFIX})
# Defines `gtest_discover_tests()`.
#include(GoogleTest)
#gtest_discover_tests(milvusd_test)

Some files were not shown because too many files have changed in this diff.