mirror of https://github.com/milvus-io/milvus.git
Refactor the structure of proto file
Signed-off-by: dragondriver <jiquan.long@zilliz.com>
parent 9828a52999
commit eba6bb288c
@@ -21,7 +21,7 @@ cmake_build
 # Docker generated cache file
 .docker/
 
-# proxy
+# proxynode
 proxy/milvus
 proxy/cmake_build
 proxy/cmake-build-debug
@@ -21,9 +21,9 @@ dir ('build/docker/deploy') {
     sh 'docker-compose build --force-rm indexbuilder'
     sh 'docker-compose push indexbuilder'
 
-    sh 'docker pull ${SOURCE_REPO}/proxy:${SOURCE_TAG} || true'
-    sh 'docker-compose build --force-rm proxy'
-    sh 'docker-compose push proxy'
+    sh 'docker pull ${SOURCE_REPO}/proxynode:${SOURCE_TAG} || true'
+    sh 'docker-compose build --force-rm proxynode'
+    sh 'docker-compose push proxynode'
 
     sh 'docker pull registry.zilliz.com/milvus-distributed/milvus-distributed-dev:latest || true'
     sh 'docker pull ${SOURCE_REPO}/querynode:${SOURCE_TAG} || true'
@@ -7,7 +7,7 @@ try {
     sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} pull'
     sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d master'
     sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d indexbuilder'
-    sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d proxy'
+    sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} up -d proxynode'
     sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run -e QUERY_NODE_ID=1 -d querynode'
     sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run -e QUERY_NODE_ID=2 -d querynode'
     sh 'docker-compose -p ${DOCKER_COMPOSE_PROJECT_NAME} run -e WRITE_NODE_ID=3 -d writenode'
Makefile | 12
@@ -87,7 +87,7 @@ master: build-cpp
 proxynode: build-cpp
 	@echo "Building each component's binary to './bin'"
 	@echo "Building proxy ..."
-	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="0" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/proxy $(PWD)/cmd/proxy/proxy.go 1>/dev/null
+	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="0" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/proxynode $(PWD)/cmd/proxy/node/proxy_node.go 1>/dev/null
 
 # Builds various components locally.
 querynode: build-cpp
@@ -113,8 +113,8 @@ build-go: build-cpp
 	@echo "Building each component's binary to './bin'"
 	@echo "Building master ..."
 	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="0" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/master $(PWD)/cmd/master/main.go 1>/dev/null
-	@echo "Building proxy ..."
-	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="0" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/proxy $(PWD)/cmd/proxy/proxy.go 1>/dev/null
+	@echo "Building proxy node ..."
+	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="0" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/proxynode $(PWD)/cmd/proxy/node/proxy_node.go 1>/dev/null
 	@echo "Building query node ..."
 	@mkdir -p $(INSTALL_PATH) && go env -w CGO_ENABLED="1" && GO111MODULE=on $(GO) build -o $(INSTALL_PATH)/querynode $(PWD)/cmd/querynode/query_node.go 1>/dev/null
 	@echo "Building indexbuilder ..."
@@ -145,7 +145,7 @@ build-cpp-with-unittest:
 # Runs the tests.
 unittest: test-cpp test-go
 
-#TODO: proxy master query node writer's unittest
+#TODO: proxynode master query node writer's unittest
 test-go:build-cpp
 	@echo "Running go unittests..."
 	@(env bash $(PWD)/scripts/run_go_unittest.sh)
@@ -165,7 +165,7 @@ install: all
 	@echo "Installing binary to './bin'"
 	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/querynode $(GOPATH)/bin/querynode
 	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/master $(GOPATH)/bin/master
-	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/proxy $(GOPATH)/bin/proxy
+	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/proxynode $(GOPATH)/bin/proxynode
 	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/writenode $(GOPATH)/bin/writenode
 	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/indexbuilder $(GOPATH)/bin/indexbuilder
 	@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/singlenode $(GOPATH)/bin/singlenode
@@ -179,7 +179,7 @@ clean:
 	@rm -rf bin/
 	@rm -rf lib/
 	@rm -rf $(GOPATH)/bin/master
-	@rm -rf $(GOPATH)/bin/proxy
+	@rm -rf $(GOPATH)/bin/proxynode
 	@rm -rf $(GOPATH)/bin/querynode
 	@rm -rf $(GOPATH)/bin/writenode
 	@rm -rf $(GOPATH)/bin/indexbuilder
@@ -15,13 +15,13 @@ services:
     networks:
       - milvus
 
-  proxy:
-    image: ${TARGET_REPO}/proxy:${TARGET_TAG}
+  proxynode:
+    image: ${TARGET_REPO}/proxynode:${TARGET_TAG}
     build:
       context: ../../../
-      dockerfile: build/docker/deploy/proxy/DockerFile
+      dockerfile: build/docker/deploy/proxynode/DockerFile
     cache_from:
-      - ${SOURCE_REPO}/proxy:${SOURCE_TAG}
+      - ${SOURCE_REPO}/proxynode:${SOURCE_TAG}
     environment:
       PULSAR_ADDRESS: ${PULSAR_ADDRESS}
       MASTER_ADDRESS: ${MASTER_ADDRESS}
@@ -11,12 +11,12 @@
 
 FROM alpine:3.12.1
 
-COPY ./bin/proxy /milvus-distributed/bin/proxy
+COPY ./bin/proxynode /milvus-distributed/bin/proxynode
 
 COPY ./configs/ /milvus-distributed/configs/
 
 WORKDIR /milvus-distributed/
 
-CMD ["./bin/proxy"]
+CMD ["./bin/proxynode"]
 
 EXPOSE 19530
@@ -12,7 +12,7 @@ services:
       - ../../..:/milvus-distributed:delegated
     working_dir: "/milvus-distributed/tests/python"
     command: >
-      /bin/bash -c "pytest --ip proxy"
+      /bin/bash -c "pytest --ip proxynode"
     networks:
       - milvus
@@ -2,21 +2,19 @@ package main
 
 import (
 	"context"
 	"fmt"
 	"log"
 	"os"
 	"os/signal"
 	"syscall"
 
-	"github.com/zilliztech/milvus-distributed/internal/proxynode"
+	grpcproxynode "github.com/zilliztech/milvus-distributed/internal/distributed/proxynode"
 
 	"go.uber.org/zap"
 )
 
 func main() {
-	proxynode.Init()
-	fmt.Println("ProxyID is", proxynode.Params.ProxyID())
 	ctx, cancel := context.WithCancel(context.Background())
-	svr, err := proxynode.CreateProxy(ctx)
+	svr, err := grpcproxynode.CreateProxyNodeServer()
 	if err != nil {
 		log.Print("create server failed", zap.Error(err))
 	}
@@ -34,6 +32,10 @@ func main() {
 		cancel()
 	}()
 
+	if err := svr.Init(); err != nil {
+		log.Fatal("Init server failed", zap.Error(err))
+	}
+
 	if err := svr.Start(); err != nil {
 		log.Fatal("run server failed", zap.Error(err))
 	}
@@ -41,7 +43,9 @@ func main() {
 	<-ctx.Done()
 	log.Print("Got signal to exit", zap.String("signal", sig.String()))
 
-	svr.Close()
+	if err := svr.Stop(); err != nil {
+		log.Fatal("stop server failed", zap.Error(err))
+	}
 	switch sig {
 	case syscall.SIGTERM:
 		exit(0)
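Taken together, the three hunks above replace the old proxynode.CreateProxy/Close pair with a uniform Create -> Init -> Start -> Stop lifecycle. A condensed sketch of that flow, with signal plumbing simplified relative to the real main (which threads a cancelable context through):

	package main

	import (
		"log"
		"os"
		"os/signal"
		"syscall"

		grpcproxynode "github.com/zilliztech/milvus-distributed/internal/distributed/proxynode"
	)

	func main() {
		// Create -> Init -> Start, then block until a signal and Stop.
		svr, err := grpcproxynode.CreateProxyNodeServer()
		if err != nil {
			log.Fatal("create server failed: ", err)
		}
		if err := svr.Init(); err != nil {
			log.Fatal("init server failed: ", err)
		}
		if err := svr.Start(); err != nil {
			log.Fatal("start server failed: ", err)
		}

		sc := make(chan os.Signal, 1)
		signal.Notify(sc, syscall.SIGTERM, syscall.SIGINT)
		<-sc // wait for a shutdown signal

		if err := svr.Stop(); err != nil {
			log.Fatal("stop server failed: ", err)
		}
	}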
@@ -16,7 +16,7 @@ import (
 
 	"github.com/zilliztech/milvus-distributed/internal/indexnode"
 	"github.com/zilliztech/milvus-distributed/internal/master"
-	"github.com/zilliztech/milvus-distributed/internal/proxynode"
+	proxynodeimpl "github.com/zilliztech/milvus-distributed/internal/proxynode"
 	"github.com/zilliztech/milvus-distributed/internal/querynode"
 	"github.com/zilliztech/milvus-distributed/internal/writenode"
 )
@@ -62,10 +62,10 @@ func InitMaster(cpuprofile *string, wg *sync.WaitGroup) {
 
 func InitProxy(wg *sync.WaitGroup) {
 	defer wg.Done()
-	proxynode.Init()
-	fmt.Println("ProxyID is", proxynode.Params.ProxyID())
+	//proxynodeimpl.Init()
+	//fmt.Println("ProxyID is", proxynodeimpl.Params.ProxyID())
 	ctx, cancel := context.WithCancel(context.Background())
-	svr, err := proxynode.CreateProxy(ctx)
+	svr, err := proxynodeimpl.CreateProxyNodeImpl(ctx)
 	if err != nil {
 		log.Print("create server failed", zap.Error(err))
 	}
@@ -83,6 +83,10 @@ func InitProxy(wg *sync.WaitGroup) {
 		cancel()
 	}()
 
+	if err := svr.Init(); err != nil {
+		log.Fatal("init server failed", zap.Error(err))
+	}
+
 	if err := svr.Start(); err != nil {
 		log.Fatal("run server failed", zap.Error(err))
 	}
@@ -90,7 +94,7 @@ func InitProxy(wg *sync.WaitGroup) {
 	<-ctx.Done()
 	log.Print("Got signal to exit", zap.String("signal", sig.String()))
 
-	svr.Close()
+	svr.Stop()
 	switch sig {
 	case syscall.SIGTERM:
 		exit(0)
@@ -51,6 +51,11 @@ proxyNode:
   address: localhost
   port: 19530
 
+# not used now
+proxyService:
+  address: localhost
+  port: 19530
+
 queryNode:
   gracefulTime: 5000 #ms
go.mod | 1
@@ -12,6 +12,7 @@ require (
 	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
 	github.com/frankban/quicktest v1.10.2 // indirect
 	github.com/fsnotify/fsnotify v1.4.9 // indirect
+	github.com/go-basic/ipv4 v1.0.0
 	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
 	github.com/golang/mock v1.3.1
 	github.com/golang/protobuf v1.3.2
go.sum | 2
@@ -100,6 +100,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
 github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-basic/ipv4 v1.0.0 h1:gjyFAa1USC1hhXTkPOwBWDPfMcUaIM+tvo1XzV9EZxs=
+github.com/go-basic/ipv4 v1.0.0/go.mod h1:etLBnaxbidQfuqE6wgZQfs38nEWNmzALkxDZe4xY8Dg=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -1,7 +1,7 @@
 get_property(PROTOC_EXCUTABLE GLOBAL PROPERTY PROTOC_EXCUTABLE )
 
 
-set(proto_file_names common.proto etcd_meta.proto schema.proto service_msg.proto index_cgo_msg.proto)
+set(proto_file_names common.proto etcd_meta.proto schema.proto milvus.proto index_cgo_msg.proto)
 
 set( PROTO_PATH "${MILVUS_SOURCE_DIR}/../proto/" )
 
File diff suppressed because it is too large (4 files)
@@ -15,7 +15,7 @@
 #include "query/ExprImpl.h"
 #include "utils/Json.h"
 #include "utils/EasyAssert.h"
-#include "pb/service_msg.pb.h"
+#include "pb/milvus.pb.h"
 #include <vector>
 #include <memory>
 #include <boost/align/aligned_allocator.hpp>
@@ -281,7 +281,7 @@ Parser::ParseRangeNodeImpl(const FieldName& field_name, const Json& body) {
 
 std::unique_ptr<PlaceholderGroup>
 ParsePlaceholderGroup(const Plan* plan, const std::string& blob) {
-    namespace ser = milvus::proto::service;
+    namespace ser = milvus::proto::milvus;
     auto result = std::make_unique<PlaceholderGroup>();
     ser::PlaceholderGroup ph_group;
     auto ok = ph_group.ParseFromString(blob);
@@ -15,7 +15,7 @@
 #include "utils/Json.h"
 #include "PlanNode.h"
 #include "utils/EasyAssert.h"
-#include "pb/service_msg.pb.h"
+#include "pb/milvus.pb.h"
 #include <memory>
 #include <map>
 #include <string>
@@ -10,7 +10,7 @@
 // or implied. See the License for the specific language governing permissions and limitations under the License
 
 #pragma once
-#include "pb/service_msg.pb.h"
+#include "pb/milvus.pb.h"
 #include "query/deprecated/BooleanQuery.h"
 #include "query/deprecated/BinaryQuery.h"
 #include "query/deprecated/GeneralQuery.h"
@@ -18,6 +18,6 @@
 namespace milvus::wtf {
 
 query_old::QueryPtr
-Transformer(proto::service::Query* query);
+Transformer(proto::milvus::SearchRequest* query);
 
 }  // namespace milvus::wtf
@@ -15,7 +15,7 @@
 
 #include "segcore/Reduce.h"
 #include "common/Types.h"
-#include "pb/service_msg.pb.h"
+#include "pb/milvus.pb.h"
 
 using SearchResult = milvus::QueryResult;
 
@@ -206,7 +206,7 @@ ReorganizeQueryResults(CMarshaledHits* c_marshaled_hits,
     for (int i = 0; i < num_groups; i++) {
         MarshaledHitsPeerGroup& hits_peer_group = (*marshaledHits).marshaled_hits_[i];
         for (int j = 0; j < num_queries_peer_group[i]; j++) {
-            milvus::proto::service::Hits hits;
+            milvus::proto::milvus::Hits hits;
             for (int k = 0; k < topk; k++, fill_hit_offset++) {
                 hits.add_ids(result_ids[fill_hit_offset]);
                 hits.add_scores(result_distances[fill_hit_offset]);
@@ -16,7 +16,7 @@
 #include <chrono>
 #include <google/protobuf/text_format.h>
 
-#include "pb/service_msg.pb.h"
+#include "pb/milvus.pb.h"
 #include "segcore/reduce_c.h"
 
 #include <index/knowhere/knowhere/index/vector_index/helpers/IndexParameter.h>
@@ -32,7 +32,7 @@ namespace chrono = std::chrono;
 
 using namespace milvus;
 using namespace milvus::segcore;
-using namespace milvus::proto;
+//using namespace milvus::proto;
 using namespace milvus::knowhere;
 
 TEST(CApiTest, CollectionTest) {
@@ -155,7 +155,7 @@ TEST(CApiTest, SearchTest) {
     }
     })";
 
-    namespace ser = milvus::proto::service;
+    namespace ser = milvus::proto::milvus;
     int num_queries = 10;
     int dim = 16;
     std::normal_distribution<double> dis(0, 1);
@@ -376,6 +376,7 @@ generate_data(int N) {
 
 std::string
 generate_collection_shema(std::string metric_type, std::string dim, bool is_binary) {
+    namespace schema = milvus::proto::schema;
     schema::CollectionSchema collection_schema;
     collection_schema.set_name("collection_test");
     collection_schema.set_autoid(true);
@@ -672,7 +673,7 @@ TEST(CApiTest, Reduce) {
     }
     })";
 
-    namespace ser = milvus::proto::service;
+    namespace ser = milvus::proto::milvus;
     int num_queries = 10;
     int dim = 16;
     std::normal_distribution<double> dis(0, 1);
@@ -102,7 +102,7 @@ TEST(Query, DSL) {
 }
 
 TEST(Query, ParsePlaceholderGroup) {
-    namespace ser = milvus::proto::service;
+    namespace ser = milvus::proto::milvus;
    std::string dsl_string = R"(
     {
         "bool": {
@@ -185,7 +185,7 @@ DataGen(SchemaPtr schema, int64_t N, uint64_t seed = 42) {
 
 inline auto
 CreatePlaceholderGroup(int64_t num_queries, int dim, int64_t seed = 42) {
-    namespace ser = milvus::proto::service;
+    namespace ser = milvus::proto::milvus;
     ser::PlaceholderGroup raw_group;
     auto value = raw_group.add_placeholders();
     value->set_tag("$0");

@@ -205,7 +205,7 @@ CreatePlaceholderGroup(int64_t num_queries, int dim, int64_t seed = 42) {
 
 inline auto
 CreatePlaceholderGroupFromBlob(int64_t num_queries, int dim, const float* src) {
-    namespace ser = milvus::proto::service;
+    namespace ser = milvus::proto::milvus;
     ser::PlaceholderGroup raw_group;
     auto value = raw_group.add_placeholders();
     value->set_tag("$0");

@@ -226,7 +226,7 @@ CreatePlaceholderGroupFromBlob(int64_t num_queries, int dim, const float* src) {
 inline auto
 CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42) {
     assert(dim % 8 == 0);
-    namespace ser = milvus::proto::service;
+    namespace ser = milvus::proto::milvus;
     ser::PlaceholderGroup raw_group;
     auto value = raw_group.add_placeholders();
     value->set_tag("$0");

@@ -246,7 +246,7 @@ CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42
 inline auto
 CreateBinaryPlaceholderGroupFromBlob(int64_t num_queries, int64_t dim, const uint8_t* ptr) {
     assert(dim % 8 == 0);
-    namespace ser = milvus::proto::service;
+    namespace ser = milvus::proto::milvus;
     ser::PlaceholderGroup raw_group;
     auto value = raw_group.add_placeholders();
     value->set_tag("$0");
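The four helpers above now build their PlaceholderGroup from milvus.proto instead of service_msg.proto. For reference, a hedged Go-side sketch of the same serialization, assuming the regenerated milvuspb package keeps the moved PlaceholderGroup/PlaceholderValue/PlaceholderType names:

	package main

	import (
		"encoding/binary"
		"fmt"
		"math"

		"github.com/golang/protobuf/proto"
		"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
	)

	// makePlaceholderGroup serializes float vectors the way the C++ test
	// helpers do: one PlaceholderValue tagged "$0", one byte blob per vector.
	func makePlaceholderGroup(queries [][]float32) ([]byte, error) {
		value := &milvuspb.PlaceholderValue{
			Tag:  "$0",
			Type: milvuspb.PlaceholderType_VECTOR_FLOAT,
		}
		for _, vec := range queries {
			blob := make([]byte, 4*len(vec))
			for i, f := range vec {
				binary.LittleEndian.PutUint32(blob[4*i:], math.Float32bits(f))
			}
			value.Values = append(value.Values, blob)
		}
		group := &milvuspb.PlaceholderGroup{Placeholders: []*milvuspb.PlaceholderValue{value}}
		return proto.Marshal(group)
	}

	func main() {
		blob, err := makePlaceholderGroup([][]float32{{0.1, 0.2}})
		if err != nil {
			panic(err)
		}
		fmt.Println(len(blob)) // serialized group, ready for a search request
	}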
@@ -0,0 +1,44 @@
+package grpcproxynode
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+
+	"github.com/zilliztech/milvus-distributed/internal/proto/proxypb"
+)
+
+type Client struct {
+	client  proxypb.ProxyNodeServiceClient
+	address string
+	ctx     context.Context
+}
+
+func (c *Client) tryConnect() error {
+	if c.client != nil {
+		return nil
+	}
+	conn, err := grpc.DialContext(c.ctx, c.address, grpc.WithInsecure(), grpc.WithBlock())
+	if err != nil {
+		return err
+	}
+	c.client = proxypb.NewProxyNodeServiceClient(conn)
+	return nil
+}
+
+func (c *Client) InvalidateCollectionMetaCache(request *proxypb.InvalidateCollMetaCacheRequest) error {
+	var err error
+	err = c.tryConnect()
+	if err != nil {
+		return err
+	}
+	_, err = c.client.InvalidateCollectionMetaCache(c.ctx, request)
+	return err
+}
+
+func NewClient(ctx context.Context, address string) *Client {
+	return &Client{
+		address: address,
+		ctx:     ctx,
+	}
+}
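A hedged usage sketch for this new lazy-connecting client; the address is a placeholder and the request is left empty for brevity (the first RPC triggers tryConnect, which blocks until the dial succeeds):

	package main

	import (
		"context"
		"log"

		grpcproxynode "github.com/zilliztech/milvus-distributed/internal/distributed/proxynode"
		"github.com/zilliztech/milvus-distributed/internal/proto/proxypb"
	)

	func main() {
		ctx := context.Background()
		// NewClient does not dial; the connection is established on first use.
		c := grpcproxynode.NewClient(ctx, "localhost:19530") // illustrative address
		req := &proxypb.InvalidateCollMetaCacheRequest{}     // fill in fields as needed
		if err := c.InvalidateCollectionMetaCache(req); err != nil {
			log.Fatal(err)
		}
	}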
@@ -0,0 +1,219 @@
+package grpcproxynode
+
+import (
+	"context"
+	"net"
+	"strconv"
+	"sync"
+
+	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
+
+	proxynodeimpl "github.com/zilliztech/milvus-distributed/internal/proxynode"
+
+	"github.com/zilliztech/milvus-distributed/internal/proto/proxypb"
+
+	"google.golang.org/grpc"
+)
+
+type Server struct {
+	ctx                 context.Context
+	wg                  sync.WaitGroup
+	impl                proxynodeimpl.ProxyNode
+	grpcServer          *grpc.Server
+	ip                  string
+	port                int
+	proxyServiceAddress string
+	//proxyServiceClient *proxyservice.Client
+}
+
+func CreateProxyNodeServer() (*Server, error) {
+	return &Server{}, nil
+}
+
+func (s *Server) connectProxyService() error {
+	proxynodeimpl.Params.Init()
+
+	//s.proxyServiceAddress = proxynodeimpl.Params.ProxyServiceAddress()
+	//s.proxyServiceClient = proxyservice.NewClient(s.ctx, s.proxyServiceAddress)
+	//
+	//getAvailablePort := func() int {
+	//	listener, err := net.Listen("tcp", ":0")
+	//	if err != nil {
+	//		panic(err)
+	//	}
+	//	defer listener.Close()
+	//
+	//	return listener.Addr().(*net.TCPAddr).Port
+	//}
+	//getLocalIp := func() string {
+	//	return ipv4.LocalIP()
+	//}
+	//s.ip = getLocalIp()
+	//s.port = getAvailablePort()
+	//
+	//request := &proxypb.RegisterNodeRequest{
+	//	Address: &commonpb.Address{
+	//		Ip:   s.ip,
+	//		Port: int64(s.port),
+	//	},
+	//}
+	//response, err := s.proxyServiceClient.RegisterNode(request)
+	//if err != nil {
+	//	panic(err)
+	//}
+	//
+	//proxynodeimpl.Params.Save("_proxyID", strconv.Itoa(int(response.InitParams.NodeID)))
+	//
+	//for _, params := range response.InitParams.StartParams {
+	//	proxynodeimpl.Params.Save(params.Key, params.Value)
+	//}
+	//
+	//return err
+	return nil
+}
+
+func (s *Server) Init() error {
+	s.ctx = context.Background()
+	var err error
+	s.impl, err = proxynodeimpl.CreateProxyNodeImpl(s.ctx)
+	if err != nil {
+		return err
+	}
+	err = s.connectProxyService()
+	if err != nil {
+		return err
+	}
+	return s.impl.Init()
+}
+
+func (s *Server) Start() error {
+	s.wg.Add(1)
+	go func() {
+		defer s.wg.Done()
+
+		// TODO: use config
+		lis, err := net.Listen("tcp", ":"+strconv.Itoa(proxynodeimpl.Params.NetworkPort()))
+		if err != nil {
+			panic(err)
+		}
+
+		s.grpcServer = grpc.NewServer()
+		proxypb.RegisterProxyNodeServiceServer(s.grpcServer, s)
+		milvuspb.RegisterMilvusServiceServer(s.grpcServer, s)
+		if err = s.grpcServer.Serve(lis); err != nil {
+			panic(err)
+		}
+	}()
+
+	return s.impl.Start()
+}
+
+func (s *Server) Stop() error {
+	var err error
+
+	if s.grpcServer != nil {
+		s.grpcServer.GracefulStop()
+	}
+
+	err = s.impl.Stop()
+	if err != nil {
+		return err
+	}
+
+	s.wg.Wait()
+
+	return nil
+}
+
+func (s *Server) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
+	return s.impl.InvalidateCollectionMetaCache(ctx, request)
+}
+
+func (s *Server) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
+	return s.impl.CreateCollection(ctx, request)
+}
+
+func (s *Server) DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
+	return s.impl.DropCollection(ctx, request)
+}
+
+func (s *Server) HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
+	return s.impl.HasCollection(ctx, request)
+}
+
+func (s *Server) LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error) {
+	return s.impl.LoadCollection(ctx, request)
+}
+
+func (s *Server) ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error) {
+	return s.impl.ReleaseCollection(ctx, request)
+}
+
+func (s *Server) DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
+	return s.impl.DescribeCollection(ctx, request)
+}
+
+func (s *Server) GetCollectionStatistics(ctx context.Context, request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
+	return s.impl.GetCollectionStatistics(ctx, request)
+}
+
+func (s *Server) ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
+	return s.impl.ShowCollections(ctx, request)
+}
+
+func (s *Server) CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
+	return s.impl.CreatePartition(ctx, request)
+}
+
+func (s *Server) DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
+	return s.impl.DropPartition(ctx, request)
+}
+
+func (s *Server) HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
+	return s.impl.HasPartition(ctx, request)
+}
+
+func (s *Server) LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitonRequest) (*commonpb.Status, error) {
+	return s.impl.LoadPartitions(ctx, request)
+}
+
+func (s *Server) ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionRequest) (*commonpb.Status, error) {
+	return s.impl.ReleasePartitions(ctx, request)
+}
+
+func (s *Server) GetPartitionStatistics(ctx context.Context, request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
+	return s.impl.GetPartitionStatistics(ctx, request)
+}
+
+func (s *Server) ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
+	return s.impl.ShowPartitions(ctx, request)
+}
+
+func (s *Server) CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
+	return s.impl.CreateIndex(ctx, request)
+}
+
+func (s *Server) DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
+	return s.impl.DescribeIndex(ctx, request)
+}
+
+func (s *Server) GetIndexState(ctx context.Context, request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error) {
+	return s.impl.GetIndexState(ctx, request)
+}
+
+func (s *Server) Insert(ctx context.Context, request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error) {
+	return s.impl.Insert(ctx, request)
+}
+
+func (s *Server) Search(ctx context.Context, request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error) {
+	return s.impl.Search(ctx, request)
+}
+
+func (s *Server) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*commonpb.Status, error) {
+	return s.impl.Flush(ctx, request)
+}
+
+func (s *Server) GetDdChannel(ctx context.Context, request *commonpb.Empty) (*milvuspb.StringResponse, error) {
+	return s.impl.GetDdChannel(ctx, request)
+}
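The file above is essentially one pattern repeated: a transport-layer shim that satisfies the generated gRPC service interfaces by delegating every call to the transport-agnostic proxynodeimpl.ProxyNode. A minimal sketch of that pattern, using illustrative names (Inner, Shim, DoWork) rather than ones from the codebase:

	package shim

	import "context"

	// Inner is the transport-agnostic core, analogous to proxynodeimpl.ProxyNode.
	type Inner interface {
		DoWork(ctx context.Context, req string) (string, error)
	}

	// Shim owns the gRPC plumbing and keeps no business logic of its own,
	// analogous to the Server type above.
	type Shim struct {
		impl Inner
	}

	func (s *Shim) DoWork(ctx context.Context, req string) (string, error) {
		return s.impl.DoWork(ctx, req) // pure delegation, by design
	}

Keeping the shim logic-free is what lets the same proxynodeimpl run either behind gRPC (this package) or in-process (the singlenode binary shown earlier).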
@@ -845,19 +845,19 @@ func TestMaster(t *testing.T) {
 	//	ReqID: 1,
 	//	Timestamp: uint64(time.Now().Unix()),
 	//	ProxyID: 1,
-	//	PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition1"},
+	//	PartitionName: &milvuspb.PartitionName{CollectionName: "col1", Tag: "partition1"},
 	//}
 	//
 	//describePartition, err := cli.DescribePartition(ctx, &describePartitionReq)
 	//assert.Nil(t, err)
-	//assert.Equal(t, &servicepb.PartitionName{CollectionName: "col1", Tag: "partition1"}, describePartition.Name)
+	//assert.Equal(t, &milvuspb.PartitionName{CollectionName: "col1", Tag: "partition1"}, describePartition.Name)
 	//
 	//describePartitionReq = internalpb.DescribePartitionRequest{
 	//	MsgType: commonpb.MsgType_kDescribePartition,
 	//	ReqID: 1,
 	//	Timestamp: 0,
 	//	ProxyID: 1,
-	//	PartitionName: &servicepb.PartitionName{CollectionName: "col1", Tag: "partition1"},
+	//	PartitionName: &milvuspb.PartitionName{CollectionName: "col1", Tag: "partition1"},
 	//}
 	//
 	//describePartition, _ = cli.DescribePartition(ctx, &describePartitionReq)
@@ -31,7 +31,7 @@ type hasPartitionTask struct {
 
 //type describePartitionTask struct {
 //	baseTask
-//	description *servicepb.PartitionDescription
+//	description *milvuspb.PartitionDescription
 //	req *internalpb.DescribePartitionRequest
 //}
 
@@ -225,7 +225,7 @@ func (t *hasPartitionTask) Execute() error {
 //
 //	partitionName := t.req.PartitionName
 //
-//	description := servicepb.PartitionDescription{
+//	description := milvuspb.PartitionDescription{
 //		Status: &commonpb.Status{
 //			ErrorCode: commonpb.ErrorCode_SUCCESS,
 //		},
@@ -28,7 +28,7 @@ import (
 // querypb(query_service)
 // datapb(data_service)
 // indexpb(index_service)
-// milvuspb -> servicepb
+// milvuspb -> milvuspb
 // masterpb2 -> masterpb (master_service)
 
 type InitParams struct {
@@ -9,7 +9,7 @@ import "schema.proto";
 message CreateCollectionRequest {
   common.MsgBase base = 1;
   string db_name = 2;
-  string collectionName = 3;
+  string collection_name = 3;
   // `schema` is the serialized `schema.CollectionSchema`
   bytes schema = 4;
 }

@@ -18,7 +18,7 @@ message CreateCollectionRequest {
 message DropCollectionRequest {
   common.MsgBase base = 1;
   string db_name = 2;
-  string collectionName = 3;
+  string collection_name = 3;
 }
 
 message HasCollectionRequest {
@@ -57,7 +57,7 @@ func (PlaceholderType) EnumDescriptor() ([]byte, []int) {
 type CreateCollectionRequest struct {
 	Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
 	DbName string `protobuf:"bytes,2,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"`
-	CollectionName string `protobuf:"bytes,3,opt,name=collectionName,proto3" json:"collectionName,omitempty"`
+	CollectionName string `protobuf:"bytes,3,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
 	// `schema` is the serialized `schema.CollectionSchema`
 	Schema []byte `protobuf:"bytes,4,opt,name=schema,proto3" json:"schema,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`

@@ -121,7 +121,7 @@ func (m *CreateCollectionRequest) GetSchema() []byte {
 type DropCollectionRequest struct {
 	Base *commonpb.MsgBase `protobuf:"bytes,1,opt,name=base,proto3" json:"base,omitempty"`
 	DbName string `protobuf:"bytes,2,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"`
-	CollectionName string `protobuf:"bytes,3,opt,name=collectionName,proto3" json:"collectionName,omitempty"`
+	CollectionName string `protobuf:"bytes,3,opt,name=collection_name,json=collectionName,proto3" json:"collection_name,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
 	XXX_sizecache        int32    `json:"-"`
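Worth noting: the rename keeps field number 3, so the binary wire format is unchanged; only the canonical JSON key moves from collectionName to collection_name (with the json=collectionName alias preserved in the tag). A quick hedged sketch of the JSON effect, using the golang/protobuf jsonpb marshaler already pinned in go.mod:

	package main

	import (
		"fmt"

		"github.com/golang/protobuf/jsonpb"
		"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
	)

	func main() {
		req := &milvuspb.CreateCollectionRequest{CollectionName: "col1"}
		// OrigName emits proto field names: {"collection_name":"col1"};
		// without it the camelCase json= alias is used: {"collectionName":"col1"}.
		m := jsonpb.Marshaler{OrigName: true}
		out, err := m.MarshalToString(req)
		if err != nil {
			panic(err)
		}
		fmt.Println(out)
	}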
@@ -2344,108 +2344,107 @@ func init() {
 func init() { proto.RegisterFile("milvus.proto", fileDescriptor_02345ba45cc0e303) }
 
 var fileDescriptor_02345ba45cc0e303 = []byte{
-	// 1604 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0x5d, 0x6f, 0x1b, 0x45,
-	[... remaining lines of the old regenerated descriptor bytes omitted for readability ...]
-	0x8d, 0x20, 0x00, 0x00,
+	// 1591 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xdd, 0x6e, 0x1b, 0x45,
+	[... remaining lines of the new regenerated descriptor bytes omitted for readability ...]
+	0x29, 0xea, 0x7b, 0x8f, 0x20, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -1,135 +0,0 @@
-syntax = "proto3";
-
-package milvus.proto.service;
-option go_package="github.com/zilliztech/milvus-distributed/internal/proto/servicepb";
-
-import "common.proto";
-import "service_msg.proto";
-import "schema.proto";
-
-service MilvusService {
-    /**
-     * @brief This method is used to create collection
-     *
-     * @param CollectionSchema, use to provide collection information to be created.
-     *
-     * @return Status
-     */
-    rpc CreateCollection(schema.CollectionSchema) returns (common.Status){}
-
-    /**
-     * @brief This method is used to delete collection.
-     *
-     * @param CollectionName, collection name is going to be deleted.
-     *
-     * @return Status
-     */
-    rpc DropCollection(CollectionName) returns (common.Status) {}
-
-    /**
-     * @brief This method is used to test collection existence.
-     *
-     * @param CollectionName, collection name is going to be tested.
-     *
-     * @return BoolResponse
-     */
-    rpc HasCollection(CollectionName) returns (BoolResponse) {}
-
-    /**
-     * @brief This method is used to get collection schema.
-     *
-     * @param CollectionName, target collection name.
-     *
-     * @return CollectionDescription
-     */
-    rpc DescribeCollection(CollectionName) returns (CollectionDescription) {}
-
-    /**
-     * @brief This method is used to list all collections.
-     *
-     * @return CollectionNameList
-     */
-    rpc ShowCollections(common.Empty) returns (StringListResponse) {}
-
-    /**
-     * @brief This method is used to create partition
-     *
-     * @return Status
-     */
-    rpc CreatePartition(PartitionName) returns (common.Status) {}
-
-    /**
-     * @brief This method is used to drop partition
-     *
-     * @return Status
-     */
-    rpc DropPartition(PartitionName) returns (common.Status) {}
-
-    /**
-     * @brief This method is used to test partition existence.
-     *
-     * @return BoolResponse
-     */
-    rpc HasPartition(PartitionName) returns (BoolResponse) {}
-
-    /**
-     * @brief This method is used to get basic partition infomation.
-     *
-     * @return PartitionDescription
-     */
-    rpc DescribePartition(PartitionName) returns (PartitionDescription) {}
-
-    /**
-     * @brief This method is used to show partition information
-     *
-     * @param CollectionName, target collection name.
-     *
-     * @return StringListResponse
-     */
-    rpc ShowPartitions(CollectionName) returns (StringListResponse) {}
-
-    /**
-     * @brief This method is used to add vector array to collection.
-     *
-     * @param RowBatch, insert rows.
-     *
-     * @return IntegerRangeResponse contains id of the inserted rows.
-     */
-    rpc Insert(RowBatch) returns (IntegerRangeResponse) {}
-
-    /**
-     * @brief This method is used to query vector in collection.
-     *
-     * @param Query.
-     *
-     * @return QueryResult
-     */
-    rpc Search(Query) returns (QueryResult) {}
-
-    /**
-     * @brief This method is used to build index by collection in sync mode.
-     *
-     * @param IndexParam, index paramters.
-     *
-     * @return Status
-     */
-    rpc CreateIndex(IndexParam) returns (common.Status) {}
-
-    /**
-     * @brief This method is used to describe index
-     *
-     * @param IndexParam, target index.
-     *
-     * @return IndexParam
-     */
-    rpc DescribeIndex(DescribeIndexRequest) returns (DescribeIndexResponse) {}
-
-    /**
-     * @brief This method is used to query index building progress
-     *
-     * @param IndexParam, target index.
-     *
-     * @return IndexParam
-     */
-    rpc DescribeIndexProgress(DescribeIndexProgressRequest) returns (BoolResponse) {}
-}
@ -1,202 +0,0 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package milvus.proto.service;
|
||||
option go_package="github.com/zilliztech/milvus-distributed/internal/proto/servicepb";
|
||||
|
||||
import "common.proto";
|
||||
import "schema.proto";
|
||||
|
||||
/**
|
||||
* @brief Collection name
|
||||
*/
|
||||
message CollectionName {
|
||||
string collection_name = 1;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Partition name
|
||||
*/
|
||||
message PartitionName {
|
||||
string collection_name = 1;
|
||||
string tag = 2;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Row batch for Insert call
|
||||
*/
|
||||
message RowBatch {
|
||||
string collection_name = 1;
|
||||
string partition_tag = 2;
|
||||
repeated common.Blob row_data = 3;
|
||||
repeated uint32 hash_keys = 4;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Placeholder value types
|
||||
*/
|
||||
enum PlaceholderType {
|
||||
NONE = 0;
|
||||
VECTOR_BINARY = 100;
|
||||
VECTOR_FLOAT = 101;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Placeholder value in DSL
|
||||
*/
|
||||
message PlaceholderValue {
|
||||
string tag = 1;
|
||||
PlaceholderType type = 2;
|
||||
// values is a 2d-array, every array contains a vector
|
||||
repeated bytes values = 3;
|
||||
|
||||
}
|
||||
|
||||
message PlaceholderGroup {
|
||||
repeated PlaceholderValue placeholders = 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Query for Search call
|
||||
*/
|
||||
message Query {
|
||||
string collection_name = 1;
|
||||
repeated string partition_tags = 2;
|
||||
string dsl = 3;
|
||||
// placeholder_group contains the serialized PlaceholderGroup
|
||||
bytes placeholder_group = 4;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief String response
|
||||
*/
|
||||
message StringResponse {
|
||||
common.Status status = 1;
|
||||
string value = 2;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Bool response
|
||||
*/
|
||||
message BoolResponse {
|
||||
common.Status status = 1;
|
||||
bool value = 2;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief String list response
|
||||
*/
|
||||
message StringListResponse {
|
||||
common.Status status = 1;
|
||||
repeated string values = 2;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Integer list response
|
||||
*/
|
||||
message IntegerListResponse {
|
||||
common.Status status = 1;
|
||||
repeated int64 values = 2;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Range response, [begin, end)
|
||||
*/
|
||||
message IntegerRangeResponse {
|
||||
common.Status status = 1;
|
||||
int64 begin = 2;
|
||||
int64 end = 3;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Response of DescribeCollection
|
||||
*/
|
||||
message CollectionDescription {
|
||||
common.Status status = 1;
|
||||
schema.CollectionSchema schema = 2;
|
||||
repeated common.KeyValuePair statistics = 3;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Response of DescribePartition
|
||||
*/
|
||||
message PartitionDescription {
|
||||
common.Status status = 1;
|
||||
PartitionName name = 2;
|
||||
repeated common.KeyValuePair statistics = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Response of GetSysConfig
|
||||
*/
|
||||
message SysConfigResponse {
|
||||
common.Status status = 1;
|
||||
repeated string keys = 2;
|
||||
repeated string values = 3;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Entities hit by query
|
||||
*/
|
||||
message Hits {
|
||||
repeated int64 IDs = 1;
|
||||
repeated bytes row_data = 2;
|
||||
repeated float scores = 3;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Query result
|
||||
*/
|
||||
message QueryResult {
|
||||
common.Status status = 1;
|
||||
repeated bytes hits = 2;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* @brief Index params
|
||||
* @collection_name: target collection
|
||||
* @field_name: target field
|
||||
* @index_name: a name for index provided by user, unique within this field
|
||||
* @extra_params: index parameters in json format
|
||||
* for vector field:
|
||||
* extra_params["index_type"] = one of the values: FLAT, IVF_LAT, IVF_SQ8, NSGMIX, IVFSQ8H,
|
||||
* PQ, HNSW, HNSW_SQ8NM, ANNOY
|
||||
* extra_params["metric_type"] = one of the values: L2, IP, HAMMING, JACCARD, TANIMOTO
|
||||
* SUBSTRUCTURE, SUPERSTRUCTURE
|
||||
* extra_params["params"] = extra parameters for index, for example ivflat: {nlist: 2048}
|
||||
* for structured field:
|
||||
* extra_params["index_type"] = one of the values: SORTED
|
||||
*/
|
||||
message IndexParam {
|
||||
string collection_name = 1;
|
||||
string field_name = 2;
|
||||
repeated common.KeyValuePair extra_params = 3;
|
||||
}
|
||||
|
||||
message DescribeIndexRequest {
|
||||
string collection_name = 1;
|
||||
string field_name = 2;
|
||||
}
|
||||
|
||||
message DescribeIndexProgressRequest {
|
||||
string collection_name = 1;
|
||||
string field_name = 2;
|
||||
}
|
||||
|
||||
message DescribeIndexResponse {
|
||||
common.Status status = 1;
|
||||
string collection_name = 2;
|
||||
string field_name = 3;
|
||||
repeated common.KeyValuePair extra_params = 4;
|
||||
}
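
For orientation, here is a minimal Go sketch of how a caller might assemble the Query and IndexParam messages above: float vectors are packed little-endian into PlaceholderValue.values, the group is serialized with proto.Marshal, and the bytes go into Query.placeholder_group. This is not code from this commit; the field and enum names follow standard protoc-gen-go conventions, and the collection name, partition tag, DSL string, and nlist value are illustrative assumptions.

package main

import (
    "encoding/binary"
    "log"
    "math"

    "github.com/golang/protobuf/proto"
    "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)

// floatVectorBytes packs one float32 vector into the little-endian byte
// layout expected inside PlaceholderValue.values.
func floatVectorBytes(vec []float32) []byte {
    buf := make([]byte, 4*len(vec))
    for i, v := range vec {
        binary.LittleEndian.PutUint32(buf[4*i:], math.Float32bits(v))
    }
    return buf
}

func main() {
    // One placeholder, tagged "$0", holding a single 2-dim float vector.
    group := &servicepb.PlaceholderGroup{
        Placeholders: []*servicepb.PlaceholderValue{{
            Tag:    "$0",
            Type:   servicepb.PlaceholderType_VECTOR_FLOAT,
            Values: [][]byte{floatVectorBytes([]float32{0.1, 0.2})},
        }},
    }
    groupBytes, err := proto.Marshal(group) // the serialized PlaceholderGroup
    if err != nil {
        log.Fatal(err)
    }

    query := &servicepb.Query{
        CollectionName:   "demo",               // assumed name
        PartitionTags:    []string{"_default"}, // assumed tag
        Dsl:              `{"vector": {"vec": {"topk": 10, "query": "$0"}}}`, // illustrative DSL only
        PlaceholderGroup: groupBytes,
    }
    _ = query

    // IndexParam carries its options as key/value pairs, mirroring the
    // extra_params JSON documented on the message above.
    indexParam := &servicepb.IndexParam{
        CollectionName: "demo",
        FieldName:      "vec",
        ExtraParams: []*commonpb.KeyValuePair{
            {Key: "index_type", Value: "IVF_FLAT"},
            {Key: "metric_type", Value: "L2"},
            {Key: "params", Value: `{"nlist": 2048}`},
        },
    }
    _ = indexParam
}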

@ -1,807 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: service.proto

package servicepb

import (
    context "context"
    fmt "fmt"
    proto "github.com/golang/protobuf/proto"
    commonpb "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
    schemapb "github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
    grpc "google.golang.org/grpc"
    codes "google.golang.org/grpc/codes"
    status "google.golang.org/grpc/status"
    math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

func init() { proto.RegisterFile("service.proto", fileDescriptor_a0b84a42fa06f626) }

var fileDescriptor_a0b84a42fa06f626 = []byte{
    // 479 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xcf, 0x6e, 0xd3, 0x40,
    0x10, 0xc6, 0x73, 0xca, 0x61, 0x1b, 0xb7, 0x74, 0x05, 0x17, 0x57, 0x42, 0x60, 0x40, 0x42, 0x20,
    0x62, 0xa9, 0x3c, 0x01, 0x49, 0x91, 0x1a, 0x04, 0x55, 0xb0, 0x55, 0x44, 0xe1, 0x80, 0xd6, 0xce,
    0x60, 0x2f, 0xb2, 0x77, 0xdd, 0x9d, 0x71, 0xff, 0xbd, 0x01, 0x6f, 0x8d, 0xe2, 0x75, 0xd2, 0x18,
    0x39, 0xc4, 0x96, 0xb8, 0x79, 0x67, 0xbe, 0xf9, 0xed, 0xcc, 0x7e, 0x63, 0xe6, 0x20, 0x98, 0x2b,
    0x19, 0xc3, 0xb8, 0x30, 0x9a, 0x34, 0x7f, 0x98, 0xcb, 0xec, 0xaa, 0x44, 0x7b, 0x1a, 0xd7, 0x39,
    0x77, 0x14, 0xeb, 0x3c, 0xd7, 0xca, 0x46, 0xdd, 0xc3, 0x3a, 0xfc, 0x23, 0xc7, 0xa4, 0x0e, 0x8d,
    0x30, 0x4e, 0x21, 0x17, 0xf6, 0x74, 0xfc, 0x7b, 0x8f, 0x39, 0x9f, 0x2a, 0x4e, 0x68, 0x95, 0xfc,
    0x2b, 0x7b, 0x30, 0x35, 0x20, 0x08, 0xa6, 0x3a, 0xcb, 0x20, 0x26, 0xa9, 0x15, 0x7f, 0x31, 0x6e,
    0xde, 0x65, 0x09, 0xf7, 0x82, 0xb0, 0x0a, 0xb8, 0x47, 0x4d, 0x59, 0xdd, 0x49, 0x48, 0x82, 0x4a,
    0xf4, 0x06, 0xfc, 0x9c, 0xed, 0x9f, 0x18, 0x5d, 0x6c, 0x70, 0x9f, 0x8f, 0xdb, 0x66, 0xd8, 0x00,
    0x9f, 0x89, 0x1c, 0x76, 0x61, 0xbf, 0x33, 0xe7, 0x54, 0x60, 0x6f, 0xaa, 0xd7, 0xae, 0x9a, 0x68,
    0x9d, 0x05, 0x80, 0x85, 0x56, 0x08, 0xde, 0x80, 0x4b, 0xc6, 0x4f, 0x00, 0x63, 0x23, 0x23, 0xe8,
    0x7d, 0xc3, 0xeb, 0x5d, 0x2a, 0x4b, 0x2e, 0x96, 0x9f, 0xde, 0x80, 0x5f, 0xb0, 0x83, 0x30, 0xd5,
    0xd7, 0xf7, 0x69, 0xe4, 0x6e, 0xeb, 0xe4, 0xef, 0xf3, 0x82, 0x6e, 0xdd, 0x97, 0xed, 0xf4, 0x90,
    0x8c, 0x54, 0xc9, 0x47, 0x89, 0xb4, 0x31, 0xc5, 0x39, 0x3b, 0xb0, 0x9e, 0xce, 0x85, 0x21, 0x59,
    0x8d, 0xf0, 0xac, 0xbd, 0x7c, 0x2d, 0xe8, 0xf2, 0xf2, 0x21, 0x73, 0x96, 0x86, 0xfe, 0x5f, 0xe8,
    0x05, 0x1b, 0x9d, 0x0a, 0xec, 0xc9, 0xec, 0x66, 0xe6, 0x4f, 0x76, 0xb8, 0x32, 0xb3, 0x27, 0xff,
    0xd5, 0x0e, 0x51, 0xd3, 0xc9, 0x88, 0xed, 0x2f, 0x9d, 0x5c, 0x67, 0xb1, 0xe3, 0xc2, 0xf4, 0xb1,
    0xf4, 0x0b, 0x1b, 0xce, 0x14, 0x82, 0x21, 0xfe, 0xb8, 0xbd, 0x2a, 0xd0, 0xd7, 0x13, 0x41, 0x71,
    0xba, 0xad, 0xf7, 0x99, 0x22, 0x48, 0xc0, 0x04, 0x42, 0x25, 0xb0, 0xc1, 0xfd, 0xc0, 0x86, 0x21,
    0x08, 0x13, 0xa7, 0xfc, 0xa8, 0xbd, 0xee, 0x73, 0x09, 0xe6, 0xd6, 0x7d, 0xfa, 0x8f, 0x64, 0x00,
    0x58, 0x66, 0xe4, 0x0d, 0xf8, 0x19, 0xdb, 0xb3, 0x6b, 0x37, 0x53, 0x0b, 0xb8, 0xe1, 0x4f, 0xb6,
    0x35, 0xb2, 0x80, 0x9b, 0xb9, 0x30, 0x22, 0xdf, 0xb5, 0x1a, 0xbf, 0x98, 0xb3, 0xf2, 0xcf, 0x12,
    0xb7, 0x8c, 0xd6, 0x10, 0x05, 0x70, 0x59, 0x02, 0xd2, 0xb6, 0xbf, 0xf1, 0x2f, 0xed, 0xfa, 0x1d,
    0x2e, 0xd9, 0xa3, 0x46, 0x6a, 0x6e, 0x74, 0x62, 0x00, 0x91, 0x1f, 0x77, 0xe0, 0xac, 0xc4, 0xab,
    0xbb, 0x3b, 0xad, 0xe7, 0x64, 0xfa, 0xed, 0x5d, 0x22, 0x29, 0x2d, 0xa3, 0xe5, 0xe0, 0xfe, 0x9d,
    0xcc, 0x32, 0x79, 0x47, 0x10, 0xa7, 0xbe, 0x2d, 0x7e, 0xb3, 0x90, 0x48, 0x46, 0x46, 0x25, 0xc1,
    0xc2, 0x97, 0x8a, 0xc0, 0x28, 0x91, 0xf9, 0x15, 0xd1, 0xaf, 0x89, 0x45, 0x14, 0x0d, 0xab, 0xc0,
    0xdb, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x71, 0xd7, 0x7f, 0xec, 0x2d, 0x06, 0x00, 0x00,
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// MilvusServiceClient is the client API for MilvusService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type MilvusServiceClient interface {
    //*
    // @brief This method is used to create a collection.
    //
    // @param CollectionSchema, used to provide the information of the collection to be created.
    //
    // @return Status
    CreateCollection(ctx context.Context, in *schemapb.CollectionSchema, opts ...grpc.CallOption) (*commonpb.Status, error)
    //*
    // @brief This method is used to delete a collection.
    //
    // @param CollectionName, name of the collection to be deleted.
    //
    // @return Status
    DropCollection(ctx context.Context, in *CollectionName, opts ...grpc.CallOption) (*commonpb.Status, error)
    //*
    // @brief This method is used to test collection existence.
    //
    // @param CollectionName, name of the collection to be tested.
    //
    // @return BoolResponse
    HasCollection(ctx context.Context, in *CollectionName, opts ...grpc.CallOption) (*BoolResponse, error)
    //*
    // @brief This method is used to get a collection's schema.
    //
    // @param CollectionName, target collection name.
    //
    // @return CollectionDescription
    DescribeCollection(ctx context.Context, in *CollectionName, opts ...grpc.CallOption) (*CollectionDescription, error)
    //*
    // @brief This method is used to list all collections.
    //
    // @return StringListResponse
    ShowCollections(ctx context.Context, in *commonpb.Empty, opts ...grpc.CallOption) (*StringListResponse, error)
    //*
    // @brief This method is used to create a partition.
    //
    // @return Status
    CreatePartition(ctx context.Context, in *PartitionName, opts ...grpc.CallOption) (*commonpb.Status, error)
    //*
    // @brief This method is used to drop a partition.
    //
    // @return Status
    DropPartition(ctx context.Context, in *PartitionName, opts ...grpc.CallOption) (*commonpb.Status, error)
    //*
    // @brief This method is used to test partition existence.
    //
    // @return BoolResponse
    HasPartition(ctx context.Context, in *PartitionName, opts ...grpc.CallOption) (*BoolResponse, error)
    //*
    // @brief This method is used to get basic partition information.
    //
    // @return PartitionDescription
    DescribePartition(ctx context.Context, in *PartitionName, opts ...grpc.CallOption) (*PartitionDescription, error)
    //*
    // @brief This method is used to show partition information.
    //
    // @param CollectionName, target collection name.
    //
    // @return StringListResponse
    ShowPartitions(ctx context.Context, in *CollectionName, opts ...grpc.CallOption) (*StringListResponse, error)
    //*
    // @brief This method is used to add a vector array to a collection.
    //
    // @param RowBatch, rows to insert.
    //
    // @return IntegerRangeResponse containing the ids of the inserted rows.
    Insert(ctx context.Context, in *RowBatch, opts ...grpc.CallOption) (*IntegerRangeResponse, error)
    //*
    // @brief This method is used to query vectors in a collection.
    //
    // @param Query.
    //
    // @return QueryResult
    Search(ctx context.Context, in *Query, opts ...grpc.CallOption) (*QueryResult, error)
    //*
    // @brief This method is used to build an index on a collection in sync mode.
    //
    // @param IndexParam, index parameters.
    //
    // @return Status
    CreateIndex(ctx context.Context, in *IndexParam, opts ...grpc.CallOption) (*commonpb.Status, error)
    //*
    // @brief This method is used to describe an index.
    //
    // @param DescribeIndexRequest, target index.
    //
    // @return DescribeIndexResponse
    DescribeIndex(ctx context.Context, in *DescribeIndexRequest, opts ...grpc.CallOption) (*DescribeIndexResponse, error)
    //*
    // @brief This method is used to query index building progress.
    //
    // @param DescribeIndexProgressRequest, target index.
    //
    // @return BoolResponse
    DescribeIndexProgress(ctx context.Context, in *DescribeIndexProgressRequest, opts ...grpc.CallOption) (*BoolResponse, error)
}

type milvusServiceClient struct {
    cc *grpc.ClientConn
}

func NewMilvusServiceClient(cc *grpc.ClientConn) MilvusServiceClient {
    return &milvusServiceClient{cc}
}

func (c *milvusServiceClient) CreateCollection(ctx context.Context, in *schemapb.CollectionSchema, opts ...grpc.CallOption) (*commonpb.Status, error) {
    out := new(commonpb.Status)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/CreateCollection", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) DropCollection(ctx context.Context, in *CollectionName, opts ...grpc.CallOption) (*commonpb.Status, error) {
    out := new(commonpb.Status)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/DropCollection", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) HasCollection(ctx context.Context, in *CollectionName, opts ...grpc.CallOption) (*BoolResponse, error) {
    out := new(BoolResponse)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/HasCollection", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) DescribeCollection(ctx context.Context, in *CollectionName, opts ...grpc.CallOption) (*CollectionDescription, error) {
    out := new(CollectionDescription)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/DescribeCollection", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) ShowCollections(ctx context.Context, in *commonpb.Empty, opts ...grpc.CallOption) (*StringListResponse, error) {
    out := new(StringListResponse)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/ShowCollections", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) CreatePartition(ctx context.Context, in *PartitionName, opts ...grpc.CallOption) (*commonpb.Status, error) {
    out := new(commonpb.Status)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/CreatePartition", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) DropPartition(ctx context.Context, in *PartitionName, opts ...grpc.CallOption) (*commonpb.Status, error) {
    out := new(commonpb.Status)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/DropPartition", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) HasPartition(ctx context.Context, in *PartitionName, opts ...grpc.CallOption) (*BoolResponse, error) {
    out := new(BoolResponse)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/HasPartition", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) DescribePartition(ctx context.Context, in *PartitionName, opts ...grpc.CallOption) (*PartitionDescription, error) {
    out := new(PartitionDescription)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/DescribePartition", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) ShowPartitions(ctx context.Context, in *CollectionName, opts ...grpc.CallOption) (*StringListResponse, error) {
    out := new(StringListResponse)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/ShowPartitions", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) Insert(ctx context.Context, in *RowBatch, opts ...grpc.CallOption) (*IntegerRangeResponse, error) {
    out := new(IntegerRangeResponse)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/Insert", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) Search(ctx context.Context, in *Query, opts ...grpc.CallOption) (*QueryResult, error) {
    out := new(QueryResult)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/Search", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) CreateIndex(ctx context.Context, in *IndexParam, opts ...grpc.CallOption) (*commonpb.Status, error) {
    out := new(commonpb.Status)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/CreateIndex", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) DescribeIndex(ctx context.Context, in *DescribeIndexRequest, opts ...grpc.CallOption) (*DescribeIndexResponse, error) {
    out := new(DescribeIndexResponse)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/DescribeIndex", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

func (c *milvusServiceClient) DescribeIndexProgress(ctx context.Context, in *DescribeIndexProgressRequest, opts ...grpc.CallOption) (*BoolResponse, error) {
    out := new(BoolResponse)
    err := c.cc.Invoke(ctx, "/milvus.proto.service.MilvusService/DescribeIndexProgress", in, out, opts...)
    if err != nil {
        return nil, err
    }
    return out, nil
}

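For orientation, a caller might wire the generated client up as below. This is a minimal sketch, not code from this commit: the dial address is a placeholder, and error handling is reduced to log.Fatal.

package main

import (
    "context"
    "log"
    "time"

    "google.golang.org/grpc"

    "github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)

func main() {
    // Dial the proxy node; the address is an assumption for illustration.
    conn, err := grpc.Dial("localhost:19530", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := servicepb.NewMilvusServiceClient(conn)

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    // Check for a collection before operating on it.
    resp, err := client.HasCollection(ctx, &servicepb.CollectionName{CollectionName: "demo"})
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("has collection: %v (status: %v)", resp.Value, resp.Status)
}
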
// MilvusServiceServer is the server API for MilvusService service.
type MilvusServiceServer interface {
    //*
    // @brief This method is used to create a collection.
    //
    // @param CollectionSchema, used to provide the information of the collection to be created.
    //
    // @return Status
    CreateCollection(context.Context, *schemapb.CollectionSchema) (*commonpb.Status, error)
    //*
    // @brief This method is used to delete a collection.
    //
    // @param CollectionName, name of the collection to be deleted.
    //
    // @return Status
    DropCollection(context.Context, *CollectionName) (*commonpb.Status, error)
    //*
    // @brief This method is used to test collection existence.
    //
    // @param CollectionName, name of the collection to be tested.
    //
    // @return BoolResponse
    HasCollection(context.Context, *CollectionName) (*BoolResponse, error)
    //*
    // @brief This method is used to get a collection's schema.
    //
    // @param CollectionName, target collection name.
    //
    // @return CollectionDescription
    DescribeCollection(context.Context, *CollectionName) (*CollectionDescription, error)
    //*
    // @brief This method is used to list all collections.
    //
    // @return StringListResponse
    ShowCollections(context.Context, *commonpb.Empty) (*StringListResponse, error)
    //*
    // @brief This method is used to create a partition.
    //
    // @return Status
    CreatePartition(context.Context, *PartitionName) (*commonpb.Status, error)
    //*
    // @brief This method is used to drop a partition.
    //
    // @return Status
    DropPartition(context.Context, *PartitionName) (*commonpb.Status, error)
    //*
    // @brief This method is used to test partition existence.
    //
    // @return BoolResponse
    HasPartition(context.Context, *PartitionName) (*BoolResponse, error)
    //*
    // @brief This method is used to get basic partition information.
    //
    // @return PartitionDescription
    DescribePartition(context.Context, *PartitionName) (*PartitionDescription, error)
    //*
    // @brief This method is used to show partition information.
    //
    // @param CollectionName, target collection name.
    //
    // @return StringListResponse
    ShowPartitions(context.Context, *CollectionName) (*StringListResponse, error)
    //*
    // @brief This method is used to add a vector array to a collection.
    //
    // @param RowBatch, rows to insert.
    //
    // @return IntegerRangeResponse containing the ids of the inserted rows.
    Insert(context.Context, *RowBatch) (*IntegerRangeResponse, error)
    //*
    // @brief This method is used to query vectors in a collection.
    //
    // @param Query.
    //
    // @return QueryResult
    Search(context.Context, *Query) (*QueryResult, error)
    //*
    // @brief This method is used to build an index on a collection in sync mode.
    //
    // @param IndexParam, index parameters.
    //
    // @return Status
    CreateIndex(context.Context, *IndexParam) (*commonpb.Status, error)
    //*
    // @brief This method is used to describe an index.
    //
    // @param DescribeIndexRequest, target index.
    //
    // @return DescribeIndexResponse
    DescribeIndex(context.Context, *DescribeIndexRequest) (*DescribeIndexResponse, error)
    //*
    // @brief This method is used to query index building progress.
    //
    // @param DescribeIndexProgressRequest, target index.
    //
    // @return BoolResponse
    DescribeIndexProgress(context.Context, *DescribeIndexProgressRequest) (*BoolResponse, error)
}

// UnimplementedMilvusServiceServer can be embedded to have forward compatible implementations.
type UnimplementedMilvusServiceServer struct {
}

func (*UnimplementedMilvusServiceServer) CreateCollection(ctx context.Context, req *schemapb.CollectionSchema) (*commonpb.Status, error) {
    return nil, status.Errorf(codes.Unimplemented, "method CreateCollection not implemented")
}
func (*UnimplementedMilvusServiceServer) DropCollection(ctx context.Context, req *CollectionName) (*commonpb.Status, error) {
    return nil, status.Errorf(codes.Unimplemented, "method DropCollection not implemented")
}
func (*UnimplementedMilvusServiceServer) HasCollection(ctx context.Context, req *CollectionName) (*BoolResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method HasCollection not implemented")
}
func (*UnimplementedMilvusServiceServer) DescribeCollection(ctx context.Context, req *CollectionName) (*CollectionDescription, error) {
    return nil, status.Errorf(codes.Unimplemented, "method DescribeCollection not implemented")
}
func (*UnimplementedMilvusServiceServer) ShowCollections(ctx context.Context, req *commonpb.Empty) (*StringListResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method ShowCollections not implemented")
}
func (*UnimplementedMilvusServiceServer) CreatePartition(ctx context.Context, req *PartitionName) (*commonpb.Status, error) {
    return nil, status.Errorf(codes.Unimplemented, "method CreatePartition not implemented")
}
func (*UnimplementedMilvusServiceServer) DropPartition(ctx context.Context, req *PartitionName) (*commonpb.Status, error) {
    return nil, status.Errorf(codes.Unimplemented, "method DropPartition not implemented")
}
func (*UnimplementedMilvusServiceServer) HasPartition(ctx context.Context, req *PartitionName) (*BoolResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method HasPartition not implemented")
}
func (*UnimplementedMilvusServiceServer) DescribePartition(ctx context.Context, req *PartitionName) (*PartitionDescription, error) {
    return nil, status.Errorf(codes.Unimplemented, "method DescribePartition not implemented")
}
func (*UnimplementedMilvusServiceServer) ShowPartitions(ctx context.Context, req *CollectionName) (*StringListResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method ShowPartitions not implemented")
}
func (*UnimplementedMilvusServiceServer) Insert(ctx context.Context, req *RowBatch) (*IntegerRangeResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method Insert not implemented")
}
func (*UnimplementedMilvusServiceServer) Search(ctx context.Context, req *Query) (*QueryResult, error) {
    return nil, status.Errorf(codes.Unimplemented, "method Search not implemented")
}
func (*UnimplementedMilvusServiceServer) CreateIndex(ctx context.Context, req *IndexParam) (*commonpb.Status, error) {
    return nil, status.Errorf(codes.Unimplemented, "method CreateIndex not implemented")
}
func (*UnimplementedMilvusServiceServer) DescribeIndex(ctx context.Context, req *DescribeIndexRequest) (*DescribeIndexResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method DescribeIndex not implemented")
}
func (*UnimplementedMilvusServiceServer) DescribeIndexProgress(ctx context.Context, req *DescribeIndexProgressRequest) (*BoolResponse, error) {
    return nil, status.Errorf(codes.Unimplemented, "method DescribeIndexProgress not implemented")
}

func RegisterMilvusServiceServer(s *grpc.Server, srv MilvusServiceServer) {
    s.RegisterService(&_MilvusService_serviceDesc, srv)
}

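UnimplementedMilvusServiceServer lets a partial implementation compile while returning codes.Unimplemented for every RPC it does not override. A minimal sketch of that embedding pattern; demoServer and its HasCollection body are invented for illustration and not part of this commit:

type demoServer struct {
    servicepb.UnimplementedMilvusServiceServer // stubs every RPC with codes.Unimplemented
}

// Override only the RPCs you need; the rest fall through to the stubs.
func (s *demoServer) HasCollection(ctx context.Context, req *servicepb.CollectionName) (*servicepb.BoolResponse, error) {
    return &servicepb.BoolResponse{
        Status: &commonpb.Status{ErrorCode: commonpb.ErrorCode_SUCCESS},
        Value:  req.CollectionName == "demo", // illustrative check only
    }, nil
}
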
func _MilvusService_CreateCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(schemapb.CollectionSchema)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).CreateCollection(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/CreateCollection",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).CreateCollection(ctx, req.(*schemapb.CollectionSchema))
    }
    return interceptor(ctx, in, info, handler)
}

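Each generated handler follows the same shape: decode the request, call the service method directly when no interceptor is installed, otherwise route through the grpc.UnaryServerInterceptor. A server-wide interceptor is supplied at construction time; a minimal logging sketch, assuming nothing beyond the standard grpc-go API:

func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    start := time.Now()
    resp, err := handler(ctx, req) // invoke the wrapped handler
    log.Printf("%s took %v (err: %v)", info.FullMethod, time.Since(start), err)
    return resp, err
}

// Installed once for the whole server:
//   s := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
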
func _MilvusService_DropCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(CollectionName)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).DropCollection(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/DropCollection",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).DropCollection(ctx, req.(*CollectionName))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_HasCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(CollectionName)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).HasCollection(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/HasCollection",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).HasCollection(ctx, req.(*CollectionName))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_DescribeCollection_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(CollectionName)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).DescribeCollection(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/DescribeCollection",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).DescribeCollection(ctx, req.(*CollectionName))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_ShowCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(commonpb.Empty)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).ShowCollections(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/ShowCollections",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).ShowCollections(ctx, req.(*commonpb.Empty))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_CreatePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(PartitionName)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).CreatePartition(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/CreatePartition",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).CreatePartition(ctx, req.(*PartitionName))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_DropPartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(PartitionName)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).DropPartition(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/DropPartition",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).DropPartition(ctx, req.(*PartitionName))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_HasPartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(PartitionName)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).HasPartition(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/HasPartition",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).HasPartition(ctx, req.(*PartitionName))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_DescribePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(PartitionName)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).DescribePartition(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/DescribePartition",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).DescribePartition(ctx, req.(*PartitionName))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_ShowPartitions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(CollectionName)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).ShowPartitions(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/ShowPartitions",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).ShowPartitions(ctx, req.(*CollectionName))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_Insert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(RowBatch)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).Insert(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/Insert",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).Insert(ctx, req.(*RowBatch))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(Query)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).Search(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/Search",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).Search(ctx, req.(*Query))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_CreateIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(IndexParam)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).CreateIndex(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/CreateIndex",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).CreateIndex(ctx, req.(*IndexParam))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_DescribeIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(DescribeIndexRequest)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).DescribeIndex(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/DescribeIndex",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).DescribeIndex(ctx, req.(*DescribeIndexRequest))
    }
    return interceptor(ctx, in, info, handler)
}

func _MilvusService_DescribeIndexProgress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
    in := new(DescribeIndexProgressRequest)
    if err := dec(in); err != nil {
        return nil, err
    }
    if interceptor == nil {
        return srv.(MilvusServiceServer).DescribeIndexProgress(ctx, in)
    }
    info := &grpc.UnaryServerInfo{
        Server:     srv,
        FullMethod: "/milvus.proto.service.MilvusService/DescribeIndexProgress",
    }
    handler := func(ctx context.Context, req interface{}) (interface{}, error) {
        return srv.(MilvusServiceServer).DescribeIndexProgress(ctx, req.(*DescribeIndexProgressRequest))
    }
    return interceptor(ctx, in, info, handler)
}

var _MilvusService_serviceDesc = grpc.ServiceDesc{
    ServiceName: "milvus.proto.service.MilvusService",
    HandlerType: (*MilvusServiceServer)(nil),
    Methods: []grpc.MethodDesc{
        {
            MethodName: "CreateCollection",
            Handler:    _MilvusService_CreateCollection_Handler,
        },
        {
            MethodName: "DropCollection",
            Handler:    _MilvusService_DropCollection_Handler,
        },
        {
            MethodName: "HasCollection",
            Handler:    _MilvusService_HasCollection_Handler,
        },
        {
            MethodName: "DescribeCollection",
            Handler:    _MilvusService_DescribeCollection_Handler,
        },
        {
            MethodName: "ShowCollections",
            Handler:    _MilvusService_ShowCollections_Handler,
        },
        {
            MethodName: "CreatePartition",
            Handler:    _MilvusService_CreatePartition_Handler,
        },
        {
            MethodName: "DropPartition",
            Handler:    _MilvusService_DropPartition_Handler,
        },
        {
            MethodName: "HasPartition",
            Handler:    _MilvusService_HasPartition_Handler,
        },
        {
            MethodName: "DescribePartition",
            Handler:    _MilvusService_DescribePartition_Handler,
        },
        {
            MethodName: "ShowPartitions",
            Handler:    _MilvusService_ShowPartitions_Handler,
        },
        {
            MethodName: "Insert",
            Handler:    _MilvusService_Insert_Handler,
        },
        {
            MethodName: "Search",
            Handler:    _MilvusService_Search_Handler,
        },
        {
            MethodName: "CreateIndex",
            Handler:    _MilvusService_CreateIndex_Handler,
        },
        {
            MethodName: "DescribeIndex",
            Handler:    _MilvusService_DescribeIndex_Handler,
        },
        {
            MethodName: "DescribeIndexProgress",
            Handler:    _MilvusService_DescribeIndexProgress_Handler,
        },
    },
    Streams:  []grpc.StreamDesc{},
    Metadata: "service.proto",
}
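
To bring the service up, the descriptor above is bound to a grpc.Server and served on a listener. A minimal sketch; the port is an assumption and demoServer is the hypothetical implementation from the earlier sketch:

lis, err := net.Listen("tcp", ":19530") // placeholder port
if err != nil {
    log.Fatal(err)
}
s := grpc.NewServer()
servicepb.RegisterMilvusServiceServer(s, &demoServer{})
if err := s.Serve(lis); err != nil {
    log.Fatal(err)
}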

File diff suppressed because it is too large

@ -1,543 +1,21 @@
package proxynode

import (
    "context"
    "errors"
    "strconv"

    "log"
    "time"

    "github.com/opentracing/opentracing-go"
    "github.com/zilliztech/milvus-distributed/internal/msgstream"
    "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
    "github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
    "github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
    "github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)

const (
    reqTimeoutInterval = time.Second * 10
)

func (p *Proxy) Insert(ctx context.Context, in *servicepb.RowBatch) (*servicepb.IntegerRangeResponse, error) {
    span, ctx := opentracing.StartSpanFromContext(ctx, "insert grpc received")
    defer span.Finish()
    span.SetTag("collection name", in.CollectionName)
    span.SetTag("partition tag", in.PartitionTag)
    log.Println("insert into: ", in.CollectionName)
    it := &InsertTask{
        ctx:       ctx,
        Condition: NewTaskCondition(ctx),
        BaseInsertTask: BaseInsertTask{
            BaseMsg: msgstream.BaseMsg{
                HashValues: in.HashKeys,
            },
            InsertRequest: internalpb2.InsertRequest{
                Base: &commonpb.MsgBase{
                    MsgType: commonpb.MsgType_kInsert,
                    MsgID:   0,
                },
                CollectionName: in.CollectionName,
                PartitionName:  in.PartitionTag,
                RowData:        in.RowData,
            },
        },
        manipulationMsgStream: p.manipulationMsgStream,
        rowIDAllocator:        p.idAllocator,
    }
    if len(it.PartitionName) <= 0 {
        it.PartitionName = Params.defaultPartitionTag()
    }

    var cancel func()
    it.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)

    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("insert timeout")
        default:
            return p.sched.DmQueue.Enqueue(it)
        }
    }
    err := fn()

    if err != nil {
        return &servicepb.IntegerRangeResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = it.WaitToFinish()
    if err != nil {
        return &servicepb.IntegerRangeResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return it.result, nil
}

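Every handler in this file repeats the same choreography: wrap the request in a task, refuse to enqueue once the context is done, then block on WaitToFinish. The shared shape could be captured by a helper like the hypothetical enqueueAndWait below; it is not part of this commit, only an illustration of the pattern:

// enqueueAndWait is a hypothetical helper illustrating the pattern shared
// by the handlers in this file; it does not exist in the source.
func enqueueAndWait(ctx context.Context, enqueue func() error, wait func() error) error {
    select {
    case <-ctx.Done():
        return errors.New("request timeout") // mirror the handlers' timeout guard
    default:
    }
    if err := enqueue(); err != nil {
        return err
    }
    return wait() // block until the scheduler marks the task finished
}
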
func (p *Proxy) CreateCollection(ctx context.Context, req *schemapb.CollectionSchema) (*commonpb.Status, error) {
    log.Println("create collection: ", req)
    cct := &CreateCollectionTask{
        Condition: NewTaskCondition(ctx),
        CreateCollectionRequest: milvuspb.CreateCollectionRequest{
            Base: &commonpb.MsgBase{
                MsgType: commonpb.MsgType_kCreateCollection,
            },
        },
        masterClient: p.masterClient,
        schema:       req,
    }
    var cancel func()
    cct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("create collection timeout")
        default:
            return p.sched.DdQueue.Enqueue(cct)
        }
    }
    err := fn()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }

    err = cct.WaitToFinish()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }

    return cct.result, nil
}

func (p *Proxy) Search(ctx context.Context, req *servicepb.Query) (*servicepb.QueryResult, error) {
    span, ctx := opentracing.StartSpanFromContext(ctx, "search grpc received")
    defer span.Finish()
    span.SetTag("collection name", req.CollectionName)
    span.SetTag("partition tag", req.PartitionTags)
    span.SetTag("dsl", req.Dsl)
    log.Println("search: ", req.CollectionName, req.Dsl)
    qt := &SearchTask{
        ctx:       ctx,
        Condition: NewTaskCondition(ctx),
        SearchRequest: internalpb2.SearchRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_kSearch,
                SourceID: Params.ProxyID(),
            },
            ResultChannelID: strconv.FormatInt(Params.ProxyID(), 10),
        },
        queryMsgStream: p.queryMsgStream,
        resultBuf:      make(chan []*internalpb2.SearchResults),
        query:          req,
    }
    var cancel func()
    qt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    log.Printf("grpc address of query task: %p", qt)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("search timeout")
        default:
            return p.sched.DqQueue.Enqueue(qt)
        }
    }
    err := fn()
    if err != nil {
        return &servicepb.QueryResult{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = qt.WaitToFinish()
    if err != nil {
        return &servicepb.QueryResult{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return qt.result, nil
}

func (p *Proxy) DropCollection(ctx context.Context, req *servicepb.CollectionName) (*commonpb.Status, error) {
    log.Println("drop collection: ", req)
    dct := &DropCollectionTask{
        Condition: NewTaskCondition(ctx),
        DropCollectionRequest: milvuspb.DropCollectionRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_kDropCollection,
                SourceID: Params.ProxyID(),
            },
            CollectionName: req.CollectionName,
        },
        masterClient: p.masterClient,
    }
    var cancel func()
    dct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("drop collection timeout")
        default:
            return p.sched.DdQueue.Enqueue(dct)
        }
    }
    err := fn()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }

    err = dct.WaitToFinish()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }

    return dct.result, nil
}

func (p *Proxy) HasCollection(ctx context.Context, req *servicepb.CollectionName) (*servicepb.BoolResponse, error) {
    log.Println("has collection: ", req)
    hct := &HasCollectionTask{
        Condition: NewTaskCondition(ctx),
        HasCollectionRequest: milvuspb.HasCollectionRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_kHasCollection,
                SourceID: Params.ProxyID(),
            },
            CollectionName: req.CollectionName,
        },
        masterClient: p.masterClient,
    }
    var cancel func()
    hct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("has collection timeout")
        default:
            return p.sched.DdQueue.Enqueue(hct)
        }
    }
    err := fn()
    if err != nil {
        return &servicepb.BoolResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = hct.WaitToFinish()
    if err != nil {
        return &servicepb.BoolResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return hct.result, nil
}

func (p *Proxy) DescribeCollection(ctx context.Context, req *servicepb.CollectionName) (*servicepb.CollectionDescription, error) {
    log.Println("describe collection: ", req)
    dct := &DescribeCollectionTask{
        Condition: NewTaskCondition(ctx),
        DescribeCollectionRequest: milvuspb.DescribeCollectionRequest{
            Base: &commonpb.MsgBase{
                MsgType: commonpb.MsgType_kDescribeCollection,
            },
            CollectionName: req.CollectionName,
        },
        masterClient: p.masterClient,
    }
    var cancel func()
    dct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("describe collection timeout")
        default:
            return p.sched.DdQueue.Enqueue(dct)
        }
    }
    err := fn()
    if err != nil {
        return &servicepb.CollectionDescription{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = dct.WaitToFinish()
    if err != nil {
        return &servicepb.CollectionDescription{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return dct.result, nil
}

func (p *Proxy) ShowCollections(ctx context.Context, req *commonpb.Empty) (*servicepb.StringListResponse, error) {
    log.Println("show collections")
    sct := &ShowCollectionsTask{
        Condition: NewTaskCondition(ctx),
        ShowCollectionRequest: milvuspb.ShowCollectionRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_kShowCollections,
                SourceID: Params.ProxyID(),
            },
        },
        masterClient: p.masterClient,
    }
    var cancel func()
    sct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("show collections timeout")
        default:
            return p.sched.DdQueue.Enqueue(sct)
        }
    }
    err := fn()
    if err != nil {
        return &servicepb.StringListResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = sct.WaitToFinish()
    if err != nil {
        return &servicepb.StringListResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return sct.result, nil
}

func (p *Proxy) CreatePartition(ctx context.Context, in *servicepb.PartitionName) (*commonpb.Status, error) {
    log.Println("create partition", in)
    cpt := &CreatePartitionTask{
        Condition: NewTaskCondition(ctx),
        CreatePartitionRequest: milvuspb.CreatePartitionRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_kCreatePartition,
                SourceID: Params.ProxyID(),
            },
            CollectionName: in.CollectionName,
            PartitionName:  in.Tag,
            //TODO, ReqID,Timestamp,ProxyID
        },
        masterClient: p.masterClient,
        result:       nil,
        ctx:          nil,
    }
    var cancel func()
    cpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    err := func() error {
        select {
        case <-ctx.Done():
            return errors.New("create partition timeout")
        default:
            return p.sched.DdQueue.Enqueue(cpt)
        }
    }()

    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }
    err = cpt.WaitToFinish()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }
    return cpt.result, nil
}

func (p *Proxy) DropPartition(ctx context.Context, in *servicepb.PartitionName) (*commonpb.Status, error) {
    log.Println("drop partition: ", in)
    dpt := &DropPartitionTask{
        Condition: NewTaskCondition(ctx),
        DropPartitionRequest: milvuspb.DropPartitionRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_kDropPartition,
                SourceID: Params.ProxyID(),
            },
            CollectionName: in.CollectionName,
            PartitionName:  in.Tag,
            //TODO, ReqID,Timestamp,ProxyID
        },
        masterClient: p.masterClient,
        result:       nil,
        ctx:          nil,
    }

    var cancel func()
    dpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    err := func() error {
        select {
        case <-ctx.Done():
            return errors.New("drop partition timeout")
        default:
            return p.sched.DdQueue.Enqueue(dpt)
        }
    }()

    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }
    err = dpt.WaitToFinish()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }
    return dpt.result, nil
}

func (p *Proxy) HasPartition(ctx context.Context, in *servicepb.PartitionName) (*servicepb.BoolResponse, error) {
    log.Println("has partition: ", in)
    hpt := &HasPartitionTask{
        Condition: NewTaskCondition(ctx),
        HasPartitionRequest: milvuspb.HasPartitionRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_kHasPartition,
                SourceID: Params.ProxyID(),
            },
            CollectionName: in.CollectionName,
            PartitionName:  in.Tag,
            //TODO, ReqID,Timestamp,ProxyID
        },
        masterClient: p.masterClient,
        result:       nil,
        ctx:          nil,
    }

    var cancel func()
    hpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    err := func() error {
        select {
        case <-ctx.Done():
            return errors.New("has partition timeout")
        default:
            return p.sched.DdQueue.Enqueue(hpt)
        }
    }()

    if err != nil {
        return &servicepb.BoolResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
            Value: false,
        }, nil
    }
    err = hpt.WaitToFinish()
    if err != nil {
        return &servicepb.BoolResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
            Value: false,
        }, nil
    }
    return hpt.result, nil
}

func (p *Proxy) DescribePartition(ctx context.Context, in *servicepb.PartitionName) (*servicepb.PartitionDescription, error) {
    log.Println("describe partition: ", in)

    return &servicepb.PartitionDescription{
        Status: &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    "Deprecated!",
        },
        Name:       in,
        Statistics: nil,
    }, nil
}

//func (p *Proxy) DescribePartition2(ctx context.Context, in *servicepb.PartitionName) (*servicepb.PartitionDescription, error) {
//
//func (node *NodeImpl) DescribePartition(ctx context.Context, in *milvuspb.PartitionName) (*milvuspb.PartitionDescription, error) {
//    log.Println("describe partition: ", in)
//
//    return &milvuspb.PartitionDescription{
//        Status: &commonpb.Status{
//            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
//            Reason:    "Deprecated!",
//        },
//        Name:       in,
//        Statistics: nil,
//    }, nil
//
//}
//
//func (p *NodeImpl) DescribePartition2(ctx context.Context, in *milvuspb.PartitionName) (*milvuspb.PartitionDescription, error) {
//    log.Println("describe partition: ", in)
//    dpt := &DescribePartitionTask{
//        Condition: NewTaskCondition(ctx),

@ -568,7 +46,7 @@ func (p *Proxy) DescribePartition(ctx context.Context, in *servicepb.PartitionNa
//    }()
//
//    if err != nil {
//        return &servicepb.PartitionDescription{
//        return &milvuspb.PartitionDescription{
//            Status: &commonpb.Status{
//                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
//                Reason:    err.Error(),
|
@ -580,7 +58,7 @@ func (p *Proxy) DescribePartition(ctx context.Context, in *servicepb.PartitionNa
|
|||
//
|
||||
// err = dpt.WaitToFinish()
|
||||
// if err != nil {
|
||||
// return &servicepb.PartitionDescription{
|
||||
// return &milvuspb.PartitionDescription{
|
||||
// Status: &commonpb.Status{
|
||||
// ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
// Reason: err.Error(),
|
||||
|
@ -591,210 +69,55 @@ func (p *Proxy) DescribePartition(ctx context.Context, in *servicepb.PartitionNa
|
|||
// }
|
||||
// return dpt.result, nil
|
||||
//}
|
||||
|
||||
func (p *Proxy) ShowPartitions(ctx context.Context, req *servicepb.CollectionName) (*servicepb.StringListResponse, error) {
|
||||
log.Println("show partitions: ", req)
|
||||
spt := &ShowPartitionsTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
ShowPartitionRequest: milvuspb.ShowPartitionRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kShowPartitions,
|
||||
SourceID: Params.ProxyID(),
|
||||
},
|
||||
CollectionName: req.CollectionName,
|
||||
},
|
||||
masterClient: p.masterClient,
|
||||
result: nil,
|
||||
ctx: nil,
|
||||
}
|
||||
|
||||
var cancel func()
|
||||
spt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
err := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("show partition timeout")
|
||||
default:
|
||||
return p.sched.DdQueue.Enqueue(spt)
|
||||
}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
return &servicepb.StringListResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: err.Error(),
|
||||
},
|
||||
Values: nil,
|
||||
}, nil
|
||||
}
|
||||
|
||||
err = spt.WaitToFinish()
|
||||
if err != nil {
|
||||
return &servicepb.StringListResponse{
|
||||
Status: &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: err.Error(),
|
||||
},
|
||||
Values: nil,
|
||||
}, nil
|
||||
}
|
||||
return spt.result, nil
|
||||
}
|
||||
|
||||
func (p *Proxy) CreateIndex(ctx context.Context, indexParam *servicepb.IndexParam) (*commonpb.Status, error) {
|
||||
log.Println("create index for: ", indexParam.FieldName)
|
||||
cit := &CreateIndexTask{
|
||||
Condition: NewTaskCondition(ctx),
|
||||
CreateIndexRequest: milvuspb.CreateIndexRequest{
|
||||
Base: &commonpb.MsgBase{
|
||||
MsgType: commonpb.MsgType_kCreateIndex,
|
||||
SourceID: Params.ProxyID(),
|
||||
},
|
||||
CollectionName: indexParam.CollectionName,
|
||||
FieldName: indexParam.FieldName,
|
||||
ExtraParams: indexParam.ExtraParams,
|
||||
},
|
||||
masterClient: p.masterClient,
|
||||
}
|
||||
|
||||
var cancel func()
|
||||
cit.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
|
||||
defer cancel()
|
||||
|
||||
fn := func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("create index timeout")
|
||||
default:
|
||||
return p.sched.DdQueue.Enqueue(cit)
|
||||
}
|
||||
}
|
||||
err := fn()
|
||||
if err != nil {
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: err.Error(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
err = cit.WaitToFinish()
|
||||
if err != nil {
|
||||
return &commonpb.Status{
|
||||
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
|
||||
Reason: err.Error(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return cit.result, nil
|
||||
}

func (p *Proxy) DescribeIndex(ctx context.Context, req *servicepb.DescribeIndexRequest) (*servicepb.DescribeIndexResponse, error) {
    log.Println("Describe index for: ", req.FieldName)
    dit := &DescribeIndexTask{
        Condition: NewTaskCondition(ctx),
        DescribeIndexRequest: milvuspb.DescribeIndexRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_kDescribeIndex,
                SourceID: Params.ProxyID(),
            },
            CollectionName: req.CollectionName,
            FieldName:      req.FieldName,
        },
        masterClient: p.masterClient,
    }

    var cancel func()
    dit.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("describe index timeout")
        default:
            return p.sched.DdQueue.Enqueue(dit)
        }
    }
    err := fn()
    if err != nil {
        return &servicepb.DescribeIndexResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
            CollectionName: req.CollectionName,
            FieldName:      req.FieldName,
            ExtraParams:    nil,
        }, nil
    }

    err = dit.WaitToFinish()
    if err != nil {
        return &servicepb.DescribeIndexResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
            CollectionName: req.CollectionName,
            FieldName:      req.FieldName,
            ExtraParams:    nil,
        }, nil
    }

    return dit.result, nil
}

func (p *Proxy) DescribeIndexProgress(ctx context.Context, req *servicepb.DescribeIndexProgressRequest) (*servicepb.BoolResponse, error) {
    log.Println("Describe index progress for: ", req.FieldName)
    dipt := &DescribeIndexProgressTask{
        Condition: NewTaskCondition(ctx),
        IndexStateRequest: milvuspb.IndexStateRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_kGetIndexState,
                SourceID: Params.ProxyID(),
            },
            CollectionName: req.CollectionName,
            FieldName:      req.FieldName,
        },
        masterClient: p.masterClient,
    }

    var cancel func()
    dipt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("describe index progress timeout")
        default:
            return p.sched.DdQueue.Enqueue(dipt)
        }
    }
    err := fn()
    if err != nil {
        return &servicepb.BoolResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
            Value: false,
        }, nil
    }

    err = dipt.WaitToFinish()
    if err != nil {
        return &servicepb.BoolResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
            Value: false,
        }, nil
    }

    return dipt.result, nil
}
//
//func (node *NodeImpl) DescribeIndexProgress(ctx context.Context, req *milvuspb.DescribeIndexProgressRequest) (*milvuspb.BoolResponse, error) {
//    log.Println("Describe index progress for: ", req.FieldName)
//    dipt := &GetIndexStateTask{
//        Condition: NewTaskCondition(ctx),
//        IndexStateRequest: milvuspb.IndexStateRequest{
//            Base: &commonpb.MsgBase{
//                MsgType:  commonpb.MsgType_kGetIndexState,
//                SourceID: Params.ProxyID(),
//            },
//            CollectionName: req.CollectionName,
//            FieldName:      req.FieldName,
//        },
//        masterClient: node.masterClient,
//    }
//
//    var cancel func()
//    dipt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
//    defer cancel()
//
//    fn := func() error {
//        select {
//        case <-ctx.Done():
//            return errors.New("create index timeout")
//        default:
//            return node.sched.DdQueue.Enqueue(dipt)
//        }
//    }
//    err := fn()
//    if err != nil {
//        return &milvuspb.BoolResponse{
//            Status: &commonpb.Status{
//                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
//                Reason:    err.Error(),
//            },
//            Value: false,
//        }, nil
//    }
//
//    err = dipt.WaitToFinish()
//    if err != nil {
//        return &milvuspb.BoolResponse{
//            Status: &commonpb.Status{
//                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
//                Reason:    err.Error(),
//            },
//            Value: false,
//        }, nil
//    }
//
//    return dipt.result, nil
//}

@ -0,0 +1,674 @@
package proxynode

import (
    "context"
    "log"
    "strconv"
    "time"

    "github.com/opentracing/opentracing-go"
    "github.com/zilliztech/milvus-distributed/internal/msgstream"
    "github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"

    "github.com/zilliztech/milvus-distributed/internal/errors"
    "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
    "github.com/zilliztech/milvus-distributed/internal/proto/proxypb"
)

const (
    reqTimeoutInterval = time.Second * 10
)

func (node *NodeImpl) InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error) {
    panic("implement me")
}

func (node *NodeImpl) CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
    log.Println("create collection: ", request)
    cct := &CreateCollectionTask{
        Condition:               NewTaskCondition(ctx),
        CreateCollectionRequest: request,
        masterClient:            node.masterClient,
    }
    var cancel func()
    cct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("create collection timeout")
        default:
            return node.sched.DdQueue.Enqueue(cct)
        }
    }
    err := fn()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }

    err = cct.WaitToFinish()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }

    return cct.result, nil
}

func (node *NodeImpl) DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
    log.Println("drop collection: ", request)
    dct := &DropCollectionTask{
        Condition:             NewTaskCondition(ctx),
        DropCollectionRequest: request,
        masterClient:          node.masterClient,
    }
    var cancel func()
    dct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("drop collection timeout")
        default:
            return node.sched.DdQueue.Enqueue(dct)
        }
    }
    err := fn()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }

    err = dct.WaitToFinish()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }

    return dct.result, nil
}

func (node *NodeImpl) HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
    log.Println("has collection: ", request)
    hct := &HasCollectionTask{
        Condition:            NewTaskCondition(ctx),
        HasCollectionRequest: request,
        masterClient:         node.masterClient,
    }
    var cancel func()
    hct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("has collection timeout")
        default:
            return node.sched.DdQueue.Enqueue(hct)
        }
    }
    err := fn()
    if err != nil {
        return &milvuspb.BoolResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = hct.WaitToFinish()
    if err != nil {
        return &milvuspb.BoolResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return hct.result, nil
}

func (node *NodeImpl) LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error) {
    panic("implement me")
}

func (node *NodeImpl) ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error) {
    panic("implement me")
}

func (node *NodeImpl) DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
    log.Println("describe collection: ", request)
    dct := &DescribeCollectionTask{
        Condition:                 NewTaskCondition(ctx),
        DescribeCollectionRequest: request,
        masterClient:              node.masterClient,
    }
    var cancel func()
    dct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("describe collection timeout")
        default:
            return node.sched.DdQueue.Enqueue(dct)
        }
    }
    err := fn()
    if err != nil {
        return &milvuspb.DescribeCollectionResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = dct.WaitToFinish()
    if err != nil {
        return &milvuspb.DescribeCollectionResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return dct.result, nil
}

func (node *NodeImpl) GetCollectionStatistics(ctx context.Context, request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
    panic("implement me")
}

func (node *NodeImpl) ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
    log.Println("show collections")
    sct := &ShowCollectionsTask{
        Condition:             NewTaskCondition(ctx),
        ShowCollectionRequest: request,
        masterClient:          node.masterClient,
    }
    var cancel func()
    sct.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("show collections timeout")
        default:
            return node.sched.DdQueue.Enqueue(sct)
        }
    }
    err := fn()
    if err != nil {
        return &milvuspb.ShowCollectionResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = sct.WaitToFinish()
    if err != nil {
        return &milvuspb.ShowCollectionResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return sct.result, nil
}

func (node *NodeImpl) CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
    log.Println("create partition", request)
    cpt := &CreatePartitionTask{
        Condition:              NewTaskCondition(ctx),
        CreatePartitionRequest: request,
        masterClient:           node.masterClient,
        result:                 nil,
        ctx:                    nil,
    }
    var cancel func()
    cpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    err := func() error {
        select {
        case <-ctx.Done():
            return errors.New("create partition timeout")
        default:
            return node.sched.DdQueue.Enqueue(cpt)
        }
    }()

    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }
    err = cpt.WaitToFinish()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }
    return cpt.result, nil
}

func (node *NodeImpl) DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
    log.Println("drop partition: ", request)
    dpt := &DropPartitionTask{
        Condition:            NewTaskCondition(ctx),
        DropPartitionRequest: request,
        masterClient:         node.masterClient,
        result:               nil,
        ctx:                  nil,
    }

    var cancel func()
    dpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    err := func() error {
        select {
        case <-ctx.Done():
            return errors.New("drop partition timeout")
        default:
            return node.sched.DdQueue.Enqueue(dpt)
        }
    }()

    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }
    err = dpt.WaitToFinish()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }
    return dpt.result, nil
}

func (node *NodeImpl) HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
    log.Println("has partition: ", request)
    hpt := &HasPartitionTask{
        Condition:           NewTaskCondition(ctx),
        HasPartitionRequest: request,
        masterClient:        node.masterClient,
        result:              nil,
        ctx:                 nil,
    }

    var cancel func()
    hpt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    err := func() error {
        select {
        case <-ctx.Done():
            return errors.New("has partition timeout")
        default:
            return node.sched.DdQueue.Enqueue(hpt)
        }
    }()

    if err != nil {
        return &milvuspb.BoolResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
            Value: false,
        }, nil
    }
    err = hpt.WaitToFinish()
    if err != nil {
        return &milvuspb.BoolResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
            Value: false,
        }, nil
    }
    return hpt.result, nil
}

func (node *NodeImpl) LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitonRequest) (*commonpb.Status, error) {
    panic("implement me")
}

func (node *NodeImpl) ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionRequest) (*commonpb.Status, error) {
    panic("implement me")
}

func (node *NodeImpl) GetPartitionStatistics(ctx context.Context, request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
    panic("implement me")
}

func (node *NodeImpl) ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
    log.Println("show partitions: ", request)
    spt := &ShowPartitionsTask{
        Condition:            NewTaskCondition(ctx),
        ShowPartitionRequest: request,
        masterClient:         node.masterClient,
        result:               nil,
        ctx:                  nil,
    }

    var cancel func()
    spt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    err := func() error {
        select {
        case <-ctx.Done():
            return errors.New("show partition timeout")
        default:
            return node.sched.DdQueue.Enqueue(spt)
        }
    }()

    if err != nil {
        return &milvuspb.ShowPartitionResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = spt.WaitToFinish()
    if err != nil {
        return &milvuspb.ShowPartitionResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }
    return spt.result, nil
}

func (node *NodeImpl) CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
    log.Println("create index for: ", request)
    cit := &CreateIndexTask{
        Condition:          NewTaskCondition(ctx),
        CreateIndexRequest: request,
        masterClient:       node.masterClient,
    }

    var cancel func()
    cit.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("create index timeout")
        default:
            return node.sched.DdQueue.Enqueue(cit)
        }
    }
    err := fn()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }

    err = cit.WaitToFinish()
    if err != nil {
        return &commonpb.Status{
            ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
            Reason:    err.Error(),
        }, nil
    }

    return cit.result, nil
}
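
// A sketch of how a caller might drive CreateIndex, mirroring the test helper
// later in this commit. The collection name, field name, and the key/value
// pair below are hypothetical, not values taken from this codebase:
//
//    req := &milvuspb.CreateIndexRequest{
//        CollectionName: "demo_collection",
//        FieldName:      "vec",
//        ExtraParams: []*commonpb.KeyValuePair{
//            {Key: "index_type", Value: "IVF_FLAT"}, // hypothetical pair
//        },
//    }
//    status, err := node.CreateIndex(ctx, req)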

func (node *NodeImpl) DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
    log.Println("Describe index for: ", request)
    dit := &DescribeIndexTask{
        Condition:            NewTaskCondition(ctx),
        DescribeIndexRequest: request,
        masterClient:         node.masterClient,
    }

    var cancel func()
    dit.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("describe index timeout")
        default:
            return node.sched.DdQueue.Enqueue(dit)
        }
    }
    err := fn()
    if err != nil {
        return &milvuspb.DescribeIndexResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = dit.WaitToFinish()
    if err != nil {
        return &milvuspb.DescribeIndexResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return dit.result, nil
}

func (node *NodeImpl) GetIndexState(ctx context.Context, request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error) {
    log.Println("get index state for: ", request)
    dipt := &GetIndexStateTask{
        Condition:         NewTaskCondition(ctx),
        IndexStateRequest: request,
        masterClient:      node.masterClient,
    }

    var cancel func()
    dipt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("get index state timeout")
        default:
            return node.sched.DdQueue.Enqueue(dipt)
        }
    }
    err := fn()
    if err != nil {
        return &milvuspb.IndexStateResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = dipt.WaitToFinish()
    if err != nil {
        return &milvuspb.IndexStateResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return dipt.result, nil
}

func (node *NodeImpl) Insert(ctx context.Context, request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error) {
    span, ctx := opentracing.StartSpanFromContext(ctx, "insert grpc received")
    defer span.Finish()
    span.SetTag("collection name", request.CollectionName)
    span.SetTag("partition tag", request.PartitionName)
    log.Println("insert into: ", request.CollectionName)
    it := &InsertTask{
        ctx:       ctx,
        Condition: NewTaskCondition(ctx),
        BaseInsertTask: BaseInsertTask{
            BaseMsg: msgstream.BaseMsg{
                HashValues: request.HashKeys,
            },
            InsertRequest: internalpb2.InsertRequest{
                Base: &commonpb.MsgBase{
                    MsgType: commonpb.MsgType_kInsert,
                    MsgID:   0,
                },
                CollectionName: request.CollectionName,
                PartitionName:  request.PartitionName,
                RowData:        request.RowData,
            },
        },
        manipulationMsgStream: node.manipulationMsgStream,
        rowIDAllocator:        node.idAllocator,
    }
    if len(it.PartitionName) <= 0 {
        it.PartitionName = Params.defaultPartitionTag()
    }

    var cancel func()
    it.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("insert timeout")
        default:
            return node.sched.DmQueue.Enqueue(it)
        }
    }
    err := fn()

    if err != nil {
        return &milvuspb.InsertResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = it.WaitToFinish()
    if err != nil {
        return &milvuspb.InsertResponse{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return it.result, nil
}
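
// A sketch of a matching call site, using the same fields the tests in this
// commit exercise (collection and partition names are illustrative):
//
//    resp, err := node.Insert(ctx, &milvuspb.InsertRequest{
//        CollectionName: "demo_collection",
//        PartitionName:  "demo_partition", // empty falls back to Params.defaultPartitionTag()
//        RowData:        make([]*commonpb.Blob, 0),
//        HashKeys:       make([]uint32, 0),
//    })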

func (node *NodeImpl) Search(ctx context.Context, request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error) {
    span, ctx := opentracing.StartSpanFromContext(ctx, "search grpc received")
    defer span.Finish()
    span.SetTag("collection name", request.CollectionName)
    span.SetTag("partition tag", request.PartitionNames)
    span.SetTag("dsl", request.Dsl)
    log.Println("search: ", request.CollectionName, request.Dsl)
    qt := &SearchTask{
        ctx:       ctx,
        Condition: NewTaskCondition(ctx),
        SearchRequest: internalpb2.SearchRequest{
            Base: &commonpb.MsgBase{
                MsgType:  commonpb.MsgType_kSearch,
                SourceID: Params.ProxyID(),
            },
            ResultChannelID: strconv.FormatInt(Params.ProxyID(), 10),
        },
        queryMsgStream: node.queryMsgStream,
        resultBuf:      make(chan []*internalpb2.SearchResults),
        query:          request,
    }
    var cancel func()
    qt.ctx, cancel = context.WithTimeout(ctx, reqTimeoutInterval)
    log.Printf("grpc address of query task: %p", qt)
    defer cancel()

    fn := func() error {
        select {
        case <-ctx.Done():
            return errors.New("search timeout")
        default:
            return node.sched.DqQueue.Enqueue(qt)
        }
    }
    err := fn()
    if err != nil {
        return &milvuspb.SearchResults{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    err = qt.WaitToFinish()
    if err != nil {
        return &milvuspb.SearchResults{
            Status: &commonpb.Status{
                ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
                Reason:    err.Error(),
            },
        }, nil
    }

    return qt.result, nil
}
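
// A sketch of a matching call site; the Dsl payload below is a placeholder,
// not a query string defined anywhere in this commit:
//
//    results, err := node.Search(ctx, &milvuspb.SearchRequest{
//        CollectionName: "demo_collection",
//        Dsl:            "{...}", // hypothetical DSL payload
//    })
//
// Each proxy publishes its own ProxyID as the ResultChannelID, so query nodes
// can route result messages back to the proxy that issued the search.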

func (node *NodeImpl) Flush(ctx context.Context, request *milvuspb.FlushRequest) (*commonpb.Status, error) {
    panic("implement me")
}

func (node *NodeImpl) GetDdChannel(ctx context.Context, request *commonpb.Empty) (*milvuspb.StringResponse, error) {
    panic("implement me")
}

@ -0,0 +1,33 @@
package proxynode

// This file lists all the parameters a proxy node needs at startup.
// It is not wired in anywhere yet; it is kept as a record of the expected configuration.

type InitParams struct {
    nodeID UniqueID

    proxyServiceAddress string
    masterAddress       string
    pulsarAddress       string

    searchBufSize      int
    searchChannelNames []string

    searchResultBufSize      int
    searchResultChannelNames []string
    subTopicName             string

    // TODO: this value changes dynamically; how should it be kept in sync?
    queryNodeNum int

    insertBufSize      int
    insertChannelNames []string

    timeTickBufSize      int
    timeTickChannelNames []string

    defaultPartitionName string
    maxFieldNum          int
    maxNameLength        int
    maxDimension         int
}
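
// A hypothetical population of these fields, only to make the intended shapes
// concrete (every value below is made up, not a default from this codebase):
//
//    params := InitParams{
//        nodeID:             1,
//        masterAddress:      "localhost:53100",
//        pulsarAddress:      "localhost:6650",
//        searchBufSize:      512,
//        searchChannelNames: []string{"search-0"},
//        queryNodeNum:       2,
//    }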

@ -0,0 +1,46 @@
package proxynode

import (
    "context"

    "github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"

    "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"

    "github.com/zilliztech/milvus-distributed/internal/proto/proxypb"
)

type ProxyNode interface {
    Init() error
    Start() error
    Stop() error

    InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)

    CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
    DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
    HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
    LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error)
    ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error)
    DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
    GetCollectionStatistics(ctx context.Context, request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error)
    ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)

    CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
    DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
    HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
    LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitonRequest) (*commonpb.Status, error)
    ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionRequest) (*commonpb.Status, error)
    GetPartitionStatistics(ctx context.Context, request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error)
    ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)

    CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
    DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
    GetIndexState(ctx context.Context, request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error)

    Insert(ctx context.Context, request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error)
    Search(ctx context.Context, request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error)
    Flush(ctx context.Context, request *milvuspb.FlushRequest) (*commonpb.Status, error)

    GetDdChannel(ctx context.Context, request *commonpb.Empty) (*milvuspb.StringResponse, error)
}
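
// A compile-time check that NodeImpl satisfies this interface would make the
// contract explicit; a one-line sketch (not present in this commit):
//
//    var _ ProxyNode = (*NodeImpl)(nil)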

@ -7,14 +7,13 @@ import (
    "github.com/zilliztech/milvus-distributed/internal/errors"
    "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
    "github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
)

type Cache interface {
    Hit(collectionName string) bool
    Get(collectionName string) (*servicepb.CollectionDescription, error)
    Get(collectionName string) (*milvuspb.DescribeCollectionResponse, error)
    Sync(collectionName string) error
    Update(collectionName string, desc *servicepb.CollectionDescription) error
    Update(collectionName string, desc *milvuspb.DescribeCollectionResponse) error
    Remove(collectionName string) error
}
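
// Intended usage of the interface, as a sketch: consult Hit first, Sync on a
// miss, then Get the cached description (error handling elided):
//
//    if !globalMetaCache.Hit(collName) {
//        _ = globalMetaCache.Sync(collName)
//    }
//    desc, err := globalMetaCache.Get(collName)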

@ -22,9 +21,9 @@ var globalMetaCache Cache

type SimpleMetaCache struct {
    mu    sync.RWMutex
    metas map[string]*servicepb.CollectionDescription // collection name to schema
    metas map[string]*milvuspb.DescribeCollectionResponse // collection name to schema
    ctx   context.Context
    proxyInstance *Proxy
    proxyInstance *NodeImpl
}

func (metaCache *SimpleMetaCache) Hit(collectionName string) bool {

@ -34,7 +33,7 @@ func (metaCache *SimpleMetaCache) Hit(collectionName string) bool {
    return ok
}

func (metaCache *SimpleMetaCache) Get(collectionName string) (*servicepb.CollectionDescription, error) {
func (metaCache *SimpleMetaCache) Get(collectionName string) (*milvuspb.DescribeCollectionResponse, error) {
    metaCache.mu.RLock()
    defer metaCache.mu.RUnlock()
    schema, ok := metaCache.metas[collectionName]

@ -47,7 +46,7 @@ func (metaCache *SimpleMetaCache) Get(collectionName string) (*servicepb.Collect
func (metaCache *SimpleMetaCache) Sync(collectionName string) error {
    dct := &DescribeCollectionTask{
        Condition: NewTaskCondition(metaCache.ctx),
        DescribeCollectionRequest: milvuspb.DescribeCollectionRequest{
        DescribeCollectionRequest: &milvuspb.DescribeCollectionRequest{
            Base: &commonpb.MsgBase{
                MsgType: commonpb.MsgType_kDescribeCollection,
            },

@ -67,7 +66,7 @@ func (metaCache *SimpleMetaCache) Sync(collectionName string) error {
    return dct.WaitToFinish()
}

func (metaCache *SimpleMetaCache) Update(collectionName string, desc *servicepb.CollectionDescription) error {
func (metaCache *SimpleMetaCache) Update(collectionName string, desc *milvuspb.DescribeCollectionResponse) error {
    metaCache.mu.Lock()
    defer metaCache.mu.Unlock()

@ -88,14 +87,14 @@ func (metaCache *SimpleMetaCache) Remove(collectionName string) error {
    return nil
}

func newSimpleMetaCache(ctx context.Context, proxyInstance *Proxy) *SimpleMetaCache {
func newSimpleMetaCache(ctx context.Context, proxyInstance *NodeImpl) *SimpleMetaCache {
    return &SimpleMetaCache{
        metas: make(map[string]*servicepb.CollectionDescription),
        metas: make(map[string]*milvuspb.DescribeCollectionResponse),
        proxyInstance: proxyInstance,
        ctx:           ctx,
    }
}

func initGlobalMetaCache(ctx context.Context, proxyInstance *Proxy) {
func initGlobalMetaCache(ctx context.Context, proxyInstance *NodeImpl) {
    globalMetaCache = newSimpleMetaCache(ctx, proxyInstance)
}
@ -65,6 +65,30 @@ func (pt *ParamTable) NetworkAddress() string {
    return addr + ":" + port
}

func (pt *ParamTable) ProxyServiceAddress() string {
    addr, err := pt.Load("proxyService.address")
    if err != nil {
        panic(err)
    }

    hostName, _ := net.LookupHost(addr)
    if len(hostName) <= 0 {
        if ip := net.ParseIP(addr); ip == nil {
            panic("proxyService.address is neither a resolvable host name nor a valid IP")
        }
    }

    port, err := pt.Load("proxyService.port")
    if err != nil {
        panic(err)
    }
    _, err = strconv.Atoi(port)
    if err != nil {
        panic(err)
    }
    return addr + ":" + port
}
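
// Note the validation order above: the configured address may be a host name
// (checked with net.LookupHost) or a literal IP (checked with net.ParseIP);
// anything else panics. For example, assuming the config held these values,
// both would be accepted:
//
//    proxyService.address = "localhost"  // resolves via LookupHost
//    proxyService.address = "127.0.0.1"  // parses via ParseIP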

func (pt *ParamTable) MasterAddress() string {
    ret, err := pt.Load("_MasterAddress")
    if err != nil {

@ -1,239 +0,0 @@
package proxynode

import (
    "context"
    "fmt"
    "io"
    "log"
    "math/rand"
    "net"
    "strconv"
    "sync"
    "time"

    "github.com/opentracing/opentracing-go"
    "github.com/uber/jaeger-client-go/config"

    "google.golang.org/grpc"

    "github.com/zilliztech/milvus-distributed/internal/allocator"
    "github.com/zilliztech/milvus-distributed/internal/msgstream"
    "github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
    "github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
    "github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)

type UniqueID = typeutil.UniqueID
type Timestamp = typeutil.Timestamp

type Proxy struct {
    proxyLoopCtx    context.Context
    proxyLoopCancel func()
    proxyLoopWg     sync.WaitGroup

    grpcServer   *grpc.Server
    masterConn   *grpc.ClientConn
    masterClient masterpb.MasterServiceClient
    sched        *TaskScheduler
    tick         *timeTick

    idAllocator  *allocator.IDAllocator
    tsoAllocator *allocator.TimestampAllocator
    segAssigner  *allocator.SegIDAssigner

    manipulationMsgStream *pulsarms.PulsarMsgStream
    queryMsgStream        *pulsarms.PulsarMsgStream

    tracer opentracing.Tracer
    closer io.Closer

    // Add callback functions at different stages
    startCallbacks []func()
    closeCallbacks []func()
}

func Init() {
    Params.Init()
}

func CreateProxy(ctx context.Context) (*Proxy, error) {
    rand.Seed(time.Now().UnixNano())
    ctx1, cancel := context.WithCancel(ctx)
    var err error
    p := &Proxy{
        proxyLoopCtx:    ctx1,
        proxyLoopCancel: cancel,
    }

    cfg := &config.Configuration{
        ServiceName: "proxynode",
        Sampler: &config.SamplerConfig{
            Type:  "const",
            Param: 1,
        },
    }
    p.tracer, p.closer, err = cfg.NewTracer()
    if err != nil {
        panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
    }
    opentracing.SetGlobalTracer(p.tracer)

    pulsarAddress := Params.PulsarAddress()

    p.queryMsgStream = pulsarms.NewPulsarMsgStream(p.proxyLoopCtx, Params.MsgStreamSearchBufSize())
    p.queryMsgStream.SetPulsarClient(pulsarAddress)
    p.queryMsgStream.CreatePulsarProducers(Params.SearchChannelNames())

    masterAddr := Params.MasterAddress()
    idAllocator, err := allocator.NewIDAllocator(p.proxyLoopCtx, masterAddr)

    if err != nil {
        return nil, err
    }
    p.idAllocator = idAllocator
    p.idAllocator.PeerID = Params.ProxyID()

    tsoAllocator, err := allocator.NewTimestampAllocator(p.proxyLoopCtx, masterAddr)
    if err != nil {
        return nil, err
    }
    p.tsoAllocator = tsoAllocator
    p.tsoAllocator.PeerID = Params.ProxyID()

    segAssigner, err := allocator.NewSegIDAssigner(p.proxyLoopCtx, masterAddr, p.lastTick)
    if err != nil {
        panic(err)
    }
    p.segAssigner = segAssigner
    p.segAssigner.PeerID = Params.ProxyID()

    p.manipulationMsgStream = pulsarms.NewPulsarMsgStream(p.proxyLoopCtx, Params.MsgStreamInsertBufSize())
    p.manipulationMsgStream.SetPulsarClient(pulsarAddress)
    p.manipulationMsgStream.CreatePulsarProducers(Params.InsertChannelNames())
    repackFuncImpl := func(tsMsgs []msgstream.TsMsg, hashKeys [][]int32) (map[int32]*msgstream.MsgPack, error) {
        return insertRepackFunc(tsMsgs, hashKeys, p.segAssigner, false)
    }
    p.manipulationMsgStream.SetRepackFunc(repackFuncImpl)

    p.sched, err = NewTaskScheduler(p.proxyLoopCtx, p.idAllocator, p.tsoAllocator)
    if err != nil {
        return nil, err
    }

    p.tick = newTimeTick(p.proxyLoopCtx, p.tsoAllocator, time.Millisecond*200, p.sched.TaskDoneTest)

    return p, nil
}

// AddStartCallback adds a callback in the startServer phase.
func (p *Proxy) AddStartCallback(callbacks ...func()) {
    p.startCallbacks = append(p.startCallbacks, callbacks...)
}

func (p *Proxy) lastTick() Timestamp {
    return p.tick.LastTick()
}

func (p *Proxy) startProxy() error {
    err := p.connectMaster()
    if err != nil {
        return err
    }
    initGlobalMetaCache(p.proxyLoopCtx, p)
    p.manipulationMsgStream.Start()
    p.queryMsgStream.Start()
    p.sched.Start()
    p.idAllocator.Start()
    p.tsoAllocator.Start()
    p.segAssigner.Start()
    p.tick.Start()

    // Start callbacks
    for _, cb := range p.startCallbacks {
        cb()
    }

    p.proxyLoopWg.Add(1)
    go p.grpcLoop()

    return nil
}

// AddCloseCallback adds a callback in the Close phase.
func (p *Proxy) AddCloseCallback(callbacks ...func()) {
    p.closeCallbacks = append(p.closeCallbacks, callbacks...)
}

func (p *Proxy) grpcLoop() {
    defer p.proxyLoopWg.Done()

    lis, err := net.Listen("tcp", ":"+strconv.Itoa(Params.NetworkPort()))
    if err != nil {
        log.Fatalf("Proxy grpc server fatal error=%v", err)
    }

    p.grpcServer = grpc.NewServer()
    servicepb.RegisterMilvusServiceServer(p.grpcServer, p)
    if err = p.grpcServer.Serve(lis); err != nil {
        log.Fatalf("Proxy grpc server fatal error=%v", err)
    }
}

func (p *Proxy) connectMaster() error {
    masterAddr := Params.MasterAddress()
    log.Printf("Proxy connected to master, master_addr=%s", masterAddr)
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    conn, err := grpc.DialContext(ctx, masterAddr, grpc.WithInsecure(), grpc.WithBlock())
    if err != nil {
        log.Printf("Proxy connect to master failed, error= %v", err)
        return err
    }
    log.Printf("Proxy connected to master, master_addr=%s", masterAddr)
    p.masterConn = conn
    p.masterClient = masterpb.NewMasterServiceClient(conn)
    return nil
}

func (p *Proxy) Start() error {
    return p.startProxy()
}

func (p *Proxy) stopProxyLoop() {
    p.proxyLoopCancel()

    if p.grpcServer != nil {
        p.grpcServer.GracefulStop()
    }
    p.tsoAllocator.Close()

    p.idAllocator.Close()

    p.segAssigner.Close()

    p.sched.Close()

    p.manipulationMsgStream.Close()

    p.queryMsgStream.Close()

    p.tick.Close()

    p.proxyLoopWg.Wait()
}

// Close closes the server.
func (p *Proxy) Close() {
    p.stopProxyLoop()

    if p.closer != nil {
        p.closer.Close()
    }

    for _, cb := range p.closeCallbacks {
        cb()
    }
    log.Print("proxynode closed.")
}

@ -0,0 +1,212 @@
package proxynode

import (
    "context"
    "fmt"
    "io"
    "log"
    "math/rand"
    "sync"
    "time"

    "github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"

    "github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"

    "github.com/opentracing/opentracing-go"
    "github.com/uber/jaeger-client-go/config"

    "google.golang.org/grpc"

    "github.com/zilliztech/milvus-distributed/internal/allocator"
    "github.com/zilliztech/milvus-distributed/internal/msgstream"
    "github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
    "github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)

type UniqueID = typeutil.UniqueID
type Timestamp = typeutil.Timestamp

type NodeImpl struct {
    ctx    context.Context
    cancel func()
    wg     sync.WaitGroup

    initParams *internalpb2.InitParams
    ip         string
    port       int

    masterConn   *grpc.ClientConn
    masterClient masterpb.MasterServiceClient
    sched        *TaskScheduler
    tick         *timeTick

    idAllocator  *allocator.IDAllocator
    tsoAllocator *allocator.TimestampAllocator
    segAssigner  *allocator.SegIDAssigner

    manipulationMsgStream *pulsarms.PulsarMsgStream
    queryMsgStream        *pulsarms.PulsarMsgStream

    tracer opentracing.Tracer
    closer io.Closer

    // Add callback functions at different stages
    startCallbacks []func()
    closeCallbacks []func()
}

func CreateProxyNodeImpl(ctx context.Context) (*NodeImpl, error) {
    rand.Seed(time.Now().UnixNano())
    ctx1, cancel := context.WithCancel(ctx)
    node := &NodeImpl{
        ctx:    ctx1,
        cancel: cancel,
    }

    return node, nil
}

func (node *NodeImpl) Init() error {
    //Params.Init()

    var err error

    cfg := &config.Configuration{
        ServiceName: "proxynode",
        Sampler: &config.SamplerConfig{
            Type:  "const",
            Param: 1,
        },
    }
    node.tracer, node.closer, err = cfg.NewTracer()
    if err != nil {
        panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
    }
    opentracing.SetGlobalTracer(node.tracer)

    pulsarAddress := Params.PulsarAddress()

    node.queryMsgStream = pulsarms.NewPulsarMsgStream(node.ctx, Params.MsgStreamSearchBufSize())
    node.queryMsgStream.SetPulsarClient(pulsarAddress)
    node.queryMsgStream.CreatePulsarProducers(Params.SearchChannelNames())

    masterAddr := Params.MasterAddress()
    idAllocator, err := allocator.NewIDAllocator(node.ctx, masterAddr)

    if err != nil {
        return err
    }
    node.idAllocator = idAllocator
    node.idAllocator.PeerID = Params.ProxyID()

    tsoAllocator, err := allocator.NewTimestampAllocator(node.ctx, masterAddr)
    if err != nil {
        return err
    }
    node.tsoAllocator = tsoAllocator
    node.tsoAllocator.PeerID = Params.ProxyID()

    segAssigner, err := allocator.NewSegIDAssigner(node.ctx, masterAddr, node.lastTick)
    if err != nil {
        panic(err)
    }
    node.segAssigner = segAssigner
    node.segAssigner.PeerID = Params.ProxyID()

    node.manipulationMsgStream = pulsarms.NewPulsarMsgStream(node.ctx, Params.MsgStreamInsertBufSize())
    node.manipulationMsgStream.SetPulsarClient(pulsarAddress)
    node.manipulationMsgStream.CreatePulsarProducers(Params.InsertChannelNames())
    repackFuncImpl := func(tsMsgs []msgstream.TsMsg, hashKeys [][]int32) (map[int32]*msgstream.MsgPack, error) {
        return insertRepackFunc(tsMsgs, hashKeys, node.segAssigner, false)
    }
    node.manipulationMsgStream.SetRepackFunc(repackFuncImpl)

    node.sched, err = NewTaskScheduler(node.ctx, node.idAllocator, node.tsoAllocator)
    if err != nil {
        return err
    }

    node.tick = newTimeTick(node.ctx, node.tsoAllocator, time.Millisecond*200, node.sched.TaskDoneTest)

    return nil
}
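
// The expected lifecycle, mirroring how the tests later in this commit drive
// it: construct, Init, Start, and Stop on shutdown. A sketch:
//
//    node, err := CreateProxyNodeImpl(ctx)
//    if err == nil {
//        err = node.Init()
//    }
//    if err == nil {
//        err = node.Start()
//        defer node.Stop()
//    }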

func (node *NodeImpl) Start() error {
    err := node.connectMaster()
    if err != nil {
        return err
    }
    initGlobalMetaCache(node.ctx, node)
    node.manipulationMsgStream.Start()
    node.queryMsgStream.Start()
    node.sched.Start()
    node.idAllocator.Start()
    node.tsoAllocator.Start()
    node.segAssigner.Start()
    node.tick.Start()

    // Start callbacks
    for _, cb := range node.startCallbacks {
        cb()
    }

    return nil
}

func (node *NodeImpl) Stop() error {
    node.cancel()

    node.tsoAllocator.Close()
    node.idAllocator.Close()
    node.segAssigner.Close()
    node.sched.Close()
    node.manipulationMsgStream.Close()
    node.queryMsgStream.Close()
    node.tick.Close()

    node.wg.Wait()

    if node.closer != nil {
        err := node.closer.Close()
        if err != nil {
            return err
        }
    }

    for _, cb := range node.closeCallbacks {
        cb()
    }

    return nil
}

// AddStartCallback adds a callback in the startServer phase.
func (node *NodeImpl) AddStartCallback(callbacks ...func()) {
    node.startCallbacks = append(node.startCallbacks, callbacks...)
}

func (node *NodeImpl) lastTick() Timestamp {
    return node.tick.LastTick()
}

// AddCloseCallback adds a callback in the Close phase.
func (node *NodeImpl) AddCloseCallback(callbacks ...func()) {
    node.closeCallbacks = append(node.closeCallbacks, callbacks...)
}

func (node *NodeImpl) connectMaster() error {
    masterAddr := Params.MasterAddress()
    log.Printf("NodeImpl connecting to master, master_addr=%s", masterAddr)
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    conn, err := grpc.DialContext(ctx, masterAddr, grpc.WithInsecure(), grpc.WithBlock())
    if err != nil {
        log.Printf("NodeImpl connect to master failed, error= %v", err)
        return err
    }
    log.Printf("NodeImpl connected to master, master_addr=%s", masterAddr)
    node.masterConn = conn
    node.masterClient = masterpb.NewMasterServiceClient(conn)
    return nil
}

@ -7,15 +7,13 @@ import (
    "math/rand"
    "os"
    "strconv"
    "strings"
    "sync"
    "testing"
    "time"

    "github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
    "github.com/golang/protobuf/proto"

    "go.uber.org/zap"
    "google.golang.org/grpc"
    "github.com/zilliztech/milvus-distributed/internal/util/tsoutil"

    "github.com/stretchr/testify/assert"
    "github.com/zilliztech/milvus-distributed/internal/master"

@ -23,17 +21,19 @@ import (
    "github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
    "github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
    "github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
    "github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
    "github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
    "github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
    "go.etcd.io/etcd/clientv3"
    "go.uber.org/zap"
)

var ctx context.Context
var cancel func()
var proxyConn *grpc.ClientConn
var proxyClient servicepb.MilvusServiceClient

var proxyServer *Proxy
//var proxyConn *grpc.ClientConn
//var proxyClient milvuspb.MilvusServiceClient

var proxyServer *NodeImpl

var masterServer *master.Master

@ -86,12 +86,16 @@ func startMaster(ctx context.Context) {

func startProxy(ctx context.Context) {

    svr, err := CreateProxy(ctx)
    svr, err := CreateProxyNodeImpl(ctx)
    proxyServer = svr
    if err != nil {
        log.Print("create proxynode failed", zap.Error(err))
    }

    if err := svr.Init(); err != nil {
        log.Fatal("init proxynode failed", zap.Error(err))
    }

    // TODO: change to wait until master is ready
    if err := svr.Start(); err != nil {
        log.Fatal("run proxynode failed", zap.Error(err))

@ -104,29 +108,29 @@ func setup() {

    startMaster(ctx)
    startProxy(ctx)
    proxyAddr := Params.NetworkAddress()
    addr := strings.Split(proxyAddr, ":")
    if addr[0] == "0.0.0.0" {
        proxyAddr = "127.0.0.1:" + addr[1]
    }

    conn, err := grpc.DialContext(ctx, proxyAddr, grpc.WithInsecure(), grpc.WithBlock())
    if err != nil {
        log.Fatalf("Connect to proxynode failed, error= %v", err)
    }
    proxyConn = conn
    proxyClient = servicepb.NewMilvusServiceClient(proxyConn)
    //proxyAddr := Params.NetworkAddress()
    //addr := strings.Split(proxyAddr, ":")
    //if addr[0] == "0.0.0.0" {
    //    proxyAddr = "127.0.0.1:" + addr[1]
    //}
    //
    //conn, err := grpc.DialContext(ctx, proxyAddr, grpc.WithInsecure(), grpc.WithBlock())
    //if err != nil {
    //    log.Fatalf("Connect to proxynode failed, error= %v", err)
    //}
    //proxyConn = conn
    //proxyClient = milvuspb.NewMilvusServiceClient(proxyConn)

}

func shutdown() {
    cancel()
    masterServer.Close()
    proxyServer.Close()
    proxyServer.Stop()
}

func hasCollection(t *testing.T, name string) bool {
    resp, err := proxyClient.HasCollection(ctx, &servicepb.CollectionName{CollectionName: name})
    resp, err := proxyServer.HasCollection(ctx, &milvuspb.HasCollectionRequest{CollectionName: name})
    msg := "Has Collection " + name + " should succeed!"
    assert.Nil(t, err, msg)
    return resp.Value

@ -138,20 +142,20 @@ func createCollection(t *testing.T, name string) {
        dropCollection(t, name)
    }

    req := &schemapb.CollectionSchema{
    schema := &schemapb.CollectionSchema{
        Name:        name,
        Description: "no description",
        AutoID:      true,
        Fields:      make([]*schemapb.FieldSchema, 2),
    }
    fieldName := "Field1"
    req.Fields[0] = &schemapb.FieldSchema{
    schema.Fields[0] = &schemapb.FieldSchema{
        Name:        fieldName,
        Description: "no description",
        DataType:    schemapb.DataType_INT32,
    }
    fieldName = "vec"
    req.Fields[1] = &schemapb.FieldSchema{
    schema.Fields[1] = &schemapb.FieldSchema{
        Name:        fieldName,
        Description: "vector",
        DataType:    schemapb.DataType_VECTOR_FLOAT,

@ -168,17 +172,26 @@ func createCollection(t *testing.T, name string) {
            },
        },
    }
    resp, err := proxyClient.CreateCollection(ctx, req)

    schemaBytes, err := proto.Marshal(schema)
    if err != nil {
        panic(err)
    }
    req := &milvuspb.CreateCollectionRequest{
        CollectionName: name,
        Schema:         schemaBytes,
    }
    resp, err := proxyServer.CreateCollection(ctx, req)
    assert.Nil(t, err)
    msg := "Create Collection " + name + " should succeed!"
    assert.Equal(t, resp.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
}

func dropCollection(t *testing.T, name string) {
    req := &servicepb.CollectionName{
    req := &milvuspb.DropCollectionRequest{
        CollectionName: name,
    }
    resp, err := proxyClient.DropCollection(ctx, req)
    resp, err := proxyServer.DropCollection(ctx, req)
    assert.Nil(t, err)
    msg := "Drop Collection " + name + " should succeed! err :" + resp.Reason
    assert.Equal(t, resp.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)

@ -186,7 +199,7 @@ func dropCollection(t *testing.T, name string) {

func createIndex(t *testing.T, collectionName, fieldName string) {

    req := &servicepb.IndexParam{
    req := &milvuspb.CreateIndexRequest{
        CollectionName: collectionName,
        FieldName:      fieldName,
        ExtraParams: []*commonpb.KeyValuePair{

@ -197,7 +210,7 @@ func createIndex(t *testing.T, collectionName, fieldName string) {
        },
    }

    resp, err := proxyClient.CreateIndex(ctx, req)
    resp, err := proxyServer.CreateIndex(ctx, req)
    assert.Nil(t, err)
    msg := "Create Index for " + fieldName + " should succeed!"
    assert.Equal(t, resp.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)

@ -249,7 +262,7 @@ func TestProxy_DescribeCollection(t *testing.T) {
    createCollection(t, collectionName)
    has := hasCollection(t, collectionName)
    if has {
        resp, err := proxyClient.DescribeCollection(ctx, &servicepb.CollectionName{CollectionName: collectionName})
        resp, err := proxyServer.DescribeCollection(ctx, &milvuspb.DescribeCollectionRequest{CollectionName: collectionName})
        if err != nil {
            t.Error(err)
        }

@ -275,7 +288,7 @@ func TestProxy_ShowCollections(t *testing.T) {
    createCollection(t, collectionName)
    has := hasCollection(t, collectionName)
    if has {
        resp, err := proxyClient.ShowCollections(ctx, &commonpb.Empty{})
        resp, err := proxyServer.ShowCollections(ctx, &milvuspb.ShowCollectionRequest{})
        if err != nil {
            t.Error(err)
        }

@ -295,9 +308,9 @@ func TestProxy_Insert(t *testing.T) {
        i := i

        collectionName := "CreateCollection" + strconv.FormatInt(int64(i), 10)
        req := &servicepb.RowBatch{
        req := &milvuspb.InsertRequest{
            CollectionName: collectionName,
            PartitionTag:   "haha",
            PartitionName:  "haha",
            RowData:        make([]*commonpb.Blob, 0),
            HashKeys:       make([]uint32, 0),
        }

@ -308,7 +321,7 @@ func TestProxy_Insert(t *testing.T) {
        createCollection(t, collectionName)
        has := hasCollection(t, collectionName)
        if has {
            resp, err := proxyClient.Insert(ctx, req)
            resp, err := proxyServer.Insert(ctx, req)
            if err != nil {
                t.Error(err)
            }

@ -373,7 +386,7 @@ func TestProxy_Search(t *testing.T) {
    for i := 0; i < testNum; i++ {
        i := i
        collectionName := "CreateCollection" + strconv.FormatInt(int64(i), 10)
        req := &servicepb.Query{
        req := &milvuspb.SearchRequest{
            CollectionName: collectionName,
        }
        queryWg.Add(1)

@ -384,7 +397,7 @@ func TestProxy_Search(t *testing.T) {
            if !has {
                createCollection(t, collectionName)
            }
            resp, err := proxyClient.Search(ctx, req)
            resp, err := proxyServer.Search(ctx, req)
            t.Logf("response of search collection %v: %v", i, resp)
            assert.Nil(t, err)
            dropCollection(t, collectionName)

@ -440,36 +453,44 @@ func TestProxy_PartitionGRPC(t *testing.T) {
        go func() {
            defer wg.Done()
            tag := fmt.Sprintf("partition_%d", i)
            preq := &servicepb.PartitionName{
            preq := &milvuspb.HasPartitionRequest{
                CollectionName: collName,
                Tag:            tag,
                PartitionName:  tag,
            }

            stb, err := proxyClient.HasPartition(ctx, preq)
            stb, err := proxyServer.HasPartition(ctx, preq)
            assert.Nil(t, err)
            assert.Equal(t, stb.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
            assert.Equal(t, stb.Value, false)
|
||||
|
||||
st, err := proxyClient.CreatePartition(ctx, preq)
|
||||
cpreq := &milvuspb.CreatePartitionRequest{
|
||||
CollectionName: collName,
|
||||
PartitionName: tag,
|
||||
}
|
||||
st, err := proxyServer.CreatePartition(ctx, cpreq)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
|
||||
stb, err = proxyClient.HasPartition(ctx, preq)
|
||||
stb, err = proxyServer.HasPartition(ctx, preq)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, stb.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
assert.Equal(t, stb.Value, true)
|
||||
|
||||
//std, err := proxyClient.DescribePartition(ctx, preq)
|
||||
//std, err := proxyServer.DescribePartition(ctx, preq)
|
||||
//assert.Nil(t, err)
|
||||
//assert.Equal(t, std.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
|
||||
sts, err := proxyClient.ShowPartitions(ctx, &servicepb.CollectionName{CollectionName: collName})
|
||||
sts, err := proxyServer.ShowPartitions(ctx, &milvuspb.ShowPartitionRequest{CollectionName: collName})
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, sts.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
assert.True(t, len(sts.Values) >= 2)
|
||||
assert.True(t, len(sts.Values) <= testNum+1)
|
||||
assert.True(t, len(sts.PartitionNames) >= 2)
|
||||
assert.True(t, len(sts.PartitionNames) <= testNum+1)
|
||||
|
||||
st, err = proxyClient.DropPartition(ctx, preq)
|
||||
dpreq := &milvuspb.DropPartitionRequest{
|
||||
CollectionName: collName,
|
||||
PartitionName: tag,
|
||||
}
|
||||
st, err = proxyServer.DropPartition(ctx, dpreq)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, st.ErrorCode, commonpb.ErrorCode_SUCCESS)
|
||||
}()
|
||||
|
@ -519,11 +540,11 @@ func TestProxy_DescribeIndex(t *testing.T) {
|
|||
if i%2 == 0 {
|
||||
createIndex(t, collName, fieldName)
|
||||
}
|
||||
req := &servicepb.DescribeIndexRequest{
|
||||
req := &milvuspb.DescribeIndexRequest{
|
||||
CollectionName: collName,
|
||||
FieldName: fieldName,
|
||||
}
|
||||
resp, err := proxyClient.DescribeIndex(ctx, req)
|
||||
resp, err := proxyServer.DescribeIndex(ctx, req)
|
||||
assert.Nil(t, err)
|
||||
msg := "Describe Index for " + fieldName + "should successed!"
|
||||
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
|
||||
|
@ -533,7 +554,7 @@ func TestProxy_DescribeIndex(t *testing.T) {
|
|||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestProxy_DescribeIndexProgress(t *testing.T) {
|
||||
func TestProxy_GetIndexState(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for i := 0; i < testNum; i++ {
|
||||
|
@ -550,15 +571,15 @@ func TestProxy_DescribeIndexProgress(t *testing.T) {
|
|||
if i%2 == 0 {
|
||||
createIndex(t, collName, fieldName)
|
||||
}
|
||||
req := &servicepb.DescribeIndexProgressRequest{
|
||||
req := &milvuspb.IndexStateRequest{
|
||||
CollectionName: collName,
|
||||
FieldName: fieldName,
|
||||
}
|
||||
resp, err := proxyClient.DescribeIndexProgress(ctx, req)
|
||||
resp, err := proxyServer.GetIndexState(ctx, req)
|
||||
assert.Nil(t, err)
|
||||
msg := "Describe Index Progress for " + fieldName + "should succeed!"
|
||||
assert.Equal(t, resp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS, msg)
|
||||
assert.True(t, resp.Value)
|
||||
assert.True(t, resp.State == commonpb.IndexState_FINISHED)
|
||||
dropCollection(t, collName)
|
||||
}(&wg)
|
||||
}
|
|
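The test hunks above all follow one pattern: instead of dialing a `servicepb` gRPC client, the tests call the proxy node's service object directly, every RPC takes a dedicated `milvuspb` request type, and `CreateCollection` now carries a marshaled schema blob rather than an embedded schema message. The sketch below distills that pattern; the `milvuspb.MilvusServiceServer` interface name is an assumption based on the generated code implied by this commit, not something shown in the diff.

package main

import (
	"context"
	"errors"

	"github.com/golang/protobuf/proto"

	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
	"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)

// createDemoCollection is a minimal sketch of the new calling convention:
// marshal the schema into bytes, wrap it in a milvuspb request, and check
// the returned commonpb.Status. `server` stands in for the proxy node's
// service implementation that the tests call as `proxyServer`.
func createDemoCollection(ctx context.Context, server milvuspb.MilvusServiceServer) error {
	schema := &schemapb.CollectionSchema{
		Name:        "demo",
		Description: "no description",
		AutoID:      true,
		Fields: []*schemapb.FieldSchema{
			{Name: "Field1", DataType: schemapb.DataType_INT32},
			{Name: "vec", DataType: schemapb.DataType_VECTOR_FLOAT},
		},
	}
	// The request no longer embeds the schema message; it carries raw bytes.
	schemaBytes, err := proto.Marshal(schema)
	if err != nil {
		return err
	}
	status, err := server.CreateCollection(ctx, &milvuspb.CreateCollectionRequest{
		CollectionName: "demo",
		Schema:         schemaBytes,
	})
	if err != nil {
		return err
	}
	if status.ErrorCode != commonpb.ErrorCode_SUCCESS {
		return errors.New(status.Reason)
	}
	return nil
}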
@@ -3,6 +3,7 @@ package proxynode
 import (
 	"context"
 	"errors"
+	"fmt"
 	"log"
 	"math"
 	"strconv"
@@ -19,7 +20,6 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
-	"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
 	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
 )
 
@@ -30,6 +30,7 @@ type task interface {
 	BeginTs() Timestamp
 	EndTs() Timestamp
 	SetTs(ts Timestamp)
+	OnEnqueue() error
 	PreExecute() error
 	Execute() error
 	PostExecute() error
@@ -42,12 +43,16 @@ type BaseInsertTask = msgstream.InsertMsg
 type InsertTask struct {
 	BaseInsertTask
 	Condition
-	result                *servicepb.IntegerRangeResponse
+	result                *milvuspb.InsertResponse
 	manipulationMsgStream *pulsarms.PulsarMsgStream
 	ctx                   context.Context
 	rowIDAllocator        *allocator.IDAllocator
 }
 
+func (it *InsertTask) OnEnqueue() error {
+	return nil
+}
+
 func (it *InsertTask) SetID(uid UniqueID) {
 	it.Base.MsgID = uid
 }
@@ -79,6 +84,9 @@ func (it *InsertTask) Type() commonpb.MsgType {
 }
 
 func (it *InsertTask) PreExecute() error {
+	it.Base.MsgType = commonpb.MsgType_kInsert
+	it.Base.SourceID = Params.ProxyID()
+
 	span, ctx := opentracing.StartSpanFromContext(it.ctx, "InsertTask preExecute")
 	defer span.Finish()
 	it.ctx = ctx
@@ -158,12 +166,12 @@ func (it *InsertTask) Execute() error {
 	msgPack.Msgs[0] = tsMsg
 	err = it.manipulationMsgStream.Produce(msgPack)
 
-	it.result = &servicepb.IntegerRangeResponse{
+	it.result = &milvuspb.InsertResponse{
 		Status: &commonpb.Status{
 			ErrorCode: commonpb.ErrorCode_SUCCESS,
 		},
-		Begin: rowIDBegin,
-		End:   rowIDEnd,
+		RowIDBegin: rowIDBegin,
+		RowIDEnd:   rowIDEnd,
 	}
 	if err != nil {
 		it.result.Status.ErrorCode = commonpb.ErrorCode_UNEXPECTED_ERROR
@@ -181,13 +189,18 @@ func (it *InsertTask) PostExecute() error {
 
 type CreateCollectionTask struct {
 	Condition
-	milvuspb.CreateCollectionRequest
+	*milvuspb.CreateCollectionRequest
 	masterClient masterpb.MasterServiceClient
 	result       *commonpb.Status
 	ctx          context.Context
+	schema       *schemapb.CollectionSchema
 }
 
+func (cct *CreateCollectionTask) OnEnqueue() error {
+	cct.Base = &commonpb.MsgBase{}
+	return nil
+}
+
 func (cct *CreateCollectionTask) ID() UniqueID {
 	return cct.Base.MsgID
 }
@@ -213,6 +226,15 @@ func (cct *CreateCollectionTask) SetTs(ts Timestamp) {
 }
 
 func (cct *CreateCollectionTask) PreExecute() error {
+	cct.Base.MsgType = commonpb.MsgType_kCreateCollection
+	cct.Base.SourceID = Params.ProxyID()
+
+	cct.schema = &schemapb.CollectionSchema{}
+	err := proto.Unmarshal(cct.Schema, cct.schema)
+	if err != nil {
+		return err
+	}
+
 	if int64(len(cct.schema.Fields)) > Params.MaxFieldNum() {
 		return errors.New("maximum field's number should be limited to " + strconv.FormatInt(Params.MaxFieldNum(), 10))
 	}
@@ -271,18 +293,8 @@ func (cct *CreateCollectionTask) PreExecute() error {
 }
 
 func (cct *CreateCollectionTask) Execute() error {
-	schemaBytes, _ := proto.Marshal(cct.schema)
-	cct.CreateCollectionRequest.Schema = schemaBytes
-	resp, err := cct.masterClient.CreateCollection(cct.ctx, &cct.CreateCollectionRequest)
-	if err != nil {
-		log.Printf("create collection failed, error= %v", err)
-		cct.result = &commonpb.Status{
-			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
-			Reason:    err.Error(),
-		}
-	} else {
-		cct.result = resp
-	}
+	var err error
+	cct.result, err = cct.masterClient.CreateCollection(cct.ctx, cct.CreateCollectionRequest)
 	return err
 }
 
@@ -292,12 +304,17 @@ func (cct *CreateCollectionTask) PostExecute() error {
 
 type DropCollectionTask struct {
 	Condition
-	milvuspb.DropCollectionRequest
+	*milvuspb.DropCollectionRequest
 	masterClient masterpb.MasterServiceClient
 	result       *commonpb.Status
 	ctx          context.Context
 }
 
+func (dct *DropCollectionTask) OnEnqueue() error {
+	dct.Base = &commonpb.MsgBase{}
+	return nil
+}
+
 func (dct *DropCollectionTask) ID() UniqueID {
 	return dct.Base.MsgID
 }
@@ -323,6 +340,9 @@ func (dct *DropCollectionTask) SetTs(ts Timestamp) {
 }
 
 func (dct *DropCollectionTask) PreExecute() error {
+	dct.Base.MsgType = commonpb.MsgType_kDropCollection
+	dct.Base.SourceID = Params.ProxyID()
+
 	if err := ValidateCollectionName(dct.CollectionName); err != nil {
 		return err
 	}
@@ -330,16 +350,8 @@ func (dct *DropCollectionTask) PreExecute() error {
 }
 
 func (dct *DropCollectionTask) Execute() error {
-	resp, err := dct.masterClient.DropCollection(dct.ctx, &dct.DropCollectionRequest)
-	if err != nil {
-		log.Printf("drop collection failed, error= %v", err)
-		dct.result = &commonpb.Status{
-			ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
-			Reason:    err.Error(),
-		}
-	} else {
-		dct.result = resp
-	}
+	var err error
+	dct.result, err = dct.masterClient.DropCollection(dct.ctx, dct.DropCollectionRequest)
 	return err
 }
 
@@ -355,9 +367,13 @@ type SearchTask struct {
 	internalpb2.SearchRequest
 	queryMsgStream *pulsarms.PulsarMsgStream
 	resultBuf      chan []*internalpb2.SearchResults
-	result         *servicepb.QueryResult
+	result         *milvuspb.SearchResults
 	ctx            context.Context
-	query          *servicepb.Query
+	query          *milvuspb.SearchRequest
 }
 
+func (st *SearchTask) OnEnqueue() error {
+	return nil
+}
+
 func (st *SearchTask) ID() UniqueID {
@@ -385,6 +401,9 @@ func (st *SearchTask) SetTs(ts Timestamp) {
 }
 
 func (st *SearchTask) PreExecute() error {
+	st.Base.MsgType = commonpb.MsgType_kSearch
+	st.Base.SourceID = Params.ProxyID()
+
 	span, ctx := opentracing.StartSpanFromContext(st.ctx, "SearchTask preExecute")
 	defer span.Finish()
 	st.ctx = ctx
@@ -413,7 +432,7 @@ func (st *SearchTask) PreExecute() error {
 		return err
 	}
 
-	for _, tag := range st.query.PartitionTags {
+	for _, tag := range st.query.PartitionNames {
 		if err := ValidatePartitionTag(tag, false); err != nil {
 			span.LogFields(oplog.Error(err))
 			span.Finish()
@@ -455,11 +474,11 @@ func (st *SearchTask) Execute() error {
 	tsMsg.SetMsgContext(ctx)
 	msgPack.Msgs[0] = tsMsg
 	err := st.queryMsgStream.Produce(msgPack)
-	log.Printf("[Proxy] length of searchMsg: %v", len(msgPack.Msgs))
+	log.Printf("[NodeImpl] length of searchMsg: %v", len(msgPack.Msgs))
 	if err != nil {
 		span.LogFields(oplog.Error(err))
 		span.Finish()
-		log.Printf("[Proxy] send search request failed: %v", err)
+		log.Printf("[NodeImpl] send search request failed: %v", err)
 	}
 	return err
 }
@@ -476,6 +495,7 @@ func (st *SearchTask) PostExecute() error {
 		span.LogFields(oplog.String("wait to finish failed, timeout", "wait to finish failed, timeout"))
 		return errors.New("wait to finish failed, timeout")
 	case searchResults := <-st.resultBuf:
+		fmt.Println("searchResults: ", searchResults)
 		span.LogFields(oplog.String("receive result", "receive result"))
 		filterSearchResult := make([]*internalpb2.SearchResults, 0)
 		var filterReason string
@@ -499,7 +519,7 @@ func (st *SearchTask) PostExecute() error {
 
 		availableQueryNodeNum := len(filterSearchResult)
 		if availableQueryNodeNum <= 0 {
-			st.result = &servicepb.QueryResult{
+			st.result = &milvuspb.SearchResults{
 				Status: &commonpb.Status{
 					ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
 					Reason:    filterReason,
@@ -509,15 +529,15 @@ func (st *SearchTask) PostExecute() error {
 			return errors.New(filterReason)
 		}
 
-		hits := make([][]*servicepb.Hits, 0)
+		hits := make([][]*milvuspb.Hits, 0)
 		for _, partialSearchResult := range filterSearchResult {
 			if partialSearchResult.Hits == nil || len(partialSearchResult.Hits) <= 0 {
 				filterReason += "nq is zero\n"
 				continue
 			}
-			partialHits := make([]*servicepb.Hits, 0)
+			partialHits := make([]*milvuspb.Hits, 0)
 			for _, bs := range partialSearchResult.Hits {
-				partialHit := &servicepb.Hits{}
+				partialHit := &milvuspb.Hits{}
 				err := proto.Unmarshal(bs, partialHit)
 				if err != nil {
 					log.Println("unmarshal error")
@@ -530,7 +550,7 @@ func (st *SearchTask) PostExecute() error {
 
 		availableQueryNodeNum = len(hits)
 		if availableQueryNodeNum <= 0 {
-			st.result = &servicepb.QueryResult{
+			st.result = &milvuspb.SearchResults{
 				Status: &commonpb.Status{
 					ErrorCode: commonpb.ErrorCode_SUCCESS,
 					Reason:    filterReason,
@@ -541,7 +561,7 @@ func (st *SearchTask) PostExecute() error {
 
 		nq := len(hits[0])
 		if nq <= 0 {
-			st.result = &servicepb.QueryResult{
+			st.result = &milvuspb.SearchResults{
 				Status: &commonpb.Status{
 					ErrorCode: commonpb.ErrorCode_SUCCESS,
 					Reason:    filterReason,
@@ -560,7 +580,7 @@ func (st *SearchTask) PostExecute() error {
 		for _, hit := range hits {
 			topk = getMax(topk, len(hit[0].IDs))
 		}
-		st.result = &servicepb.QueryResult{
+		st.result = &milvuspb.SearchResults{
 			Status: &commonpb.Status{
 				ErrorCode: 0,
 			},
@@ -570,7 +590,7 @@ func (st *SearchTask) PostExecute() error {
 		const minFloat32 = -1 * float32(math.MaxFloat32)
 		for i := 0; i < nq; i++ {
 			locs := make([]int, availableQueryNodeNum)
-			reducedHits := &servicepb.Hits{
+			reducedHits := &milvuspb.Hits{
 				IDs:     make([]int64, 0),
 				RowData: make([][]byte, 0),
 				Scores:  make([]float32, 0),
@@ -626,12 +646,17 @@ func (st *SearchTask) PostExecute() error {
 
 type HasCollectionTask struct {
 	Condition
-	milvuspb.HasCollectionRequest
+	*milvuspb.HasCollectionRequest
 	masterClient masterpb.MasterServiceClient
-	result       *servicepb.BoolResponse
+	result       *milvuspb.BoolResponse
 	ctx          context.Context
 }
 
+func (hct *HasCollectionTask) OnEnqueue() error {
+	hct.Base = &commonpb.MsgBase{}
+	return nil
+}
+
 func (hct *HasCollectionTask) ID() UniqueID {
 	return hct.Base.MsgID
 }
@@ -657,6 +682,9 @@ func (hct *HasCollectionTask) SetTs(ts Timestamp) {
 }
 
 func (hct *HasCollectionTask) PreExecute() error {
+	hct.Base.MsgType = commonpb.MsgType_kHasCollection
+	hct.Base.SourceID = Params.ProxyID()
+
 	if err := ValidateCollectionName(hct.CollectionName); err != nil {
 		return err
 	}
@@ -664,22 +692,8 @@ func (hct *HasCollectionTask) PreExecute() error {
 }
 
 func (hct *HasCollectionTask) Execute() error {
-	resp, err := hct.masterClient.HasCollection(hct.ctx, &hct.HasCollectionRequest)
-	if err != nil {
-		log.Printf("has collection failed, error= %v", err)
-		hct.result = &servicepb.BoolResponse{
-			Status: &commonpb.Status{
-				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
-				Reason:    "internal error",
-			},
-			Value: false,
-		}
-	} else {
-		hct.result = &servicepb.BoolResponse{
-			Status: resp.Status,
-			Value:  resp.Value,
-		}
-	}
+	var err error
+	hct.result, err = hct.masterClient.HasCollection(hct.ctx, hct.HasCollectionRequest)
 	return err
 }
 
@@ -689,12 +703,17 @@ func (hct *HasCollectionTask) PostExecute() error {
 
 type DescribeCollectionTask struct {
 	Condition
-	milvuspb.DescribeCollectionRequest
+	*milvuspb.DescribeCollectionRequest
 	masterClient masterpb.MasterServiceClient
-	result       *servicepb.CollectionDescription
+	result       *milvuspb.DescribeCollectionResponse
 	ctx          context.Context
 }
 
+func (dct *DescribeCollectionTask) OnEnqueue() error {
+	dct.Base = &commonpb.MsgBase{}
+	return nil
+}
+
 func (dct *DescribeCollectionTask) ID() UniqueID {
 	return dct.Base.MsgID
 }
@@ -720,6 +739,9 @@ func (dct *DescribeCollectionTask) SetTs(ts Timestamp) {
 }
 
 func (dct *DescribeCollectionTask) PreExecute() error {
+	dct.Base.MsgType = commonpb.MsgType_kDescribeCollection
+	dct.Base.SourceID = Params.ProxyID()
+
 	if err := ValidateCollectionName(dct.CollectionName); err != nil {
 		return err
 	}
@@ -727,14 +749,11 @@ func (dct *DescribeCollectionTask) PreExecute() error {
 }
 
 func (dct *DescribeCollectionTask) Execute() error {
-	result, err := dct.masterClient.DescribeCollection(dct.ctx, &dct.DescribeCollectionRequest)
+	var err error
+	dct.result, err = dct.masterClient.DescribeCollection(dct.ctx, dct.DescribeCollectionRequest)
 	if err != nil {
 		return err
 	}
-	dct.result = &servicepb.CollectionDescription{
-		Status: result.Status,
-		Schema: result.Schema,
-	}
 	err = globalMetaCache.Update(dct.CollectionName, dct.result)
 	return err
 }
@@ -745,12 +764,17 @@ func (dct *DescribeCollectionTask) PostExecute() error {
 
 type ShowCollectionsTask struct {
 	Condition
-	milvuspb.ShowCollectionRequest
+	*milvuspb.ShowCollectionRequest
 	masterClient masterpb.MasterServiceClient
-	result       *servicepb.StringListResponse
+	result       *milvuspb.ShowCollectionResponse
 	ctx          context.Context
 }
 
+func (sct *ShowCollectionsTask) OnEnqueue() error {
+	sct.Base = &commonpb.MsgBase{}
+	return nil
+}
+
 func (sct *ShowCollectionsTask) ID() UniqueID {
 	return sct.Base.MsgID
 }
@@ -776,25 +800,15 @@ func (sct *ShowCollectionsTask) SetTs(ts Timestamp) {
 }
 
 func (sct *ShowCollectionsTask) PreExecute() error {
+	sct.Base.MsgType = commonpb.MsgType_kShowCollections
+	sct.Base.SourceID = Params.ProxyID()
+
 	return nil
 }
 
 func (sct *ShowCollectionsTask) Execute() error {
-	resp, err := sct.masterClient.ShowCollections(sct.ctx, &sct.ShowCollectionRequest)
-	if err != nil {
-		log.Printf("show collections failed, error= %v", err)
-		sct.result = &servicepb.StringListResponse{
-			Status: &commonpb.Status{
-				ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
-				Reason:    "internal error",
-			},
-		}
-	} else {
-		sct.result = &servicepb.StringListResponse{
-			Status: resp.Status,
-			Values: resp.CollectionNames,
-		}
-	}
+	var err error
+	sct.result, err = sct.masterClient.ShowCollections(sct.ctx, sct.ShowCollectionRequest)
 	return err
 }
 
@@ -804,12 +818,17 @@ func (sct *ShowCollectionsTask) PostExecute() error {
 
 type CreatePartitionTask struct {
 	Condition
-	milvuspb.CreatePartitionRequest
+	*milvuspb.CreatePartitionRequest
 	masterClient masterpb.MasterServiceClient
 	result       *commonpb.Status
 	ctx          context.Context
 }
 
+func (cpt *CreatePartitionTask) OnEnqueue() error {
+	cpt.Base = &commonpb.MsgBase{}
+	return nil
+}
+
 func (cpt *CreatePartitionTask) ID() UniqueID {
 	return cpt.Base.MsgID
 }
@@ -835,6 +854,9 @@ func (cpt *CreatePartitionTask) SetTs(ts Timestamp) {
 }
 
 func (cpt *CreatePartitionTask) PreExecute() error {
+	cpt.Base.MsgType = commonpb.MsgType_kCreatePartition
+	cpt.Base.SourceID = Params.ProxyID()
+
 	collName, partitionTag := cpt.CollectionName, cpt.PartitionName
 
 	if err := ValidateCollectionName(collName); err != nil {
@@ -849,7 +871,7 @@ func (cpt *CreatePartitionTask) PreExecute() error {
 }
 
 func (cpt *CreatePartitionTask) Execute() (err error) {
-	cpt.result, err = cpt.masterClient.CreatePartition(cpt.ctx, &cpt.CreatePartitionRequest)
+	cpt.result, err = cpt.masterClient.CreatePartition(cpt.ctx, cpt.CreatePartitionRequest)
 	return err
 }
 
@@ -859,12 +881,17 @@ func (cpt *CreatePartitionTask) PostExecute() error {
 
 type DropPartitionTask struct {
 	Condition
-	milvuspb.DropPartitionRequest
+	*milvuspb.DropPartitionRequest
 	masterClient masterpb.MasterServiceClient
 	result       *commonpb.Status
 	ctx          context.Context
 }
 
+func (dpt *DropPartitionTask) OnEnqueue() error {
+	dpt.Base = &commonpb.MsgBase{}
+	return nil
+}
+
 func (dpt *DropPartitionTask) ID() UniqueID {
 	return dpt.Base.MsgID
 }
@@ -890,6 +917,9 @@ func (dpt *DropPartitionTask) SetTs(ts Timestamp) {
 }
 
 func (dpt *DropPartitionTask) PreExecute() error {
+	dpt.Base.MsgType = commonpb.MsgType_kDropPartition
+	dpt.Base.SourceID = Params.ProxyID()
+
 	collName, partitionTag := dpt.CollectionName, dpt.PartitionName
 
 	if err := ValidateCollectionName(collName); err != nil {
@@ -904,7 +934,7 @@ func (dpt *DropPartitionTask) PreExecute() error {
 }
 
 func (dpt *DropPartitionTask) Execute() (err error) {
-	dpt.result, err = dpt.masterClient.DropPartition(dpt.ctx, &dpt.DropPartitionRequest)
+	dpt.result, err = dpt.masterClient.DropPartition(dpt.ctx, dpt.DropPartitionRequest)
 	return err
 }
 
@@ -914,12 +944,17 @@ func (dpt *DropPartitionTask) PostExecute() error {
 
 type HasPartitionTask struct {
 	Condition
-	milvuspb.HasPartitionRequest
+	*milvuspb.HasPartitionRequest
 	masterClient masterpb.MasterServiceClient
-	result       *servicepb.BoolResponse
+	result       *milvuspb.BoolResponse
 	ctx          context.Context
 }
 
+func (hpt *HasPartitionTask) OnEnqueue() error {
+	hpt.Base = &commonpb.MsgBase{}
+	return nil
+}
+
 func (hpt *HasPartitionTask) ID() UniqueID {
 	return hpt.Base.MsgID
 }
@@ -945,6 +980,9 @@ func (hpt *HasPartitionTask) SetTs(ts Timestamp) {
 }
 
 func (hpt *HasPartitionTask) PreExecute() error {
+	hpt.Base.MsgType = commonpb.MsgType_kHasPartition
+	hpt.Base.SourceID = Params.ProxyID()
+
 	collName, partitionTag := hpt.CollectionName, hpt.PartitionName
 
 	if err := ValidateCollectionName(collName); err != nil {
@@ -958,11 +996,7 @@ func (hpt *HasPartitionTask) PreExecute() error {
 }
 
 func (hpt *HasPartitionTask) Execute() (err error) {
-	result, err := hpt.masterClient.HasPartition(hpt.ctx, &hpt.HasPartitionRequest)
-	hpt.result = &servicepb.BoolResponse{
-		Status: result.Status,
-		Value:  result.Value,
-	}
+	hpt.result, err = hpt.masterClient.HasPartition(hpt.ctx, hpt.HasPartitionRequest)
 	return err
 }
 
@@ -974,7 +1008,7 @@ func (hpt *HasPartitionTask) PostExecute() error {
 //	Condition
 //	internalpb.DescribePartitionRequest
 //	masterClient masterpb.MasterServiceClient
-//	result       *servicepb.PartitionDescription
+//	result       *milvuspb.PartitionDescription
 //	ctx          context.Context
 //}
 //
@@ -1026,12 +1060,17 @@ func (hpt *HasPartitionTask) PostExecute() error {
 
 type ShowPartitionsTask struct {
 	Condition
-	milvuspb.ShowPartitionRequest
+	*milvuspb.ShowPartitionRequest
 	masterClient masterpb.MasterServiceClient
-	result       *servicepb.StringListResponse
+	result       *milvuspb.ShowPartitionResponse
 	ctx          context.Context
 }
 
+func (spt *ShowPartitionsTask) OnEnqueue() error {
+	spt.Base = &commonpb.MsgBase{}
+	return nil
+}
+
 func (spt *ShowPartitionsTask) ID() UniqueID {
 	return spt.Base.MsgID
 }
@@ -1057,6 +1096,9 @@ func (spt *ShowPartitionsTask) SetTs(ts Timestamp) {
 }
 
 func (spt *ShowPartitionsTask) PreExecute() error {
+	spt.Base.MsgType = commonpb.MsgType_kShowPartitions
+	spt.Base.SourceID = Params.ProxyID()
+
 	if err := ValidateCollectionName(spt.CollectionName); err != nil {
 		return err
 	}
@@ -1064,11 +1106,8 @@ func (spt *ShowPartitionsTask) PreExecute() error {
 }
 
 func (spt *ShowPartitionsTask) Execute() error {
-	result, err := spt.masterClient.ShowPartitions(spt.ctx, &spt.ShowPartitionRequest)
-	spt.result = &servicepb.StringListResponse{
-		Status: result.Status,
-		Values: result.PartitionNames,
-	}
+	var err error
+	spt.result, err = spt.masterClient.ShowPartitions(spt.ctx, spt.ShowPartitionRequest)
 	return err
 }
 
@@ -1078,12 +1117,17 @@ func (spt *ShowPartitionsTask) PostExecute() error {
 
 type CreateIndexTask struct {
 	Condition
-	milvuspb.CreateIndexRequest
+	*milvuspb.CreateIndexRequest
 	masterClient masterpb.MasterServiceClient
 	result       *commonpb.Status
 	ctx          context.Context
 }
 
+func (cit *CreateIndexTask) OnEnqueue() error {
+	cit.Base = &commonpb.MsgBase{}
+	return nil
+}
+
 func (cit *CreateIndexTask) ID() UniqueID {
 	return cit.Base.MsgID
 }
@@ -1109,6 +1153,9 @@ func (cit *CreateIndexTask) SetTs(ts Timestamp) {
 }
 
 func (cit *CreateIndexTask) PreExecute() error {
+	cit.Base.MsgType = commonpb.MsgType_kCreateIndex
+	cit.Base.SourceID = Params.ProxyID()
+
 	collName, fieldName := cit.CollectionName, cit.FieldName
 
 	if err := ValidateCollectionName(collName); err != nil {
@@ -1123,7 +1170,7 @@ func (cit *CreateIndexTask) PreExecute() error {
 }
 
 func (cit *CreateIndexTask) Execute() (err error) {
-	cit.result, err = cit.masterClient.CreateIndex(cit.ctx, &cit.CreateIndexRequest)
+	cit.result, err = cit.masterClient.CreateIndex(cit.ctx, cit.CreateIndexRequest)
 	return err
 }
 
@@ -1133,12 +1180,17 @@ func (cit *CreateIndexTask) PostExecute() error {
 
 type DescribeIndexTask struct {
 	Condition
-	milvuspb.DescribeIndexRequest
+	*milvuspb.DescribeIndexRequest
 	masterClient masterpb.MasterServiceClient
-	result       *servicepb.DescribeIndexResponse
+	result       *milvuspb.DescribeIndexResponse
 	ctx          context.Context
 }
 
+func (dit *DescribeIndexTask) OnEnqueue() error {
+	dit.Base = &commonpb.MsgBase{}
+	return nil
+}
+
 func (dit *DescribeIndexTask) ID() UniqueID {
 	return dit.Base.MsgID
 }
@@ -1164,6 +1216,9 @@ func (dit *DescribeIndexTask) SetTs(ts Timestamp) {
 }
 
 func (dit *DescribeIndexTask) PreExecute() error {
+	dit.Base.MsgType = commonpb.MsgType_kDescribeIndex
+	dit.Base.SourceID = Params.ProxyID()
+
 	collName, fieldName := dit.CollectionName, dit.FieldName
 
 	if err := ValidateCollectionName(collName); err != nil {
@@ -1178,13 +1233,8 @@ func (dit *DescribeIndexTask) PreExecute() error {
 }
 
 func (dit *DescribeIndexTask) Execute() error {
-	result, err := dit.masterClient.DescribeIndex(dit.ctx, &dit.DescribeIndexRequest)
-	dit.result = &servicepb.DescribeIndexResponse{
-		Status:         result.Status,
-		CollectionName: dit.CollectionName,
-		FieldName:      dit.FieldName,
-		ExtraParams:    result.IndexDescriptions[0].Params,
-	}
+	var err error
+	dit.result, err = dit.masterClient.DescribeIndex(dit.ctx, dit.DescribeIndexRequest)
 	return err
 }
 
@@ -1192,39 +1242,47 @@ func (dit *DescribeIndexTask) PostExecute() error {
 	return nil
 }
 
-type DescribeIndexProgressTask struct {
+type GetIndexStateTask struct {
 	Condition
-	milvuspb.IndexStateRequest
+	*milvuspb.IndexStateRequest
 	masterClient masterpb.MasterServiceClient
-	result       *servicepb.BoolResponse
+	result       *milvuspb.IndexStateResponse
 	ctx          context.Context
 }
 
-func (dipt *DescribeIndexProgressTask) ID() UniqueID {
+func (dipt *GetIndexStateTask) OnEnqueue() error {
+	dipt.Base = &commonpb.MsgBase{}
+	return nil
+}
+
+func (dipt *GetIndexStateTask) ID() UniqueID {
 	return dipt.Base.MsgID
 }
 
-func (dipt *DescribeIndexProgressTask) SetID(uid UniqueID) {
+func (dipt *GetIndexStateTask) SetID(uid UniqueID) {
	dipt.Base.MsgID = uid
 }
 
-func (dipt *DescribeIndexProgressTask) Type() commonpb.MsgType {
+func (dipt *GetIndexStateTask) Type() commonpb.MsgType {
 	return dipt.Base.MsgType
 }
 
-func (dipt *DescribeIndexProgressTask) BeginTs() Timestamp {
+func (dipt *GetIndexStateTask) BeginTs() Timestamp {
 	return dipt.Base.Timestamp
 }
 
-func (dipt *DescribeIndexProgressTask) EndTs() Timestamp {
+func (dipt *GetIndexStateTask) EndTs() Timestamp {
 	return dipt.Base.Timestamp
 }
 
-func (dipt *DescribeIndexProgressTask) SetTs(ts Timestamp) {
+func (dipt *GetIndexStateTask) SetTs(ts Timestamp) {
 	dipt.Base.Timestamp = ts
 }
 
-func (dipt *DescribeIndexProgressTask) PreExecute() error {
+func (dipt *GetIndexStateTask) PreExecute() error {
+	dipt.Base.MsgType = commonpb.MsgType_kGetIndexState
+	dipt.Base.SourceID = Params.ProxyID()
+
 	collName, fieldName := dipt.CollectionName, dipt.FieldName
 
 	if err := ValidateCollectionName(collName); err != nil {
@@ -1238,15 +1296,12 @@ func (dipt *DescribeIndexProgressTask) PreExecute() error {
 	return nil
 }
 
-func (dipt *DescribeIndexProgressTask) Execute() error {
-	result, err := dipt.masterClient.GetIndexState(dipt.ctx, &dipt.IndexStateRequest)
-	dipt.result = &servicepb.BoolResponse{
-		Status: result.Status,
-		Value:  result.State == commonpb.IndexState_FINISHED,
-	}
+func (dipt *GetIndexStateTask) Execute() error {
+	var err error
+	dipt.result, err = dipt.masterClient.GetIndexState(dipt.ctx, dipt.IndexStateRequest)
 	return err
 }
 
-func (dipt *DescribeIndexProgressTask) PostExecute() error {
+func (dipt *GetIndexStateTask) PostExecute() error {
 	return nil
 }
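Every task type in the file above is converted the same way: the task embeds a pointer to its `milvuspb` request instead of a value, allocates `Base` in the new `OnEnqueue` hook, stamps `MsgType` and `SourceID` in `PreExecute`, and stores the master's `milvuspb` response directly as its result instead of translating through `servicepb`. A minimal sketch of that shape, assuming the helpers shown in the diff (`Condition`, `Params`, `ValidateCollectionName`); `HypotheticalTask` itself is illustrative, not a type in the repository:

// HypotheticalTask mirrors the post-refactor task layout (cf. HasCollectionTask).
type HypotheticalTask struct {
	Condition
	*milvuspb.HasCollectionRequest // pointer embed: the request is shared, not copied
	masterClient masterpb.MasterServiceClient
	result       *milvuspb.BoolResponse
	ctx          context.Context
}

// OnEnqueue runs before timestamp/ID allocation, so Base must exist by then.
func (t *HypotheticalTask) OnEnqueue() error {
	t.Base = &commonpb.MsgBase{}
	return nil
}

func (t *HypotheticalTask) PreExecute() error {
	// Each task stamps its message type and the proxy's source ID up front.
	t.Base.MsgType = commonpb.MsgType_kHasCollection
	t.Base.SourceID = Params.ProxyID()
	return ValidateCollectionName(t.CollectionName)
}

func (t *HypotheticalTask) Execute() error {
	// The master's milvuspb response is stored as-is; no servicepb translation.
	var err error
	t.result, err = t.masterClient.HasCollection(t.ctx, t.HasCollectionRequest)
	return err
}

func (t *HypotheticalTask) PostExecute() error {
	return nil
}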
@@ -164,12 +164,17 @@ func (queue *BaseTaskQueue) TaskDoneTest(ts Timestamp) bool {
 }
 
 func (queue *BaseTaskQueue) Enqueue(t task) error {
+	err := t.OnEnqueue()
+	if err != nil {
+		return err
+	}
+
 	ts, _ := queue.sched.tsoAllocator.AllocOne()
-	log.Printf("[Proxy] allocate timestamp: %v", ts)
+	log.Printf("[NodeImpl] allocate timestamp: %v", ts)
 	t.SetTs(ts)
 
 	reqID, _ := queue.sched.idAllocator.AllocOne()
-	log.Printf("[Proxy] allocate reqID: %v", reqID)
+	log.Printf("[NodeImpl] allocate reqID: %v", reqID)
 	t.SetID(reqID)
 
 	return queue.addUnissuedTask(t)
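The scheduler hunk makes `OnEnqueue` the very first step of `Enqueue`, before the timestamp and request ID are allocated, which is why the task implementations above can safely allocate `Base` there. A caller-side sketch under stated assumptions: the `WaitToFinish` call is assumed to come from the embedded `Condition` helper, which this diff does not show.

// Sketch: scheduling a task after this change.
t := &HypotheticalTask{HasCollectionRequest: req}
if err := queue.Enqueue(t); err != nil { // runs t.OnEnqueue, then allocates ts and reqID
	return nil, err
}
if err := t.WaitToFinish(); err != nil { // assumed Condition helper
	return nil, err
}
return t.result, nil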
@@ -1,16 +0,0 @@
-package proxyservice
-
-import (
-	"github.com/zilliztech/milvus-distributed/internal/proto/proxypb"
-	"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
-)
-
-type ServiceBase = typeutil.Component
-
-type Interface interface {
-	ServiceBase
-	RegisterLink() (proxypb.RegisterLinkResponse, error)
-	RegisterNode(request proxypb.RegisterNodeRequest) (proxypb.RegisterNodeResponse, error)
-	// TODO: i'm sure it's not a best way to keep consistency, fix me
-	InvalidateCollectionMetaCache(request proxypb.InvalidateCollMetaCacheRequest) error
-}
@@ -49,6 +49,6 @@ func (s ProxyService) InvalidateCollectionMetaCache(request proxypb.InvalidateCo
 	panic("implement me")
 }
 
-func NewProxyServiceImpl() Interface {
+func NewProxyServiceImpl() interface{} {
 	return &ProxyService{}
 }
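One side effect worth noting: with the typed `Interface` deleted above, `NewProxyServiceImpl` now returns a bare `interface{}`, so a caller that needs the service's methods has to type-assert. A hypothetical caller, assuming the concrete `*ProxyService` type stays exported:

// Sketch: consuming the now-untyped constructor; the assertion target is an assumption.
svc := NewProxyServiceImpl()
ps, ok := svc.(*ProxyService)
if !ok {
	log.Fatal("NewProxyServiceImpl returned an unexpected type")
}
_ = ps // use the concrete service from here on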
@@ -20,7 +20,7 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
-	"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 	"github.com/zilliztech/milvus-distributed/internal/querynode/client"
 )
@@ -159,21 +159,21 @@ func TestLoadIndexService_FloatVector(t *testing.T) {
 		binary.LittleEndian.PutUint32(vec, math.Float32bits(searchRowData[i]))
 		searchRowByteData = append(searchRowByteData, vec...)
 	}
-	placeholderValue := servicepb.PlaceholderValue{
+	placeholderValue := milvuspb.PlaceholderValue{
 		Tag:    "$0",
-		Type:   servicepb.PlaceholderType_VECTOR_FLOAT,
+		Type:   milvuspb.PlaceholderType_VECTOR_FLOAT,
 		Values: [][]byte{searchRowByteData},
 	}
-	placeholderGroup := servicepb.PlaceholderGroup{
-		Placeholders: []*servicepb.PlaceholderValue{&placeholderValue},
+	placeholderGroup := milvuspb.PlaceholderGroup{
+		Placeholders: []*milvuspb.PlaceholderValue{&placeholderValue},
 	}
 	placeGroupByte, err := proto.Marshal(&placeholderGroup)
 	if err != nil {
 		log.Print("marshal placeholderGroup failed")
 	}
-	query := servicepb.Query{
+	query := milvuspb.SearchRequest{
 		CollectionName:   "collection0",
-		PartitionTags:    []string{"default"},
+		PartitionNames:   []string{"default"},
 		Dsl:              dslString,
 		PlaceholderGroup: placeGroupByte,
 	}
@@ -219,7 +219,7 @@ func TestLoadIndexService_FloatVector(t *testing.T) {
 	searchResultStream.Start()
 	searchResult := searchResultStream.Consume()
 	assert.NotNil(t, searchResult)
-	unMarshaledHit := servicepb.Hits{}
+	unMarshaledHit := milvuspb.Hits{}
 	err = proto.Unmarshal(searchResult.Msgs[0].(*msgstream.SearchResultMsg).Hits[0], &unMarshaledHit)
 	assert.Nil(t, err)
@@ -481,21 +481,21 @@ func TestLoadIndexService_BinaryVector(t *testing.T) {
 	//generate search data and send search msg
 	searchRowData := indexRowData[42*(DIM/8) : 43*(DIM/8)]
 	dslString := "{\"bool\": { \n\"vector\": {\n \"vec\": {\n \"metric_type\": \"JACCARD\", \n \"params\": {\n \"nprobe\": 10 \n},\n \"query\": \"$0\",\"topk\": 10 \n } \n } \n } \n }"
-	placeholderValue := servicepb.PlaceholderValue{
+	placeholderValue := milvuspb.PlaceholderValue{
 		Tag:    "$0",
-		Type:   servicepb.PlaceholderType_VECTOR_BINARY,
+		Type:   milvuspb.PlaceholderType_VECTOR_BINARY,
 		Values: [][]byte{searchRowData},
 	}
-	placeholderGroup := servicepb.PlaceholderGroup{
-		Placeholders: []*servicepb.PlaceholderValue{&placeholderValue},
+	placeholderGroup := milvuspb.PlaceholderGroup{
+		Placeholders: []*milvuspb.PlaceholderValue{&placeholderValue},
 	}
 	placeGroupByte, err := proto.Marshal(&placeholderGroup)
 	if err != nil {
 		log.Print("marshal placeholderGroup failed")
 	}
-	query := servicepb.Query{
+	query := milvuspb.SearchRequest{
 		CollectionName:   "collection0",
-		PartitionTags:    []string{"default"},
+		PartitionNames:   []string{"default"},
 		Dsl:              dslString,
 		PlaceholderGroup: placeGroupByte,
 	}
@@ -541,7 +541,7 @@ func TestLoadIndexService_BinaryVector(t *testing.T) {
 	searchResultStream.Start()
 	searchResult := searchResultStream.Consume()
 	assert.NotNil(t, searchResult)
-	unMarshaledHit := servicepb.Hits{}
+	unMarshaledHit := milvuspb.Hits{}
 	err = proto.Unmarshal(searchResult.Msgs[0].(*msgstream.SearchResultMsg).Hits[0], &unMarshaledHit)
 	assert.Nil(t, err)
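This test and the several that follow repeat one construction: encode the query vectors little-endian, wrap them in a `PlaceholderGroup`, marshal the group, and attach the bytes to what is now a `milvuspb.SearchRequest` (with `PartitionNames` replacing the old `PartitionTags`). A self-contained sketch of just that pattern; the DSL string is a trimmed-down assumption rather than one copied from the tests:

package main

import (
	"encoding/binary"
	"math"

	"github.com/golang/protobuf/proto"

	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
)

// buildSearchRequest packs one float vector into a marshaled PlaceholderGroup
// and returns the milvuspb.SearchRequest the refactored code paths expect.
func buildSearchRequest(vector []float32) (*milvuspb.SearchRequest, error) {
	raw := make([]byte, 0, 4*len(vector))
	buf := make([]byte, 4)
	for _, v := range vector {
		binary.LittleEndian.PutUint32(buf, math.Float32bits(v))
		raw = append(raw, buf...)
	}
	placeholderValue := &milvuspb.PlaceholderValue{
		Tag:    "$0", // matches the "$0" placeholder referenced by the DSL
		Type:   milvuspb.PlaceholderType_VECTOR_FLOAT,
		Values: [][]byte{raw},
	}
	group := &milvuspb.PlaceholderGroup{
		Placeholders: []*milvuspb.PlaceholderValue{placeholderValue},
	}
	groupBytes, err := proto.Marshal(group)
	if err != nil {
		return nil, err
	}
	return &milvuspb.SearchRequest{
		CollectionName:   "collection0",
		PartitionNames:   []string{"default"}, // formerly PartitionTags
		Dsl:              `{"bool": {"vector": {"vec": {"metric_type": "L2", "params": {"nprobe": 10}, "query": "$0", "topk": 10}}}}`,
		PlaceholderGroup: groupBytes,
	}, nil
}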
@@ -8,7 +8,7 @@ import (
 	"github.com/golang/protobuf/proto"
 	"github.com/stretchr/testify/assert"
 
-	"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 )
 
 func TestPlan_Plan(t *testing.T) {
@@ -58,14 +58,14 @@ func TestPlan_PlaceholderGroup(t *testing.T) {
 		binary.LittleEndian.PutUint32(buf, math.Float32bits(ele+float32(i*4)))
 		searchRawData2 = append(searchRawData2, buf...)
 	}
-	placeholderValue := servicepb.PlaceholderValue{
+	placeholderValue := milvuspb.PlaceholderValue{
 		Tag:    "$0",
-		Type:   servicepb.PlaceholderType_VECTOR_FLOAT,
+		Type:   milvuspb.PlaceholderType_VECTOR_FLOAT,
 		Values: [][]byte{searchRawData1, searchRawData2},
 	}
 
-	placeholderGroup := servicepb.PlaceholderGroup{
-		Placeholders: []*servicepb.PlaceholderValue{&placeholderValue},
+	placeholderGroup := milvuspb.PlaceholderGroup{
+		Placeholders: []*milvuspb.PlaceholderValue{&placeholderValue},
 	}
 
 	placeGroupByte, err := proto.Marshal(&placeholderGroup)
@@ -9,7 +9,7 @@ import (
 	"github.com/golang/protobuf/proto"
 	"github.com/stretchr/testify/assert"
 
-	"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 )
 
 func TestReduce_AllFunc(t *testing.T) {
@@ -38,14 +38,14 @@ func TestReduce_AllFunc(t *testing.T) {
 		binary.LittleEndian.PutUint32(buf, math.Float32bits(ele+float32(i*4)))
 		searchRawData2 = append(searchRawData2, buf...)
 	}
-	placeholderValue := servicepb.PlaceholderValue{
+	placeholderValue := milvuspb.PlaceholderValue{
 		Tag:    "$0",
-		Type:   servicepb.PlaceholderType_VECTOR_FLOAT,
+		Type:   milvuspb.PlaceholderType_VECTOR_FLOAT,
 		Values: [][]byte{searchRawData1, searchRawData2},
 	}
 
-	placeholderGroup := servicepb.PlaceholderGroup{
-		Placeholders: []*servicepb.PlaceholderValue{&placeholderValue},
+	placeholderGroup := milvuspb.PlaceholderGroup{
+		Placeholders: []*milvuspb.PlaceholderValue{&placeholderValue},
 	}
 
 	placeGroupByte, err := proto.Marshal(&placeholderGroup)
@@ -86,7 +86,7 @@ func TestReduce_AllFunc(t *testing.T) {
 	assert.Nil(t, err)
 	for _, len := range hitBolbSizePeerQuery {
 		marshaledHit := hitsBlob[offset : offset+len]
-		unMarshaledHit := servicepb.Hits{}
+		unMarshaledHit := milvuspb.Hits{}
 		err = proto.Unmarshal(marshaledHit, &unMarshaledHit)
 		assert.Nil(t, err)
 		log.Println("hits msg = ", unMarshaledHit)
@@ -20,7 +20,7 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/msgstream/util"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
-	"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 )
 
 type searchService struct {
@@ -233,14 +233,16 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
 
 	searchTimestamp := searchMsg.Base.Timestamp
 	var queryBlob = searchMsg.Query.Value
-	query := servicepb.Query{}
+	query := milvuspb.SearchRequest{}
 	err := proto.Unmarshal(queryBlob, &query)
 	if err != nil {
 		span.LogFields(oplog.Error(err))
 		return errors.New("unmarshal query failed")
 	}
 	collectionName := query.CollectionName
-	partitionTagsInQuery := query.PartitionTags
+	fmt.Println("[ljq collection name]: ", collectionName)
+	partitionTagsInQuery := query.PartitionNames
+	fmt.Println("[search service ljq] query: ", query)
 	collection, err := ss.replica.getCollectionByName(collectionName)
 	if err != nil {
 		span.LogFields(oplog.Error(err))
@@ -306,7 +308,7 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
 	for _, group := range placeholderGroups {
 		nq := group.getNumOfQuery()
 		nilHits := make([][]byte, nq)
-		hit := &servicepb.Hits{}
+		hit := &milvuspb.Hits{}
 		for i := 0; i < int(nq); i++ {
 			bs, err := proto.Marshal(hit)
 			if err != nil {
@@ -374,7 +376,7 @@ func (ss *searchService) search(msg msgstream.TsMsg) error {
 		hits = append(hits, hitsBlob[offset:offset+len])
 		//test code to checkout marshaled hits
 		//marshaledHit := hitsBlob[offset:offset+len]
-		//unMarshaledHit := servicepb.Hits{}
+		//unMarshaledHit := milvuspb.Hits{}
 		//err = proto.Unmarshal(marshaledHit, &unMarshaledHit)
 		//if err != nil {
 		//	return err
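The query-node side of the rename mirrors the proxy: the search path unmarshals the message blob into `milvuspb.SearchRequest` and reads `PartitionNames` where it used to read `PartitionTags`. A minimal sketch of just the decode step; `decodeSearchRequest` is an illustrative helper, and `queryBlob` stands in for `searchMsg.Query.Value`:

// decodeSearchRequest unpacks the proxy's serialized search request on the
// query node, the same way the search service above now does.
func decodeSearchRequest(queryBlob []byte) (*milvuspb.SearchRequest, error) {
	query := &milvuspb.SearchRequest{}
	if err := proto.Unmarshal(queryBlob, query); err != nil {
		return nil, errors.New("unmarshal query failed")
	}
	// The old servicepb.Query's PartitionTags field is PartitionNames here.
	_ = query.PartitionNames
	return query, nil
}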
@@ -15,7 +15,7 @@ import (
 	"github.com/zilliztech/milvus-distributed/internal/msgstream/pulsarms"
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
 	"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
-	"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 )
 
 func TestSearch_Search(t *testing.T) {
@@ -45,14 +45,14 @@ func TestSearch_Search(t *testing.T) {
 		binary.LittleEndian.PutUint32(buf, math.Float32bits(ele+float32(i*4)))
 		searchRawData2 = append(searchRawData2, buf...)
 	}
-	placeholderValue := servicepb.PlaceholderValue{
+	placeholderValue := milvuspb.PlaceholderValue{
 		Tag:    "$0",
-		Type:   servicepb.PlaceholderType_VECTOR_FLOAT,
+		Type:   milvuspb.PlaceholderType_VECTOR_FLOAT,
 		Values: [][]byte{searchRawData1, searchRawData2},
 	}
 
-	placeholderGroup := servicepb.PlaceholderGroup{
-		Placeholders: []*servicepb.PlaceholderValue{&placeholderValue},
+	placeholderGroup := milvuspb.PlaceholderGroup{
+		Placeholders: []*milvuspb.PlaceholderValue{&placeholderValue},
 	}
 
 	placeGroupByte, err := proto.Marshal(&placeholderGroup)
@@ -60,9 +60,9 @@ func TestSearch_Search(t *testing.T) {
 		log.Print("marshal placeholderGroup failed")
 	}
 
-	query := servicepb.Query{
+	query := milvuspb.SearchRequest{
 		CollectionName:   "collection0",
-		PartitionTags:    []string{"default"},
+		PartitionNames:   []string{"default"},
 		Dsl:              dslString,
 		PlaceholderGroup: placeGroupByte,
 	}
@@ -239,14 +239,14 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
 		binary.LittleEndian.PutUint32(buf, math.Float32bits(ele+float32(i*4)))
 		searchRawData2 = append(searchRawData2, buf...)
 	}
-	placeholderValue := servicepb.PlaceholderValue{
+	placeholderValue := milvuspb.PlaceholderValue{
 		Tag:    "$0",
-		Type:   servicepb.PlaceholderType_VECTOR_FLOAT,
+		Type:   milvuspb.PlaceholderType_VECTOR_FLOAT,
 		Values: [][]byte{searchRawData1, searchRawData2},
 	}
 
-	placeholderGroup := servicepb.PlaceholderGroup{
-		Placeholders: []*servicepb.PlaceholderValue{&placeholderValue},
+	placeholderGroup := milvuspb.PlaceholderGroup{
+		Placeholders: []*milvuspb.PlaceholderValue{&placeholderValue},
 	}
 
 	placeGroupByte, err := proto.Marshal(&placeholderGroup)
@@ -254,9 +254,9 @@ func TestSearch_SearchMultiSegments(t *testing.T) {
 		log.Print("marshal placeholderGroup failed")
 	}
 
-	query := servicepb.Query{
+	query := milvuspb.SearchRequest{
 		CollectionName:   "collection0",
-		PartitionTags:    []string{"default"},
+		PartitionNames:   []string{"default"},
 		Dsl:              dslString,
 		PlaceholderGroup: placeGroupByte,
 	}
@@ -10,7 +10,7 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
-	"github.com/zilliztech/milvus-distributed/internal/proto/servicepb"
+	"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
 )
 
 //-------------------------------------------------------------------------------------- constructor and destructor
@@ -347,14 +347,14 @@ func TestSegment_segmentSearch(t *testing.T) {
 		binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
 		searchRawData = append(searchRawData, buf...)
 	}
-	placeholderValue := servicepb.PlaceholderValue{
+	placeholderValue := milvuspb.PlaceholderValue{
 		Tag:    "$0",
-		Type:   servicepb.PlaceholderType_VECTOR_FLOAT,
+		Type:   milvuspb.PlaceholderType_VECTOR_FLOAT,
 		Values: [][]byte{searchRawData},
 	}
 
-	placeholderGroup := servicepb.PlaceholderGroup{
-		Placeholders: []*servicepb.PlaceholderValue{&placeholderValue},
+	placeholderGroup := milvuspb.PlaceholderGroup{
+		Placeholders: []*milvuspb.PlaceholderValue{&placeholderValue},
 	}
 
 	placeHolderGroupBlob, err := proto.Marshal(&placeholderGroup)
@@ -23,7 +23,7 @@ mkdir -p etcdpb
 mkdir -p indexcgopb
 
 #mkdir -p internalpb
-mkdir -p servicepb
+mkdir -p milvuspb
 mkdir -p masterpb
 mkdir -p indexbuilderpb
 mkdir -p writerpb
@@ -44,8 +44,6 @@ ${protoc} --go_out=plugins=grpc,paths=source_relative:./indexcgopb index_cgo_msg
 
 #${protoc} --go_out=plugins=grpc,paths=source_relative:./internalpb internal_msg.proto
 
-${protoc} --go_out=plugins=grpc,paths=source_relative:./servicepb service_msg.proto
-${protoc} --go_out=plugins=grpc,paths=source_relative:./servicepb service.proto
 ${protoc} --go_out=plugins=grpc,paths=source_relative:./masterpb master.proto
 ${protoc} --go_out=plugins=grpc,paths=source_relative:./writerpb write_node.proto
@@ -4,5 +4,5 @@ numpy==1.18.1
 pytest==5.3.4
 pytest-cov==2.8.1
 pytest-timeout==1.3.4
-pymilvus-distributed==0.0.14
+pymilvus-distributed==0.0.16
 sklearn==0.0
@@ -981,7 +981,7 @@ class TestSearchDSL(object):
         with pytest.raises(Exception) as e:
             res = connect.search(collection, query)
 
-    # PASS
+    # PASS
     def test_query_no_vector_term_only(self, connect, collection):
         '''
         method: build query without vector only term
@@ -1634,7 +1634,7 @@ class TestSearchDSLBools(object):
 
 """
 ******************************************************************
-#  The following cases are used to test `search` function
+#  The following cases are used to test `search` function
 #  with invalid collection_name, or invalid query expr
 ******************************************************************
 """