Add DataNode package

Signed-off-by: XuanYang-cn <xuan.yang@zilliz.com>
pull/4973/head^2
XuanYang-cn 2021-01-19 11:37:16 +08:00 committed by yefu.chen
parent 1ce32d8775
commit 6a13386393
70 changed files with 5421 additions and 6445 deletions


@ -19,7 +19,9 @@ msgChannel:
searchResult: "searchResult"
k2s: "k2s"
proxyTimeTick: "proxyTimeTick"
writeNodeTimeTick: "writeNodeTimeTick"
writeNodeTimeTick: "writeNodeTimeTick" # GOOSE TODO: remove this
dataNodeTimeTick: "dataNodeTimeTick"
dataNodeSegStatistics: "dataNodeSegStatistics"
# old name: statsChannels: "statistic"
queryNodeStats: "query-node-stats"
# cmd for loadIndex, flush, etc...
@ -30,7 +32,8 @@ msgChannel:
masterSubNamePrefix: "master"
proxySubNamePrefix: "proxy"
queryNodeSubNamePrefix: "queryNode"
writeNodeSubNamePrefix: "writeNode"
writeNodeSubNamePrefix: "writeNode" # GOOSE TODO: remove this
dataNodeSubNamePrefix: "dataNode"
# default channel range [0, 1)
channelRange:


@ -0,0 +1,42 @@
# Copyright (C) 2019-2020 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under the License.
dataNode:
dataSync:
flowGraph:
maxQueueLength: 1024
maxParallelism: 1024
msgStream:
dataDefinition:
recvBufSize: 64 # msgPack chan buffer size
pulsarBufSize: 64 # pulsar chan buffer size
insert:
#streamBufSize: 1024 # msgPack chan buffer size
recvBufSize: 1024 # msgPack chan buffer size
pulsarBufSize: 1024 # pulsar chan buffer size
delete:
#streamBufSize: 1024 # msgPack chan buffer size
recvBufSize: 1024 # msgPack chan buffer size
pulsarBufSize: 1024 # pulsar chan buffer size
segStatistics:
recvBufSize: 64
publishInterval: 1000 # milliseconds
flush:
# max buffer size to flush
insertBufSize: 500
ddBufSize: 20
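The new dataNode block groups the flow-graph limits, the msgstream buffer sizes, and the flush thresholds (insertBufSize rows per segment buffer, ddBufSize buffered DDL operations) in one place. As a rough illustration of the block's shape only, here is a minimal Go sketch that unmarshals the same keys with gopkg.in/yaml.v2; the struct names and file path are assumptions, and the real code reads these values through the shared ParamTable instead:

package main

import (
	"fmt"
	"io/ioutil"

	"gopkg.in/yaml.v2"
)

// dataNodeConfig mirrors the dataNode section above; the type and field
// names are illustrative only, not the real Milvus param types.
type dataNodeConfig struct {
	DataNode struct {
		DataSync struct {
			FlowGraph struct {
				MaxQueueLength int32 `yaml:"maxQueueLength"`
				MaxParallelism int32 `yaml:"maxParallelism"`
			} `yaml:"flowGraph"`
		} `yaml:"dataSync"`
		Flush struct {
			InsertBufSize int32 `yaml:"insertBufSize"`
			DdBufSize     int32 `yaml:"ddBufSize"`
		} `yaml:"flush"`
	} `yaml:"dataNode"`
}

func main() {
	raw, err := ioutil.ReadFile("config/advanced/data_node.yaml") // path is an assumption
	if err != nil {
		panic(err)
	}
	var cfg dataNodeConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.DataNode.DataSync.FlowGraph.MaxQueueLength) // 1024
	fmt.Println(cfg.DataNode.Flush.InsertBufSize)               // 500
}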


@ -13,7 +13,8 @@
nodeID: # will be deprecated after v0.2
proxyIDList: [0]
queryNodeIDList: [1, 2]
writeNodeIDList: [3]
writeNodeIDList: [3] # GOOSE TODO: remove this
dataNodeIDList: [3]
etcd:
address: localhost
@ -21,8 +22,10 @@ etcd:
rootPath: by-dev
metaSubPath: meta # metaRootPath = rootPath + '/' + metaSubPath
kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath
writeNodeSegKvSubPath: writer/segment
writeNodeDDLKvSubPath: writer/ddl
segFlushMetaSubPath: writer/segment
ddlFlushMetaSubPath: writer/ddl
writeNodeSegKvSubPath: writer/segment # GOOSE TODO: remove this
writeNodeDDLKvSubPath: writer/ddl # GOOSE TODO: remove this
segThreshold: 10000
minio:


@ -15,6 +15,12 @@
extern "C" {
#endif
enum SegmentType {
Invalid = 0,
Growing = 1,
Sealed = 2,
};
enum ErrorCode {
Success = 0,
UnexpectedException = 1,


@ -28,7 +28,7 @@ extern "C" {
#include <stdint.h>
#include "segcore/collection_c.h"
#include "common/status_c.h"
#include "common/type_c.h"
typedef void* CIndex;
typedef void* CIndexQueryResult;

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -77,9 +77,9 @@ class SegmentGrowing : public SegmentInternalInterface {
get_deleted_count() const = 0;
};
using SegmentBasePtr = std::unique_ptr<SegmentGrowing>;
using SegmentGrowingPtr = std::unique_ptr<SegmentGrowing>;
SegmentBasePtr
SegmentGrowingPtr
CreateGrowingSegment(SchemaPtr schema, int64_t chunk_size = 32 * 1024);
} // namespace segcore


@ -9,12 +9,14 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#include <memory>
#include "SegmentInterface.h"
#include "common/LoadInfo.h"
namespace milvus::segcore {
class SegmentSealed {
class SegmentSealed : public SegmentInterface {
public:
virtual const Schema&
get_schema() = 0;
@ -26,4 +28,11 @@ class SegmentSealed {
LoadFieldData(const LoadFieldDataInfo& info) = 0;
};
using SegmentSealedPtr = std::unique_ptr<SegmentSealed>;
SegmentSealedPtr
CreateSealedSegment(SchemaPtr schema, int64_t chunk_size = 32 * 1024) {
return nullptr;
}
} // namespace milvus::segcore


@ -18,7 +18,7 @@ extern "C" {
#include <stdint.h>
#include "segcore/collection_c.h"
#include "common/status_c.h"
#include "common/type_c.h"
typedef void* CLoadIndexInfo;
typedef void* CBinarySet;


@ -16,7 +16,7 @@ extern "C" {
#include <stdbool.h>
#include <stdint.h>
#include "segcore/collection_c.h"
#include "common/status_c.h"
#include "common/type_c.h"
typedef void* CPlan;
typedef void* CPlaceholderGroup;


@ -16,7 +16,7 @@ extern "C" {
#include <stdbool.h>
#include <stdint.h>
#include "segcore/segment_c.h"
#include "common/status_c.h"
#include "common/type_c.h"
typedef void* CMarshaledHits;


@ -10,29 +10,42 @@
// or implied. See the License for the specific language governing permissions and limitations under the License
#include <cstring>
#include <cstdint>
#include "segcore/SegmentGrowing.h"
#include "segcore/SegmentSealed.h"
#include "segcore/Collection.h"
#include "segcore/segment_c.h"
#include "common/LoadInfo.h"
#include "common/type_c.h"
#include <knowhere/index/vector_index/VecIndex.h>
#include <knowhere/index/vector_index/adapter/VectorAdapter.h>
#include <knowhere/index/vector_index/VecIndexFactory.h>
#include <cstdint>
#include <boost/concept_check.hpp>
#include "common/LoadInfo.h"
CSegmentBase
NewSegment(CCollection collection, uint64_t segment_id) {
CSegmentInterface
NewSegment(CCollection collection, uint64_t segment_id, int seg_type) {
auto col = (milvus::segcore::Collection*)collection;
auto segment = milvus::segcore::CreateGrowingSegment(col->get_schema());
std::unique_ptr<milvus::segcore::SegmentInterface> segment;
switch (seg_type) {
case Invalid:
std::cout << "invalid segment type" << std::endl;
break;
case Growing:
segment = milvus::segcore::CreateGrowingSegment(col->get_schema());
break;
case Sealed:
segment = milvus::segcore::CreateSealedSegment(col->get_schema());
break;
default:
std::cout << "invalid segment type" << std::endl;
}
std::cout << "create segment " << segment_id << std::endl;
return (void*)segment.release();
}
void
DeleteSegment(CSegmentBase segment) {
DeleteSegment(CSegmentInterface segment) {
auto s = (milvus::segcore::SegmentGrowing*)segment;
std::cout << "delete segment " << std::endl;
@ -48,7 +61,7 @@ DeleteQueryResult(CQueryResult query_result) {
//////////////////////////////////////////////////////////////////
CStatus
Insert(CSegmentBase c_segment,
Insert(CSegmentInterface c_segment,
int64_t reserved_offset,
int64_t size,
const int64_t* row_ids,
@ -79,15 +92,18 @@ Insert(CSegmentBase c_segment,
}
int64_t
PreInsert(CSegmentBase c_segment, int64_t size) {
PreInsert(CSegmentInterface c_segment, int64_t size) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
return segment->PreInsert(size);
}
CStatus
Delete(
CSegmentBase c_segment, int64_t reserved_offset, int64_t size, const int64_t* row_ids, const uint64_t* timestamps) {
Delete(CSegmentInterface c_segment,
int64_t reserved_offset,
int64_t size,
const int64_t* row_ids,
const uint64_t* timestamps) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
try {
@ -106,14 +122,15 @@ Delete(
}
int64_t
PreDelete(CSegmentBase c_segment, int64_t size) {
PreDelete(CSegmentInterface c_segment, int64_t size) {
// TODO: use dynamic cast, and return c status
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
return segment->PreDelete(size);
}
CStatus
Search(CSegmentBase c_segment,
Search(CSegmentInterface c_segment,
CPlan c_plan,
CPlaceholderGroup* c_placeholder_groups,
uint64_t* timestamps,
@ -153,7 +170,7 @@ Search(CSegmentBase c_segment,
}
CStatus
FillTargetEntry(CSegmentBase c_segment, CPlan c_plan, CQueryResult c_result) {
FillTargetEntry(CSegmentInterface c_segment, CPlan c_plan, CQueryResult c_result) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
auto plan = (milvus::query::Plan*)c_plan;
auto result = (milvus::QueryResult*)c_result;
@ -171,7 +188,7 @@ FillTargetEntry(CSegmentBase c_segment, CPlan c_plan, CQueryResult c_result) {
}
CStatus
UpdateSegmentIndex(CSegmentBase c_segment, CLoadIndexInfo c_load_index_info) {
UpdateSegmentIndex(CSegmentInterface c_segment, CLoadIndexInfo c_load_index_info) {
auto status = CStatus();
try {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
@ -189,26 +206,26 @@ UpdateSegmentIndex(CSegmentBase c_segment, CLoadIndexInfo c_load_index_info) {
//////////////////////////////////////////////////////////////////
int
Close(CSegmentBase c_segment) {
Close(CSegmentInterface c_segment) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
auto status = segment->Close();
return status.code();
}
int
BuildIndex(CCollection c_collection, CSegmentBase c_segment) {
BuildIndex(CCollection c_collection, CSegmentInterface c_segment) {
PanicInfo("unimplemented");
}
bool
IsOpened(CSegmentBase c_segment) {
IsOpened(CSegmentInterface c_segment) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
auto status = segment->get_state();
return status == milvus::segcore::SegmentGrowing::SegmentState::Open;
}
int64_t
GetMemoryUsageInBytes(CSegmentBase c_segment) {
GetMemoryUsageInBytes(CSegmentInterface c_segment) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
auto mem_size = segment->GetMemoryUsageInBytes();
return mem_size;
@ -217,14 +234,14 @@ GetMemoryUsageInBytes(CSegmentBase c_segment) {
//////////////////////////////////////////////////////////////////
int64_t
GetRowCount(CSegmentBase c_segment) {
GetRowCount(CSegmentInterface c_segment) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
auto row_count = segment->get_row_count();
return row_count;
}
int64_t
GetDeletedCount(CSegmentBase c_segment) {
GetDeletedCount(CSegmentInterface c_segment) {
auto segment = (milvus::segcore::SegmentGrowing*)c_segment;
auto deleted_count = segment->get_deleted_count();
return deleted_count;


@ -9,6 +9,8 @@
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
@ -17,26 +19,27 @@ extern "C" {
#include <stdlib.h>
#include <stdint.h>
#include "common/type_c.h"
#include "segcore/plan_c.h"
#include "segcore/load_index_c.h"
#include "common/status_c.h"
typedef void* CSegmentBase;
typedef void* CSegmentInterface;
typedef void* CQueryResult;
CSegmentBase
NewSegment(CCollection collection, uint64_t segment_id);
CSegmentInterface
NewSegment(CCollection collection, uint64_t segment_id, int seg_type);
void
DeleteSegment(CSegmentBase segment);
DeleteSegment(CSegmentInterface segment);
void
DeleteQueryResult(CQueryResult query_result);
//////////////////////////////////////////////////////////////////
// interface for growing segment
CStatus
Insert(CSegmentBase c_segment,
Insert(CSegmentInterface c_segment,
int64_t reserved_offset,
int64_t size,
const int64_t* row_ids,
@ -45,50 +48,65 @@ Insert(CSegmentBase c_segment,
int sizeof_per_row,
int64_t count);
// interface for growing segment
int64_t
PreInsert(CSegmentBase c_segment, int64_t size);
PreInsert(CSegmentInterface c_segment, int64_t size);
// interface for growing segment
CStatus
Delete(
CSegmentBase c_segment, int64_t reserved_offset, int64_t size, const int64_t* row_ids, const uint64_t* timestamps);
Delete(CSegmentInterface c_segment,
int64_t reserved_offset,
int64_t size,
const int64_t* row_ids,
const uint64_t* timestamps);
// interface for growing segment
int64_t
PreDelete(CSegmentBase c_segment, int64_t size);
PreDelete(CSegmentInterface c_segment, int64_t size);
// common interface
CStatus
Search(CSegmentBase c_segment,
Search(CSegmentInterface c_segment,
CPlan plan,
CPlaceholderGroup* placeholder_groups,
uint64_t* timestamps,
int num_groups,
CQueryResult* result);
// common interface
CStatus
FillTargetEntry(CSegmentBase c_segment, CPlan c_plan, CQueryResult result);
FillTargetEntry(CSegmentInterface c_segment, CPlan c_plan, CQueryResult result);
// deprecated
CStatus
UpdateSegmentIndex(CSegmentBase c_segment, CLoadIndexInfo c_load_index_info);
UpdateSegmentIndex(CSegmentInterface c_segment, CLoadIndexInfo c_load_index_info);
//////////////////////////////////////////////////////////////////
// deprecated
int
Close(CSegmentBase c_segment);
Close(CSegmentInterface c_segment);
// deprecated
int
BuildIndex(CCollection c_collection, CSegmentBase c_segment);
BuildIndex(CCollection c_collection, CSegmentInterface c_segment);
// deprecated
bool
IsOpened(CSegmentBase c_segment);
IsOpened(CSegmentInterface c_segment);
// common interface
int64_t
GetMemoryUsageInBytes(CSegmentBase c_segment);
GetMemoryUsageInBytes(CSegmentInterface c_segment);
//////////////////////////////////////////////////////////////////
// common interface
int64_t
GetRowCount(CSegmentBase c_segment);
GetRowCount(CSegmentInterface c_segment);
// ???
int64_t
GetDeletedCount(CSegmentBase c_segment);
GetDeletedCount(CSegmentInterface c_segment);
#ifdef __cplusplus
}
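NewSegment now takes a third seg_type argument whose values follow the SegmentType enum added to the common C header above (Invalid = 0, Growing = 1, Sealed = 2); the updated tests below pass 1 to keep creating growing segments. A hedged sketch of how a Go caller sitting on top of the cgo layer might mirror those constants so it never passes bare integers; the names here are assumptions, not the actual querynode wrappers:

// SegmentType mirrors the C enum SegmentType; the Go names are illustrative.
type SegmentType int32

const (
	SegmentTypeInvalid SegmentType = 0
	SegmentTypeGrowing SegmentType = 1
	SegmentTypeSealed  SegmentType = 2
)

// e.g. in a cgo wrapper: C.NewSegment(collection, C.uint64_t(segID), C.int(SegmentTypeGrowing))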


@ -52,7 +52,7 @@ TEST(CApiTest, GetCollectionNameTest) {
TEST(CApiTest, SegmentTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
DeleteCollection(collection);
DeleteSegment(segment);
}
@ -60,7 +60,7 @@ TEST(CApiTest, SegmentTest) {
TEST(CApiTest, InsertTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
std::vector<char> raw_data;
std::vector<uint64_t> timestamps;
@ -95,7 +95,7 @@ TEST(CApiTest, InsertTest) {
TEST(CApiTest, DeleteTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
long delete_row_ids[] = {100000, 100001, 100002};
unsigned long delete_timestamps[] = {0, 0, 0};
@ -112,7 +112,7 @@ TEST(CApiTest, DeleteTest) {
TEST(CApiTest, SearchTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
std::vector<char> raw_data;
std::vector<uint64_t> timestamps;
@ -201,7 +201,7 @@ TEST(CApiTest, SearchTest) {
// TEST(CApiTest, BuildIndexTest) {
// auto schema_tmp_conf = "";
// auto collection = NewCollection(schema_tmp_conf);
// auto segment = NewSegment(collection, 0);
// auto segment = NewSegment(collection, 0, 1);
//
// std::vector<char> raw_data;
// std::vector<uint64_t> timestamps;
@ -285,7 +285,7 @@ TEST(CApiTest, SearchTest) {
TEST(CApiTest, IsOpenedTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
auto is_opened = IsOpened(segment);
assert(is_opened);
@ -297,7 +297,7 @@ TEST(CApiTest, IsOpenedTest) {
TEST(CApiTest, CloseTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
auto status = Close(segment);
assert(status == 0);
@ -309,7 +309,7 @@ TEST(CApiTest, CloseTest) {
TEST(CApiTest, GetMemoryUsageInBytesTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
auto old_memory_usage_size = GetMemoryUsageInBytes(segment);
std::cout << "old_memory_usage_size = " << old_memory_usage_size << std::endl;
@ -428,7 +428,7 @@ generate_index(
// TEST(CApiTest, TestSearchPreference) {
// auto schema_tmp_conf = "";
// auto collection = NewCollection(schema_tmp_conf);
// auto segment = NewSegment(collection, 0);
// auto segment = NewSegment(collection, 0, 1);
//
// auto beg = chrono::high_resolution_clock::now();
// auto next = beg;
@ -547,7 +547,7 @@ generate_index(
TEST(CApiTest, GetDeletedCountTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
long delete_row_ids[] = {100000, 100001, 100002};
unsigned long delete_timestamps[] = {0, 0, 0};
@ -568,7 +568,7 @@ TEST(CApiTest, GetDeletedCountTest) {
TEST(CApiTest, GetRowCountTest) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
int N = 10000;
auto [raw_data, timestamps, uids] = generate_data(N);
@ -592,7 +592,7 @@ TEST(CApiTest, GetRowCountTest) {
// "\u003e\ncreate_time: 1600416765\nsegment_ids: 6873737669791618215\npartition_tags: \"default\"\n";
//
// auto collection = NewCollection(schema_string.data());
// auto segment = NewSegment(collection, 0);
// auto segment = NewSegment(collection, 0, 1);
// DeleteCollection(collection);
// DeleteSegment(segment);
//}
@ -629,7 +629,7 @@ TEST(CApiTest, MergeInto) {
TEST(CApiTest, Reduce) {
auto schema_tmp_conf = "";
auto collection = NewCollection(schema_tmp_conf);
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
std::vector<char> raw_data;
std::vector<uint64_t> timestamps;
@ -845,7 +845,7 @@ TEST(CApiTest, UpdateSegmentIndex_Without_Predicate) {
std::string schema_string = generate_collection_shema("L2", "16", false);
auto collection = NewCollection(schema_string.c_str());
auto schema = ((segcore::Collection*)collection)->get_schema();
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
auto N = 1000 * 1000;
auto dataset = DataGen(schema, N);
@ -970,7 +970,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_float_Predicate_Range) {
std::string schema_string = generate_collection_shema("L2", "16", false);
auto collection = NewCollection(schema_string.c_str());
auto schema = ((segcore::Collection*)collection)->get_schema();
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
auto N = 1000 * 1000;
auto dataset = DataGen(schema, N);
@ -1108,7 +1108,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_float_Predicate_Term) {
std::string schema_string = generate_collection_shema("L2", "16", false);
auto collection = NewCollection(schema_string.c_str());
auto schema = ((segcore::Collection*)collection)->get_schema();
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
auto N = 1000 * 1000;
auto dataset = DataGen(schema, N);
@ -1245,7 +1245,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_binary_Predicate_Range) {
std::string schema_string = generate_collection_shema("JACCARD", "16", true);
auto collection = NewCollection(schema_string.c_str());
auto schema = ((segcore::Collection*)collection)->get_schema();
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
auto N = 1000 * 1000;
auto dataset = DataGen(schema, N);
@ -1384,7 +1384,7 @@ TEST(CApiTest, UpdateSegmentIndex_With_binary_Predicate_Term) {
std::string schema_string = generate_collection_shema("JACCARD", "16", true);
auto collection = NewCollection(schema_string.c_str());
auto schema = ((segcore::Collection*)collection)->get_schema();
auto segment = NewSegment(collection, 0);
auto segment = NewSegment(collection, 0, 1);
auto N = 1000 * 1000;
auto dataset = DataGen(schema, N);


@ -0,0 +1,137 @@
package writerclient
import (
"strconv"
"github.com/golang/protobuf/proto"
"go.etcd.io/etcd/clientv3"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
pb "github.com/zilliztech/milvus-distributed/internal/proto/writerpb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)
type UniqueID = typeutil.UniqueID
type Timestamp = typeutil.Timestamp
type Client struct {
kvClient kv.TxnBase // client of a reliable kv service, i.e. etcd client
kvPrefix string
flushStream msgstream.MsgStream
}
func NewWriterClient(etcdAddress string, kvRootPath string, writeNodeSegKvSubPath string, flushStream msgstream.MsgStream) (*Client, error) {
// init kv client
etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddress}})
if err != nil {
return nil, err
}
kvClient := etcdkv.NewEtcdKV(etcdClient, kvRootPath)
return &Client{
kvClient: kvClient,
kvPrefix: writeNodeSegKvSubPath,
flushStream: flushStream,
}, nil
}
type SegmentDescription struct {
SegmentID UniqueID
IsClosed bool
OpenTime Timestamp
CloseTime Timestamp
}
func (c *Client) FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error {
baseMsg := msgstream.BaseMsg{
BeginTimestamp: 0,
EndTimestamp: 0,
HashValues: []uint32{0},
}
flushMsg := internalpb2.FlushMsg{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kFlush,
Timestamp: timestamp,
},
SegmentID: segmentID,
CollectionID: collectionID,
PartitionTag: partitionTag,
}
fMsg := &msgstream.FlushMsg{
BaseMsg: baseMsg,
FlushMsg: flushMsg,
}
msgPack := msgstream.MsgPack{}
msgPack.Msgs = append(msgPack.Msgs, fMsg)
err := c.flushStream.Produce(&msgPack)
return err
}
func (c *Client) DescribeSegment(segmentID UniqueID) (*SegmentDescription, error) {
// query etcd
ret := &SegmentDescription{
SegmentID: segmentID,
IsClosed: false,
}
key := c.kvPrefix + strconv.FormatInt(segmentID, 10)
etcdKV, ok := c.kvClient.(*etcdkv.EtcdKV)
if !ok {
return nil, errors.New("type assertion failed for etcd kv")
}
count, err := etcdKV.GetCount(key)
if err != nil {
return nil, err
}
if count <= 0 {
ret.IsClosed = false
return ret, nil
}
value, err := c.kvClient.Load(key)
if err != nil {
return ret, err
}
flushMeta := pb.SegmentFlushMeta{}
err = proto.UnmarshalText(value, &flushMeta)
if err != nil {
return ret, err
}
ret.IsClosed = flushMeta.IsClosed
ret.OpenTime = flushMeta.OpenTime
ret.CloseTime = flushMeta.CloseTime
return ret, nil
}
func (c *Client) GetInsertBinlogPaths(segmentID UniqueID) (map[int64][]string, error) {
key := c.kvPrefix + strconv.FormatInt(segmentID, 10)
value, err := c.kvClient.Load(key)
if err != nil {
return nil, err
}
flushMeta := pb.SegmentFlushMeta{}
err = proto.UnmarshalText(value, &flushMeta)
if err != nil {
return nil, err
}
ret := make(map[int64][]string)
for _, field := range flushMeta.Fields {
ret[field.FieldID] = field.BinlogPaths
}
return ret, nil
}
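Putting the client together: it pairs an etcd-backed KV (where the flushing side persists segment flush metadata) with a MsgStream used to publish flush requests. A minimal usage sketch, assuming flushStream is an already started msgstream.MsgStream producing to the flush channel; the etcd address, key paths, and the writerclient import path are placeholders, since the diff does not show the file location:

import (
	"fmt"

	"github.com/zilliztech/milvus-distributed/internal/msgstream"
	// import path for package writerclient is assumed
	writerclient "github.com/zilliztech/milvus-distributed/internal/writerclient"
)

// flushAndDescribe is a sketch of the client's round trip.
func flushAndDescribe(flushStream msgstream.MsgStream) error {
	client, err := writerclient.NewWriterClient("localhost:2379", "by-dev/kv", "writer/segment", flushStream)
	if err != nil {
		return err
	}
	// publish a manual flush request for segment 1 of collection 1
	if err := client.FlushSegment(1, 1, "default", 100); err != nil {
		return err
	}
	// read back the flush metadata written to etcd under writer/segment/1
	desc, err := client.DescribeSegment(1)
	if err != nil {
		return err
	}
	fmt.Println("segment closed:", desc.IsClosed)
	// list the insert binlog paths recorded per field
	paths, err := client.GetInsertBinlogPaths(1)
	if err != nil {
		return err
	}
	fmt.Println("insert binlog paths:", paths)
	return nil
}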


@ -0,0 +1,37 @@
package datanode
import (
"log"
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)
type Collection struct {
schema *schemapb.CollectionSchema
id UniqueID
}
func (c *Collection) Name() string {
return c.schema.Name
}
func (c *Collection) ID() UniqueID {
return c.id
}
func newCollection(collectionID UniqueID, schemaStr string) *Collection {
var schema schemapb.CollectionSchema
err := proto.UnmarshalText(schemaStr, &schema)
if err != nil {
log.Println(err)
return nil
}
var newCollection = &Collection{
schema: &schema,
id: collectionID,
}
return newCollection
}


@ -0,0 +1,104 @@
package datanode
import (
"fmt"
"strconv"
"sync"
"github.com/zilliztech/milvus-distributed/internal/errors"
)
type collectionReplica interface {
// collection
getCollectionNum() int
addCollection(collectionID UniqueID, schemaBlob string) error
removeCollection(collectionID UniqueID) error
getCollectionByID(collectionID UniqueID) (*Collection, error)
getCollectionByName(collectionName string) (*Collection, error)
hasCollection(collectionID UniqueID) bool
getSegmentStatistics() // GOOSE TODO
}
type collectionReplicaImpl struct {
mu sync.RWMutex
collections []*Collection
}
//----------------------------------------------------------------------------------------------------- collection
func (colReplica *collectionReplicaImpl) getSegmentStatistics() {
// GOOSE TODO
}
func (colReplica *collectionReplicaImpl) getCollectionNum() int {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
return len(colReplica.collections)
}
func (colReplica *collectionReplicaImpl) addCollection(collectionID UniqueID, schemaBlob string) error {
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
var newCollection = newCollection(collectionID, schemaBlob)
colReplica.collections = append(colReplica.collections, newCollection)
fmt.Println("yyy, create collection: ", newCollection.Name())
return nil
}
func (colReplica *collectionReplicaImpl) removeCollection(collectionID UniqueID) error {
fmt.Println("drop collection:", collectionID)
colReplica.mu.Lock()
defer colReplica.mu.Unlock()
tmpCollections := make([]*Collection, 0)
for _, col := range colReplica.collections {
if col.ID() != collectionID {
tmpCollections = append(tmpCollections, col)
} else {
fmt.Println("yyy, drop collection name: ", col.Name())
}
}
colReplica.collections = tmpCollections
return nil
}
func (colReplica *collectionReplicaImpl) getCollectionByID(collectionID UniqueID) (*Collection, error) {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
for _, collection := range colReplica.collections {
if collection.ID() == collectionID {
return collection, nil
}
}
return nil, errors.New("cannot find collection, id = " + strconv.FormatInt(collectionID, 10))
}
func (colReplica *collectionReplicaImpl) getCollectionByName(collectionName string) (*Collection, error) {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
for _, collection := range colReplica.collections {
if collection.Name() == collectionName {
return collection, nil
}
}
return nil, errors.New("Cannot found collection: " + collectionName)
}
func (colReplica *collectionReplicaImpl) hasCollection(collectionID UniqueID) bool {
colReplica.mu.RLock()
defer colReplica.mu.RUnlock()
for _, col := range colReplica.collections {
if col.ID() == collectionID {
return true
}
}
return false
}


@ -0,0 +1,154 @@
package datanode
import (
"testing"
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func newReplica() collectionReplica {
collections := make([]*Collection, 0)
var replica collectionReplica = &collectionReplicaImpl{
collections: collections,
}
return replica
}
func genTestCollectionMeta(collectionName string, collectionID UniqueID) *etcdpb.CollectionMeta {
fieldVec := schemapb.FieldSchema{
FieldID: UniqueID(100),
Name: "vec",
IsPrimaryKey: false,
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "16",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "metric_type",
Value: "L2",
},
},
}
fieldInt := schemapb.FieldSchema{
FieldID: UniqueID(101),
Name: "age",
IsPrimaryKey: false,
DataType: schemapb.DataType_INT32,
}
schema := schemapb.CollectionSchema{
Name: collectionName,
AutoID: true,
Fields: []*schemapb.FieldSchema{
&fieldVec, &fieldInt,
},
}
collectionMeta := etcdpb.CollectionMeta{
ID: collectionID,
Schema: &schema,
CreateTime: Timestamp(0),
SegmentIDs: []UniqueID{0},
PartitionTags: []string{"default"},
}
return &collectionMeta
}
func initTestMeta(t *testing.T, replica collectionReplica, collectionName string, collectionID UniqueID, segmentID UniqueID) {
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
require.NotEqual(t, "", schemaBlob)
var err = replica.addCollection(collectionMeta.ID, schemaBlob)
require.NoError(t, err)
collection, err := replica.getCollectionByName(collectionName)
require.NoError(t, err)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
assert.Equal(t, replica.getCollectionNum(), 1)
}
//----------------------------------------------------------------------------------------------------- collection
func TestCollectionReplica_getCollectionNum(t *testing.T) {
replica := newReplica()
initTestMeta(t, replica, "collection0", 0, 0)
assert.Equal(t, replica.getCollectionNum(), 1)
}
func TestCollectionReplica_addCollection(t *testing.T) {
replica := newReplica()
initTestMeta(t, replica, "collection0", 0, 0)
}
func TestCollectionReplica_removeCollection(t *testing.T) {
replica := newReplica()
initTestMeta(t, replica, "collection0", 0, 0)
assert.Equal(t, replica.getCollectionNum(), 1)
err := replica.removeCollection(0)
assert.NoError(t, err)
assert.Equal(t, replica.getCollectionNum(), 0)
}
func TestCollectionReplica_getCollectionByID(t *testing.T) {
replica := newReplica()
collectionName := "collection0"
collectionID := UniqueID(0)
initTestMeta(t, replica, collectionName, collectionID, 0)
targetCollection, err := replica.getCollectionByID(collectionID)
assert.NoError(t, err)
assert.NotNil(t, targetCollection)
assert.Equal(t, targetCollection.Name(), collectionName)
assert.Equal(t, targetCollection.ID(), collectionID)
}
func TestCollectionReplica_getCollectionByName(t *testing.T) {
replica := newReplica()
collectionName := "collection0"
collectionID := UniqueID(0)
initTestMeta(t, replica, collectionName, collectionID, 0)
targetCollection, err := replica.getCollectionByName(collectionName)
assert.NoError(t, err)
assert.NotNil(t, targetCollection)
assert.Equal(t, targetCollection.Name(), collectionName)
assert.Equal(t, targetCollection.ID(), collectionID)
}
func TestCollectionReplica_hasCollection(t *testing.T) {
replica := newReplica()
collectionName := "collection0"
collectionID := UniqueID(0)
initTestMeta(t, replica, collectionName, collectionID, 0)
hasCollection := replica.hasCollection(collectionID)
assert.Equal(t, hasCollection, true)
hasCollection = replica.hasCollection(UniqueID(1))
assert.Equal(t, hasCollection, false)
}
func TestCollectionReplica_freeAll(t *testing.T) {
replica := newReplica()
collectionName := "collection0"
collectionID := UniqueID(0)
initTestMeta(t, replica, collectionName, collectionID, 0)
}


@ -0,0 +1,34 @@
package datanode
import (
"testing"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
)
func TestCollection_newCollection(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
}
func TestCollection_deleteCollection(t *testing.T) {
collectionName := "collection0"
collectionID := UniqueID(0)
collectionMeta := genTestCollectionMeta(collectionName, collectionID)
schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
assert.NotEqual(t, "", schemaBlob)
collection := newCollection(collectionMeta.ID, schemaBlob)
assert.Equal(t, collection.Name(), collectionName)
assert.Equal(t, collection.ID(), collectionID)
}


@ -0,0 +1,102 @@
package datanode
import (
"context"
"fmt"
"io"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go"
"github.com/uber/jaeger-client-go/config"
)
type DataNode struct {
ctx context.Context
DataNodeID uint64
dataSyncService *dataSyncService
flushSyncService *flushSyncService
metaService *metaService
// segStatsService *statsService
replica collectionReplica
tracer opentracing.Tracer
closer io.Closer
}
func NewDataNode(ctx context.Context, dataNodeID uint64) *DataNode {
collections := make([]*Collection, 0)
var replica collectionReplica = &collectionReplicaImpl{
collections: collections,
}
node := &DataNode{
ctx: ctx,
DataNodeID: dataNodeID,
dataSyncService: nil,
flushSyncService: nil,
metaService: nil,
// segStatsService: nil,
replica: replica,
}
return node
}
func Init() {
Params.Init()
}
func (node *DataNode) Start() error {
cfg := &config.Configuration{
ServiceName: "data_node",
Sampler: &config.SamplerConfig{
Type: "const",
Param: 1,
},
Reporter: &config.ReporterConfig{
LogSpans: true,
},
}
var err error
node.tracer, node.closer, err = cfg.NewTracer(config.Logger(jaeger.StdLogger))
if err != nil {
panic(fmt.Sprintf("ERROR: cannot init Jaeger: %v\n", err))
}
opentracing.SetGlobalTracer(node.tracer)
// TODO GOOSE Init Size??
chanSize := 100
ddChan := make(chan *ddlFlushSyncMsg, chanSize)
insertChan := make(chan *insertFlushSyncMsg, chanSize)
node.flushSyncService = newFlushSyncService(node.ctx, ddChan, insertChan)
node.dataSyncService = newDataSyncService(node.ctx, ddChan, insertChan, node.replica)
node.metaService = newMetaService(node.ctx, node.replica)
// node.segStatsService = newStatsService(node.ctx, node.replica)
go node.dataSyncService.start()
go node.flushSyncService.start()
// go node.segStatsService.start()
node.metaService.start()
return nil
}
func (node *DataNode) Close() {
<-node.ctx.Done()
// close services
if node.dataSyncService != nil {
(*node.dataSyncService).close()
}
// if node.segStatsService != nil {
// (*node.segStatsService).close()
// }
if node.closer != nil {
node.closer.Close()
}
}
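A rough sketch of the node's lifecycle: Init loads Params, NewDataNode builds the in-memory replica, Start sets up tracing and wires the flush-sync, data-sync and meta services, and Close waits for the context to be cancelled before shutting the services down. The wiring below is an assumption about how a caller drives it; the real entry point lives in the cmd binaries:

import (
	"context"
	"log"

	"github.com/zilliztech/milvus-distributed/internal/datanode"
)

func runDataNode() {
	ctx, cancel := context.WithCancel(context.Background())

	datanode.Init()                      // load Params
	node := datanode.NewDataNode(ctx, 3) // node ID 3, matching dataNodeIDList above
	if err := node.Start(); err != nil {
		log.Fatal(err)
	}

	// ... serve until shutdown is requested ...

	cancel()     // Close blocks on ctx.Done(), so cancel the context first
	node.Close() // stops dataSyncService and the tracer's closer
}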


@ -0,0 +1,201 @@
package datanode
import (
"context"
"fmt"
"log"
"math/rand"
"os"
"strconv"
"testing"
"time"
// "github.com/stretchr/testify/assert"
// "github.com/stretchr/testify/require"
"go.etcd.io/etcd/clientv3"
"go.uber.org/zap"
"github.com/zilliztech/milvus-distributed/internal/master"
)
func makeNewChannelNames(names []string, suffix string) []string {
var ret []string
for _, name := range names {
ret = append(ret, name+suffix)
}
return ret
}
func refreshChannelNames() {
suffix := "-test-data-node" + strconv.FormatInt(rand.Int63n(100), 10)
Params.DDChannelNames = makeNewChannelNames(Params.DDChannelNames, suffix)
Params.InsertChannelNames = makeNewChannelNames(Params.InsertChannelNames, suffix)
}
func startMaster(ctx context.Context) {
master.Init()
etcdAddr := master.Params.EtcdAddress
metaRootPath := master.Params.MetaRootPath
etcdCli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
if err != nil {
panic(err)
}
_, err = etcdCli.Delete(context.TODO(), metaRootPath, clientv3.WithPrefix())
if err != nil {
panic(err)
}
masterPort := 53101
master.Params.Port = masterPort
svr, err := master.CreateServer(ctx)
if err != nil {
log.Print("create server failed", zap.Error(err))
}
if err := svr.Run(int64(master.Params.Port)); err != nil {
log.Fatal("run server failed", zap.Error(err))
}
fmt.Println("Waiting for server!", svr.IsServing())
Params.MasterAddress = master.Params.Address + ":" + strconv.Itoa(masterPort)
}
func TestMain(m *testing.M) {
Params.Init()
refreshChannelNames()
const ctxTimeInMillisecond = 2000
const closeWithDeadline = true
var ctx context.Context
if closeWithDeadline {
var cancel context.CancelFunc
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel = context.WithDeadline(context.Background(), d)
defer cancel()
} else {
ctx = context.Background()
}
startMaster(ctx)
// p := Params
// fmt.Println(p)
exitCode := m.Run()
os.Exit(exitCode)
}
func newDataNode() *DataNode {
const ctxTimeInMillisecond = 2000
const closeWithDeadline = true
var ctx context.Context
if closeWithDeadline {
var cancel context.CancelFunc
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel = context.WithDeadline(context.Background(), d)
go func() {
<-ctx.Done()
cancel()
}()
} else {
ctx = context.Background()
}
svr := NewDataNode(ctx, 0)
return svr
}
// func genTestCollectionMeta(collectionName string, collectionID UniqueID, isBinary bool) *etcdpb.CollectionMeta {
// var fieldVec schemapb.FieldSchema
// if isBinary {
// fieldVec = schemapb.FieldSchema{
// FieldID: UniqueID(100),
// Name: "vec",
// IsPrimaryKey: false,
// DataType: schemapb.DataType_VECTOR_BINARY,
// TypeParams: []*commonpb.KeyValuePair{
// {
// Key: "dim",
// Value: "128",
// },
// },
// IndexParams: []*commonpb.KeyValuePair{
// {
// Key: "metric_type",
// Value: "JACCARD",
// },
// },
// }
// } else {
// fieldVec = schemapb.FieldSchema{
// FieldID: UniqueID(100),
// Name: "vec",
// IsPrimaryKey: false,
// DataType: schemapb.DataType_VECTOR_FLOAT,
// TypeParams: []*commonpb.KeyValuePair{
// {
// Key: "dim",
// Value: "16",
// },
// },
// IndexParams: []*commonpb.KeyValuePair{
// {
// Key: "metric_type",
// Value: "L2",
// },
// },
// }
// }
//
// fieldInt := schemapb.FieldSchema{
// FieldID: UniqueID(101),
// Name: "age",
// IsPrimaryKey: false,
// DataType: schemapb.DataType_INT32,
// }
//
// schema := schemapb.CollectionSchema{
// Name: collectionName,
// AutoID: true,
// Fields: []*schemapb.FieldSchema{
// &fieldVec, &fieldInt,
// },
// }
//
// collectionMeta := etcdpb.CollectionMeta{
// ID: collectionID,
// Schema: &schema,
// CreateTime: Timestamp(0),
// SegmentIDs: []UniqueID{0},
// PartitionTags: []string{"default"},
// }
//
// return &collectionMeta
// }
// func initTestMeta(t *testing.T, node *DataNode, collectionName string, collectionID UniqueID, segmentID UniqueID, optional ...bool) {
// isBinary := false
// if len(optional) > 0 {
// isBinary = optional[0]
// }
// collectionMeta := genTestCollectionMeta(collectionName, collectionID, isBinary)
//
// schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
// require.NotEqual(t, "", schemaBlob)
//
// var err = node.replica.addCollection(collectionMeta.ID, schemaBlob)
// require.NoError(t, err)
//
// collection, err := node.replica.getCollectionByName(collectionName)
// require.NoError(t, err)
// require.Equal(t, collection.Name(), collectionName)
// require.Equal(t, collection.ID(), collectionID)
// require.Equal(t, node.replica.getCollectionNum(), 1)
//
// err = node.replica.addPartition(collection.ID(), collectionMeta.PartitionTags[0])
// require.NoError(t, err)
//
// err = node.replica.addSegment(segmentID, collectionMeta.PartitionTags[0], collectionID)
// require.NoError(t, err)
// }


@ -0,0 +1,116 @@
package datanode
import (
"context"
"log"
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)
type dataSyncService struct {
ctx context.Context
fg *flowgraph.TimeTickedFlowGraph
ddChan chan *ddlFlushSyncMsg
insertChan chan *insertFlushSyncMsg
replica collectionReplica
}
func newDataSyncService(ctx context.Context,
ddChan chan *ddlFlushSyncMsg, insertChan chan *insertFlushSyncMsg, replica collectionReplica) *dataSyncService {
return &dataSyncService{
ctx: ctx,
fg: nil,
ddChan: ddChan,
insertChan: insertChan,
replica: replica,
}
}
func (dsService *dataSyncService) start() {
dsService.initNodes()
dsService.fg.Start()
}
func (dsService *dataSyncService) close() {
if dsService.fg != nil {
dsService.fg.Close()
}
}
func (dsService *dataSyncService) initNodes() {
// TODO: add delete pipeline support
dsService.fg = flowgraph.NewTimeTickedFlowGraph(dsService.ctx)
var dmStreamNode Node = newDmInputNode(dsService.ctx)
var ddStreamNode Node = newDDInputNode(dsService.ctx)
var filterDmNode Node = newFilteredDmNode()
var ddNode Node = newDDNode(dsService.ctx, dsService.ddChan, dsService.replica)
var insertBufferNode Node = newInsertBufferNode(dsService.ctx, dsService.insertChan, dsService.replica)
var gcNode Node = newGCNode(dsService.replica)
dsService.fg.AddNode(&dmStreamNode)
dsService.fg.AddNode(&ddStreamNode)
dsService.fg.AddNode(&filterDmNode)
dsService.fg.AddNode(&ddNode)
dsService.fg.AddNode(&insertBufferNode)
dsService.fg.AddNode(&gcNode)
// dmStreamNode
var err = dsService.fg.SetEdges(dmStreamNode.Name(),
[]string{},
[]string{filterDmNode.Name()},
)
if err != nil {
log.Fatal("set edges failed in node:", dmStreamNode.Name())
}
// ddStreamNode
err = dsService.fg.SetEdges(ddStreamNode.Name(),
[]string{},
[]string{ddNode.Name()},
)
if err != nil {
log.Fatal("set edges failed in node:", ddStreamNode.Name())
}
// filterDmNode
err = dsService.fg.SetEdges(filterDmNode.Name(),
[]string{dmStreamNode.Name(), ddNode.Name()},
[]string{insertBufferNode.Name()},
)
if err != nil {
log.Fatal("set edges failed in node:", filterDmNode.Name())
}
// ddNode
err = dsService.fg.SetEdges(ddNode.Name(),
[]string{ddStreamNode.Name()},
[]string{filterDmNode.Name()},
)
if err != nil {
log.Fatal("set edges failed in node:", ddNode.Name())
}
// insertBufferNode
err = dsService.fg.SetEdges(insertBufferNode.Name(),
[]string{filterDmNode.Name()},
[]string{gcNode.Name()},
)
if err != nil {
log.Fatal("set edges failed in node:", insertBufferNode.Name())
}
// gcNode
err = dsService.fg.SetEdges(gcNode.Name(),
[]string{insertBufferNode.Name()},
[]string{})
if err != nil {
log.Fatal("set edges failed in node:", gcNode.Name())
}
}
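Read together, the SetEdges calls wire the flow graph like this (a reading of the code above, not a diagram from the commit): insert messages travel from the dm stream through filtering into the insert buffer and finally the GC node, while DDL messages pass through ddNode and feed the filter as its second input:

dmStreamNode -----------------+
                              +--> filterDmNode --> insertBufferNode --> gcNode
ddStreamNode --> ddNode ------+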


@ -0,0 +1,385 @@
package datanode
import (
"context"
"encoding/binary"
"math"
"strconv"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"go.etcd.io/etcd/clientv3"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)
// NOTE: start pulsar before test
func TestDataSyncService_Start(t *testing.T) {
newMeta()
const ctxTimeInMillisecond = 2000
const closeWithDeadline = true
var ctx context.Context
if closeWithDeadline {
var cancel context.CancelFunc
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel = context.WithDeadline(context.Background(), d)
defer cancel()
} else {
ctx = context.Background()
}
// init data node
pulsarURL := Params.PulsarAddress
node := NewDataNode(ctx, 0)
// test data generate
// GOOSE TODO: organize
const DIM = 2
const N = 1
var rawData []byte
// Float vector
var fvector = [DIM]float32{1, 2}
for _, ele := range fvector {
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
rawData = append(rawData, buf...)
}
// Binary vector
// Dimension of binary vector is 32
var bvector = [4]byte{255, 255, 255, 0}
for _, ele := range bvector {
bs := make([]byte, 4)
binary.LittleEndian.PutUint32(bs, uint32(ele))
rawData = append(rawData, bs...)
}
// Bool
bb := make([]byte, 4)
var fieldBool = true
var fieldBoolInt uint32
if fieldBool {
fieldBoolInt = 1
} else {
fieldBoolInt = 0
}
binary.LittleEndian.PutUint32(bb, fieldBoolInt)
rawData = append(rawData, bb...)
// int8
var dataInt8 int8 = 100
bint8 := make([]byte, 4)
binary.LittleEndian.PutUint32(bint8, uint32(dataInt8))
rawData = append(rawData, bint8...)
// int16
var dataInt16 int16 = 200
bint16 := make([]byte, 4)
binary.LittleEndian.PutUint32(bint16, uint32(dataInt16))
rawData = append(rawData, bint16...)
// int32
var dataInt32 int32 = 300
bint32 := make([]byte, 4)
binary.LittleEndian.PutUint32(bint32, uint32(dataInt32))
rawData = append(rawData, bint32...)
// int64
var dataInt64 int64 = 300
bint64 := make([]byte, 4)
binary.LittleEndian.PutUint32(bint64, uint32(dataInt64))
rawData = append(rawData, bint64...)
// float32
var datafloat float32 = 1.1
bfloat32 := make([]byte, 4)
binary.LittleEndian.PutUint32(bfloat32, math.Float32bits(datafloat))
rawData = append(rawData, bfloat32...)
// float64
var datafloat64 float64 = 2.2
bfloat64 := make([]byte, 8)
binary.LittleEndian.PutUint64(bfloat64, math.Float64bits(datafloat64))
rawData = append(rawData, bfloat64...)
timeRange := TimeRange{
timestampMin: 0,
timestampMax: math.MaxUint64,
}
// generate insert messages
const MSGLENGTH = 1
insertMessages := make([]msgstream.TsMsg, 0)
for i := 0; i < MSGLENGTH; i++ {
var msg msgstream.TsMsg = &msgstream.InsertMsg{
BaseMsg: msgstream.BaseMsg{
HashValues: []uint32{
uint32(i),
},
},
InsertRequest: internalpb2.InsertRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kInsert,
MsgID: UniqueID(0),
Timestamp: Timestamp(i + 1000),
SourceID: 0,
},
CollectionName: "col1",
PartitionName: "default",
SegmentID: UniqueID(1),
ChannelID: "0",
Timestamps: []Timestamp{Timestamp(i + 1000)},
RowIDs: []UniqueID{UniqueID(i)},
RowData: []*commonpb.Blob{
{Value: rawData},
},
},
}
insertMessages = append(insertMessages, msg)
}
msgPack := msgstream.MsgPack{
BeginTs: timeRange.timestampMin,
EndTs: timeRange.timestampMax,
Msgs: insertMessages,
}
// generate timeTick
timeTickMsgPack := msgstream.MsgPack{}
timeTickMsg := &msgstream.TimeTickMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: Timestamp(0),
EndTimestamp: Timestamp(0),
HashValues: []uint32{0},
},
TimeTickMsg: internalpb2.TimeTickMsg{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kTimeTick,
MsgID: UniqueID(0),
Timestamp: math.MaxUint64,
SourceID: 0,
},
},
}
timeTickMsgPack.Msgs = append(timeTickMsgPack.Msgs, timeTickMsg)
// pulsar produce
const receiveBufSize = 1024
insertChannels := Params.InsertChannelNames
ddChannels := Params.DDChannelNames
insertStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
insertStream.SetPulsarClient(pulsarURL)
insertStream.CreatePulsarProducers(insertChannels)
ddStream := msgstream.NewPulsarMsgStream(ctx, receiveBufSize)
ddStream.SetPulsarClient(pulsarURL)
ddStream.CreatePulsarProducers(ddChannels)
var insertMsgStream msgstream.MsgStream = insertStream
insertMsgStream.Start()
var ddMsgStream msgstream.MsgStream = ddStream
ddMsgStream.Start()
err := insertMsgStream.Produce(&msgPack)
assert.NoError(t, err)
err = insertMsgStream.Broadcast(&timeTickMsgPack)
assert.NoError(t, err)
err = ddMsgStream.Broadcast(&timeTickMsgPack)
assert.NoError(t, err)
// dataSync
replica := newReplica()
node.dataSyncService = newDataSyncService(node.ctx, nil, nil, replica)
go node.dataSyncService.start()
node.Close()
<-ctx.Done()
}
func newMeta() *etcdpb.CollectionMeta {
ETCDAddr := Params.EtcdAddress
MetaRootPath := Params.MetaRootPath
cli, _ := clientv3.New(clientv3.Config{
Endpoints: []string{ETCDAddr},
DialTimeout: 5 * time.Second,
})
kvClient := etcdkv.NewEtcdKV(cli, MetaRootPath)
defer kvClient.Close()
sch := schemapb.CollectionSchema{
Name: "col1",
Description: "test collection",
AutoID: false,
Fields: []*schemapb.FieldSchema{
{
FieldID: 1,
Name: "Timestamp",
Description: "test collection filed 1",
DataType: schemapb.DataType_INT64,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "col1_f1_tk2",
Value: "col1_f1_tv2",
},
},
},
{
FieldID: 0,
Name: "RowID",
Description: "test collection filed 1",
DataType: schemapb.DataType_INT64,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "col1_f1_tk2",
Value: "col1_f1_tv2",
},
},
},
{
FieldID: 100,
Name: "col1_f1",
Description: "test collection filed 1",
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "2",
},
{
Key: "col1_f1_tk2",
Value: "col1_f1_tv2",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "col1_f1_ik1",
Value: "col1_f1_iv1",
},
{
Key: "col1_f1_ik2",
Value: "col1_f1_iv2",
},
},
},
{
FieldID: 101,
Name: "col1_f2",
Description: "test collection filed 2",
DataType: schemapb.DataType_VECTOR_BINARY,
TypeParams: []*commonpb.KeyValuePair{
{
Key: "dim",
Value: "32",
},
{
Key: "col1_f2_tk2",
Value: "col1_f2_tv2",
},
},
IndexParams: []*commonpb.KeyValuePair{
{
Key: "col1_f2_ik1",
Value: "col1_f2_iv1",
},
{
Key: "col1_f2_ik2",
Value: "col1_f2_iv2",
},
},
},
{
FieldID: 102,
Name: "col1_f3",
Description: "test collection filed 3",
DataType: schemapb.DataType_BOOL,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 103,
Name: "col1_f4",
Description: "test collection filed 3",
DataType: schemapb.DataType_INT8,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 104,
Name: "col1_f5",
Description: "test collection filed 3",
DataType: schemapb.DataType_INT16,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 105,
Name: "col1_f6",
Description: "test collection filed 3",
DataType: schemapb.DataType_INT32,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 106,
Name: "col1_f7",
Description: "test collection filed 3",
DataType: schemapb.DataType_INT64,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 107,
Name: "col1_f8",
Description: "test collection filed 3",
DataType: schemapb.DataType_FLOAT,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
{
FieldID: 108,
Name: "col1_f9",
Description: "test collection filed 3",
DataType: schemapb.DataType_DOUBLE,
TypeParams: []*commonpb.KeyValuePair{},
IndexParams: []*commonpb.KeyValuePair{},
},
},
}
collection := etcdpb.CollectionMeta{
ID: UniqueID(1),
Schema: &sch,
CreateTime: Timestamp(1),
SegmentIDs: make([]UniqueID, 0),
PartitionTags: make([]string, 0),
}
collBytes := proto.MarshalTextString(&collection)
kvClient.Save("/collection/"+strconv.FormatInt(collection.ID, 10), collBytes)
segSch := etcdpb.SegmentMeta{
SegmentID: UniqueID(1),
CollectionID: UniqueID(1),
}
segBytes := proto.MarshalTextString(&segSch)
kvClient.Save("/segment/"+strconv.FormatInt(segSch.SegmentID, 10), segBytes)
return &collection
}


@ -0,0 +1,418 @@
package datanode
import (
"context"
"errors"
"log"
"path"
"sort"
"strconv"
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/allocator"
"github.com/zilliztech/milvus-distributed/internal/kv"
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/storage"
)
type ddNode struct {
BaseNode
ddMsg *ddMsg
ddRecords *ddRecords
ddBuffer *ddBuffer
outCh chan *ddlFlushSyncMsg // for flush sync
idAllocator *allocator.IDAllocator
kv kv.Base
replica collectionReplica
}
type ddData struct {
ddRequestString []string
timestamps []Timestamp
eventTypes []storage.EventTypeCode
}
type ddBuffer struct {
ddData map[UniqueID]*ddData // collection ID
maxSize int32
}
type ddRecords struct {
collectionRecords map[UniqueID]interface{}
partitionRecords map[UniqueID]interface{}
}
func (d *ddBuffer) size() int32 {
if d.ddData == nil || len(d.ddData) <= 0 {
return 0
}
var size int32 = 0
for _, data := range d.ddData {
size += int32(len(data.ddRequestString))
}
return size
}
func (d *ddBuffer) full() bool {
return d.size() >= d.maxSize
}
func (ddNode *ddNode) Name() string {
return "ddNode"
}
func (ddNode *ddNode) Operate(in []*Msg) []*Msg {
//fmt.Println("Do filterDdNode operation")
if len(in) != 1 {
log.Println("Invalid operate message input in ddNode, input length = ", len(in))
// TODO: add error handling
}
msMsg, ok := (*in[0]).(*MsgStreamMsg)
if !ok {
log.Println("type assertion failed for MsgStreamMsg")
// TODO: add error handling
}
var ddMsg = ddMsg{
collectionRecords: make(map[string][]metaOperateRecord),
partitionRecords: make(map[string][]metaOperateRecord),
timeRange: TimeRange{
timestampMin: msMsg.TimestampMin(),
timestampMax: msMsg.TimestampMax(),
},
flushMessages: make([]*msgstream.FlushMsg, 0),
}
ddNode.ddMsg = &ddMsg
gcRecord := gcRecord{
collections: make([]UniqueID, 0),
}
ddNode.ddMsg.gcRecord = &gcRecord
// sort tsMessages
tsMessages := msMsg.TsMessages()
sort.Slice(tsMessages,
func(i, j int) bool {
return tsMessages[i].BeginTs() < tsMessages[j].BeginTs()
})
// do dd tasks
for _, msg := range tsMessages {
switch msg.Type() {
case commonpb.MsgType_kCreateCollection:
ddNode.createCollection(msg.(*msgstream.CreateCollectionMsg))
case commonpb.MsgType_kDropCollection:
ddNode.dropCollection(msg.(*msgstream.DropCollectionMsg))
case commonpb.MsgType_kCreatePartition:
ddNode.createPartition(msg.(*msgstream.CreatePartitionMsg))
case commonpb.MsgType_kDropPartition:
ddNode.dropPartition(msg.(*msgstream.DropPartitionMsg))
case commonpb.MsgType_kFlush:
fMsg := msg.(*msgstream.FlushMsg)
flushSegID := fMsg.SegmentID
ddMsg.flushMessages = append(ddMsg.flushMessages, fMsg)
ddNode.flush()
log.Println(".. manual flush completed ...")
ddlFlushMsg := &ddlFlushSyncMsg{
flushCompleted: true,
ddlBinlogPathMsg: ddlBinlogPathMsg{
segID: flushSegID,
},
}
ddNode.outCh <- ddlFlushMsg
default:
log.Println("Non supporting message type:", msg.Type())
}
}
// generate binlog
if ddNode.ddBuffer.full() {
ddNode.flush()
}
var res Msg = ddNode.ddMsg
return []*Msg{&res}
}
func (ddNode *ddNode) flush() {
// generate binlog
log.Println(". dd buffer full or receive Flush msg ...")
ddCodec := &storage.DataDefinitionCodec{}
for collectionID, data := range ddNode.ddBuffer.ddData {
// buffer data to binlog
binLogs, err := ddCodec.Serialize(data.timestamps, data.ddRequestString, data.eventTypes)
if err != nil {
log.Println(err)
continue
}
if len(binLogs) != 2 {
log.Println("illegal binLogs")
continue
}
// binLogs -> minIO/S3
if len(data.ddRequestString) != len(data.timestamps) ||
len(data.timestamps) != len(data.eventTypes) {
log.Println("illegal ddBuffer, failed to save binlog")
continue
} else {
log.Println(".. dd buffer flushing ...")
// Blob key example:
// ${tenant}/data_definition_log/${collection_id}/ts/${log_idx}
// ${tenant}/data_definition_log/${collection_id}/ddl/${log_idx}
keyCommon := path.Join(Params.DdBinlogRootPath, strconv.FormatInt(collectionID, 10))
// save ts binlog
timestampLogIdx, err := ddNode.idAllocator.AllocOne()
if err != nil {
log.Println(err)
}
timestampKey := path.Join(keyCommon, binLogs[0].GetKey(), strconv.FormatInt(timestampLogIdx, 10))
err = ddNode.kv.Save(timestampKey, string(binLogs[0].GetValue()))
if err != nil {
log.Println(err)
}
log.Println("save ts binlog, key = ", timestampKey)
// save dd binlog
ddLogIdx, err := ddNode.idAllocator.AllocOne()
if err != nil {
log.Println(err)
}
ddKey := path.Join(keyCommon, binLogs[1].GetKey(), strconv.FormatInt(ddLogIdx, 10))
err = ddNode.kv.Save(ddKey, string(binLogs[1].GetValue()))
if err != nil {
log.Println(err)
}
log.Println("save dd binlog, key = ", ddKey)
ddlFlushMsg := &ddlFlushSyncMsg{
flushCompleted: false,
ddlBinlogPathMsg: ddlBinlogPathMsg{
collID: collectionID,
paths: []string{timestampKey, ddKey},
},
}
ddNode.outCh <- ddlFlushMsg
}
}
// clear buffer
ddNode.ddBuffer.ddData = make(map[UniqueID]*ddData)
}
func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
collectionID := msg.CollectionID
// add collection
if _, ok := ddNode.ddRecords.collectionRecords[collectionID]; ok {
err := errors.New("collection " + strconv.FormatInt(collectionID, 10) + " is already exists")
log.Println(err)
return
}
ddNode.ddRecords.collectionRecords[collectionID] = nil
// TODO: add default partition?
var schema schemapb.CollectionSchema
err := proto.Unmarshal(msg.Schema, &schema)
if err != nil {
log.Println(err)
return
}
schemaStr := proto.MarshalTextString(&schema)
// add collection
err = ddNode.replica.addCollection(collectionID, schemaStr)
if err != nil {
log.Println(err)
return
}
collectionName := schema.Name
ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
metaOperateRecord{
createOrDrop: true,
timestamp: msg.Base.Timestamp,
})
_, ok := ddNode.ddBuffer.ddData[collectionID]
if !ok {
ddNode.ddBuffer.ddData[collectionID] = &ddData{
ddRequestString: make([]string, 0),
timestamps: make([]Timestamp, 0),
eventTypes: make([]storage.EventTypeCode, 0),
}
}
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.CreateCollectionRequest.String())
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.CreateCollectionEventType)
}
func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
collectionID := msg.CollectionID
//err := ddNode.replica.removeCollection(collectionID)
//if err != nil {
// log.Println(err)
//}
// remove collection
if _, ok := ddNode.ddRecords.collectionRecords[collectionID]; !ok {
err := errors.New("cannot found collection " + strconv.FormatInt(collectionID, 10))
log.Println(err)
return
}
delete(ddNode.ddRecords.collectionRecords, collectionID)
collectionName := msg.CollectionName
ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
metaOperateRecord{
createOrDrop: false,
timestamp: msg.Base.Timestamp,
})
_, ok := ddNode.ddBuffer.ddData[collectionID]
if !ok {
ddNode.ddBuffer.ddData[collectionID] = &ddData{
ddRequestString: make([]string, 0),
timestamps: make([]Timestamp, 0),
eventTypes: make([]storage.EventTypeCode, 0),
}
}
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.DropCollectionRequest.String())
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.DropCollectionEventType)
ddNode.ddMsg.gcRecord.collections = append(ddNode.ddMsg.gcRecord.collections, collectionID)
}
func (ddNode *ddNode) createPartition(msg *msgstream.CreatePartitionMsg) {
partitionID := msg.PartitionID
collectionID := msg.CollectionID
// add partition
if _, ok := ddNode.ddRecords.partitionRecords[partitionID]; ok {
err := errors.New("partition " + strconv.FormatInt(partitionID, 10) + " is already exists")
log.Println(err)
return
}
ddNode.ddRecords.partitionRecords[partitionID] = nil
partitionTag := msg.PartitionName
ddNode.ddMsg.partitionRecords[partitionTag] = append(ddNode.ddMsg.partitionRecords[partitionTag],
metaOperateRecord{
createOrDrop: true,
timestamp: msg.Base.Timestamp,
})
_, ok := ddNode.ddBuffer.ddData[collectionID]
if !ok {
ddNode.ddBuffer.ddData[collectionID] = &ddData{
ddRequestString: make([]string, 0),
timestamps: make([]Timestamp, 0),
eventTypes: make([]storage.EventTypeCode, 0),
}
}
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.CreatePartitionRequest.String())
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.CreatePartitionEventType)
}
func (ddNode *ddNode) dropPartition(msg *msgstream.DropPartitionMsg) {
partitionID := msg.PartitionID
collectionID := msg.CollectionID
// remove partition
if _, ok := ddNode.ddRecords.partitionRecords[partitionID]; !ok {
err := errors.New("cannot found partition " + strconv.FormatInt(partitionID, 10))
log.Println(err)
return
}
delete(ddNode.ddRecords.partitionRecords, partitionID)
partitionTag := msg.PartitionName
ddNode.ddMsg.partitionRecords[partitionTag] = append(ddNode.ddMsg.partitionRecords[partitionTag],
metaOperateRecord{
createOrDrop: false,
timestamp: msg.Base.Timestamp,
})
_, ok := ddNode.ddBuffer.ddData[collectionID]
if !ok {
ddNode.ddBuffer.ddData[collectionID] = &ddData{
ddRequestString: make([]string, 0),
timestamps: make([]Timestamp, 0),
eventTypes: make([]storage.EventTypeCode, 0),
}
}
ddNode.ddBuffer.ddData[collectionID].ddRequestString = append(ddNode.ddBuffer.ddData[collectionID].ddRequestString, msg.DropPartitionRequest.String())
ddNode.ddBuffer.ddData[collectionID].timestamps = append(ddNode.ddBuffer.ddData[collectionID].timestamps, msg.Base.Timestamp)
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.DropPartitionEventType)
}
func newDDNode(ctx context.Context, outCh chan *ddlFlushSyncMsg, replica collectionReplica) *ddNode {
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
baseNode := BaseNode{}
baseNode.SetMaxQueueLength(maxQueueLength)
baseNode.SetMaxParallelism(maxParallelism)
ddRecords := &ddRecords{
collectionRecords: make(map[UniqueID]interface{}),
partitionRecords: make(map[UniqueID]interface{}),
}
bucketName := Params.MinioBucketName
option := &miniokv.Option{
Address: Params.MinioAddress,
AccessKeyID: Params.MinioAccessKeyID,
SecretAccessKeyID: Params.MinioSecretAccessKey,
UseSSL: Params.MinioUseSSL,
BucketName: bucketName,
CreateBucket: true,
}
minioKV, err := miniokv.NewMinIOKV(ctx, option)
if err != nil {
panic(err)
}
idAllocator, err := allocator.NewIDAllocator(ctx, Params.MasterAddress)
if err != nil {
panic(err)
}
err = idAllocator.Start()
if err != nil {
panic(err)
}
return &ddNode{
BaseNode: baseNode,
ddRecords: ddRecords,
ddBuffer: &ddBuffer{
ddData: make(map[UniqueID]*ddData),
maxSize: Params.FlushDdBufferSize,
},
outCh: outCh,
idAllocator: idAllocator,
kv: minioKV,
replica: replica,
}
}

View File

@ -0,0 +1,164 @@
package datanode
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)
func TestFlowGraphDDNode_Operate(t *testing.T) {
newMeta()
const ctxTimeInMillisecond = 2000
const closeWithDeadline = false
var ctx context.Context
if closeWithDeadline {
var cancel context.CancelFunc
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel = context.WithDeadline(context.Background(), d)
defer cancel()
} else {
ctx = context.Background()
}
ddChan := make(chan *ddlFlushSyncMsg, 10)
defer close(ddChan)
insertChan := make(chan *insertFlushSyncMsg, 10)
defer close(insertChan)
testPath := "/test/datanode/root/meta"
err := clearEtcd(testPath)
require.NoError(t, err)
Params.MetaRootPath = testPath
fService := newFlushSyncService(ctx, ddChan, insertChan)
assert.Equal(t, testPath, fService.metaTable.client.(*etcdkv.EtcdKV).GetPath("."))
go fService.start()
Params.FlushDdBufferSize = 4
replica := newReplica()
ddNode := newDDNode(ctx, ddChan, replica)
colID := UniqueID(0)
colName := "col-test-0"
// create collection
createColReq := internalpb2.CreateCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kCreateCollection,
MsgID: 1,
Timestamp: 1,
SourceID: 1,
},
CollectionID: colID,
Schema: make([]byte, 0),
}
createColMsg := msgstream.CreateCollectionMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: Timestamp(1),
EndTimestamp: Timestamp(1),
HashValues: []uint32{uint32(0)},
},
CreateCollectionRequest: createColReq,
}
// drop collection
dropColReq := internalpb2.DropCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kDropCollection,
MsgID: 2,
Timestamp: 2,
SourceID: 2,
},
CollectionID: colID,
CollectionName: colName,
}
dropColMsg := msgstream.DropCollectionMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: Timestamp(2),
EndTimestamp: Timestamp(2),
HashValues: []uint32{uint32(0)},
},
DropCollectionRequest: dropColReq,
}
partitionID := UniqueID(100)
partitionTag := "partition-test-0"
// create partition
createPartitionReq := internalpb2.CreatePartitionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kCreatePartition,
MsgID: 3,
Timestamp: 3,
SourceID: 3,
},
CollectionID: colID,
PartitionID: partitionID,
CollectionName: colName,
PartitionName: partitionTag,
}
createPartitionMsg := msgstream.CreatePartitionMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: Timestamp(3),
EndTimestamp: Timestamp(3),
HashValues: []uint32{uint32(0)},
},
CreatePartitionRequest: createPartitionReq,
}
// drop partition
dropPartitionReq := internalpb2.DropPartitionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kDropPartition,
MsgID: 4,
Timestamp: 4,
SourceID: 4,
},
CollectionID: colID,
PartitionID: partitionID,
CollectionName: colName,
PartitionName: partitionTag,
}
dropPartitionMsg := msgstream.DropPartitionMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: Timestamp(4),
EndTimestamp: Timestamp(4),
HashValues: []uint32{uint32(0)},
},
DropPartitionRequest: dropPartitionReq,
}
flushMsg := msgstream.FlushMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: Timestamp(5),
EndTimestamp: Timestamp(5),
HashValues: []uint32{uint32(0)},
},
FlushMsg: internalpb2.FlushMsg{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kFlush,
MsgID: 1,
Timestamp: 6,
SourceID: 1,
},
SegmentID: 1,
},
}
tsMessages := make([]msgstream.TsMsg, 0)
tsMessages = append(tsMessages, msgstream.TsMsg(&createColMsg))
tsMessages = append(tsMessages, msgstream.TsMsg(&dropColMsg))
tsMessages = append(tsMessages, msgstream.TsMsg(&createPartitionMsg))
tsMessages = append(tsMessages, msgstream.TsMsg(&dropPartitionMsg))
tsMessages = append(tsMessages, msgstream.TsMsg(&flushMsg))
msgStream := flowgraph.GenerateMsgStreamMsg(tsMessages, Timestamp(0), Timestamp(3))
var inMsg Msg = msgStream
ddNode.Operate([]*Msg{&inMsg})
}

View File

@ -0,0 +1,179 @@
package datanode
import (
"context"
"log"
"math"
"github.com/opentracing/opentracing-go"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
)
type filterDmNode struct {
BaseNode
ddMsg *ddMsg
}
func (fdmNode *filterDmNode) Name() string {
return "fdmNode"
}
func (fdmNode *filterDmNode) Operate(in []*Msg) []*Msg {
//fmt.Println("Do filterDmNode operation")
if len(in) != 2 {
log.Println("Invalid operate message input in filterDmNode, input length = ", len(in))
// TODO: add error handling
}
msgStreamMsg, ok := (*in[0]).(*MsgStreamMsg)
if !ok {
log.Println("type assertion failed for MsgStreamMsg")
// TODO: add error handling
}
var childs []opentracing.Span
tracer := opentracing.GlobalTracer()
if tracer != nil {
for _, msg := range msgStreamMsg.TsMessages() {
if msg.Type() == commonpb.MsgType_kInsert {
var child opentracing.Span
ctx := msg.GetMsgContext()
if parent := opentracing.SpanFromContext(ctx); parent != nil {
child = tracer.StartSpan("pass filter node",
opentracing.FollowsFrom(parent.Context()))
} else {
child = tracer.StartSpan("pass filter node")
}
child.SetTag("hash keys", msg.HashKeys())
child.SetTag("start time", msg.BeginTs())
child.SetTag("end time", msg.EndTs())
msg.SetMsgContext(opentracing.ContextWithSpan(ctx, child))
childs = append(childs, child)
}
}
}
ddMsg, ok := (*in[1]).(*ddMsg)
if !ok {
log.Println("type assertion failed for ddMsg")
// TODO: add error handling
}
fdmNode.ddMsg = ddMsg
var iMsg = insertMsg{
insertMessages: make([]*msgstream.InsertMsg, 0),
flushMessages: make([]*msgstream.FlushMsg, 0),
timeRange: TimeRange{
timestampMin: msgStreamMsg.TimestampMin(),
timestampMax: msgStreamMsg.TimestampMax(),
},
}
for _, fmsg := range ddMsg.flushMessages {
switch fmsg.Type() {
case commonpb.MsgType_kFlush:
iMsg.flushMessages = append(iMsg.flushMessages, fmsg)
default:
log.Println("Non supporting message type:", fmsg.Type())
}
}
for key, msg := range msgStreamMsg.TsMessages() {
switch msg.Type() {
case commonpb.MsgType_kInsert:
var ctx2 context.Context
if childs != nil {
if childs[key] != nil {
ctx2 = opentracing.ContextWithSpan(msg.GetMsgContext(), childs[key])
} else {
ctx2 = context.Background()
}
}
resMsg := fdmNode.filterInvalidInsertMessage(msg.(*msgstream.InsertMsg))
if resMsg != nil {
resMsg.SetMsgContext(ctx2)
iMsg.insertMessages = append(iMsg.insertMessages, resMsg)
}
// case commonpb.MsgType_kDelete:
// dmMsg.deleteMessages = append(dmMsg.deleteMessages, (*msg).(*msgstream.DeleteTask))
default:
log.Println("Non supporting message type:", msg.Type())
}
}
iMsg.gcRecord = ddMsg.gcRecord
var res Msg = &iMsg
for _, child := range childs {
child.Finish()
}
return []*Msg{&res}
}
func (fdmNode *filterDmNode) filterInvalidInsertMessage(msg *msgstream.InsertMsg) *msgstream.InsertMsg {
// No dd record, do all insert requests.
records, ok := fdmNode.ddMsg.collectionRecords[msg.CollectionName]
if !ok {
return msg
}
// TODO: If the last record is drop type, all insert requests are invalid.
//if !records[len(records)-1].createOrDrop {
// return nil
//}
// Filter insert requests before last record.
if len(msg.RowIDs) != len(msg.Timestamps) || len(msg.RowIDs) != len(msg.RowData) {
// TODO: what if the messages are misaligned? For now, we ignore those messages and log an error
log.Println("Error: misaligned messages detected")
return nil
}
tmpTimestamps := make([]Timestamp, 0)
tmpRowIDs := make([]int64, 0)
tmpRowData := make([]*commonpb.Blob, 0)
// calculate valid time range
timeBegin := Timestamp(0)
timeEnd := Timestamp(math.MaxUint64)
for _, record := range records {
if record.createOrDrop && timeBegin < record.timestamp {
timeBegin = record.timestamp
}
if !record.createOrDrop && timeEnd > record.timestamp {
timeEnd = record.timestamp
}
}
for i, t := range msg.Timestamps {
if t >= timeBegin && t <= timeEnd {
tmpTimestamps = append(tmpTimestamps, t)
tmpRowIDs = append(tmpRowIDs, msg.RowIDs[i])
tmpRowData = append(tmpRowData, msg.RowData[i])
}
}
if len(tmpRowIDs) <= 0 {
return nil
}
msg.Timestamps = tmpTimestamps
msg.RowIDs = tmpRowIDs
msg.RowData = tmpRowData
return msg
}
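As a rough, self-contained sketch of the filtering rule above (types simplified from metaOperateRecord, timestamps made up): a row is kept only if its timestamp falls between the latest create record and the earliest drop record:
package main

import (
	"fmt"
	"math"
)

// metaOp mirrors metaOperateRecord: createOrDrop==true marks a create,
// false marks a drop, each with the timestamp at which it took effect.
type metaOp struct {
	createOrDrop bool
	timestamp    uint64
}

// validWindow computes the same [timeBegin, timeEnd] window the filter node uses.
func validWindow(records []metaOp) (uint64, uint64) {
	timeBegin := uint64(0)
	timeEnd := uint64(math.MaxUint64)
	for _, r := range records {
		if r.createOrDrop && r.timestamp > timeBegin {
			timeBegin = r.timestamp
		}
		if !r.createOrDrop && r.timestamp < timeEnd {
			timeEnd = r.timestamp
		}
	}
	return timeBegin, timeEnd
}

func main() {
	records := []metaOp{{true, 100}, {false, 300}} // created at ts 100, dropped at ts 300
	begin, end := validWindow(records)
	for _, ts := range []uint64{50, 150, 350} {
		fmt.Println(ts, ts >= begin && ts <= end) // 50 false, 150 true, 350 false
	}
}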
func newFilteredDmNode() *filterDmNode {
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
baseNode := BaseNode{}
baseNode.SetMaxQueueLength(maxQueueLength)
baseNode.SetMaxParallelism(maxParallelism)
return &filterDmNode{
BaseNode: baseNode,
}
}

View File

@ -0,0 +1,53 @@
package datanode
import (
"log"
)
type gcNode struct {
BaseNode
replica collectionReplica
}
func (gcNode *gcNode) Name() string {
return "gcNode"
}
func (gcNode *gcNode) Operate(in []*Msg) []*Msg {
//fmt.Println("Do gcNode operation")
if len(in) != 1 {
log.Println("Invalid operate message input in gcNode, input length = ", len(in))
// TODO: add error handling
}
gcMsg, ok := (*in[0]).(*gcMsg)
if !ok {
log.Println("type assertion failed for gcMsg")
// TODO: add error handling
}
// drop collections
for _, collectionID := range gcMsg.gcRecord.collections {
err := gcNode.replica.removeCollection(collectionID)
if err != nil {
log.Println(err)
}
}
return nil
}
func newGCNode(replica collectionReplica) *gcNode {
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
baseNode := BaseNode{}
baseNode.SetMaxQueueLength(maxQueueLength)
baseNode.SetMaxParallelism(maxParallelism)
return &gcNode{
BaseNode: baseNode,
replica: replica,
}
}

View File

@ -0,0 +1,660 @@
package datanode
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"log"
"path"
"strconv"
"unsafe"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/opentracing/opentracing-go"
oplog "github.com/opentracing/opentracing-go/log"
"github.com/zilliztech/milvus-distributed/internal/allocator"
"github.com/zilliztech/milvus-distributed/internal/kv"
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/storage"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)
const (
CollectionPrefix = "/collection/"
SegmentPrefix = "/segment/"
)
type (
InsertData = storage.InsertData
Blob = storage.Blob
insertBufferNode struct {
BaseNode
insertBuffer *insertBuffer
minIOKV kv.Base
minioPrifex string
idAllocator *allocator.IDAllocator
outCh chan *insertFlushSyncMsg
pulsarDataNodeTimeTickStream *msgstream.PulsarMsgStream
replica collectionReplica
}
insertBuffer struct {
insertData map[UniqueID]*InsertData // SegmentID to InsertData
maxSize int32
}
)
func (ib *insertBuffer) size(segmentID UniqueID) int32 {
if ib.insertData == nil || len(ib.insertData) <= 0 {
return 0
}
idata, ok := ib.insertData[segmentID]
if !ok {
return 0
}
var maxSize int32 = 0
for _, data := range idata.Data {
fdata, ok := data.(*storage.FloatVectorFieldData)
if ok && int32(fdata.NumRows) > maxSize {
maxSize = int32(fdata.NumRows)
}
bdata, ok := data.(*storage.BinaryVectorFieldData)
if ok && int32(bdata.NumRows) > maxSize {
maxSize = int32(bdata.NumRows)
}
}
return maxSize
}
func (ib *insertBuffer) full(segmentID UniqueID) bool {
return ib.size(segmentID) >= ib.maxSize
}
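A minimal standalone sketch of this buffering policy, with simplified stand-ins for the storage vector field types: the buffer for a segment counts as full once its largest vector field has buffered maxSize rows (maxSize corresponds to Params.FlushInsertBufferSize):
package main

import "fmt"

// Simplified stand-ins for storage.FloatVectorFieldData / storage.BinaryVectorFieldData.
type floatVectorField struct{ NumRows int }
type binaryVectorField struct{ NumRows int }

type buffer struct {
	insertData map[int64]map[int64]interface{} // segmentID -> fieldID -> field data
	maxSize    int32
}

// size mirrors insertBuffer.size: the largest row count among the vector fields.
func (b *buffer) size(segID int64) int32 {
	var maxRows int32
	for _, data := range b.insertData[segID] {
		switch f := data.(type) {
		case *floatVectorField:
			if int32(f.NumRows) > maxRows {
				maxRows = int32(f.NumRows)
			}
		case *binaryVectorField:
			if int32(f.NumRows) > maxRows {
				maxRows = int32(f.NumRows)
			}
		}
	}
	return maxRows
}

func (b *buffer) full(segID int64) bool { return b.size(segID) >= b.maxSize }

func main() {
	b := &buffer{
		insertData: map[int64]map[int64]interface{}{
			1: {100: &floatVectorField{NumRows: 500}, 101: &binaryVectorField{NumRows: 480}},
		},
		maxSize: 500,
	}
	fmt.Println(b.size(1), b.full(1)) // 500 true
}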
func (ibNode *insertBufferNode) Name() string {
return "ibNode"
}
func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
// log.Println("=========== insert buffer Node Operating")
if len(in) != 1 {
log.Println("Error: Invalid operate message input in insertBuffertNode, input length = ", len(in))
// TODO: add error handling
}
iMsg, ok := (*in[0]).(*insertMsg)
if !ok {
log.Println("Error: type assertion failed for insertMsg")
// TODO: add error handling
}
// iMsg is insertMsg
// 1. iMsg -> buffer
for _, msg := range iMsg.insertMessages {
ctx := msg.GetMsgContext()
var span opentracing.Span
if ctx != nil {
span, _ = opentracing.StartSpanFromContext(ctx, fmt.Sprintf("insert buffer node, start time = %d", msg.BeginTs()))
} else {
span = opentracing.StartSpan(fmt.Sprintf("insert buffer node, start time = %d", msg.BeginTs()))
}
span.SetTag("hash keys", msg.HashKeys())
span.SetTag("start time", msg.BeginTs())
span.SetTag("end time", msg.EndTs())
if len(msg.RowIDs) != len(msg.Timestamps) || len(msg.RowIDs) != len(msg.RowData) {
log.Println("Error: misaligned messages detected")
continue
}
currentSegID := msg.GetSegmentID()
collectionName := msg.GetCollectionName()
span.LogFields(oplog.Int("segment id", int(currentSegID)))
idata, ok := ibNode.insertBuffer.insertData[currentSegID]
if !ok {
idata = &InsertData{
Data: make(map[UniqueID]storage.FieldData),
}
}
// 1.1 Get CollectionMeta from the replica (loaded from etcd by metaService)
collection, err := ibNode.replica.getCollectionByName(collectionName)
if err != nil {
// GOOSE TODO add error handler
log.Println("bbb, Get meta wrong:", err)
continue
}
collectionID := collection.ID()
collSchema := collection.schema
// 1.2 Get Fields
var pos int = 0 // byte offset of the current field within each row blob
for _, field := range collSchema.Fields {
switch field.DataType {
case schemapb.DataType_VECTOR_FLOAT:
var dim int
for _, t := range field.TypeParams {
if t.Key == "dim" {
dim, err = strconv.Atoi(t.Value)
if err != nil {
log.Println("strconv wrong")
}
break
}
}
if dim <= 0 {
log.Println("invalid dim")
// TODO: add error handling
}
if _, ok := idata.Data[field.FieldID]; !ok {
idata.Data[field.FieldID] = &storage.FloatVectorFieldData{
NumRows: 0,
Data: make([]float32, 0),
Dim: dim,
}
}
fieldData := idata.Data[field.FieldID].(*storage.FloatVectorFieldData)
var offset int
for _, blob := range msg.RowData {
offset = 0
for j := 0; j < dim; j++ {
var v float32
buf := bytes.NewBuffer(blob.GetValue()[pos+offset:])
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
log.Println("binary.read float32 err:", err)
}
fieldData.Data = append(fieldData.Data, v)
offset += int(unsafe.Sizeof(*(&v)))
}
}
pos += offset
fieldData.NumRows += len(msg.RowIDs)
case schemapb.DataType_VECTOR_BINARY:
var dim int
for _, t := range field.TypeParams {
if t.Key == "dim" {
dim, err = strconv.Atoi(t.Value)
if err != nil {
log.Println("strconv wrong")
}
break
}
}
if dim <= 0 {
log.Println("invalid dim")
// TODO: add error handling
}
if _, ok := idata.Data[field.FieldID]; !ok {
idata.Data[field.FieldID] = &storage.BinaryVectorFieldData{
NumRows: 0,
Data: make([]byte, 0),
Dim: dim,
}
}
fieldData := idata.Data[field.FieldID].(*storage.BinaryVectorFieldData)
var offset int
for _, blob := range msg.RowData {
bv := blob.GetValue()[pos : pos+(dim/8)] // each row's binary vector occupies dim/8 bytes starting at pos
fieldData.Data = append(fieldData.Data, bv...)
offset = len(bv)
}
pos += offset
fieldData.NumRows += len(msg.RowData)
case schemapb.DataType_BOOL:
if _, ok := idata.Data[field.FieldID]; !ok {
idata.Data[field.FieldID] = &storage.BoolFieldData{
NumRows: 0,
Data: make([]bool, 0),
}
}
fieldData := idata.Data[field.FieldID].(*storage.BoolFieldData)
var v bool
for _, blob := range msg.RowData {
buf := bytes.NewReader(blob.GetValue()[pos:])
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
log.Println("binary.Read bool failed:", err)
}
fieldData.Data = append(fieldData.Data, v)
}
pos += int(unsafe.Sizeof(*(&v)))
fieldData.NumRows += len(msg.RowIDs)
case schemapb.DataType_INT8:
if _, ok := idata.Data[field.FieldID]; !ok {
idata.Data[field.FieldID] = &storage.Int8FieldData{
NumRows: 0,
Data: make([]int8, 0),
}
}
fieldData := idata.Data[field.FieldID].(*storage.Int8FieldData)
var v int8
for _, blob := range msg.RowData {
buf := bytes.NewReader(blob.GetValue()[pos:])
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
log.Println("binary.Read int8 failed:", err)
}
fieldData.Data = append(fieldData.Data, v)
}
pos += int(unsafe.Sizeof(*(&v)))
fieldData.NumRows += len(msg.RowIDs)
case schemapb.DataType_INT16:
if _, ok := idata.Data[field.FieldID]; !ok {
idata.Data[field.FieldID] = &storage.Int16FieldData{
NumRows: 0,
Data: make([]int16, 0),
}
}
fieldData := idata.Data[field.FieldID].(*storage.Int16FieldData)
var v int16
for _, blob := range msg.RowData {
buf := bytes.NewReader(blob.GetValue()[pos:])
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
log.Println("binary.Read int16 failed:", err)
}
fieldData.Data = append(fieldData.Data, v)
}
pos += int(unsafe.Sizeof(*(&v)))
fieldData.NumRows += len(msg.RowIDs)
case schemapb.DataType_INT32:
if _, ok := idata.Data[field.FieldID]; !ok {
idata.Data[field.FieldID] = &storage.Int32FieldData{
NumRows: 0,
Data: make([]int32, 0),
}
}
fieldData := idata.Data[field.FieldID].(*storage.Int32FieldData)
var v int32
for _, blob := range msg.RowData {
buf := bytes.NewReader(blob.GetValue()[pos:])
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
log.Println("binary.Read int32 failed:", err)
}
fieldData.Data = append(fieldData.Data, v)
}
pos += int(unsafe.Sizeof(*(&v)))
fieldData.NumRows += len(msg.RowIDs)
case schemapb.DataType_INT64:
if _, ok := idata.Data[field.FieldID]; !ok {
idata.Data[field.FieldID] = &storage.Int64FieldData{
NumRows: 0,
Data: make([]int64, 0),
}
}
fieldData := idata.Data[field.FieldID].(*storage.Int64FieldData)
switch field.FieldID {
case 0: // rowIDs
fieldData.Data = append(fieldData.Data, msg.RowIDs...)
fieldData.NumRows += len(msg.RowIDs)
case 1: // Timestamps
for _, ts := range msg.Timestamps {
fieldData.Data = append(fieldData.Data, int64(ts))
}
fieldData.NumRows += len(msg.Timestamps)
default:
var v int64
for _, blob := range msg.RowData {
buf := bytes.NewBuffer(blob.GetValue()[pos:])
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
log.Println("binary.Read int64 failed:", err)
}
fieldData.Data = append(fieldData.Data, v)
}
pos += int(unsafe.Sizeof(*(&v)))
fieldData.NumRows += len(msg.RowIDs)
}
case schemapb.DataType_FLOAT:
if _, ok := idata.Data[field.FieldID]; !ok {
idata.Data[field.FieldID] = &storage.FloatFieldData{
NumRows: 0,
Data: make([]float32, 0),
}
}
fieldData := idata.Data[field.FieldID].(*storage.FloatFieldData)
var v float32
for _, blob := range msg.RowData {
buf := bytes.NewBuffer(blob.GetValue()[pos:])
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
log.Println("binary.Read float32 failed:", err)
}
fieldData.Data = append(fieldData.Data, v)
}
pos += int(unsafe.Sizeof(*(&v)))
fieldData.NumRows += len(msg.RowIDs)
case schemapb.DataType_DOUBLE:
if _, ok := idata.Data[field.FieldID]; !ok {
idata.Data[field.FieldID] = &storage.DoubleFieldData{
NumRows: 0,
Data: make([]float64, 0),
}
}
fieldData := idata.Data[field.FieldID].(*storage.DoubleFieldData)
var v float64
for _, blob := range msg.RowData {
buf := bytes.NewBuffer(blob.GetValue()[pos:])
if err := binary.Read(buf, binary.LittleEndian, &v); err != nil {
log.Println("binary.Read float64 failed:", err)
}
fieldData.Data = append(fieldData.Data, v)
}
pos += int(unsafe.Sizeof(*(&v)))
fieldData.NumRows += len(msg.RowIDs)
}
}
// 1.3 store in buffer
ibNode.insertBuffer.insertData[currentSegID] = idata
span.LogFields(oplog.String("store in buffer", "store in buffer"))
// 1.4 if full
// 1.4.1 generate binlogs
span.LogFields(oplog.String("generate binlogs", "generate binlogs"))
if ibNode.insertBuffer.full(currentSegID) {
log.Printf(". Insert Buffer full, auto flushing (%v) rows of data...", ibNode.insertBuffer.size(currentSegID))
// partitionTag -> partitionID
partitionTag := msg.GetPartitionName()
partitionID, err := typeutil.Hash32String(partitionTag)
if err != nil {
log.Println("partitionTag to partitionID wrong")
// TODO GOOSE add error handler
}
collMeta := &etcdpb.CollectionMeta{
Schema: collSchema,
ID: collectionID,
}
inCodec := storage.NewInsertCodec(collMeta)
// buffer data to binlogs
binLogs, err := inCodec.Serialize(partitionID,
currentSegID, ibNode.insertBuffer.insertData[currentSegID])
if err != nil {
log.Println("generate binlog wrong: ", err)
}
// clear buffer
delete(ibNode.insertBuffer.insertData, currentSegID)
log.Println(".. Clearing buffer")
// 1.4.2 binLogs -> minIO/S3
collIDStr := strconv.FormatInt(collectionID, 10)
partitionIDStr := strconv.FormatInt(partitionID, 10)
segIDStr := strconv.FormatInt(currentSegID, 10)
keyPrefix := path.Join(ibNode.minioPrifex, collIDStr, partitionIDStr, segIDStr)
log.Printf(".. Saving (%v) binlogs to MinIO ...", len(binLogs))
for index, blob := range binLogs {
uid, err := ibNode.idAllocator.AllocOne()
if err != nil {
log.Println("Allocate Id failed")
// GOOSE TODO error handler
}
key := path.Join(keyPrefix, blob.Key, strconv.FormatInt(uid, 10))
err = ibNode.minIOKV.Save(key, string(blob.Value[:]))
if err != nil {
log.Println("Save to MinIO failed")
// GOOSE TODO error handler
}
fieldID, err := strconv.ParseInt(blob.Key, 10, 32)
if err != nil {
log.Println("string to fieldID wrong")
// GOOSE TODO error handler
}
inBinlogMsg := &insertFlushSyncMsg{
flushCompleted: false,
insertBinlogPathMsg: insertBinlogPathMsg{
ts: iMsg.timeRange.timestampMax,
segID: currentSegID,
fieldID: fieldID,
paths: []string{key},
},
}
log.Println("... Appending binlog paths ...", index)
ibNode.outCh <- inBinlogMsg
}
}
span.Finish()
}
if len(iMsg.insertMessages) > 0 {
log.Println("---insert buffer status---")
var stopSign int = 0
for k := range ibNode.insertBuffer.insertData {
if stopSign >= 10 {
break
}
log.Printf("seg(%v) buffer size = (%v)", k, ibNode.insertBuffer.size(k))
stopSign++
}
}
// iMsg is Flush() msg from master
// 1. insertBuffer(not empty) -> binLogs -> minIO/S3
for _, msg := range iMsg.flushMessages {
currentSegID := msg.GetSegmentID()
flushTs := msg.Base.GetTimestamp()
partitionTag := msg.GetPartitionTag()
collectionID := msg.GetCollectionID()
log.Printf(". Receiving flush message segID(%v)...", currentSegID)
if ibNode.insertBuffer.size(currentSegID) > 0 {
log.Println(".. Buffer not empty, flushing ...")
collSchema, err := ibNode.getCollectionSchemaByID(collectionID)
if err != nil {
// GOOSE TODO add error handler
log.Println("aaa, Get meta wrong: ", err)
}
collMeta := &etcdpb.CollectionMeta{
Schema: collSchema,
ID: collectionID,
}
inCodec := storage.NewInsertCodec(collMeta)
// partitionTag -> partitionID
partitionID, err := typeutil.Hash32String(partitionTag)
if err != nil {
// GOOSE TODO add error handler
log.Println("partitionTag to partitionID Wrong: ", err)
}
// buffer data to binlogs
binLogs, err := inCodec.Serialize(partitionID,
currentSegID, ibNode.insertBuffer.insertData[currentSegID])
if err != nil {
log.Println("generate binlog wrong: ", err)
}
// clear buffer
delete(ibNode.insertBuffer.insertData, currentSegID)
// binLogs -> minIO/S3
collIDStr := strconv.FormatInt(collectionID, 10)
partitionIDStr := strconv.FormatInt(partitionID, 10)
segIDStr := strconv.FormatInt(currentSegID, 10)
keyPrefix := path.Join(ibNode.minioPrifex, collIDStr, partitionIDStr, segIDStr)
for _, blob := range binLogs {
uid, err := ibNode.idAllocator.AllocOne()
if err != nil {
log.Println("Allocate Id failed")
// GOOSE TODO error handler
}
key := path.Join(keyPrefix, blob.Key, strconv.FormatInt(uid, 10))
err = ibNode.minIOKV.Save(key, string(blob.Value[:]))
if err != nil {
log.Println("Save to MinIO failed")
// GOOSE TODO error handler
}
fieldID, err := strconv.ParseInt(blob.Key, 10, 32)
if err != nil {
log.Println("string to fieldID wrong")
// GOOSE TODO error handler
}
// Append binlogs
inBinlogMsg := &insertFlushSyncMsg{
flushCompleted: false,
insertBinlogPathMsg: insertBinlogPathMsg{
ts: flushTs,
segID: currentSegID,
fieldID: fieldID,
paths: []string{key},
},
}
ibNode.outCh <- inBinlogMsg
}
}
// Flushed
log.Println(".. Flush finished ...")
inBinlogMsg := &insertFlushSyncMsg{
flushCompleted: true,
insertBinlogPathMsg: insertBinlogPathMsg{
ts: flushTs,
segID: currentSegID,
},
}
ibNode.outCh <- inBinlogMsg
}
if err := ibNode.writeHardTimeTick(iMsg.timeRange.timestampMax); err != nil {
log.Printf("Error: send hard time tick into pulsar channel failed, %s\n", err.Error())
}
var res Msg = &gcMsg{
gcRecord: iMsg.gcRecord,
timeRange: iMsg.timeRange,
}
return []*Msg{&res}
}
func (ibNode *insertBufferNode) getCollectionSchemaByID(collectionID UniqueID) (*schemapb.CollectionSchema, error) {
ret, err := ibNode.replica.getCollectionByID(collectionID)
if err != nil {
return nil, err
}
return ret.schema, nil
}
func (ibNode *insertBufferNode) getCollectionSchemaByName(collectionName string) (*schemapb.CollectionSchema, error) {
ret, err := ibNode.replica.getCollectionByName(collectionName)
if err != nil {
return nil, err
}
return ret.schema, nil
}
func (ibNode *insertBufferNode) writeHardTimeTick(ts Timestamp) error {
msgPack := msgstream.MsgPack{}
timeTickMsg := msgstream.TimeTickMsg{
BaseMsg: msgstream.BaseMsg{
BeginTimestamp: ts,
EndTimestamp: ts,
HashValues: []uint32{0},
},
TimeTickMsg: internalpb2.TimeTickMsg{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kTimeTick,
MsgID: 0,
Timestamp: ts,
SourceID: Params.DataNodeID,
},
},
}
msgPack.Msgs = append(msgPack.Msgs, &timeTickMsg)
return ibNode.pulsarDataNodeTimeTickStream.Produce(&msgPack)
}
func newInsertBufferNode(ctx context.Context, outCh chan *insertFlushSyncMsg, replica collectionReplica) *insertBufferNode {
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
baseNode := BaseNode{}
baseNode.SetMaxQueueLength(maxQueueLength)
baseNode.SetMaxParallelism(maxParallelism)
maxSize := Params.FlushInsertBufferSize
iBuffer := &insertBuffer{
insertData: make(map[UniqueID]*InsertData),
maxSize: maxSize,
}
// MinIO
option := &miniokv.Option{
Address: Params.MinioAddress,
AccessKeyID: Params.MinioAccessKeyID,
SecretAccessKeyID: Params.MinioSecretAccessKey,
UseSSL: Params.MinioUseSSL,
CreateBucket: true,
BucketName: Params.MinioBucketName,
}
minIOKV, err := miniokv.NewMinIOKV(ctx, option)
if err != nil {
panic(err)
}
minioPrefix := Params.InsertBinlogRootPath
idAllocator, err := allocator.NewIDAllocator(ctx, Params.MasterAddress)
if err != nil {
panic(err)
}
err = idAllocator.Start()
if err != nil {
panic(err)
}
wTt := msgstream.NewPulsarMsgStream(ctx, 1024) //input stream, data node time tick
wTt.SetPulsarClient(Params.PulsarAddress)
wTt.CreatePulsarProducers([]string{Params.TimeTickChannelName})
return &insertBufferNode{
BaseNode: baseNode,
insertBuffer: iBuffer,
minIOKV: minIOKV,
minioPrifex: minioPrefix,
idAllocator: idAllocator,
outCh: outCh,
pulsarDataNodeTimeTickStream: wTt,
replica: replica,
}
}

View File

@ -0,0 +1,227 @@
package datanode
import (
"bytes"
"context"
"encoding/binary"
"log"
"math"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)
func TestFlowGraphInputBufferNode_Operate(t *testing.T) {
const ctxTimeInMillisecond = 2000
const closeWithDeadline = false
var ctx context.Context
if closeWithDeadline {
var cancel context.CancelFunc
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel = context.WithDeadline(context.Background(), d)
defer cancel()
} else {
ctx = context.Background()
}
ddChan := make(chan *ddlFlushSyncMsg, 10)
defer close(ddChan)
insertChan := make(chan *insertFlushSyncMsg, 10)
defer close(insertChan)
testPath := "/test/datanode/root/meta"
err := clearEtcd(testPath)
require.NoError(t, err)
Params.MetaRootPath = testPath
fService := newFlushSyncService(ctx, ddChan, insertChan)
assert.Equal(t, testPath, fService.metaTable.client.(*etcdkv.EtcdKV).GetPath("."))
go fService.start()
collMeta := newMeta()
schemaBlob := proto.MarshalTextString(collMeta.Schema)
require.NotEqual(t, "", schemaBlob)
replica := newReplica()
err = replica.addCollection(collMeta.ID, schemaBlob)
require.NoError(t, err)
// Params.FlushInsertBufSize = 2
iBNode := newInsertBufferNode(ctx, insertChan, replica)
inMsg := genInsertMsg()
var iMsg flowgraph.Msg = &inMsg
iBNode.Operate([]*flowgraph.Msg{&iMsg})
}
func genInsertMsg() insertMsg {
// test data generate
// GOOSE TODO organize
const DIM = 2
const N = 1
var rawData []byte
// Float vector
var fvector = [DIM]float32{1, 2}
for _, ele := range fvector {
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, math.Float32bits(ele))
rawData = append(rawData, buf...)
}
// Binary vector
// Dimension of binary vector is 32
// size := 4, = 32 / 8
var bvector = []byte{255, 255, 255, 0}
rawData = append(rawData, bvector...)
// Bool
var fieldBool = true
buf := new(bytes.Buffer)
if err := binary.Write(buf, binary.LittleEndian, fieldBool); err != nil {
panic(err)
}
rawData = append(rawData, buf.Bytes()...)
// int8
var dataInt8 int8 = 100
bint8 := new(bytes.Buffer)
if err := binary.Write(bint8, binary.LittleEndian, dataInt8); err != nil {
panic(err)
}
rawData = append(rawData, bint8.Bytes()...)
// int16
var dataInt16 int16 = 200
bint16 := new(bytes.Buffer)
if err := binary.Write(bint16, binary.LittleEndian, dataInt16); err != nil {
panic(err)
}
rawData = append(rawData, bint16.Bytes()...)
// int32
var dataInt32 int32 = 300
bint32 := new(bytes.Buffer)
if err := binary.Write(bint32, binary.LittleEndian, dataInt32); err != nil {
panic(err)
}
rawData = append(rawData, bint32.Bytes()...)
// int64
var dataInt64 int64 = 400
bint64 := new(bytes.Buffer)
if err := binary.Write(bint64, binary.LittleEndian, dataInt64); err != nil {
panic(err)
}
rawData = append(rawData, bint64.Bytes()...)
// float32
var datafloat float32 = 1.1
bfloat32 := new(bytes.Buffer)
if err := binary.Write(bfloat32, binary.LittleEndian, datafloat); err != nil {
panic(err)
}
rawData = append(rawData, bfloat32.Bytes()...)
// float64
var datafloat64 float64 = 2.2
bfloat64 := new(bytes.Buffer)
if err := binary.Write(bfloat64, binary.LittleEndian, datafloat64); err != nil {
panic(err)
}
rawData = append(rawData, bfloat64.Bytes()...)
log.Println("Test rawdata length:", len(rawData))
timeRange := TimeRange{
timestampMin: 0,
timestampMax: math.MaxUint64,
}
var iMsg = &insertMsg{
insertMessages: make([]*msgstream.InsertMsg, 0),
flushMessages: make([]*msgstream.FlushMsg, 0),
timeRange: TimeRange{
timestampMin: timeRange.timestampMin,
timestampMax: timeRange.timestampMax,
},
}
// messages generate
const MSGLENGTH = 1
// insertMessages := make([]msgstream.TsMsg, 0)
for i := 0; i < MSGLENGTH; i++ {
var msg = &msgstream.InsertMsg{
BaseMsg: msgstream.BaseMsg{
HashValues: []uint32{
uint32(i),
},
},
InsertRequest: internalpb2.InsertRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kInsert,
MsgID: 0,
Timestamp: Timestamp(i + 1000),
SourceID: 0,
},
CollectionName: "col1",
PartitionName: "default",
SegmentID: UniqueID(1),
ChannelID: "0",
Timestamps: []Timestamp{
Timestamp(i + 1000),
Timestamp(i + 1000),
Timestamp(i + 1000),
Timestamp(i + 1000),
Timestamp(i + 1000),
},
RowIDs: []UniqueID{
UniqueID(i),
UniqueID(i),
UniqueID(i),
UniqueID(i),
UniqueID(i),
},
RowData: []*commonpb.Blob{
{Value: rawData},
{Value: rawData},
{Value: rawData},
{Value: rawData},
{Value: rawData},
},
},
}
iMsg.insertMessages = append(iMsg.insertMessages, msg)
}
var fmsg msgstream.FlushMsg = msgstream.FlushMsg{
BaseMsg: msgstream.BaseMsg{
HashValues: []uint32{
uint32(10),
},
},
FlushMsg: internalpb2.FlushMsg{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kFlush,
MsgID: 1,
Timestamp: 2000,
SourceID: 1,
},
SegmentID: UniqueID(1),
CollectionID: UniqueID(1),
PartitionTag: "default",
},
}
iMsg.flushMessages = append(iMsg.flushMessages, &fmsg)
return *iMsg
}
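As a rough standalone companion to genInsertMsg above, the sketch below encodes the leading part of one row with the same assumed layout (a dim-2 float vector, a 32-bit binary vector, then a bool field) and decodes it with a running pos offset, mirroring the bookkeeping in insertBufferNode.Operate:
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	// Encode one row: dim-2 float vector, 32-bit binary vector, one bool.
	var row []byte
	for _, f := range []float32{1, 2} {
		b := make([]byte, 4)
		binary.LittleEndian.PutUint32(b, math.Float32bits(f))
		row = append(row, b...)
	}
	row = append(row, []byte{255, 255, 255, 0}...)
	boolBuf := new(bytes.Buffer)
	if err := binary.Write(boolBuf, binary.LittleEndian, true); err != nil {
		panic(err)
	}
	row = append(row, boolBuf.Bytes()...)

	// Decode with a running position, mirroring the pos bookkeeping in Operate.
	pos := 0
	fvec := make([]float32, 2)
	if err := binary.Read(bytes.NewReader(row[pos:]), binary.LittleEndian, fvec); err != nil {
		panic(err)
	}
	pos += 4 * len(fvec)
	bvec := row[pos : pos+32/8] // dim/8 bytes for a binary vector
	pos += 32 / 8
	var flag bool
	if err := binary.Read(bytes.NewReader(row[pos:]), binary.LittleEndian, &flag); err != nil {
		panic(err)
	}
	fmt.Println(fvec, bvec, flag) // [1 2] [255 255 255 0] true
}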

View File

@ -0,0 +1,74 @@
package datanode
import (
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)
type (
Msg = flowgraph.Msg
MsgStreamMsg = flowgraph.MsgStreamMsg
)
type (
key2SegMsg struct {
tsMessages []msgstream.TsMsg
timeRange TimeRange
}
ddMsg struct {
// TODO: use collection id
collectionRecords map[string][]metaOperateRecord
// TODO: use partition id
partitionRecords map[string][]metaOperateRecord
flushMessages []*msgstream.FlushMsg
gcRecord *gcRecord
timeRange TimeRange
}
metaOperateRecord struct {
createOrDrop bool // create: true, drop: false
timestamp Timestamp
}
insertMsg struct {
insertMessages []*msgstream.InsertMsg
flushMessages []*msgstream.FlushMsg
gcRecord *gcRecord
timeRange TimeRange
}
deleteMsg struct {
deleteMessages []*msgstream.DeleteMsg
timeRange TimeRange
}
gcMsg struct {
gcRecord *gcRecord
timeRange TimeRange
}
gcRecord struct {
collections []UniqueID
}
)
func (ksMsg *key2SegMsg) TimeTick() Timestamp {
return ksMsg.timeRange.timestampMax
}
func (suMsg *ddMsg) TimeTick() Timestamp {
return suMsg.timeRange.timestampMax
}
func (iMsg *insertMsg) TimeTick() Timestamp {
return iMsg.timeRange.timestampMax
}
func (dMsg *deleteMsg) TimeTick() Timestamp {
return dMsg.timeRange.timestampMax
}
func (gcMsg *gcMsg) TimeTick() Timestamp {
return gcMsg.timeRange.timestampMax
}

View File

@ -0,0 +1,58 @@
package datanode
import (
"context"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
)
func newDmInputNode(ctx context.Context) *flowgraph.InputNode {
receiveBufSize := Params.InsertReceiveBufSize
pulsarBufSize := Params.InsertPulsarBufSize
msgStreamURL := Params.PulsarAddress
consumeChannels := Params.InsertChannelNames
consumeSubName := Params.MsgChannelSubName
insertStream := msgstream.NewPulsarTtMsgStream(ctx, receiveBufSize)
// TODO: could panic on a nil pointer
insertStream.SetPulsarClient(msgStreamURL)
unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
// TODO: could panic on a nil pointer
insertStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
var stream msgstream.MsgStream = insertStream
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
node := flowgraph.NewInputNode(&stream, "dmInputNode", maxQueueLength, maxParallelism)
return node
}
func newDDInputNode(ctx context.Context) *flowgraph.InputNode {
receiveBufSize := Params.DDReceiveBufSize
pulsarBufSize := Params.DDPulsarBufSize
msgStreamURL := Params.PulsarAddress
consumeChannels := Params.DDChannelNames
consumeSubName := Params.MsgChannelSubName
ddStream := msgstream.NewPulsarTtMsgStream(ctx, receiveBufSize)
ddStream.SetPulsarClient(msgStreamURL)
unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
ddStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
var stream msgstream.MsgStream = ddStream
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
node := flowgraph.NewInputNode(&stream, "ddInputNode", maxQueueLength, maxParallelism)
return node
}

View File

@ -0,0 +1,9 @@
package datanode
import "github.com/zilliztech/milvus-distributed/internal/util/flowgraph"
type (
Node = flowgraph.Node
BaseNode = flowgraph.BaseNode
InputNode = flowgraph.InputNode
)

View File

@ -0,0 +1,39 @@
package datanode
type (
// segID: set when flushCompleted == true, to tell
// the flush_sync_service which segment flush this
// DDL flush belongs to, so that the DDL flush and
// the insert flush can be synchronized.
ddlBinlogPathMsg struct {
collID UniqueID
segID UniqueID
paths []string
}
ddlFlushSyncMsg struct {
ddlBinlogPathMsg
flushCompleted bool
}
insertBinlogPathMsg struct {
ts Timestamp
segID UniqueID
fieldID int64 // TODO GOOSE may need to change
paths []string
}
// This Msg notifies flushSyncService to
// 1. append binlog paths
// 2. set the flush-completed status
//
// When `flushCompleted == false`,
// `ts` means the OpenTime of a segFlushMeta.
// When `flushCompleted == true`,
// `ts` means the CloseTime of a segFlushMeta,
// and `fieldID` and `paths` must be empty.
insertFlushSyncMsg struct {
insertBinlogPathMsg
flushCompleted bool
}
)
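A minimal standalone sketch of the exchange these messages describe (type names simplified, IDs and paths made up): several path-append messages first, then one completion message whose ts carries the CloseTime:
package main

import "fmt"

// Simplified stand-ins for insertBinlogPathMsg / insertFlushSyncMsg above.
type pathMsg struct {
	ts      uint64
	segID   int64
	fieldID int64
	paths   []string
}

type flushMsg struct {
	pathMsg
	flushCompleted bool
}

func main() {
	ch := make(chan *flushMsg, 4)

	// Phase 1: append binlog paths; ts carries the segment's OpenTime.
	ch <- &flushMsg{pathMsg: pathMsg{ts: 1000, segID: 7, fieldID: 100, paths: []string{"path-a"}}}
	ch <- &flushMsg{pathMsg: pathMsg{ts: 1000, segID: 7, fieldID: 101, paths: []string{"path-b"}}}
	// Phase 2: mark the flush completed; ts carries the CloseTime,
	// fieldID and paths stay empty.
	ch <- &flushMsg{pathMsg: pathMsg{ts: 2010, segID: 7}, flushCompleted: true}
	close(ch)

	for msg := range ch {
		if msg.flushCompleted {
			fmt.Println("segment", msg.segID, "flush completed at", msg.ts)
			continue
		}
		fmt.Println("append", msg.paths, "for field", msg.fieldID, "of segment", msg.segID)
	}
}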

View File

@ -0,0 +1,120 @@
package datanode
import (
"context"
"log"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"go.etcd.io/etcd/clientv3"
)
type (
flushSyncService struct {
ctx context.Context
metaTable *metaTable
ddChan chan *ddlFlushSyncMsg
insertChan chan *insertFlushSyncMsg
ddFlushed map[UniqueID]bool // Segment ID
insertFlushed map[UniqueID]bool // Segment ID
}
)
func newFlushSyncService(ctx context.Context,
ddChan chan *ddlFlushSyncMsg, insertChan chan *insertFlushSyncMsg) *flushSyncService {
service := &flushSyncService{
ctx: ctx,
ddChan: ddChan,
insertChan: insertChan,
ddFlushed: make(map[UniqueID]bool),
insertFlushed: make(map[UniqueID]bool),
}
// New metaTable
etcdAddr := Params.EtcdAddress
etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
if err != nil {
panic(err)
}
etcdKV := etcdkv.NewEtcdKV(etcdClient, Params.MetaRootPath)
metaKV, err2 := NewMetaTable(etcdKV)
if err2 != nil {
panic(err2)
}
service.metaTable = metaKV
return service
}
func (fService *flushSyncService) completeDDFlush(segID UniqueID) {
if _, ok := fService.ddFlushed[segID]; !ok {
fService.ddFlushed[segID] = true
return
}
fService.ddFlushed[segID] = true
}
func (fService *flushSyncService) completeInsertFlush(segID UniqueID) {
if _, ok := fService.insertFlushed[segID]; !ok {
fService.insertFlushed[segID] = true
return
}
fService.insertFlushed[segID] = true
}
func (fService *flushSyncService) FlushCompleted(segID UniqueID) bool {
isddFlushed, ok := fService.ddFlushed[segID]
if !ok {
return false
}
isinsertFlushed, ok := fService.insertFlushed[segID]
if !ok {
return false
}
return isddFlushed && isinsertFlushed
}
func (fService *flushSyncService) start() {
for {
select {
case <-fService.ctx.Done():
return
case ddFlushMsg := <-fService.ddChan:
if ddFlushMsg == nil {
continue
}
if !ddFlushMsg.flushCompleted {
err := fService.metaTable.AppendDDLBinlogPaths(ddFlushMsg.collID, ddFlushMsg.paths)
if err != nil {
log.Println("Append segBinlog Error")
// GOOSE TODO error handling
}
continue
}
fService.completeDDFlush(ddFlushMsg.segID)
case insertFlushMsg := <-fService.insertChan:
if insertFlushMsg == nil {
continue
}
if !insertFlushMsg.flushCompleted {
err := fService.metaTable.AppendSegBinlogPaths(insertFlushMsg.ts, insertFlushMsg.segID, insertFlushMsg.fieldID,
insertFlushMsg.paths)
if err != nil {
log.Println("Append segBinlog Error")
// GOOSE TODO error handling
}
continue
}
fService.completeInsertFlush(insertFlushMsg.segID)
if fService.FlushCompleted(insertFlushMsg.segID) {
log.Printf("Seg(%d) flush completed.", insertFlushMsg.segID)
fService.metaTable.CompleteFlush(insertFlushMsg.ts, insertFlushMsg.segID)
}
}
}
}

View File

@ -0,0 +1,166 @@
package datanode
import (
"context"
"log"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.etcd.io/etcd/clientv3"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
)
func clearEtcd(rootPath string) error {
etcdAddr := Params.EtcdAddress
etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
if err != nil {
return err
}
etcdKV := etcdkv.NewEtcdKV(etcdClient, rootPath)
err = etcdKV.RemoveWithPrefix("writer/segment")
if err != nil {
return err
}
_, _, err = etcdKV.LoadWithPrefix("writer/segment")
if err != nil {
return err
}
log.Println("Clear ETCD with prefix writer/segment ")
err = etcdKV.RemoveWithPrefix("writer/ddl")
if err != nil {
return err
}
_, _, err = etcdKV.LoadWithPrefix("writer/ddl")
if err != nil {
return err
}
log.Println("Clear ETCD with prefix writer/ddl")
return nil
}
func TestFlushSyncService_Start(t *testing.T) {
const ctxTimeInMillisecond = 3000
const closeWithDeadline = false
var ctx context.Context
var cancel context.CancelFunc
if closeWithDeadline {
d := time.Now().Add(ctxTimeInMillisecond * time.Millisecond)
ctx, cancel = context.WithDeadline(context.Background(), d)
defer cancel()
} else {
// ctx = context.Background()
ctx, cancel = context.WithCancel(context.Background())
defer cancel()
}
ddChan := make(chan *ddlFlushSyncMsg, 10)
defer close(ddChan)
insertChan := make(chan *insertFlushSyncMsg, 10)
defer close(insertChan)
testPath := "/test/datanode/root/meta"
err := clearEtcd(testPath)
require.NoError(t, err)
Params.MetaRootPath = testPath
fService := newFlushSyncService(ctx, ddChan, insertChan)
assert.Equal(t, testPath, fService.metaTable.client.(*etcdkv.EtcdKV).GetPath("."))
t.Run("FlushSyncService", func(t *testing.T) {
go fService.start()
SegID := UniqueID(100)
ddMsgs := genDdlFlushSyncMsgs(SegID)
insertMsgs := geninsertFlushSyncMsgs(SegID)
for _, msg := range ddMsgs {
ddChan <- msg
time.Sleep(time.Millisecond * 50)
}
for _, msg := range insertMsgs {
insertChan <- msg
time.Sleep(time.Millisecond * 50)
}
for {
if len(ddChan) == 0 && len(insertChan) == 0 && fService.FlushCompleted(SegID) {
break
}
}
ret, err := fService.metaTable.getSegBinlogPaths(SegID)
assert.NoError(t, err)
assert.Equal(t, map[int64][]string{
0: {"x", "y", "z"},
1: {"x", "y", "z"},
2: {"x", "y", "z"},
3: {"x", "y", "z"},
4: {"x", "y", "z"},
}, ret)
ts, err := fService.metaTable.getFlushOpenTime(SegID)
assert.NoError(t, err)
assert.Equal(t, Timestamp(1000), ts)
ts, err = fService.metaTable.getFlushCloseTime(SegID)
assert.NoError(t, err)
assert.Equal(t, Timestamp(2010), ts)
cp, err := fService.metaTable.checkFlushComplete(SegID)
assert.NoError(t, err)
assert.Equal(t, true, cp)
})
}
func genDdlFlushSyncMsgs(segID UniqueID) []*ddlFlushSyncMsg {
ret := make([]*ddlFlushSyncMsg, 0)
for i := 0; i < 5; i++ {
ret = append(ret, &ddlFlushSyncMsg{
flushCompleted: false,
ddlBinlogPathMsg: ddlBinlogPathMsg{
collID: UniqueID(100),
paths: []string{"a", "b", "c"},
},
})
}
ret = append(ret, &ddlFlushSyncMsg{
flushCompleted: true,
ddlBinlogPathMsg: ddlBinlogPathMsg{
segID: segID,
},
})
return ret
}
func geninsertFlushSyncMsgs(segID UniqueID) []*insertFlushSyncMsg {
ret := make([]*insertFlushSyncMsg, 0)
for i := 0; i < 5; i++ {
ret = append(ret, &insertFlushSyncMsg{
flushCompleted: false,
insertBinlogPathMsg: insertBinlogPathMsg{
ts: Timestamp(1000 + i),
segID: segID,
fieldID: int64(i),
paths: []string{"x", "y", "z"},
},
})
}
ret = append(ret, &insertFlushSyncMsg{
flushCompleted: true,
insertBinlogPathMsg: insertBinlogPathMsg{
ts: Timestamp(2010),
segID: segID,
},
})
return ret
}

View File

@ -0,0 +1,135 @@
package datanode
import (
"context"
"fmt"
"log"
"path"
"reflect"
"strings"
"time"
"github.com/golang/protobuf/proto"
"go.etcd.io/etcd/clientv3"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
)
type metaService struct {
ctx context.Context
kvBase *etcdkv.EtcdKV
replica collectionReplica
}
func newMetaService(ctx context.Context, replica collectionReplica) *metaService {
ETCDAddr := Params.EtcdAddress
MetaRootPath := Params.MetaRootPath
cli, _ := clientv3.New(clientv3.Config{
Endpoints: []string{ETCDAddr},
DialTimeout: 5 * time.Second,
})
return &metaService{
ctx: ctx,
kvBase: etcdkv.NewEtcdKV(cli, MetaRootPath),
replica: replica,
}
}
func (mService *metaService) start() {
// init from meta
err := mService.loadCollections()
if err != nil {
log.Fatal("metaService loadCollections failed")
}
}
func GetCollectionObjID(key string) string {
ETCDRootPath := Params.MetaRootPath
prefix := path.Join(ETCDRootPath, CollectionPrefix) + "/"
return strings.TrimPrefix(key, prefix)
}
func isCollectionObj(key string) bool {
ETCDRootPath := Params.MetaRootPath
prefix := path.Join(ETCDRootPath, CollectionPrefix) + "/"
prefix = strings.TrimSpace(prefix)
index := strings.Index(key, prefix)
return index == 0
}
func isSegmentObj(key string) bool {
ETCDRootPath := Params.MetaRootPath
prefix := path.Join(ETCDRootPath, SegmentPrefix) + "/"
prefix = strings.TrimSpace(prefix)
index := strings.Index(key, prefix)
return index == 0
}
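As a quick standalone illustration of the prefix handling above (the root path and key are made-up examples):
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	// Assumed example root path; the real value comes from Params.MetaRootPath.
	metaRootPath := "by-dev/meta"
	collectionPrefix := path.Join(metaRootPath, "/collection/") + "/" // "by-dev/meta/collection/"

	key := "by-dev/meta/collection/437"
	fmt.Println(strings.Index(key, collectionPrefix) == 0) // true  -> isCollectionObj
	fmt.Println(strings.TrimPrefix(key, collectionPrefix)) // "437" -> GetCollectionObjID
}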
func printCollectionStruct(obj *etcdpb.CollectionMeta) {
v := reflect.ValueOf(obj)
v = reflect.Indirect(v)
typeOfS := v.Type()
for i := 0; i < v.NumField(); i++ {
if typeOfS.Field(i).Name == "GrpcMarshalString" {
continue
}
fmt.Printf("Field: %s\tValue: %v\n", typeOfS.Field(i).Name, v.Field(i).Interface())
}
}
func (mService *metaService) processCollectionCreate(id string, value string) {
//println(fmt.Sprintf("Create Collection:$%s$", id))
col := mService.collectionUnmarshal(value)
if col != nil {
schema := col.Schema
schemaBlob := proto.MarshalTextString(schema)
err := mService.replica.addCollection(col.ID, schemaBlob)
if err != nil {
log.Println(err)
}
}
}
func (mService *metaService) loadCollections() error {
keys, values, err := mService.kvBase.LoadWithPrefix(CollectionPrefix)
if err != nil {
return err
}
for i := range keys {
objID := GetCollectionObjID(keys[i])
mService.processCollectionCreate(objID, values[i])
}
return nil
}
//----------------------------------------------------------------------- Unmarshal and Marshal
func (mService *metaService) collectionUnmarshal(value string) *etcdpb.CollectionMeta {
col := etcdpb.CollectionMeta{}
err := proto.UnmarshalText(value, &col)
if err != nil {
log.Println(err)
return nil
}
return &col
}
func (mService *metaService) collectionMarshal(col *etcdpb.CollectionMeta) string {
value := proto.MarshalTextString(col)
if value == "" {
log.Println("marshal collection failed")
return ""
}
return value
}

View File

@ -0,0 +1,100 @@
package datanode
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
)
func TestMetaService_start(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
replica := newReplica()
metaService := newMetaService(ctx, replica)
metaService.start()
}
func TestMetaService_getCollectionObjId(t *testing.T) {
var key = "/collection/collection0"
var collectionObjID1 = GetCollectionObjID(key)
assert.Equal(t, collectionObjID1, "/collection/collection0")
key = "fakeKey"
var collectionObjID2 = GetCollectionObjID(key)
assert.Equal(t, collectionObjID2, "fakeKey")
}
func TestMetaService_isCollectionObj(t *testing.T) {
var key = Params.MetaRootPath + "/collection/collection0"
var b1 = isCollectionObj(key)
assert.Equal(t, b1, true)
key = Params.MetaRootPath + "/segment/segment0"
var b2 = isCollectionObj(key)
assert.Equal(t, b2, false)
}
func TestMetaService_processCollectionCreate(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
replica := newReplica()
metaService := newMetaService(ctx, replica)
defer cancel()
id := "0"
value := `schema: <
name: "test"
fields: <
fieldID:100
name: "vec"
data_type: VECTOR_FLOAT
type_params: <
key: "dim"
value: "16"
>
index_params: <
key: "metric_type"
value: "L2"
>
>
fields: <
fieldID:101
name: "age"
data_type: INT32
type_params: <
key: "dim"
value: "1"
>
>
>
segmentIDs: 0
partition_tags: "default"
`
metaService.processCollectionCreate(id, value)
collectionNum := replica.getCollectionNum()
assert.Equal(t, collectionNum, 1)
collection, err := replica.getCollectionByName("test")
assert.NoError(t, err)
assert.Equal(t, collection.ID(), UniqueID(0))
}
func TestMetaService_loadCollections(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
replica := newReplica()
metaService := newMetaService(ctx, replica)
err2 := (*metaService).loadCollections()
assert.Nil(t, err2)
}

View File

@ -0,0 +1,230 @@
package datanode
import (
"path"
"strconv"
"sync"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
pb "github.com/zilliztech/milvus-distributed/internal/proto/writerpb"
)
type metaTable struct {
client kv.TxnBase // client of a reliable kv service, i.e. etcd client
segID2FlushMeta map[UniqueID]pb.SegmentFlushMeta // segment id to flush meta
collID2DdlMeta map[UniqueID]*pb.DDLFlushMeta
lock sync.RWMutex
}
func NewMetaTable(kv kv.TxnBase) (*metaTable, error) {
mt := &metaTable{
client: kv,
lock: sync.RWMutex{},
}
err := mt.reloadSegMetaFromKV()
if err != nil {
return nil, err
}
err = mt.reloadDdlMetaFromKV()
if err != nil {
return nil, err
}
return mt, nil
}
func (mt *metaTable) AppendDDLBinlogPaths(collID UniqueID, paths []string) error {
mt.lock.Lock()
defer mt.lock.Unlock()
_, ok := mt.collID2DdlMeta[collID]
if !ok {
mt.collID2DdlMeta[collID] = &pb.DDLFlushMeta{
CollectionID: collID,
BinlogPaths: make([]string, 0),
}
}
meta := mt.collID2DdlMeta[collID]
meta.BinlogPaths = append(meta.BinlogPaths, paths...)
return mt.saveDDLFlushMeta(meta)
}
func (mt *metaTable) AppendSegBinlogPaths(tsOpen Timestamp, segmentID UniqueID, fieldID int64, dataPaths []string) error {
_, ok := mt.segID2FlushMeta[segmentID]
if !ok {
err := mt.addSegmentFlush(segmentID, tsOpen)
if err != nil {
return err
}
}
meta := mt.segID2FlushMeta[segmentID]
found := false
for _, field := range meta.Fields {
if field.FieldID == fieldID {
field.BinlogPaths = append(field.BinlogPaths, dataPaths...)
found = true
break
}
}
if !found {
newField := &pb.FieldFlushMeta{
FieldID: fieldID,
BinlogPaths: dataPaths,
}
meta.Fields = append(meta.Fields, newField)
}
return mt.saveSegFlushMeta(&meta)
}
func (mt *metaTable) CompleteFlush(tsClose Timestamp, segmentID UniqueID) error {
mt.lock.Lock()
defer mt.lock.Unlock()
meta, ok := mt.segID2FlushMeta[segmentID]
if !ok {
return errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
}
meta.IsClosed = true
meta.CloseTime = tsClose
return mt.saveSegFlushMeta(&meta)
}
// metaTable.lock.Lock() before call this function
func (mt *metaTable) saveDDLFlushMeta(meta *pb.DDLFlushMeta) error {
value := proto.MarshalTextString(meta)
mt.collID2DdlMeta[meta.CollectionID] = meta
prefix := path.Join(Params.DDLFlushMetaSubPath, strconv.FormatInt(meta.CollectionID, 10))
return mt.client.Save(prefix, value)
}
func (mt *metaTable) reloadDdlMetaFromKV() error {
mt.collID2DdlMeta = make(map[UniqueID]*pb.DDLFlushMeta)
_, values, err := mt.client.LoadWithPrefix(Params.DDLFlushMetaSubPath)
if err != nil {
return err
}
for _, value := range values {
ddlMeta := &pb.DDLFlushMeta{}
err = proto.UnmarshalText(value, ddlMeta)
if err != nil {
return err
}
mt.collID2DdlMeta[ddlMeta.CollectionID] = ddlMeta
}
return nil
}
// metaTable.lock.Lock() before call this function
func (mt *metaTable) saveSegFlushMeta(meta *pb.SegmentFlushMeta) error {
value := proto.MarshalTextString(meta)
mt.segID2FlushMeta[meta.SegmentID] = *meta
prefix := path.Join(Params.SegFlushMetaSubPath, strconv.FormatInt(meta.SegmentID, 10))
return mt.client.Save(prefix, value)
}
func (mt *metaTable) reloadSegMetaFromKV() error {
mt.segID2FlushMeta = make(map[UniqueID]pb.SegmentFlushMeta)
_, values, err := mt.client.LoadWithPrefix(Params.SegFlushMetaSubPath)
if err != nil {
return err
}
for _, value := range values {
flushMeta := pb.SegmentFlushMeta{}
err = proto.UnmarshalText(value, &flushMeta)
if err != nil {
return err
}
mt.segID2FlushMeta[flushMeta.SegmentID] = flushMeta
}
return nil
}
func (mt *metaTable) addSegmentFlush(segmentID UniqueID, timestamp Timestamp) error {
mt.lock.Lock()
defer mt.lock.Unlock()
_, ok := mt.segID2FlushMeta[segmentID]
if ok {
return errors.Errorf("segment already exists with ID = " + strconv.FormatInt(segmentID, 10))
}
meta := pb.SegmentFlushMeta{
IsClosed: false,
SegmentID: segmentID,
OpenTime: timestamp,
}
return mt.saveSegFlushMeta(&meta)
}
func (mt *metaTable) getFlushCloseTime(segmentID UniqueID) (Timestamp, error) {
mt.lock.RLock()
defer mt.lock.RUnlock()
meta, ok := mt.segID2FlushMeta[segmentID]
if !ok {
return typeutil.ZeroTimestamp, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
}
return meta.CloseTime, nil
}
func (mt *metaTable) getFlushOpenTime(segmentID UniqueID) (Timestamp, error) {
mt.lock.RLock()
defer mt.lock.RUnlock()
meta, ok := mt.segID2FlushMeta[segmentID]
if !ok {
return typeutil.ZeroTimestamp, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
}
return meta.OpenTime, nil
}
func (mt *metaTable) checkFlushComplete(segmentID UniqueID) (bool, error) {
mt.lock.RLock()
defer mt.lock.RUnlock()
meta, ok := mt.segID2FlushMeta[segmentID]
if !ok {
return false, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
}
return meta.IsClosed, nil
}
func (mt *metaTable) getSegBinlogPaths(segmentID UniqueID) (map[int64][]string, error) {
mt.lock.RLock()
defer mt.lock.RUnlock()
meta, ok := mt.segID2FlushMeta[segmentID]
if !ok {
return nil, errors.Errorf("segment not exists with ID = " + strconv.FormatInt(segmentID, 10))
}
ret := make(map[int64][]string)
for _, field := range meta.Fields {
ret[field.FieldID] = field.BinlogPaths
}
return ret, nil
}
func (mt *metaTable) getDDLBinlogPaths(collID UniqueID) (map[UniqueID][]string, error) {
mt.lock.RLock()
defer mt.lock.RUnlock()
meta, ok := mt.collID2DdlMeta[collID]
if !ok {
return nil, errors.Errorf("collection not exists with ID = " + strconv.FormatInt(collID, 10))
}
ret := make(map[UniqueID][]string)
ret[meta.CollectionID] = meta.BinlogPaths
return ret, nil
}
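// Example (not part of this change): a minimal sketch of how the flush-meta
// lifecycle above is expected to be driven, given an already-constructed
// metaTable. The segment/field IDs and the binlog path are placeholders;
// see the test file below for how a metaTable is built on an etcd-backed kv.
func exampleFlushMetaLifecycle(meta *metaTable) error {
    segID, fieldID := UniqueID(1), int64(100)
    // 1. register the segment when it is opened for flushing
    if err := meta.addSegmentFlush(segID, Timestamp(10)); err != nil {
        return err
    }
    // 2. record binlog paths per field as they are written to storage
    if err := meta.AppendSegBinlogPaths(Timestamp(11), segID, fieldID, []string{"path/to/binlog"}); err != nil {
        return err
    }
    // 3. mark the flush as closed once all binlogs are persisted
    if err := meta.CompleteFlush(Timestamp(20), segID); err != nil {
        return err
    }
    // 4. read back what was persisted: map[100:[path/to/binlog]]
    _, err := meta.getSegBinlogPaths(segID)
    return err
}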

View File

@ -0,0 +1,125 @@
package datanode
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"go.etcd.io/etcd/clientv3"
)
func TestMetaTable_all(t *testing.T) {
etcdAddr := Params.EtcdAddress
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
require.NoError(t, err)
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root/writer")
_, err = cli.Delete(context.TODO(), "/etcd/test/root/writer", clientv3.WithPrefix())
require.NoError(t, err)
meta, err := NewMetaTable(etcdKV)
assert.NoError(t, err)
defer meta.client.Close()
t.Run("TestMetaTable_addSegmentFlush_and_OpenTime", func(t *testing.T) {
tsOpen := Timestamp(100)
err := meta.addSegmentFlush(101, tsOpen)
assert.NoError(t, err)
exp, err := meta.getFlushOpenTime(101)
assert.NoError(t, err)
assert.Equal(t, tsOpen, exp)
tsOpen = Timestamp(200)
err = meta.addSegmentFlush(102, tsOpen)
assert.NoError(t, err)
exp, err = meta.getFlushOpenTime(102)
assert.NoError(t, err)
assert.Equal(t, tsOpen, exp)
tsOpen = Timestamp(200)
err = meta.addSegmentFlush(103, tsOpen)
assert.NoError(t, err)
exp, err = meta.getFlushOpenTime(103)
assert.NoError(t, err)
assert.Equal(t, tsOpen, exp)
err = meta.reloadSegMetaFromKV()
assert.NoError(t, err)
})
t.Run("TestMetaTable_AppendSegBinlogPaths", func(t *testing.T) {
segmentID := UniqueID(201)
tsOpen := Timestamp(1000)
err := meta.addSegmentFlush(segmentID, tsOpen)
assert.Nil(t, err)
exp := map[int64][]string{
1: {"a", "b", "c"},
2: {"b", "a", "c"},
}
for fieldID, dataPaths := range exp {
for _, dp := range dataPaths {
err = meta.AppendSegBinlogPaths(tsOpen, segmentID, fieldID, []string{dp})
assert.Nil(t, err)
err = meta.AppendSegBinlogPaths(tsOpen, segmentID, fieldID, []string{dp})
assert.Nil(t, err)
}
}
ret, err := meta.getSegBinlogPaths(segmentID)
assert.Nil(t, err)
assert.Equal(t,
map[int64][]string{
1: {"a", "a", "b", "b", "c", "c"},
2: {"b", "b", "a", "a", "c", "c"}},
ret)
})
t.Run("TestMetaTable_AppendDDLBinlogPaths", func(t *testing.T) {
collID2Paths := map[UniqueID][]string{
301: {"a", "b", "c"},
302: {"c", "b", "a"},
}
for collID, dataPaths := range collID2Paths {
for _, dp := range dataPaths {
err = meta.AppendDDLBinlogPaths(collID, []string{dp})
assert.Nil(t, err)
}
}
for k, v := range collID2Paths {
ret, err := meta.getDDLBinlogPaths(k)
assert.Nil(t, err)
assert.Equal(t, map[UniqueID][]string{k: v}, ret)
}
})
t.Run("TestMetaTable_CompleteFlush_and_CloseTime", func(t *testing.T) {
var segmentID UniqueID = 401
openTime := Timestamp(1000)
closeTime := Timestamp(10000)
err := meta.addSegmentFlush(segmentID, openTime)
assert.NoError(t, err)
ret, err := meta.checkFlushComplete(segmentID)
assert.NoError(t, err)
assert.Equal(t, false, ret)
err = meta.CompleteFlush(closeTime, segmentID)
assert.NoError(t, err)
ret, err = meta.checkFlushComplete(segmentID)
assert.NoError(t, err)
assert.Equal(t, true, ret)
ts, err := meta.getFlushCloseTime(segmentID)
assert.NoError(t, err)
assert.Equal(t, closeTime, ts)
})
}

View File

@ -0,0 +1,429 @@
package datanode
import (
"log"
"os"
"path"
"strconv"
"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
)
type ParamTable struct {
// === PRIVATE Configs ===
dataNodeIDList []UniqueID
paramtable.BaseTable
// === DataNode Internal Components Configs ===
DataNodeID UniqueID
FlowGraphMaxQueueLength int32
FlowGraphMaxParallelism int32
FlushInsertBufferSize int32
FlushDdBufferSize int32
InsertBinlogRootPath string
DdBinlogRootPath string
// === DataNode External Components Configs ===
// --- Master ---
MasterAddress string
// --- Pulsar ---
PulsarAddress string
// - insert channel -
InsertChannelNames []string
InsertChannelRange []int
InsertReceiveBufSize int64
InsertPulsarBufSize int64
// - dd channel -
DDChannelNames []string
DDReceiveBufSize int64
DDPulsarBufSize int64
// - seg statistics channel -
SegmentStatisticsChannelName string
SegmentStatisticsBufSize int64
SegmentStatisticsPublishInterval int
// - timetick channel -
TimeTickChannelName string
// - channel subname -
MsgChannelSubName string
DefaultPartitionName string
// --- ETCD ---
EtcdAddress string
MetaRootPath string
SegFlushMetaSubPath string
DDLFlushMetaSubPath string
// --- MinIO ---
MinioAddress string
MinioAccessKeyID string
MinioSecretAccessKey string
MinioUseSSL bool
MinioBucketName string
}
var Params ParamTable
func (p *ParamTable) Init() {
p.BaseTable.Init()
err := p.LoadYaml("advanced/data_node.yaml")
if err != nil {
panic(err)
}
// === DataNode Internal Components Configs ===
p.initDataNodeID()
p.initFlowGraphMaxQueueLength()
p.initFlowGraphMaxParallelism()
p.initFlushInsertBufferSize()
p.initFlushDdBufferSize()
p.initInsertBinlogRootPath()
p.initDdBinlogRootPath()
// === DataNode External Components Configs ===
// --- Master ---
p.initMasterAddress()
// --- Pulsar ---
p.initPulsarAddress()
// - insert channel -
p.initInsertChannelNames()
p.initInsertChannelRange()
p.initInsertReceiveBufSize()
p.initInsertPulsarBufSize()
// - dd channel -
p.initDDChannelNames()
p.initDDReceiveBufSize()
p.initDDPulsarBufSize()
// - seg statistics channel -
p.initSegmentStatisticsChannelName()
p.initSegmentStatisticsBufSize()
p.initSegmentStatisticsPublishInterval()
// - timetick channel -
p.initTimeTickChannelName()
// - channel subname -
p.initMsgChannelSubName()
// --- ETCD ---
p.initEtcdAddress()
p.initMetaRootPath()
p.initSegFlushMetaSubPath()
p.initDDLFlushMetaSubPath()
// --- MinIO ---
p.initMinioAddress()
p.initMinioAccessKeyID()
p.initMinioSecretAccessKey()
p.initMinioUseSSL()
p.initMinioBucketName()
p.initDefaultPartitionName()
// p.initSliceIndex()
}
// ==== DataNode internal components configs ====
func (p *ParamTable) initDataNodeID() {
p.dataNodeIDList = p.DataNodeIDList()
dataNodeIDStr := os.Getenv("DATA_NODE_ID")
if dataNodeIDStr == "" {
if len(p.dataNodeIDList) <= 0 {
dataNodeIDStr = "0"
} else {
dataNodeIDStr = strconv.Itoa(int(p.dataNodeIDList[0]))
}
}
err := p.Save("_dataNodeID", dataNodeIDStr)
if err != nil {
panic(err)
}
p.DataNodeID = p.ParseInt64("_dataNodeID")
}
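// For example, with dataNodeIDList = [3] in the config and no DATA_NODE_ID
// environment variable set, DataNodeID resolves to 3 (as asserted in the
// param table test below); exporting DATA_NODE_ID=7 (a hypothetical value)
// would override it.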
// ---- flowgraph configs ----
func (p *ParamTable) initFlowGraphMaxQueueLength() {
p.FlowGraphMaxQueueLength = p.ParseInt32("dataNode.dataSync.flowGraph.maxQueueLength")
}
func (p *ParamTable) initFlowGraphMaxParallelism() {
p.FlowGraphMaxParallelism = p.ParseInt32("dataNode.dataSync.flowGraph.maxParallelism")
}
// ---- flush configs ----
func (p *ParamTable) initFlushInsertBufferSize() {
p.FlushInsertBufferSize = p.ParseInt32("dataNode.flush.insertBufSize")
}
func (p *ParamTable) initFlushDdBufferSize() {
p.FlushDdBufferSize = p.ParseInt32("dataNode.flush.ddBufSize")
}
func (p *ParamTable) initInsertBinlogRootPath() {
// GOOSE TODO: rootPath change to TenantID
rootPath, err := p.Load("etcd.rootPath")
if err != nil {
panic(err)
}
p.InsertBinlogRootPath = path.Join(rootPath, "insert_log")
}
func (p *ParamTable) initDdBinlogRootPath() {
// GOOSE TODO: rootPath change to TenantID
rootPath, err := p.Load("etcd.rootPath")
if err != nil {
panic(err)
}
p.DdBinlogRootPath = path.Join(rootPath, "data_definition_log")
}
// ===== DataNode External components configs ====
// ---- Master ----
func (p *ParamTable) initMasterAddress() {
addr, err := p.Load("_MasterAddress")
if err != nil {
panic(err)
}
p.MasterAddress = addr
}
// ---- Pulsar ----
func (p *ParamTable) initPulsarAddress() {
url, err := p.Load("_PulsarAddress")
if err != nil {
panic(err)
}
p.PulsarAddress = url
}
// - insert channel -
func (p *ParamTable) initInsertChannelNames() {
prefix, err := p.Load("msgChannel.chanNamePrefix.insert")
if err != nil {
log.Fatal(err)
}
prefix += "-"
channelRange, err := p.Load("msgChannel.channelRange.insert")
if err != nil {
panic(err)
}
channelIDs := paramtable.ConvertRangeToIntSlice(channelRange, ",")
var ret []string
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
sep := len(channelIDs) / len(p.dataNodeIDList)
index := p.sliceIndex()
if index == -1 {
panic("dataNodeID not Match with Config")
}
start := index * sep
p.InsertChannelNames = ret[start : start+sep]
}
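// Worked example: with the default config (channelRange.insert = "0,2",
// dataNodeIDList = [3]) the candidate channels are ["insert-0", "insert-1"],
// sep = 2/1 = 2 and index = 0, so this node subscribes to both channels,
// matching the expectation in the param table test below.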
func (p *ParamTable) initInsertChannelRange() {
insertChannelRange, err := p.Load("msgChannel.channelRange.insert")
if err != nil {
panic(err)
}
p.InsertChannelRange = paramtable.ConvertRangeToIntRange(insertChannelRange, ",")
}
func (p *ParamTable) initInsertReceiveBufSize() {
p.InsertReceiveBufSize = p.ParseInt64("dataNode.msgStream.insert.recvBufSize")
}
func (p *ParamTable) initInsertPulsarBufSize() {
p.InsertPulsarBufSize = p.ParseInt64("dataNode.msgStream.insert.pulsarBufSize")
}
// - dd channel -
func (p *ParamTable) initDDChannelNames() {
prefix, err := p.Load("msgChannel.chanNamePrefix.dataDefinition")
if err != nil {
panic(err)
}
prefix += "-"
iRangeStr, err := p.Load("msgChannel.channelRange.dataDefinition")
if err != nil {
panic(err)
}
channelIDs := paramtable.ConvertRangeToIntSlice(iRangeStr, ",")
var ret []string
for _, ID := range channelIDs {
ret = append(ret, prefix+strconv.Itoa(ID))
}
p.DDChannelNames = ret
}
func (p *ParamTable) initDDReceiveBufSize() {
revBufSize, err := p.Load("dataNode.msgStream.dataDefinition.recvBufSize")
if err != nil {
panic(err)
}
bufSize, err := strconv.Atoi(revBufSize)
if err != nil {
panic(err)
}
p.DDReceiveBufSize = int64(bufSize)
}
func (p *ParamTable) initDDPulsarBufSize() {
pulsarBufSize, err := p.Load("dataNode.msgStream.dataDefinition.pulsarBufSize")
if err != nil {
panic(err)
}
bufSize, err := strconv.Atoi(pulsarBufSize)
if err != nil {
panic(err)
}
p.DDPulsarBufSize = int64(bufSize)
}
// - seg statistics channel -
func (p *ParamTable) initSegmentStatisticsChannelName() {
channelName, err := p.Load("msgChannel.chanNamePrefix.dataNodeSegStatistics")
if err != nil {
panic(err)
}
p.SegmentStatisticsChannelName = channelName
}
func (p *ParamTable) initSegmentStatisticsBufSize() {
p.SegmentStatisticsBufSize = p.ParseInt64("dataNode.msgStream.segStatistics.recvBufSize")
}
func (p *ParamTable) initSegmentStatisticsPublishInterval() {
p.SegmentStatisticsPublishInterval = p.ParseInt("dataNode.msgStream.segStatistics.publishInterval")
}
// - Timetick channel -
func (p *ParamTable) initTimeTickChannelName() {
channels, err := p.Load("msgChannel.chanNamePrefix.dataNodeTimeTick")
if err != nil {
panic(err)
}
p.TimeTickChannelName = channels + "-" + strconv.FormatInt(p.DataNodeID, 10)
}
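// For example, with the "dataNodeTimeTick" prefix from the msgChannel config
// and DataNodeID = 3, this yields "dataNodeTimeTick-3".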
// - msg channel subname -
func (p *ParamTable) initMsgChannelSubName() {
name, err := p.Load("msgChannel.subNamePrefix.dataNodeSubNamePrefix")
if err != nil {
log.Panic(err)
}
p.MsgChannelSubName = name + "-" + strconv.FormatInt(p.DataNodeID, 10)
}
func (p *ParamTable) initDefaultPartitionName() {
defaultTag, err := p.Load("common.defaultPartitionTag")
if err != nil {
panic(err)
}
p.DefaultPartitionName = defaultTag
}
// --- ETCD ---
func (p *ParamTable) initEtcdAddress() {
addr, err := p.Load("_EtcdAddress")
if err != nil {
panic(err)
}
p.EtcdAddress = addr
}
func (p *ParamTable) initMetaRootPath() {
rootPath, err := p.Load("etcd.rootPath")
if err != nil {
panic(err)
}
subPath, err := p.Load("etcd.metaSubPath")
if err != nil {
panic(err)
}
p.MetaRootPath = path.Join(rootPath, subPath)
}
func (p *ParamTable) initSegFlushMetaSubPath() {
subPath, err := p.Load("etcd.segFlushMetaSubPath")
if err != nil {
panic(err)
}
p.SegFlushMetaSubPath = subPath
}
func (p *ParamTable) initDDLFlushMetaSubPath() {
subPath, err := p.Load("etcd.ddlFlushMetaSubPath")
if err != nil {
panic(err)
}
p.DDLFlushMetaSubPath = subPath
}
func (p *ParamTable) initMinioAddress() {
endpoint, err := p.Load("_MinioAddress")
if err != nil {
panic(err)
}
p.MinioAddress = endpoint
}
func (p *ParamTable) initMinioAccessKeyID() {
keyID, err := p.Load("minio.accessKeyID")
if err != nil {
panic(err)
}
p.MinioAccessKeyID = keyID
}
func (p *ParamTable) initMinioSecretAccessKey() {
key, err := p.Load("minio.secretAccessKey")
if err != nil {
panic(err)
}
p.MinioSecretAccessKey = key
}
func (p *ParamTable) initMinioUseSSL() {
usessl, err := p.Load("minio.useSSL")
if err != nil {
panic(err)
}
p.MinioUseSSL, err = strconv.ParseBool(usessl)
if err != nil {
panic(err)
}
}
func (p *ParamTable) initMinioBucketName() {
bucketName, err := p.Load("minio.bucketName")
if err != nil {
panic(err)
}
p.MinioBucketName = bucketName
}
func (p *ParamTable) sliceIndex() int {
dataNodeID := p.DataNodeID
dataNodeIDList := p.DataNodeIDList()
for i := 0; i < len(dataNodeIDList); i++ {
if dataNodeID == dataNodeIDList[i] {
return i
}
}
return -1
}
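// For example, with DataNodeID = 3 and dataNodeIDList = [3], sliceIndex
// returns 0; an ID that is absent from the list returns -1, which makes
// initInsertChannelNames panic during Init.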

View File

@ -0,0 +1,172 @@
package datanode
import (
"log"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestParamTable_DataNode(t *testing.T) {
Params.Init()
log.Println("Params in ParamTable test: ", Params)
t.Run("Test DataNodeID", func(t *testing.T) {
id := Params.DataNodeID
assert.Equal(t, id, UniqueID(3))
})
t.Run("Test flowGraphMaxQueueLength", func(t *testing.T) {
length := Params.FlowGraphMaxQueueLength
assert.Equal(t, length, int32(1024))
})
t.Run("Test flowGraphMaxParallelism", func(t *testing.T) {
maxParallelism := Params.FlowGraphMaxParallelism
assert.Equal(t, maxParallelism, int32(1024))
})
t.Run("Test FlushInsertBufSize", func(t *testing.T) {
size := Params.FlushInsertBufferSize
assert.Equal(t, int32(500), size)
})
t.Run("Test FlushDdBufSize", func(t *testing.T) {
size := Params.FlushDdBufferSize
assert.Equal(t, int32(20), size)
})
t.Run("Test InsertBinlogRootPath", func(t *testing.T) {
path := Params.InsertBinlogRootPath
assert.Equal(t, "by-dev/insert_log", path)
})
t.Run("Test DdBinlogRootPath", func(t *testing.T) {
path := Params.DdBinlogRootPath
assert.Equal(t, "by-dev/data_definition_log", path)
})
t.Run("Test MasterAddress", func(t *testing.T) {
address := Params.MasterAddress
split := strings.Split(address, ":")
assert.Equal(t, "localhost", split[0])
assert.Equal(t, "53100", split[1])
})
t.Run("Test PulsarAddress", func(t *testing.T) {
address := Params.PulsarAddress
split := strings.Split(address, ":")
assert.Equal(t, split[0], "pulsar")
assert.Equal(t, split[len(split)-1], "6650")
})
t.Run("Test insertChannelNames", func(t *testing.T) {
names := Params.InsertChannelNames
assert.Equal(t, len(names), 2)
assert.Equal(t, names[0], "insert-0")
assert.Equal(t, names[1], "insert-1")
})
t.Run("Test insertChannelRange", func(t *testing.T) {
channelRange := Params.InsertChannelRange
assert.Equal(t, len(channelRange), 2)
assert.Equal(t, channelRange[0], 0)
assert.Equal(t, channelRange[1], 2)
})
t.Run("Test insertMsgStreamReceiveBufSize", func(t *testing.T) {
bufSize := Params.InsertReceiveBufSize
assert.Equal(t, bufSize, int64(1024))
})
t.Run("Test insertPulsarBufSize", func(t *testing.T) {
bufSize := Params.InsertPulsarBufSize
assert.Equal(t, bufSize, int64(1024))
})
t.Run("Test ddChannelNames", func(t *testing.T) {
names := Params.DDChannelNames
assert.Equal(t, len(names), 1)
assert.Equal(t, names[0], "data-definition-0")
})
t.Run("Test DdMsgStreamReceiveBufSize", func(t *testing.T) {
bufSize := Params.DDReceiveBufSize
assert.Equal(t, int64(64), bufSize)
})
t.Run("Test DdPulsarBufSize", func(t *testing.T) {
bufSize := Params.DDPulsarBufSize
assert.Equal(t, int64(64), bufSize)
})
t.Run("Test SegmentStatisticsChannelName", func(t *testing.T) {
name := Params.SegmentStatisticsChannelName
assert.Equal(t, "dataNodeSegStatistics", name)
})
t.Run("Test SegmentStatisticsBufSize", func(t *testing.T) {
size := Params.SegmentStatisticsBufSize
assert.Equal(t, int64(64), size)
})
t.Run("Test SegmentStatisticsPublishInterval", func(t *testing.T) {
interval := Params.SegmentStatisticsPublishInterval
assert.Equal(t, 1000, interval)
})
t.Run("Test timeTickChannelName", func(t *testing.T) {
name := Params.TimeTickChannelName
assert.Equal(t, "dataNodeTimeTick-3", name)
})
t.Run("Test msgChannelSubName", func(t *testing.T) {
name := Params.MsgChannelSubName
assert.Equal(t, "dataNode-3", name)
})
t.Run("Test EtcdAddress", func(t *testing.T) {
addr := Params.EtcdAddress
split := strings.Split(addr, ":")
assert.Equal(t, "localhost", split[0])
assert.Equal(t, "2379", split[1])
})
t.Run("Test MetaRootPath", func(t *testing.T) {
path := Params.MetaRootPath
assert.Equal(t, "by-dev/meta", path)
})
t.Run("Test SegFlushMetaSubPath", func(t *testing.T) {
path := Params.SegFlushMetaSubPath
assert.Equal(t, "writer/segment", path)
})
t.Run("Test DDLFlushMetaSubPath", func(t *testing.T) {
path := Params.DDLFlushMetaSubPath
assert.Equal(t, "writer/ddl", path)
})
t.Run("Test minioAccessKeyID", func(t *testing.T) {
id := Params.MinioAccessKeyID
assert.Equal(t, "minioadmin", id)
})
t.Run("Test minioSecretAccessKey", func(t *testing.T) {
key := Params.MinioSecretAccessKey
assert.Equal(t, "minioadmin", key)
})
t.Run("Test MinioUseSSL", func(t *testing.T) {
useSSL := Params.MinioUseSSL
assert.Equal(t, false, useSSL)
})
t.Run("Test MinioBucketName", func(t *testing.T) {
name := Params.MinioBucketName
assert.Equal(t, "a-bucket", name)
})
}

View File

@ -0,0 +1,15 @@
package datanode
import "github.com/zilliztech/milvus-distributed/internal/util/typeutil"
type (
UniqueID = typeutil.UniqueID
Timestamp = typeutil.Timestamp
IntPrimaryKey = typeutil.IntPrimaryKey
DSL = string
TimeRange struct {
timestampMin Timestamp
timestampMax Timestamp
}
)

View File

@ -0,0 +1,32 @@
package datanode
import (
"context"
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
)
type Client struct {
ctx context.Context
// GOOSE TODO: add DataNodeClient
}
func (c *Client) Init() {
panic("implement me")
}
func (c *Client) Start() {
panic("implement me")
}
func (c *Client) Stop() {
panic("implement me")
}
func (c *Client) WatchDmChannels(in *datapb.WatchDmChannelRequest) error {
panic("implement me")
}
func (c *Client) FlushSegment(in *datapb.FlushSegRequest) error {
panic("implement me")
}

View File

@ -0,0 +1,11 @@
package datanode
import (
"github.com/zilliztech/milvus-distributed/internal/datanode"
"google.golang.org/grpc"
)
type Server struct {
node datanode.Node
grpcServer *grpc.Server
}

View File

@ -1,136 +0,0 @@
package masterservice
import (
"context"
"time"
cms "github.com/zilliztech/milvus-distributed/internal/masterservice"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"google.golang.org/grpc"
)
// grpc client
type GrpcClient struct {
grpcClient masterpb.MasterServiceClient
conn *grpc.ClientConn
//inner member
addr string
timeout time.Duration
retry int
}
func NewGrpcClient(addr string, timeout time.Duration) (*GrpcClient, error) {
return &GrpcClient{
grpcClient: nil,
conn: nil,
addr: addr,
timeout: timeout,
retry: 3,
}, nil
}
func (c *GrpcClient) Init(params *cms.InitParams) error {
ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
defer cancel()
var err error
for i := 0; i < c.retry; i++ {
if c.conn, err = grpc.DialContext(ctx, c.addr, grpc.WithInsecure(), grpc.WithBlock()); err == nil {
break
}
}
if err != nil {
return err
}
c.grpcClient = masterpb.NewMasterServiceClient(c.conn)
return nil
}
func (c *GrpcClient) Start() error {
return nil
}
func (c *GrpcClient) Stop() error {
return c.conn.Close()
}
//TODO, grpc, get service state from server
func (c *GrpcClient) GetServiceStates() (*internalpb2.ServiceStates, error) {
return nil, nil
}
//DDL request
func (c *GrpcClient) CreateCollection(in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
return c.grpcClient.CreateCollection(context.Background(), in)
}
func (c *GrpcClient) DropCollection(in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
return c.grpcClient.DropCollection(context.Background(), in)
}
func (c *GrpcClient) HasCollection(in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
return c.grpcClient.HasCollection(context.Background(), in)
}
func (c *GrpcClient) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
return c.grpcClient.DescribeCollection(context.Background(), in)
}
func (c *GrpcClient) GetCollectionStatistics(in *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
return c.grpcClient.GetCollectionStatistics(context.Background(), in)
}
func (c *GrpcClient) ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
return c.grpcClient.ShowCollections(context.Background(), in)
}
func (c *GrpcClient) CreatePartition(in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
return c.grpcClient.CreatePartition(context.Background(), in)
}
func (c *GrpcClient) DropPartition(in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
return c.grpcClient.DropPartition(context.Background(), in)
}
func (c *GrpcClient) HasPartition(in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
return c.grpcClient.HasPartition(context.Background(), in)
}
func (c *GrpcClient) GetPartitionStatistics(in *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
return c.grpcClient.GetPartitionStatistics(context.Background(), in)
}
func (c *GrpcClient) ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
return c.grpcClient.ShowPartitions(context.Background(), in)
}
//index builder service
func (c *GrpcClient) CreateIndex(in *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
return c.grpcClient.CreateIndex(context.Background(), in)
}
func (c *GrpcClient) DescribeIndex(in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
return c.grpcClient.DescribeIndex(context.Background(), in)
}
//global timestamp allocator
func (c *GrpcClient) AllocTimestamp(in *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
return c.grpcClient.AllocTimestamp(context.Background(), in)
}
func (c *GrpcClient) AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
return c.grpcClient.AllocID(context.Background(), in)
}
//receiver time tick from proxy service, and put it into this channel
func (c *GrpcClient) GetTimeTickChannel(empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
return c.grpcClient.GetTimeTickChannel(context.Background(), empty)
}
//receive ddl from rpc and time tick from proxy service, and put them into this channel
func (c *GrpcClient) GetDdChannel(in *commonpb.Empty) (*milvuspb.StringResponse, error) {
return c.grpcClient.GetDdChannel(context.Background(), in)
}
//just define a channel, not used currently
func (c *GrpcClient) GetStatisticsChannel(empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
return c.grpcClient.GetStatisticsChannel(context.Background(), empty)
}
func (c *GrpcClient) DescribeSegment(in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
return c.grpcClient.DescribeSegment(context.Background(), in)
}
func (c *GrpcClient) ShowSegments(in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
return c.grpcClient.ShowSegments(context.Background(), in)
}

View File

@ -1,430 +0,0 @@
package masterservice
import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
cms "github.com/zilliztech/milvus-distributed/internal/masterservice"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)
func TestGrpcService(t *testing.T) {
rand.Seed(time.Now().UnixNano())
randVal := rand.Int()
cms.Params.Address = "127.0.0.1"
cms.Params.Port = (randVal % 100) + 10000
cms.Params.NodeID = 0
cms.Params.PulsarAddress = "pulsar://127.0.0.1:6650"
cms.Params.EtcdAddress = "127.0.0.1:2379"
cms.Params.MetaRootPath = fmt.Sprintf("/%d/test/meta", randVal)
cms.Params.KvRootPath = fmt.Sprintf("/%d/test/kv", randVal)
cms.Params.ProxyTimeTickChannel = fmt.Sprintf("proxyTimeTick%d", randVal)
cms.Params.MsgChannelSubName = fmt.Sprintf("msgChannel%d", randVal)
cms.Params.TimeTickChannel = fmt.Sprintf("timeTick%d", randVal)
cms.Params.DdChannel = fmt.Sprintf("ddChannel%d", randVal)
cms.Params.StatisticsChannel = fmt.Sprintf("stateChannel%d", randVal)
cms.Params.MaxPartitionNum = 64
cms.Params.DefaultPartitionTag = "_default"
t.Logf("master service port = %d", cms.Params.Port)
svr, err := NewGrpcServer()
assert.Nil(t, err)
core := svr.core.(*cms.Core)
core.ProxyTimeTickChan = make(chan typeutil.Timestamp, 8)
timeTickArray := make([]typeutil.Timestamp, 0, 16)
core.SendTimeTick = func(ts typeutil.Timestamp) error {
t.Logf("send time tick %d", ts)
timeTickArray = append(timeTickArray, ts)
return nil
}
createCollectionArray := make([]*cms.CreateCollectionReqTask, 0, 16)
core.DdCreateCollectionReq = func(req *cms.CreateCollectionReqTask) error {
t.Logf("Create Colllection %s", req.Req.CollectionName)
createCollectionArray = append(createCollectionArray, req)
return nil
}
dropCollectionArray := make([]*cms.DropCollectionReqTask, 0, 16)
core.DdDropCollectionReq = func(req *cms.DropCollectionReqTask) error {
t.Logf("Drop Collection %s", req.Req.CollectionName)
dropCollectionArray = append(dropCollectionArray, req)
return nil
}
createPartitionArray := make([]*cms.CreatePartitionReqTask, 0, 16)
core.DdCreatePartitionReq = func(req *cms.CreatePartitionReqTask) error {
t.Logf("Create Partition %s", req.Req.PartitionName)
createPartitionArray = append(createPartitionArray, req)
return nil
}
dropPartitionArray := make([]*cms.DropPartitionReqTask, 0, 16)
core.DdDropPartitionReq = func(req *cms.DropPartitionReqTask) error {
t.Logf("Drop Partition %s", req.Req.PartitionName)
dropPartitionArray = append(dropPartitionArray, req)
return nil
}
core.GetSegmentMeta = func(id typeutil.UniqueID) (*etcdpb.SegmentMeta, error) {
return &etcdpb.SegmentMeta{
SegmentID: 20,
CollectionID: 10,
PartitionTag: "_default",
ChannelStart: 50,
ChannelEnd: 100,
OpenTime: 1000,
CloseTime: 2000,
NumRows: 16,
MemSize: 1024,
BinlogFilePaths: []*etcdpb.FieldBinlogFiles{
{
FieldID: 101,
BinlogFiles: []string{"/test/binlog/file"},
},
},
}, nil
}
err = svr.Init(&cms.InitParams{ProxyTimeTickChannel: fmt.Sprintf("proxyTimeTick%d", randVal)})
assert.Nil(t, err)
err = svr.Start()
assert.Nil(t, err)
cli, err := NewGrpcClient(fmt.Sprintf("127.0.0.1:%d", cms.Params.Port), 3*time.Second)
assert.Nil(t, err)
err = cli.Init(&cms.InitParams{ProxyTimeTickChannel: fmt.Sprintf("proxyTimeTick%d", randVal)})
assert.Nil(t, err)
err = cli.Start()
assert.Nil(t, err)
t.Run("create collection", func(t *testing.T) {
schema := schemapb.CollectionSchema{
Name: "testColl",
Description: "testColl",
AutoID: true,
Fields: []*schemapb.FieldSchema{
{
FieldID: 100,
Name: "vector",
IsPrimaryKey: false,
Description: "vector",
DataType: schemapb.DataType_VECTOR_FLOAT,
TypeParams: nil,
IndexParams: nil,
},
},
}
sbf, err := proto.Marshal(&schema)
assert.Nil(t, err)
req := &milvuspb.CreateCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kCreateCollection,
MsgID: 100,
Timestamp: 100,
SourceID: 100,
},
DbName: "testDb",
CollectionName: "testColl",
Schema: sbf,
}
status, err := cli.CreateCollection(req)
assert.Nil(t, err)
assert.Equal(t, len(createCollectionArray), 1)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, createCollectionArray[0].Req.Base.MsgType, commonpb.MsgType_kCreateCollection)
assert.Equal(t, createCollectionArray[0].Req.CollectionName, "testColl")
})
t.Run("has collection", func(t *testing.T) {
req := &milvuspb.HasCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kHasCollection,
MsgID: 101,
Timestamp: 101,
SourceID: 101,
},
DbName: "testDb",
CollectionName: "testColl",
}
rsp, err := cli.HasCollection(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, rsp.Value, true)
req = &milvuspb.HasCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kHasCollection,
MsgID: 102,
Timestamp: 102,
SourceID: 102,
},
DbName: "testDb",
CollectionName: "testColl2",
}
rsp, err = cli.HasCollection(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, rsp.Value, false)
req = &milvuspb.HasCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kHasCollection,
MsgID: 102,
Timestamp: 102,
SourceID: 102,
},
DbName: "testDb",
CollectionName: "testColl2",
}
rsp, err = cli.HasCollection(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_UNEXPECTED_ERROR)
})
t.Run("describe collection", func(t *testing.T) {
req := &milvuspb.DescribeCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kDescribeCollection,
MsgID: 103,
Timestamp: 103,
SourceID: 103,
},
DbName: "testDb",
CollectionName: "testColl",
}
rsp, err := cli.DescribeCollection(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, rsp.Schema.Name, "testColl")
})
t.Run("get collection statistics", func(t *testing.T) {
req := &milvuspb.CollectionStatsRequest{
Base: &commonpb.MsgBase{
MsgType: 0, //TODO: missing msg type
MsgID: 104,
Timestamp: 104,
SourceID: 104,
},
DbName: "testDb",
CollectionName: "testColl",
}
rsp, err := cli.GetCollectionStatistics(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, len(rsp.Stats), 2)
assert.Equal(t, rsp.Stats[0].Key, "row_count")
assert.Equal(t, rsp.Stats[0].Value, "0")
assert.Equal(t, rsp.Stats[1].Key, "data_size")
assert.Equal(t, rsp.Stats[1].Value, "0")
collMeta, err := core.MetaTable.GetCollectionByName("testColl")
assert.Nil(t, err)
seg := &etcdpb.SegmentMeta{
SegmentID: 101,
CollectionID: collMeta.ID,
PartitionTag: cms.Params.DefaultPartitionTag,
}
err = core.MetaTable.AddSegment(seg)
assert.Nil(t, err)
req = &milvuspb.CollectionStatsRequest{
Base: &commonpb.MsgBase{
MsgType: 0, //TODO: missing msg type
MsgID: 105,
Timestamp: 105,
SourceID: 105,
},
DbName: "testDb",
CollectionName: "testColl",
}
rsp, err = cli.GetCollectionStatistics(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, len(rsp.Stats), 2)
assert.Equal(t, rsp.Stats[0].Key, "row_count")
assert.Equal(t, rsp.Stats[0].Value, "16")
assert.Equal(t, rsp.Stats[1].Key, "data_size")
assert.Equal(t, rsp.Stats[1].Value, "1024")
})
t.Run("show collection", func(t *testing.T) {
req := &milvuspb.ShowCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kShowCollections,
MsgID: 106,
Timestamp: 106,
SourceID: 106,
},
DbName: "testDb",
}
rsp, err := cli.ShowCollections(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, rsp.CollectionNames[0], "testColl")
assert.Equal(t, len(rsp.CollectionNames), 1)
})
t.Run("create partition", func(t *testing.T) {
req := &milvuspb.CreatePartitionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kCreatePartition,
MsgID: 107,
Timestamp: 107,
SourceID: 107,
},
DbName: "testDb",
CollectionName: "testColl",
PartitionName: "testPartition",
}
status, err := cli.CreatePartition(req)
assert.Nil(t, err)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
collMeta, err := core.MetaTable.GetCollectionByName("testColl")
assert.Nil(t, err)
assert.Equal(t, len(collMeta.PartitionIDs), 2)
assert.Equal(t, collMeta.PartitionTags[1], "testPartition")
})
t.Run("has partition", func(t *testing.T) {
req := &milvuspb.HasPartitionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kHasPartition,
MsgID: 108,
Timestamp: 108,
SourceID: 108,
},
DbName: "testDb",
CollectionName: "testColl",
PartitionName: "testPartition",
}
rsp, err := cli.HasPartition(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, rsp.Value, true)
})
t.Run("get partition statistics", func(t *testing.T) {
req := &milvuspb.PartitionStatsRequest{
Base: &commonpb.MsgBase{
MsgType: 0, //TODO: missing msg type
MsgID: 109,
Timestamp: 109,
SourceID: 109,
},
DbName: "testDb",
CollectionName: "testColl",
PartitionName: cms.Params.DefaultPartitionTag,
}
rsp, err := cli.GetPartitionStatistics(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, len(rsp.Stats), 2)
assert.Equal(t, rsp.Stats[0].Key, "row_count")
assert.Equal(t, rsp.Stats[0].Value, "16")
assert.Equal(t, rsp.Stats[1].Key, "data_size")
assert.Equal(t, rsp.Stats[1].Value, "1024")
})
t.Run("show partition", func(t *testing.T) {
req := &milvuspb.ShowPartitionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kShowPartitions,
MsgID: 110,
Timestamp: 110,
SourceID: 110,
},
DbName: "testDb",
CollectionName: "testColl",
}
rsp, err := cli.ShowPartitions(req)
assert.Nil(t, err)
assert.Equal(t, rsp.Status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, len(rsp.PartitionNames), 2)
})
t.Run("drop partition", func(t *testing.T) {
req := &milvuspb.DropPartitionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kDropPartition,
MsgID: 199,
Timestamp: 199,
SourceID: 199,
},
DbName: "testDb",
CollectionName: "testColl",
PartitionName: "testPartition",
}
status, err := cli.DropPartition(req)
assert.Nil(t, err)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
collMeta, err := core.MetaTable.GetCollectionByName("testColl")
assert.Nil(t, err)
assert.Equal(t, len(collMeta.PartitionIDs), 1)
assert.Equal(t, collMeta.PartitionTags[0], cms.Params.DefaultPartitionTag)
})
t.Run("drop collection", func(t *testing.T) {
req := &milvuspb.DropCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kDropCollection,
MsgID: 200,
Timestamp: 200,
SourceID: 200,
},
DbName: "testDb",
CollectionName: "testColl",
}
status, err := cli.DropCollection(req)
assert.Nil(t, err)
assert.Equal(t, len(dropCollectionArray), 1)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_SUCCESS)
assert.Equal(t, dropCollectionArray[0].Req.Base.MsgType, commonpb.MsgType_kDropCollection)
assert.Equal(t, dropCollectionArray[0].Req.CollectionName, "testColl")
req = &milvuspb.DropCollectionRequest{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kDropCollection,
MsgID: 200,
Timestamp: 200,
SourceID: 200,
},
DbName: "testDb",
CollectionName: "testColl",
}
status, err = cli.DropCollection(req)
assert.Nil(t, err)
assert.Equal(t, len(dropCollectionArray), 1)
assert.Equal(t, status.ErrorCode, commonpb.ErrorCode_UNEXPECTED_ERROR)
})
err = cli.Stop()
assert.Nil(t, err)
err = svr.Stop()
assert.Nil(t, err)
}

View File

@ -1,174 +0,0 @@
package masterservice
import (
"context"
"fmt"
"net"
"sync"
cms "github.com/zilliztech/milvus-distributed/internal/masterservice"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"google.golang.org/grpc"
)
// grpc wrapper
type GrpcServer struct {
core cms.Interface
grpcServer *grpc.Server
grpcError error
grpcErrMux sync.Mutex
ctx context.Context
cancel context.CancelFunc
}
func NewGrpcServer() (*GrpcServer, error) {
s := &GrpcServer{}
var err error
s.ctx, s.cancel = context.WithCancel(context.Background())
if s.core, err = cms.NewCore(s.ctx); err != nil {
return nil, err
}
s.grpcServer = grpc.NewServer()
s.grpcError = nil
masterpb.RegisterMasterServiceServer(s.grpcServer, s)
lis, err := net.Listen("tcp", fmt.Sprintf(":%d", cms.Params.Port))
if err != nil {
return nil, err
}
go func() {
if err := s.grpcServer.Serve(lis); err != nil {
s.grpcErrMux.Lock()
defer s.grpcErrMux.Unlock()
s.grpcError = err
}
}()
s.grpcErrMux.Lock()
err = s.grpcError
s.grpcErrMux.Unlock()
if err != nil {
return nil, err
}
return s, nil
}
func (s *GrpcServer) Init(params *cms.InitParams) error {
return s.core.Init(params)
}
func (s *GrpcServer) Start() error {
return s.core.Start()
}
func (s *GrpcServer) Stop() error {
err := s.core.Stop()
s.cancel()
s.grpcServer.GracefulStop()
return err
}
func (s *GrpcServer) GetServiceStates(ctx context.Context, empty *commonpb.Empty) (*internalpb2.ServiceStates, error) {
return nil, nil
}
//DDL request
func (s *GrpcServer) CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
return s.core.CreateCollection(in)
}
func (s *GrpcServer) DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
return s.core.DropCollection(in)
}
func (s *GrpcServer) HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
return s.core.HasCollection(in)
}
func (s *GrpcServer) DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
return s.core.DescribeCollection(in)
}
func (s *GrpcServer) GetCollectionStatistics(ctx context.Context, in *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
return s.core.GetCollectionStatistics(in)
}
func (s *GrpcServer) ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
return s.core.ShowCollections(in)
}
func (s *GrpcServer) CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
return s.core.CreatePartition(in)
}
func (s *GrpcServer) DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
return s.core.DropPartition(in)
}
func (s *GrpcServer) HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
return s.core.HasPartition(in)
}
func (s *GrpcServer) GetPartitionStatistics(ctx context.Context, in *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
return s.core.GetPartitionStatistics(in)
}
func (s *GrpcServer) ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
return s.core.ShowPartitions(in)
}
//index builder service
func (s *GrpcServer) CreateIndex(ctx context.Context, in *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
return s.core.CreateIndex(in)
}
func (s *GrpcServer) DescribeIndex(ctx context.Context, in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
return s.core.DescribeIndex(in)
}
//global timestamp allocator
func (s *GrpcServer) AllocTimestamp(ctx context.Context, in *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
return s.core.AllocTimestamp(in)
}
func (s *GrpcServer) AllocID(ctx context.Context, in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
return s.core.AllocID(in)
}
//receiver time tick from proxy service, and put it into this channel
func (s *GrpcServer) GetTimeTickChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
return s.core.GetTimeTickChannel(empty)
}
//receive ddl from rpc and time tick from proxy service, and put them into this channel
func (s *GrpcServer) GetDdChannel(ctx context.Context, in *commonpb.Empty) (*milvuspb.StringResponse, error) {
return s.core.GetDdChannel(in)
}
//just define a channel, not used currently
func (s *GrpcServer) GetStatisticsChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
return s.core.GetStatisticsChannel(empty)
}
func (s *GrpcServer) DescribeSegment(ctx context.Context, in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
return s.core.DescribeSegment(in)
}
func (s *GrpcServer) ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
return s.core.ShowSegments(in)
}
//TODO, move to query node
func (s *GrpcServer) GetIndexState(ctx context.Context, request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error) {
panic("implement me")
}
//TODO, move to data service
func (s *GrpcServer) AssignSegmentID(ctx context.Context, request *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error) {
panic("implement me")
}

View File

@ -495,7 +495,3 @@ func (s *Master) DescribeSegment(ctx context.Context, request *milvuspb.Describe
func (s *Master) ShowSegments(ctx context.Context, request *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
panic("implement me")
}
func (s *Master) GetDdChannel(ctx context.Context, empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
panic("implement me")
}

View File

@ -1,16 +0,0 @@
package masterservice
// system filed id:
// 0: unique row id
// 1: timestamp
// 100: first user field id
// 101: second user field id
// 102: ...
const (
StartOfUserFieldID = 100
RowIDField = 0
TimeStampField = 1
RowIDFieldName = "RowID"
TimeStampFieldName = "Timestamp"
)

View File

@ -1,118 +0,0 @@
package masterservice
import (
"log"
"sync/atomic"
"time"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.uber.org/zap"
)
// Allocator is a Timestamp Oracle allocator.
type Allocator interface {
// Initialize is used to initialize a TSO allocator.
// It will synchronize TSO with etcd and initialize the
// memory for later allocation work.
Initialize() error
// UpdateTSO is used to update the TSO in memory and the time window in etcd.
UpdateTSO() error
// SetTSO sets the physical part with given tso. It's mainly used for BR restore
// and can not forcibly set the TSO smaller than now.
SetTSO(tso uint64) error
// GenerateTSO is used to generate a given number of TSOs.
// Make sure you have initialized the TSO allocator before calling.
GenerateTSO(count uint32) (uint64, error)
// Reset is used to reset the TSO allocator.
Reset()
}
// GlobalTSOAllocator is the global single point TSO allocator.
type GlobalTSOAllocator struct {
tso *timestampOracle
}
// NewGlobalTSOAllocator creates a new global TSO allocator.
func NewGlobalTSOAllocator(key string, kvBase kv.TxnBase) *GlobalTSOAllocator {
var saveInterval = 3 * time.Second
return &GlobalTSOAllocator{
tso: &timestampOracle{
kvBase: kvBase,
saveInterval: saveInterval,
maxResetTSGap: func() time.Duration { return 3 * time.Second },
key: key,
},
}
}
// Initialize will initialize the created global TSO allocator.
func (gta *GlobalTSOAllocator) Initialize() error {
return gta.tso.InitTimestamp()
}
// UpdateTSO is used to update the TSO in memory and the time window in etcd.
func (gta *GlobalTSOAllocator) UpdateTSO() error {
return gta.tso.UpdateTimestamp()
}
// SetTSO sets the physical part with given tso.
func (gta *GlobalTSOAllocator) SetTSO(tso uint64) error {
return gta.tso.ResetUserTimestamp(tso)
}
// GenerateTSO is used to generate a given number of TSOs.
// Make sure you have initialized the TSO allocator before calling.
func (gta *GlobalTSOAllocator) GenerateTSO(count uint32) (uint64, error) {
var physical, logical int64
if count == 0 {
return 0, errors.New("tso count should be positive")
}
maxRetryCount := 10
for i := 0; i < maxRetryCount; i++ {
current := (*atomicObject)(atomic.LoadPointer(&gta.tso.TSO))
if current == nil || current.physical.Equal(typeutil.ZeroTime) {
// If it's the leader, SyncTimestamp may not have completed yet
log.Println("sync hasn't completed yet, wait for a while")
time.Sleep(200 * time.Millisecond)
continue
}
physical = current.physical.UnixNano() / int64(time.Millisecond)
logical = atomic.AddInt64(&current.logical, int64(count))
if logical >= maxLogical {
log.Println("logical part outside of max logical interval, please check ntp time",
zap.Int("retry-count", i))
time.Sleep(UpdateTimestampStep)
continue
}
return tsoutil.ComposeTS(physical, logical), nil
}
return 0, errors.New("can not get timestamp")
}
func (gta *GlobalTSOAllocator) Alloc(count uint32) (typeutil.Timestamp, error) {
//return gta.tso.SyncTimestamp()
start, err := gta.GenerateTSO(count)
if err != nil {
return typeutil.ZeroTimestamp, err
}
//ret := make([]typeutil.Timestamp, count)
//for i:=uint32(0); i < count; i++{
// ret[i] = start + uint64(i)
//}
return start, err
}
func (gta *GlobalTSOAllocator) AllocOne() (typeutil.Timestamp, error) {
return gta.GenerateTSO(1)
}
// Reset is used to reset the TSO allocator.
func (gta *GlobalTSOAllocator) Reset() {
gta.tso.ResetTimestamp()
}

View File

@ -1,47 +0,0 @@
package masterservice
import (
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)
// GlobalTSOAllocator is the global single point TSO allocator.
type GlobalIDAllocator struct {
allocator Allocator
}
func NewGlobalIDAllocator(key string, base kv.TxnBase) *GlobalIDAllocator {
return &GlobalIDAllocator{
allocator: NewGlobalTSOAllocator(key, base),
}
}
// Initialize will initialize the created global TSO allocator.
func (gia *GlobalIDAllocator) Initialize() error {
return gia.allocator.Initialize()
}
// GenerateTSO is used to generate a given number of TSOs.
// Make sure you have initialized the TSO allocator before calling.
func (gia *GlobalIDAllocator) Alloc(count uint32) (typeutil.UniqueID, typeutil.UniqueID, error) {
timestamp, err := gia.allocator.GenerateTSO(count)
if err != nil {
return 0, 0, err
}
idStart := typeutil.UniqueID(timestamp)
idEnd := idStart + int64(count)
return idStart, idEnd, nil
}
func (gia *GlobalIDAllocator) AllocOne() (typeutil.UniqueID, error) {
timestamp, err := gia.allocator.GenerateTSO(1)
if err != nil {
return 0, err
}
idStart := typeutil.UniqueID(timestamp)
return idStart, nil
}
func (gia *GlobalIDAllocator) UpdateID() error {
return gia.allocator.UpdateTSO()
}

View File

@ -1,715 +0,0 @@
package masterservice
import (
"context"
"log"
"math/rand"
"sync"
"sync/atomic"
"time"
"github.com/zilliztech/milvus-distributed/internal/errors"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
"github.com/zilliztech/milvus-distributed/internal/proto/masterpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.etcd.io/etcd/clientv3"
)
// internalpb2 -> internalpb
// proxypb(proxy_service)
// querypb(query_service)
// datapb(data_service)
// indexpb(index_service)
// milvuspb -> servicepb
// masterpb2 -> masterpb (master_service)
type InitParams struct {
ProxyTimeTickChannel string
}
type Service interface {
Init(params *InitParams) error
Start() error
Stop() error
GetServiceStates() (*internalpb2.ServiceStates, error)
GetTimeTickChannel() (string, error)
GetStatesChannel() (string, error)
}
type Interface interface {
//service
Init(params *InitParams) error
Start() error
Stop() error
GetServiceStates(empty *commonpb.Empty) (*internalpb2.ServiceStates, error)
//DDL request
CreateCollection(in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
DropCollection(in *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
HasCollection(in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
GetCollectionStatistics(in *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error)
ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
CreatePartition(in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
DropPartition(in *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
HasPartition(in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
GetPartitionStatistics(in *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error)
ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
//index builder service
CreateIndex(in *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
DescribeIndex(in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
//global timestamp allocator
AllocTimestamp(in *masterpb.TsoRequest) (*masterpb.TsoResponse, error)
AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error)
//TODO, master load these channel form config file ?
//receiver time tick from proxy service, and put it into this channel
GetTimeTickChannel(empty *commonpb.Empty) (*milvuspb.StringResponse, error)
//receive ddl from rpc and time tick from proxy service, and put them into this channel
GetDdChannel(empty *commonpb.Empty) (*milvuspb.StringResponse, error)
//just define a channel, not used currently
GetStatisticsChannel(empty *commonpb.Empty) (*milvuspb.StringResponse, error)
//segment
DescribeSegment(in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error)
ShowSegments(in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error)
//get system config from master, not used currently
//GetSysConfigs(in *milvuspb.SysConfigRequest)
//GetIndexState(ctx context.Context, request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error)
}
// ------------------ struct -----------------------
// master core
type Core struct {
//TODO DataService Interface
//TODO IndexService Interface
//TODO ProxyServiceClient Interface, get proxy service time tick channel,InvalidateCollectionMetaCache
//TODO Segment States Channel, from DataService; when a new segment is created, data service should put the segment id into this channel, and the master will add the segment id to the collection meta
//TODO Segment Flush Watcher, monitor if segment has flushed into disk
//TODO indexBuilder Sch, tell index service to build index
MetaTable *metaTable
//id allocator
idAllocator *GlobalIDAllocator
//tso allocator
tsoAllocator *GlobalTSOAllocator
//inner members
ctx context.Context
cancel context.CancelFunc
etcdCli *clientv3.Client
kvBase *etcdkv.EtcdKV
metaKV *etcdkv.EtcdKV
//TODO, receive time tick from proxy service time tick channel
ProxyTimeTickChan <-chan typeutil.Timestamp
//TODO, send time tick into dd channel and time tick channel
SendTimeTick func(t typeutil.Timestamp) error
//TODO, send create collection into dd channel
DdCreateCollectionReq func(req *CreateCollectionReqTask) error
//TODO, send drop collection into dd channel, and notify the proxy to delete this collection
DdDropCollectionReq func(req *DropCollectionReqTask) error
//TODO, send create partition into dd channel
DdCreatePartitionReq func(req *CreatePartitionReqTask) error
//TODO, send drop partition into dd channel
DdDropPartitionReq func(req *DropPartitionReqTask) error
//dd request scheduler
ddReqQueue chan reqTask //dd request will be push into this chan
lastDdTimeStamp typeutil.Timestamp
//time tick loop
lastTimeTick typeutil.Timestamp
//states code
stateCode atomic.Value
//call once
initOnce sync.Once
startOnce sync.Once
isInit atomic.Value
//TODO, get segment meta by segment id, from data service by grpc
GetSegmentMeta func(id typeutil.UniqueID) (*etcdpb.SegmentMeta, error)
}
// --------------------- function --------------------------
func NewCore(c context.Context) (*Core, error) {
ctx, cancel := context.WithCancel(c)
rand.Seed(time.Now().UnixNano())
Params.Init()
core := &Core{
ctx: ctx,
cancel: cancel,
}
core.stateCode.Store(internalpb2.StateCode_INITIALIZING)
core.isInit.Store(false)
return core, nil
}
func (c *Core) checkInit() error {
if c.MetaTable == nil {
return errors.Errorf("MetaTable is nil")
}
if c.idAllocator == nil {
return errors.Errorf("idAllocator is nil")
}
if c.tsoAllocator == nil {
return errors.Errorf("tsoAllocator is nil")
}
if c.etcdCli == nil {
return errors.Errorf("etcdCli is nil")
}
if c.metaKV == nil {
return errors.Errorf("metaKV is nil")
}
if c.kvBase == nil {
return errors.Errorf("kvBase is nil")
}
if c.ProxyTimeTickChan == nil {
return errors.Errorf("ProxyTimeTickChan is nil")
}
if c.ddReqQueue == nil {
return errors.Errorf("ddReqQueue is nil")
}
if c.GetSegmentMeta == nil {
return errors.Errorf("GetSegmentMeta is nil")
}
if c.DdCreateCollectionReq == nil {
return errors.Errorf("DdCreateCollectionReq is nil")
}
if c.DdDropCollectionReq == nil {
return errors.Errorf("DdDropCollectionReq is nil")
}
if c.DdCreatePartitionReq == nil {
return errors.Errorf("DdCreatePartitionReq is nil")
}
if c.DdDropPartitionReq == nil {
return errors.Errorf("DdDropPartitionReq is nil")
}
log.Printf("master node id = %d\n", Params.NodeID)
return nil
}
func (c *Core) startDdScheduler() {
for {
select {
case <-c.ctx.Done():
log.Printf("close dd scheduler, exit task execution loop")
return
case task, ok := <-c.ddReqQueue:
if !ok {
log.Printf("dd chan is closed, exit task execution loopo")
return
}
ts, err := task.Ts()
if err != nil {
task.Notify(err)
break
}
if ts <= c.lastDdTimeStamp {
task.Notify(errors.Errorf("input timestamp = %d, last dd time stamp = %d", ts, c.lastDdTimeStamp))
break
}
err = task.Execute()
task.Notify(err)
c.lastDdTimeStamp = ts
}
}
}
func (c *Core) startTimeTickLoop() {
for {
select {
case <-c.ctx.Done():
log.Printf("close master time tick loop")
return
case tt, ok := <-c.ProxyTimeTickChan:
if !ok {
log.Printf("proxyTimeTickStream is closed, exit time tick loop")
return
}
if tt <= c.lastTimeTick {
log.Printf("master time tick go back, last time tick = %d, input time tick = %d", c.lastTimeTick, tt)
}
if err := c.SendTimeTick(tt); err != nil {
log.Printf("master send time tick into dd and time_tick channel failed: %s", err.Error())
}
c.lastTimeTick = tt
}
}
}
func (c *Core) Init(params *InitParams) error {
var initError error = nil
c.initOnce.Do(func() {
Params.ProxyTimeTickChannel = params.ProxyTimeTickChannel
if c.etcdCli, initError = clientv3.New(clientv3.Config{Endpoints: []string{Params.EtcdAddress}, DialTimeout: 5 * time.Second}); initError != nil {
return
}
c.metaKV = etcdkv.NewEtcdKV(c.etcdCli, Params.MetaRootPath)
if c.MetaTable, initError = NewMetaTable(c.metaKV); initError != nil {
return
}
c.kvBase = etcdkv.NewEtcdKV(c.etcdCli, Params.KvRootPath)
c.idAllocator = NewGlobalIDAllocator("idTimestamp", tsoutil.NewTSOKVBase([]string{Params.EtcdAddress}, Params.KvRootPath, "gid"))
if initError = c.idAllocator.Initialize(); initError != nil {
return
}
c.tsoAllocator = NewGlobalTSOAllocator("timestamp", tsoutil.NewTSOKVBase([]string{Params.EtcdAddress}, Params.KvRootPath, "tso"))
if initError = c.tsoAllocator.Initialize(); initError != nil {
return
}
c.ddReqQueue = make(chan reqTask, 1024)
c.isInit.Store(true)
})
return initError
}
func (c *Core) Start() error {
isInit := c.isInit.Load().(bool)
if !isInit {
return errors.Errorf("call init before start")
}
if err := c.checkInit(); err != nil {
return err
}
c.startOnce.Do(func() {
go c.startDdScheduler()
go c.startTimeTickLoop()
c.stateCode.Store(internalpb2.StateCode_HEALTHY)
})
return nil
}
func (c *Core) Stop() error {
c.cancel()
c.stateCode.Store(internalpb2.StateCode_ABNORMAL)
return nil
}
func (c *Core) GetServiceStates(empty *commonpb.Empty) (*internalpb2.ServiceStates, error) {
code := c.stateCode.Load().(internalpb2.StateCode)
return &internalpb2.ServiceStates{
StateCode: code,
NodeStates: []*internalpb2.NodeStates{
{
NodeID: int64(Params.NodeID),
Role: "master",
StateCode: code,
ExtraInfo: nil,
},
},
ExtraInfo: nil,
}, nil
}
func (c *Core) GetTimeTickChannel(empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
},
Value: Params.TimeTickChannel,
}, nil
}
func (c *Core) GetDdChannel(empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
},
Value: Params.DdChannel,
}, nil
}
func (c *Core) GetStatisticsChannel(empty *commonpb.Empty) (*milvuspb.StringResponse, error) {
return &milvuspb.StringResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
},
Value: Params.StatisticsChannel,
}, nil
}
func (c *Core) CreateCollection(in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error) {
t := &CreateCollectionReqTask{
baseReqTask: baseReqTask{
cv: make(chan error),
core: c,
},
Req: in,
}
c.ddReqQueue <- t
err := t.WaitToFinish()
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "Create collection failed: " + err.Error(),
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
}, nil
}
func (c *Core) DropCollection(in *milvuspb.DropCollectionRequest) (*commonpb.Status, error) {
t := &DropCollectionReqTask{
baseReqTask: baseReqTask{
cv: make(chan error),
core: c,
},
Req: in,
}
c.ddReqQueue <- t
err := t.WaitToFinish()
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "Drop collection failed: " + err.Error(),
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
}, nil
}
func (c *Core) HasCollection(in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error) {
t := &HasCollectionReqTask{
baseReqTask: baseReqTask{
cv: make(chan error),
core: c,
},
Req: in,
HasCollection: false,
}
c.ddReqQueue <- t
err := t.WaitToFinish()
if err != nil {
return &milvuspb.BoolResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "Has collection failed: " + err.Error(),
},
Value: false,
}, nil
}
return &milvuspb.BoolResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
},
Value: t.HasCollection,
}, nil
}
func (c *Core) DescribeCollection(in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error) {
t := &DescribeCollectionReqTask{
baseReqTask: baseReqTask{
cv: make(chan error),
core: c,
},
Req: in,
Rsp: &milvuspb.DescribeCollectionResponse{},
}
c.ddReqQueue <- t
err := t.WaitToFinish()
if err != nil {
return &milvuspb.DescribeCollectionResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "describe collection failed: " + err.Error(),
},
Schema: nil,
}, nil
}
t.Rsp.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
}
return t.Rsp, nil
}
func (c *Core) GetCollectionStatistics(in *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error) {
t := &CollectionStatsReqTask{
baseReqTask: baseReqTask{
cv: make(chan error),
core: c,
},
Req: in,
Rsp: &milvuspb.CollectionStatsResponse{
Stats: nil,
Status: nil,
},
}
c.ddReqQueue <- t
err := t.WaitToFinish()
if err != nil {
return &milvuspb.CollectionStatsResponse{
Stats: nil,
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "GetCollectionStatistics failed: " + err.Error(),
},
}, nil
}
t.Rsp.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
}
return t.Rsp, nil
}
func (c *Core) ShowCollections(in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error) {
t := &ShowCollectionReqTask{
baseReqTask: baseReqTask{
cv: make(chan error),
core: c,
},
Req: in,
Rsp: &milvuspb.ShowCollectionResponse{
CollectionNames: nil,
},
}
c.ddReqQueue <- t
err := t.WaitToFinish()
if err != nil {
return &milvuspb.ShowCollectionResponse{
CollectionNames: nil,
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "ShowCollections failed: " + err.Error(),
},
}, nil
}
t.Rsp.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
}
return t.Rsp, nil
}
func (c *Core) CreatePartition(in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error) {
t := &CreatePartitionReqTask{
baseReqTask: baseReqTask{
cv: make(chan error),
core: c,
},
Req: in,
}
c.ddReqQueue <- t
err := t.WaitToFinish()
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "create partition failed: " + err.Error(),
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
}, nil
}
func (c *Core) DropPartition(in *milvuspb.DropPartitionRequest) (*commonpb.Status, error) {
t := &DropPartitionReqTask{
baseReqTask: baseReqTask{
cv: make(chan error),
core: c,
},
Req: in,
}
c.ddReqQueue <- t
err := t.WaitToFinish()
if err != nil {
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "DropPartition failed: " + err.Error(),
}, nil
}
return &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
}, nil
}
func (c *Core) HasPartition(in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error) {
t := &HasPartitionReqTask{
baseReqTask: baseReqTask{
cv: make(chan error),
core: c,
},
Req: in,
HasPartition: false,
}
c.ddReqQueue <- t
err := t.WaitToFinish()
if err != nil {
return &milvuspb.BoolResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "HasPartition failed: " + err.Error(),
},
Value: false,
}, nil
}
return &milvuspb.BoolResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
},
Value: t.HasPartition,
}, nil
}
func (c *Core) GetPartitionStatistics(in *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error) {
t := &PartitionStatsReqTask{
baseReqTask: baseReqTask{
cv: make(chan error),
core: c,
},
Req: in,
Rsp: &milvuspb.PartitionStatsResponse{
Stats: nil,
Status: nil,
},
}
c.ddReqQueue <- t
err := t.WaitToFinish()
if err != nil {
return &milvuspb.PartitionStatsResponse{
Stats: nil,
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "GetPartitionStatistics failed: " + err.Error(),
},
}, nil
}
t.Rsp.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
}
return t.Rsp, nil
}
func (c *Core) ShowPartitions(in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error) {
t := &ShowPartitionReqTask{
baseReqTask: baseReqTask{
cv: make(chan error),
core: c,
},
Req: in,
Rsp: &milvuspb.ShowPartitionResponse{
PartitionNames: nil,
Status: nil,
},
}
c.ddReqQueue <- t
err := t.WaitToFinish()
if err != nil {
return &milvuspb.ShowPartitionResponse{
PartitionNames: nil,
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "ShowPartitions failed: " + err.Error(),
},
}, nil
}
t.Rsp.Status = &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
}
return t.Rsp, nil
}
//TODO
func (c *Core) CreateIndex(in *milvuspb.CreateIndexRequest) (*commonpb.Status, error) {
return nil, nil
}
//TODO
func (c *Core) DescribeIndex(in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error) {
return nil, nil
}
//TODO
func (c *Core) DescribeSegment(in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error) {
return nil, nil
}
//TODO
func (c *Core) ShowSegments(in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error) {
return nil, nil
}
func (c *Core) AllocTimestamp(in *masterpb.TsoRequest) (*masterpb.TsoResponse, error) {
ts, err := c.tsoAllocator.Alloc(in.Count)
if err != nil {
return &masterpb.TsoResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "AllocTimestamp failed: " + err.Error(),
},
Timestamp: 0,
Count: 0,
}, nil
}
return &masterpb.TsoResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
},
Timestamp: ts,
Count: in.Count,
}, nil
}
func (c *Core) AllocID(in *masterpb.IDRequest) (*masterpb.IDResponse, error) {
start, _, err := c.idAllocator.Alloc(in.Count)
if err != nil {
return &masterpb.IDResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: "AllocID failed: " + err.Error(),
},
ID: 0,
Count: in.Count,
}, nil
}
return &masterpb.IDResponse{
Status: &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
Reason: "",
},
ID: start,
Count: in.Count,
}, nil
}
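For orientation, below is a minimal lifecycle sketch of the Core API above. The constructor name (NewCore) and the "proxyTimeTick" channel name are assumptions for illustration; the request types and the Init/Start/Stop sequence come from the code in this diff.
// Hypothetical sketch: initialize a Core, issue one request through the dd queue, then stop.
c, err := NewCore(context.Background()) // constructor name assumed, not shown in this diff
if err != nil {
    log.Fatal(err)
}
if err = c.Init(&InitParams{ProxyTimeTickChannel: "proxyTimeTick"}); err != nil {
    log.Fatal(err)
}
if err = c.Start(); err != nil {
    log.Fatal(err)
}
rsp, _ := c.HasCollection(&milvuspb.HasCollectionRequest{
    Base:           &commonpb.MsgBase{Timestamp: 100}, // must be greater than lastDdTimeStamp
    CollectionName: "demo",
})
log.Println(rsp.Value) // false unless "demo" was created earlier
_ = c.Stop()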

View File

@ -1,719 +0,0 @@
package masterservice
import (
"fmt"
"strconv"
"sync"
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
pb "github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)
type metaTable struct {
client kv.TxnBase // client of a reliable kv service, i.e. etcd client
tenantID2Meta map[typeutil.UniqueID]pb.TenantMeta // tenant id to tenant meta
proxyID2Meta map[typeutil.UniqueID]pb.ProxyMeta // proxy id to proxy meta
collID2Meta map[typeutil.UniqueID]pb.CollectionMeta // collection id to collection meta
collName2ID map[string]typeutil.UniqueID // collection name to collection id
segID2Meta map[typeutil.UniqueID]pb.SegmentMeta // segment id to segment meta
segID2IndexMetas map[typeutil.UniqueID][]pb.FieldIndexMeta // segment id to array of field index meta
tenantLock sync.RWMutex
proxyLock sync.RWMutex
ddLock sync.RWMutex
indexLock sync.RWMutex
}
func NewMetaTable(kv kv.TxnBase) (*metaTable, error) {
mt := &metaTable{
client: kv,
tenantLock: sync.RWMutex{},
proxyLock: sync.RWMutex{},
ddLock: sync.RWMutex{},
}
err := mt.reloadFromKV()
if err != nil {
return nil, err
}
return mt, nil
}
func (mt *metaTable) reloadFromKV() error {
mt.tenantID2Meta = make(map[typeutil.UniqueID]pb.TenantMeta)
mt.proxyID2Meta = make(map[typeutil.UniqueID]pb.ProxyMeta)
mt.collID2Meta = make(map[typeutil.UniqueID]pb.CollectionMeta)
mt.collName2ID = make(map[string]typeutil.UniqueID)
mt.segID2Meta = make(map[typeutil.UniqueID]pb.SegmentMeta)
mt.segID2IndexMetas = make(map[typeutil.UniqueID][]pb.FieldIndexMeta)
_, values, err := mt.client.LoadWithPrefix("tenant")
if err != nil {
return err
}
for _, value := range values {
tenantMeta := pb.TenantMeta{}
err := proto.UnmarshalText(value, &tenantMeta)
if err != nil {
return err
}
mt.tenantID2Meta[tenantMeta.ID] = tenantMeta
}
_, values, err = mt.client.LoadWithPrefix("proxy")
if err != nil {
return err
}
for _, value := range values {
proxyMeta := pb.ProxyMeta{}
err = proto.UnmarshalText(value, &proxyMeta)
if err != nil {
return err
}
mt.proxyID2Meta[proxyMeta.ID] = proxyMeta
}
_, values, err = mt.client.LoadWithPrefix("collection")
if err != nil {
return err
}
for _, value := range values {
collectionMeta := pb.CollectionMeta{}
err = proto.UnmarshalText(value, &collectionMeta)
if err != nil {
return err
}
mt.collID2Meta[collectionMeta.ID] = collectionMeta
mt.collName2ID[collectionMeta.Schema.Name] = collectionMeta.ID
}
_, values, err = mt.client.LoadWithPrefix("segment")
if err != nil {
return err
}
for _, value := range values {
segmentMeta := pb.SegmentMeta{}
err = proto.UnmarshalText(value, &segmentMeta)
if err != nil {
return err
}
mt.segID2Meta[segmentMeta.SegmentID] = segmentMeta
}
_, values, err = mt.client.LoadWithPrefix("indexmeta")
if err != nil {
return err
}
for _, v := range values {
indexMeta := pb.FieldIndexMeta{}
err = proto.UnmarshalText(v, &indexMeta)
if err != nil {
return err
}
mt.segID2IndexMetas[indexMeta.SegmentID] = append(mt.segID2IndexMetas[indexMeta.SegmentID], indexMeta)
}
return nil
}
// MetaTable.ddLock.Lock() before calling this function
func (mt *metaTable) saveCollectionMeta(coll *pb.CollectionMeta) error {
collBytes := proto.MarshalTextString(coll)
mt.collID2Meta[coll.ID] = *coll
mt.collName2ID[coll.Schema.Name] = coll.ID
return mt.client.Save("/collection/"+strconv.FormatInt(coll.ID, 10), collBytes)
}
// MetaTable.ddLock.Lock() before calling this function
func (mt *metaTable) saveSegmentMeta(seg *pb.SegmentMeta) error {
segBytes := proto.MarshalTextString(seg)
mt.segID2Meta[seg.SegmentID] = *seg
return mt.client.Save("/segment/"+strconv.FormatInt(seg.SegmentID, 10), segBytes)
}
// MetaTable.ddLock.Lock() before calling this function
func (mt *metaTable) saveCollectionAndDeleteSegmentsMeta(coll *pb.CollectionMeta, segIDs []typeutil.UniqueID) error {
segIDStrs := make([]string, 0, len(segIDs))
for _, segID := range segIDs {
segIDStrs = append(segIDStrs, "/segment/"+strconv.FormatInt(segID, 10))
}
kvs := make(map[string]string)
collStrs := proto.MarshalTextString(coll)
kvs["/collection/"+strconv.FormatInt(coll.ID, 10)] = collStrs
for _, segID := range segIDs {
_, ok := mt.segID2Meta[segID]
if ok {
delete(mt.segID2Meta, segID)
}
}
mt.collID2Meta[coll.ID] = *coll
return mt.client.MultiSaveAndRemove(kvs, segIDStrs)
}
// MetaTable.ddLock.Lock() before calling this function
func (mt *metaTable) saveCollectionsAndSegmentsMeta(coll *pb.CollectionMeta, seg *pb.SegmentMeta) error {
kvs := make(map[string]string)
collBytes := proto.MarshalTextString(coll)
kvs["/collection/"+strconv.FormatInt(coll.ID, 10)] = collBytes
mt.collID2Meta[coll.ID] = *coll
mt.collName2ID[coll.Schema.Name] = coll.ID
segBytes := proto.MarshalTextString(seg)
kvs["/segment/"+strconv.FormatInt(seg.SegmentID, 10)] = segBytes
mt.segID2Meta[seg.SegmentID] = *seg
return mt.client.MultiSave(kvs)
}
// MetaTable.ddLock.Lock() before calling this function
func (mt *metaTable) deleteCollectionsAndSegmentsMeta(collID typeutil.UniqueID, segIDs []typeutil.UniqueID) error {
collIDStr := "/collection/" + strconv.FormatInt(collID, 10)
totalIDStrs := make([]string, 0, 1+len(segIDs))
totalIDStrs = append(totalIDStrs, collIDStr)
for _, singleID := range segIDs {
totalIDStrs = append(totalIDStrs, "/segment/"+strconv.FormatInt(singleID, 10))
}
collMeta, ok := mt.collID2Meta[collID]
if ok {
delete(mt.collID2Meta, collID)
}
_, ok = mt.collName2ID[collMeta.Schema.Name]
if ok {
delete(mt.collName2ID, collMeta.Schema.Name)
}
for _, segID := range segIDs {
_, ok := mt.segID2Meta[segID]
if ok {
delete(mt.segID2Meta, segID)
}
}
return mt.client.MultiRemove(totalIDStrs)
}
func (mt *metaTable) AddCollection(coll *pb.CollectionMeta) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
if len(coll.SegmentIDs) != 0 {
return errors.Errorf("segment should be empty when creating collection")
}
if len(coll.PartitionTags) == 0 {
coll.PartitionTags = append(coll.PartitionTags, Params.DefaultPartitionTag)
}
_, ok := mt.collName2ID[coll.Schema.Name]
if ok {
return errors.Errorf("collection alread exists with name = " + coll.Schema.Name)
}
err := mt.saveCollectionMeta(coll)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) DeleteCollection(collID typeutil.UniqueID) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
collMeta, ok := mt.collID2Meta[collID]
if !ok {
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(collID, 10))
}
err := mt.deleteCollectionsAndSegmentsMeta(collID, collMeta.SegmentIDs)
if err != nil {
_ = mt.reloadFromKV()
return err
}
// remove index meta
for _, v := range collMeta.SegmentIDs {
if err := mt.removeSegmentIndexMeta(v); err != nil {
return err
}
}
return nil
}
func (mt *metaTable) HasCollection(collID typeutil.UniqueID) bool {
mt.ddLock.RLock()
defer mt.ddLock.RUnlock()
_, ok := mt.collID2Meta[collID]
return ok
}
func (mt *metaTable) GetCollectionByName(collectionName string) (*pb.CollectionMeta, error) {
mt.ddLock.RLock()
defer mt.ddLock.RUnlock()
vid, ok := mt.collName2ID[collectionName]
if !ok {
return nil, errors.Errorf("can't find collection: " + collectionName)
}
col, ok := mt.collID2Meta[vid]
if !ok {
return nil, errors.Errorf("can't find collection: " + collectionName)
}
return &col, nil
}
func (mt *metaTable) ListCollections() ([]string, error) {
mt.ddLock.RLock()
defer mt.ddLock.RUnlock()
colls := make([]string, 0, len(mt.collName2ID))
for name := range mt.collName2ID {
colls = append(colls, name)
}
return colls, nil
}
func (mt *metaTable) AddPartition(collID typeutil.UniqueID, partitionTag string, partitionID typeutil.UniqueID) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
coll, ok := mt.collID2Meta[collID]
if !ok {
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(collID, 10))
}
// number of partition tags (except _default) should be limited to 4096 by default
if int64(len(coll.PartitionTags)) > Params.MaxPartitionNum {
return errors.New("maximum partition's number should be limit to " + strconv.FormatInt(Params.MaxPartitionNum, 10))
}
for _, t := range coll.PartitionTags {
if t == partitionTag {
return errors.Errorf("partition already exists.")
}
}
coll.PartitionTags = append(coll.PartitionTags, partitionTag)
coll.PartitionIDs = append(coll.PartitionIDs, partitionID)
err := mt.saveCollectionMeta(&coll)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) HasPartition(collID typeutil.UniqueID, tag string) bool {
mt.ddLock.RLock()
defer mt.ddLock.RUnlock()
col, ok := mt.collID2Meta[collID]
if !ok {
return false
}
for _, partitionTag := range col.PartitionTags {
if partitionTag == tag {
return true
}
}
return false
}
func (mt *metaTable) DeletePartition(collID typeutil.UniqueID, tag string) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
if tag == Params.DefaultPartitionTag {
return errors.New("default partition cannot be deleted")
}
collMeta, ok := mt.collID2Meta[collID]
if !ok {
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(collID, 10))
}
// check tag exists
exist := false
pt := make([]string, 0, len(collMeta.PartitionTags))
pd := make([]typeutil.UniqueID, 0, len(collMeta.PartitionIDs))
for i, t := range collMeta.PartitionTags {
if t != tag {
pt = append(pt, t)
pd = append(pd, collMeta.PartitionIDs[i])
} else {
exist = true
}
}
if !exist {
return errors.New("partition " + tag + " does not exist")
}
if len(pt) == len(collMeta.PartitionTags) {
return nil
}
toDeleteSeg := make([]typeutil.UniqueID, 0, len(collMeta.SegmentIDs))
seg := make([]typeutil.UniqueID, 0, len(collMeta.SegmentIDs))
for _, s := range collMeta.SegmentIDs {
sm, ok := mt.segID2Meta[s]
if !ok {
return errors.Errorf("DeletePartition:can't find segment id = %d", s)
}
if sm.PartitionTag != tag {
seg = append(seg, s)
} else {
toDeleteSeg = append(toDeleteSeg, s)
}
}
collMeta.PartitionTags = pt
collMeta.PartitionIDs = pd
collMeta.SegmentIDs = seg
err := mt.saveCollectionAndDeleteSegmentsMeta(&collMeta, toDeleteSeg)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) AddSegment(seg *pb.SegmentMeta) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
collID := seg.CollectionID
collMeta := mt.collID2Meta[collID]
collMeta.SegmentIDs = append(collMeta.SegmentIDs, seg.SegmentID)
err := mt.saveCollectionsAndSegmentsMeta(&collMeta, seg)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) UpdateSegment(seg *pb.SegmentMeta) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
collID := seg.CollectionID
collMeta := mt.collID2Meta[collID]
isNewSegID := true
for _, segID := range collMeta.SegmentIDs {
if segID == seg.SegmentID {
isNewSegID = false
break
}
}
if isNewSegID {
collMeta.SegmentIDs = append(collMeta.SegmentIDs, seg.SegmentID)
if err := mt.saveCollectionsAndSegmentsMeta(&collMeta, seg); err != nil {
_ = mt.reloadFromKV()
return err
}
} else {
if err := mt.saveSegmentMeta(seg); err != nil {
_ = mt.reloadFromKV()
return err
}
}
return nil
}
func (mt *metaTable) GetSegmentByID(segID typeutil.UniqueID) (*pb.SegmentMeta, error) {
mt.ddLock.RLock()
defer mt.ddLock.RUnlock()
sm, ok := mt.segID2Meta[segID]
if !ok {
return nil, errors.Errorf("GetSegmentByID:can't find segment id = %d", segID)
}
return &sm, nil
}
func (mt *metaTable) DeleteSegment(segID typeutil.UniqueID) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
segMeta, ok := mt.segID2Meta[segID]
if !ok {
return errors.Errorf("DeleteSegment:can't find segment. id = " + strconv.FormatInt(segID, 10))
}
collMeta, ok := mt.collID2Meta[segMeta.CollectionID]
if !ok {
return errors.Errorf("can't find collection. id = " + strconv.FormatInt(segMeta.CollectionID, 10))
}
for i := 0; i < len(collMeta.SegmentIDs); i++ {
if collMeta.SegmentIDs[i] == segID {
collMeta.SegmentIDs = append(collMeta.SegmentIDs[:i], collMeta.SegmentIDs[i+1:]...)
}
}
err := mt.saveCollectionAndDeleteSegmentsMeta(&collMeta, []typeutil.UniqueID{segID})
if err != nil {
_ = mt.reloadFromKV()
return err
}
return mt.removeSegmentIndexMeta(segID)
}
func (mt *metaTable) CloseSegment(segID typeutil.UniqueID, closeTs typeutil.Timestamp) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
segMeta, ok := mt.segID2Meta[segID]
if !ok {
return errors.Errorf("CloseSegment:can't find segment id = " + strconv.FormatInt(segID, 10))
}
segMeta.CloseTime = closeTs
err := mt.saveSegmentMeta(&segMeta)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) AddFieldIndexMeta(meta *pb.FieldIndexMeta) error {
mt.indexLock.Lock()
defer mt.indexLock.Unlock()
segID := meta.SegmentID
if _, ok := mt.segID2IndexMetas[segID]; !ok {
mt.segID2IndexMetas[segID] = make([]pb.FieldIndexMeta, 0)
}
for _, v := range mt.segID2IndexMetas[segID] {
if v.FieldID == meta.FieldID && typeutil.CompareIndexParams(v.IndexParams, meta.IndexParams) {
return fmt.Errorf("segment %d field id %d's index meta already exist", segID, meta.FieldID)
}
}
mt.segID2IndexMetas[segID] = append(mt.segID2IndexMetas[segID], *meta)
err := mt.saveFieldIndexMetaToEtcd(meta)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) saveFieldIndexMetaToEtcd(meta *pb.FieldIndexMeta) error {
key := "/indexmeta/" + strconv.FormatInt(meta.SegmentID, 10) + strconv.FormatInt(meta.FieldID, 10) + strconv.FormatInt(meta.IndexID, 10)
marshaledMeta := proto.MarshalTextString(meta)
return mt.client.Save(key, marshaledMeta)
}
func (mt *metaTable) DeleteFieldIndexMeta(segID typeutil.UniqueID, fieldID typeutil.UniqueID, indexParams []*commonpb.KeyValuePair) error {
mt.indexLock.Lock()
defer mt.indexLock.Unlock()
if _, ok := mt.segID2IndexMetas[segID]; !ok {
return fmt.Errorf("can not find index meta of segment %d", segID)
}
for i, v := range mt.segID2IndexMetas[segID] {
if v.FieldID == fieldID && typeutil.CompareIndexParams(v.IndexParams, indexParams) {
mt.segID2IndexMetas[segID] = append(mt.segID2IndexMetas[segID][:i], mt.segID2IndexMetas[segID][i+1:]...)
err := mt.deleteFieldIndexMetaToEtcd(segID, fieldID, v.IndexID)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
}
return fmt.Errorf("can not find index meta of field %d", fieldID)
}
func (mt *metaTable) deleteFieldIndexMetaToEtcd(segID typeutil.UniqueID, fieldID typeutil.UniqueID, indexID typeutil.UniqueID) error {
key := "/indexmeta/" + strconv.FormatInt(segID, 10) + strconv.FormatInt(fieldID, 10) + strconv.FormatInt(indexID, 10)
return mt.client.Remove(key)
}
func (mt *metaTable) HasFieldIndexMeta(segID typeutil.UniqueID, fieldID typeutil.UniqueID, indexParams []*commonpb.KeyValuePair) (bool, error) {
mt.indexLock.RLock()
defer mt.indexLock.RUnlock()
if _, ok := mt.segID2IndexMetas[segID]; !ok {
return false, nil
}
for _, v := range mt.segID2IndexMetas[segID] {
if v.FieldID == fieldID && typeutil.CompareIndexParams(v.IndexParams, indexParams) {
return true, nil
}
}
return false, nil
}
func (mt *metaTable) GetFieldIndexMeta(segID typeutil.UniqueID, fieldID typeutil.UniqueID, indexParams []*commonpb.KeyValuePair) (*pb.FieldIndexMeta, error) {
mt.indexLock.RLock()
defer mt.indexLock.RUnlock()
if _, ok := mt.segID2IndexMetas[segID]; !ok {
return nil, fmt.Errorf("can not find segment %d", segID)
}
for _, v := range mt.segID2IndexMetas[segID] {
if v.FieldID == fieldID && typeutil.CompareIndexParams(v.IndexParams, indexParams) {
return &v, nil
}
}
return nil, fmt.Errorf("can not find field %d", fieldID)
}
func (mt *metaTable) UpdateFieldIndexMeta(meta *pb.FieldIndexMeta) error {
mt.indexLock.Lock()
defer mt.indexLock.Unlock()
segID := meta.SegmentID
if _, ok := mt.segID2IndexMetas[segID]; !ok {
mt.segID2IndexMetas[segID] = make([]pb.FieldIndexMeta, 0)
}
for i, v := range mt.segID2IndexMetas[segID] {
if v.FieldID == meta.FieldID && typeutil.CompareIndexParams(v.IndexParams, meta.IndexParams) {
mt.segID2IndexMetas[segID][i] = *meta
err := mt.deleteFieldIndexMetaToEtcd(segID, v.FieldID, v.IndexID)
if err != nil {
_ = mt.reloadFromKV()
return err
}
err = mt.saveFieldIndexMetaToEtcd(meta)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
}
mt.segID2IndexMetas[segID] = append(mt.segID2IndexMetas[segID], *meta)
err := mt.saveFieldIndexMetaToEtcd(meta)
if err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) removeSegmentIndexMeta(segID typeutil.UniqueID) error {
mt.indexLock.Lock()
defer mt.indexLock.Unlock()
delete(mt.segID2IndexMetas, segID)
keys, _, err := mt.client.LoadWithPrefix("indexmeta/" + strconv.FormatInt(segID, 10))
if err != nil {
_ = mt.reloadFromKV()
return err
}
if err = mt.client.MultiRemove(keys); err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
func (mt *metaTable) GetFieldTypeParams(collID typeutil.UniqueID, fieldID typeutil.UniqueID) ([]*commonpb.KeyValuePair, error) {
mt.ddLock.RLock()
defer mt.ddLock.RUnlock()
if _, ok := mt.collID2Meta[collID]; !ok {
return nil, fmt.Errorf("can not find collection with id %d", collID)
}
for _, fieldSchema := range mt.collID2Meta[collID].Schema.Fields {
if fieldSchema.FieldID == fieldID {
return fieldSchema.TypeParams, nil
}
}
return nil, fmt.Errorf("can not find field %d in collection %d", fieldID, collID)
}
func (mt *metaTable) GetFieldIndexParams(collID typeutil.UniqueID, fieldID typeutil.UniqueID) ([]*commonpb.KeyValuePair, error) {
mt.ddLock.RLock()
defer mt.ddLock.RUnlock()
if _, ok := mt.collID2Meta[collID]; !ok {
return nil, fmt.Errorf("can not find collection with id %d", collID)
}
for _, fieldSchema := range mt.collID2Meta[collID].Schema.Fields {
if fieldSchema.FieldID == fieldID {
return fieldSchema.IndexParams, nil
}
}
return nil, fmt.Errorf("can not find field %d in collection %d", fieldID, collID)
}
func (mt *metaTable) UpdateFieldIndexParams(collName string, fieldName string, indexParams []*commonpb.KeyValuePair) error {
mt.ddLock.Lock()
defer mt.ddLock.Unlock()
vid, ok := mt.collName2ID[collName]
if !ok {
return errors.Errorf("can't find collection: " + collName)
}
meta, ok := mt.collID2Meta[vid]
if !ok {
return errors.Errorf("can't find collection: " + collName)
}
for _, fieldSchema := range meta.Schema.Fields {
if fieldSchema.Name == fieldName {
fieldSchema.IndexParams = indexParams
if err := mt.saveCollectionMeta(&meta); err != nil {
_ = mt.reloadFromKV()
return err
}
return nil
}
}
return fmt.Errorf("can not find field with id %s", fieldName)
}
func (mt *metaTable) IsIndexable(collID typeutil.UniqueID, fieldID typeutil.UniqueID) (bool, error) {
mt.ddLock.RLock()
defer mt.ddLock.RUnlock()
if _, ok := mt.collID2Meta[collID]; !ok {
return false, fmt.Errorf("can not find collection with id %d", collID)
}
for _, v := range mt.collID2Meta[collID].Schema.Fields {
// field is vector type and index params is not empty
if v.FieldID == fieldID && (v.DataType == schemapb.DataType_VECTOR_BINARY || v.DataType == schemapb.DataType_VECTOR_FLOAT) &&
len(v.IndexParams) != 0 {
return true, nil
}
}
// fieldID is not in the schema (e.g. timestamp) or not indexable
return false, nil
}
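For reference, the metaTable above keeps all state as text-marshaled protobufs under a handful of fixed etcd key prefixes. The helpers below are illustrative only (not part of this diff) and simply mirror the key construction used in saveCollectionMeta, saveSegmentMeta and saveFieldIndexMetaToEtcd.
// Illustrative helpers mirroring metaTable's etcd key layout.
func collectionKey(collID typeutil.UniqueID) string {
    return "/collection/" + strconv.FormatInt(collID, 10)
}
func segmentKey(segID typeutil.UniqueID) string {
    return "/segment/" + strconv.FormatInt(segID, 10)
}
func fieldIndexKey(segID, fieldID, indexID typeutil.UniqueID) string {
    // IDs are concatenated without a separator, exactly as in saveFieldIndexMetaToEtcd
    return "/indexmeta/" + strconv.FormatInt(segID, 10) + strconv.FormatInt(fieldID, 10) + strconv.FormatInt(indexID, 10)
}
// Values are proto.MarshalTextString blobs; reloadFromKV rebuilds the in-memory maps
// with LoadWithPrefix("tenant"|"proxy"|"collection"|"segment"|"indexmeta").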

View File

@ -1,28 +0,0 @@
package masterservice
import (
"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
)
var Params ParamTable
type ParamTable struct {
paramtable.BaseTable
Address string
Port int
NodeID uint64
PulsarAddress string
EtcdAddress string
MetaRootPath string
KvRootPath string
ProxyTimeTickChannel string
MsgChannelSubName string
TimeTickChannel string
DdChannel string
StatisticsChannel string
MaxPartitionNum int64
DefaultPartitionTag string
}

View File

@ -1,421 +0,0 @@
package masterservice
import (
"fmt"
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/milvuspb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)
type reqTask interface {
Type() commonpb.MsgType
Ts() (typeutil.Timestamp, error)
Execute() error
WaitToFinish() error
Notify(err error)
}
type baseReqTask struct {
cv chan error
core *Core
}
func (bt *baseReqTask) Notify(err error) {
bt.cv <- err
}
func (bt *baseReqTask) WaitToFinish() error {
select {
case <-bt.core.ctx.Done():
return errors.Errorf("context done")
case err, ok := <-bt.cv:
if !ok {
return errors.Errorf("notify chan closed")
}
return err
}
}
type CreateCollectionReqTask struct {
baseReqTask
Req *milvuspb.CreateCollectionRequest
}
func (t *CreateCollectionReqTask) Type() commonpb.MsgType {
return t.Req.Base.MsgType
}
func (t *CreateCollectionReqTask) Ts() (typeutil.Timestamp, error) {
return t.Req.Base.Timestamp, nil
}
func (t *CreateCollectionReqTask) Execute() error {
var schema schemapb.CollectionSchema
err := proto.Unmarshal(t.Req.Schema, &schema)
if err != nil {
return err
}
for idx, field := range schema.Fields {
field.FieldID = int64(idx + StartOfUserFieldID)
}
rowIDField := &schemapb.FieldSchema{
FieldID: int64(RowIDField),
Name: RowIDFieldName,
IsPrimaryKey: false,
Description: "row id",
DataType: schemapb.DataType_INT64,
}
timeStampField := &schemapb.FieldSchema{
FieldID: int64(TimeStampField),
Name: TimeStampFieldName,
IsPrimaryKey: false,
Description: "time stamp",
DataType: schemapb.DataType_INT64,
}
schema.Fields = append(schema.Fields, rowIDField, timeStampField)
collID, err := t.core.idAllocator.AllocOne()
if err != nil {
return err
}
collTs, err := t.Ts()
if err != nil {
return err
}
partitionID, err := t.core.idAllocator.AllocOne()
if err != nil {
return err
}
coll := etcdpb.CollectionMeta{
ID: collID,
Schema: &schema,
CreateTime: collTs,
SegmentIDs: make([]typeutil.UniqueID, 0),
PartitionTags: []string{Params.DefaultPartitionTag},
PartitionIDs: []typeutil.UniqueID{partitionID},
}
err = t.core.MetaTable.AddCollection(&coll)
if err != nil {
return err
}
err = t.core.DdCreateCollectionReq(t)
if err != nil {
return err
}
return nil
}
type DropCollectionReqTask struct {
baseReqTask
Req *milvuspb.DropCollectionRequest
}
func (t *DropCollectionReqTask) Type() commonpb.MsgType {
return t.Req.Base.MsgType
}
func (t *DropCollectionReqTask) Ts() (typeutil.Timestamp, error) {
return t.Req.Base.Timestamp, nil
}
func (t *DropCollectionReqTask) Execute() error {
collMeta, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName)
if err != nil {
return err
}
err = t.core.MetaTable.DeleteCollection(collMeta.ID)
if err != nil {
return err
}
//data service should drop segments that belong to this collection from the segment manager
err = t.core.DdDropCollectionReq(t)
if err != nil {
return err
}
return nil
}
type HasCollectionReqTask struct {
baseReqTask
Req *milvuspb.HasCollectionRequest
HasCollection bool
}
func (t *HasCollectionReqTask) Type() commonpb.MsgType {
return t.Req.Base.MsgType
}
func (t *HasCollectionReqTask) Ts() (typeutil.Timestamp, error) {
return t.Req.Base.Timestamp, nil
}
func (t *HasCollectionReqTask) Execute() error {
_, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName)
if err == nil {
t.HasCollection = true
} else {
t.HasCollection = false
}
return nil
}
type DescribeCollectionReqTask struct {
baseReqTask
Req *milvuspb.DescribeCollectionRequest
Rsp *milvuspb.DescribeCollectionResponse
}
func (t *DescribeCollectionReqTask) Type() commonpb.MsgType {
return t.Req.Base.MsgType
}
func (t *DescribeCollectionReqTask) Ts() (typeutil.Timestamp, error) {
return t.Req.Base.Timestamp, nil
}
func (t *DescribeCollectionReqTask) Execute() error {
coll, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName)
if err != nil {
return err
}
t.Rsp.Schema = proto.Clone(coll.Schema).(*schemapb.CollectionSchema)
var newField []*schemapb.FieldSchema
for _, field := range t.Rsp.Schema.Fields {
if field.FieldID >= StartOfUserFieldID {
newField = append(newField, field)
}
}
t.Rsp.Schema.Fields = newField
return nil
}
type CollectionStatsReqTask struct {
baseReqTask
Req *milvuspb.CollectionStatsRequest
Rsp *milvuspb.CollectionStatsResponse
}
func (t *CollectionStatsReqTask) Type() commonpb.MsgType {
return t.Req.Base.MsgType
}
func (t *CollectionStatsReqTask) Ts() (typeutil.Timestamp, error) {
return t.Req.Base.Timestamp, nil
}
//row_count
//data_size
func (t *CollectionStatsReqTask) Execute() error {
coll, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName)
if err != nil {
return err
}
var rowCount int64 = 0
var dataSize int64 = 0
for _, seg := range coll.SegmentIDs {
m, e := t.core.GetSegmentMeta(seg)
if e != nil {
return e
}
rowCount += m.NumRows
dataSize += m.MemSize
}
t.Rsp.Stats = append(t.Rsp.Stats,
&commonpb.KeyValuePair{
Key: "row_count",
Value: fmt.Sprintf("%d", rowCount),
})
t.Rsp.Stats = append(t.Rsp.Stats,
&commonpb.KeyValuePair{
Key: "data_size",
Value: fmt.Sprintf("%d", dataSize),
})
return nil
}
type ShowCollectionReqTask struct {
baseReqTask
Req *milvuspb.ShowCollectionRequest
Rsp *milvuspb.ShowCollectionResponse
}
func (t *ShowCollectionReqTask) Type() commonpb.MsgType {
return t.Req.Base.MsgType
}
func (t *ShowCollectionReqTask) Ts() (typeutil.Timestamp, error) {
return t.Req.Base.Timestamp, nil
}
func (t *ShowCollectionReqTask) Execute() error {
coll, err := t.core.MetaTable.ListCollections()
if err != nil {
return err
}
t.Rsp.CollectionNames = coll
return nil
}
type CreatePartitionReqTask struct {
baseReqTask
Req *milvuspb.CreatePartitionRequest
}
func (t *CreatePartitionReqTask) Type() commonpb.MsgType {
return t.Req.Base.MsgType
}
func (t *CreatePartitionReqTask) Ts() (typeutil.Timestamp, error) {
return t.Req.Base.Timestamp, nil
}
func (t *CreatePartitionReqTask) Execute() error {
collMeta, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName)
if err != nil {
return err
}
partitionID, err := t.core.idAllocator.AllocOne()
if err != nil {
return err
}
err = t.core.MetaTable.AddPartition(collMeta.ID, t.Req.PartitionName, partitionID)
if err != nil {
return err
}
err = t.core.DdCreatePartitionReq(t)
if err != nil {
return err
}
return nil
}
type DropPartitionReqTask struct {
baseReqTask
Req *milvuspb.DropPartitionRequest
}
func (t *DropPartitionReqTask) Type() commonpb.MsgType {
return t.Req.Base.MsgType
}
func (t *DropPartitionReqTask) Ts() (typeutil.Timestamp, error) {
return t.Req.Base.Timestamp, nil
}
func (t *DropPartitionReqTask) Execute() error {
coll, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName)
if err != nil {
return err
}
err = t.core.MetaTable.DeletePartition(coll.ID, t.Req.PartitionName)
if err != nil {
return err
}
err = t.core.DdDropPartitionReq(t)
if err != nil {
return err
}
return nil
}
type HasPartitionReqTask struct {
baseReqTask
Req *milvuspb.HasPartitionRequest
HasPartition bool
}
func (t *HasPartitionReqTask) Type() commonpb.MsgType {
return t.Req.Base.MsgType
}
func (t *HasPartitionReqTask) Ts() (typeutil.Timestamp, error) {
return t.Req.Base.Timestamp, nil
}
func (t *HasPartitionReqTask) Execute() error {
coll, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName)
if err != nil {
return err
}
t.HasPartition = t.core.MetaTable.HasPartition(coll.ID, t.Req.PartitionName)
return nil
}
type PartitionStatsReqTask struct {
baseReqTask
Req *milvuspb.PartitionStatsRequest
Rsp *milvuspb.PartitionStatsResponse
}
func (t *PartitionStatsReqTask) Type() commonpb.MsgType {
return t.Req.Base.MsgType
}
func (t *PartitionStatsReqTask) Ts() (typeutil.Timestamp, error) {
return t.Req.Base.Timestamp, nil
}
func (t *PartitionStatsReqTask) Execute() error {
coll, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName)
if err != nil {
return err
}
var rowCount int64 = 0
var dataSize int64 = 0
for _, seg := range coll.SegmentIDs {
m, e := t.core.GetSegmentMeta(seg)
if e != nil {
return e
}
if m.PartitionTag == t.Req.PartitionName {
rowCount += m.NumRows
dataSize += m.MemSize
}
}
t.Rsp.Stats = append(t.Rsp.Stats,
&commonpb.KeyValuePair{
Key: "row_count",
Value: fmt.Sprintf("%d", rowCount),
})
t.Rsp.Stats = append(t.Rsp.Stats,
&commonpb.KeyValuePair{
Key: "data_size",
Value: fmt.Sprintf("%d", dataSize),
})
return nil
}
type ShowPartitionReqTask struct {
baseReqTask
Req *milvuspb.ShowPartitionRequest
Rsp *milvuspb.ShowPartitionResponse
}
func (t *ShowPartitionReqTask) Type() commonpb.MsgType {
return t.Req.Base.MsgType
}
func (t *ShowPartitionReqTask) Ts() (typeutil.Timestamp, error) {
return t.Req.Base.Timestamp, nil
}
//TODO: list partition IDs and partition tags
func (t *ShowPartitionReqTask) Execute() error {
coll, err := t.core.MetaTable.GetCollectionByName(t.Req.CollectionName)
if err != nil {
return err
}
t.Rsp.PartitionNames = append(t.Rsp.PartitionNames, coll.PartitionTags...)
return nil
}
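All DDL requests above share the same reqTask shape: Ts() gates scheduling in startDdScheduler, Execute() runs serially, and Notify/WaitToFinish hand the result back to the caller. Below is a hedged sketch of a new, purely hypothetical task type that satisfies the interface.
// Hypothetical task illustrating the reqTask contract; not part of this diff.
type PingReqTask struct {
    baseReqTask
    ts typeutil.Timestamp
}

func (t *PingReqTask) Type() commonpb.MsgType          { return commonpb.MsgType(0) }
func (t *PingReqTask) Ts() (typeutil.Timestamp, error) { return t.ts, nil }
func (t *PingReqTask) Execute() error                  { return nil }

// Caller side, same shape as Core.CreateCollection:
//   t := &PingReqTask{baseReqTask: baseReqTask{cv: make(chan error), core: c}, ts: ts}
//   c.ddReqQueue <- t
//   err := t.WaitToFinish()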

View File

@ -1,189 +0,0 @@
package masterservice
import (
"log"
"sync/atomic"
"time"
"unsafe"
"go.uber.org/zap"
"github.com/zilliztech/milvus-distributed/internal/errors"
"github.com/zilliztech/milvus-distributed/internal/kv"
"github.com/zilliztech/milvus-distributed/internal/util/tsoutil"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)
const (
// UpdateTimestampStep is used to update timestamp.
UpdateTimestampStep = 50 * time.Millisecond
// updateTimestampGuard is the min timestamp interval.
updateTimestampGuard = time.Millisecond
// maxLogical is the max upper limit for logical time.
// When a TSO's logical time reaches this limit,
// the physical time will be forced to increase.
maxLogical = int64(1 << 18)
)
// atomicObject is used to store the current TSO in memory.
type atomicObject struct {
physical time.Time
logical int64
}
// timestampOracle is used to maintain the logic of tso.
type timestampOracle struct {
key string
kvBase kv.TxnBase
// TODO: remove saveInterval
saveInterval time.Duration
maxResetTSGap func() time.Duration
// For tso, set after the PD becomes a leader.
TSO unsafe.Pointer
lastSavedTime atomic.Value
}
func (t *timestampOracle) loadTimestamp() (time.Time, error) {
strData, err := t.kvBase.Load(t.key)
var binData []byte = []byte(strData)
if err != nil {
return typeutil.ZeroTime, err
}
if len(binData) == 0 {
return typeutil.ZeroTime, nil
}
return typeutil.ParseTimestamp(binData)
}
// save timestamp, if lastTs is 0, we think the timestamp doesn't exist, so create it,
// otherwise, update it.
func (t *timestampOracle) saveTimestamp(ts time.Time) error {
data := typeutil.Uint64ToBytes(uint64(ts.UnixNano()))
err := t.kvBase.Save(t.key, string(data))
if err != nil {
return errors.WithStack(err)
}
t.lastSavedTime.Store(ts)
return nil
}
func (t *timestampOracle) InitTimestamp() error {
//last, err := t.loadTimestamp()
//if err != nil {
// return err
//}
next := time.Now()
// If the current system time minus the saved etcd timestamp is less than `updateTimestampGuard`,
// the timestamp allocation will start from the saved etcd timestamp temporarily.
//if typeutil.SubTimeByWallClock(next, last) < updateTimestampGuard {
// next = last.Add(updateTimestampGuard)
//}
save := next.Add(t.saveInterval)
if err := t.saveTimestamp(save); err != nil {
return err
}
//log.Print("sync and save timestamp", zap.Time("last", last), zap.Time("save", save), zap.Time("next", next))
current := &atomicObject{
physical: next,
}
atomic.StorePointer(&t.TSO, unsafe.Pointer(current))
return nil
}
// ResetUserTimestamp update the physical part with specified tso.
func (t *timestampOracle) ResetUserTimestamp(tso uint64) error {
physical, _ := tsoutil.ParseTS(tso)
next := physical.Add(time.Millisecond)
prev := (*atomicObject)(atomic.LoadPointer(&t.TSO))
// do not update
if typeutil.SubTimeByWallClock(next, prev.physical) <= 3*updateTimestampGuard {
return errors.New("the specified ts too small than now")
}
if typeutil.SubTimeByWallClock(next, prev.physical) >= t.maxResetTSGap() {
return errors.New("the specified ts too large than now")
}
save := next.Add(t.saveInterval)
if err := t.saveTimestamp(save); err != nil {
return err
}
update := &atomicObject{
physical: next,
}
atomic.CompareAndSwapPointer(&t.TSO, unsafe.Pointer(prev), unsafe.Pointer(update))
return nil
}
// UpdateTimestamp is used to update the timestamp.
// This function will do two things:
// 1. When the logical time is going to be used up, increase the current physical time.
// 2. When the time window is not big enough, which means the saved etcd time minus the next physical time
// will be less than or equal to `updateTimestampGuard`, then the time window needs to be updated and
// we also need to save the next physical time plus `TsoSaveInterval` into etcd.
//
// Here are some constraints that this function must satisfy:
// 1. The saved time is monotonically increasing.
// 2. The physical time is monotonically increasing.
// 3. The physical time is always less than the saved timestamp.
func (t *timestampOracle) UpdateTimestamp() error {
prev := (*atomicObject)(atomic.LoadPointer(&t.TSO))
now := time.Now()
jetLag := typeutil.SubTimeByWallClock(now, prev.physical)
if jetLag > 3*UpdateTimestampStep {
log.Print("clock offset", zap.Duration("jet-lag", jetLag), zap.Time("prev-physical", prev.physical), zap.Time("now", now))
}
var next time.Time
prevLogical := atomic.LoadInt64(&prev.logical)
// If the system time is greater, it will be synchronized with the system time.
if jetLag > updateTimestampGuard {
next = now
} else if prevLogical > maxLogical/2 {
// The reason for choosing maxLogical/2 here is that it's big enough for common cases,
// because enough timestamps can be allocated before the next update.
log.Print("the logical time may be not enough", zap.Int64("prev-logical", prevLogical))
next = prev.physical.Add(time.Millisecond)
} else {
// It will still use the previous physical time to alloc the timestamp.
return nil
}
// It is not safe to increase the physical time to `next`.
// The time window needs to be updated and saved to etcd.
if typeutil.SubTimeByWallClock(t.lastSavedTime.Load().(time.Time), next) <= updateTimestampGuard {
save := next.Add(t.saveInterval)
if err := t.saveTimestamp(save); err != nil {
return err
}
}
current := &atomicObject{
physical: next,
logical: 0,
}
atomic.StorePointer(&t.TSO, unsafe.Pointer(current))
return nil
}
// ResetTimestamp is used to reset the timestamp.
func (t *timestampOracle) ResetTimestamp() {
zero := &atomicObject{
physical: time.Now(),
}
atomic.StorePointer(&t.TSO, unsafe.Pointer(zero))
}
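The atomicObject above splits a TSO into a physical wall-clock part and a logical counter capped by maxLogical = 1<<18. The sketch below shows the usual composition, assuming the physical part is Unix milliseconds shifted left by 18 bits with the logical counter in the low bits; it is illustrative and not the package's own helper.
const logicalBits = 18

// composeTS packs a physical time (milliseconds) and a logical counter into one uint64.
func composeTS(physical time.Time, logical int64) uint64 {
    ms := uint64(physical.UnixNano() / int64(time.Millisecond))
    return ms<<logicalBits | uint64(logical)
}

// parseTS is the inverse: it recovers the physical time and the logical counter.
func parseTS(ts uint64) (time.Time, int64) {
    logical := int64(ts & (1<<logicalBits - 1))
    physical := time.Unix(0, int64(ts>>logicalBits)*int64(time.Millisecond))
    return physical, logical
}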

View File

@ -18,32 +18,6 @@ message ProxyMeta {
repeated string result_channelIDs = 3;
}
message PartitionInfo {
string partition_name = 1;
int64 partitionID = 2;
repeated int64 segmentIDs = 3;
}
message CollectionInfo {
int64 ID = 1;
schema.CollectionSchema schema = 2;
uint64 create_time = 3;
repeated int64 partitionIDs = 4;
}
message IndexInfo {
string index_name = 1;
int64 indexID = 2;
repeated common.KeyValuePair index_params = 3;
}
message SegmentIndexInfo {
int64 segmentID = 1;
int64 fieldID = 2;
int64 indexID = 3;
int64 buildID = 4;
}
message CollectionMeta {
int64 ID=1;
schema.CollectionSchema schema=2;

View File

@ -140,242 +140,6 @@ func (m *ProxyMeta) GetResultChannelIDs() []string {
return nil
}
type PartitionInfo struct {
PartitionName string `protobuf:"bytes,1,opt,name=partition_name,json=partitionName,proto3" json:"partition_name,omitempty"`
PartitionID int64 `protobuf:"varint,2,opt,name=partitionID,proto3" json:"partitionID,omitempty"`
SegmentIDs []int64 `protobuf:"varint,3,rep,packed,name=segmentIDs,proto3" json:"segmentIDs,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PartitionInfo) Reset() { *m = PartitionInfo{} }
func (m *PartitionInfo) String() string { return proto.CompactTextString(m) }
func (*PartitionInfo) ProtoMessage() {}
func (*PartitionInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{2}
}
func (m *PartitionInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PartitionInfo.Unmarshal(m, b)
}
func (m *PartitionInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PartitionInfo.Marshal(b, m, deterministic)
}
func (m *PartitionInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_PartitionInfo.Merge(m, src)
}
func (m *PartitionInfo) XXX_Size() int {
return xxx_messageInfo_PartitionInfo.Size(m)
}
func (m *PartitionInfo) XXX_DiscardUnknown() {
xxx_messageInfo_PartitionInfo.DiscardUnknown(m)
}
var xxx_messageInfo_PartitionInfo proto.InternalMessageInfo
func (m *PartitionInfo) GetPartitionName() string {
if m != nil {
return m.PartitionName
}
return ""
}
func (m *PartitionInfo) GetPartitionID() int64 {
if m != nil {
return m.PartitionID
}
return 0
}
func (m *PartitionInfo) GetSegmentIDs() []int64 {
if m != nil {
return m.SegmentIDs
}
return nil
}
type CollectionInfo struct {
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Schema *schemapb.CollectionSchema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
CreateTime uint64 `protobuf:"varint,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
PartitionIDs []int64 `protobuf:"varint,4,rep,packed,name=partitionIDs,proto3" json:"partitionIDs,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CollectionInfo) Reset() { *m = CollectionInfo{} }
func (m *CollectionInfo) String() string { return proto.CompactTextString(m) }
func (*CollectionInfo) ProtoMessage() {}
func (*CollectionInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{3}
}
func (m *CollectionInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CollectionInfo.Unmarshal(m, b)
}
func (m *CollectionInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CollectionInfo.Marshal(b, m, deterministic)
}
func (m *CollectionInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_CollectionInfo.Merge(m, src)
}
func (m *CollectionInfo) XXX_Size() int {
return xxx_messageInfo_CollectionInfo.Size(m)
}
func (m *CollectionInfo) XXX_DiscardUnknown() {
xxx_messageInfo_CollectionInfo.DiscardUnknown(m)
}
var xxx_messageInfo_CollectionInfo proto.InternalMessageInfo
func (m *CollectionInfo) GetID() int64 {
if m != nil {
return m.ID
}
return 0
}
func (m *CollectionInfo) GetSchema() *schemapb.CollectionSchema {
if m != nil {
return m.Schema
}
return nil
}
func (m *CollectionInfo) GetCreateTime() uint64 {
if m != nil {
return m.CreateTime
}
return 0
}
func (m *CollectionInfo) GetPartitionIDs() []int64 {
if m != nil {
return m.PartitionIDs
}
return nil
}
type IndexInfo struct {
IndexName string `protobuf:"bytes,1,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"`
IndexID int64 `protobuf:"varint,2,opt,name=indexID,proto3" json:"indexID,omitempty"`
IndexParams []*commonpb.KeyValuePair `protobuf:"bytes,3,rep,name=index_params,json=indexParams,proto3" json:"index_params,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IndexInfo) Reset() { *m = IndexInfo{} }
func (m *IndexInfo) String() string { return proto.CompactTextString(m) }
func (*IndexInfo) ProtoMessage() {}
func (*IndexInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{4}
}
func (m *IndexInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IndexInfo.Unmarshal(m, b)
}
func (m *IndexInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IndexInfo.Marshal(b, m, deterministic)
}
func (m *IndexInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_IndexInfo.Merge(m, src)
}
func (m *IndexInfo) XXX_Size() int {
return xxx_messageInfo_IndexInfo.Size(m)
}
func (m *IndexInfo) XXX_DiscardUnknown() {
xxx_messageInfo_IndexInfo.DiscardUnknown(m)
}
var xxx_messageInfo_IndexInfo proto.InternalMessageInfo
func (m *IndexInfo) GetIndexName() string {
if m != nil {
return m.IndexName
}
return ""
}
func (m *IndexInfo) GetIndexID() int64 {
if m != nil {
return m.IndexID
}
return 0
}
func (m *IndexInfo) GetIndexParams() []*commonpb.KeyValuePair {
if m != nil {
return m.IndexParams
}
return nil
}
type SegmentIndexInfo struct {
SegmentID int64 `protobuf:"varint,1,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
FieldID int64 `protobuf:"varint,2,opt,name=fieldID,proto3" json:"fieldID,omitempty"`
IndexID int64 `protobuf:"varint,3,opt,name=indexID,proto3" json:"indexID,omitempty"`
BuildID int64 `protobuf:"varint,4,opt,name=buildID,proto3" json:"buildID,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SegmentIndexInfo) Reset() { *m = SegmentIndexInfo{} }
func (m *SegmentIndexInfo) String() string { return proto.CompactTextString(m) }
func (*SegmentIndexInfo) ProtoMessage() {}
func (*SegmentIndexInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{5}
}
func (m *SegmentIndexInfo) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SegmentIndexInfo.Unmarshal(m, b)
}
func (m *SegmentIndexInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SegmentIndexInfo.Marshal(b, m, deterministic)
}
func (m *SegmentIndexInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_SegmentIndexInfo.Merge(m, src)
}
func (m *SegmentIndexInfo) XXX_Size() int {
return xxx_messageInfo_SegmentIndexInfo.Size(m)
}
func (m *SegmentIndexInfo) XXX_DiscardUnknown() {
xxx_messageInfo_SegmentIndexInfo.DiscardUnknown(m)
}
var xxx_messageInfo_SegmentIndexInfo proto.InternalMessageInfo
func (m *SegmentIndexInfo) GetSegmentID() int64 {
if m != nil {
return m.SegmentID
}
return 0
}
func (m *SegmentIndexInfo) GetFieldID() int64 {
if m != nil {
return m.FieldID
}
return 0
}
func (m *SegmentIndexInfo) GetIndexID() int64 {
if m != nil {
return m.IndexID
}
return 0
}
func (m *SegmentIndexInfo) GetBuildID() int64 {
if m != nil {
return m.BuildID
}
return 0
}
type CollectionMeta struct {
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
Schema *schemapb.CollectionSchema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
@ -392,7 +156,7 @@ func (m *CollectionMeta) Reset() { *m = CollectionMeta{} }
func (m *CollectionMeta) String() string { return proto.CompactTextString(m) }
func (*CollectionMeta) ProtoMessage() {}
func (*CollectionMeta) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{6}
return fileDescriptor_975d306d62b73e88, []int{2}
}
func (m *CollectionMeta) XXX_Unmarshal(b []byte) error {
@ -467,7 +231,7 @@ func (m *FieldBinlogFiles) Reset() { *m = FieldBinlogFiles{} }
func (m *FieldBinlogFiles) String() string { return proto.CompactTextString(m) }
func (*FieldBinlogFiles) ProtoMessage() {}
func (*FieldBinlogFiles) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{7}
return fileDescriptor_975d306d62b73e88, []int{3}
}
func (m *FieldBinlogFiles) XXX_Unmarshal(b []byte) error {
@ -522,7 +286,7 @@ func (m *SegmentMeta) Reset() { *m = SegmentMeta{} }
func (m *SegmentMeta) String() string { return proto.CompactTextString(m) }
func (*SegmentMeta) ProtoMessage() {}
func (*SegmentMeta) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{8}
return fileDescriptor_975d306d62b73e88, []int{4}
}
func (m *SegmentMeta) XXX_Unmarshal(b []byte) error {
@ -629,7 +393,7 @@ func (m *FieldIndexMeta) Reset() { *m = FieldIndexMeta{} }
func (m *FieldIndexMeta) String() string { return proto.CompactTextString(m) }
func (*FieldIndexMeta) ProtoMessage() {}
func (*FieldIndexMeta) Descriptor() ([]byte, []int) {
return fileDescriptor_975d306d62b73e88, []int{9}
return fileDescriptor_975d306d62b73e88, []int{5}
}
func (m *FieldIndexMeta) XXX_Unmarshal(b []byte) error {
@ -695,10 +459,6 @@ func (m *FieldIndexMeta) GetIndexFilePaths() []string {
func init() {
proto.RegisterType((*TenantMeta)(nil), "milvus.proto.etcd.TenantMeta")
proto.RegisterType((*ProxyMeta)(nil), "milvus.proto.etcd.ProxyMeta")
proto.RegisterType((*PartitionInfo)(nil), "milvus.proto.etcd.PartitionInfo")
proto.RegisterType((*CollectionInfo)(nil), "milvus.proto.etcd.CollectionInfo")
proto.RegisterType((*IndexInfo)(nil), "milvus.proto.etcd.IndexInfo")
proto.RegisterType((*SegmentIndexInfo)(nil), "milvus.proto.etcd.SegmentIndexInfo")
proto.RegisterType((*CollectionMeta)(nil), "milvus.proto.etcd.CollectionMeta")
proto.RegisterType((*FieldBinlogFiles)(nil), "milvus.proto.etcd.FieldBinlogFiles")
proto.RegisterType((*SegmentMeta)(nil), "milvus.proto.etcd.SegmentMeta")
@ -708,56 +468,50 @@ func init() {
func init() { proto.RegisterFile("etcd_meta.proto", fileDescriptor_975d306d62b73e88) }
var fileDescriptor_975d306d62b73e88 = []byte{
// 809 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x5d, 0x8f, 0xdb, 0x44,
0x14, 0x95, 0x93, 0x6c, 0x76, 0x7d, 0x93, 0xcd, 0xee, 0xfa, 0xc9, 0x94, 0x96, 0xa6, 0xae, 0x0a,
0x91, 0x10, 0x89, 0xb4, 0x08, 0xde, 0x40, 0xd0, 0x86, 0x4a, 0x11, 0xa2, 0x0d, 0xce, 0x8a, 0x07,
0x5e, 0xac, 0x89, 0x7d, 0x37, 0x19, 0xc9, 0x33, 0x0e, 0x9e, 0x31, 0xdd, 0xdd, 0x07, 0xc4, 0x2b,
0xfc, 0x04, 0x9e, 0xf9, 0x77, 0xfc, 0x06, 0x24, 0x34, 0x77, 0x1c, 0x7f, 0x6c, 0x23, 0x84, 0x2a,
0xf1, 0x78, 0xcf, 0x3d, 0xd7, 0x73, 0xe6, 0x9c, 0x3b, 0x09, 0x9c, 0xa1, 0x8e, 0x93, 0x48, 0xa0,
0x66, 0xd3, 0x5d, 0x9e, 0xe9, 0xcc, 0xbb, 0x10, 0x3c, 0xfd, 0xb9, 0x50, 0xb6, 0x9a, 0x9a, 0xee,
0x83, 0x61, 0x9c, 0x09, 0x91, 0x49, 0x0b, 0x3d, 0x18, 0xaa, 0x78, 0x8b, 0xa2, 0xa4, 0x07, 0x7f,
0x38, 0x00, 0x57, 0x28, 0x99, 0xd4, 0xdf, 0xa1, 0x66, 0xde, 0x08, 0x3a, 0x8b, 0xb9, 0xef, 0x8c,
0x9d, 0x49, 0x37, 0xec, 0x2c, 0xe6, 0xde, 0x87, 0x70, 0x26, 0x0b, 0x11, 0xfd, 0x54, 0x60, 0x7e,
0x1b, 0xc9, 0x2c, 0x41, 0xe5, 0x77, 0xa8, 0x79, 0x2a, 0x0b, 0xf1, 0xbd, 0x41, 0x5f, 0x19, 0xd0,
0xfb, 0x18, 0x2e, 0xb8, 0x54, 0x98, 0xeb, 0x28, 0xde, 0x32, 0x29, 0x31, 0x5d, 0xcc, 0x95, 0xdf,
0x1d, 0x77, 0x27, 0x6e, 0x78, 0x6e, 0x1b, 0x2f, 0x2a, 0xdc, 0xfb, 0x08, 0xce, 0xec, 0x07, 0x2b,
0xae, 0xdf, 0x1b, 0x3b, 0x13, 0x37, 0x1c, 0x11, 0x5c, 0x31, 0x83, 0x5f, 0x1d, 0x70, 0x97, 0x79,
0x76, 0x73, 0x7b, 0x50, 0xdb, 0xe7, 0x70, 0xcc, 0x92, 0x24, 0x47, 0x65, 0x35, 0x0d, 0x2e, 0x1f,
0x4e, 0x5b, 0x77, 0x2f, 0x6f, 0xfd, 0xb5, 0xe5, 0x84, 0x7b, 0xb2, 0xd1, 0x9a, 0xa3, 0x2a, 0xd2,
0x43, 0x5a, 0x6d, 0xa3, 0xd6, 0x1a, 0xdc, 0xc0, 0xe9, 0x92, 0xe5, 0x9a, 0x6b, 0x9e, 0xc9, 0x85,
0xbc, 0xce, 0xbc, 0x67, 0x30, 0xda, 0xed, 0x81, 0x48, 0x32, 0x81, 0xa4, 0xc8, 0x0d, 0x4f, 0x2b,
0xf4, 0x15, 0x13, 0xe8, 0x8d, 0x61, 0x50, 0x01, 0x8b, 0x79, 0x69, 0x5a, 0x13, 0xf2, 0x3e, 0x00,
0x50, 0xb8, 0x11, 0x28, 0xf5, 0xfe, 0xfc, 0x6e, 0xd8, 0x40, 0x82, 0x3f, 0x1d, 0x18, 0xbd, 0xc8,
0xd2, 0x14, 0xe3, 0xea, 0xec, 0xfb, 0x0e, 0x7c, 0x01, 0x7d, 0x1b, 0x66, 0x69, 0xc0, 0xb3, 0xb6,
0x01, 0x65, 0xd0, 0xf5, 0x47, 0x56, 0x04, 0x84, 0xe5, 0x90, 0xf7, 0x18, 0x06, 0x71, 0x8e, 0x4c,
0x63, 0xa4, 0xb9, 0x40, 0xbf, 0x3b, 0x76, 0x26, 0xbd, 0x10, 0x2c, 0x74, 0xc5, 0x05, 0x7a, 0x01,
0x0c, 0x1b, 0x8a, 0x95, 0xdf, 0x23, 0x91, 0x2d, 0x2c, 0xf8, 0xdd, 0x01, 0x77, 0x21, 0x13, 0xbc,
0x21, 0x85, 0x8f, 0x00, 0xb8, 0x29, 0x9a, 0xce, 0xb8, 0x84, 0x90, 0x2b, 0x3e, 0x1c, 0x53, 0x51,
0x39, 0xb2, 0x2f, 0xbd, 0x39, 0x0c, 0xed, 0xe0, 0x8e, 0xe5, 0x4c, 0x58, 0x3f, 0x06, 0x97, 0x4f,
0x0e, 0x26, 0xfa, 0x2d, 0xde, 0xfe, 0xc0, 0xd2, 0x02, 0x97, 0x8c, 0xe7, 0xe1, 0x80, 0xc6, 0x96,
0x34, 0x15, 0xfc, 0x02, 0xe7, 0xab, 0xd2, 0xc1, 0x4a, 0xd2, 0x43, 0x70, 0x2b, 0x57, 0x4b, 0xef,
0x6a, 0xc0, 0x28, 0xba, 0xe6, 0x98, 0x26, 0xb5, 0xa2, 0xb2, 0x6c, 0x6a, 0xed, 0xb6, 0xb5, 0xfa,
0x70, 0xbc, 0x2e, 0x38, 0xcd, 0xf4, 0x6c, 0xa7, 0x2c, 0x83, 0xbf, 0x5a, 0x99, 0x1d, 0xdc, 0xda,
0xff, 0x3b, 0xb3, 0xf6, 0x5a, 0xf5, 0xee, 0xaf, 0x55, 0x7b, 0x7f, 0x35, 0xdb, 0x28, 0xff, 0x88,
0x56, 0xbf, 0xde, 0xdf, 0x2b, 0xb6, 0x51, 0x6f, 0x45, 0xdf, 0x3f, 0x10, 0xfd, 0x6b, 0x38, 0x7f,
0x69, 0xcc, 0x7a, 0xce, 0x65, 0x9a, 0x6d, 0x5e, 0xf2, 0x14, 0x55, 0xd3, 0x4f, 0xa7, 0xed, 0xe7,
0x13, 0x18, 0xae, 0x89, 0x18, 0x5d, 0x1b, 0xa6, 0xdf, 0xa1, 0x63, 0x07, 0xeb, 0x7a, 0x38, 0xf8,
0xbb, 0x03, 0x83, 0x32, 0x3f, 0xf2, 0xee, 0xdf, 0xa3, 0x0b, 0x60, 0x18, 0xd7, 0xef, 0x63, 0x9f,
0x5f, 0x0b, 0xf3, 0x9e, 0xc2, 0x69, 0xeb, 0xb6, 0x64, 0x98, 0xdb, 0xb8, 0xc7, 0x15, 0xdb, 0x18,
0x52, 0xf9, 0x4b, 0x10, 0x29, 0xcd, 0x72, 0x4d, 0xa9, 0x1e, 0x85, 0xc3, 0x12, 0x5c, 0x19, 0x8c,
0x8c, 0x2f, 0x49, 0x28, 0x13, 0xff, 0x88, 0x28, 0x50, 0x42, 0xdf, 0xc8, 0xc4, 0x7b, 0x1f, 0xdc,
0x6c, 0x87, 0xd2, 0xe6, 0xd2, 0xa7, 0x5c, 0x4e, 0x0c, 0x40, 0xa9, 0x3c, 0x02, 0x88, 0xd3, 0x4c,
0x95, 0xa9, 0x1d, 0x53, 0xd7, 0x25, 0x84, 0xda, 0xef, 0xc1, 0x89, 0xf9, 0x99, 0xcd, 0xb3, 0x37,
0xca, 0x3f, 0xb1, 0xb6, 0xc9, 0x42, 0x84, 0xd9, 0x1b, 0x65, 0x5a, 0x02, 0x45, 0xa4, 0xf8, 0x1d,
0xfa, 0xae, 0x6d, 0x09, 0x14, 0x2b, 0x7e, 0x87, 0xde, 0x6b, 0xb8, 0x68, 0x38, 0x1a, 0xed, 0x98,
0xde, 0x2a, 0x1f, 0xe8, 0xe1, 0x3c, 0x9d, 0xbe, 0xf5, 0x37, 0x30, 0xbd, 0x9f, 0x55, 0x78, 0x56,
0x7b, 0xbf, 0x34, 0xb3, 0xc1, 0x6f, 0x1d, 0x18, 0x11, 0x8b, 0x5e, 0xcf, 0x7f, 0x88, 0xe0, 0x5d,
0x5e, 0xcf, 0xfd, 0x97, 0xde, 0x7b, 0x97, 0x97, 0xee, 0x7d, 0x06, 0x47, 0x4a, 0x33, 0x8d, 0x14,
0xc4, 0xe8, 0xf2, 0xf1, 0xc1, 0x71, 0xba, 0xc6, 0xca, 0xd0, 0x42, 0xcb, 0xf6, 0x26, 0x70, 0x6e,
0x0f, 0x6f, 0x38, 0xd6, 0xa7, 0x45, 0x1c, 0x11, 0x5e, 0x79, 0xf1, 0xfc, 0xab, 0x1f, 0xbf, 0xdc,
0x70, 0xbd, 0x2d, 0xd6, 0xe6, 0x63, 0xb3, 0x3b, 0x9e, 0xa6, 0xfc, 0x4e, 0x63, 0xbc, 0x9d, 0xd9,
0x83, 0x3e, 0x49, 0xb8, 0xd2, 0x39, 0x5f, 0x17, 0x1a, 0x93, 0x19, 0x97, 0x1a, 0x73, 0xc9, 0xd2,
0x19, 0x9d, 0x3e, 0x33, 0x6e, 0xef, 0xd6, 0xeb, 0x3e, 0x55, 0x9f, 0xfe, 0x13, 0x00, 0x00, 0xff,
0xff, 0xde, 0xef, 0x54, 0xe3, 0xa3, 0x07, 0x00, 0x00,
// 707 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x41, 0x6f, 0xda, 0x48,
0x14, 0x16, 0x38, 0x40, 0xfc, 0x20, 0x90, 0xcc, 0xc9, 0x9b, 0xcd, 0x6e, 0x08, 0x51, 0x76, 0x91,
0x56, 0x0b, 0x12, 0xab, 0xed, 0xad, 0x55, 0x9b, 0xd0, 0x48, 0xa8, 0x6a, 0x43, 0x0d, 0xea, 0xa1,
0x17, 0x6b, 0xb0, 0x5f, 0x60, 0x24, 0xcf, 0x98, 0x7a, 0xc6, 0x4d, 0xc2, 0xa9, 0xd7, 0xfe, 0x85,
0xfe, 0xc5, 0xfe, 0x86, 0x4a, 0xd5, 0xcc, 0x38, 0x06, 0x5a, 0x0e, 0xbd, 0x79, 0xbe, 0xf7, 0xbd,
0x37, 0xdf, 0xfb, 0xbe, 0x31, 0xb4, 0x50, 0x85, 0x51, 0xc0, 0x51, 0xd1, 0xde, 0x32, 0x4d, 0x54,
0x42, 0x8e, 0x38, 0x8b, 0x3f, 0x66, 0xd2, 0x9e, 0x7a, 0xba, 0x7a, 0xdc, 0x08, 0x13, 0xce, 0x13,
0x61, 0xa1, 0xe3, 0x86, 0x0c, 0x17, 0xc8, 0x73, 0x7a, 0xe7, 0x4b, 0x09, 0x60, 0x8a, 0x82, 0x0a,
0xf5, 0x1a, 0x15, 0x25, 0x4d, 0x28, 0x8f, 0x86, 0x5e, 0xa9, 0x5d, 0xea, 0x3a, 0x7e, 0x79, 0x34,
0x24, 0x7f, 0x41, 0x4b, 0x64, 0x3c, 0xf8, 0x90, 0x61, 0xfa, 0x10, 0x88, 0x24, 0x42, 0xe9, 0x95,
0x4d, 0xf1, 0x40, 0x64, 0xfc, 0xad, 0x46, 0xdf, 0x68, 0x90, 0xfc, 0x03, 0x47, 0x4c, 0x48, 0x4c,
0x55, 0x10, 0x2e, 0xa8, 0x10, 0x18, 0x8f, 0x86, 0xd2, 0x73, 0xda, 0x4e, 0xd7, 0xf5, 0x0f, 0x6d,
0xe1, 0xaa, 0xc0, 0xc9, 0xdf, 0xd0, 0xb2, 0x03, 0x0b, 0xae, 0xb7, 0xd7, 0x2e, 0x75, 0x5d, 0xbf,
0x69, 0xe0, 0x82, 0xd9, 0xf9, 0x54, 0x02, 0x77, 0x9c, 0x26, 0xf7, 0x0f, 0x3b, 0xb5, 0x3d, 0x81,
0x1a, 0x8d, 0xa2, 0x14, 0xa5, 0xd5, 0x54, 0x1f, 0x9c, 0xf4, 0xb6, 0x76, 0xcf, 0xb7, 0x7e, 0x61,
0x39, 0xfe, 0x23, 0x59, 0x6b, 0x4d, 0x51, 0x66, 0xf1, 0x2e, 0xad, 0xb6, 0xb0, 0xd6, 0xda, 0xf9,
0x5a, 0x82, 0xe6, 0x55, 0x12, 0xc7, 0x18, 0x2a, 0x96, 0x88, 0x9d, 0x3a, 0x9e, 0x42, 0xd5, 0x5a,
0x9a, 0xcb, 0xb8, 0xd8, 0x96, 0x91, 0xdb, 0xbd, 0x1e, 0x32, 0x31, 0x80, 0x9f, 0x37, 0x91, 0x53,
0xa8, 0x87, 0x29, 0x52, 0x85, 0x81, 0x62, 0x1c, 0x3d, 0xa7, 0x5d, 0xea, 0xee, 0xf9, 0x60, 0xa1,
0x29, 0xe3, 0x48, 0xfe, 0x04, 0x90, 0x38, 0xe7, 0x28, 0x94, 0x16, 0xba, 0xd7, 0x76, 0xba, 0x8e,
0xbf, 0x81, 0x90, 0x0b, 0x68, 0x2e, 0x69, 0xaa, 0x98, 0x9e, 0x1d, 0x28, 0x3a, 0x97, 0x5e, 0xc5,
0x2c, 0x73, 0x50, 0xa0, 0x53, 0x3a, 0x97, 0xa4, 0x03, 0x8d, 0x02, 0xd0, 0x83, 0xaa, 0x66, 0xd0,
0x16, 0xd6, 0xb9, 0x81, 0xc3, 0x6b, 0x86, 0x71, 0x74, 0xc9, 0x44, 0x9c, 0xcc, 0xaf, 0x59, 0x8c,
0x92, 0x78, 0x50, 0xbb, 0xd5, 0x58, 0xb1, 0xf3, 0xe3, 0x91, 0x9c, 0x41, 0x63, 0x66, 0x88, 0xc1,
0xad, 0x66, 0x7a, 0x65, 0x73, 0x6d, 0x7d, 0xb6, 0x6e, 0xee, 0x7c, 0x2b, 0x43, 0x7d, 0x62, 0xa5,
0x1a, 0xef, 0x4e, 0xc0, 0x2d, 0x94, 0xe7, 0xe3, 0xd6, 0x80, 0x96, 0x18, 0x16, 0x36, 0x8d, 0x86,
0xf9, 0x53, 0xdb, 0xc2, 0xc8, 0x39, 0x1c, 0x6c, 0x6d, 0x6b, 0x0c, 0x73, 0x37, 0xf6, 0x98, 0xd2,
0xb9, 0x26, 0xe5, 0xd9, 0x06, 0x52, 0xd1, 0x54, 0x99, 0xf7, 0x55, 0xf1, 0x1b, 0x39, 0x38, 0xd1,
0x98, 0x31, 0x3e, 0x27, 0xa1, 0x88, 0xbc, 0x8a, 0xa1, 0x40, 0x0e, 0xbd, 0x14, 0x11, 0xf9, 0x1d,
0xdc, 0x64, 0x89, 0xc2, 0xe6, 0x52, 0x35, 0xb9, 0xec, 0x6b, 0xc0, 0xa4, 0xf2, 0x07, 0x40, 0x18,
0x27, 0x32, 0x4f, 0xad, 0x66, 0xaa, 0xae, 0x41, 0x4c, 0xf9, 0x37, 0xd8, 0xd7, 0x3f, 0x4e, 0x9a,
0xdc, 0x49, 0x6f, 0xdf, 0xda, 0x26, 0x32, 0xee, 0x27, 0x77, 0x52, 0x97, 0x38, 0xf2, 0x40, 0xb2,
0x15, 0x7a, 0xae, 0x2d, 0x71, 0xe4, 0x13, 0xb6, 0x42, 0x72, 0x03, 0x47, 0x1b, 0x8e, 0x06, 0x4b,
0xaa, 0x16, 0xd2, 0x83, 0xb6, 0xd3, 0xad, 0x0f, 0xce, 0x7b, 0x3f, 0xfd, 0xd8, 0xbd, 0x1f, 0xb3,
0xf2, 0x5b, 0x6b, 0xef, 0xc7, 0xba, 0xb7, 0xf3, 0xb9, 0x0c, 0x4d, 0xc3, 0x1a, 0x89, 0x08, 0xef,
0x7f, 0x21, 0x82, 0x8d, 0xb4, 0xcb, 0xdb, 0x69, 0x7b, 0x50, 0x63, 0x7a, 0xc8, 0x68, 0x68, 0x2c,
0x77, 0xfc, 0xc7, 0x23, 0x19, 0x42, 0xc3, 0x7c, 0x06, 0x4b, 0x9a, 0x52, 0x6e, 0x9f, 0x68, 0x7d,
0x70, 0xb6, 0xf3, 0x6f, 0x7c, 0x85, 0x0f, 0xef, 0x68, 0x9c, 0xe1, 0x98, 0xb2, 0xd4, 0xaf, 0x9b,
0xb6, 0xb1, 0xe9, 0x22, 0xff, 0x43, 0x45, 0x2a, 0xaa, 0xd0, 0x04, 0xd1, 0x1c, 0x9c, 0xee, 0x6c,
0x37, 0x6b, 0x4c, 0x34, 0xcd, 0xb7, 0x6c, 0xd2, 0x85, 0x43, 0x7b, 0xf9, 0x86, 0x63, 0x55, 0xf3,
0x10, 0x9b, 0x06, 0x2f, 0xbc, 0xb8, 0x7c, 0xfe, 0xfe, 0xd9, 0x9c, 0xa9, 0x45, 0x36, 0xd3, 0xc3,
0xfa, 0x2b, 0x16, 0xc7, 0x6c, 0xa5, 0x30, 0x5c, 0xf4, 0xed, 0x45, 0xff, 0x46, 0x4c, 0xaa, 0x94,
0xcd, 0x32, 0x85, 0x51, 0x9f, 0x09, 0x85, 0xa9, 0xa0, 0x71, 0xdf, 0xdc, 0xde, 0xd7, 0x6e, 0x2f,
0x67, 0xb3, 0xaa, 0x39, 0xfd, 0xf7, 0x3d, 0x00, 0x00, 0xff, 0xff, 0x57, 0x2c, 0x97, 0xd1, 0x75,
0x05, 0x00, 0x00,
}

View File

@ -125,8 +125,6 @@ service MasterService {
rpc GetTimeTickChannel(common.Empty) returns (milvus.StringResponse) {}
rpc GetDdChannel(common.Empty) returns (milvus.StringResponse) {}
rpc GetStatisticsChannel(common.Empty) returns (milvus.StringResponse) {}
}

View File

@ -242,55 +242,54 @@ func init() {
func init() { proto.RegisterFile("master.proto", fileDescriptor_f9c348dec43a6705) }
var fileDescriptor_f9c348dec43a6705 = []byte{
// 756 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xfb, 0x6e, 0xd3, 0x30,
0x14, 0xc6, 0x77, 0x63, 0x68, 0x67, 0xbd, 0x0c, 0x33, 0xc1, 0x14, 0x26, 0x36, 0x0a, 0x6c, 0xdd,
0x85, 0x14, 0x6d, 0x2f, 0xc0, 0xba, 0xa0, 0xae, 0x12, 0x93, 0xa0, 0x2d, 0xa0, 0x81, 0xa6, 0x29,
0x49, 0xad, 0xd6, 0x22, 0x89, 0x4b, 0x8e, 0xdb, 0xc1, 0x9e, 0x98, 0xc7, 0x40, 0xb9, 0x39, 0x49,
0x97, 0x74, 0x41, 0xec, 0x4f, 0xdb, 0x3f, 0x7f, 0x9f, 0x7d, 0xce, 0xa7, 0xc4, 0x50, 0xb2, 0x75,
0x14, 0xd4, 0x55, 0x47, 0x2e, 0x17, 0x9c, 0x3c, 0xb6, 0x99, 0x35, 0x19, 0x63, 0x30, 0x52, 0x83,
0x25, 0xa5, 0x64, 0x72, 0xdb, 0xe6, 0x4e, 0x30, 0xa9, 0x94, 0x92, 0x88, 0x52, 0x61, 0x8e, 0xa0,
0xae, 0xa3, 0x5b, 0xe1, 0x98, 0xf4, 0x75, 0xa1, 0x5f, 0x21, 0x75, 0x27, 0xcc, 0xa4, 0xc1, 0x5c,
0xad, 0x0b, 0x2b, 0x6d, 0xad, 0x43, 0x7f, 0x8e, 0x29, 0x0a, 0xf2, 0x16, 0x96, 0x0c, 0x1d, 0xe9,
0xc6, 0xfc, 0xf6, 0x7c, 0x7d, 0xf5, 0x68, 0x53, 0x4d, 0x19, 0x86, 0x46, 0xe7, 0x38, 0x68, 0xea,
0x48, 0x3b, 0x3e, 0x49, 0xd6, 0xe1, 0x81, 0xc9, 0xc7, 0x8e, 0xd8, 0x58, 0xd8, 0x9e, 0xaf, 0x97,
0x3b, 0xc1, 0xa0, 0x36, 0x00, 0xf0, 0x44, 0x71, 0xc4, 0x1d, 0xa4, 0xe4, 0x18, 0x96, 0x51, 0xe8,
0x62, 0x8c, 0xa1, 0xee, 0xb3, 0x4c, 0xdd, 0xae, 0x8f, 0x74, 0x42, 0x94, 0x54, 0x60, 0xa1, 0xad,
0xf9, 0xaa, 0x8b, 0x9d, 0x85, 0xb6, 0x16, 0x1b, 0x2d, 0x26, 0x8d, 0x7a, 0x00, 0x3d, 0xe4, 0xf7,
0x70, 0xfc, 0x94, 0xea, 0x04, 0x56, 0x7d, 0xd5, 0xff, 0x39, 0xff, 0x26, 0xac, 0x08, 0x66, 0x53,
0x14, 0xba, 0x3d, 0xf2, 0xaf, 0xb1, 0xd4, 0x89, 0x27, 0xb2, 0x7d, 0x8f, 0xfe, 0x3c, 0x82, 0xf2,
0xb9, 0xdf, 0xd6, 0x6e, 0xd0, 0x23, 0x72, 0x05, 0x6b, 0xa7, 0x2e, 0xd5, 0x05, 0x3d, 0xe5, 0x96,
0x45, 0x4d, 0xc1, 0xb8, 0x43, 0x0e, 0xd3, 0xf6, 0xe1, 0x60, 0x1a, 0x0b, 0x6b, 0xa2, 0xcc, 0x3a,
0x6c, 0x6d, 0x8e, 0x7c, 0x87, 0x8a, 0xe6, 0xf2, 0x51, 0x42, 0x7e, 0x3f, 0x53, 0x3e, 0x0d, 0x15,
0x14, 0xbf, 0x82, 0xf2, 0x99, 0x8e, 0x09, 0xed, 0xbd, 0x4c, 0xed, 0x14, 0x13, 0x49, 0xbf, 0xc8,
0x44, 0x9b, 0x9c, 0x5b, 0x51, 0x5f, 0x6a, 0x73, 0xe4, 0x1a, 0x88, 0x46, 0xd1, 0x74, 0x99, 0x91,
0x2c, 0x90, 0x9a, 0x7d, 0x83, 0x5b, 0x60, 0x64, 0xd5, 0x28, 0xcc, 0x4b, 0xe3, 0x09, 0x3c, 0x6d,
0x51, 0x11, 0x2f, 0x79, 0x57, 0x66, 0x28, 0x98, 0x89, 0xe4, 0x20, 0xbb, 0x3d, 0x29, 0x14, 0x23,
0xeb, 0xc3, 0x62, 0xb0, 0xf4, 0xb5, 0xa0, 0xda, 0x1d, 0xf2, 0xeb, 0x18, 0xc0, 0x9c, 0x7e, 0xa5,
0xa9, 0xc8, 0xee, 0xa0, 0x10, 0x2b, 0xdd, 0x2e, 0xa1, 0x1a, 0xc4, 0xea, 0xa3, 0xee, 0x0a, 0xe6,
0xd7, 0xf6, 0x60, 0x46, 0xf8, 0x24, 0x55, 0x30, 0x1e, 0x17, 0x50, 0xf6, 0x62, 0x15, 0x8b, 0xef,
0xe5, 0x46, 0xef, 0x5f, 0xa5, 0x2f, 0xa1, 0x74, 0xa6, 0x63, 0xac, 0x5c, 0xcf, 0x0b, 0xde, 0x2d,
0xe1, 0x42, 0xb9, 0x43, 0x78, 0xd2, 0xa2, 0x42, 0xee, 0x4d, 0x74, 0x3f, 0xbb, 0x1b, 0x29, 0x12,
0x67, 0x77, 0x63, 0x9a, 0x95, 0xa6, 0x0c, 0x2a, 0x5e, 0xa7, 0xe4, 0x3a, 0xe6, 0xd4, 0x2b, 0x05,
0x45, 0x5e, 0xfb, 0x45, 0x50, 0x69, 0xe5, 0x40, 0x35, 0x8a, 0x7f, 0x97, 0x0e, 0x6c, 0xea, 0x88,
0x9c, 0xc6, 0x4f, 0x51, 0xb3, 0x63, 0x7d, 0x0b, 0x96, 0x7e, 0x26, 0x94, 0xbc, 0xa3, 0x84, 0x0b,
0x48, 0x76, 0x73, 0x4f, 0x3b, 0x65, 0x54, 0xbf, 0x1b, 0x94, 0x26, 0x9f, 0x61, 0x35, 0xc8, 0x69,
0xdb, 0xe9, 0xd3, 0x5f, 0x39, 0x1e, 0x09, 0xa2, 0x60, 0xd4, 0x86, 0x50, 0x8e, 0x2e, 0x16, 0x08,
0xef, 0xcd, 0xbc, 0x7c, 0x4a, 0x7a, 0xbf, 0x08, 0x2a, 0x2f, 0x60, 0x40, 0xb9, 0x45, 0x85, 0x3f,
0xeb, 0xb9, 0x53, 0xb2, 0x93, 0xb9, 0x3d, 0x06, 0x22, 0x9b, 0xdd, 0x3b, 0xb9, 0x44, 0x91, 0x2a,
0x27, 0x96, 0xc5, 0xcd, 0x9e, 0xfc, 0x55, 0x6d, 0xa9, 0x19, 0xcf, 0x0e, 0x35, 0xfe, 0xeb, 0x2a,
0xdb, 0xf9, 0x80, 0x94, 0xfd, 0x00, 0x0f, 0x7d, 0xd9, 0xb6, 0x46, 0x9e, 0x67, 0xe2, 0xf2, 0x0d,
0xa2, 0x6c, 0xe5, 0xae, 0x27, 0x0a, 0x51, 0x3d, 0x41, 0x64, 0x03, 0x27, 0x6c, 0x72, 0x5b, 0x23,
0xaf, 0xd3, 0xbb, 0xbc, 0x87, 0x8e, 0x2a, 0x99, 0x58, 0x7c, 0xe7, 0x2e, 0x4c, 0x7a, 0x7c, 0x81,
0xb5, 0x16, 0x15, 0xe1, 0x7f, 0xd8, 0xaf, 0x12, 0x12, 0x25, 0x33, 0x09, 0xef, 0xed, 0x91, 0xf8,
0xad, 0xbc, 0x4a, 0xaf, 0xc9, 0x97, 0x57, 0x4a, 0xa1, 0x36, 0x47, 0xbe, 0x02, 0x69, 0x51, 0xe1,
0x95, 0xb7, 0xc7, 0xcc, 0x1f, 0xa7, 0x43, 0xdd, 0x71, 0xa8, 0x35, 0x53, 0xf9, 0x65, 0x76, 0xc6,
0x85, 0xcb, 0x9c, 0x41, 0xe2, 0xc0, 0x9f, 0xa0, 0xd4, 0xa2, 0x42, 0xeb, 0xdf, 0xa3, 0xe4, 0x05,
0xac, 0x7b, 0x35, 0x90, 0x5f, 0xb7, 0xfb, 0x93, 0x6e, 0x36, 0xbf, 0xbd, 0x1b, 0x30, 0x31, 0x1c,
0x1b, 0xde, 0xee, 0xc6, 0x0d, 0xb3, 0x2c, 0x76, 0x23, 0xa8, 0x39, 0x6c, 0x04, 0x1b, 0xde, 0xf4,
0x19, 0x0a, 0x97, 0x19, 0x63, 0x41, 0xfb, 0x8d, 0xa8, 0x96, 0x0d, 0x5f, 0xb2, 0x11, 0x24, 0x62,
0x64, 0x18, 0xcb, 0xfe, 0xf8, 0xf8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0xb2, 0x22, 0xf7,
0x26, 0x0b, 0x00, 0x00,
// 747 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xfb, 0x6e, 0xd3, 0x3c,
0x18, 0xc6, 0x77, 0xfa, 0xf6, 0x69, 0xef, 0x7a, 0x18, 0x66, 0x82, 0x29, 0x4c, 0x6c, 0x14, 0xd8,
0xba, 0x03, 0x29, 0xda, 0x6e, 0x80, 0x75, 0x41, 0x5d, 0x25, 0x26, 0xa1, 0xb6, 0x80, 0x06, 0x9a,
0xa6, 0x24, 0xb5, 0x5a, 0x8b, 0x24, 0x2e, 0x79, 0xdd, 0x0e, 0x76, 0x1f, 0xdc, 0x2f, 0xca, 0xc9,
0x49, 0xba, 0xa4, 0x0d, 0x62, 0x7f, 0xda, 0xfe, 0xf9, 0x79, 0xde, 0x93, 0x12, 0x43, 0xc9, 0xd6,
0x51, 0x50, 0x57, 0x1d, 0xb9, 0x5c, 0x70, 0xf2, 0xd8, 0x66, 0xd6, 0x64, 0x8c, 0xc1, 0x4a, 0x0d,
0x8e, 0x94, 0x92, 0xc9, 0x6d, 0x9b, 0x3b, 0xc1, 0xa6, 0x52, 0x4a, 0x22, 0x4a, 0x85, 0x39, 0x82,
0xba, 0x8e, 0x6e, 0x85, 0x6b, 0xd2, 0xd7, 0x85, 0x7e, 0x83, 0xd4, 0x9d, 0x30, 0x93, 0x06, 0x7b,
0xb5, 0x2e, 0xac, 0xb5, 0xb5, 0x0e, 0xfd, 0x31, 0xa6, 0x28, 0xc8, 0x5b, 0x58, 0x31, 0x74, 0xa4,
0x5b, 0x8b, 0xbb, 0x8b, 0xf5, 0xf5, 0x93, 0x6d, 0x35, 0x65, 0x18, 0x1a, 0x5d, 0xe2, 0xa0, 0xa9,
0x23, 0xed, 0xf8, 0x24, 0xd9, 0x84, 0xff, 0x4c, 0x3e, 0x76, 0xc4, 0xd6, 0xd2, 0xee, 0x62, 0xbd,
0xdc, 0x09, 0x16, 0xb5, 0x01, 0x80, 0x27, 0x8a, 0x23, 0xee, 0x20, 0x25, 0xa7, 0xb0, 0x8a, 0x42,
0x17, 0x63, 0x0c, 0x75, 0x9f, 0x65, 0xea, 0x76, 0x7d, 0xa4, 0x13, 0xa2, 0xa4, 0x02, 0x4b, 0x6d,
0xcd, 0x57, 0x5d, 0xee, 0x2c, 0xb5, 0xb5, 0xd8, 0x68, 0x39, 0x69, 0xd4, 0x03, 0xe8, 0x21, 0x7f,
0x80, 0xf0, 0x53, 0xaa, 0x13, 0x58, 0xf7, 0x55, 0xff, 0x25, 0xfe, 0x6d, 0x58, 0x13, 0xcc, 0xa6,
0x28, 0x74, 0x7b, 0xe4, 0xa7, 0xb1, 0xd2, 0x89, 0x37, 0xb2, 0x7d, 0x4f, 0x7e, 0x3f, 0x82, 0xf2,
0xa5, 0xdf, 0xd6, 0x6e, 0xd0, 0x23, 0x72, 0x03, 0x1b, 0xe7, 0x2e, 0xd5, 0x05, 0x3d, 0xe7, 0x96,
0x45, 0x4d, 0xc1, 0xb8, 0x43, 0x8e, 0xd3, 0xf6, 0xe1, 0x62, 0x1a, 0x0b, 0x6b, 0xa2, 0xcc, 0x0a,
0xb6, 0xb6, 0x40, 0xbe, 0x41, 0x45, 0x73, 0xf9, 0x28, 0x21, 0x7f, 0x98, 0x29, 0x9f, 0x86, 0x0a,
0x8a, 0xdf, 0x40, 0xf9, 0x42, 0xc7, 0x84, 0xf6, 0x41, 0xa6, 0x76, 0x8a, 0x89, 0xa4, 0x5f, 0x64,
0xa2, 0x4d, 0xce, 0xad, 0xa8, 0x2f, 0xb5, 0x05, 0x72, 0x0b, 0x44, 0xa3, 0x68, 0xba, 0xcc, 0x48,
0x16, 0x48, 0xcd, 0xce, 0xe0, 0x1e, 0x18, 0x59, 0x35, 0x0a, 0xf3, 0xd2, 0x78, 0x02, 0x4f, 0x5b,
0x54, 0xc4, 0x47, 0x5e, 0xca, 0x0c, 0x05, 0x33, 0x91, 0x1c, 0x65, 0xb7, 0x27, 0x85, 0x62, 0x64,
0x7d, 0x5c, 0x0c, 0x96, 0xbe, 0x16, 0x54, 0xbb, 0x43, 0x7e, 0x1b, 0x03, 0x98, 0xd3, 0xaf, 0x34,
0x15, 0xd9, 0x1d, 0x15, 0x62, 0xa5, 0xdb, 0x35, 0x54, 0x83, 0xb1, 0xfa, 0xa8, 0xbb, 0x82, 0xf9,
0xb5, 0x3d, 0x9a, 0x31, 0x7c, 0x92, 0x2a, 0x38, 0x1e, 0x57, 0x50, 0xf6, 0xc6, 0x2a, 0x16, 0x3f,
0xc8, 0x1d, 0xbd, 0xbf, 0x95, 0xbe, 0x86, 0xd2, 0x85, 0x8e, 0xb1, 0x72, 0x3d, 0x6f, 0xf0, 0xee,
0x09, 0x17, 0x9a, 0x3b, 0x84, 0x27, 0x2d, 0x2a, 0xe4, 0xdd, 0x44, 0xf7, 0xb3, 0xbb, 0x91, 0x22,
0x71, 0x76, 0x37, 0xa6, 0x59, 0x69, 0xca, 0xa0, 0xe2, 0x75, 0x4a, 0x9e, 0x63, 0x4e, 0xbd, 0x52,
0x50, 0xe4, 0x75, 0x58, 0x04, 0x95, 0x56, 0x0e, 0x54, 0xa3, 0xf1, 0xef, 0xd2, 0x81, 0x4d, 0x1d,
0x91, 0xd3, 0xf8, 0x29, 0x6a, 0xf6, 0x58, 0xdf, 0x83, 0xa5, 0x9f, 0x09, 0x25, 0x2f, 0x94, 0xf0,
0x00, 0xc9, 0x7e, 0x6e, 0xb4, 0x53, 0x46, 0xf5, 0xf9, 0xa0, 0x34, 0xf9, 0x04, 0xeb, 0xc1, 0x9c,
0xb6, 0x9d, 0x3e, 0xfd, 0x99, 0xe3, 0x91, 0x20, 0x0a, 0x8e, 0xda, 0x10, 0xca, 0x51, 0x62, 0x81,
0xf0, 0xc1, 0xcc, 0xe4, 0x53, 0xd2, 0x87, 0x45, 0x50, 0x99, 0x80, 0x01, 0xe5, 0x16, 0x15, 0xfe,
0xae, 0xe7, 0x4e, 0xc9, 0x5e, 0xe6, 0xf5, 0x18, 0x88, 0x6c, 0xf6, 0xe7, 0x72, 0x89, 0x22, 0x55,
0xce, 0x2c, 0x8b, 0x9b, 0x3d, 0xf9, 0xab, 0xda, 0x51, 0x33, 0x9e, 0x1d, 0x6a, 0xfc, 0xd7, 0x55,
0x76, 0xf3, 0x01, 0x29, 0xfb, 0x01, 0xfe, 0xf7, 0x65, 0xdb, 0x1a, 0x79, 0x9e, 0x89, 0xcb, 0x37,
0x88, 0xb2, 0x93, 0x7b, 0x9e, 0x28, 0x44, 0xf5, 0x0c, 0x91, 0x0d, 0x9c, 0xb0, 0xc9, 0x6d, 0x8d,
0xbc, 0x4e, 0xdf, 0xf2, 0x1e, 0x3a, 0xaa, 0x64, 0x62, 0xf1, 0xbd, 0x79, 0x98, 0xf4, 0xf8, 0x0c,
0x1b, 0x2d, 0x2a, 0xc2, 0xff, 0xb0, 0x5f, 0x25, 0x24, 0x4a, 0xe6, 0x24, 0xbc, 0xb7, 0x47, 0xe2,
0x97, 0xf2, 0x2a, 0x7d, 0x26, 0x5f, 0x5e, 0x29, 0x85, 0xda, 0x02, 0xf9, 0x02, 0xa4, 0x45, 0x85,
0x57, 0xde, 0x1e, 0x33, 0xbf, 0x9f, 0x0f, 0x75, 0xc7, 0xa1, 0xd6, 0x4c, 0xe5, 0x97, 0xd9, 0x33,
0x2e, 0x5c, 0xe6, 0x0c, 0x12, 0x01, 0x5f, 0xc1, 0xa6, 0x17, 0xb0, 0xfc, 0x14, 0x3d, 0x9c, 0x74,
0xb3, 0xf9, 0xf5, 0xdd, 0x80, 0x89, 0xe1, 0xd8, 0xf0, 0x6e, 0x37, 0xee, 0x98, 0x65, 0xb1, 0x3b,
0x41, 0xcd, 0x61, 0x23, 0xb8, 0xf0, 0xa6, 0xcf, 0x50, 0xb8, 0xcc, 0x18, 0x0b, 0xda, 0x6f, 0x44,
0x89, 0x37, 0x7c, 0xc9, 0x46, 0xd0, 0xbe, 0x91, 0x61, 0xac, 0xfa, 0xeb, 0xd3, 0x3f, 0x01, 0x00,
0x00, 0xff, 0xff, 0x8b, 0xde, 0xd8, 0x75, 0xd3, 0x0a, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -373,7 +372,6 @@ type MasterServiceClient interface {
AssignSegmentID(ctx context.Context, in *datapb.AssignSegIDRequest, opts ...grpc.CallOption) (*datapb.AssignSegIDResponse, error)
GetServiceStates(ctx context.Context, in *commonpb.Empty, opts ...grpc.CallOption) (*internalpb2.ServiceStates, error)
GetTimeTickChannel(ctx context.Context, in *commonpb.Empty, opts ...grpc.CallOption) (*milvuspb.StringResponse, error)
GetDdChannel(ctx context.Context, in *commonpb.Empty, opts ...grpc.CallOption) (*milvuspb.StringResponse, error)
GetStatisticsChannel(ctx context.Context, in *commonpb.Empty, opts ...grpc.CallOption) (*milvuspb.StringResponse, error)
}
@ -574,15 +572,6 @@ func (c *masterServiceClient) GetTimeTickChannel(ctx context.Context, in *common
return out, nil
}
func (c *masterServiceClient) GetDdChannel(ctx context.Context, in *commonpb.Empty, opts ...grpc.CallOption) (*milvuspb.StringResponse, error) {
out := new(milvuspb.StringResponse)
err := c.cc.Invoke(ctx, "/milvus.proto.master.MasterService/GetDdChannel", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *masterServiceClient) GetStatisticsChannel(ctx context.Context, in *commonpb.Empty, opts ...grpc.CallOption) (*milvuspb.StringResponse, error) {
out := new(milvuspb.StringResponse)
err := c.cc.Invoke(ctx, "/milvus.proto.master.MasterService/GetStatisticsChannel", in, out, opts...)
@ -662,7 +651,6 @@ type MasterServiceServer interface {
AssignSegmentID(context.Context, *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error)
GetServiceStates(context.Context, *commonpb.Empty) (*internalpb2.ServiceStates, error)
GetTimeTickChannel(context.Context, *commonpb.Empty) (*milvuspb.StringResponse, error)
GetDdChannel(context.Context, *commonpb.Empty) (*milvuspb.StringResponse, error)
GetStatisticsChannel(context.Context, *commonpb.Empty) (*milvuspb.StringResponse, error)
}
@ -733,9 +721,6 @@ func (*UnimplementedMasterServiceServer) GetServiceStates(ctx context.Context, r
func (*UnimplementedMasterServiceServer) GetTimeTickChannel(ctx context.Context, req *commonpb.Empty) (*milvuspb.StringResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetTimeTickChannel not implemented")
}
func (*UnimplementedMasterServiceServer) GetDdChannel(ctx context.Context, req *commonpb.Empty) (*milvuspb.StringResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetDdChannel not implemented")
}
func (*UnimplementedMasterServiceServer) GetStatisticsChannel(ctx context.Context, req *commonpb.Empty) (*milvuspb.StringResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetStatisticsChannel not implemented")
}
@ -1122,24 +1107,6 @@ func _MasterService_GetTimeTickChannel_Handler(srv interface{}, ctx context.Cont
return interceptor(ctx, in, info, handler)
}
func _MasterService_GetDdChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(commonpb.Empty)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(MasterServiceServer).GetDdChannel(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/milvus.proto.master.MasterService/GetDdChannel",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MasterServiceServer).GetDdChannel(ctx, req.(*commonpb.Empty))
}
return interceptor(ctx, in, info, handler)
}
func _MasterService_GetStatisticsChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(commonpb.Empty)
if err := dec(in); err != nil {
@ -1246,10 +1213,6 @@ var _MasterService_serviceDesc = grpc.ServiceDesc{
MethodName: "GetTimeTickChannel",
Handler: _MasterService_GetTimeTickChannel_Handler,
},
{
MethodName: "GetDdChannel",
Handler: _MasterService_GetDdChannel_Handler,
},
{
MethodName: "GetStatisticsChannel",
Handler: _MasterService_GetStatisticsChannel_Handler,

View File

@ -1,184 +0,0 @@
package querynode
import (
"errors"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
queryPb "github.com/zilliztech/milvus-distributed/internal/proto/querypb"
)
func (node *QueryNode) AddQueryChannel(in *queryPb.AddQueryChannelsRequest) (*commonpb.Status, error) {
if node.searchService == nil || node.searchService.searchMsgStream == nil {
errMsg := "null search service or null search message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
searchStream, ok := node.searchService.searchMsgStream.(*msgstream.PulsarMsgStream)
if !ok {
errMsg := "type assertion failed for search message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
resultStream, ok := node.searchService.searchResultMsgStream.(*msgstream.PulsarMsgStream)
if !ok {
errMsg := "type assertion failed for search result message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
// add request channel
pulsarBufSize := Params.SearchPulsarBufSize
consumeChannels := []string{in.RequestChannelID}
consumeSubName := Params.MsgChannelSubName
unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
searchStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
// add result channel
producerChannels := []string{in.ResultChannelID}
resultStream.CreatePulsarProducers(producerChannels)
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
}
return status, nil
}
func (node *QueryNode) RemoveQueryChannel(in *queryPb.RemoveQueryChannelsRequest) (*commonpb.Status, error) {
if node.searchService == nil || node.searchService.searchMsgStream == nil {
errMsg := "null search service or null search result message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
searchStream, ok := node.searchService.searchMsgStream.(*msgstream.PulsarMsgStream)
if !ok {
errMsg := "type assertion failed for search message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
resultStream, ok := node.searchService.searchResultMsgStream.(*msgstream.PulsarMsgStream)
if !ok {
errMsg := "type assertion failed for search result message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
// remove request channel
pulsarBufSize := Params.SearchPulsarBufSize
consumeChannels := []string{in.RequestChannelID}
consumeSubName := Params.MsgChannelSubName
unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
// TODO: searchStream.RemovePulsarConsumers(producerChannels)
searchStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
// remove result channel
producerChannels := []string{in.ResultChannelID}
// TODO: resultStream.RemovePulsarProducer(producerChannels)
resultStream.CreatePulsarProducers(producerChannels)
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
}
return status, nil
}
func (node *QueryNode) WatchDmChannels(in *queryPb.WatchDmChannelsRequest) (*commonpb.Status, error) {
if node.dataSyncService == nil || node.dataSyncService.dmStream == nil {
errMsg := "null data sync service or null data manipulation stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
fgDMMsgStream, ok := node.dataSyncService.dmStream.(*msgstream.PulsarMsgStream)
if !ok {
errMsg := "type assertion failed for dm message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
// add request channel
pulsarBufSize := Params.SearchPulsarBufSize
consumeChannels := in.ChannelIDs
consumeSubName := Params.MsgChannelSubName
unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
fgDMMsgStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
}
return status, nil
}
func (node *QueryNode) LoadSegments(in *queryPb.LoadSegmentRequest) (*commonpb.Status, error) {
// TODO: support db
fieldIDs := in.FieldIDs
for _, segmentID := range in.SegmentIDs {
indexID := UniqueID(0) // TODO: ???
err := node.segManager.loadSegment(segmentID, &fieldIDs)
if err != nil {
// TODO: return or continue?
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: err.Error(),
}
return status, err
}
err = node.segManager.loadIndex(segmentID, indexID)
if err != nil {
// TODO: return or continue?
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: err.Error(),
}
return status, err
}
}
return nil, nil
}
func (node *QueryNode) ReleaseSegments(in *queryPb.ReleaseSegmentRequest) (*commonpb.Status, error) {
// TODO: implement
return nil, nil
}
func (node *QueryNode) GetPartitionState(in *queryPb.PartitionStatesRequest) (*queryPb.PartitionStatesResponse, error) {
// TODO: implement
return nil, nil
}

View File

@ -26,6 +26,7 @@ type loadIndexService struct {
fieldIndexes map[string][]*internalpb2.IndexStats
fieldStatsChan chan []*internalpb2.FieldStats
loadIndexReqChan chan []msgstream.TsMsg
loadIndexMsgStream msgstream.MsgStream
queryNodeID UniqueID
@ -65,6 +66,9 @@ func newLoadIndexService(ctx context.Context, replica collectionReplica) *loadIn
var stream msgstream.MsgStream = loadIndexStream
// init index load requests channel size by message receive buffer size
indexLoadChanSize := receiveBufSize
return &loadIndexService{
ctx: ctx1,
cancel: cancel,
@ -74,15 +78,14 @@ func newLoadIndexService(ctx context.Context, replica collectionReplica) *loadIn
fieldIndexes: make(map[string][]*internalpb2.IndexStats),
fieldStatsChan: make(chan []*internalpb2.FieldStats, 1),
loadIndexReqChan: make(chan []msgstream.TsMsg, indexLoadChanSize),
loadIndexMsgStream: stream,
queryNodeID: Params.QueryNodeID,
}
}
func (lis *loadIndexService) start() {
lis.loadIndexMsgStream.Start()
func (lis *loadIndexService) consume() {
for {
select {
case <-lis.ctx.Done():
@ -93,7 +96,21 @@ func (lis *loadIndexService) start() {
log.Println("null msg pack")
continue
}
for _, msg := range messages.Msgs {
lis.loadIndexReqChan <- messages.Msgs
}
}
}
func (lis *loadIndexService) start() {
lis.loadIndexMsgStream.Start()
go lis.consume()
for {
select {
case <-lis.ctx.Done():
return
case messages := <-lis.loadIndexReqChan:
for _, msg := range messages {
err := lis.execute(msg)
if err != nil {
log.Println(err)

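The refactor above splits the load-index pipeline into two stages: consume() drains the message stream and forwards each whole batch onto loadIndexReqChan, while start() pulls batches off that channel and executes them one message at a time. Below is a minimal, self-contained sketch of the same forward-then-drain pattern using plain Go channels in place of the Pulsar stream; every name in it is illustrative and not part of this commit.

package main

import (
	"context"
	"fmt"
)

// forward mirrors consume(): read a batch from the source and push the whole
// batch onto reqChan without touching its contents.
func forward(ctx context.Context, src <-chan []string, reqChan chan<- []string) {
	for {
		select {
		case <-ctx.Done():
			return
		case batch := <-src:
			if len(batch) == 0 {
				continue
			}
			reqChan <- batch
		}
	}
}

// drain mirrors start(): take a batch off reqChan and handle each message.
func drain(ctx context.Context, reqChan <-chan []string, done chan<- struct{}) {
	for {
		select {
		case <-ctx.Done():
			return
		case batch := <-reqChan:
			for _, msg := range batch {
				fmt.Println("handled:", msg)
			}
			done <- struct{}{}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	src := make(chan []string)
	reqChan := make(chan []string, 64) // buffered, like indexLoadChanSize
	done := make(chan struct{})

	go forward(ctx, src, reqChan)
	go drain(ctx, reqChan, done)

	src <- []string{"load-index-request-1", "load-index-request-2"}
	<-done // wait until the batch has been handled
}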
View File

@ -14,13 +14,13 @@ import "C"
import (
"context"
"errors"
"fmt"
"io"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go/config"
"google.golang.org/grpc"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
queryPb "github.com/zilliztech/milvus-distributed/internal/proto/querypb"
)
@ -42,7 +42,6 @@ type QueryNode struct {
queryNodeLoopCancel context.CancelFunc
QueryNodeID uint64
grpcServer *grpc.Server
replica collectionReplica
@ -70,7 +69,6 @@ func NewQueryNode(ctx context.Context, queryNodeID uint64) Node {
}
func newQueryNode(ctx context.Context, queryNodeID uint64) *QueryNode {
ctx1, cancel := context.WithCancel(ctx)
q := &QueryNode{
queryNodeLoopCtx: ctx1,
@ -81,6 +79,7 @@ func newQueryNode(ctx context.Context, queryNodeID uint64) *QueryNode {
metaService: nil,
searchService: nil,
statsService: nil,
segManager: nil,
}
var err error
@ -114,12 +113,15 @@ func newQueryNode(ctx context.Context, queryNodeID uint64) *QueryNode {
func (node *QueryNode) Start() error {
// todo add connectMaster logic
// init services and manager
node.dataSyncService = newDataSyncService(node.queryNodeLoopCtx, node.replica)
node.searchService = newSearchService(node.queryNodeLoopCtx, node.replica)
node.metaService = newMetaService(node.queryNodeLoopCtx, node.replica)
node.loadIndexService = newLoadIndexService(node.queryNodeLoopCtx, node.replica)
node.statsService = newStatsService(node.queryNodeLoopCtx, node.replica, node.loadIndexService.fieldStatsChan)
node.segManager = newSegmentManager(node.queryNodeLoopCtx, node.replica, node.loadIndexService.loadIndexReqChan)
// start services
go node.dataSyncService.start()
go node.searchService.start()
go node.metaService.start()
@ -152,5 +154,179 @@ func (node *QueryNode) Close() {
if node.closer != nil {
node.closer.Close()
}
}
func (node *QueryNode) AddQueryChannel(in *queryPb.AddQueryChannelsRequest) (*commonpb.Status, error) {
if node.searchService == nil || node.searchService.searchMsgStream == nil {
errMsg := "null search service or null search message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
searchStream, ok := node.searchService.searchMsgStream.(*msgstream.PulsarMsgStream)
if !ok {
errMsg := "type assertion failed for search message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
resultStream, ok := node.searchService.searchResultMsgStream.(*msgstream.PulsarMsgStream)
if !ok {
errMsg := "type assertion failed for search result message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
// add request channel
pulsarBufSize := Params.SearchPulsarBufSize
consumeChannels := []string{in.RequestChannelID}
consumeSubName := Params.MsgChannelSubName
unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
searchStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
// add result channel
producerChannels := []string{in.ResultChannelID}
resultStream.CreatePulsarProducers(producerChannels)
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
}
return status, nil
}
func (node *QueryNode) RemoveQueryChannel(in *queryPb.RemoveQueryChannelsRequest) (*commonpb.Status, error) {
if node.searchService == nil || node.searchService.searchMsgStream == nil {
errMsg := "null search service or null search result message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
searchStream, ok := node.searchService.searchMsgStream.(*msgstream.PulsarMsgStream)
if !ok {
errMsg := "type assertion failed for search message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
resultStream, ok := node.searchService.searchResultMsgStream.(*msgstream.PulsarMsgStream)
if !ok {
errMsg := "type assertion failed for search result message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
// remove request channel
pulsarBufSize := Params.SearchPulsarBufSize
consumeChannels := []string{in.RequestChannelID}
consumeSubName := Params.MsgChannelSubName
unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
// TODO: searchStream.RemovePulsarConsumers(producerChannels)
searchStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
// remove result channel
producerChannels := []string{in.ResultChannelID}
// TODO: resultStream.RemovePulsarProducer(producerChannels)
resultStream.CreatePulsarProducers(producerChannels)
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
}
return status, nil
}
func (node *QueryNode) WatchDmChannels(in *queryPb.WatchDmChannelsRequest) (*commonpb.Status, error) {
if node.dataSyncService == nil || node.dataSyncService.dmStream == nil {
errMsg := "null data sync service or null data manipulation stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
fgDMMsgStream, ok := node.dataSyncService.dmStream.(*msgstream.PulsarMsgStream)
if !ok {
errMsg := "type assertion failed for dm message stream"
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: errMsg,
}
return status, errors.New(errMsg)
}
// add request channel
pulsarBufSize := Params.SearchPulsarBufSize
consumeChannels := in.ChannelIDs
consumeSubName := Params.MsgChannelSubName
unmarshalDispatcher := msgstream.NewUnmarshalDispatcher()
fgDMMsgStream.CreatePulsarConsumers(consumeChannels, consumeSubName, unmarshalDispatcher, pulsarBufSize)
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_SUCCESS,
}
return status, nil
}
func (node *QueryNode) LoadSegments(in *queryPb.LoadSegmentRequest) (*commonpb.Status, error) {
// TODO: support db
fieldIDs := in.FieldIDs
for _, segmentID := range in.SegmentIDs {
indexID := UniqueID(0) // TODO: ???
err := node.segManager.loadSegment(segmentID, &fieldIDs)
if err != nil {
// TODO: return or continue?
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: err.Error(),
}
return status, err
}
err = node.segManager.loadIndex(segmentID, indexID)
if err != nil {
// TODO: return or continue?
status := &commonpb.Status{
ErrorCode: commonpb.ErrorCode_UNEXPECTED_ERROR,
Reason: err.Error(),
}
return status, err
}
}
return nil, nil
}
func (node *QueryNode) ReleaseSegments(in *queryPb.ReleaseSegmentRequest) (*commonpb.Status, error) {
// TODO: implement
return nil, nil
}
func (node *QueryNode) GetPartitionState(in *queryPb.PartitionStatesRequest) (*queryPb.PartitionStatesResponse, error) {
// TODO: implement
return nil, nil
}

View File

@ -25,7 +25,8 @@ import (
type indexParam = map[string]string
type Segment struct {
segmentPtr C.CSegmentBase
segmentPtr C.CSegmentInterface
segmentType C.enum_SegmentType
segmentID UniqueID
partitionTag string // TODO: use partitionID
collectionID UniqueID
@ -58,11 +59,14 @@ func (s *Segment) GetRecentlyModified() bool {
//-------------------------------------------------------------------------------------- constructor and destructor
func newSegment(collection *Collection, segmentID int64, partitionTag string, collectionID UniqueID) *Segment {
/*
CSegmentBase
newSegment(CPartition partition, unsigned long segment_id);
CSegmentInterface
NewSegment(CCollection collection, uint64_t segment_id, SegmentType seg_type);
*/
initIndexParam := make(map[int64]indexParam)
segmentPtr := C.NewSegment(collection.collectionPtr, C.ulong(segmentID))
// TODO: replace by param
//var segmentType C.enum_SegmentType = C.Growing
var segmentType C.int = 1
segmentPtr := C.NewSegment(collection.collectionPtr, C.ulong(segmentID), segmentType)
var newSegment = &Segment{
segmentPtr: segmentPtr,
segmentID: segmentID,
@ -77,7 +81,7 @@ func newSegment(collection *Collection, segmentID int64, partitionTag string, co
func deleteSegment(segment *Segment) {
/*
void
deleteSegment(CSegmentBase segment);
deleteSegment(CSegmentInterface segment);
*/
cPtr := segment.segmentPtr
C.DeleteSegment(cPtr)
@ -87,7 +91,7 @@ func deleteSegment(segment *Segment) {
func (s *Segment) getRowCount() int64 {
/*
long int
getRowCount(CSegmentBase c_segment);
getRowCount(CSegmentInterface c_segment);
*/
var rowCount = C.GetRowCount(s.segmentPtr)
return int64(rowCount)
@ -96,7 +100,7 @@ func (s *Segment) getRowCount() int64 {
func (s *Segment) getDeletedCount() int64 {
/*
long int
getDeletedCount(CSegmentBase c_segment);
getDeletedCount(CSegmentInterface c_segment);
*/
var deletedCount = C.GetDeletedCount(s.segmentPtr)
return int64(deletedCount)
@ -105,7 +109,7 @@ func (s *Segment) getDeletedCount() int64 {
func (s *Segment) getMemSize() int64 {
/*
long int
GetMemoryUsageInBytes(CSegmentBase c_segment);
GetMemoryUsageInBytes(CSegmentInterface c_segment);
*/
var memoryUsageInBytes = C.GetMemoryUsageInBytes(s.segmentPtr)
@ -116,7 +120,7 @@ func (s *Segment) getMemSize() int64 {
func (s *Segment) segmentPreInsert(numOfRecords int) int64 {
/*
long int
PreInsert(CSegmentBase c_segment, long int size);
PreInsert(CSegmentInterface c_segment, long int size);
*/
var offset = C.PreInsert(s.segmentPtr, C.long(int64(numOfRecords)))
@ -126,7 +130,7 @@ func (s *Segment) segmentPreInsert(numOfRecords int) int64 {
func (s *Segment) segmentPreDelete(numOfRecords int) int64 {
/*
long int
PreDelete(CSegmentBase c_segment, long int size);
PreDelete(CSegmentInterface c_segment, long int size);
*/
var offset = C.PreDelete(s.segmentPtr, C.long(int64(numOfRecords)))
@ -137,7 +141,7 @@ func (s *Segment) segmentPreDelete(numOfRecords int) int64 {
func (s *Segment) segmentInsert(offset int64, entityIDs *[]UniqueID, timestamps *[]Timestamp, records *[]*commonpb.Blob) error {
/*
CStatus
Insert(CSegmentBase c_segment,
Insert(CSegmentInterface c_segment,
long int reserved_offset,
signed long int size,
const long* primary_keys,
@ -190,7 +194,7 @@ func (s *Segment) segmentInsert(offset int64, entityIDs *[]UniqueID, timestamps
func (s *Segment) segmentDelete(offset int64, entityIDs *[]UniqueID, timestamps *[]Timestamp) error {
/*
CStatus
Delete(CSegmentBase c_segment,
Delete(CSegmentInterface c_segment,
long int reserved_offset,
long size,
const long* primary_keys,

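With this change the C constructor takes a SegmentType (Invalid = 0, Growing = 1, Sealed = 2), but the Go wrapper above still hard-codes the value 1 with a TODO to make it a parameter. The sketch below is purely illustrative: the segmentType alias and the helper are hypothetical and not part of this commit, and the real code would convert the value to the cgo enum before calling C.NewSegment.

package main

import "fmt"

// segmentType mirrors the C-side SegmentType enum values accepted by NewSegment.
type segmentType int32

const (
	segmentTypeInvalid segmentType = 0
	segmentTypeGrowing segmentType = 1
	segmentTypeSealed  segmentType = 2
)

// newSegmentOfType stands in for a parameterized newSegment: callers pick the
// type instead of relying on a hard-coded Growing segment.
func newSegmentOfType(segmentID int64, t segmentType) (string, error) {
	switch t {
	case segmentTypeGrowing, segmentTypeSealed:
		return fmt.Sprintf("segment %d created with type %d", segmentID, t), nil
	default:
		return "", fmt.Errorf("invalid segment type %d for segment %d", t, segmentID)
	}
}

func main() {
	desc, err := newSegmentOfType(42, segmentTypeGrowing)
	if err != nil {
		panic(err)
	}
	fmt.Println(desc)
}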
View File

@ -6,24 +6,56 @@ import (
"fmt"
"github.com/zilliztech/milvus-distributed/internal/kv"
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/datapb"
"github.com/zilliztech/milvus-distributed/internal/proto/indexpb"
internalPb "github.com/zilliztech/milvus-distributed/internal/proto/internalpb2"
queryPb "github.com/zilliztech/milvus-distributed/internal/proto/querypb"
"github.com/zilliztech/milvus-distributed/internal/querynode/client"
"github.com/zilliztech/milvus-distributed/internal/storage"
)
type segmentManager struct {
replica collectionReplica
loadIndexReqChan chan []msgstream.TsMsg
// TODO: replace by client instead of grpc client
dataClient datapb.DataServiceClient
indexBuilderClient indexpb.IndexServiceClient
queryNodeClient *client.Client
kv kv.Base // minio kv
iCodec storage.InsertCodec
iCodec *storage.InsertCodec
}
func newSegmentManager(ctx context.Context, replica collectionReplica, loadIndexReqChan chan []msgstream.TsMsg) *segmentManager {
bucketName := Params.MinioBucketName
option := &miniokv.Option{
Address: Params.MinioEndPoint,
AccessKeyID: Params.MinioAccessKeyID,
SecretAccessKeyID: Params.MinioSecretAccessKey,
UseSSL: Params.MinioUseSSLStr,
BucketName: bucketName,
CreateBucket: true,
}
minioKV, err := miniokv.NewMinIOKV(ctx, option)
if err != nil {
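// fail fast: the segment manager cannot load segments without object storage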
panic(err)
}
return &segmentManager{
replica: replica,
loadIndexReqChan: loadIndexReqChan,
// TODO: init clients
dataClient: nil,
indexBuilderClient: nil,
kv: minioKV,
iCodec: &storage.InsertCodec{},
}
}
func (s *segmentManager) loadSegment(segmentID UniqueID, fieldIDs *[]int64) error {
@ -136,15 +168,45 @@ func (s *segmentManager) loadIndex(segmentID UniqueID, indexID UniqueID) error {
if !ok {
return errors.New(fmt.Sprint("cannot found index params in segment ", segmentID, " with field = ", vecFieldID))
}
err := s.queryNodeClient.LoadIndex(pathResponse.IndexFilePaths, segmentID, vecFieldID, "", targetIndexParam)
if err != nil {
return err
}
// non-blocking send
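// (sendLoadIndex publishes onto loadIndexReqChan, which can block once the channel buffer is full)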
go s.sendLoadIndex(pathResponse.IndexFilePaths, segmentID, vecFieldID, "", targetIndexParam)
}
return nil
}
func (s *segmentManager) sendLoadIndex(indexPaths []string,
segmentID int64,
fieldID int64,
fieldName string,
indexParams map[string]string) {
var indexParamsKV []*commonpb.KeyValuePair
for key, value := range indexParams {
indexParamsKV = append(indexParamsKV, &commonpb.KeyValuePair{
Key: key,
Value: value,
})
}
loadIndexRequest := internalPb.LoadIndex{
Base: &commonpb.MsgBase{
MsgType: commonpb.MsgType_kSearchResult,
},
SegmentID: segmentID,
FieldName: fieldName,
FieldID: fieldID,
IndexPaths: indexPaths,
IndexParams: indexParamsKV,
}
loadIndexMsg := &msgstream.LoadIndexMsg{
LoadIndex: loadIndexRequest,
}
messages := []msgstream.TsMsg{loadIndexMsg}
s.loadIndexReqChan <- messages
}
func (s *segmentManager) releaseSegment(in *queryPb.ReleaseSegmentRequest) error {
// TODO: implement
// TODO: release specific field, we need segCore supply relevant interface

View File

@ -264,6 +264,7 @@ func (gp *BaseTable) ParseInt(key string) int {
return value
}
// GOOSE TODO: remove writenode
func (gp *BaseTable) WriteNodeIDList() []UniqueID {
proxyIDStr, err := gp.Load("nodeID.writeNodeIDList")
if err != nil {
@ -281,6 +282,23 @@ func (gp *BaseTable) WriteNodeIDList() []UniqueID {
return ret
}
func (gp *BaseTable) DataNodeIDList() []UniqueID {
dataNodeIDStr, err := gp.Load("nodeID.dataNodeIDList")
if err != nil {
panic(err)
}
var ret []UniqueID
dataNodeIDs := strings.Split(dataNodeIDStr, ",")
for _, i := range dataNodeIDs {
v, err := strconv.Atoi(i)
if err != nil {
log.Panicf("load data node id list error, %s", err.Error())
}
ret = append(ret, UniqueID(v))
}
return ret
}
func (gp *BaseTable) ProxyIDList() []UniqueID {
proxyIDStr, err := gp.Load("nodeID.proxyIDList")
if err != nil {

View File

@ -348,6 +348,10 @@ func (rmq *RocksMQ) Consume(groupName string, channelName string, n int) ([]Cons
return nil, err
}
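// return early when nothing was consumed; the Seek below indexes the last consumed message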
if len(consumerMessage) == 0 {
return consumerMessage, nil
}
newID := consumerMessage[len(consumerMessage)-1].msgID
err = rmq.Seek(groupName, channelName, newID)
if err != nil {

View File

@ -1,6 +1,8 @@
package rocksmq
import (
"os"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
@ -23,10 +25,13 @@ func TestRocksMQ(t *testing.T) {
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
defer etcdKV.Close()
idAllocator := master.NewGlobalIDAllocator("dummy", etcdKV)
_ = idAllocator.Initialize()
name := "/tmp/rocksmq"
_ = os.RemoveAll(name)
defer os.RemoveAll(name)
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
@ -64,3 +69,121 @@ func TestRocksMQ(t *testing.T) {
assert.Equal(t, string(cMsgs[0].payload), "b_message")
assert.Equal(t, string(cMsgs[1].payload), "c_message")
}
func TestRocksMQ_Loop(t *testing.T) {
master.Init()
etcdAddr := master.Params.EtcdAddress
cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
assert.Nil(t, err)
etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
defer etcdKV.Close()
idAllocator := master.NewGlobalIDAllocator("dummy", etcdKV)
_ = idAllocator.Initialize()
name := "/tmp/rocksmq_1"
_ = os.RemoveAll(name)
defer os.RemoveAll(name)
rmq, err := NewRocksMQ(name, idAllocator)
assert.Nil(t, err)
loopNum := 100
channelName := "channel_test"
// Produce one message at a time
for i := 0; i < loopNum; i++ {
msg := "message_" + strconv.Itoa(i)
pMsg := ProducerMessage{payload: []byte(msg)}
pMsgs := make([]ProducerMessage, 1)
pMsgs[0] = pMsg
err := rmq.Produce(channelName, pMsgs)
assert.Nil(t, err)
}
// Produce loopNum messages in one batch
pMsgs := make([]ProducerMessage, loopNum)
for i := 0; i < loopNum; i++ {
msg := "message_" + strconv.Itoa(i+loopNum)
pMsg := ProducerMessage{payload: []byte(msg)}
pMsgs[i] = pMsg
}
err = rmq.Produce(channelName, pMsgs)
assert.Nil(t, err)
// Consume loopNum messages in one batch
groupName := "test_group"
_ = rmq.DestroyConsumerGroup(groupName, channelName)
err = rmq.CreateConsumerGroup(groupName, channelName)
assert.Nil(t, err)
cMsgs, err := rmq.Consume(groupName, channelName, loopNum)
assert.Nil(t, err)
assert.Equal(t, len(cMsgs), loopNum)
assert.Equal(t, string(cMsgs[0].payload), "message_"+strconv.Itoa(0))
assert.Equal(t, string(cMsgs[loopNum-1].payload), "message_"+strconv.Itoa(loopNum-1))
// Consume one message at a time
for i := 0; i < loopNum; i++ {
oneMsgs, err := rmq.Consume(groupName, channelName, 1)
assert.Nil(t, err)
assert.Equal(t, len(oneMsgs), 1)
assert.Equal(t, string(oneMsgs[0].payload), "message_"+strconv.Itoa(i+loopNum))
}
cMsgs, err = rmq.Consume(groupName, channelName, 1)
assert.Nil(t, err)
assert.Equal(t, len(cMsgs), 0)
}
//func TestRocksMQ_Goroutines(t *testing.T) {
// master.Init()
//
// etcdAddr := master.Params.EtcdAddress
// cli, err := clientv3.New(clientv3.Config{Endpoints: []string{etcdAddr}})
// assert.Nil(t, err)
// etcdKV := etcdkv.NewEtcdKV(cli, "/etcd/test/root")
// defer etcdKV.Close()
// idAllocator := master.NewGlobalIDAllocator("dummy", etcdKV)
// _ = idAllocator.Initialize()
//
// name := "/tmp/rocksmq"
// defer os.RemoveAll(name)
// rmq, err := NewRocksMQ(name, idAllocator)
// assert.Nil(t, err)
//
// loopNum := 100
// channelName := "channel_test"
// // Produce two messages in each goroutine
// var wg sync.WaitGroup
// wg.Add(1)
// for i := 0; i < loopNum/2; i++ {
// go func() {
// wg.Add(2)
// msg_0 := "message_" + strconv.Itoa(i)
// msg_1 := "message_" + strconv.Itoa(i+1)
// pMsg_0 := ProducerMessage{payload: []byte(msg_0)}
// pMsg_1 := ProducerMessage{payload: []byte(msg_1)}
// pMsgs := make([]ProducerMessage, 2)
// pMsgs[0] = pMsg_0
// pMsgs[1] = pMsg_1
//
// err := rmq.Produce(channelName, pMsgs)
// assert.Nil(t, err)
// }()
// }
//
// groupName := "test_group"
// _ = rmq.DestroyConsumerGroup(groupName, channelName)
// err = rmq.CreateConsumerGroup(groupName, channelName)
// assert.Nil(t, err)
// // Consume one message in each goroutine
// for i := 0; i < loopNum; i++ {
// go func() {
// wg.Done()
// cMsgs, err := rmq.Consume(groupName, channelName, 1)
// fmt.Println(string(cMsgs[0].payload))
// assert.Nil(t, err)
// assert.Equal(t, len(cMsgs), 1)
// }()
// }
// wg.Done()
// wg.Wait()
//}