Add `make clean` target to Makefile

Signed-off-by: cai.zhang <cai.zhang@zilliz.com>
pull/4973/head^2
cai.zhang 2021-01-06 13:50:48 +08:00 committed by yefu.chen
parent da6eeddbf8
commit 1cfc2ff0a5
56 changed files with 2873 additions and 1191 deletions

View File

@ -127,6 +127,7 @@ install: all
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/master $(GOPATH)/bin/master
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/proxy $(GOPATH)/bin/proxy
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/writenode $(GOPATH)/bin/writenode
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/bin/indexbuilder $(GOPATH)/bin/indexbuilder
@mkdir -p $(LIBRARY_PATH) && cp -f $(PWD)/internal/core/output/lib/* $(LIBRARY_PATH)
@echo "Installation successful."
@ -134,7 +135,10 @@ clean:
@echo "Cleaning up all the generated files"
@find . -name '*.test' | xargs rm -fv
@find . -name '*~' | xargs rm -fv
@rm -rvf querynode
@rm -rvf master
@rm -rvf proxy
@rm -rvf writenode
@rm -rf bin/
@rm -rf lib/
@rm -rf $(GOPATH)/bin/master
@rm -rf $(GOPATH)/bin/proxy
@rm -rf $(GOPATH)/bin/querynode
@rm -rf $(GOPATH)/bin/writenode
@rm -rf $(GOPATH)/bin/indexbuilder

315
cmd/storage/benchmark.go Normal file
View File

@ -0,0 +1,315 @@
package main
import (
"context"
"crypto/md5"
"flag"
"fmt"
"log"
"math/rand"
"os"
"sync"
"sync/atomic"
"time"
"github.com/pivotal-golang/bytefmt"
"github.com/zilliztech/milvus-distributed/internal/storage"
storagetype "github.com/zilliztech/milvus-distributed/internal/storage/type"
)
// Global variables shared by all benchmark worker goroutines.
var durationSecs, threads, loops, numVersion, batchOpSize int // CLI-configurable knobs; see flag setup in main
var valueSize uint64 // size in bytes of each written value (parsed from -z)
var valueData []byte // payload for single-key puts, filled with random bytes in main
var batchValueData [][]byte // one payload per slot for batch puts
var counter, totalKeyCount, keyNum int32 // op counter / key-pool size / next key id; updated atomically by workers
var endTime, setFinish, getFinish, deleteFinish time.Time // phase deadline and per-phase completion timestamps
var totalKeys [][]byte // keys written during the set phase, consumed by get/delete phases
var logFileName = "benchmark.log"
var logFile *os.File
var store storagetype.Store // backing store under test (tikv, minio, or s3)
var wg sync.WaitGroup // joins the worker goroutines of each phase
// runSet repeatedly writes single keys until the shared endTime deadline.
// Each claimed key is written once per version in [1, numVersion]; the shared
// counter tracks the total number of put operations. Signals wg when done.
func runSet() {
	for time.Now().Before(endTime) {
		keyID := atomic.AddInt32(&keyNum, 1)
		key := []byte(fmt.Sprint("key", keyID))
		for version := 1; version <= numVersion; version++ {
			atomic.AddInt32(&counter, 1)
			if err := store.PutRow(context.Background(), key, valueData, "empty", uint64(version)); err != nil {
				log.Fatalf("Error setting key %s, %s", key, err.Error())
			}
		}
	}
	// Remember last done time
	setFinish = time.Now()
	wg.Done()
}
// runBatchSet writes batches of batchOpSize keys until the endTime deadline.
// Each pass atomically claims a contiguous key range [num-batchOpSize, num)
// via keyNum, then writes the whole batch once per version in [1, numVersion].
// The shared counter counts batch operations (not individual kv pairs).
func runBatchSet() {
	for time.Now().Before(endTime) {
		num := atomic.AddInt32(&keyNum, int32(batchOpSize))
		keys := make([][]byte, batchOpSize)
		versions := make([]uint64, batchOpSize)
		batchSuffix := make([]string, batchOpSize)
		// Build one key per slot of the claimed range.
		for n := batchOpSize; n > 0; n-- {
			keys[n-1] = []byte(fmt.Sprint("key", num-int32(n)))
		}
		for ver := 1; ver <= numVersion; ver++ {
			atomic.AddInt32(&counter, 1)
			// Fix: versions was previously left zero-filled, so every batch
			// was written with version 0 instead of the loop's version —
			// inconsistent with runSet, which passes uint64(ver) per write.
			for i := range versions {
				versions[i] = uint64(ver)
			}
			err := store.PutRows(context.Background(), keys, batchValueData, batchSuffix, versions)
			if err != nil {
				log.Fatalf("Error setting batch keys %s %s", keys, err.Error())
			}
		}
	}
	// Remember last done time
	setFinish = time.Now()
	wg.Done()
}
// runGet reads random previously written keys until the endTime deadline.
// NOTE(review): assumes a prior set phase populated totalKeys and that
// totalKeyCount > 0 — the modulo below panics otherwise; confirm callers.
func runGet() {
	for time.Now().Before(endTime) {
		opIndex := atomic.AddInt32(&counter, 1) % totalKeyCount
		key := totalKeys[opIndex]
		if _, err := store.GetRow(context.Background(), key, uint64(numVersion)); err != nil {
			log.Fatalf("Error getting key %s, %s", key, err.Error())
		}
	}
	// Remember last done time
	getFinish = time.Now()
	wg.Done()
}
// runBatchGet reads batches of batchOpSize keys from totalKeys until the
// endTime deadline. Assumes a prior set phase populated totalKeys and
// totalKeyCount; signals wg when done.
func runBatchGet() {
for time.Now().Before(endTime) {
// Claim a nominal range end; keyNum only sequences workers here.
num := atomic.AddInt32(&keyNum, int32(batchOpSize))
//keys := make([][]byte, batchOpSize)
//for n := batchOpSize; n > 0; n-- {
//	keys[n-1] = []byte(fmt.Sprint("key", num-int32(n)))
//}
// Map into the key pool and clamp so [start, end) is batchOpSize wide.
// NOTE(review): if totalKeyCount < batchOpSize the slice below exceeds
// len(totalKeys) and panics — confirm the set phase writes enough keys.
end := num % totalKeyCount
if end < int32(batchOpSize) {
end = int32(batchOpSize)
}
start := end - int32(batchOpSize)
keys := totalKeys[start:end]
// Every slot is fetched at the same (max) version.
versions := make([]uint64, batchOpSize)
for i := range versions {
versions[i] = uint64(numVersion)
}
atomic.AddInt32(&counter, 1)
_, err := store.GetRows(context.Background(), keys, versions)
if err != nil {
log.Fatalf("Error getting key %s, %s", keys, err.Error())
//atomic.AddInt32(&batchGetCount, -1)
}
}
// Remember last done time
getFinish = time.Now()
wg.Done()
}
// runDelete deletes random previously written keys until the endTime
// deadline. Assumes a prior set phase populated totalKeys and that
// totalKeyCount > 0 (the modulo below panics otherwise). Signals wg when done.
func runDelete() {
	for time.Now().Before(endTime) {
		num := atomic.AddInt32(&counter, 1)
		num = num % totalKeyCount
		key := totalKeys[num]
		err := store.DeleteRow(context.Background(), key, uint64(numVersion))
		if err != nil {
			// Fix: the message previously said "getting" for a delete failure.
			log.Fatalf("Error deleting key %s, %s", key, err.Error())
		}
	}
	// Remember last done time
	deleteFinish = time.Now()
	wg.Done()
}
// runBatchDelete deletes batches of batchOpSize keys from totalKeys until
// the endTime deadline. Assumes a prior set phase populated totalKeys and
// totalKeyCount; signals wg when done.
func runBatchDelete() {
	for time.Now().Before(endTime) {
		// keyNum only sequences workers here; map it into the key pool.
		num := atomic.AddInt32(&keyNum, int32(batchOpSize))
		// Clamp so [start, end) is exactly batchOpSize wide.
		// NOTE(review): panics if totalKeyCount < batchOpSize — confirm the
		// set phase writes enough keys (same caveat as runBatchGet).
		end := num % totalKeyCount
		if end < int32(batchOpSize) {
			end = int32(batchOpSize)
		}
		start := end - int32(batchOpSize)
		keys := totalKeys[start:end]
		atomic.AddInt32(&counter, 1)
		// Every slot is deleted at the same (max) version.
		versions := make([]uint64, batchOpSize)
		for i := range versions {
			versions[i] = uint64(numVersion)
		}
		err := store.DeleteRows(context.Background(), keys, versions)
		if err != nil {
			// Fix: the message previously said "getting" for a delete failure.
			log.Fatalf("Error deleting batch keys %s, %s", keys, err.Error())
		}
	}
	// Fix: this previously stamped getFinish, clobbering the get phase's
	// timestamp and leaving deleteFinish unset; record the delete phase end.
	deleteFinish = time.Now()
	wg.Done()
}
// main drives a simple KV-store benchmark: it parses CLI flags, connects to
// the chosen backing store, prepares random payloads, then runs `loops`
// rounds of the timed batch-PUT workload (the GET/DELETE phases are kept
// below but currently disabled) and appends results to a log file.
func main() {
	// Parse command line
	myflag := flag.NewFlagSet("myflag", flag.ExitOnError)
	myflag.IntVar(&durationSecs, "d", 5, "Duration of each test in seconds")
	myflag.IntVar(&threads, "t", 1, "Number of threads to run")
	myflag.IntVar(&loops, "l", 1, "Number of times to repeat test")
	var sizeArg string
	var storeType string
	myflag.StringVar(&sizeArg, "z", "1k", "Size of objects in bytes with postfix K, M, and G")
	myflag.StringVar(&storeType, "s", "s3", "Storage type, tikv or minio or s3")
	myflag.IntVar(&numVersion, "v", 1, "Max versions for each key")
	myflag.IntVar(&batchOpSize, "b", 100, "Batch operation kv pair number")
	if err := myflag.Parse(os.Args[1:]); err != nil {
		os.Exit(1)
	}
	// Check the arguments
	var err error
	if valueSize, err = bytefmt.ToBytes(sizeArg); err != nil {
		log.Fatalf("Invalid -z argument for object size: %v", err)
	}
	var option = storagetype.Option{TikvAddress: "localhost:2379", Type: storeType, BucketName: "zilliz-hz"}
	store, err = storage.NewStore(context.Background(), option)
	if err != nil {
		// Fix: was Fatalf with a concatenated string (go vet warns on
		// non-constant format strings); output is unchanged.
		log.Fatalf("Error when creating storage %s", err.Error())
	}
	logFile, err = os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0777)
	if err != nil {
		log.Fatalf("Prepare log file error, %s", err.Error())
	}
	// Fix: the log file was never closed; close it on normal return.
	defer logFile.Close()
	// Echo the parameters
	log.Printf("Benchmark log will write to file %s\n", logFile.Name())
	fmt.Fprintf(logFile, "Parameters: duration=%d, threads=%d, loops=%d, valueSize=%s, batchSize=%d, versions=%d\n", durationSecs, threads, loops, sizeArg, batchOpSize, numVersion)
	// Init test data
	valueData = make([]byte, valueSize)
	rand.Read(valueData)
	// NOTE(review): the md5 digests below are computed but never read;
	// kept as-is so the file-level crypto/md5 import stays used — confirm
	// whether a checksum was meant to be logged here.
	hasher := md5.New()
	hasher.Write(valueData)
	batchValueData = make([][]byte, batchOpSize)
	for i := range batchValueData {
		batchValueData[i] = make([]byte, valueSize)
		rand.Read(batchValueData[i])
		hasher := md5.New()
		hasher.Write(batchValueData[i])
	}
	// Loop running the tests
	for loop := 1; loop <= loops; loop++ {
		// reset counters
		counter = 0
		keyNum = 0
		totalKeyCount = 0
		totalKeys = nil
		// Run the batchSet case
		// key seq start from setCount
		counter = 0
		startTime := time.Now()
		endTime = startTime.Add(time.Second * time.Duration(durationSecs))
		for n := 1; n <= threads; n++ {
			wg.Add(1)
			go runBatchSet()
		}
		wg.Wait()
		setTime := setFinish.Sub(startTime).Seconds()
		bps := float64(uint64(counter)*valueSize*uint64(batchOpSize)) / setTime
		fmt.Fprintf(logFile, "Loop %d: BATCH PUT time %.1f secs, batchs = %d, kv pairs = %d, speed = %sB/sec, %.1f operations/sec, %.1f kv/sec.\n",
			loop, setTime, counter, counter*int32(batchOpSize), bytefmt.ByteSize(uint64(bps)), float64(counter)/setTime, float64(counter*int32(batchOpSize))/setTime)
		// Record all test keys
		//totalKeyCount = keyNum
		//totalKeys = make([][]byte, totalKeyCount)
		//for i := int32(0); i < totalKeyCount; i++ {
		//	totalKeys[i] = []byte(fmt.Sprint("key", i))
		//}
		//
		//// Run the get case
		//counter = 0
		//startTime = time.Now()
		//endTime = startTime.Add(time.Second * time.Duration(durationSecs))
		//for n := 1; n <= threads; n++ {
		//	wg.Add(1)
		//	go runGet()
		//}
		//wg.Wait()
		//
		//getTime := getFinish.Sub(startTime).Seconds()
		//bps = float64(uint64(counter)*valueSize) / getTime
		//fmt.Fprint(logFile, fmt.Sprintf("Loop %d: GET time %.1f secs, kv pairs = %d, speed = %sB/sec, %.1f operations/sec, %.1f kv/sec.\n",
		//	loop, getTime, counter, bytefmt.ByteSize(uint64(bps)), float64(counter)/getTime, float64(counter)/getTime))
		// Run the batchGet case
		//counter = 0
		//startTime = time.Now()
		//endTime = startTime.Add(time.Second * time.Duration(durationSecs))
		//for n := 1; n <= threads; n++ {
		//	wg.Add(1)
		//	go runBatchGet()
		//}
		//wg.Wait()
		//
		//getTime = getFinish.Sub(startTime).Seconds()
		//bps = float64(uint64(counter)*valueSize*uint64(batchOpSize)) / getTime
		//fmt.Fprint(logFile, fmt.Sprintf("Loop %d: BATCH GET time %.1f secs, batchs = %d, kv pairs = %d, speed = %sB/sec, %.1f operations/sec, %.1f kv/sec.\n",
		//	loop, getTime, counter, counter*int32(batchOpSize), bytefmt.ByteSize(uint64(bps)), float64(counter)/getTime, float64(counter * int32(batchOpSize))/getTime))
		//
		//// Run the delete case
		//counter = 0
		//startTime = time.Now()
		//endTime = startTime.Add(time.Second * time.Duration(durationSecs))
		//for n := 1; n <= threads; n++ {
		//	wg.Add(1)
		//	go runDelete()
		//}
		//wg.Wait()
		//
		//deleteTime := deleteFinish.Sub(startTime).Seconds()
		//bps = float64(uint64(counter)*valueSize) / deleteTime
		//fmt.Fprint(logFile, fmt.Sprintf("Loop %d: Delete time %.1f secs, kv pairs = %d, %.1f operations/sec, %.1f kv/sec.\n",
		//	loop, deleteTime, counter, float64(counter)/deleteTime, float64(counter)/deleteTime))
		//
		//// Run the batchDelete case
		//counter = 0
		//startTime = time.Now()
		//endTime = startTime.Add(time.Second * time.Duration(durationSecs))
		//for n := 1; n <= threads; n++ {
		//	wg.Add(1)
		//	go runBatchDelete()
		//}
		//wg.Wait()
		//
		//deleteTime = setFinish.Sub(startTime).Seconds()
		//bps = float64(uint64(counter)*valueSize*uint64(batchOpSize)) / setTime
		//fmt.Fprint(logFile, fmt.Sprintf("Loop %d: BATCH DELETE time %.1f secs, batchs = %d, kv pairs = %d, %.1f operations/sec, %.1f kv/sec.\n",
		//	loop, setTime, counter, counter*int32(batchOpSize), float64(counter)/setTime, float64(counter * int32(batchOpSize))/setTime))
		// Print line mark
		lineMark := "\n"
		fmt.Fprint(logFile, lineMark)
	}
	log.Print("Benchmark test done.")
}

View File

@ -4,15 +4,13 @@ set(MILVUS_QUERY_SRCS
generated/PlanNode.cpp
generated/Expr.cpp
visitors/ShowPlanNodeVisitor.cpp
visitors/ShowExprVisitor.cpp
visitors/ExecPlanNodeVisitor.cpp
visitors/ShowExprVisitor.cpp
visitors/ExecExprVisitor.cpp
visitors/VerifyPlanNodeVisitor.cpp
visitors/VerifyExprVisitor.cpp
Plan.cpp
Search.cpp
SearchOnSealed.cpp
BruteForceSearch.cpp
)
add_library(milvus_query ${MILVUS_QUERY_SRCS})
target_link_libraries(milvus_query milvus_proto milvus_utils knowhere)
target_link_libraries(milvus_query milvus_proto milvus_utils)

View File

@ -21,7 +21,6 @@
#include <boost/align/aligned_allocator.hpp>
#include <boost/algorithm/string.hpp>
#include <algorithm>
#include "query/generated/VerifyPlanNodeVisitor.h"
namespace milvus::query {
@ -139,8 +138,6 @@ Parser::CreatePlanImpl(const std::string& dsl_str) {
if (predicate != nullptr) {
vec_node->predicate_ = std::move(predicate);
}
VerifyPlanNodeVisitor verifier;
vec_node->accept(verifier);
auto plan = std::make_unique<Plan>(schema);
plan->tag2field_ = std::move(tag2field_);

View File

@ -21,7 +21,7 @@
#include "ExprVisitor.h"
namespace milvus::query {
class ExecExprVisitor : public ExprVisitor {
class ExecExprVisitor : ExprVisitor {
public:
void
visit(BoolUnaryExpr& expr) override;

View File

@ -19,7 +19,7 @@
#include "PlanNodeVisitor.h"
namespace milvus::query {
class ExecPlanNodeVisitor : public PlanNodeVisitor {
class ExecPlanNodeVisitor : PlanNodeVisitor {
public:
void
visit(FloatVectorANNS& node) override;

View File

@ -19,7 +19,7 @@
#include "ExprVisitor.h"
namespace milvus::query {
class ShowExprVisitor : public ExprVisitor {
class ShowExprVisitor : ExprVisitor {
public:
void
visit(BoolUnaryExpr& expr) override;

View File

@ -20,7 +20,7 @@
#include "PlanNodeVisitor.h"
namespace milvus::query {
class ShowPlanNodeVisitor : public PlanNodeVisitor {
class ShowPlanNodeVisitor : PlanNodeVisitor {
public:
void
visit(FloatVectorANNS& node) override;

View File

@ -1,36 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#error TODO: copy this file out, and modify the content.
#include "query/generated/VerifyExprVisitor.h"
namespace milvus::query {
void
VerifyExprVisitor::visit(BoolUnaryExpr& expr) {
// TODO
}
void
VerifyExprVisitor::visit(BoolBinaryExpr& expr) {
// TODO
}
void
VerifyExprVisitor::visit(TermExpr& expr) {
// TODO
}
void
VerifyExprVisitor::visit(RangeExpr& expr) {
// TODO
}
} // namespace milvus::query

View File

@ -1,40 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
// Generated File
// DO NOT EDIT
#include <optional>
#include <boost/dynamic_bitset.hpp>
#include <utility>
#include <deque>
#include "segcore/SegmentSmallIndex.h"
#include "query/ExprImpl.h"
#include "ExprVisitor.h"
namespace milvus::query {
class VerifyExprVisitor : public ExprVisitor {
public:
void
visit(BoolUnaryExpr& expr) override;
void
visit(BoolBinaryExpr& expr) override;
void
visit(TermExpr& expr) override;
void
visit(RangeExpr& expr) override;
public:
};
} // namespace milvus::query

View File

@ -1,26 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#error TODO: copy this file out, and modify the content.
#include "query/generated/VerifyPlanNodeVisitor.h"
namespace milvus::query {
void
VerifyPlanNodeVisitor::visit(FloatVectorANNS& node) {
// TODO
}
void
VerifyPlanNodeVisitor::visit(BinaryVectorANNS& node) {
// TODO
}
} // namespace milvus::query

View File

@ -1,37 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
// Generated File
// DO NOT EDIT
#include "utils/Json.h"
#include "query/PlanImpl.h"
#include "segcore/SegmentBase.h"
#include <utility>
#include "PlanNodeVisitor.h"
namespace milvus::query {
class VerifyPlanNodeVisitor : public PlanNodeVisitor {
public:
void
visit(FloatVectorANNS& node) override;
void
visit(BinaryVectorANNS& node) override;
public:
using RetType = QueryResult;
VerifyPlanNodeVisitor() = default;
private:
std::optional<RetType> ret_;
};
} // namespace milvus::query

View File

@ -1,35 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#include "query/generated/VerifyExprVisitor.h"
namespace milvus::query {
void
VerifyExprVisitor::visit(BoolUnaryExpr& expr) {
// TODO
}
void
VerifyExprVisitor::visit(BoolBinaryExpr& expr) {
// TODO
}
void
VerifyExprVisitor::visit(TermExpr& expr) {
// TODO
}
void
VerifyExprVisitor::visit(RangeExpr& expr) {
// TODO
}
} // namespace milvus::query

View File

@ -1,85 +0,0 @@
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#include "query/generated/VerifyPlanNodeVisitor.h"
#include "knowhere/index/vector_index/ConfAdapterMgr.h"
#include "segcore/SegmentSmallIndex.h"
#include "knowhere/index/vector_index/ConfAdapter.h"
#include "knowhere/index/vector_index/helpers/IndexParameter.h"
namespace milvus::query {
#if 1
namespace impl {
// THIS CONTAINS EXTRA BODY FOR VISITOR
// WILL BE USED BY GENERATOR UNDER suvlim/core_gen/
class VerifyPlanNodeVisitor : PlanNodeVisitor {
public:
using RetType = QueryResult;
VerifyPlanNodeVisitor() = default;
private:
std::optional<RetType> ret_;
};
} // namespace impl
#endif
static knowhere::IndexType
InferIndexType(const Json& search_params) {
// ivf -> nprobe
// nsg -> search_length
// hnsw/rhnsw/*pq/*sq -> ef
// annoy -> search_k
// ngtpanng / ngtonng -> max_search_edges / epsilon
static const std::map<std::string, knowhere::IndexType> key_list = [] {
std::map<std::string, knowhere::IndexType> list;
namespace ip = knowhere::IndexParams;
namespace ie = knowhere::IndexEnum;
list.emplace(ip::nprobe, ie::INDEX_FAISS_IVFFLAT);
list.emplace(ip::search_length, ie::INDEX_NSG);
list.emplace(ip::ef, ie::INDEX_HNSW);
list.emplace(ip::search_k, ie::INDEX_ANNOY);
list.emplace(ip::max_search_edges, ie::INDEX_NGTONNG);
list.emplace(ip::epsilon, ie::INDEX_NGTONNG);
return list;
}();
auto dbg_str = search_params.dump();
for (auto& kv : search_params.items()) {
std::string key = kv.key();
if (key_list.count(key)) {
return key_list.at(key);
}
}
PanicInfo("failed to infer index type");
}
void
VerifyPlanNodeVisitor::visit(FloatVectorANNS& node) {
auto& search_params = node.query_info_.search_params_;
auto inferred_type = InferIndexType(search_params);
auto adapter = knowhere::AdapterMgr::GetInstance().GetAdapter(inferred_type);
auto index_mode = knowhere::IndexMode::MODE_CPU;
// mock the api, topk will be passed from placeholder
auto params_copy = search_params;
params_copy[knowhere::meta::TOPK] = 10;
// NOTE: the second parameter is not checked in knowhere, may be redundant
auto passed = adapter->CheckSearch(params_copy, inferred_type, index_mode);
AssertInfo(passed, "invalid search params");
}
void
VerifyPlanNodeVisitor::visit(BinaryVectorANNS& node) {
// TODO
}
} // namespace milvus::query

View File

@ -24,6 +24,5 @@ target_link_libraries(milvus_segcore
dl backtrace
milvus_common
milvus_query
milvus_utils
)

View File

@ -24,8 +24,10 @@ target_link_libraries(all_tests
gtest_main
milvus_segcore
milvus_indexbuilder
knowhere
log
pthread
milvus_utils
)
install (TARGETS all_tests DESTINATION unittest)

View File

@ -137,7 +137,7 @@ TEST(CApiTest, SearchTest) {
auto offset = PreInsert(segment, N);
auto ins_res = Insert(segment, offset, N, uids.data(), timestamps.data(), raw_data.data(), (int)line_sizeof, N);
ASSERT_EQ(ins_res.error_code, Success);
assert(ins_res.error_code == Success);
const char* dsl_string = R"(
{
@ -176,11 +176,11 @@ TEST(CApiTest, SearchTest) {
void* plan = nullptr;
auto status = CreatePlan(collection, dsl_string, &plan);
ASSERT_EQ(status.error_code, Success);
assert(status.error_code == Success);
void* placeholderGroup = nullptr;
status = ParsePlaceholderGroup(plan, blob.data(), blob.length(), &placeholderGroup);
ASSERT_EQ(status.error_code, Success);
assert(status.error_code == Success);
std::vector<CPlaceholderGroup> placeholderGroups;
placeholderGroups.push_back(placeholderGroup);
@ -189,7 +189,7 @@ TEST(CApiTest, SearchTest) {
CQueryResult search_result;
auto res = Search(segment, plan, placeholderGroups.data(), timestamps.data(), 1, &search_result);
ASSERT_EQ(res.error_code, Success);
assert(res.error_code == Success);
DeletePlan(plan);
DeletePlaceholderGroup(placeholderGroup);

View File

@ -11,6 +11,9 @@ import (
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"go.etcd.io/etcd/clientv3"
"github.com/zilliztech/milvus-distributed/internal/allocator"
@ -68,16 +71,19 @@ func CreateBuilder(ctx context.Context) (*Builder, error) {
idAllocator, err := allocator.NewIDAllocator(b.loopCtx, Params.MasterAddress)
option := &miniokv.Option{
Address: Params.MinIOAddress,
AccessKeyID: Params.MinIOAccessKeyID,
SecretAccessKeyID: Params.MinIOSecretAccessKey,
UseSSL: Params.MinIOUseSSL,
BucketName: Params.MinioBucketName,
CreateBucket: true,
minIOEndPoint := Params.MinIOAddress
minIOAccessKeyID := Params.MinIOAccessKeyID
minIOSecretAccessKey := Params.MinIOSecretAccessKey
minIOUseSSL := Params.MinIOUseSSL
minIOClient, err := minio.New(minIOEndPoint, &minio.Options{
Creds: credentials.NewStaticV4(minIOAccessKeyID, minIOSecretAccessKey, ""),
Secure: minIOUseSSL,
})
if err != nil {
return nil, err
}
b.kv, err = miniokv.NewMinIOKV(b.loopCtx, option)
b.kv, err = miniokv.NewMinIOKV(b.loopCtx, minIOClient, Params.MinioBucketName)
if err != nil {
return nil, err
}

View File

@ -2,15 +2,11 @@ package miniokv
import (
"context"
"fmt"
"io"
"log"
"strings"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/zilliztech/milvus-distributed/internal/errors"
)
type MinIOKV struct {
@ -19,46 +15,24 @@ type MinIOKV struct {
bucketName string
}
type Option struct {
Address string
AccessKeyID string
BucketName string
SecretAccessKeyID string
UseSSL bool
CreateBucket bool // when bucket not existed, create it
}
// NewMinIOKV creates a new MinIO kv.
func NewMinIOKV(ctx context.Context, client *minio.Client, bucketName string) (*MinIOKV, error) {
func NewMinIOKV(ctx context.Context, option *Option) (*MinIOKV, error) {
minIOClient, err := minio.New(option.Address, &minio.Options{
Creds: credentials.NewStaticV4(option.AccessKeyID, option.SecretAccessKeyID, ""),
Secure: option.UseSSL,
})
bucketExists, err := client.BucketExists(ctx, bucketName)
if err != nil {
return nil, err
}
bucketExists, err := minIOClient.BucketExists(ctx, option.BucketName)
if err != nil {
return nil, err
}
if option.CreateBucket {
if !bucketExists {
err = minIOClient.MakeBucket(ctx, option.BucketName, minio.MakeBucketOptions{})
if err != nil {
return nil, err
}
}
} else {
if !bucketExists {
return nil, errors.New(fmt.Sprintf("Bucket %s not Existed.", option.BucketName))
if !bucketExists {
err = client.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{})
if err != nil {
return nil, err
}
}
return &MinIOKV{
ctx: ctx,
minioClient: minIOClient,
bucketName: option.BucketName,
minioClient: client,
bucketName: bucketName,
}, nil
}

View File

@ -5,6 +5,8 @@ import (
"strconv"
"testing"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
"github.com/zilliztech/milvus-distributed/internal/util/paramtable"
@ -13,31 +15,24 @@ import (
var Params paramtable.BaseTable
func newMinIOKVClient(ctx context.Context, bucketName string) (*miniokv.MinIOKV, error) {
func TestMinIOKV_Load(t *testing.T) {
Params.Init()
endPoint, _ := Params.Load("_MinioAddress")
accessKeyID, _ := Params.Load("minio.accessKeyID")
secretAccessKey, _ := Params.Load("minio.secretAccessKey")
useSSLStr, _ := Params.Load("minio.useSSL")
useSSL, _ := strconv.ParseBool(useSSLStr)
option := &miniokv.Option{
Address: endPoint,
AccessKeyID: accessKeyID,
SecretAccessKeyID: secretAccessKey,
UseSSL: useSSL,
BucketName: bucketName,
CreateBucket: true,
}
client, err := miniokv.NewMinIOKV(ctx, option)
return client, err
}
func TestMinIOKV_Load(t *testing.T) {
Params.Init()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
useSSL, _ := strconv.ParseBool(useSSLStr)
minioClient, err := minio.New(endPoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
Secure: useSSL,
})
assert.Nil(t, err)
bucketName := "fantastic-tech-test"
MinIOKV, err := newMinIOKVClient(ctx, bucketName)
MinIOKV, err := miniokv.NewMinIOKV(ctx, minioClient, bucketName)
assert.Nil(t, err)
defer MinIOKV.RemoveWithPrefix("")
@ -84,14 +79,25 @@ func TestMinIOKV_Load(t *testing.T) {
}
func TestMinIOKV_MultiSave(t *testing.T) {
Params.Init()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
bucketName := "fantastic-tech-test"
MinIOKV, err := newMinIOKVClient(ctx, bucketName)
Params.Init()
endPoint, _ := Params.Load("_MinioAddress")
accessKeyID, _ := Params.Load("minio.accessKeyID")
secretAccessKey, _ := Params.Load("minio.secretAccessKey")
useSSLStr, _ := Params.Load("minio.useSSL")
useSSL, _ := strconv.ParseBool(useSSLStr)
minioClient, err := minio.New(endPoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
Secure: useSSL,
})
assert.Nil(t, err)
bucketName := "fantastic-tech-test"
MinIOKV, err := miniokv.NewMinIOKV(ctx, minioClient, bucketName)
assert.Nil(t, err)
defer MinIOKV.RemoveWithPrefix("")
err = MinIOKV.Save("key_1", "111")
@ -111,13 +117,25 @@ func TestMinIOKV_MultiSave(t *testing.T) {
}
func TestMinIOKV_Remove(t *testing.T) {
Params.Init()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Params.Init()
endPoint, _ := Params.Load("_MinioAddress")
accessKeyID, _ := Params.Load("minio.accessKeyID")
secretAccessKey, _ := Params.Load("minio.secretAccessKey")
useSSLStr, _ := Params.Load("minio.useSSL")
useSSL, _ := strconv.ParseBool(useSSLStr)
minioClient, err := minio.New(endPoint, &minio.Options{
Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
Secure: useSSL,
})
assert.Nil(t, err)
bucketName := "fantastic-tech-test"
MinIOKV, err := newMinIOKVClient(ctx, bucketName)
MinIOKV, err := miniokv.NewMinIOKV(ctx, minioClient, bucketName)
assert.Nil(t, err)
defer MinIOKV.RemoveWithPrefix("")

View File

@ -9,25 +9,19 @@ import (
)
type WriteNodeClient interface {
FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error
FlushSegment(segmentID UniqueID) error
DescribeSegment(segmentID UniqueID) (*writerclient.SegmentDescription, error)
GetInsertBinlogPaths(segmentID UniqueID) (map[UniqueID][]string, error)
}
type MockWriteNodeClient struct {
segmentID UniqueID
flushTime time.Time
partitionTag string
timestamp Timestamp
collectionID UniqueID
segmentID UniqueID
flushTime time.Time
}
func (m *MockWriteNodeClient) FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error {
func (m *MockWriteNodeClient) FlushSegment(segmentID UniqueID) error {
m.flushTime = time.Now()
m.segmentID = segmentID
m.collectionID = collectionID
m.partitionTag = partitionTag
m.timestamp = timestamp
return nil
}

View File

@ -17,12 +17,11 @@ type FlushScheduler struct {
segmentDescribeChan chan UniqueID
indexBuilderSch persistenceScheduler
ctx context.Context
cancel context.CancelFunc
globalTSOAllocator func() (Timestamp, error)
ctx context.Context
cancel context.CancelFunc
}
func NewFlushScheduler(ctx context.Context, client WriteNodeClient, metaTable *metaTable, buildScheduler *IndexBuildScheduler, globalTSOAllocator func() (Timestamp, error)) *FlushScheduler {
func NewFlushScheduler(ctx context.Context, client WriteNodeClient, metaTable *metaTable, buildScheduler *IndexBuildScheduler) *FlushScheduler {
ctx2, cancel := context.WithCancel(ctx)
return &FlushScheduler{
@ -33,23 +32,12 @@ func NewFlushScheduler(ctx context.Context, client WriteNodeClient, metaTable *m
segmentDescribeChan: make(chan UniqueID, 100),
ctx: ctx2,
cancel: cancel,
globalTSOAllocator: globalTSOAllocator,
}
}
func (scheduler *FlushScheduler) schedule(id interface{}) error {
segmentID := id.(UniqueID)
segmentMeta, err := scheduler.metaTable.GetSegmentByID(segmentID)
if err != nil {
return err
}
ts, err := scheduler.globalTSOAllocator()
if err != nil {
return err
}
// todo set corrent timestamp
err = scheduler.client.FlushSegment(segmentID, segmentMeta.CollectionID, segmentMeta.PartitionTag, ts)
err := scheduler.client.FlushSegment(segmentID)
log.Printf("flush segment %d", segmentID)
if err != nil {
return err

View File

@ -193,7 +193,7 @@ func CreateServer(ctx context.Context) (*Master, error) {
m.indexLoadSch = NewIndexLoadScheduler(ctx, loadIndexClient, m.metaTable)
m.indexBuildSch = NewIndexBuildScheduler(ctx, buildIndexClient, m.metaTable, m.indexLoadSch)
m.flushSch = NewFlushScheduler(ctx, flushClient, m.metaTable, m.indexBuildSch, func() (Timestamp, error) { return m.tsoAllocator.AllocOne() })
m.flushSch = NewFlushScheduler(ctx, flushClient, m.metaTable, m.indexBuildSch)
m.segmentAssigner = NewSegmentAssigner(ctx, metakv,
func() (Timestamp, error) { return m.tsoAllocator.AllocOne() },

View File

@ -59,11 +59,7 @@ func TestPersistenceScheduler(t *testing.T) {
//Init scheduler
indexLoadSch := NewIndexLoadScheduler(ctx, loadIndexClient, meta)
indexBuildSch := NewIndexBuildScheduler(ctx, buildIndexClient, meta, indexLoadSch)
cnt := 0
flushSch := NewFlushScheduler(ctx, flushClient, meta, indexBuildSch, func() (Timestamp, error) {
cnt++
return Timestamp(cnt), nil
})
flushSch := NewFlushScheduler(ctx, flushClient, meta, indexBuildSch)
//scheduler start
err = indexLoadSch.Start()

View File

@ -272,8 +272,6 @@ message FlushMsg {
MsgType msg_type = 1;
int64 segmentID = 2;
uint64 timestamp = 3;
int64 collectionID = 4;
string partitionTag = 5;
}

View File

@ -1881,8 +1881,6 @@ type FlushMsg struct {
MsgType MsgType `protobuf:"varint,1,opt,name=msg_type,json=msgType,proto3,enum=milvus.proto.internal.MsgType" json:"msg_type,omitempty"`
SegmentID int64 `protobuf:"varint,2,opt,name=segmentID,proto3" json:"segmentID,omitempty"`
Timestamp uint64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
CollectionID int64 `protobuf:"varint,4,opt,name=collectionID,proto3" json:"collectionID,omitempty"`
PartitionTag string `protobuf:"bytes,5,opt,name=partitionTag,proto3" json:"partitionTag,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -1934,20 +1932,6 @@ func (m *FlushMsg) GetTimestamp() uint64 {
return 0
}
func (m *FlushMsg) GetCollectionID() int64 {
if m != nil {
return m.CollectionID
}
return 0
}
func (m *FlushMsg) GetPartitionTag() string {
if m != nil {
return m.PartitionTag
}
return ""
}
type Key2Seg struct {
RowID int64 `protobuf:"varint,1,opt,name=rowID,proto3" json:"rowID,omitempty"`
PrimaryKey int64 `protobuf:"varint,2,opt,name=primary_key,json=primaryKey,proto3" json:"primary_key,omitempty"`
@ -2677,122 +2661,121 @@ func init() {
func init() { proto.RegisterFile("internal_msg.proto", fileDescriptor_7eb37f6b80b23116) }
var fileDescriptor_7eb37f6b80b23116 = []byte{
// 1867 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xcd, 0x6f, 0x24, 0x47,
0x15, 0x4f, 0x77, 0xcf, 0x87, 0xe7, 0xcd, 0x78, 0xdc, 0x5b, 0xb6, 0x77, 0x67, 0xb3, 0x21, 0xeb,
0xed, 0x45, 0xc4, 0x04, 0x61, 0x83, 0x97, 0x03, 0xb9, 0x81, 0x3d, 0x0a, 0x19, 0x16, 0xaf, 0x4c,
0xdb, 0x0a, 0x12, 0x8a, 0xd4, 0x6a, 0xcf, 0x3c, 0xcf, 0x94, 0xfa, 0x6b, 0x5c, 0xd5, 0xb3, 0xf6,
0xec, 0x81, 0x0b, 0x7b, 0x46, 0x7c, 0x88, 0x03, 0x37, 0xee, 0x90, 0x88, 0x80, 0xf8, 0x1f, 0x08,
0x1f, 0x42, 0xe2, 0xbf, 0x80, 0x03, 0x48, 0x24, 0x1c, 0xb8, 0xa1, 0xaa, 0xea, 0x8f, 0x69, 0x7b,
0x66, 0x6c, 0xc5, 0xde, 0xb0, 0x51, 0xf6, 0xd6, 0xf5, 0xba, 0xba, 0xea, 0xbd, 0xdf, 0x7b, 0xef,
0x57, 0xf5, 0x5e, 0x03, 0xa1, 0x61, 0x8c, 0x2c, 0x74, 0x7d, 0x27, 0xe0, 0xfd, 0x8d, 0x21, 0x8b,
0xe2, 0x88, 0xac, 0x06, 0xd4, 0x7f, 0x3c, 0xe2, 0x6a, 0xb4, 0x91, 0x4e, 0x78, 0xb9, 0xd1, 0x8d,
0x82, 0x20, 0x0a, 0x95, 0xf8, 0xe5, 0x1b, 0x1c, 0xd9, 0x63, 0xda, 0xc5, 0xfc, 0x3b, 0x2b, 0x84,
0x5a, 0xa7, 0x6d, 0xe3, 0xf1, 0x08, 0x79, 0x4c, 0x6e, 0x42, 0x65, 0x88, 0xc8, 0x3a, 0xed, 0x96,
0xb6, 0xa6, 0xad, 0x1b, 0x76, 0x32, 0x22, 0x0f, 0xa0, 0xc4, 0x22, 0x1f, 0x5b, 0xfa, 0x9a, 0xb6,
0xde, 0xdc, 0xba, 0xbb, 0x31, 0x75, 0xaf, 0x8d, 0x3d, 0x44, 0x66, 0x47, 0x3e, 0xda, 0x72, 0x32,
0x59, 0x81, 0x72, 0x37, 0x1a, 0x85, 0x71, 0xcb, 0x58, 0xd3, 0xd6, 0x17, 0x6d, 0x35, 0xb0, 0xfa,
0x00, 0x62, 0x3f, 0x3e, 0x8c, 0x42, 0x8e, 0xe4, 0x01, 0x54, 0x78, 0xec, 0xc6, 0x23, 0x2e, 0x37,
0xac, 0x6f, 0xdd, 0x29, 0x2e, 0x9d, 0x28, 0xbf, 0x2f, 0xa7, 0xd8, 0xc9, 0x54, 0xd2, 0x04, 0xbd,
0xd3, 0x96, 0xba, 0x18, 0xb6, 0xde, 0x69, 0xcf, 0xd8, 0x28, 0x02, 0x38, 0xe0, 0xd1, 0x27, 0x68,
0xd9, 0x63, 0xa8, 0xcb, 0x0d, 0xaf, 0x62, 0xda, 0x2b, 0x50, 0x8b, 0x69, 0x80, 0x3c, 0x76, 0x83,
0xa1, 0xd4, 0xa9, 0x64, 0xe7, 0x82, 0x19, 0xfb, 0x3e, 0xd5, 0xa0, 0xb1, 0x8f, 0xfd, 0xdc, 0x8b,
0xd9, 0x34, 0x6d, 0x62, 0x9a, 0x58, 0xba, 0x3b, 0x70, 0xc3, 0x10, 0xfd, 0x04, 0xbc, 0xb2, 0x9d,
0x0b, 0xc8, 0x1d, 0xa8, 0x75, 0x23, 0xdf, 0x77, 0x42, 0x37, 0x40, 0xb9, 0x7c, 0xcd, 0x5e, 0x10,
0x82, 0x47, 0x6e, 0x80, 0xe4, 0x3e, 0x2c, 0x0e, 0x5d, 0x16, 0xd3, 0x98, 0x46, 0xa1, 0x13, 0xbb,
0xfd, 0x56, 0x49, 0x4e, 0x68, 0x64, 0xc2, 0x03, 0xb7, 0x6f, 0xbd, 0xa7, 0x01, 0xf9, 0x26, 0xe7,
0xb4, 0x1f, 0x16, 0x94, 0xb9, 0x56, 0xe0, 0x1f, 0xc2, 0xd2, 0x10, 0x99, 0x93, 0xa8, 0xed, 0x30,
0x3c, 0x6e, 0x19, 0x6b, 0xc6, 0x7a, 0x7d, 0xeb, 0xfe, 0x8c, 0xef, 0x27, 0x55, 0xb1, 0x17, 0x87,
0xc8, 0x76, 0xd4, 0xa7, 0x36, 0x1e, 0x5b, 0x1f, 0x6a, 0xb0, 0x24, 0xdf, 0x2b, 0xad, 0x03, 0x0c,
0x25, 0x74, 0x5c, 0x88, 0x12, 0x65, 0xd5, 0xe0, 0x02, 0xe8, 0xa6, 0x7a, 0xa5, 0x08, 0x68, 0xe9,
0x22, 0x40, 0xcb, 0xe7, 0x01, 0x25, 0x77, 0xa1, 0x8e, 0xa7, 0x43, 0xca, 0xd0, 0x11, 0x11, 0xd0,
0xaa, 0xc8, 0x68, 0x00, 0x25, 0x3a, 0xa0, 0xc1, 0x64, 0x84, 0x55, 0x2f, 0x1d, 0x61, 0x16, 0x87,
0xe5, 0x82, 0x97, 0x92, 0x68, 0x7d, 0x07, 0x6e, 0x4e, 0x22, 0xeb, 0x66, 0x90, 0xb4, 0x34, 0x09,
0xf0, 0x17, 0xe6, 0x01, 0x9c, 0x03, 0x68, 0xaf, 0xe4, 0x18, 0xe7, 0x52, 0xeb, 0xbf, 0x1a, 0xdc,
0xda, 0x61, 0xe8, 0xc6, 0xb8, 0x13, 0xf9, 0x3e, 0x76, 0x85, 0x89, 0x69, 0x80, 0xbc, 0x01, 0x0b,
0x01, 0xef, 0x3b, 0xf1, 0x78, 0x88, 0x12, 0xf5, 0xe6, 0xd6, 0xab, 0x33, 0xf6, 0xda, 0xe5, 0xfd,
0x83, 0xf1, 0x10, 0xed, 0x6a, 0xa0, 0x1e, 0x88, 0x05, 0x8d, 0x6e, 0xb6, 0x5e, 0x46, 0x09, 0x05,
0x99, 0xf0, 0x0e, 0xc3, 0xe3, 0x4e, 0x5b, 0x7a, 0xc7, 0xb0, 0xd5, 0xa0, 0x98, 0x67, 0xa5, 0xb3,
0x79, 0xd6, 0x82, 0xea, 0x90, 0x45, 0xa7, 0xe3, 0x4e, 0x5b, 0x3a, 0xc6, 0xb0, 0xd3, 0x21, 0xf9,
0x2a, 0x54, 0x78, 0x77, 0x80, 0x81, 0x2b, 0xdd, 0x51, 0xdf, 0xba, 0x3d, 0x15, 0xf2, 0x6d, 0x3f,
0x3a, 0xb4, 0x93, 0x89, 0xd6, 0xcf, 0x74, 0x58, 0x6d, 0xb3, 0x68, 0xf8, 0x29, 0xb7, 0x7c, 0x17,
0x96, 0xf2, 0xd5, 0x55, 0x54, 0x2b, 0x08, 0x3e, 0x5f, 0xd4, 0x39, 0x39, 0x61, 0x36, 0x72, 0x73,
0x45, 0xc4, 0xdb, 0xcd, 0x6e, 0x61, 0x6c, 0xfd, 0x53, 0x83, 0x95, 0xb7, 0x5c, 0x7e, 0xad, 0xa0,
0x64, 0x06, 0xeb, 0x33, 0x0d, 0x36, 0xe6, 0x18, 0x5c, 0xba, 0xd0, 0xe0, 0xf2, 0x15, 0x0c, 0xfe,
0x50, 0x83, 0xdb, 0x6d, 0xe4, 0x5d, 0x46, 0x0f, 0xf1, 0xb3, 0x63, 0xf5, 0x2f, 0x35, 0x58, 0xdd,
0x1f, 0x44, 0x27, 0xcf, 0xaf, 0xc5, 0xd6, 0xef, 0x74, 0xb8, 0xa9, 0xb8, 0x69, 0x2f, 0x65, 0xdf,
0x4f, 0x28, 0x41, 0xd7, 0xa0, 0x9e, 0x11, 0x7e, 0x96, 0xa6, 0x93, 0xa2, 0xdc, 0xd2, 0xd2, 0x4c,
0x4b, 0xcb, 0x73, 0x2c, 0xad, 0x14, 0x7d, 0xfb, 0x6d, 0x68, 0xe6, 0xa7, 0x8e, 0x74, 0xad, 0x3a,
0x37, 0xee, 0x4f, 0x77, 0x6d, 0x06, 0x87, 0xf4, 0x6c, 0x7e, 0x60, 0x49, 0xc7, 0xbe, 0xaf, 0xc3,
0x8a, 0x60, 0xb5, 0x17, 0x98, 0x5d, 0x1e, 0xb3, 0x7f, 0x68, 0xb0, 0xfc, 0x96, 0xcb, 0xaf, 0x13,
0xb2, 0xeb, 0x4d, 0xfe, 0xf3, 0xc6, 0x96, 0x3f, 0xb6, 0xb1, 0xff, 0xd2, 0xa0, 0x95, 0xf2, 0xdd,
0x67, 0xc3, 0x62, 0x71, 0xa4, 0x09, 0xae, 0x7b, 0x7e, 0xad, 0xbd, 0x66, 0x72, 0xff, 0xb7, 0x0e,
0x8b, 0x9d, 0x90, 0x23, 0x8b, 0x9f, 0x99, 0xa5, 0xaf, 0x9d, 0xd7, 0x58, 0x15, 0x27, 0x67, 0x74,
0xb9, 0x54, 0x89, 0x22, 0x70, 0xe3, 0xd8, 0x17, 0x37, 0xd2, 0xec, 0x7e, 0x93, 0x0b, 0x8a, 0xb7,
0x7c, 0x45, 0x03, 0x13, 0xb7, 0xfc, 0x09, 0x54, 0xab, 0x45, 0x54, 0x5f, 0x05, 0xc8, 0xc0, 0xe7,
0xad, 0x85, 0x35, 0x43, 0x5c, 0xd3, 0x73, 0x89, 0xa8, 0x80, 0x58, 0x74, 0xd2, 0x69, 0xf3, 0x56,
0x6d, 0xcd, 0x10, 0x15, 0x90, 0x1a, 0x91, 0xaf, 0xc1, 0x02, 0x8b, 0x4e, 0x9c, 0x9e, 0x1b, 0xbb,
0x2d, 0x90, 0x97, 0xec, 0x39, 0xb7, 0xc9, 0x2a, 0x8b, 0x4e, 0xda, 0x6e, 0xec, 0x5a, 0x4f, 0x75,
0x58, 0x6c, 0xa3, 0x8f, 0x31, 0xfe, 0xff, 0x41, 0x2f, 0x20, 0x56, 0x9a, 0x83, 0x58, 0x79, 0x1e,
0x62, 0x95, 0x73, 0x88, 0xdd, 0x83, 0xc6, 0x90, 0xd1, 0xc0, 0x65, 0x63, 0xc7, 0xc3, 0xb1, 0x28,
0x6f, 0x0c, 0xc9, 0xf2, 0x4a, 0xf6, 0x10, 0xc7, 0xdc, 0xfa, 0x48, 0x83, 0xc5, 0x7d, 0x74, 0x59,
0x77, 0xf0, 0xcc, 0x60, 0x98, 0xd0, 0xdf, 0x28, 0xea, 0x3f, 0xff, 0x0e, 0xfd, 0x45, 0x30, 0x19,
0xf2, 0x91, 0x1f, 0x3b, 0x39, 0x38, 0x0a, 0x80, 0x25, 0x25, 0xdf, 0xc9, 0x20, 0xda, 0x84, 0xf2,
0xf1, 0x08, 0xd9, 0xf8, 0xe2, 0x6a, 0x42, 0xcd, 0xb3, 0xfe, 0xa6, 0x81, 0xb9, 0x3f, 0xe6, 0x3b,
0x51, 0x78, 0x44, 0xfb, 0xcf, 0x9d, 0xe5, 0x04, 0x4a, 0xd2, 0x5f, 0xe5, 0x35, 0x63, 0xbd, 0x66,
0xcb, 0x67, 0xe1, 0x4b, 0x0f, 0xc7, 0xce, 0x90, 0xe1, 0x11, 0x3d, 0x45, 0xe5, 0xed, 0x9a, 0x5d,
0xf7, 0x70, 0xbc, 0x97, 0x88, 0xac, 0xbf, 0xea, 0xd0, 0x48, 0x7d, 0x29, 0xf0, 0xb9, 0x8a, 0x41,
0x79, 0x4d, 0xac, 0x5f, 0xbe, 0xeb, 0x32, 0xbd, 0x52, 0x9a, 0xcd, 0xa3, 0xf7, 0xa0, 0x21, 0xdd,
0xe1, 0x84, 0x51, 0x0f, 0x33, 0xef, 0xd6, 0xa5, 0xec, 0x91, 0x14, 0x15, 0x81, 0xaa, 0x5c, 0x26,
0x44, 0xaa, 0xd3, 0x43, 0x84, 0x40, 0x69, 0x40, 0x63, 0xc5, 0x2b, 0x0d, 0x5b, 0x3e, 0x93, 0xbb,
0x50, 0x0f, 0x30, 0x66, 0xb4, 0xab, 0x20, 0xaa, 0xc9, 0xe4, 0x04, 0x25, 0x12, 0x28, 0x58, 0x3f,
0x80, 0xfa, 0x01, 0x0d, 0xf0, 0x80, 0x76, 0xbd, 0x5d, 0xde, 0xbf, 0x0a, 0x9e, 0x79, 0xfb, 0x46,
0x2f, 0xb4, 0x6f, 0xe6, 0x1e, 0x41, 0xd6, 0x07, 0x1a, 0x2c, 0xbc, 0xe9, 0x8f, 0xf8, 0xe0, 0x8a,
0xbb, 0x17, 0x08, 0x5b, 0x9f, 0x42, 0xd8, 0x73, 0x8e, 0xc1, 0xb3, 0xb7, 0xc9, 0xd2, 0x94, 0xdb,
0xa4, 0x05, 0x85, 0x03, 0x62, 0x5a, 0x1b, 0xc6, 0xfa, 0x85, 0x06, 0xd5, 0x87, 0x38, 0xde, 0xda,
0xc7, 0xbe, 0x0c, 0x14, 0x41, 0xde, 0x69, 0x7b, 0x48, 0x0e, 0x84, 0x3b, 0x26, 0xe8, 0x2a, 0xd1,
0x13, 0x72, 0xb6, 0xba, 0x40, 0xd1, 0xdb, 0xb0, 0x40, 0xb9, 0xf3, 0xd8, 0xf5, 0x69, 0x4f, 0x2a,
0xb9, 0x60, 0x57, 0x29, 0x7f, 0x5b, 0x0c, 0x05, 0x51, 0x66, 0xe6, 0xaa, 0xb4, 0x32, 0xec, 0x09,
0x89, 0xf5, 0x0e, 0x40, 0xa2, 0x9a, 0x00, 0x3a, 0x0b, 0x63, 0x6d, 0x32, 0x8c, 0xbf, 0x0e, 0x55,
0x0f, 0xc7, 0x5b, 0x1c, 0xfb, 0x2d, 0x5d, 0x9e, 0x32, 0xb3, 0xd0, 0x4f, 0x56, 0xb2, 0xd3, 0xe9,
0xd6, 0x0f, 0x75, 0xa8, 0x7d, 0x27, 0x72, 0x7b, 0x9d, 0xb0, 0x87, 0xa7, 0xcf, 0xd4, 0x8d, 0x47,
0x14, 0xfd, 0xde, 0xa3, 0xfc, 0xa0, 0xc9, 0x05, 0x22, 0x0b, 0xe5, 0x20, 0xcf, 0xc2, 0x64, 0x28,
0x60, 0xa7, 0x42, 0x33, 0x67, 0xe8, 0xc6, 0x83, 0x94, 0x74, 0x40, 0x8a, 0xf6, 0x84, 0x84, 0xb4,
0xa1, 0x91, 0x4e, 0x60, 0x6e, 0xa0, 0xa8, 0xa7, 0xbe, 0x75, 0x6f, 0x2a, 0x23, 0x3c, 0xc4, 0xf1,
0xdb, 0xae, 0x3f, 0xc2, 0x3d, 0x97, 0x32, 0xbb, 0x9e, 0x2c, 0x22, 0xbe, 0xb2, 0x9e, 0x6a, 0x00,
0x12, 0x01, 0x41, 0x1a, 0xe7, 0x17, 0xd5, 0x3e, 0xce, 0xa2, 0xe4, 0x2b, 0xb0, 0x12, 0x8e, 0x02,
0x87, 0xa1, 0xef, 0xc6, 0xd8, 0x73, 0x12, 0x30, 0x78, 0x02, 0x0e, 0x09, 0x47, 0x81, 0xad, 0x5e,
0xed, 0x27, 0x6f, 0xac, 0x1f, 0x69, 0x00, 0x6f, 0x0a, 0xcb, 0x95, 0x1a, 0x67, 0xa3, 0x5b, 0x9b,
0x12, 0xdd, 0x13, 0xd0, 0xe9, 0x45, 0xe8, 0xb6, 0x53, 0xe8, 0x04, 0x01, 0xf2, 0xa4, 0x87, 0x7a,
0x6f, 0x86, 0x3b, 0x73, 0xe3, 0x13, 0x74, 0xe5, 0xb3, 0xf5, 0x73, 0xd5, 0x76, 0x16, 0xda, 0x29,
0x95, 0x0a, 0x5e, 0xd6, 0xce, 0x7a, 0x59, 0x72, 0x56, 0x10, 0xb1, 0xb1, 0xc3, 0xe9, 0x13, 0x4c,
0x93, 0x44, 0x89, 0xf6, 0xe9, 0x13, 0x14, 0x69, 0x20, 0x21, 0x89, 0x4e, 0x78, 0x7a, 0xea, 0x08,
0x18, 0xa2, 0x13, 0x4e, 0xbe, 0x04, 0x37, 0x18, 0x76, 0x31, 0x8c, 0xfd, 0xb1, 0x13, 0x44, 0x3d,
0x7a, 0x44, 0x31, 0x4d, 0x15, 0x33, 0x7d, 0xb1, 0x9b, 0xc8, 0xad, 0xbf, 0x6b, 0xd0, 0xfc, 0x6e,
0xca, 0xc4, 0x4a, 0xb3, 0x67, 0xc0, 0x7f, 0xdf, 0x90, 0xc6, 0x16, 0xf0, 0x9b, 0xd3, 0x83, 0xce,
0x40, 0xb2, 0x17, 0x38, 0xf6, 0x95, 0x52, 0xdb, 0x50, 0x97, 0xee, 0x48, 0xd6, 0x28, 0xcd, 0xf5,
0x41, 0xee, 0x79, 0x1b, 0x8e, 0xb2, 0x67, 0xeb, 0x57, 0x3a, 0x10, 0xd5, 0xbb, 0x90, 0x4e, 0x7a,
0xee, 0x0a, 0x8e, 0xd7, 0xa6, 0x17, 0x1c, 0xe7, 0x6f, 0x92, 0x9f, 0x03, 0x65, 0x56, 0xde, 0x58,
0x2c, 0x90, 0x40, 0x1b, 0x1a, 0x78, 0x1a, 0x33, 0x37, 0x4d, 0xba, 0xea, 0xa5, 0x93, 0x4e, 0x7e,
0x96, 0x64, 0xf2, 0xbb, 0x3a, 0xac, 0xa4, 0x25, 0xe9, 0x0b, 0xbc, 0x2e, 0xc6, 0xeb, 0xf7, 0x3a,
0xbc, 0x52, 0xc0, 0x6b, 0x8f, 0x45, 0x7d, 0x86, 0x9c, 0xbf, 0xc0, 0x6d, 0x1e, 0x6e, 0xaf, 0xff,
// 1852 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x99, 0xcd, 0x6f, 0x23, 0x49,
0x15, 0xc0, 0xb7, 0xbb, 0xfd, 0x11, 0x3f, 0x3b, 0x4e, 0x4f, 0x25, 0x99, 0xf1, 0xec, 0x2c, 0x3b,
0x99, 0x1e, 0xc4, 0x86, 0x45, 0x24, 0x90, 0xe1, 0xc0, 0xde, 0x20, 0xb1, 0x96, 0x35, 0x43, 0x46,
0xa1, 0x13, 0x2d, 0x12, 0x5a, 0xa9, 0xd5, 0xb1, 0x5f, 0xec, 0x52, 0x7f, 0x39, 0x55, 0xed, 0x49,
0x3c, 0x07, 0x24, 0xc4, 0x9c, 0x11, 0x1f, 0xe2, 0xc0, 0x8d, 0x3b, 0xec, 0x8a, 0x05, 0xf1, 0x3f,
0xf0, 0x29, 0x24, 0xfe, 0x0b, 0x38, 0x80, 0xc4, 0x2e, 0x07, 0x6e, 0xa8, 0xaa, 0xfa, 0xc3, 0x9d,
0xd8, 0x4e, 0xb4, 0xc9, 0x2c, 0xb3, 0xda, 0xb9, 0x75, 0x3d, 0x57, 0x57, 0xbf, 0xf7, 0x7b, 0x1f,
0x55, 0xf5, 0x0c, 0x84, 0x86, 0x31, 0xb2, 0xd0, 0xf5, 0x9d, 0x80, 0xf7, 0x37, 0x86, 0x2c, 0x8a,
0x23, 0xb2, 0x1a, 0x50, 0xff, 0xf1, 0x88, 0xab, 0xd1, 0x46, 0x3a, 0xe1, 0xe5, 0x46, 0x37, 0x0a,
0x82, 0x28, 0x54, 0xe2, 0x97, 0x6f, 0x70, 0x64, 0x8f, 0x69, 0x17, 0xf3, 0xf7, 0xac, 0x10, 0x6a,
0x9d, 0xb6, 0x8d, 0xc7, 0x23, 0xe4, 0x31, 0xb9, 0x09, 0x95, 0x21, 0x22, 0xeb, 0xb4, 0x5b, 0xda,
0x9a, 0xb6, 0x6e, 0xd8, 0xc9, 0x88, 0x3c, 0x80, 0x12, 0x8b, 0x7c, 0x6c, 0xe9, 0x6b, 0xda, 0x7a,
0x73, 0xeb, 0xee, 0xc6, 0xd4, 0x6f, 0x6d, 0xec, 0x21, 0x32, 0x3b, 0xf2, 0xd1, 0x96, 0x93, 0xc9,
0x0a, 0x94, 0xbb, 0xd1, 0x28, 0x8c, 0x5b, 0xc6, 0x9a, 0xb6, 0xbe, 0x68, 0xab, 0x81, 0xd5, 0x07,
0x10, 0xdf, 0xe3, 0xc3, 0x28, 0xe4, 0x48, 0x1e, 0x40, 0x85, 0xc7, 0x6e, 0x3c, 0xe2, 0xf2, 0x83,
0xf5, 0xad, 0x3b, 0xc5, 0xa5, 0x13, 0xe5, 0xf7, 0xe5, 0x14, 0x3b, 0x99, 0x4a, 0x9a, 0xa0, 0x77,
0xda, 0x52, 0x17, 0xc3, 0xd6, 0x3b, 0xed, 0x19, 0x1f, 0x8a, 0x00, 0x0e, 0x78, 0xf4, 0x31, 0x5a,
0xf6, 0x18, 0xea, 0xf2, 0x83, 0x57, 0x31, 0xed, 0x15, 0xa8, 0xc5, 0x34, 0x40, 0x1e, 0xbb, 0xc1,
0x50, 0xea, 0x54, 0xb2, 0x73, 0xc1, 0x8c, 0xef, 0x3e, 0xd5, 0xa0, 0xb1, 0x8f, 0xfd, 0xdc, 0x8b,
0xd9, 0x34, 0x6d, 0x62, 0x9a, 0x58, 0xba, 0x3b, 0x70, 0xc3, 0x10, 0xfd, 0x04, 0x5e, 0xd9, 0xce,
0x05, 0xe4, 0x0e, 0xd4, 0xba, 0x91, 0xef, 0x3b, 0xa1, 0x1b, 0xa0, 0x5c, 0xbe, 0x66, 0x2f, 0x08,
0xc1, 0x23, 0x37, 0x40, 0x72, 0x1f, 0x16, 0x87, 0x2e, 0x8b, 0x69, 0x4c, 0xa3, 0xd0, 0x89, 0xdd,
0x7e, 0xab, 0x24, 0x27, 0x34, 0x32, 0xe1, 0x81, 0xdb, 0xb7, 0xde, 0xd3, 0x80, 0x7c, 0x9d, 0x73,
0xda, 0x0f, 0x0b, 0xca, 0x5c, 0x2b, 0xf8, 0x87, 0xb0, 0x34, 0x44, 0xe6, 0x24, 0x6a, 0x3b, 0x0c,
0x8f, 0x5b, 0xc6, 0x9a, 0xb1, 0x5e, 0xdf, 0xba, 0x3f, 0xe3, 0xfd, 0x49, 0x55, 0xec, 0xc5, 0x21,
0xb2, 0x1d, 0xf5, 0xaa, 0x8d, 0xc7, 0xd6, 0x07, 0x1a, 0x2c, 0xc9, 0xdf, 0x95, 0xd6, 0x01, 0x86,
0x12, 0x1d, 0x17, 0xa2, 0x44, 0x59, 0x35, 0xb8, 0x00, 0xdd, 0x54, 0xaf, 0x14, 0x81, 0x96, 0x2e,
0x02, 0x5a, 0x3e, 0x0f, 0x94, 0xdc, 0x85, 0x3a, 0x9e, 0x0e, 0x29, 0x43, 0x47, 0x44, 0x40, 0xab,
0x22, 0xa3, 0x01, 0x94, 0xe8, 0x80, 0x06, 0x93, 0x11, 0x56, 0xbd, 0x74, 0x84, 0x59, 0x1c, 0x96,
0x0b, 0x5e, 0x4a, 0xa2, 0xf5, 0x1d, 0xb8, 0x39, 0x49, 0xd6, 0xcd, 0x90, 0xb4, 0x34, 0x09, 0xf8,
0x73, 0xf3, 0x00, 0xe7, 0x00, 0xed, 0x95, 0x9c, 0x71, 0x2e, 0xb5, 0xfe, 0xab, 0xc1, 0xad, 0x1d,
0x86, 0x6e, 0x8c, 0x3b, 0x91, 0xef, 0x63, 0x57, 0x98, 0x98, 0x06, 0xc8, 0x1b, 0xb0, 0x10, 0xf0,
0xbe, 0x13, 0x8f, 0x87, 0x28, 0xa9, 0x37, 0xb7, 0x5e, 0x9d, 0xf1, 0xad, 0x5d, 0xde, 0x3f, 0x18,
0x0f, 0xd1, 0xae, 0x06, 0xea, 0x81, 0x58, 0xd0, 0xe8, 0x66, 0xeb, 0x65, 0x25, 0xa1, 0x20, 0x13,
0xde, 0x61, 0x78, 0xdc, 0x69, 0x4b, 0xef, 0x18, 0xb6, 0x1a, 0x14, 0xf3, 0xac, 0x74, 0x36, 0xcf,
0x5a, 0x50, 0x1d, 0xb2, 0xe8, 0x74, 0xdc, 0x69, 0x4b, 0xc7, 0x18, 0x76, 0x3a, 0x24, 0x5f, 0x86,
0x0a, 0xef, 0x0e, 0x30, 0x70, 0xa5, 0x3b, 0xea, 0x5b, 0xb7, 0xa7, 0x22, 0xdf, 0xf6, 0xa3, 0x43,
0x3b, 0x99, 0x68, 0xfd, 0x54, 0x87, 0xd5, 0x36, 0x8b, 0x86, 0x9f, 0x70, 0xcb, 0x77, 0x61, 0x29,
0x5f, 0x5d, 0x45, 0xb5, 0x42, 0xf0, 0xd9, 0xa2, 0xce, 0xc9, 0x0e, 0xb3, 0x91, 0x9b, 0x2b, 0x22,
0xde, 0x6e, 0x76, 0x0b, 0x63, 0xeb, 0x9f, 0x1a, 0xac, 0xbc, 0xe5, 0xf2, 0x6b, 0x85, 0x92, 0x19,
0xac, 0xcf, 0x34, 0xd8, 0x98, 0x63, 0x70, 0xe9, 0x42, 0x83, 0xcb, 0x57, 0x30, 0xf8, 0x03, 0x0d,
0x6e, 0xb7, 0x91, 0x77, 0x19, 0x3d, 0xc4, 0x4f, 0x8f, 0xd5, 0xbf, 0xd0, 0x60, 0x75, 0x7f, 0x10,
0x9d, 0x3c, 0xbf, 0x16, 0x5b, 0xbf, 0xd5, 0xe1, 0xa6, 0xaa, 0x4d, 0x7b, 0x69, 0xf5, 0xfd, 0x98,
0x12, 0x74, 0x0d, 0xea, 0x59, 0xc1, 0xcf, 0xd2, 0x74, 0x52, 0x94, 0x5b, 0x5a, 0x9a, 0x69, 0x69,
0x79, 0x8e, 0xa5, 0x95, 0xa2, 0x6f, 0xbf, 0x09, 0xcd, 0x7c, 0xd7, 0x91, 0xae, 0x55, 0xfb, 0xc6,
0xfd, 0xe9, 0xae, 0xcd, 0x70, 0x48, 0xcf, 0xe6, 0x1b, 0x96, 0x74, 0xec, 0xfb, 0x3a, 0xac, 0x88,
0xaa, 0xf6, 0x82, 0xd9, 0xe5, 0x99, 0xfd, 0x43, 0x83, 0xe5, 0xb7, 0x5c, 0x7e, 0x9d, 0xc8, 0xae,
0x37, 0xf9, 0xcf, 0x1b, 0x5b, 0xfe, 0xc8, 0xc6, 0xfe, 0x4b, 0x83, 0x56, 0x5a, 0xef, 0x3e, 0x1d,
0x16, 0x8b, 0x2d, 0x4d, 0xd4, 0xba, 0xe7, 0xd7, 0xda, 0x6b, 0x2e, 0xee, 0xff, 0xd6, 0x61, 0xb1,
0x13, 0x72, 0x64, 0xf1, 0x33, 0xb3, 0xf4, 0xb5, 0xf3, 0x1a, 0xab, 0xcb, 0xc9, 0x19, 0x5d, 0x2e,
0x75, 0x45, 0x11, 0xdc, 0x38, 0xf6, 0xc5, 0x89, 0x34, 0x3b, 0xdf, 0xe4, 0x82, 0xe2, 0x29, 0x5f,
0x95, 0x81, 0x89, 0x53, 0xfe, 0x04, 0xd5, 0x6a, 0x91, 0xea, 0xab, 0x00, 0x19, 0x7c, 0xde, 0x5a,
0x58, 0x33, 0xc4, 0x31, 0x3d, 0x97, 0x88, 0x1b, 0x10, 0x8b, 0x4e, 0x3a, 0x6d, 0xde, 0xaa, 0xad,
0x19, 0xe2, 0x06, 0xa4, 0x46, 0xe4, 0x2b, 0xb0, 0xc0, 0xa2, 0x13, 0xa7, 0xe7, 0xc6, 0x6e, 0x0b,
0xe4, 0x21, 0x7b, 0xce, 0x69, 0xb2, 0xca, 0xa2, 0x93, 0xb6, 0x1b, 0xbb, 0xd6, 0x53, 0x1d, 0x16,
0xdb, 0xe8, 0x63, 0x8c, 0xff, 0x7f, 0xe8, 0x05, 0x62, 0xa5, 0x39, 0xc4, 0xca, 0xf3, 0x88, 0x55,
0xce, 0x11, 0xbb, 0x07, 0x8d, 0x21, 0xa3, 0x81, 0xcb, 0xc6, 0x8e, 0x87, 0x63, 0x71, 0xbd, 0x31,
0x64, 0x95, 0x57, 0xb2, 0x87, 0x38, 0xe6, 0xd6, 0x87, 0x1a, 0x2c, 0xee, 0xa3, 0xcb, 0xba, 0x83,
0x67, 0x86, 0x61, 0x42, 0x7f, 0xa3, 0xa8, 0xff, 0xfc, 0x33, 0xf4, 0xe7, 0xc1, 0x64, 0xc8, 0x47,
0x7e, 0xec, 0xe4, 0x70, 0x14, 0x80, 0x25, 0x25, 0xdf, 0xc9, 0x10, 0x6d, 0x42, 0xf9, 0x78, 0x84,
0x6c, 0x7c, 0xf1, 0x6d, 0x42, 0xcd, 0xb3, 0xfe, 0xa6, 0x81, 0xb9, 0x3f, 0xe6, 0x3b, 0x51, 0x78,
0x44, 0xfb, 0xcf, 0x9d, 0xe5, 0x04, 0x4a, 0xd2, 0x5f, 0xe5, 0x35, 0x63, 0xbd, 0x66, 0xcb, 0x67,
0xe1, 0x4b, 0x0f, 0xc7, 0xce, 0x90, 0xe1, 0x11, 0x3d, 0x45, 0xe5, 0xed, 0x9a, 0x5d, 0xf7, 0x70,
0xbc, 0x97, 0x88, 0xac, 0xbf, 0xea, 0xd0, 0x48, 0x7d, 0x29, 0xf8, 0x5c, 0xc5, 0xa0, 0xfc, 0x4e,
0xac, 0x5f, 0xbe, 0xeb, 0x32, 0xfd, 0xa6, 0x34, 0xbb, 0x8e, 0xde, 0x83, 0x86, 0x74, 0x87, 0x13,
0x46, 0x3d, 0xcc, 0xbc, 0x5b, 0x97, 0xb2, 0x47, 0x52, 0x54, 0x04, 0x55, 0xb9, 0x4c, 0x88, 0x54,
0xa7, 0x87, 0x08, 0x81, 0xd2, 0x80, 0xc6, 0xaa, 0xae, 0x34, 0x6c, 0xf9, 0x4c, 0xee, 0x42, 0x3d,
0xc0, 0x98, 0xd1, 0xae, 0x42, 0x54, 0x93, 0xc9, 0x09, 0x4a, 0x24, 0x28, 0x58, 0xdf, 0x83, 0xfa,
0x01, 0x0d, 0xf0, 0x80, 0x76, 0xbd, 0x5d, 0xde, 0xbf, 0x0a, 0xcf, 0xbc, 0x7d, 0xa3, 0x17, 0xda,
0x37, 0x73, 0xb7, 0x20, 0xeb, 0xfb, 0x1a, 0x2c, 0xbc, 0xe9, 0x8f, 0xf8, 0xe0, 0x8a, 0x5f, 0x2f,
0x14, 0x6c, 0x7d, 0x4a, 0xc1, 0x9e, 0xa3, 0xc3, 0xcf, 0x35, 0xa8, 0x3e, 0xc4, 0xf1, 0xd6, 0x3e,
0xf6, 0xa5, 0x83, 0x45, 0xd1, 0x4d, 0xdb, 0x3a, 0x72, 0x20, 0x30, 0x4e, 0x94, 0x99, 0x64, 0x7d,
0xc8, 0xab, 0xcc, 0x05, 0xfb, 0xec, 0x6d, 0x58, 0xa0, 0xdc, 0x79, 0xec, 0xfa, 0xb4, 0x27, 0x03,
0x64, 0xc1, 0xae, 0x52, 0xfe, 0xb6, 0x18, 0x8a, 0x02, 0x97, 0xa9, 0xa9, 0xd2, 0xc1, 0xb0, 0x27,
0x24, 0xd6, 0x3b, 0x00, 0x89, 0x6a, 0x02, 0x50, 0x16, 0x7e, 0xda, 0x64, 0xf8, 0x7d, 0x15, 0xaa,
0x1e, 0x8e, 0xb7, 0x38, 0xf6, 0x5b, 0xba, 0xdc, 0x1d, 0x66, 0x51, 0x4b, 0x56, 0xb2, 0xd3, 0xe9,
0xd6, 0x0f, 0x74, 0xa8, 0x7d, 0x2b, 0x72, 0x7b, 0x9d, 0xb0, 0x87, 0xa7, 0xcf, 0x14, 0xff, 0x11,
0x45, 0xbf, 0xf7, 0x28, 0xdf, 0x20, 0x72, 0x81, 0xc8, 0x1e, 0x39, 0xc8, 0xb3, 0x27, 0x19, 0x0a,
0xec, 0x54, 0x68, 0xe6, 0x0c, 0xdd, 0x78, 0x90, 0x16, 0x0b, 0x90, 0xa2, 0x3d, 0x21, 0x21, 0x6d,
0x68, 0xa4, 0x13, 0x98, 0x1b, 0xa8, 0x92, 0x51, 0xdf, 0xba, 0x37, 0x35, 0x93, 0x1f, 0xe2, 0xf8,
0x6d, 0xd7, 0x1f, 0xe1, 0x9e, 0x4b, 0x99, 0x5d, 0x4f, 0x16, 0x11, 0x6f, 0x59, 0x4f, 0x35, 0x00,
0x49, 0x40, 0x24, 0xfb, 0xf9, 0x45, 0xb5, 0x8f, 0xb2, 0x28, 0xf9, 0x12, 0xac, 0x84, 0xa3, 0xc0,
0x61, 0xe8, 0xbb, 0x31, 0xf6, 0x9c, 0x04, 0x06, 0x4f, 0xe0, 0x90, 0x70, 0x14, 0xd8, 0xea, 0xa7,
0xfd, 0xe4, 0x17, 0xeb, 0x87, 0x1a, 0xc0, 0x9b, 0xc2, 0x72, 0xa5, 0xc6, 0xd9, 0x3b, 0x8e, 0x36,
0xe5, 0x8e, 0x33, 0x81, 0x4e, 0x2f, 0xa2, 0xdb, 0x4e, 0xd1, 0x89, 0xc2, 0xc5, 0x93, 0xde, 0xe7,
0xbd, 0x19, 0xee, 0xcc, 0x8d, 0x4f, 0xe8, 0xca, 0x67, 0xeb, 0x67, 0xaa, 0x5d, 0x2c, 0xb4, 0x53,
0x2a, 0x15, 0xbc, 0xac, 0x9d, 0xf5, 0xb2, 0xac, 0x35, 0x41, 0xc4, 0xc6, 0x0e, 0xa7, 0x4f, 0x30,
0x4d, 0x12, 0x25, 0xda, 0xa7, 0x4f, 0x50, 0xa4, 0x81, 0x44, 0x12, 0x9d, 0xf0, 0x74, 0xb7, 0x10,
0x18, 0xa2, 0x13, 0x4e, 0xbe, 0x00, 0x37, 0x18, 0x76, 0x31, 0x8c, 0xfd, 0xb1, 0x13, 0x44, 0x3d,
0x7a, 0x44, 0x31, 0x4d, 0x15, 0x33, 0xfd, 0x61, 0x37, 0x91, 0x5b, 0x7f, 0xd7, 0xa0, 0xf9, 0xed,
0xb4, 0x82, 0x2a, 0xcd, 0x9e, 0x41, 0xdd, 0xfa, 0x9a, 0x34, 0xb6, 0xc0, 0x6f, 0x4e, 0xef, 0x38,
0x83, 0x64, 0x2f, 0x70, 0xec, 0x2b, 0xa5, 0xb6, 0xa1, 0x2e, 0xdd, 0x91, 0xac, 0x51, 0x9a, 0xeb,
0x83, 0xdc, 0xf3, 0x36, 0x1c, 0x65, 0xcf, 0xd6, 0x2f, 0x75, 0x20, 0xaa, 0xe7, 0x20, 0x9d, 0xf4,
0xdc, 0x5d, 0x14, 0x5e, 0x9b, 0x7e, 0x51, 0x38, 0x7f, 0x02, 0xfc, 0x0c, 0x28, 0xb3, 0xf2, 0x86,
0x60, 0xa1, 0x08, 0xb4, 0xa1, 0x81, 0xa7, 0x31, 0x73, 0xd3, 0xa4, 0xab, 0x5e, 0x3a, 0xe9, 0xe4,
0x6b, 0x49, 0x26, 0xbf, 0xab, 0xc3, 0x4a, 0x7a, 0x95, 0x7c, 0xc1, 0xeb, 0x62, 0x5e, 0xbf, 0xd3,
0xe1, 0x95, 0x02, 0xaf, 0x3d, 0x16, 0xf5, 0x19, 0x72, 0xfe, 0x82, 0xdb, 0x3c, 0x6e, 0xaf, 0xff,
0xc5, 0x80, 0x6a, 0x62, 0x30, 0xa9, 0x41, 0xd9, 0x7b, 0x14, 0x85, 0x68, 0xbe, 0x44, 0x56, 0xe1,
0x86, 0x77, 0xf6, 0x27, 0x88, 0xd9, 0x23, 0xcb, 0xb0, 0xe4, 0x15, 0xff, 0x0f, 0x98, 0x48, 0x08,
0x34, 0xbd, 0x42, 0x7b, 0xdc, 0x3c, 0x22, 0xb7, 0x60, 0xd9, 0x3b, 0xdf, 0x41, 0x36, 0xc5, 0xb9,
0x6f, 0x7a, 0xc5, 0x26, 0x2b, 0x37, 0x07, 0x72, 0x89, 0x6f, 0x61, 0x9c, 0x55, 0x0b, 0xdc, 0xa4,
0x64, 0x15, 0x4c, 0xef, 0x4c, 0xaf, 0xd3, 0xfc, 0x83, 0x46, 0x96, 0xa1, 0xe9, 0x15, 0x9a, 0x79,
0xe6, 0x07, 0x1a, 0x21, 0xb0, 0xe8, 0x4d, 0x76, 0xab, 0xcc, 0x3f, 0x6a, 0xe4, 0x16, 0x10, 0xef,
0x5c, 0x53, 0xc7, 0xfc, 0x93, 0x46, 0x56, 0x60, 0xc9, 0x2b, 0xf4, 0x3e, 0xb8, 0xf9, 0x67, 0x8d,
0xdc, 0x80, 0x86, 0x37, 0x41, 0x4f, 0xe6, 0xaf, 0x75, 0xb5, 0xd5, 0x64, 0x4c, 0x99, 0xef, 0xea,
0xe4, 0x0e, 0xdc, 0xf4, 0xa6, 0x06, 0x9a, 0xf9, 0x9e, 0x4e, 0x1a, 0x50, 0xf5, 0x54, 0x97, 0xc1,
0xfc, 0xb1, 0x21, 0x47, 0xaa, 0xfc, 0x35, 0x7f, 0x62, 0x90, 0x3a, 0x54, 0x3c, 0x79, 0xcf, 0x34,
0x7f, 0xaa, 0x5e, 0xa9, 0x32, 0xc2, 0xfc, 0xc8, 0x90, 0xea, 0x4f, 0x16, 0x15, 0xe6, 0x7f, 0x0c,
0xd2, 0x84, 0x9a, 0x97, 0xde, 0x8b, 0xcd, 0xdf, 0xd4, 0xa4, 0xd6, 0xc5, 0xa3, 0xc2, 0x7c, 0xbf,
0x46, 0x96, 0x00, 0xbc, 0xec, 0xda, 0x63, 0xfe, 0xb6, 0xf6, 0xfa, 0x1b, 0xb0, 0x90, 0xfe, 0x87,
0x24, 0x00, 0x95, 0x5d, 0x97, 0xc7, 0xc8, 0xcc, 0x97, 0xc4, 0xb3, 0x8d, 0x6e, 0x0f, 0x99, 0xa9,
0x89, 0xe7, 0xef, 0x31, 0x2a, 0xe4, 0xba, 0xf0, 0xf9, 0x9e, 0x08, 0x4c, 0xd3, 0xd8, 0x6e, 0x7f,
0x7f, 0xbb, 0x4f, 0xe3, 0xc1, 0xe8, 0x50, 0x44, 0xcd, 0xe6, 0x13, 0xea, 0xfb, 0xf4, 0x49, 0x8c,
0xdd, 0xc1, 0xa6, 0x8a, 0xa8, 0x2f, 0xf7, 0x28, 0x8f, 0x19, 0x3d, 0x1c, 0xc5, 0xd8, 0xdb, 0x4c,
0x93, 0x65, 0x53, 0x86, 0x59, 0x36, 0x1c, 0x1e, 0x1e, 0x56, 0xa4, 0xe4, 0xc1, 0xff, 0x02, 0x00,
0x00, 0xff, 0xff, 0xc4, 0x5d, 0xb1, 0xba, 0xf8, 0x1f, 0x00, 0x00,
0x86, 0x77, 0xf6, 0xcf, 0x0b, 0xb3, 0x47, 0x96, 0x61, 0xc9, 0x2b, 0xf6, 0xf5, 0x4d, 0x24, 0x04,
0x9a, 0x5e, 0xa1, 0xad, 0x6d, 0x1e, 0x91, 0x5b, 0xb0, 0xec, 0x9d, 0xef, 0xfc, 0x9a, 0x62, 0xdf,
0x37, 0xbd, 0x62, 0x73, 0x94, 0x9b, 0x03, 0xb9, 0xc4, 0x37, 0x30, 0xce, 0x4e, 0xf9, 0xdc, 0xa4,
0x64, 0x15, 0x4c, 0xef, 0x4c, 0x8f, 0xd2, 0xfc, 0xbd, 0x46, 0x96, 0xa1, 0xe9, 0x15, 0x9a, 0x70,
0xe6, 0x1f, 0x34, 0x42, 0x60, 0xd1, 0x9b, 0xec, 0x32, 0x99, 0x7f, 0xd4, 0xc8, 0x2d, 0x20, 0xde,
0xb9, 0x66, 0x8c, 0xf9, 0x27, 0x8d, 0xac, 0xc0, 0x92, 0x57, 0xe8, 0x59, 0x70, 0xf3, 0xcf, 0x1a,
0xb9, 0x01, 0x0d, 0x6f, 0xa2, 0x3c, 0x99, 0xbf, 0xd2, 0xd5, 0xa7, 0x26, 0x63, 0xca, 0x7c, 0x57,
0x27, 0x77, 0xe0, 0xa6, 0x37, 0x35, 0xd0, 0xcc, 0xf7, 0x74, 0xd2, 0x80, 0xaa, 0xa7, 0xba, 0x03,
0xe6, 0x8f, 0x0c, 0x39, 0x52, 0xd7, 0x56, 0xf3, 0xc7, 0x06, 0xa9, 0x43, 0xc5, 0x93, 0xe7, 0x43,
0xf3, 0x27, 0xea, 0x27, 0x75, 0xfc, 0x37, 0x3f, 0x34, 0xa4, 0xfa, 0x93, 0x97, 0x01, 0xf3, 0x3f,
0x06, 0x69, 0x42, 0xcd, 0x4b, 0xcf, 0xb3, 0xe6, 0xaf, 0x6b, 0x52, 0xeb, 0xe2, 0x56, 0x61, 0xbe,
0x5f, 0x23, 0x4b, 0x00, 0x5e, 0x76, 0xec, 0x31, 0x7f, 0x53, 0x7b, 0xfd, 0x0d, 0x58, 0x48, 0xff,
0x3f, 0x24, 0x00, 0x95, 0x5d, 0x97, 0xc7, 0xc8, 0xcc, 0x97, 0xc4, 0xb3, 0x8d, 0x6e, 0x0f, 0x99,
0xa9, 0x89, 0xe7, 0xef, 0x30, 0x2a, 0xe4, 0xba, 0xf0, 0xf9, 0x9e, 0x08, 0x4c, 0xd3, 0xd8, 0x6e,
0x7f, 0x77, 0xbb, 0x4f, 0xe3, 0xc1, 0xe8, 0x50, 0x44, 0xcd, 0xe6, 0x13, 0xea, 0xfb, 0xf4, 0x49,
0x8c, 0xdd, 0xc1, 0xa6, 0x8a, 0xa8, 0x2f, 0xf6, 0x28, 0x8f, 0x19, 0x3d, 0x1c, 0xc5, 0xd8, 0xdb,
0x4c, 0x93, 0x65, 0x53, 0x86, 0x59, 0x36, 0x1c, 0x1e, 0x1e, 0x56, 0xa4, 0xe4, 0xc1, 0xff, 0x02,
0x00, 0x00, 0xff, 0xff, 0xc1, 0x89, 0xef, 0x28, 0xb0, 0x1f, 0x00, 0x00,
}

View File

@ -11,6 +11,9 @@ import (
"strings"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
minioKV "github.com/zilliztech/milvus-distributed/internal/kv/minio"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
@ -35,17 +38,16 @@ type loadIndexService struct {
func newLoadIndexService(ctx context.Context, replica collectionReplica) *loadIndexService {
ctx1, cancel := context.WithCancel(ctx)
option := &minioKV.Option{
Address: Params.MinioEndPoint,
AccessKeyID: Params.MinioAccessKeyID,
SecretAccessKeyID: Params.MinioSecretAccessKey,
UseSSL: Params.MinioUseSSLStr,
CreateBucket: true,
BucketName: Params.MinioBucketName,
// init minio
minioClient, err := minio.New(Params.MinioEndPoint, &minio.Options{
Creds: credentials.NewStaticV4(Params.MinioAccessKeyID, Params.MinioSecretAccessKey, ""),
Secure: Params.MinioUseSSLStr,
})
if err != nil {
panic(err)
}
// TODO: load bucketName from config
MinioKV, err := minioKV.NewMinIOKV(ctx1, option)
MinioKV, err := minioKV.NewMinIOKV(ctx1, minioClient, Params.MinioBucketName)
if err != nil {
panic(err)
}

View File

@ -5,6 +5,8 @@ import (
"sort"
"testing"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/stretchr/testify/assert"
"github.com/zilliztech/milvus-distributed/internal/indexbuilder"
@ -66,16 +68,13 @@ func TestLoadIndexService(t *testing.T) {
binarySet, err := index.Serialize()
assert.Equal(t, err, nil)
option := &minioKV.Option{
Address: Params.MinioEndPoint,
AccessKeyID: Params.MinioAccessKeyID,
SecretAccessKeyID: Params.MinioSecretAccessKey,
UseSSL: Params.MinioUseSSLStr,
BucketName: Params.MinioBucketName,
CreateBucket: true,
}
minioKV, err := minioKV.NewMinIOKV(node.queryNodeLoopCtx, option)
//save index to minio
minioClient, err := minio.New(Params.MinioEndPoint, &minio.Options{
Creds: credentials.NewStaticV4(Params.MinioAccessKeyID, Params.MinioSecretAccessKey, ""),
Secure: Params.MinioUseSSLStr,
})
assert.Equal(t, err, nil)
minioKV, err := minioKV.NewMinIOKV(node.queryNodeLoopCtx, minioClient, Params.MinioBucketName)
assert.Equal(t, err, nil)
indexPaths := make([]string, 0)
for _, index := range binarySet {

View File

@ -0,0 +1,134 @@
package s3driver
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
storagetype "github.com/zilliztech/milvus-distributed/internal/storage/type"
)
// Shared fixtures for the S3 driver tests: one driver instance is built
// once at package load and reused by every test in the file.
// NOTE(review): these tests target a real bucket ("zilliz-hz") and so need
// live S3 credentials — confirm they are excluded from offline CI runs.
// NOTE(review): the package-level `err` is reassigned by every test, which
// makes the tests order-dependent; consider per-test error variables.
var option = storagetype.Option{BucketName: "zilliz-hz"}
var ctx = context.Background()
var client, err = NewS3Driver(ctx, option)
// TestS3Driver_PutRowAndGetRow writes several versions of the same row key
// and checks what GetRow returns at various timestamps.
// NOTE(review): the reads at timestamp 5 return the value written at
// timestamp 3, so GetRow presumably resolves to the newest version whose
// timestamp is <= the query timestamp — confirm against the driver codec.
func TestS3Driver_PutRowAndGetRow(t *testing.T) {
err = client.PutRow(ctx, []byte("bar"), []byte("abcdefghijklmnoopqrstuvwxyz"), "SegmentA", 1)
assert.Nil(t, err)
err = client.PutRow(ctx, []byte("bar"), []byte("djhfkjsbdfbsdughorsgsdjhgoisdgh"), "SegmentA", 2)
assert.Nil(t, err)
err = client.PutRow(ctx, []byte("bar"), []byte("123854676ershdgfsgdfk,sdhfg;sdi8"), "SegmentB", 3)
assert.Nil(t, err)
err = client.PutRow(ctx, []byte("bar1"), []byte("testkeybarorbar_1"), "SegmentC", 3)
assert.Nil(t, err)
object, _ := client.GetRow(ctx, []byte("bar"), 1)
assert.Equal(t, "abcdefghijklmnoopqrstuvwxyz", string(object))
object, _ = client.GetRow(ctx, []byte("bar"), 2)
assert.Equal(t, "djhfkjsbdfbsdughorsgsdjhgoisdgh", string(object))
object, _ = client.GetRow(ctx, []byte("bar"), 5)
assert.Equal(t, "123854676ershdgfsgdfk,sdhfg;sdi8", string(object))
object, _ = client.GetRow(ctx, []byte("bar1"), 5)
assert.Equal(t, "testkeybarorbar_1", string(object))
}
// TestS3Driver_DeleteRow deletes rows at timestamp 5 and checks that reads
// at the later timestamp 6 see no value.
// NOTE(review): this relies on state written by the previous test in this
// file (the "bar"/"bar1" rows) — the tests are not independent.
func TestS3Driver_DeleteRow(t *testing.T) {
err = client.DeleteRow(ctx, []byte("bar"), 5)
assert.Nil(t, err)
object, _ := client.GetRow(ctx, []byte("bar"), 6)
assert.Nil(t, object)
err = client.DeleteRow(ctx, []byte("bar1"), 5)
assert.Nil(t, err)
object2, _ := client.GetRow(ctx, []byte("bar1"), 6)
assert.Nil(t, object2)
}
// TestS3Driver_GetSegments writes rows for key "seg" into two segments and
// checks that GetSegments at timestamp 4 reports exactly those two segment
// names. The order of the returned slice is not guaranteed, hence the
// branch on segements[0].
func TestS3Driver_GetSegments(t *testing.T) {
err = client.PutRow(ctx, []byte("seg"), []byte("abcdefghijklmnoopqrstuvwxyz"), "SegmentA", 1)
assert.Nil(t, err)
err = client.PutRow(ctx, []byte("seg"), []byte("djhfkjsbdfbsdughorsgsdjhgoisdgh"), "SegmentA", 2)
assert.Nil(t, err)
err = client.PutRow(ctx, []byte("seg"), []byte("123854676ershdgfsgdfk,sdhfg;sdi8"), "SegmentB", 3)
assert.Nil(t, err)
err = client.PutRow(ctx, []byte("seg2"), []byte("testkeybarorbar_1"), "SegmentC", 1)
assert.Nil(t, err)
segements, err := client.GetSegments(ctx, []byte("seg"), 4)
assert.Nil(t, err)
assert.Equal(t, 2, len(segements))
if segements[0] == "SegmentA" {
assert.Equal(t, "SegmentA", segements[0])
assert.Equal(t, "SegmentB", segements[1])
} else {
assert.Equal(t, "SegmentB", segements[0])
assert.Equal(t, "SegmentA", segements[1])
}
}
// TestS3Driver_PutRowsAndGetRows exercises the batch row API: PutRows
// writes parallel slices of keys/values/segments/timestamps, and GetRows
// must return the values in the same order as the requested keys.
func TestS3Driver_PutRowsAndGetRows(t *testing.T) {
keys := [][]byte{[]byte("foo"), []byte("bar")}
values := [][]byte{[]byte("The key is foo!"), []byte("The key is bar!")}
segments := []string{"segmentA", "segmentB"}
timestamps := []uint64{1, 2}
err = client.PutRows(ctx, keys, values, segments, timestamps)
assert.Nil(t, err)
objects, err := client.GetRows(ctx, keys, timestamps)
assert.Nil(t, err)
assert.Equal(t, "The key is foo!", string(objects[0]))
assert.Equal(t, "The key is bar!", string(objects[1]))
}
// TestS3Driver_DeleteRows batch-deletes the rows written by the previous
// test at timestamp 3 and checks that reads at the same timestamp return
// nil values.
// NOTE(review): depends on TestS3Driver_PutRowsAndGetRows having run first.
func TestS3Driver_DeleteRows(t *testing.T) {
keys := [][]byte{[]byte("foo"), []byte("bar")}
timestamps := []uint64{3, 3}
err := client.DeleteRows(ctx, keys, timestamps)
assert.Nil(t, err)
objects, err := client.GetRows(ctx, keys, timestamps)
assert.Nil(t, err)
assert.Nil(t, objects[0])
assert.Nil(t, objects[1])
}
// TestS3Driver_PutLogAndGetLog writes four log entries on different
// channels and reads them back for timestamps [0, 5) over a channel set.
// NOTE(review): the expected order (select, update, delete, insert) implies
// GetLog sorts by channel/timestamp in a driver-specific way — confirm
// against the log codec before relying on this ordering elsewhere.
func TestS3Driver_PutLogAndGetLog(t *testing.T) {
err = client.PutLog(ctx, []byte("insert"), []byte("This is insert log!"), 1, 11)
assert.Nil(t, err)
err = client.PutLog(ctx, []byte("delete"), []byte("This is delete log!"), 2, 10)
assert.Nil(t, err)
err = client.PutLog(ctx, []byte("update"), []byte("This is update log!"), 3, 9)
assert.Nil(t, err)
err = client.PutLog(ctx, []byte("select"), []byte("This is select log!"), 4, 8)
assert.Nil(t, err)
channels := []int{5, 8, 9, 10, 11, 12, 13}
logValues, err := client.GetLog(ctx, 0, 5, channels)
assert.Nil(t, err)
assert.Equal(t, "This is select log!", string(logValues[0]))
assert.Equal(t, "This is update log!", string(logValues[1]))
assert.Equal(t, "This is delete log!", string(logValues[2]))
assert.Equal(t, "This is insert log!", string(logValues[3]))
}
// TestS3Driver_Segment round-trips a segment index blob: put, get (must
// match byte-for-byte), then delete.
func TestS3Driver_Segment(t *testing.T) {
err := client.PutSegmentIndex(ctx, "segmentA", []byte("This is segmentA's index!"))
assert.Nil(t, err)
segmentIndex, err := client.GetSegmentIndex(ctx, "segmentA")
assert.Equal(t, "This is segmentA's index!", string(segmentIndex))
assert.Nil(t, err)
err = client.DeleteSegmentIndex(ctx, "segmentA")
assert.Nil(t, err)
}
// TestS3Driver_SegmentDL round-trips a segment delete-log blob: put, get
// (must match byte-for-byte), then delete.
func TestS3Driver_SegmentDL(t *testing.T) {
err := client.PutSegmentDL(ctx, "segmentB", []byte("This is segmentB's delete log!"))
assert.Nil(t, err)
segmentDL, err := client.GetSegmentDL(ctx, "segmentB")
assert.Nil(t, err)
assert.Equal(t, "This is segmentB's delete log!", string(segmentDL))
err = client.DeleteSegmentDL(ctx, "segmentB")
assert.Nil(t, err)
}

View File

@ -0,0 +1,173 @@
package s3driver
import (
"bytes"
"context"
"io"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
. "github.com/zilliztech/milvus-distributed/internal/storage/type"
)
// S3Store is a key/value store backed by AWS S3: each Key/Value pair is
// persisted as one S3 object in the package-level bucket (bucketName).
type S3Store struct {
client *s3.S3
}
// NewS3Store builds an S3Store on top of a fresh AWS session created from
// the supplied config. session.Must panics if the session cannot be
// constructed; otherwise the returned error is always nil.
func NewS3Store(config aws.Config) (*S3Store, error) {
	sess := session.Must(session.NewSession(&config))
	return &S3Store{client: s3.New(sess)}, nil
}
// Put stores value as the S3 object named by key in the configured bucket.
// The request is scoped to ctx, so cancellation aborts the upload.
// (The previously commented-out s3manager uploader experiment was removed
// as dead code.)
func (s *S3Store) Put(ctx context.Context, key Key, value Value) error {
	_, err := s.client.PutObjectWithContext(ctx, &s3.PutObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(string(key)),
		Body:   bytes.NewReader(value),
	})
	return err
}
// Get fetches the S3 object named by key and returns its full contents.
//
// Bug fixes over the original:
//   - the single Read into a fixed 256 KiB buffer truncated larger objects
//     and could legally return fewer bytes than available even for small
//     ones; the body is now drained completely via io.Copy.
//   - the response body is now closed, so the underlying HTTP connection
//     can be reused instead of leaking.
func (s *S3Store) Get(ctx context.Context, key Key) (Value, error) {
	object, err := s.client.GetObjectWithContext(ctx, &s3.GetObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(string(key)),
	})
	if err != nil {
		return nil, err
	}
	defer object.Body.Close()

	var buf bytes.Buffer
	if _, err := io.Copy(&buf, object.Body); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// GetByPrefix lists every object whose key starts with prefix and returns
// the matching keys. When keyOnly is false the object contents are fetched
// as well (one Get per key); otherwise the values slice stays empty.
func (s *S3Store) GetByPrefix(ctx context.Context, prefix Key, keyOnly bool) ([]Key, []Value, error) {
	listing, err := s.client.ListObjectsWithContext(ctx, &s3.ListObjectsInput{
		Bucket: aws.String(bucketName),
		Prefix: aws.String(string(prefix)),
	})
	if err != nil || listing == nil {
		return nil, nil, err
	}

	var keys []Key
	var values []Value
	for _, obj := range listing.Contents {
		keys = append(keys, []byte(*obj.Key))
		if keyOnly {
			continue
		}
		value, err := s.Get(ctx, []byte(*obj.Key))
		if err != nil {
			return nil, nil, err
		}
		values = append(values, value)
	}
	return keys, values, nil
}
// Scan lists objects under the keyStart prefix and collects those whose key
// compares >= keyEnd, stopping after limit matches.
//
// NOTE(review): keys built by codec.MvccEncode embed the bitwise-inverted
// timestamp, so ">= keyEnd" appears to select entries with timestamp <= the
// bound — confirm against the codec before "fixing" this comparison.
// NOTE(review): limit < 0 wraps limitCount (uint) to a huge value, acting as
// "unlimited"; "limitCount <= 0" on a uint only matches zero.
// NOTE(review): ListObjects returns at most one page of results; no
// pagination is performed here.
func (s *S3Store) Scan(ctx context.Context, keyStart Key, keyEnd Key, limit int, keyOnly bool) ([]Key, []Value, error) {
	var keys []Key
	var values []Value
	limitCount := uint(limit)
	objects, err := s.client.ListObjectsWithContext(ctx, &s3.ListObjectsInput{
		Bucket: aws.String(bucketName),
		Prefix: aws.String(string(keyStart)),
	})
	if err == nil && objects != nil {
		for _, object := range objects.Contents {
			if *object.Key >= string(keyEnd) {
				keys = append(keys, []byte(*object.Key))
				if !keyOnly {
					value, err := s.Get(ctx, []byte(*object.Key))
					if err != nil {
						return nil, nil, err
					}
					values = append(values, value)
				}
				limitCount--
				if limitCount <= 0 {
					break
				}
			}
		}
	}
	return keys, values, err
}
// Delete removes the object stored under key.
func (s *S3Store) Delete(ctx context.Context, key Key) error {
	input := &s3.DeleteObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(string(key)),
	}
	_, err := s.client.DeleteObjectWithContext(ctx, input)
	return err
}
// DeleteByPrefix removes every listed object whose key starts with prefix.
//
// Fix: the original returned from inside the loop after the first delete, so
// at most one object was ever removed, and a listing error was swallowed.
// NOTE(review): ListObjects returns at most one page; larger prefixes need
// pagination.
func (s *S3Store) DeleteByPrefix(ctx context.Context, prefix Key) error {
	objects, err := s.client.ListObjectsWithContext(ctx, &s3.ListObjectsInput{
		Bucket: aws.String(bucketName),
		Prefix: aws.String(string(prefix)),
	})
	if err != nil {
		return err
	}
	if objects == nil {
		return nil
	}
	for _, object := range objects.Contents {
		if _, err := s.client.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
			Bucket: aws.String(bucketName),
			Key:    object.Key,
		}); err != nil {
			return err
		}
	}
	return nil
}
// DeleteRange removes every listed object under the keyStart prefix whose
// key compares > keyEnd.
//
// Fix: the original returned from inside the loop after the first matching
// delete, so at most one object was ever removed, and a listing error was
// swallowed.
// NOTE(review): the "> keyEnd" comparison mirrors Scan's inverted-timestamp
// ordering — confirm against the codec before changing it.
func (s *S3Store) DeleteRange(ctx context.Context, keyStart Key, keyEnd Key) error {
	objects, err := s.client.ListObjectsWithContext(ctx, &s3.ListObjectsInput{
		Bucket: aws.String(bucketName),
		Prefix: aws.String(string(keyStart)),
	})
	if err != nil {
		return err
	}
	if objects == nil {
		return nil
	}
	for _, object := range objects.Contents {
		if *object.Key > string(keyEnd) {
			if _, err := s.client.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
				Bucket: aws.String(bucketName),
				Key:    object.Key,
			}); err != nil {
				return err
			}
		}
	}
	return nil
}

View File

@ -0,0 +1,339 @@
package s3driver
import (
"context"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/zilliztech/milvus-distributed/internal/storage/internal/minio/codec"
. "github.com/zilliztech/milvus-distributed/internal/storage/type"
)
// S3Driver exposes MVCC-style row/log/segment operations on top of S3Store.
type S3Driver struct {
	driver *S3Store
}

// bucketName is the package-wide S3 bucket used by every S3Store call; it is
// assigned once in NewS3Driver.
var bucketName string
// NewS3Driver creates an S3-backed driver for the bucket named in option.
// NOTE(review): the region is hard-coded to cn-northwest-1, and the MVCC key
// codec is imported from the minio package — confirm both are intentional.
func NewS3Driver(ctx context.Context, option Option) (*S3Driver, error) {
	// to-do read conf
	bucketName = option.BucketName
	S3Client, err := NewS3Store(aws.Config{
		Region: aws.String(endpoints.CnNorthwest1RegionID)})
	if err != nil {
		return nil, err
	}
	return &S3Driver{
		S3Client,
	}, nil
}
// put stores value under the MVCC-encoded form of key (timestamp + suffix).
func (s *S3Driver) put(ctx context.Context, key Key, value Value, timestamp Timestamp, suffix string) error {
	encodedKey, err := codec.MvccEncode(key, timestamp, suffix)
	if err != nil {
		return err
	}
	return s.driver.Put(ctx, encodedKey, value)
}
// scanLE returns every stored version of key with timestamp <= the given
// timestamp, along with each version's decoded timestamp.
// NOTE(review): MvccDecode errors are discarded here — a malformed key
// silently yields a zero timestamp.
func (s *S3Driver) scanLE(ctx context.Context, key Key, timestamp Timestamp, keyOnly bool) ([]Timestamp, []Key, []Value, error) {
	keyEnd, err := codec.MvccEncode(key, timestamp, "")
	if err != nil {
		return nil, nil, nil, err
	}
	keys, values, err := s.driver.Scan(ctx, key, []byte(keyEnd), -1, keyOnly)
	if err != nil {
		return nil, nil, nil, err
	}
	var timestamps []Timestamp
	for _, key := range keys {
		_, timestamp, _, _ := codec.MvccDecode(key)
		timestamps = append(timestamps, timestamp)
	}
	return timestamps, keys, values, nil
}
// scanGE is intended to return versions of key with timestamp >= the given
// timestamp.
// NOTE(review): keyStart is passed as the *end* bound of Scan, making this
// body identical to scanLE; with the inverted-timestamp encoding it appears
// to select timestamps <= the bound rather than >= — verify before relying
// on it.
// NOTE(review): MvccDecode errors are discarded in the loop below.
func (s *S3Driver) scanGE(ctx context.Context, key Key, timestamp Timestamp, keyOnly bool) ([]Timestamp, []Key, []Value, error) {
	keyStart, err := codec.MvccEncode(key, timestamp, "")
	if err != nil {
		return nil, nil, nil, err
	}
	keys, values, err := s.driver.Scan(ctx, key, keyStart, -1, keyOnly)
	if err != nil {
		return nil, nil, nil, err
	}
	var timestamps []Timestamp
	for _, key := range keys {
		_, timestamp, _, _ := codec.MvccDecode(key)
		timestamps = append(timestamps, timestamp)
	}
	return timestamps, keys, values, nil
}
//scan(ctx context.Context, key Key, start Timestamp, end Timestamp, withValue bool) ([]Timestamp, []Key, []Value, error)
// deleteLE drops every version of key at or below timestamp by deleting the
// encoded range ending at the tombstone key.
func (s *S3Driver) deleteLE(ctx context.Context, key Key, timestamp Timestamp) error {
	upper, err := codec.MvccEncode(key, timestamp, "delete")
	if err != nil {
		return err
	}
	return s.driver.DeleteRange(ctx, key, upper)
}
// deleteGE removes versions of key from timestamp up to the newest stored
// version.
//
// Fixes: an encode failure is returned instead of panicking (no panics in
// library code), and an empty prefix listing no longer indexes
// keys[len(keys)-1] out of range.
func (s *S3Driver) deleteGE(ctx context.Context, key Key, timestamp Timestamp) error {
	keys, _, err := s.driver.GetByPrefix(ctx, key, true)
	if err != nil {
		return err
	}
	if len(keys) == 0 {
		return nil
	}
	keyStart, err := codec.MvccEncode(key, timestamp, "")
	if err != nil {
		return err
	}
	return s.driver.DeleteRange(ctx, keyStart, keys[len(keys)-1])
}
// deleteRange drops the versions of key whose encoded timestamps fall
// between start and end.
func (s *S3Driver) deleteRange(ctx context.Context, key Key, start Timestamp, end Timestamp) error {
	lower, err := codec.MvccEncode(key, start, "")
	if err != nil {
		return err
	}
	upper, err := codec.MvccEncode(key, end, "")
	if err != nil {
		return err
	}
	return s.driver.DeleteRange(ctx, lower, upper)
}
// GetRow returns the newest value of key at or before timestamp, or nil when
// the key is absent or its latest version is a delete tombstone.
//
// Fix: the Scan error is now checked explicitly before the results are used;
// previously it was only returned when the result slices happened to be nil.
func (s *S3Driver) GetRow(ctx context.Context, key Key, timestamp Timestamp) (Value, error) {
	minioKey, err := codec.MvccEncode(key, timestamp, "")
	if err != nil {
		return nil, err
	}
	// NOTE(review): append may write into key's backing array when it has
	// spare capacity — callers should not share key across goroutines.
	keys, values, err := s.driver.Scan(ctx, append(key, byte('_')), minioKey, 1, false)
	if err != nil {
		return nil, err
	}
	if len(keys) == 0 || len(values) == 0 {
		return nil, nil
	}
	_, _, suffix, err := codec.MvccDecode(keys[0])
	if err != nil {
		return nil, err
	}
	if suffix == "delete" {
		return nil, nil
	}
	return values[0], nil
}
// GetRows fetches one row per (keys[i], timestamps[i]) pair, in order.
func (s *S3Driver) GetRows(ctx context.Context, keys []Key, timestamps []Timestamp) ([]Value, error) {
	var results []Value
	for i := range keys {
		row, err := s.GetRow(ctx, keys[i], timestamps[i])
		if err != nil {
			return nil, err
		}
		results = append(results, row)
	}
	return results, nil
}
// PutRow writes value for key at timestamp, tagged with its segment name.
func (s *S3Driver) PutRow(ctx context.Context, key Key, value Value, segment string, timestamp Timestamp) error {
	encoded, err := codec.MvccEncode(key, timestamp, segment)
	if err != nil {
		return err
	}
	return s.driver.Put(ctx, encoded, value)
}
// PutRows writes all rows concurrently, fanning out one goroutine per batch
// (batches sized so at most ~100 goroutines run).
//
// Fix: errCh is now buffered to len(keys). With the previous unbuffered
// channel, an early error return below left every remaining worker blocked
// forever on its send — a goroutine leak.
func (s *S3Driver) PutRows(ctx context.Context, keys []Key, values []Value, segments []string, timestamps []Timestamp) error {
	maxThread := 100
	batchSize := 1
	keysLength := len(keys)
	if keysLength/batchSize > maxThread {
		batchSize = keysLength / maxThread
	}
	batchNums := keysLength / batchSize
	if keysLength%batchSize != 0 {
		batchNums = keysLength/batchSize + 1
	}
	errCh := make(chan error, len(keys))
	f := func(ctx2 context.Context, keys2 []Key, values2 []Value, segments2 []string, timestamps2 []Timestamp) {
		for i := 0; i < len(keys2); i++ {
			errCh <- s.PutRow(ctx2, keys2[i], values2[i], segments2[i], timestamps2[i])
		}
	}
	for i := 0; i < batchNums; i++ {
		j := i // per-iteration copy for the goroutine below
		go func() {
			start, end := j*batchSize, (j+1)*batchSize
			if len(keys) < end {
				end = len(keys)
			}
			f(ctx, keys[start:end], values[start:end], segments[start:end], timestamps[start:end])
		}()
	}
	// One result per key; returns the first error seen.
	for i := 0; i < len(keys); i++ {
		if err := <-errCh; err != nil {
			return err
		}
	}
	return nil
}
// GetSegments returns the distinct segment names holding versions of key at
// or before timestamp, excluding delete tombstones.
//
// Fix: a malformed key now returns the decode error instead of panicking.
func (s *S3Driver) GetSegments(ctx context.Context, key Key, timestamp Timestamp) ([]string, error) {
	keyEnd, err := codec.MvccEncode(key, timestamp, "")
	if err != nil {
		return nil, err
	}
	keys, _, err := s.driver.Scan(ctx, append(key, byte('_')), keyEnd, -1, true)
	if err != nil {
		return nil, err
	}
	segmentsSet := map[string]bool{}
	for _, key := range keys {
		_, _, segment, err := codec.MvccDecode(key)
		if err != nil {
			return nil, err
		}
		if segment != "delete" {
			segmentsSet[segment] = true
		}
	}
	var segments []string
	for k, v := range segmentsSet {
		if v {
			segments = append(segments, k)
		}
	}
	return segments, nil
}
// DeleteRow writes a "delete" tombstone for key at timestamp.
func (s *S3Driver) DeleteRow(ctx context.Context, key Key, timestamp Timestamp) error {
	tombstone, err := codec.MvccEncode(key, timestamp, "delete")
	if err != nil {
		return err
	}
	return s.driver.Put(ctx, tombstone, []byte("0"))
}
// DeleteRows writes delete tombstones for all keys concurrently, in batches
// sized so at most ~100 goroutines run.
//
// Fix: errCh is now buffered to len(keys); with the previous unbuffered
// channel an early error return left remaining workers blocked on their
// sends forever (goroutine leak).
func (s *S3Driver) DeleteRows(ctx context.Context, keys []Key, timestamps []Timestamp) error {
	maxThread := 100
	batchSize := 1
	keysLength := len(keys)
	if keysLength/batchSize > maxThread {
		batchSize = keysLength / maxThread
	}
	batchNums := keysLength / batchSize
	if keysLength%batchSize != 0 {
		batchNums = keysLength/batchSize + 1
	}
	errCh := make(chan error, len(keys))
	f := func(ctx2 context.Context, keys2 []Key, timestamps2 []Timestamp) {
		for i := 0; i < len(keys2); i++ {
			errCh <- s.DeleteRow(ctx2, keys2[i], timestamps2[i])
		}
	}
	for i := 0; i < batchNums; i++ {
		j := i // per-iteration copy for the goroutine below
		go func() {
			start, end := j*batchSize, (j+1)*batchSize
			if len(keys) < end {
				end = len(keys)
			}
			f(ctx, keys[start:end], timestamps[start:end])
		}()
	}
	for i := 0; i < len(keys); i++ {
		if err := <-errCh; err != nil {
			return err
		}
	}
	return nil
}
// PutLog stores a log entry for key on the given channel at timestamp.
// NOTE(review): codec.LogEncode returns nil when encoding fails, and that
// nil key is passed straight to Put — confirm the store rejects it.
func (s *S3Driver) PutLog(ctx context.Context, key Key, value Value, timestamp Timestamp, channel int) error {
	logKey := codec.LogEncode(key, timestamp, channel)
	err := s.driver.Put(ctx, logKey, value)
	return err
}
// GetLog returns every log value whose timestamp lies in [start, end] and
// whose channel appears in channels (duplicated channel entries duplicate
// the value).
func (s *S3Driver) GetLog(ctx context.Context, start Timestamp, end Timestamp, channels []int) ([]Value, error) {
	keys, values, err := s.driver.GetByPrefix(ctx, []byte("log_"), false)
	if err != nil {
		return nil, err
	}
	var result []Value
	for i, key := range keys {
		_, ts, channel, err := codec.LogDecode(string(key))
		if err != nil {
			return nil, err
		}
		if ts < start || ts > end {
			continue
		}
		for _, want := range channels {
			if channel == want {
				result = append(result, values[i])
			}
		}
	}
	return result, nil
}
// GetSegmentIndex loads the index blob stored for segment.
func (s *S3Driver) GetSegmentIndex(ctx context.Context, segment string) (SegmentIndex, error) {
	return s.driver.Get(ctx, codec.SegmentEncode(segment, "index"))
}

// PutSegmentIndex stores the index blob for segment.
func (s *S3Driver) PutSegmentIndex(ctx context.Context, segment string, index SegmentIndex) error {
	return s.driver.Put(ctx, codec.SegmentEncode(segment, "index"), index)
}

// DeleteSegmentIndex removes the index blob for segment.
func (s *S3Driver) DeleteSegmentIndex(ctx context.Context, segment string) error {
	return s.driver.Delete(ctx, codec.SegmentEncode(segment, "index"))
}

// GetSegmentDL loads the delete log stored for segment.
func (s *S3Driver) GetSegmentDL(ctx context.Context, segment string) (SegmentDL, error) {
	return s.driver.Get(ctx, codec.SegmentEncode(segment, "DL"))
}

// PutSegmentDL stores the delete log for segment.
func (s *S3Driver) PutSegmentDL(ctx context.Context, segment string, log SegmentDL) error {
	return s.driver.Put(ctx, codec.SegmentEncode(segment, "DL"), log)
}

// DeleteSegmentDL removes the delete log for segment.
func (s *S3Driver) DeleteSegmentDL(ctx context.Context, segment string) error {
	return s.driver.Delete(ctx, codec.SegmentEncode(segment, "DL"))
}

View File

@ -0,0 +1,101 @@
package codec
import (
"errors"
"fmt"
)
// MvccEncode builds the MVCC key "<key>_<hex of ^ts>_<suffix>". The
// timestamp is bitwise-inverted so newer versions sort first
// lexicographically. The error result is always nil (kept for interface
// symmetry with other codecs).
func MvccEncode(key []byte, ts uint64, suffix string) ([]byte, error) {
	encoded := fmt.Sprintf("%s_%016x_%s", key, ^ts, suffix)
	return []byte(encoded), nil
}
// MvccDecode splits an MVCC key of the form "<key>_<hex of ^ts>_<suffix>"
// back into its parts, re-inverting the timestamp.
func MvccDecode(key []byte) (string, uint64, string, error) {
	if len(key) < 16 {
		return "", 0, "", errors.New("insufficient bytes to decode value")
	}
	// Walk backwards so underscores inside the user key are ignored: the
	// first '_' found delimits the suffix, the second the timestamp.
	suffixStart := 0
	tsStart := 0
	seen := 0
	for i := len(key) - 1; i > 0; i-- {
		if key[i] != byte('_') {
			continue
		}
		seen++
		if seen == 1 {
			suffixStart = i + 1
		} else if seen == 2 {
			tsStart = i + 1
			break
		}
	}
	if suffixStart == 0 || tsStart == 0 {
		return "", 0, "", errors.New("key is wrong formatted")
	}
	var ts uint64
	_, err := fmt.Sscanf(string(key[tsStart:suffixStart-1]), "%x", &ts)
	ts = ^ts
	if err != nil {
		return "", 0, "", err
	}
	return string(key[:tsStart-1]), ts, string(key[suffixStart:]), nil
}
// LogEncode builds a log key: the record key and channel become the MVCC
// suffix under the fixed "log" prefix. Returns nil when encoding fails.
func LogEncode(key []byte, ts uint64, channel int) []byte {
	suffix := fmt.Sprintf("%s_%d", key, channel)
	encoded, err := MvccEncode([]byte("log"), ts, suffix)
	if err != nil {
		return nil
	}
	return encoded
}
// LogDecode parses a key produced by LogEncode, returning the original key,
// timestamp, and channel. Expected layout: "log_<hex of ^ts>_<key>_<channel>".
func LogDecode(logKey string) (string, uint64, int, error) {
	if len(logKey) < 16 {
		return "", 0, 0, errors.New("insufficient bytes to decode value")
	}
	// Locate the last three underscores from the end: channel, key, then
	// timestamp boundaries.
	channelStart, keyStart, tsStart := 0, 0, 0
	seen := 0
	for i := len(logKey) - 1; i > 0; i-- {
		if logKey[i] != '_' {
			continue
		}
		seen++
		switch seen {
		case 1:
			channelStart = i + 1
		case 2:
			keyStart = i + 1
		case 3:
			tsStart = i + 1
		}
		if seen == 3 {
			break
		}
	}
	if channelStart == 0 || tsStart == 0 || keyStart == 0 || logKey[:tsStart-1] != "log" {
		return "", 0, 0, errors.New("key is wrong formatted")
	}
	var ts uint64
	if _, err := fmt.Sscanf(logKey[tsStart:keyStart-1], "%x", &ts); err != nil {
		return "", 0, 0, err
	}
	ts = ^ts
	var channel int
	if _, err := fmt.Sscanf(logKey[channelStart:], "%d", &channel); err != nil {
		return "", 0, 0, err
	}
	return logKey[keyStart : channelStart-1], ts, channel, nil
}
// SegmentEncode derives the storage key for per-segment metadata, e.g.
// "<segment>_index" or "<segment>_DL".
func SegmentEncode(segment string, suffix string) []byte {
	return []byte(fmt.Sprintf("%s_%s", segment, suffix))
}

View File

@ -0,0 +1,361 @@
package miniodriver
import (
"context"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/zilliztech/milvus-distributed/internal/storage/internal/minio/codec"
storageType "github.com/zilliztech/milvus-distributed/internal/storage/type"
)
// MinioDriver exposes MVCC-style row/log/segment operations on top of a
// MinIO-backed minioStore.
type MinioDriver struct {
	driver *minioStore
}

// bucketName is the package-wide bucket used by every minioStore call; it
// must be assigned in NewMinioDriver.
var bucketName string
// NewMinioDriver connects to the local MinIO endpoint, ensures the bucket
// named in option exists, and returns a driver bound to it.
//
// Fix: the bucket name was previously bound with ":=", shadowing the
// package-level bucketName that every minioStore method reads — the store
// always operated on an empty bucket name.
// NOTE(review): endpoint and credentials are hard-coded test values; move
// them into configuration before production use.
func NewMinioDriver(ctx context.Context, option storageType.Option) (*MinioDriver, error) {
	// to-do read conf
	var endPoint = "localhost:9000"
	var accessKeyID = "testminio"
	var secretAccessKey = "testminio"
	var useSSL = false
	bucketName = option.BucketName
	minioClient, err := minio.New(endPoint, &minio.Options{
		Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
		Secure: useSSL,
	})
	if err != nil {
		return nil, err
	}
	bucketExists, err := minioClient.BucketExists(ctx, bucketName)
	if err != nil {
		return nil, err
	}
	if !bucketExists {
		if err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{}); err != nil {
			return nil, err
		}
	}
	return &MinioDriver{
		&minioStore{
			client: minioClient,
		},
	}, nil
}
// put stores value under the MVCC-encoded form of key (timestamp + suffix).
func (s *MinioDriver) put(ctx context.Context, key storageType.Key, value storageType.Value, timestamp storageType.Timestamp, suffix string) error {
	encodedKey, err := codec.MvccEncode(key, timestamp, suffix)
	if err != nil {
		return err
	}
	return s.driver.Put(ctx, encodedKey, value)
}
// scanLE returns every stored version of key with timestamp <= the given
// timestamp, along with each version's decoded timestamp.
// NOTE(review): MvccDecode errors are discarded here — a malformed key
// silently yields a zero timestamp.
func (s *MinioDriver) scanLE(ctx context.Context, key storageType.Key, timestamp storageType.Timestamp, keyOnly bool) ([]storageType.Timestamp, []storageType.Key, []storageType.Value, error) {
	keyEnd, err := codec.MvccEncode(key, timestamp, "")
	if err != nil {
		return nil, nil, nil, err
	}
	keys, values, err := s.driver.Scan(ctx, key, []byte(keyEnd), -1, keyOnly)
	if err != nil {
		return nil, nil, nil, err
	}
	var timestamps []storageType.Timestamp
	for _, key := range keys {
		_, timestamp, _, _ := codec.MvccDecode(key)
		timestamps = append(timestamps, timestamp)
	}
	return timestamps, keys, values, nil
}
// scanGE is intended to return versions of key with timestamp >= the given
// timestamp.
// NOTE(review): keyStart is passed as the *end* bound of Scan, making this
// body identical to scanLE; with the inverted-timestamp encoding it appears
// to select timestamps <= the bound rather than >= — verify before relying
// on it.
func (s *MinioDriver) scanGE(ctx context.Context, key storageType.Key, timestamp storageType.Timestamp, keyOnly bool) ([]storageType.Timestamp, []storageType.Key, []storageType.Value, error) {
	keyStart, err := codec.MvccEncode(key, timestamp, "")
	if err != nil {
		return nil, nil, nil, err
	}
	keys, values, err := s.driver.Scan(ctx, key, keyStart, -1, keyOnly)
	if err != nil {
		return nil, nil, nil, err
	}
	var timestamps []storageType.Timestamp
	for _, key := range keys {
		_, timestamp, _, _ := codec.MvccDecode(key)
		timestamps = append(timestamps, timestamp)
	}
	return timestamps, keys, values, nil
}
//scan(ctx context.Context, key storageType.Key, start storageType.Timestamp, end storageType.Timestamp, withValue bool) ([]storageType.Timestamp, []storageType.Key, []storageType.Value, error)
// deleteLE drops every version of key at or below timestamp by deleting the
// encoded range ending at the tombstone key.
func (s *MinioDriver) deleteLE(ctx context.Context, key storageType.Key, timestamp storageType.Timestamp) error {
	upper, err := codec.MvccEncode(key, timestamp, "delete")
	if err != nil {
		return err
	}
	return s.driver.DeleteRange(ctx, key, upper)
}
// deleteGE removes versions of key from timestamp up to the newest stored
// version.
//
// Fixes: encode and delete failures are returned instead of panicking (no
// panics in library code), and an empty prefix listing no longer indexes
// keys[len(keys)-1] out of range.
func (s *MinioDriver) deleteGE(ctx context.Context, key storageType.Key, timestamp storageType.Timestamp) error {
	keys, _, err := s.driver.GetByPrefix(ctx, key, true)
	if err != nil {
		return err
	}
	if len(keys) == 0 {
		return nil
	}
	keyStart, err := codec.MvccEncode(key, timestamp, "")
	if err != nil {
		return err
	}
	return s.driver.DeleteRange(ctx, keyStart, keys[len(keys)-1])
}
// deleteRange drops the versions of key whose encoded timestamps fall
// between start and end.
func (s *MinioDriver) deleteRange(ctx context.Context, key storageType.Key, start storageType.Timestamp, end storageType.Timestamp) error {
	lower, err := codec.MvccEncode(key, start, "")
	if err != nil {
		return err
	}
	upper, err := codec.MvccEncode(key, end, "")
	if err != nil {
		return err
	}
	return s.driver.DeleteRange(ctx, lower, upper)
}
// GetRow returns the newest value of key at or before timestamp, or nil when
// the key is absent or its latest version is a delete tombstone.
//
// Fix: the Scan error is now checked explicitly before the results are used;
// previously it was only returned when the result slices happened to be nil.
func (s *MinioDriver) GetRow(ctx context.Context, key storageType.Key, timestamp storageType.Timestamp) (storageType.Value, error) {
	minioKey, err := codec.MvccEncode(key, timestamp, "")
	if err != nil {
		return nil, err
	}
	// NOTE(review): append may write into key's backing array when it has
	// spare capacity — callers should not share key across goroutines.
	keys, values, err := s.driver.Scan(ctx, append(key, byte('_')), minioKey, 1, false)
	if err != nil {
		return nil, err
	}
	if len(keys) == 0 || len(values) == 0 {
		return nil, nil
	}
	_, _, suffix, err := codec.MvccDecode(keys[0])
	if err != nil {
		return nil, err
	}
	if suffix == "delete" {
		return nil, nil
	}
	return values[0], nil
}
// GetRows fetches one row per (keys[i], timestamps[i]) pair, in order.
func (s *MinioDriver) GetRows(ctx context.Context, keys []storageType.Key, timestamps []storageType.Timestamp) ([]storageType.Value, error) {
	var results []storageType.Value
	for i := range keys {
		row, err := s.GetRow(ctx, keys[i], timestamps[i])
		if err != nil {
			return nil, err
		}
		results = append(results, row)
	}
	return results, nil
}
// PutRow writes value for key at timestamp, tagged with its segment name.
func (s *MinioDriver) PutRow(ctx context.Context, key storageType.Key, value storageType.Value, segment string, timestamp storageType.Timestamp) error {
	encoded, err := codec.MvccEncode(key, timestamp, segment)
	if err != nil {
		return err
	}
	return s.driver.Put(ctx, encoded, value)
}
// PutRows writes all rows concurrently, fanning out one goroutine per batch
// (batches sized so at most ~100 goroutines run).
//
// Fix: errCh is now buffered to len(keys). With the previous unbuffered
// channel, an early error return below left every remaining worker blocked
// forever on its send — a goroutine leak.
func (s *MinioDriver) PutRows(ctx context.Context, keys []storageType.Key, values []storageType.Value, segments []string, timestamps []storageType.Timestamp) error {
	maxThread := 100
	batchSize := 1
	keysLength := len(keys)
	if keysLength/batchSize > maxThread {
		batchSize = keysLength / maxThread
	}
	batchNums := keysLength / batchSize
	if keysLength%batchSize != 0 {
		batchNums = keysLength/batchSize + 1
	}
	errCh := make(chan error, len(keys))
	f := func(ctx2 context.Context, keys2 []storageType.Key, values2 []storageType.Value, segments2 []string, timestamps2 []storageType.Timestamp) {
		for i := 0; i < len(keys2); i++ {
			errCh <- s.PutRow(ctx2, keys2[i], values2[i], segments2[i], timestamps2[i])
		}
	}
	for i := 0; i < batchNums; i++ {
		j := i // per-iteration copy for the goroutine below
		go func() {
			start, end := j*batchSize, (j+1)*batchSize
			if len(keys) < end {
				end = len(keys)
			}
			f(ctx, keys[start:end], values[start:end], segments[start:end], timestamps[start:end])
		}()
	}
	for i := 0; i < len(keys); i++ {
		if err := <-errCh; err != nil {
			return err
		}
	}
	return nil
}
// GetSegments returns the distinct segment names holding versions of key at
// or before timestamp, excluding delete tombstones.
//
// Fix: a malformed key now returns the decode error instead of panicking.
func (s *MinioDriver) GetSegments(ctx context.Context, key storageType.Key, timestamp storageType.Timestamp) ([]string, error) {
	keyEnd, err := codec.MvccEncode(key, timestamp, "")
	if err != nil {
		return nil, err
	}
	keys, _, err := s.driver.Scan(ctx, append(key, byte('_')), keyEnd, -1, true)
	if err != nil {
		return nil, err
	}
	segmentsSet := map[string]bool{}
	for _, key := range keys {
		_, _, segment, err := codec.MvccDecode(key)
		if err != nil {
			return nil, err
		}
		if segment != "delete" {
			segmentsSet[segment] = true
		}
	}
	var segments []string
	for k, v := range segmentsSet {
		if v {
			segments = append(segments, k)
		}
	}
	return segments, nil
}
// DeleteRow writes a "delete" tombstone for key at timestamp.
func (s *MinioDriver) DeleteRow(ctx context.Context, key storageType.Key, timestamp storageType.Timestamp) error {
	tombstone, err := codec.MvccEncode(key, timestamp, "delete")
	if err != nil {
		return err
	}
	return s.driver.Put(ctx, tombstone, []byte("0"))
}
// DeleteRows writes delete tombstones for all keys concurrently, in batches
// sized so at most ~100 goroutines run.
//
// Fix: errCh is now buffered to len(keys); with the previous unbuffered
// channel an early error return left remaining workers blocked on their
// sends forever (goroutine leak).
func (s *MinioDriver) DeleteRows(ctx context.Context, keys []storageType.Key, timestamps []storageType.Timestamp) error {
	maxThread := 100
	batchSize := 1
	keysLength := len(keys)
	if keysLength/batchSize > maxThread {
		batchSize = keysLength / maxThread
	}
	batchNums := keysLength / batchSize
	if keysLength%batchSize != 0 {
		batchNums = keysLength/batchSize + 1
	}
	errCh := make(chan error, len(keys))
	f := func(ctx2 context.Context, keys2 []storageType.Key, timestamps2 []storageType.Timestamp) {
		for i := 0; i < len(keys2); i++ {
			errCh <- s.DeleteRow(ctx2, keys2[i], timestamps2[i])
		}
	}
	for i := 0; i < batchNums; i++ {
		j := i // per-iteration copy for the goroutine below
		go func() {
			start, end := j*batchSize, (j+1)*batchSize
			if len(keys) < end {
				end = len(keys)
			}
			f(ctx, keys[start:end], timestamps[start:end])
		}()
	}
	for i := 0; i < len(keys); i++ {
		if err := <-errCh; err != nil {
			return err
		}
	}
	return nil
}
// PutLog stores a log entry for key on the given channel at timestamp.
// NOTE(review): codec.LogEncode returns nil when encoding fails, and that
// nil key is passed straight to Put — confirm the store rejects it.
func (s *MinioDriver) PutLog(ctx context.Context, key storageType.Key, value storageType.Value, timestamp storageType.Timestamp, channel int) error {
	logKey := codec.LogEncode(key, timestamp, channel)
	err := s.driver.Put(ctx, logKey, value)
	return err
}
// GetLog returns every log value whose timestamp lies in [start, end] and
// whose channel appears in channels (duplicated channel entries duplicate
// the value).
func (s *MinioDriver) GetLog(ctx context.Context, start storageType.Timestamp, end storageType.Timestamp, channels []int) ([]storageType.Value, error) {
	keys, values, err := s.driver.GetByPrefix(ctx, []byte("log_"), false)
	if err != nil {
		return nil, err
	}
	var result []storageType.Value
	for i, key := range keys {
		_, ts, channel, err := codec.LogDecode(string(key))
		if err != nil {
			return nil, err
		}
		if ts < start || ts > end {
			continue
		}
		for _, want := range channels {
			if channel == want {
				result = append(result, values[i])
			}
		}
	}
	return result, nil
}
// GetSegmentIndex loads the index blob stored for segment.
func (s *MinioDriver) GetSegmentIndex(ctx context.Context, segment string) (storageType.SegmentIndex, error) {
	return s.driver.Get(ctx, codec.SegmentEncode(segment, "index"))
}

// PutSegmentIndex stores the index blob for segment.
func (s *MinioDriver) PutSegmentIndex(ctx context.Context, segment string, index storageType.SegmentIndex) error {
	return s.driver.Put(ctx, codec.SegmentEncode(segment, "index"), index)
}

// DeleteSegmentIndex removes the index blob for segment.
func (s *MinioDriver) DeleteSegmentIndex(ctx context.Context, segment string) error {
	return s.driver.Delete(ctx, codec.SegmentEncode(segment, "index"))
}

// GetSegmentDL loads the delete log stored for segment.
func (s *MinioDriver) GetSegmentDL(ctx context.Context, segment string) (storageType.SegmentDL, error) {
	return s.driver.Get(ctx, codec.SegmentEncode(segment, "DL"))
}

// PutSegmentDL stores the delete log for segment.
func (s *MinioDriver) PutSegmentDL(ctx context.Context, segment string, log storageType.SegmentDL) error {
	return s.driver.Put(ctx, codec.SegmentEncode(segment, "DL"), log)
}

// DeleteSegmentDL removes the delete log for segment.
func (s *MinioDriver) DeleteSegmentDL(ctx context.Context, segment string) error {
	return s.driver.Delete(ctx, codec.SegmentEncode(segment, "DL"))
}

View File

@ -0,0 +1,130 @@
package miniodriver
import (
"bytes"
"context"
"io"
"github.com/minio/minio-go/v7"
. "github.com/zilliztech/milvus-distributed/internal/storage/type"
)
// minioStore is a flat key/value store backed by a MinIO client. The bucket
// name comes from the package-level bucketName variable.
type minioStore struct {
	client *minio.Client
}
// Put uploads value to the configured bucket under key.
func (s *minioStore) Put(ctx context.Context, key Key, value Value) error {
	_, err := s.client.PutObject(ctx, bucketName, string(key), bytes.NewReader(value), int64(len(value)), minio.PutObjectOptions{})
	return err
}
// Get downloads the object stored under key and returns its full contents.
func (s *minioStore) Get(ctx context.Context, key Key) (Value, error) {
	object, err := s.client.GetObject(ctx, bucketName, string(key), minio.GetObjectOptions{})
	if err != nil {
		return nil, err
	}
	// Fix: read the whole object instead of one Read into a fixed 256 KiB
	// buffer — a single Read may return fewer bytes than are available, and
	// larger objects were silently truncated. The object handle is closed to
	// release its underlying connection.
	defer object.Close()
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, object); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// GetByPrefix lists every object whose key starts with prefix; values are
// fetched unless keyOnly is set.
//
// Fix: listing failures are now surfaced — the MinIO client streams errors
// as the Err field of each ObjectInfo, which was previously ignored, so a
// failed listing produced a bogus entry whose empty key was then fetched.
func (s *minioStore) GetByPrefix(ctx context.Context, prefix Key, keyOnly bool) ([]Key, []Value, error) {
	objects := s.client.ListObjects(ctx, bucketName, minio.ListObjectsOptions{Prefix: string(prefix)})
	var objectsKeys []Key
	var objectsValues []Value
	for object := range objects {
		if object.Err != nil {
			return nil, nil, object.Err
		}
		objectsKeys = append(objectsKeys, []byte(object.Key))
		if !keyOnly {
			value, err := s.Get(ctx, []byte(object.Key))
			if err != nil {
				return nil, nil, err
			}
			objectsValues = append(objectsValues, value)
		}
	}
	return objectsKeys, objectsValues, nil
}
// Scan lists objects under the keyStart prefix and collects those whose key
// compares >= keyEnd, stopping after limit matches.
//
// NOTE(review): keys built by codec.MvccEncode embed the bitwise-inverted
// timestamp, so ">= keyEnd" appears to select entries with timestamp <= the
// bound — confirm against the codec before "fixing" this comparison.
// NOTE(review): limit < 0 wraps limitCount (uint) to a huge value, acting as
// "unlimited"; "limitCount <= 0" on a uint only matches zero.
// NOTE(review): the ObjectInfo.Err field from ListObjects is not checked
// here, so listing failures are silently treated as entries.
func (s *minioStore) Scan(ctx context.Context, keyStart Key, keyEnd Key, limit int, keyOnly bool) ([]Key, []Value, error) {
	var keys []Key
	var values []Value
	limitCount := uint(limit)
	for object := range s.client.ListObjects(ctx, bucketName, minio.ListObjectsOptions{Prefix: string(keyStart)}) {
		if object.Key >= string(keyEnd) {
			keys = append(keys, []byte(object.Key))
			if !keyOnly {
				value, err := s.Get(ctx, []byte(object.Key))
				if err != nil {
					return nil, nil, err
				}
				values = append(values, value)
			}
			limitCount--
			if limitCount <= 0 {
				break
			}
		}
	}
	return keys, values, nil
}
// Delete removes the object stored under key.
func (s *minioStore) Delete(ctx context.Context, key Key) error {
	return s.client.RemoveObject(ctx, bucketName, string(key), minio.RemoveObjectOptions{})
}
// DeleteByPrefix streams every object under prefix into RemoveObjects and
// returns the first removal error.
// NOTE(review): on an early error return the producer goroutine may remain
// blocked sending to objectsCh until the listing's context ends — confirm
// ctx cancellation covers this path.
func (s *minioStore) DeleteByPrefix(ctx context.Context, prefix Key) error {
	objectsCh := make(chan minio.ObjectInfo)
	go func() {
		defer close(objectsCh)
		for object := range s.client.ListObjects(ctx, bucketName, minio.ListObjectsOptions{Prefix: string(prefix)}) {
			objectsCh <- object
		}
	}()
	for rErr := range s.client.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{GovernanceBypass: true}) {
		if rErr.Err != nil {
			return rErr.Err
		}
	}
	return nil
}
// DeleteRange streams objects under the keyStart prefix whose key compares
// <= keyEnd into RemoveObjects and returns the first removal error.
// NOTE(review): the "<= keyEnd" direction differs from the S3 driver's
// DeleteRange ("> keyEnd") — confirm which matches the inverted-timestamp
// encoding.
// NOTE(review): on an early error return the producer goroutine may remain
// blocked sending to objectsCh until the listing's context ends.
func (s *minioStore) DeleteRange(ctx context.Context, keyStart Key, keyEnd Key) error {
	objectsCh := make(chan minio.ObjectInfo)
	go func() {
		defer close(objectsCh)
		for object := range s.client.ListObjects(ctx, bucketName, minio.ListObjectsOptions{Prefix: string(keyStart)}) {
			if object.Key <= string(keyEnd) {
				objectsCh <- object
			}
		}
	}()
	for rErr := range s.client.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{GovernanceBypass: true}) {
		if rErr.Err != nil {
			return rErr.Err
		}
	}
	return nil
}

View File

@ -0,0 +1,134 @@
package miniodriver
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
storagetype "github.com/zilliztech/milvus-distributed/internal/storage/type"
)
// Shared fixtures for the MinIO driver integration tests below; they expect
// a MinIO server reachable at localhost:9000.
// NOTE(review): the error from NewMinioDriver is stored but never checked
// before use — a failed connection surfaces later as a nil-pointer panic.
var option = storagetype.Option{BucketName: "zilliz-hz"}
var ctx = context.Background()
var client, err = NewMinioDriver(ctx, option)
// TestMinioDriver_PutRowAndGetRow exercises versioned put/get of single rows.
// NOTE(review): GetRow(bar, 5) is asserted twice with different expected
// values ("abcdef..." then "1238...") — at most one expectation can hold;
// confirm the intended MVCC resolution order.
func TestMinioDriver_PutRowAndGetRow(t *testing.T) {
	err = client.PutRow(ctx, []byte("bar"), []byte("abcdefghijklmnoopqrstuvwxyz"), "SegmentA", 1)
	assert.Nil(t, err)
	err = client.PutRow(ctx, []byte("bar"), []byte("djhfkjsbdfbsdughorsgsdjhgoisdgh"), "SegmentA", 2)
	assert.Nil(t, err)
	err = client.PutRow(ctx, []byte("bar"), []byte("123854676ershdgfsgdfk,sdhfg;sdi8"), "SegmentB", 3)
	assert.Nil(t, err)
	err = client.PutRow(ctx, []byte("bar1"), []byte("testkeybarorbar_1"), "SegmentC", 3)
	assert.Nil(t, err)
	object, _ := client.GetRow(ctx, []byte("bar"), 5)
	assert.Equal(t, "abcdefghijklmnoopqrstuvwxyz", string(object))
	object, _ = client.GetRow(ctx, []byte("bar"), 2)
	assert.Equal(t, "djhfkjsbdfbsdughorsgsdjhgoisdgh", string(object))
	object, _ = client.GetRow(ctx, []byte("bar"), 5)
	assert.Equal(t, "123854676ershdgfsgdfk,sdhfg;sdi8", string(object))
	object, _ = client.GetRow(ctx, []byte("bar1"), 5)
	assert.Equal(t, "testkeybarorbar_1", string(object))
}

// TestMinioDriver_DeleteRow checks that a tombstone hides earlier versions.
func TestMinioDriver_DeleteRow(t *testing.T) {
	err = client.DeleteRow(ctx, []byte("bar"), 5)
	assert.Nil(t, err)
	object, _ := client.GetRow(ctx, []byte("bar"), 6)
	assert.Nil(t, object)
	err = client.DeleteRow(ctx, []byte("bar1"), 5)
	assert.Nil(t, err)
	object2, _ := client.GetRow(ctx, []byte("bar1"), 6)
	assert.Nil(t, object2)
}

// TestMinioDriver_GetSegments checks distinct segment-name collection.
func TestMinioDriver_GetSegments(t *testing.T) {
	err = client.PutRow(ctx, []byte("seg"), []byte("abcdefghijklmnoopqrstuvwxyz"), "SegmentA", 1)
	assert.Nil(t, err)
	err = client.PutRow(ctx, []byte("seg"), []byte("djhfkjsbdfbsdughorsgsdjhgoisdgh"), "SegmentA", 2)
	assert.Nil(t, err)
	err = client.PutRow(ctx, []byte("seg"), []byte("123854676ershdgfsgdfk,sdhfg;sdi8"), "SegmentB", 3)
	assert.Nil(t, err)
	err = client.PutRow(ctx, []byte("seg2"), []byte("testkeybarorbar_1"), "SegmentC", 1)
	assert.Nil(t, err)
	segements, err := client.GetSegments(ctx, []byte("seg"), 4)
	assert.Nil(t, err)
	assert.Equal(t, 2, len(segements))
	// Map iteration order is random, so accept either ordering.
	if segements[0] == "SegmentA" {
		assert.Equal(t, "SegmentA", segements[0])
		assert.Equal(t, "SegmentB", segements[1])
	} else {
		assert.Equal(t, "SegmentB", segements[0])
		assert.Equal(t, "SegmentA", segements[1])
	}
}

// TestMinioDriver_PutRowsAndGetRows checks the batched write/read path.
func TestMinioDriver_PutRowsAndGetRows(t *testing.T) {
	keys := [][]byte{[]byte("foo"), []byte("bar")}
	values := [][]byte{[]byte("The key is foo!"), []byte("The key is bar!")}
	segments := []string{"segmentA", "segmentB"}
	timestamps := []uint64{1, 2}
	err = client.PutRows(ctx, keys, values, segments, timestamps)
	assert.Nil(t, err)
	objects, err := client.GetRows(ctx, keys, timestamps)
	assert.Nil(t, err)
	assert.Equal(t, "The key is foo!", string(objects[0]))
	assert.Equal(t, "The key is bar!", string(objects[1]))
}

// TestMinioDriver_DeleteRows checks the batched tombstone path.
func TestMinioDriver_DeleteRows(t *testing.T) {
	keys := [][]byte{[]byte("foo"), []byte("bar")}
	timestamps := []uint64{3, 3}
	err := client.DeleteRows(ctx, keys, timestamps)
	assert.Nil(t, err)
	objects, err := client.GetRows(ctx, keys, timestamps)
	assert.Nil(t, err)
	assert.Nil(t, objects[0])
	assert.Nil(t, objects[1])
}

// TestMinioDriver_PutLogAndGetLog checks channel-filtered log retrieval;
// results come back newest-timestamp-first per the inverted encoding.
func TestMinioDriver_PutLogAndGetLog(t *testing.T) {
	err = client.PutLog(ctx, []byte("insert"), []byte("This is insert log!"), 1, 11)
	assert.Nil(t, err)
	err = client.PutLog(ctx, []byte("delete"), []byte("This is delete log!"), 2, 10)
	assert.Nil(t, err)
	err = client.PutLog(ctx, []byte("update"), []byte("This is update log!"), 3, 9)
	assert.Nil(t, err)
	err = client.PutLog(ctx, []byte("select"), []byte("This is select log!"), 4, 8)
	assert.Nil(t, err)
	channels := []int{5, 8, 9, 10, 11, 12, 13}
	logValues, err := client.GetLog(ctx, 0, 5, channels)
	assert.Nil(t, err)
	assert.Equal(t, "This is select log!", string(logValues[0]))
	assert.Equal(t, "This is update log!", string(logValues[1]))
	assert.Equal(t, "This is delete log!", string(logValues[2]))
	assert.Equal(t, "This is insert log!", string(logValues[3]))
}

// TestMinioDriver_Segment checks the segment-index put/get/delete cycle.
func TestMinioDriver_Segment(t *testing.T) {
	err := client.PutSegmentIndex(ctx, "segmentA", []byte("This is segmentA's index!"))
	assert.Nil(t, err)
	segmentIndex, err := client.GetSegmentIndex(ctx, "segmentA")
	assert.Equal(t, "This is segmentA's index!", string(segmentIndex))
	assert.Nil(t, err)
	err = client.DeleteSegmentIndex(ctx, "segmentA")
	assert.Nil(t, err)
}

// TestMinioDriver_SegmentDL checks the segment delete-log put/get/delete cycle.
func TestMinioDriver_SegmentDL(t *testing.T) {
	err := client.PutSegmentDL(ctx, "segmentB", []byte("This is segmentB's delete log!"))
	assert.Nil(t, err)
	segmentDL, err := client.GetSegmentDL(ctx, "segmentB")
	assert.Nil(t, err)
	assert.Equal(t, "This is segmentB's delete log!", string(segmentDL))
	err = client.DeleteSegmentDL(ctx, "segmentB")
	assert.Nil(t, err)
}

View File

@ -0,0 +1,62 @@
package codec
import (
"encoding/binary"
"errors"
"github.com/tikv/client-go/codec"
)
var (
	// Delimiter separates the key, timestamp, and suffix sections.
	Delimiter        = byte('_')
	DelimiterPlusOne = Delimiter + 0x01
	// DeleteMark tags tombstone entries.
	// NOTE(review): DeleteMark and SegmentDLMark are both 'd', so their
	// encoded keys can collide — confirm this is intentional.
	DeleteMark       = byte('d')
	SegmentIndexMark = byte('i')
	SegmentDLMark    = byte('d')
)
// EncodeKey append timestamp, delimiter, and suffix string
// to one slice key.
// Layout: <key><Delimiter><8-byte descending-encoded timestamp><suffix>.
// The descending encoding (EncodeUintDesc stores ^timestamp big-endian)
// makes newer versions sort first.
// Note: suffix string should not contains Delimiter
func EncodeKey(key []byte, timestamp uint64, suffix string) []byte {
	//TODO: should we encode key to memory comparable
	ret := EncodeDelimiter(key, Delimiter)
	ret = codec.EncodeUintDesc(ret, timestamp)
	return append(ret, suffix...)
}
// DecodeKey reverses EncodeKey: it splits
// "<key><Delimiter><8-byte descending ts><suffix>" and returns the original
// key, the re-inverted timestamp, and the suffix.
//
// Fixes: the key was truncated one byte short (key[:lenDeKey-1] dropped the
// final byte of the user key; only the delimiter should be removed), and the
// length guard permitted keys with no room for the full 8-byte timestamp,
// which then paniced when slicing tsBytes.
// NOTE(review): the backward scan assumes no Delimiter byte occurs inside
// the encoded timestamp or suffix — confirm against EncodeUintDesc output.
func DecodeKey(key []byte) ([]byte, uint64, string, error) {
	if len(key) < 8 {
		return nil, 0, "", errors.New("insufficient bytes to decode value")
	}
	lenDeKey := 0
	for i := len(key) - 1; i > 0; i-- {
		if key[i] == Delimiter {
			lenDeKey = i
			break
		}
	}
	// Need the delimiter plus a full 8-byte timestamp after it.
	if lenDeKey == 0 || lenDeKey+9 > len(key) {
		return nil, 0, "", errors.New("insufficient bytes to decode value")
	}
	tsBytes := key[lenDeKey+1 : lenDeKey+9]
	ts := binary.BigEndian.Uint64(tsBytes)
	suffix := string(key[lenDeKey+9:])
	return key[:lenDeKey], ^ts, suffix, nil
}
// EncodeDelimiter append a delimiter byte to slice b, and return the appended slice.
// NOTE(review): append may reuse b's backing array when capacity allows, so
// callers must not rely on b staying unchanged.
func EncodeDelimiter(b []byte, delimiter byte) []byte {
	return append(b, delimiter)
}
// EncodeSegment builds the per-segment metadata key
// "segment<Delimiter><name><Delimiter><type byte>".
func EncodeSegment(segName []byte, segType byte) []byte {
	out := append([]byte("segment"), Delimiter)
	out = append(out, segName...)
	return append(out, Delimiter, segType)
}

View File

@ -0,0 +1,389 @@
package tikvdriver
import (
"context"
"errors"
"strconv"
"strings"
"github.com/tikv/client-go/config"
"github.com/tikv/client-go/rawkv"
. "github.com/zilliztech/milvus-distributed/internal/storage/internal/tikv/codec"
. "github.com/zilliztech/milvus-distributed/internal/storage/type"
storagetype "github.com/zilliztech/milvus-distributed/internal/storage/type"
)
// keyAddOne returns the smallest key strictly greater than every key having
// the given prefix, for use as an exclusive range end. A nil input, or a
// prefix consisting entirely of 0xFF bytes (which has no finite upper
// bound), yields nil.
//
// Fix: the original unconditionally incremented the last byte, so a prefix
// ending in 0xFF wrapped to 0x00 and produced an end key *smaller* than the
// prefix, silently emptying the scan/delete range.
func keyAddOne(key Key) Key {
	if key == nil {
		return nil
	}
	ret := make(Key, len(key))
	copy(ret, key)
	for i := len(ret) - 1; i >= 0; i-- {
		if ret[i] != 0xFF {
			ret[i]++
			return ret[:i+1]
		}
	}
	return nil
}
// tikvEngine wraps a raw TiKV client together with its configuration.
type tikvEngine struct {
	client *rawkv.Client
	conf   config.Config
}

// Put stores value under key.
func (e tikvEngine) Put(ctx context.Context, key Key, value Value) error {
	return e.client.Put(ctx, key, value)
}

// BatchPut stores each values[i] under keys[i] in a single request.
func (e tikvEngine) BatchPut(ctx context.Context, keys []Key, values []Value) error {
	return e.client.BatchPut(ctx, keys, values)
}

// Get fetches the value stored under key.
func (e tikvEngine) Get(ctx context.Context, key Key) (Value, error) {
	return e.client.Get(ctx, key)
}
// GetByPrefix returns every key (and, unless keyOnly is set, its value) whose
// key starts with prefix, paging through results MaxScanLimit at a time.
func (e tikvEngine) GetByPrefix(ctx context.Context, prefix Key, keyOnly bool) (keys []Key, values []Value, err error) {
	first := prefix
	last := keyAddOne(prefix)
	pageSize := e.conf.Raw.MaxScanLimit
	for {
		pageKeys, pageValues, scanErr := e.Scan(ctx, first, last, pageSize, keyOnly)
		if scanErr != nil {
			return keys, values, scanErr
		}
		keys = append(keys, pageKeys...)
		values = append(values, pageValues...)
		// A short page means the range is exhausted.
		if len(pageKeys) < pageSize {
			return
		}
		// Resume just past the last key seen (append '\000' to exclude it).
		first = append(pageKeys[len(pageKeys)-1], '\000')
	}
}
// Scan returns up to limit key/value pairs in [startKey, endKey).
func (e tikvEngine) Scan(ctx context.Context, startKey Key, endKey Key, limit int, keyOnly bool) ([]Key, []Value, error) {
	return e.client.Scan(ctx, startKey, endKey, limit, rawkv.ScanOption{KeyOnly: keyOnly})
}

// Delete removes a single key.
func (e tikvEngine) Delete(ctx context.Context, key Key) error {
	return e.client.Delete(ctx, key)
}

// DeleteByPrefix removes every key that starts with prefix by deleting the
// range [prefix, keyAddOne(prefix)).
func (e tikvEngine) DeleteByPrefix(ctx context.Context, prefix Key) error {
	startKey := prefix
	endKey := keyAddOne(prefix)
	return e.client.DeleteRange(ctx, startKey, endKey)
}

// DeleteRange removes every key in [startKey, endKey).
func (e tikvEngine) DeleteRange(ctx context.Context, startKey Key, endKey Key) error {
	return e.client.DeleteRange(ctx, startKey, endKey)
}

// Close releases the underlying TiKV client.
func (e tikvEngine) Close() error {
	return e.client.Close()
}
// TikvStore implements the versioned storagetype.Store interface on top of a
// raw TiKV cluster, encoding timestamps into keys via EncodeKey.
type TikvStore struct {
	engine *tikvEngine
}

// NewTikvStore connects to the TiKV placement driver at option.TikvAddress
// using the client's default configuration.
func NewTikvStore(ctx context.Context, option storagetype.Option) (*TikvStore, error) {
	conf := config.Default()
	client, err := rawkv.NewClient(ctx, []string{option.TikvAddress}, conf)
	if err != nil {
		return nil, err
	}
	return &TikvStore{
		&tikvEngine{
			client: client,
			conf:   conf,
		},
	}, nil
}

// Name identifies this storage backend.
func (s *TikvStore) Name() string {
	return "TiKV storage"
}

// put writes value under the versioned key (key, timestamp, suffix).
func (s *TikvStore) put(ctx context.Context, key Key, value Value, timestamp Timestamp, suffix string) error {
	return s.engine.Put(ctx, EncodeKey(key, timestamp, suffix), value)
}
// scanLE is reserved for "all versions at or before timestamp" scans.
// Not implemented yet; calling it panics.
func (s *TikvStore) scanLE(ctx context.Context, key Key, timestamp Timestamp, keyOnly bool) ([]Timestamp, []Key, []Value, error) {
	panic("implement me")
}

// scanGE is reserved for "all versions at or after timestamp" scans.
// Not implemented yet; calling it panics.
func (s *TikvStore) scanGE(ctx context.Context, key Key, timestamp Timestamp, keyOnly bool) ([]Timestamp, []Key, []Value, error) {
	panic("implement me")
}

// scan is reserved for version-range scans. Not implemented yet; the sketch
// below is kept for the eventual implementation.
func (s *TikvStore) scan(ctx context.Context, key Key, start Timestamp, end Timestamp, keyOnly bool) ([]Timestamp, []Key, []Value, error) {
	//startKey := EncodeKey(key, start, "")
	//endKey := EncodeKey(EncodeDelimiter(key, DelimiterPlusOne), end, "")
	//return s.engine.Scan(ctx, startKey, endKey, -1, keyOnly)
	panic("implement me")
}

// deleteLE is reserved for deleting versions at or before timestamp.
// Not implemented yet; calling it panics.
func (s *TikvStore) deleteLE(ctx context.Context, key Key, timestamp Timestamp) error {
	panic("implement me")
}

// deleteGE is reserved for deleting versions at or after timestamp.
// Not implemented yet; calling it panics.
func (s *TikvStore) deleteGE(ctx context.Context, key Key, timestamp Timestamp) error {
	panic("implement me")
}

// deleteRange is reserved for deleting a version range. Not implemented yet;
// calling it panics.
func (s *TikvStore) deleteRange(ctx context.Context, key Key, start Timestamp, end Timestamp) error {
	panic("implement me")
}
// GetRow returns the newest value of key visible at timestamp, or nil when
// the key does not exist (or was deleted) at that timestamp.
func (s *TikvStore) GetRow(ctx context.Context, key Key, timestamp Timestamp) (Value, error) {
	// Versions are encoded descending, so a limit-1 scan starting at
	// (key, timestamp) yields the newest version <= timestamp first.
	startKey := EncodeKey(key, timestamp, "")
	endKey := EncodeDelimiter(key, DelimiterPlusOne)
	keys, values, err := s.engine.Scan(ctx, startKey, endKey, 1, false)
	if err != nil {
		return nil, err
	}
	// Guard against both nil and empty-but-non-nil result slices; the old
	// `keys == nil` check would let an empty slice through and panic on
	// keys[0].
	if len(keys) == 0 {
		return nil, nil
	}
	_, _, suffix, err := DecodeKey(keys[0])
	if err != nil {
		return nil, err
	}
	// A DeleteMark suffix is a tombstone: the row was deleted at or before
	// the requested timestamp.
	if suffix == string(DeleteMark) {
		return nil, nil
	}
	return values[0], nil
}
// TODO: how to split keys into batches more adaptively.
// batchSize caps the number of keys handled by one goroutine in GetRows.
var batchSize = 100

// kvPair carries one per-key lookup result (or error) back from a worker
// goroutine to the collector in GetRows.
type kvPair struct {
	key   Key
	value Value
	err   error
}
// batchKeys partitions keys into consecutive sub-slices of at most batchSize
// elements. The sub-slices alias the input slice; they are not copies.
// An empty input yields a single empty batch (matching the ceiling formula).
func batchKeys(keys []Key) [][]Key {
	total := len(keys)
	// Ceiling division; note (0-1)/batchSize truncates toward zero, so an
	// empty input still produces one (empty) batch.
	numBatch := (total-1)/batchSize + 1
	out := make([][]Key, 0, numBatch)
	for start := 0; len(out) < numBatch; start += batchSize {
		end := start + batchSize
		if end > total {
			end = total
		}
		out = append(out, keys[start:end])
	}
	return out
}
// GetRows fetches the value of each keys[i] at timestamps[i], preserving input
// order in the returned slice. Lookups run concurrently, one goroutine per
// batch of batchSize keys. On the first per-key failure the remaining work is
// cancelled and that error is returned (values for keys that did complete are
// still filled in).
func (s *TikvStore) GetRows(ctx context.Context, keys []Key, timestamps []Timestamp) ([]Value, error) {
	if len(keys) != len(timestamps) {
		return nil, errors.New("the len of keys is not equal to the len of timestamps")
	}
	batches := batchKeys(keys)
	// Buffered to len(keys) so workers can always emit one pair per key
	// without blocking, even after cancellation.
	ch := make(chan kvPair, len(keys))
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	for n, b := range batches {
		batch := b
		numBatch := n
		go func() {
			for i, key := range batch {
				select {
				case <-ctx.Done():
					// Still emit a pair for this key: the collector below
					// expects exactly len(keys) messages, and the previous
					// silent `return` here left it blocked forever once any
					// lookup failed (deadlock). The buffered channel makes
					// this send non-blocking.
					ch <- kvPair{key: key, err: ctx.Err()}
				default:
					v, err := s.GetRow(ctx, key, timestamps[numBatch*batchSize+i])
					ch <- kvPair{
						key:   key,
						value: v,
						err:   err,
					}
				}
			}
		}()
	}

	// Collect exactly one result per key; remember only the first error so
	// the follow-up context.Canceled errors don't mask it.
	var err error
	var values []Value
	kvMap := make(map[string]Value, len(keys))
	for i := 0; i < len(keys); i++ {
		kv := <-ch
		if kv.err != nil {
			cancel()
			if err == nil {
				err = kv.err
			}
		}
		kvMap[string(kv.key)] = kv.value
	}
	// Re-order results to match the caller's key order.
	for _, key := range keys {
		values = append(values, kvMap[string(key)])
	}
	return values, err
}
// PutRow writes one versioned row; the owning segment name is stored as the
// key suffix.
func (s *TikvStore) PutRow(ctx context.Context, key Key, value Value, segment string, timestamp Timestamp) error {
	return s.put(ctx, key, value, timestamp, segment)
}
// PutRows writes keys[i] -> values[i] at timestamps[i], tagging each row with
// segments[i], in a single batched put. All four slices must have equal
// length.
func (s *TikvStore) PutRows(ctx context.Context, keys []Key, values []Value, segments []string, timestamps []Timestamp) error {
	if len(keys) != len(values) {
		return errors.New("the len of keys is not equal to the len of values")
	}
	if len(keys) != len(timestamps) {
		return errors.New("the len of keys is not equal to the len of timestamps")
	}
	// segments is indexed in lockstep with keys below; validate it too
	// instead of panicking with an index-out-of-range on a short slice.
	if len(keys) != len(segments) {
		return errors.New("the len of keys is not equal to the len of segments")
	}
	encodedKeys := make([]Key, len(keys))
	for i, key := range keys {
		encodedKeys[i] = EncodeKey(key, timestamps[i], segments[i])
	}
	return s.engine.BatchPut(ctx, encodedKeys, values)
}
// DeleteRow marks key as deleted at timestamp by writing a tombstone version
// (DeleteMark suffix with a 1-byte placeholder value).
func (s *TikvStore) DeleteRow(ctx context.Context, key Key, timestamp Timestamp) error {
	return s.put(ctx, key, Value{0x00}, timestamp, string(DeleteMark))
}
// DeleteRows marks each keys[i] as deleted at timestamps[i] by writing
// tombstone versions (DeleteMark suffix) in one batched put.
func (s *TikvStore) DeleteRows(ctx context.Context, keys []Key, timestamps []Timestamp) error {
	tombstoneKeys := make([]Key, len(keys))
	tombstoneValues := make([]Value, len(keys))
	for i := range keys {
		tombstoneKeys[i] = EncodeKey(keys[i], timestamps[i], string(DeleteMark))
		tombstoneValues[i] = Value{0x00}
	}
	return s.engine.BatchPut(ctx, tombstoneKeys, tombstoneValues)
}
//func (s *TikvStore) DeleteRows(ctx context.Context, keys []Key, timestamp Timestamp) error {
// batches := batchKeys(keys)
// ch := make(chan error, len(batches))
// ctx, cancel := context.WithCancel(ctx)
//
// for _, b := range batches {
// batch := b
// go func() {
// for _, key := range batch {
// select {
// case <-ctx.Done():
// return
// default:
// ch <- s.DeleteRow(ctx, key, timestamp)
// }
// }
// }()
// }
//
// var err error
// for i := 0; i < len(keys); i++ {
// if e := <-ch; e != nil {
// cancel()
// if err == nil {
// err = e
// }
// }
// }
// return err
//}
// PutLog appends a log entry under the shared "log" key. The original key and
// the channel id are folded into the suffix, separated by DelimiterPlusOne,
// so GetLog can recover the channel later.
func (s *TikvStore) PutLog(ctx context.Context, key Key, value Value, timestamp Timestamp, channel int) error {
	suffix := string(EncodeDelimiter(key, DelimiterPlusOne)) + strconv.Itoa(channel)
	return s.put(ctx, Key("log"), value, timestamp, suffix)
}
// GetLog returns the log entries written in the timestamp range [start, end],
// optionally filtered by channel id. An empty channels slice matches every
// channel.
func (s *TikvStore) GetLog(ctx context.Context, start Timestamp, end Timestamp, channels []int) (logs []Value, err error) {
	key := Key("log")
	// Timestamps are encoded descending, so the physical scan range runs
	// from `end` down to `start`.
	startKey := EncodeKey(key, end, "")
	endKey := EncodeKey(key, start, "")
	// TODO: page with a loop so results beyond MaxScanLimit are not dropped.
	keys, values, err := s.engine.Scan(ctx, startKey, endKey, s.engine.conf.Raw.MaxScanLimit, false)
	if err != nil || len(keys) == 0 {
		return nil, err
	}
	for i, k := range keys {
		_, _, suffix, decodeErr := DecodeKey(k)
		if decodeErr != nil {
			return logs, decodeErr
		}
		log := values[i]
		// No channel filter: keep every entry and skip suffix parsing, so a
		// malformed suffix cannot fail an unfiltered read.
		if len(channels) == 0 {
			logs = append(logs, log)
			continue
		}
		// The channel id is the last DelimiterPlusOne-separated field of the
		// suffix (see PutLog).
		fields := strings.Split(suffix, string(DelimiterPlusOne))
		channel, convErr := strconv.Atoi(fields[len(fields)-1])
		if convErr != nil {
			// Return the error instead of panicking: one corrupt key should
			// not crash the caller.
			return logs, convErr
		}
		for _, want := range channels {
			if want == channel {
				logs = append(logs, log)
				break
			}
		}
	}
	return logs, nil
}
// GetSegmentIndex returns the serialized index blob stored for segment.
func (s *TikvStore) GetSegmentIndex(ctx context.Context, segment string) (SegmentIndex, error) {
	return s.engine.Get(ctx, EncodeSegment([]byte(segment), SegmentIndexMark))
}

// PutSegmentIndex stores the serialized index blob for segment.
func (s *TikvStore) PutSegmentIndex(ctx context.Context, segment string, index SegmentIndex) error {
	return s.engine.Put(ctx, EncodeSegment([]byte(segment), SegmentIndexMark), index)
}

// DeleteSegmentIndex removes the index blob stored for segment.
func (s *TikvStore) DeleteSegmentIndex(ctx context.Context, segment string) error {
	return s.engine.Delete(ctx, EncodeSegment([]byte(segment), SegmentIndexMark))
}

// GetSegmentDL returns the delete log stored for segment.
func (s *TikvStore) GetSegmentDL(ctx context.Context, segment string) (SegmentDL, error) {
	return s.engine.Get(ctx, EncodeSegment([]byte(segment), SegmentDLMark))
}

// PutSegmentDL stores the delete log for segment.
func (s *TikvStore) PutSegmentDL(ctx context.Context, segment string, log SegmentDL) error {
	return s.engine.Put(ctx, EncodeSegment([]byte(segment), SegmentDLMark), log)
}

// DeleteSegmentDL removes the delete log stored for segment.
func (s *TikvStore) DeleteSegmentDL(ctx context.Context, segment string) error {
	return s.engine.Delete(ctx, EncodeSegment([]byte(segment), SegmentDLMark))
}
// GetSegments returns the distinct segment names that hold a version of key
// written at or before timestamp. Tombstone versions (DeleteMark suffix) are
// excluded. Names are returned in first-seen (scan) order, which makes the
// output deterministic, unlike the previous map-iteration order.
func (s *TikvStore) GetSegments(ctx context.Context, key Key, timestamp Timestamp) ([]string, error) {
	keys, _, err := s.engine.GetByPrefix(ctx, EncodeDelimiter(key, Delimiter), true)
	if err != nil {
		return nil, err
	}
	seen := make(map[string]struct{})
	var segments []string
	for _, k := range keys {
		_, ts, segment, decodeErr := DecodeKey(k)
		if decodeErr != nil {
			// Keys under this prefix should always decode; surface a corrupt
			// entry as an error instead of panicking in library code.
			return nil, decodeErr
		}
		if ts > timestamp || segment == string(DeleteMark) {
			continue
		}
		if _, ok := seen[segment]; !ok {
			seen[segment] = struct{}{}
			segments = append(segments, segment)
		}
	}
	return segments, nil
}
// Close releases the underlying TiKV client connection.
func (s *TikvStore) Close() error {
	return s.engine.Close()
}

View File

@ -0,0 +1,293 @@
package tikvdriver
import (
"bytes"
"context"
"fmt"
"math"
"os"
"sort"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
. "github.com/zilliztech/milvus-distributed/internal/storage/internal/tikv/codec"
. "github.com/zilliztech/milvus-distributed/internal/storage/type"
)
//var store TikvStore
// store is the shared TikvStore handle used by every test in this package.
var store *TikvStore

// option points the tests at a locally running TiKV placement driver.
var option = Option{TikvAddress: "localhost:2379"}

// TestMain connects the shared store, runs the suite, and tears it down.
func TestMain(m *testing.M) {
	var err error
	store, err = NewTikvStore(context.Background(), option)
	if err != nil {
		// Fail fast with a clear message; the previous `store, _ = ...`
		// ignored the error and every test then panicked on a nil store.
		fmt.Println("failed to connect to TiKV:", err)
		os.Exit(1)
	}
	exitCode := m.Run()
	_ = store.Close()
	os.Exit(exitCode)
}
// TestTikvEngine_Prefix exercises GetByPrefix/DeleteByPrefix, including the
// paging path where results exceed one MaxScanLimit page.
func TestTikvEngine_Prefix(t *testing.T) {
	ctx := context.Background()
	prefix := Key("key")
	engine := store.engine
	value := Value("value")

	// Put some keys with the same prefix: the bare prefix itself plus one
	// encoded (versioned) key under it.
	key := prefix
	err := engine.Put(ctx, key, value)
	require.Nil(t, err)
	key = EncodeKey(prefix, 0, "")
	err = engine.Put(ctx, key, value)
	assert.Nil(t, err)

	// Get by prefix: both keys must come back.
	ks, _, err := engine.GetByPrefix(ctx, prefix, true)
	assert.Equal(t, 2, len(ks))
	assert.Nil(t, err)

	// Delete by prefix, then verify nothing remains.
	err = engine.DeleteByPrefix(ctx, prefix)
	assert.Nil(t, err)
	ks, _, err = engine.GetByPrefix(ctx, prefix, true)
	assert.Equal(t, 0, len(ks))
	assert.Nil(t, err)

	// Test a large amount of keys: one more than a single scan page, forcing
	// GetByPrefix to page at least twice.
	num := engine.conf.Raw.MaxScanLimit + 1
	keys := make([]Key, num)
	values := make([]Value, num)
	for i := 0; i < num; i++ {
		key = EncodeKey(prefix, uint64(i), "")
		keys[i] = key
		values[i] = value
	}
	err = engine.BatchPut(ctx, keys, values)
	assert.Nil(t, err)
	ks, _, err = engine.GetByPrefix(ctx, prefix, true)
	assert.Nil(t, err)
	assert.Equal(t, num, len(ks))
	err = engine.DeleteByPrefix(ctx, prefix)
	assert.Nil(t, err)
}
// TestTikvStore_Row verifies versioned single-row semantics: timestamp
// visibility, prefix isolation between distinct keys, and tombstone deletes.
func TestTikvStore_Row(t *testing.T) {
	ctx := context.Background()
	key := Key("key")

	// Add the same row at two different timestamps.
	err := store.PutRow(ctx, key, Value("value0"), "segment0", 0)
	assert.Nil(t, err)
	err = store.PutRow(ctx, key, Value("value1"), "segment0", 2)
	assert.Nil(t, err)

	// Reads must return the newest version at or before the read timestamp.
	v, err := store.GetRow(ctx, key, 3)
	assert.Nil(t, err)
	assert.Equal(t, Value("value1"), v)

	v, err = store.GetRow(ctx, key, 2)
	assert.Nil(t, err)
	assert.Equal(t, Value("value1"), v)

	v, err = store.GetRow(ctx, key, 1)
	assert.Nil(t, err)
	assert.Equal(t, Value("value0"), v)

	// Add a different row whose key shares the same prefix; it must not
	// shadow reads of the original key.
	key1 := Key("key_y")
	err = store.PutRow(ctx, key1, Value("valuey"), "segment0", 2)
	assert.Nil(t, err)

	v, err = store.GetRow(ctx, key, 3)
	assert.Nil(t, err)
	assert.Equal(t, Value("value1"), v)

	v, err = store.GetRow(ctx, key1, 3)
	assert.Nil(t, err)
	assert.Equal(t, Value("valuey"), v)

	// Delete the row at ts=4; a read at ts=5 must see the tombstone (nil).
	err = store.DeleteRow(ctx, key, 4)
	assert.Nil(t, err)
	v, err = store.GetRow(ctx, key, 5)
	assert.Nil(t, err)
	assert.Nil(t, v)

	// Clear test data and confirm the prefix is empty.
	err = store.engine.DeleteByPrefix(ctx, key)
	assert.Nil(t, err)
	k, va, err := store.engine.GetByPrefix(ctx, key, false)
	assert.Nil(t, err)
	assert.Nil(t, k)
	assert.Nil(t, va)
}
// TestTikvStore_BatchRow round-trips a batch sized past MaxBatchPutSize
// through PutRows/GetRows/DeleteRows.
func TestTikvStore_BatchRow(t *testing.T) {
	ctx := context.Background()

	// Prepare enough data to exceed one MaxBatchPutSize, so the batched put
	// path is exercised. Also assert each key is absent beforehand.
	size := 0
	var testKeys []Key
	var testValues []Value
	var segments []string
	var timestamps []Timestamp
	for i := 0; size/store.engine.conf.Raw.MaxBatchPutSize < 1; i++ {
		key := fmt.Sprint("key", i)
		size += len(key)
		testKeys = append(testKeys, []byte(key))
		value := fmt.Sprint("value", i)
		size += len(value)
		testValues = append(testValues, []byte(value))
		segments = append(segments, "test")
		v, err := store.GetRow(ctx, Key(key), math.MaxUint64)
		assert.Nil(t, v)
		assert.Nil(t, err)
	}

	// Batch put all rows at ts=1.
	for range testKeys {
		timestamps = append(timestamps, 1)
	}
	err := store.PutRows(ctx, testKeys, testValues, segments, timestamps)
	assert.Nil(t, err)

	// Batch get at ts=2 and verify values match, in order.
	for i := range timestamps {
		timestamps[i] = 2
	}
	checkValues, err := store.GetRows(ctx, testKeys, timestamps)
	assert.NotNil(t, checkValues)
	assert.Nil(t, err)
	assert.Equal(t, len(checkValues), len(testValues))
	for i := range testKeys {
		assert.Equal(t, testValues[i], checkValues[i])
	}

	// Tombstone every row at the maximum timestamp.
	for i := range timestamps {
		timestamps[i] = math.MaxUint64
	}
	err = store.DeleteRows(ctx, testKeys, timestamps)
	assert.Nil(t, err)

	// Ensure every row now reads as deleted (nil).
	for i := range timestamps {
		timestamps[i] = math.MaxUint64
	}
	checkValues, err = store.GetRows(ctx, testKeys, timestamps)
	assert.Nil(t, err)
	for _, value := range checkValues {
		assert.Nil(t, value)
	}

	// Clean test data.
	err = store.engine.DeleteByPrefix(ctx, Key("key"))
	assert.Nil(t, err)
}
// TestTikvStore_GetSegments checks that GetSegments returns the distinct
// segments visible at a given timestamp.
func TestTikvStore_GetSegments(t *testing.T) {
	ctx := context.Background()
	key := Key("key")

	// Put rows in two segments at increasing timestamps.
	err := store.PutRow(ctx, key, Value{0}, "a", 1)
	assert.Nil(t, err)
	err = store.PutRow(ctx, key, Value{0}, "a", 2)
	assert.Nil(t, err)
	err = store.PutRow(ctx, key, Value{0}, "c", 3)
	assert.Nil(t, err)

	// At ts=2 only segment "a" is visible; at ts=3 both "a" and "c" are.
	segs, err := store.GetSegments(ctx, key, 2)
	assert.Nil(t, err)
	assert.Equal(t, 1, len(segs))
	assert.Equal(t, "a", segs[0])
	segs, err = store.GetSegments(ctx, key, 3)
	assert.Nil(t, err)
	assert.Equal(t, 2, len(segs))

	// Clean test data.
	err = store.engine.DeleteByPrefix(ctx, key)
	assert.Nil(t, err)
}
// TestTikvStore_Log round-trips log entries through PutLog/GetLog with a
// channel filter covering both channels used.
func TestTikvStore_Log(t *testing.T) {
	ctx := context.Background()

	// Put some log entries across two channels.
	err := store.PutLog(ctx, Key("key1"), Value("value1"), 1, 1)
	assert.Nil(t, err)
	err = store.PutLog(ctx, Key("key1"), Value("value1_1"), 1, 2)
	assert.Nil(t, err)
	err = store.PutLog(ctx, Key("key2"), Value("value2"), 2, 1)
	assert.Nil(t, err)

	// Fetch logs for ts range [0,2] on channels {1,2}; sort for a stable
	// comparison since GetLog's order follows the descending-ts encoding.
	log, err := store.GetLog(ctx, 0, 2, []int{1, 2})
	if err != nil {
		panic(err)
	}
	sort.Slice(log, func(i, j int) bool {
		return bytes.Compare(log[i], log[j]) == -1
	})
	assert.Equal(t, log[0], Value("value1"))
	assert.Equal(t, log[1], Value("value1_1"))
	assert.Equal(t, log[2], Value("value2"))

	// Delete test data.
	err = store.engine.DeleteByPrefix(ctx, Key("log"))
	assert.Nil(t, err)
}
// TestTikvStore_SegmentIndex round-trips segment index blobs through
// Put/Get/DeleteSegmentIndex for two segments.
func TestTikvStore_SegmentIndex(t *testing.T) {
	ctx := context.Background()

	// Put segment index blobs.
	err := store.PutSegmentIndex(ctx, "segment0", []byte("index0"))
	assert.Nil(t, err)
	err = store.PutSegmentIndex(ctx, "segment1", []byte("index1"))
	assert.Nil(t, err)

	// Each segment must read back its own blob.
	index, err := store.GetSegmentIndex(ctx, "segment0")
	assert.Nil(t, err)
	assert.Equal(t, []byte("index0"), index)
	index, err = store.GetSegmentIndex(ctx, "segment1")
	assert.Nil(t, err)
	assert.Equal(t, []byte("index1"), index)

	// Delete both; a subsequent read returns nil.
	err = store.DeleteSegmentIndex(ctx, "segment0")
	assert.Nil(t, err)
	err = store.DeleteSegmentIndex(ctx, "segment1")
	assert.Nil(t, err)
	index, err = store.GetSegmentIndex(ctx, "segment0")
	assert.Nil(t, err)
	assert.Nil(t, index)
}
// TestTikvStore_DeleteSegmentDL round-trips segment delete logs through
// Put/Get/DeleteSegmentDL for two segments.
func TestTikvStore_DeleteSegmentDL(t *testing.T) {
	ctx := context.Background()

	// Put segment delete logs.
	err := store.PutSegmentDL(ctx, "segment0", []byte("index0"))
	assert.Nil(t, err)
	err = store.PutSegmentDL(ctx, "segment1", []byte("index1"))
	assert.Nil(t, err)

	// Each segment must read back its own delete log.
	index, err := store.GetSegmentDL(ctx, "segment0")
	assert.Nil(t, err)
	assert.Equal(t, []byte("index0"), index)
	index, err = store.GetSegmentDL(ctx, "segment1")
	assert.Nil(t, err)
	assert.Equal(t, []byte("index1"), index)

	// Delete both; a subsequent read returns nil.
	err = store.DeleteSegmentDL(ctx, "segment0")
	assert.Nil(t, err)
	err = store.DeleteSegmentDL(ctx, "segment1")
	assert.Nil(t, err)
	index, err = store.GetSegmentDL(ctx, "segment0")
	assert.Nil(t, err)
	assert.Nil(t, index)
}

View File

@ -0,0 +1,39 @@
package storage
import (
"context"
"errors"
S3Driver "github.com/zilliztech/milvus-distributed/internal/storage/internal/S3"
minIODriver "github.com/zilliztech/milvus-distributed/internal/storage/internal/minio"
tikvDriver "github.com/zilliztech/milvus-distributed/internal/storage/internal/tikv"
storagetype "github.com/zilliztech/milvus-distributed/internal/storage/type"
)
// NewStore constructs a Store backed by the driver named in option.Type
// (TiKV, MinIO, or S3). An unknown driver type yields an error.
//
// All drivers now report construction failures the same way — by returning
// the error — where the TiKV case previously panicked while MinIO/S3
// returned it.
func NewStore(ctx context.Context, option storagetype.Option) (storagetype.Store, error) {
	switch option.Type {
	case storagetype.TIKVDriver:
		store, err := tikvDriver.NewTikvStore(ctx, option)
		if err != nil {
			return nil, err
		}
		return store, nil
	case storagetype.MinIODriver:
		store, err := minIODriver.NewMinioDriver(ctx, option)
		if err != nil {
			return nil, err
		}
		return store, nil
	case storagetype.S3DRIVER:
		store, err := S3Driver.NewS3Driver(ctx, option)
		if err != nil {
			return nil, err
		}
		return store, nil
	}
	return nil, errors.New("unsupported driver")
}

View File

@ -0,0 +1,79 @@
package storagetype
import (
"context"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
)
// Key is a raw storage key.
type Key = []byte

// Value is a raw storage value.
type Value = []byte

// Timestamp is the logical version timestamp encoded into keys.
type Timestamp = typeutil.Timestamp

// DriverType names a storage backend implementation.
type DriverType = string

// SegmentIndex is a serialized per-segment index blob.
type SegmentIndex = []byte

// SegmentDL is a serialized per-segment delete log.
type SegmentDL = []byte

// Option carries backend selection and connection settings for NewStore.
type Option struct {
	Type        DriverType
	TikvAddress string
	BucketName  string
}

// Supported storage backends.
const (
	MinIODriver DriverType = "MinIO"
	TIKVDriver  DriverType = "TIKV"
	S3DRIVER    DriverType = "S3"
)
/*
type Store interface {
Get(ctx context.Context, key Key, timestamp Timestamp) (Value, error)
BatchGet(ctx context.Context, keys [] Key, timestamp Timestamp) ([]Value, error)
Set(ctx context.Context, key Key, v Value, timestamp Timestamp) error
BatchSet(ctx context.Context, keys []Key, v []Value, timestamp Timestamp) error
Delete(ctx context.Context, key Key, timestamp Timestamp) error
BatchDelete(ctx context.Context, keys []Key, timestamp Timestamp) error
Close() error
}
*/
// storeEngine is the minimal flat key/value surface a backend must provide;
// versioning semantics are layered on top of it by Store implementations.
type storeEngine interface {
	Put(ctx context.Context, key Key, value Value) error
	Get(ctx context.Context, key Key) (Value, error)
	GetByPrefix(ctx context.Context, prefix Key, keyOnly bool) ([]Key, []Value, error)
	Scan(ctx context.Context, startKey Key, endKey Key, limit int, keyOnly bool) ([]Key, []Value, error)
	Delete(ctx context.Context, key Key) error
	DeleteByPrefix(ctx context.Context, prefix Key) error
	DeleteRange(ctx context.Context, keyStart Key, keyEnd Key) error
}

// Store is the versioned row/log/segment-metadata interface implemented by
// each storage driver (TiKV, MinIO, S3).
type Store interface {
	//put(ctx context.Context, key Key, value Value, timestamp Timestamp, suffix string) error
	//scanLE(ctx context.Context, key Key, timestamp Timestamp, keyOnly bool) ([]Timestamp, []Key, []Value, error)
	//scanGE(ctx context.Context, key Key, timestamp Timestamp, keyOnly bool) ([]Timestamp, []Key, []Value, error)
	//deleteLE(ctx context.Context, key Key, timestamp Timestamp) error
	//deleteGE(ctx context.Context, key Key, timestamp Timestamp) error
	//deleteRange(ctx context.Context, key Key, start Timestamp, end Timestamp) error

	// Versioned row access: reads return the newest version at or before the
	// given timestamp; deletes write tombstone versions.
	GetRow(ctx context.Context, key Key, timestamp Timestamp) (Value, error)
	GetRows(ctx context.Context, keys []Key, timestamps []Timestamp) ([]Value, error)
	PutRow(ctx context.Context, key Key, value Value, segment string, timestamp Timestamp) error
	PutRows(ctx context.Context, keys []Key, values []Value, segments []string, timestamps []Timestamp) error
	GetSegments(ctx context.Context, key Key, timestamp Timestamp) ([]string, error)
	DeleteRow(ctx context.Context, key Key, timestamp Timestamp) error
	DeleteRows(ctx context.Context, keys []Key, timestamps []Timestamp) error

	// Append-style log storage, filterable by channel id.
	PutLog(ctx context.Context, key Key, value Value, timestamp Timestamp, channel int) error
	GetLog(ctx context.Context, start Timestamp, end Timestamp, channels []int) ([]Value, error)

	// Per-segment metadata blobs: index and delete log.
	GetSegmentIndex(ctx context.Context, segment string) (SegmentIndex, error)
	PutSegmentIndex(ctx context.Context, segment string, index SegmentIndex) error
	DeleteSegmentIndex(ctx context.Context, segment string) error
	GetSegmentDL(ctx context.Context, segment string) (SegmentDL, error)
	PutSegmentDL(ctx context.Context, segment string, log SegmentDL) error
	DeleteSegmentDL(ctx context.Context, segment string) error
}

View File

@ -48,7 +48,7 @@ type SegmentDescription struct {
CloseTime Timestamp
}
func (c *Client) FlushSegment(segmentID UniqueID, collectionID UniqueID, partitionTag string, timestamp Timestamp) error {
func (c *Client) FlushSegment(segmentID UniqueID) error {
baseMsg := msgstream.BaseMsg{
BeginTimestamp: 0,
EndTimestamp: 0,
@ -56,11 +56,9 @@ func (c *Client) FlushSegment(segmentID UniqueID, collectionID UniqueID, partiti
}
flushMsg := internalPb.FlushMsg{
MsgType: internalPb.MsgType_kFlush,
SegmentID: segmentID,
CollectionID: collectionID,
PartitionTag: partitionTag,
Timestamp: timestamp,
MsgType: internalPb.MsgType_kFlush,
SegmentID: segmentID,
Timestamp: Timestamp(0),
}
fMsg := &msgstream.FlushMsg{

View File

@ -1,37 +0,0 @@
package writenode
import (
"log"
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
)
// Collection pairs a collection id with its parsed schema.
type Collection struct {
	schema *schemapb.CollectionSchema
	id     UniqueID
}

// Name returns the schema-declared collection name.
func (c *Collection) Name() string {
	return c.schema.Name
}

// ID returns the collection's unique id.
func (c *Collection) ID() UniqueID {
	return c.id
}

// newCollection parses schemaStr (proto text format) into a Collection.
// On a parse failure it logs the error and returns nil — callers must be
// prepared for a nil result.
func newCollection(collectionID UniqueID, schemaStr string) *Collection {
	var schema schemapb.CollectionSchema
	err := proto.UnmarshalText(schemaStr, &schema)
	if err != nil {
		log.Println(err)
		return nil
	}
	var newCollection = &Collection{
		schema: &schema,
		id:     collectionID,
	}
	return newCollection
}

View File

@ -1,94 +0,0 @@
package writenode
import (
"strconv"
"sync"
"github.com/zilliztech/milvus-distributed/internal/errors"
)
// collectionReplica is the write node's in-memory view of collection
// metadata, safe for concurrent use.
type collectionReplica interface {
	// collection
	getCollectionNum() int
	addCollection(collectionID UniqueID, schemaBlob string) error
	removeCollection(collectionID UniqueID) error
	getCollectionByID(collectionID UniqueID) (*Collection, error)
	getCollectionByName(collectionName string) (*Collection, error)
	hasCollection(collectionID UniqueID) bool
}

// collectionReplicaImpl guards a flat collection slice with a RWMutex;
// lookups are linear scans, which is fine for the small collection counts
// this node handles.
type collectionReplicaImpl struct {
	mu          sync.RWMutex
	collections []*Collection
}

//----------------------------------------------------------------------------------------------------- collection

// getCollectionNum returns how many collections are currently registered.
func (colReplica *collectionReplicaImpl) getCollectionNum() int {
	colReplica.mu.RLock()
	defer colReplica.mu.RUnlock()
	return len(colReplica.collections)
}

// addCollection registers a collection parsed from schemaBlob. Note it
// always returns nil, even if newCollection failed and appended nil.
func (colReplica *collectionReplicaImpl) addCollection(collectionID UniqueID, schemaBlob string) error {
	colReplica.mu.Lock()
	defer colReplica.mu.Unlock()

	var newCollection = newCollection(collectionID, schemaBlob)
	colReplica.collections = append(colReplica.collections, newCollection)

	return nil
}

// removeCollection drops the collection with the given id by rebuilding the
// slice without it; removing an unknown id is a silent no-op.
func (colReplica *collectionReplicaImpl) removeCollection(collectionID UniqueID) error {
	colReplica.mu.Lock()
	defer colReplica.mu.Unlock()

	tmpCollections := make([]*Collection, 0)
	for _, col := range colReplica.collections {
		if col.ID() != collectionID {
			tmpCollections = append(tmpCollections, col)
		}
	}
	colReplica.collections = tmpCollections
	return nil
}

// getCollectionByID returns the collection with the given id, or an error if
// it is not registered.
func (colReplica *collectionReplicaImpl) getCollectionByID(collectionID UniqueID) (*Collection, error) {
	colReplica.mu.RLock()
	defer colReplica.mu.RUnlock()

	for _, collection := range colReplica.collections {
		if collection.ID() == collectionID {
			return collection, nil
		}
	}
	return nil, errors.New("cannot find collection, id = " + strconv.FormatInt(collectionID, 10))
}

// getCollectionByName returns the collection with the given name, or an
// error if it is not registered.
func (colReplica *collectionReplicaImpl) getCollectionByName(collectionName string) (*Collection, error) {
	colReplica.mu.RLock()
	defer colReplica.mu.RUnlock()

	for _, collection := range colReplica.collections {
		if collection.Name() == collectionName {
			return collection, nil
		}
	}
	return nil, errors.New("Cannot found collection: " + collectionName)
}

// hasCollection reports whether a collection with the given id is registered.
func (colReplica *collectionReplicaImpl) hasCollection(collectionID UniqueID) bool {
	colReplica.mu.RLock()
	defer colReplica.mu.RUnlock()

	for _, col := range colReplica.collections {
		if col.ID() == collectionID {
			return true
		}
	}
	return false
}

View File

@ -1,153 +0,0 @@
package writenode
import (
"testing"
"github.com/golang/protobuf/proto"
"github.com/zilliztech/milvus-distributed/internal/proto/commonpb"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/stretchr/testify/assert"
)
// newReplica builds an empty in-memory collectionReplica for tests.
func newReplica() collectionReplica {
	collections := make([]*Collection, 0)

	var replica collectionReplica = &collectionReplicaImpl{
		collections: collections,
	}
	return replica
}

// genTestCollectionMeta builds a minimal collection meta with one 16-dim
// float-vector field and one int32 field.
func genTestCollectionMeta(collectionName string, collectionID UniqueID) *etcdpb.CollectionMeta {
	fieldVec := schemapb.FieldSchema{
		FieldID:      UniqueID(100),
		Name:         "vec",
		IsPrimaryKey: false,
		DataType:     schemapb.DataType_VECTOR_FLOAT,
		TypeParams: []*commonpb.KeyValuePair{
			{
				Key:   "dim",
				Value: "16",
			},
		},
		IndexParams: []*commonpb.KeyValuePair{
			{
				Key:   "metric_type",
				Value: "L2",
			},
		},
	}

	fieldInt := schemapb.FieldSchema{
		FieldID:      UniqueID(101),
		Name:         "age",
		IsPrimaryKey: false,
		DataType:     schemapb.DataType_INT32,
	}

	schema := schemapb.CollectionSchema{
		Name:   collectionName,
		AutoID: true,
		Fields: []*schemapb.FieldSchema{
			&fieldVec, &fieldInt,
		},
	}

	collectionMeta := etcdpb.CollectionMeta{
		ID:            collectionID,
		Schema:        &schema,
		CreateTime:    Timestamp(0),
		SegmentIDs:    []UniqueID{0},
		PartitionTags: []string{"default"},
	}

	return &collectionMeta
}

// initTestMeta registers a generated test collection in replica and asserts
// it is retrievable by name with the expected id.
func initTestMeta(t *testing.T, replica collectionReplica, collectionName string, collectionID UniqueID, segmentID UniqueID) {
	collectionMeta := genTestCollectionMeta(collectionName, collectionID)

	schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
	assert.NotEqual(t, "", schemaBlob)

	var err = replica.addCollection(collectionMeta.ID, schemaBlob)
	assert.NoError(t, err)

	collection, err := replica.getCollectionByName(collectionName)
	assert.NoError(t, err)
	assert.Equal(t, collection.Name(), collectionName)
	assert.Equal(t, collection.ID(), collectionID)
	assert.Equal(t, replica.getCollectionNum(), 1)
}

//----------------------------------------------------------------------------------------------------- collection

// TestCollectionReplica_getCollectionNum checks the count after one add.
func TestCollectionReplica_getCollectionNum(t *testing.T) {
	replica := newReplica()
	initTestMeta(t, replica, "collection0", 0, 0)
	assert.Equal(t, replica.getCollectionNum(), 1)
}

// TestCollectionReplica_addCollection checks registering a collection.
func TestCollectionReplica_addCollection(t *testing.T) {
	replica := newReplica()
	initTestMeta(t, replica, "collection0", 0, 0)
}

// TestCollectionReplica_removeCollection checks the count drops after remove.
func TestCollectionReplica_removeCollection(t *testing.T) {
	replica := newReplica()
	initTestMeta(t, replica, "collection0", 0, 0)
	assert.Equal(t, replica.getCollectionNum(), 1)

	err := replica.removeCollection(0)
	assert.NoError(t, err)
	assert.Equal(t, replica.getCollectionNum(), 0)
}

// TestCollectionReplica_getCollectionByID checks lookup by id.
func TestCollectionReplica_getCollectionByID(t *testing.T) {
	replica := newReplica()
	collectionName := "collection0"
	collectionID := UniqueID(0)
	initTestMeta(t, replica, collectionName, collectionID, 0)
	targetCollection, err := replica.getCollectionByID(collectionID)
	assert.NoError(t, err)
	assert.NotNil(t, targetCollection)
	assert.Equal(t, targetCollection.Name(), collectionName)
	assert.Equal(t, targetCollection.ID(), collectionID)
}

// TestCollectionReplica_getCollectionByName checks lookup by name.
func TestCollectionReplica_getCollectionByName(t *testing.T) {
	replica := newReplica()
	collectionName := "collection0"
	collectionID := UniqueID(0)
	initTestMeta(t, replica, collectionName, collectionID, 0)

	targetCollection, err := replica.getCollectionByName(collectionName)
	assert.NoError(t, err)
	assert.NotNil(t, targetCollection)
	assert.Equal(t, targetCollection.Name(), collectionName)
	assert.Equal(t, targetCollection.ID(), collectionID)
}

// TestCollectionReplica_hasCollection checks membership for known and
// unknown ids.
func TestCollectionReplica_hasCollection(t *testing.T) {
	replica := newReplica()
	collectionName := "collection0"
	collectionID := UniqueID(0)
	initTestMeta(t, replica, collectionName, collectionID, 0)

	hasCollection := replica.hasCollection(collectionID)
	assert.Equal(t, hasCollection, true)
	hasCollection = replica.hasCollection(UniqueID(1))
	assert.Equal(t, hasCollection, false)
}

// TestCollectionReplica_freeAll only exercises setup/teardown.
func TestCollectionReplica_freeAll(t *testing.T) {
	replica := newReplica()
	collectionName := "collection0"
	collectionID := UniqueID(0)
	initTestMeta(t, replica, collectionName, collectionID, 0)
}

View File

@ -1,34 +0,0 @@
package writenode
import (
"testing"
"github.com/golang/protobuf/proto"
"github.com/stretchr/testify/assert"
)
// TestCollection_newCollection checks that a Collection built from a
// marshaled schema exposes the expected name and id.
func TestCollection_newCollection(t *testing.T) {
	collectionName := "collection0"
	collectionID := UniqueID(0)
	collectionMeta := genTestCollectionMeta(collectionName, collectionID)

	schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
	assert.NotEqual(t, "", schemaBlob)

	collection := newCollection(collectionMeta.ID, schemaBlob)
	assert.Equal(t, collection.Name(), collectionName)
	assert.Equal(t, collection.ID(), collectionID)
}

// TestCollection_deleteCollection mirrors the construction test; there is no
// explicit delete API on Collection, so it only re-verifies construction.
func TestCollection_deleteCollection(t *testing.T) {
	collectionName := "collection0"
	collectionID := UniqueID(0)
	collectionMeta := genTestCollectionMeta(collectionName, collectionID)

	schemaBlob := proto.MarshalTextString(collectionMeta.Schema)
	assert.NotEqual(t, "", schemaBlob)

	collection := newCollection(collectionMeta.ID, schemaBlob)
	assert.Equal(t, collection.Name(), collectionName)
	assert.Equal(t, collection.ID(), collectionID)
}

View File

@ -12,18 +12,16 @@ type dataSyncService struct {
fg *flowgraph.TimeTickedFlowGraph
ddChan chan *ddlFlushSyncMsg
insertChan chan *insertFlushSyncMsg
replica collectionReplica
}
func newDataSyncService(ctx context.Context,
ddChan chan *ddlFlushSyncMsg, insertChan chan *insertFlushSyncMsg, replica collectionReplica) *dataSyncService {
ddChan chan *ddlFlushSyncMsg, insertChan chan *insertFlushSyncMsg) *dataSyncService {
return &dataSyncService{
ctx: ctx,
fg: nil,
ddChan: ddChan,
insertChan: insertChan,
replica: replica,
}
}
@ -48,8 +46,8 @@ func (dsService *dataSyncService) initNodes() {
var filterDmNode Node = newFilteredDmNode()
var ddNode Node = newDDNode(dsService.ctx, dsService.ddChan, dsService.replica)
var insertBufferNode Node = newInsertBufferNode(dsService.ctx, dsService.insertChan, dsService.replica)
var ddNode Node = newDDNode(dsService.ctx, dsService.ddChan)
var insertBufferNode Node = newInsertBufferNode(dsService.ctx, dsService.insertChan)
dsService.fg.AddNode(&dmStreamNode)
dsService.fg.AddNode(&ddStreamNode)

View File

@ -197,8 +197,7 @@ func TestDataSyncService_Start(t *testing.T) {
assert.NoError(t, err)
// dataSync
replica := newReplica()
node.dataSyncService = newDataSyncService(node.ctx, nil, nil, replica)
node.dataSyncService = newDataSyncService(node.ctx, nil, nil)
go node.dataSyncService.start()
node.Close()

View File

@ -9,6 +9,9 @@ import (
"strconv"
"github.com/golang/protobuf/proto"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/zilliztech/milvus-distributed/internal/allocator"
"github.com/zilliztech/milvus-distributed/internal/kv"
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
@ -27,7 +30,6 @@ type ddNode struct {
idAllocator *allocator.IDAllocator
kv kv.Base
replica collectionReplica
}
type ddData struct {
@ -226,15 +228,6 @@ func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
log.Println(err)
return
}
schemaStr := proto.MarshalTextString(&schema)
// add collection
err = ddNode.replica.addCollection(collectionID, schemaStr)
if err != nil {
log.Println(err)
return
}
collectionName := schema.Name
ddNode.ddMsg.collectionRecords[collectionName] = append(ddNode.ddMsg.collectionRecords[collectionName],
metaOperateRecord{
@ -259,11 +252,6 @@ func (ddNode *ddNode) createCollection(msg *msgstream.CreateCollectionMsg) {
func (ddNode *ddNode) dropCollection(msg *msgstream.DropCollectionMsg) {
collectionID := msg.CollectionID
err := ddNode.replica.removeCollection(collectionID)
if err != nil {
log.Println(err)
}
// remove collection
if _, ok := ddNode.ddRecords.collectionRecords[collectionID]; !ok {
err := errors.New("cannot found collection " + strconv.FormatInt(collectionID, 10))
@ -359,7 +347,7 @@ func (ddNode *ddNode) dropPartition(msg *msgstream.DropPartitionMsg) {
ddNode.ddBuffer.ddData[collectionID].eventTypes = append(ddNode.ddBuffer.ddData[collectionID].eventTypes, storage.DropPartitionEventType)
}
func newDDNode(ctx context.Context, outCh chan *ddlFlushSyncMsg, replica collectionReplica) *ddNode {
func newDDNode(ctx context.Context, outCh chan *ddlFlushSyncMsg) *ddNode {
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
@ -372,16 +360,19 @@ func newDDNode(ctx context.Context, outCh chan *ddlFlushSyncMsg, replica collect
partitionRecords: make(map[UniqueID]interface{}),
}
bucketName := Params.MinioBucketName
option := &miniokv.Option{
Address: Params.MinioAddress,
AccessKeyID: Params.MinioAccessKeyID,
SecretAccessKeyID: Params.MinioSecretAccessKey,
UseSSL: Params.MinioUseSSL,
BucketName: bucketName,
CreateBucket: true,
minIOEndPoint := Params.MinioAddress
minIOAccessKeyID := Params.MinioAccessKeyID
minIOSecretAccessKey := Params.MinioSecretAccessKey
minIOUseSSL := Params.MinioUseSSL
minIOClient, err := minio.New(minIOEndPoint, &minio.Options{
Creds: credentials.NewStaticV4(minIOAccessKeyID, minIOSecretAccessKey, ""),
Secure: minIOUseSSL,
})
if err != nil {
panic(err)
}
minioKV, err := miniokv.NewMinIOKV(ctx, option)
bucketName := Params.MinioBucketName
minioKV, err := miniokv.NewMinIOKV(ctx, minIOClient, bucketName)
if err != nil {
panic(err)
}
@ -406,6 +397,5 @@ func newDDNode(ctx context.Context, outCh chan *ddlFlushSyncMsg, replica collect
idAllocator: idAllocator,
kv: minioKV,
replica: replica,
}
}

View File

@ -45,8 +45,8 @@ func TestFlowGraphDDNode_Operate(t *testing.T) {
go fService.start()
Params.FlushDdBufSize = 4
replica := newReplica()
ddNode := newDDNode(ctx, ddChan, replica)
ddNode := newDDNode(ctx, ddChan)
colID := UniqueID(0)
colName := "col-test-0"

View File

@ -7,10 +7,15 @@ import (
"log"
"path"
"strconv"
"time"
"unsafe"
"github.com/golang/protobuf/proto"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/zilliztech/milvus-distributed/internal/allocator"
"github.com/zilliztech/milvus-distributed/internal/kv"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
miniokv "github.com/zilliztech/milvus-distributed/internal/kv/minio"
"github.com/zilliztech/milvus-distributed/internal/msgstream"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
@ -18,6 +23,7 @@ import (
"github.com/zilliztech/milvus-distributed/internal/proto/schemapb"
"github.com/zilliztech/milvus-distributed/internal/storage"
"github.com/zilliztech/milvus-distributed/internal/util/typeutil"
"go.etcd.io/etcd/clientv3"
)
const (
@ -31,13 +37,13 @@ type (
insertBufferNode struct {
BaseNode
kvClient *etcdkv.EtcdKV
insertBuffer *insertBuffer
minIOKV kv.Base
minioPrifex string
idAllocator *allocator.IDAllocator
outCh chan *insertFlushSyncMsg
pulsarWriteNodeTimeTickStream *msgstream.PulsarMsgStream
replica collectionReplica
}
insertBuffer struct {
@ -101,7 +107,6 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
continue
}
currentSegID := msg.GetSegmentID()
collectionName := msg.GetCollectionName()
idata, ok := ibNode.insertBuffer.insertData[currentSegID]
if !ok {
@ -111,19 +116,16 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
}
// 1.1 Get CollectionMeta from etcd
collection, err := ibNode.replica.getCollectionByName(collectionName)
//collSchema, err := ibNode.getCollectionSchemaByName(collectionName)
segMeta, collMeta, err := ibNode.getMeta(currentSegID)
if err != nil {
// GOOSE TODO add error handler
log.Println("Get meta wrong:", err)
continue
}
collectionID := collection.ID()
collSchema := collection.schema
// 1.2 Get Fields
var pos int = 0 // Record position of blob
for _, field := range collSchema.Fields {
for _, field := range collMeta.Schema.Fields {
switch field.DataType {
case schemapb.DataType_VECTOR_FLOAT:
var dim int
@ -370,10 +372,7 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
log.Println("partitionTag to partitionID wrong")
// TODO GOOSE add error handler
}
collMeta := &etcdpb.CollectionMeta{
Schema: collSchema,
ID: collectionID,
}
inCodec := storage.NewInsertCodec(collMeta)
// buffer data to binlogs
@ -389,7 +388,7 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
log.Println(".. Clearing buffer")
// 1.5.2 binLogs -> minIO/S3
collIDStr := strconv.FormatInt(collectionID, 10)
collIDStr := strconv.FormatInt(segMeta.GetCollectionID(), 10)
partitionIDStr := strconv.FormatInt(partitionID, 10)
segIDStr := strconv.FormatInt(currentSegID, 10)
keyPrefix := path.Join(ibNode.minioPrifex, collIDStr, partitionIDStr, segIDStr)
@ -448,24 +447,20 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
for _, msg := range iMsg.flushMessages {
currentSegID := msg.GetSegmentID()
flushTs := msg.GetTimestamp()
partitionTag := msg.GetPartitionTag()
collectionID := msg.GetCollectionID()
log.Printf(". Receiving flush message segID(%v)...", currentSegID)
if ibNode.insertBuffer.size(currentSegID) > 0 {
log.Println(".. Buffer not empty, flushing ...")
collSchema, err := ibNode.getCollectionSchemaByID(collectionID)
segMeta, collMeta, err := ibNode.getMeta(currentSegID)
if err != nil {
// GOOSE TODO add error handler
log.Println("Get meta wrong: ", err)
}
collMeta := &etcdpb.CollectionMeta{
Schema: collSchema,
ID: collectionID,
}
inCodec := storage.NewInsertCodec(collMeta)
// partitionTag -> partitionID
partitionTag := segMeta.GetPartitionTag()
partitionID, err := typeutil.Hash32String(partitionTag)
if err != nil {
// GOOSE TODO add error handler
@ -483,7 +478,7 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
delete(ibNode.insertBuffer.insertData, currentSegID)
// binLogs -> minIO/S3
collIDStr := strconv.FormatInt(collectionID, 10)
collIDStr := strconv.FormatInt(segMeta.GetCollectionID(), 10)
partitionIDStr := strconv.FormatInt(partitionID, 10)
segIDStr := strconv.FormatInt(currentSegID, 10)
keyPrefix := path.Join(ibNode.minioPrifex, collIDStr, partitionIDStr, segIDStr)
@ -542,20 +537,31 @@ func (ibNode *insertBufferNode) Operate(in []*Msg) []*Msg {
return nil
}
func (ibNode *insertBufferNode) getCollectionSchemaByID(collectionID UniqueID) (*schemapb.CollectionSchema, error) {
ret, err := ibNode.replica.getCollectionByID(collectionID)
if err != nil {
return nil, err
}
return ret.schema, nil
}
func (ibNode *insertBufferNode) getMeta(segID UniqueID) (*etcdpb.SegmentMeta, *etcdpb.CollectionMeta, error) {
func (ibNode *insertBufferNode) getCollectionSchemaByName(collectionName string) (*schemapb.CollectionSchema, error) {
ret, err := ibNode.replica.getCollectionByName(collectionName)
segMeta := &etcdpb.SegmentMeta{}
key := path.Join(SegmentPrefix, strconv.FormatInt(segID, 10))
value, err := ibNode.kvClient.Load(key)
if err != nil {
return nil, err
return nil, nil, err
}
return ret.schema, nil
err = proto.UnmarshalText(value, segMeta)
if err != nil {
return nil, nil, err
}
collMeta := &etcdpb.CollectionMeta{}
key = path.Join(CollectionPrefix, strconv.FormatInt(segMeta.GetCollectionID(), 10))
value, err = ibNode.kvClient.Load(key)
if err != nil {
return nil, nil, err
}
err = proto.UnmarshalText(value, collMeta)
if err != nil {
return nil, nil, err
}
return segMeta, collMeta, nil
}
func (ibNode *insertBufferNode) writeHardTimeTick(ts Timestamp) error {
@ -576,7 +582,7 @@ func (ibNode *insertBufferNode) writeHardTimeTick(ts Timestamp) error {
return ibNode.pulsarWriteNodeTimeTickStream.Produce(&msgPack)
}
func newInsertBufferNode(ctx context.Context, outCh chan *insertFlushSyncMsg, replica collectionReplica) *insertBufferNode {
func newInsertBufferNode(ctx context.Context, outCh chan *insertFlushSyncMsg) *insertBufferNode {
maxQueueLength := Params.FlowGraphMaxQueueLength
maxParallelism := Params.FlowGraphMaxParallelism
@ -590,18 +596,34 @@ func newInsertBufferNode(ctx context.Context, outCh chan *insertFlushSyncMsg, re
maxSize: maxSize,
}
// MinIO
option := &miniokv.Option{
Address: Params.MinioAddress,
AccessKeyID: Params.MinioAccessKeyID,
SecretAccessKeyID: Params.MinioSecretAccessKey,
UseSSL: Params.MinioUseSSL,
CreateBucket: true,
BucketName: Params.MinioBucketName,
// EtcdKV
ETCDAddr := Params.EtcdAddress
MetaRootPath := Params.MetaRootPath
log.Println("metaRootPath: ", MetaRootPath)
cli, err := clientv3.New(clientv3.Config{
Endpoints: []string{ETCDAddr},
DialTimeout: 5 * time.Second,
})
if err != nil {
panic(err)
}
kvClient := etcdkv.NewEtcdKV(cli, MetaRootPath)
minIOKV, err := miniokv.NewMinIOKV(ctx, option)
// MinIO
minioendPoint := Params.MinioAddress
miniioAccessKeyID := Params.MinioAccessKeyID
miniioSecretAccessKey := Params.MinioSecretAccessKey
minioUseSSL := Params.MinioUseSSL
minioBucketName := Params.MinioBucketName
minioClient, err := minio.New(minioendPoint, &minio.Options{
Creds: credentials.NewStaticV4(miniioAccessKeyID, miniioSecretAccessKey, ""),
Secure: minioUseSSL,
})
if err != nil {
panic(err)
}
minIOKV, err := miniokv.NewMinIOKV(ctx, minioClient, minioBucketName)
if err != nil {
panic(err)
}
@ -622,12 +644,12 @@ func newInsertBufferNode(ctx context.Context, outCh chan *insertFlushSyncMsg, re
return &insertBufferNode{
BaseNode: baseNode,
kvClient: kvClient,
insertBuffer: iBuffer,
minIOKV: minIOKV,
minioPrifex: minioPrefix,
idAllocator: idAllocator,
outCh: outCh,
pulsarWriteNodeTimeTickStream: wTt,
replica: replica,
}
}

View File

@ -47,8 +47,7 @@ func TestFlowGraphInputBufferNode_Operate(t *testing.T) {
go fService.start()
// Params.FlushInsertBufSize = 2
replica := newReplica()
iBNode := newInsertBufferNode(ctx, insertChan, replica)
iBNode := newInsertBufferNode(ctx, insertChan)
newMeta()
inMsg := genInsertMsg()

View File

@ -1,135 +0,0 @@
package writenode
import (
"context"
"fmt"
"log"
"path"
"reflect"
"strings"
"time"
"github.com/golang/protobuf/proto"
"go.etcd.io/etcd/clientv3"
etcdkv "github.com/zilliztech/milvus-distributed/internal/kv/etcd"
"github.com/zilliztech/milvus-distributed/internal/proto/etcdpb"
)
type metaService struct {
ctx context.Context
kvBase *etcdkv.EtcdKV
replica collectionReplica
}
func newMetaService(ctx context.Context, replica collectionReplica) *metaService {
ETCDAddr := Params.EtcdAddress
MetaRootPath := Params.MetaRootPath
cli, _ := clientv3.New(clientv3.Config{
Endpoints: []string{ETCDAddr},
DialTimeout: 5 * time.Second,
})
return &metaService{
ctx: ctx,
kvBase: etcdkv.NewEtcdKV(cli, MetaRootPath),
replica: replica,
}
}
func (mService *metaService) start() {
// init from meta
err := mService.loadCollections()
if err != nil {
log.Fatal("metaService loadCollections failed")
}
}
func GetCollectionObjID(key string) string {
ETCDRootPath := Params.MetaRootPath
prefix := path.Join(ETCDRootPath, CollectionPrefix) + "/"
return strings.TrimPrefix(key, prefix)
}
func isCollectionObj(key string) bool {
ETCDRootPath := Params.MetaRootPath
prefix := path.Join(ETCDRootPath, CollectionPrefix) + "/"
prefix = strings.TrimSpace(prefix)
index := strings.Index(key, prefix)
return index == 0
}
func isSegmentObj(key string) bool {
ETCDRootPath := Params.MetaRootPath
prefix := path.Join(ETCDRootPath, SegmentPrefix) + "/"
prefix = strings.TrimSpace(prefix)
index := strings.Index(key, prefix)
return index == 0
}
func printCollectionStruct(obj *etcdpb.CollectionMeta) {
v := reflect.ValueOf(obj)
v = reflect.Indirect(v)
typeOfS := v.Type()
for i := 0; i < v.NumField(); i++ {
if typeOfS.Field(i).Name == "GrpcMarshalString" {
continue
}
fmt.Printf("Field: %s\tValue: %v\n", typeOfS.Field(i).Name, v.Field(i).Interface())
}
}
func (mService *metaService) processCollectionCreate(id string, value string) {
//println(fmt.Sprintf("Create Collection:$%s$", id))
col := mService.collectionUnmarshal(value)
if col != nil {
schema := col.Schema
schemaBlob := proto.MarshalTextString(schema)
err := mService.replica.addCollection(col.ID, schemaBlob)
if err != nil {
log.Println(err)
}
}
}
func (mService *metaService) loadCollections() error {
keys, values, err := mService.kvBase.LoadWithPrefix(CollectionPrefix)
if err != nil {
return err
}
for i := range keys {
objID := GetCollectionObjID(keys[i])
mService.processCollectionCreate(objID, values[i])
}
return nil
}
//----------------------------------------------------------------------- Unmarshal and Marshal
func (mService *metaService) collectionUnmarshal(value string) *etcdpb.CollectionMeta {
col := etcdpb.CollectionMeta{}
err := proto.UnmarshalText(value, &col)
if err != nil {
log.Println(err)
return nil
}
return &col
}
func (mService *metaService) collectionMarshal(col *etcdpb.CollectionMeta) string {
value := proto.MarshalTextString(col)
if value == "" {
log.Println("marshal collection failed")
return ""
}
return value
}

View File

@ -1,100 +0,0 @@
package writenode
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
)
func TestMetaService_start(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
replica := newReplica()
metaService := newMetaService(ctx, replica)
metaService.start()
}
func TestMetaService_getCollectionObjId(t *testing.T) {
var key = "/collection/collection0"
var collectionObjID1 = GetCollectionObjID(key)
assert.Equal(t, collectionObjID1, "/collection/collection0")
key = "fakeKey"
var collectionObjID2 = GetCollectionObjID(key)
assert.Equal(t, collectionObjID2, "fakeKey")
}
func TestMetaService_isCollectionObj(t *testing.T) {
var key = Params.MetaRootPath + "/collection/collection0"
var b1 = isCollectionObj(key)
assert.Equal(t, b1, true)
key = Params.MetaRootPath + "/segment/segment0"
var b2 = isCollectionObj(key)
assert.Equal(t, b2, false)
}
func TestMetaService_processCollectionCreate(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
replica := newReplica()
metaService := newMetaService(ctx, replica)
defer cancel()
id := "0"
value := `schema: <
name: "test"
fields: <
fieldID:100
name: "vec"
data_type: VECTOR_FLOAT
type_params: <
key: "dim"
value: "16"
>
index_params: <
key: "metric_type"
value: "L2"
>
>
fields: <
fieldID:101
name: "age"
data_type: INT32
type_params: <
key: "dim"
value: "1"
>
>
>
segmentIDs: 0
partition_tags: "default"
`
metaService.processCollectionCreate(id, value)
collectionNum := replica.getCollectionNum()
assert.Equal(t, collectionNum, 1)
collection, err := replica.getCollectionByName("test")
assert.NoError(t, err)
assert.Equal(t, collection.ID(), UniqueID(0))
}
func TestMetaService_loadCollections(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
replica := newReplica()
metaService := newMetaService(ctx, replica)
err2 := (*metaService).loadCollections()
assert.Nil(t, err2)
}

View File

@ -9,25 +9,15 @@ type WriteNode struct {
WriteNodeID uint64
dataSyncService *dataSyncService
flushSyncService *flushSyncService
metaService *metaService
replica collectionReplica
}
func NewWriteNode(ctx context.Context, writeNodeID uint64) *WriteNode {
collections := make([]*Collection, 0)
var replica collectionReplica = &collectionReplicaImpl{
collections: collections,
}
node := &WriteNode{
ctx: ctx,
WriteNodeID: writeNodeID,
dataSyncService: nil,
flushSyncService: nil,
metaService: nil,
replica: replica,
}
return node
@ -45,12 +35,10 @@ func (node *WriteNode) Start() error {
insertChan := make(chan *insertFlushSyncMsg, chanSize)
node.flushSyncService = newFlushSyncService(node.ctx, ddChan, insertChan)
node.dataSyncService = newDataSyncService(node.ctx, ddChan, insertChan, node.replica)
node.metaService = newMetaService(node.ctx, node.replica)
node.dataSyncService = newDataSyncService(node.ctx, ddChan, insertChan)
go node.dataSyncService.start()
go node.flushSyncService.start()
node.metaService.start()
return nil
}

View File

@ -58,10 +58,6 @@ if __name__ == "__main__":
'visitor_name': "ExecExprVisitor",
"parameter_name": 'expr',
},
{
'visitor_name': "VerifyExprVisitor",
"parameter_name": 'expr',
},
],
'PlanNode': [
{
@ -72,10 +68,7 @@ if __name__ == "__main__":
'visitor_name': "ExecPlanNodeVisitor",
"parameter_name": 'node',
},
{
'visitor_name': "VerifyPlanNodeVisitor",
"parameter_name": 'node',
},
]
}
extract_extra_body(visitor_info, query_path)

View File

@ -13,7 +13,7 @@
#include "@@base_visitor@@.h"
namespace @@namespace@@ {
class @@visitor_name@@ : public @@base_visitor@@ {
class @@visitor_name@@ : @@base_visitor@@ {
public:
@@body@@