mirror of https://github.com/milvus-io/milvus.git
parent fe01d54eca
commit 26f06dd732
@@ -7,26 +7,43 @@ run:
    - docs
    - scripts
    - internal/core
    - cmake_build

linters:
  disable-all: true
  enable:
    - staticcheck
    - typecheck
    - goimports
    - misspell
    - gosimple
    - govet
    - ineffassign
    - gosimple
    - staticcheck
    - decorder
    - depguard
    - gofmt
    - goimports
    - gosec
    - revive
    - durationcheck
    - unconvert
    - misspell
    - typecheck
    - durationcheck
    - forbidigo
    - depguard
    - gci
    - whitespace
    - gofumpt
    # - gocritic

linters-settings:
  gci:
    sections:
      - standard
      - default
      - prefix(github.com/milvus-io)
    custom-order: true
  gofumpt:
    lang-version: "1.18"
    module-path: github.com/milvus-io
  goimports:
    local-prefixes: github.com/milvus-io
  revive:
    rules:
      - name: unused-parameter

@@ -92,6 +109,3 @@ issues:
  max-issues-per-linter: 0
  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0

service:
  golangci-lint-version: 1.27.0 # use the fixed version to not introduce new linters unexpectedly
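Note (added for orientation, not part of the repository files): the gci settings above order imports into three sections, standard library first, third-party modules second, and github.com/milvus-io packages last. A minimal sketch of an import block that satisfies that ordering, with an illustrative demo function of my own:

package example

import (
	// standard section
	"context"
	"fmt"

	// default section: third-party modules
	"go.uber.org/zap"

	// prefix(github.com/milvus-io) section
	"github.com/milvus-io/milvus/pkg/log"
)

// demo is illustrative only; it exists so the imports above are used.
func demo() {
	log.Info("import ordering demo", zap.Int("sections", 3))
	fmt.Println(context.Background() != nil)
}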
Makefile (38 changes)
@@ -36,6 +36,14 @@ INSTALL_GOLANGCI_LINT := $(findstring $(GOLANGCI_LINT_VERSION), $(GOLANGCI_LINT_
MOCKERY_VERSION := 2.32.4
MOCKERY_OUTPUT := $(shell $(INSTALL_PATH)/mockery --version 2>/dev/null)
INSTALL_MOCKERY := $(findstring $(MOCKERY_VERSION),$(MOCKERY_OUTPUT))
# gci
GCI_VERSION := 0.11.2
GCI_OUTPUT := $(shell $(INSTALL_PATH)/gci --version 2>/dev/null)
INSTALL_GCI := $(findstring $(GCI_VERSION),$(GCI_OUTPUT))
# gofumpt
GOFUMPT_VERSION := 0.5.0
GOFUMPT_OUTPUT := $(shell $(INSTALL_PATH)/gofumpt --version 2>/dev/null)
INSTALL_GOFUMPT := $(findstring $(GOFUMPT_VERSION),$(GOFUMPT_OUTPUT))

export GIT_BRANCH=master

@@ -97,18 +105,32 @@ else
	@GO111MODULE=on env bash $(PWD)/scripts/gofmt.sh pkg/
endif

lint: tools/bin/revive
	@echo "Running $@ check"
	@tools/bin/revive -formatter friendly -config tools/check/revive.toml ./...
lint-fix: getdeps
	@mkdir -p $(INSTALL_PATH)
	@if [ -z "$(INSTALL_GCI)" ]; then \
		echo "Installing gci v$(GCI_VERSION) to ./bin/" && GOBIN=$(INSTALL_PATH) go install github.com/daixiang0/gci@v$(GCI_VERSION); \
	else \
		echo "gci v$(GCI_VERSION) already installed"; \
	fi
	@if [ -z "$(INSTALL_GOFUMPT)" ]; then \
		echo "Installing gofumpt v$(GOFUMPT_VERSION) to ./bin/" && GOBIN=$(INSTALL_PATH) go install mvdan.cc/gofumpt@v$(GOFUMPT_VERSION); \
	else \
		echo "gofumpt v$(GOFUMPT_VERSION) already installed"; \
	fi
	@echo "Running gofumpt fix"
	@$(INSTALL_PATH)/gofumpt -l -w .
	@echo "Running gci fix"
	@$(INSTALL_PATH)/gci write cmd/ --skip-generated -s standard -s default -s "prefix(github.com/milvus-io)" --custom-order
	@$(INSTALL_PATH)/gci write internal/ --skip-generated -s standard -s default -s "prefix(github.com/milvus-io)" --custom-order
	@$(INSTALL_PATH)/gci write pkg/ --skip-generated -s standard -s default -s "prefix(github.com/milvus-io)" --custom-order
	@$(INSTALL_PATH)/gci write tests/ --skip-generated -s standard -s default -s "prefix(github.com/milvus-io)" --custom-order
	@echo "Running golangci-lint auto-fix"
	@source $(PWD)/scripts/setenv.sh && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --fix --timeout=30m --config $(PWD)/.golangci.yml; cd pkg && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --fix --timeout=30m --config $(PWD)/.golangci.yml

#TODO: Check code specifications by golangci-lint
static-check: getdeps
	@echo "Running $@ check"
	@GO111MODULE=on $(INSTALL_PATH)/golangci-lint cache clean
	@source $(PWD)/scripts/setenv.sh && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --timeout=30m --config ./.golangci.yml ./internal/...
	@source $(PWD)/scripts/setenv.sh && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --timeout=30m --config ./.golangci.yml ./cmd/...
	@source $(PWD)/scripts/setenv.sh && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --timeout=30m --config ./.golangci.yml ./tests/integration/...
	@source $(PWD)/scripts/setenv.sh && cd pkg && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --timeout=30m --config ../.golangci.yml ./...
	@source $(PWD)/scripts/setenv.sh && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --timeout=30m --config $(PWD)/.golangci.yml; cd pkg && GO111MODULE=on $(INSTALL_PATH)/golangci-lint run --timeout=30m --config $(PWD)/.golangci.yml

verifiers: build-cpp getdeps cppcheck fmt static-check
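Aside (mine, not part of the Makefile): the new lint-fix target installs pinned versions of gci and gofumpt into ./bin and then runs gofumpt, gci, and golangci-lint --fix over the tree. The sketch below is hand-written, with made-up names, to show the shape of the mechanical rewrites that recur throughout the rest of this diff: 0o-prefixed octal literals, short variable declarations instead of var for simple locals, and collapsed empty struct bodies.

package example

import "os"

// makeDir: previously this would have been written as
//   var perm = os.FileMode(0755)
// The cleaned-up form uses := and the 0o octal prefix.
func makeDir(dir string) error {
	perm := os.FileMode(0o755)
	return os.MkdirAll(dir, perm)
}

// noop: previously declared with an empty body spread over two lines
// ("type noop struct {" / "}"); the cleaned-up form collapses it.
type noop struct{}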
@@ -19,13 +19,14 @@ package components

import (
	"context"

	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
	grpcdatacoordclient "github.com/milvus-io/milvus/internal/distributed/datacoord"
	"github.com/milvus-io/milvus/internal/util/dependency"
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
	"go.uber.org/zap"
)

// DataCoord implements grpc server of DataCoord server

@@ -19,13 +19,14 @@ package components

import (
	"context"

	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
	grpcdatanode "github.com/milvus-io/milvus/internal/distributed/datanode"
	"github.com/milvus-io/milvus/internal/util/dependency"
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
	"go.uber.org/zap"
)

// DataNode implements DataNode grpc server

@@ -26,8 +26,7 @@ import (
)

// IndexCoord implements IndexCoord grpc server
type IndexCoord struct {
}
type IndexCoord struct{}

// NewIndexCoord creates a new IndexCoord
func NewIndexCoord(ctx context.Context, factory dependency.Factory) (*IndexCoord, error) {

@@ -19,13 +19,14 @@ package components

import (
	"context"

	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
	grpcindexnode "github.com/milvus-io/milvus/internal/distributed/indexnode"
	"github.com/milvus-io/milvus/internal/util/dependency"
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
	"go.uber.org/zap"
)

// IndexNode implements IndexNode grpc server

@@ -43,7 +44,6 @@ func NewIndexNode(ctx context.Context, factory dependency.Factory) (*IndexNode,
	}
	n.svr = svr
	return n, nil

}

// Run starts service

@@ -19,13 +19,14 @@ package components

import (
	"context"

	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
	grpcproxy "github.com/milvus-io/milvus/internal/distributed/proxy"
	"github.com/milvus-io/milvus/internal/util/dependency"
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
	"go.uber.org/zap"
)

// Proxy implements Proxy grpc server

@@ -46,7 +46,6 @@ func NewQueryNode(ctx context.Context, factory dependency.Factory) (*QueryNode,
		ctx: ctx,
		svr: svr,
	}, nil

}

// Run starts service

@@ -19,13 +19,14 @@ package components

import (
	"context"

	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
	rc "github.com/milvus-io/milvus/internal/distributed/rootcoord"
	"github.com/milvus-io/milvus/internal/util/dependency"
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
	"go.uber.org/zap"
)

// RootCoord implements RoodCoord grpc server

@@ -16,8 +16,9 @@

package main

import "C"

import (
	"C"
	"os"

	"github.com/milvus-io/milvus/cmd/milvus"
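Side note (mine, not from the change): cgo's import "C" needs care from import-grouping tools, because any comment placed directly above it is treated as the cgo preamble, so formatters must not separate the two or reorder the line carelessly. A minimal sketch with a made-up preamble:

package main

/*
#include <stdlib.h>
*/
import "C" // the comment directly above is the cgo preamble; this import is kept in its own block

import (
	"os"
)

func main() {
	// illustrative only; exits immediately
	os.Exit(0)
}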
@@ -364,7 +364,6 @@ func getTrashKey(taskType, key string) string {
}

func (c *mck) extractTask(prefix string, keys []string, values []string) {

	for i := range keys {
		taskID, err := strconv.ParseInt(filepath.Base(keys[i]), 10, 64)
		if err != nil {

@@ -520,7 +519,6 @@ func (c *mck) extractVecFieldIndexInfo(taskID int64, infos []*querypb.FieldIndex
func (c *mck) unmarshalTask(taskID int64, t string) (string, []int64, []int64, error) {
	header := commonpb.MsgHeader{}
	err := proto.Unmarshal([]byte(t), &header)

	if err != nil {
		return errReturn(taskID, "MsgHeader", err)
	}

@@ -61,7 +61,7 @@ func (c *stop) formatFlags(args []string, flags *flag.FlagSet) {
func (c *stop) stopPid(filename string, runtimeDir string) error {
	var pid int

	fd, err := os.OpenFile(path.Join(runtimeDir, filename), os.O_RDONLY, 0664)
	fd, err := os.OpenFile(path.Join(runtimeDir, filename), os.O_RDONLY, 0o664)
	if err != nil {
		return err
	}

@@ -14,7 +14,7 @@ import (
)

func makeRuntimeDir(dir string) error {
	perm := os.FileMode(0755)
	perm := os.FileMode(0o755)
	// os.MkdirAll equal to `mkdir -p`
	err := os.MkdirAll(dir, perm)
	if err != nil {

@@ -63,7 +63,7 @@ func createRuntimeDir(sType string) string {
func createPidFile(w io.Writer, filename string, runtimeDir string) (*flock.Flock, error) {
	fileFullName := path.Join(runtimeDir, filename)

	fd, err := os.OpenFile(fileFullName, os.O_CREATE|os.O_RDWR, 0664)
	fd, err := os.OpenFile(fileFullName, os.O_CREATE|os.O_RDWR, 0o664)
	if err != nil {
		return nil, fmt.Errorf("file %s is locked, error = %w", filename, err)
	}

@@ -27,6 +27,10 @@ import (
	"syscall"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus/cmd/components"
	"github.com/milvus-io/milvus/internal/http"
	"github.com/milvus-io/milvus/internal/http/healthz"

@@ -43,9 +47,6 @@ import (
	"github.com/milvus-io/milvus/pkg/util/paramtable"
	_ "github.com/milvus-io/milvus/pkg/util/symbolizer" // support symbolizer and crash dump
	"github.com/milvus-io/milvus/pkg/util/typeutil"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"go.uber.org/zap"
)

// all milvus related metrics is in a separate registry
@@ -7,12 +7,13 @@ import (
	"reflect"
	"strings"

	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/paramtable"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
	"github.com/samber/lo"
	"go.uber.org/zap"
	"golang.org/x/exp/slices"

	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/paramtable"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
)

type DocContent struct {

@@ -106,7 +107,7 @@ type YamlMarshaller struct {
}

func (m *YamlMarshaller) writeYamlRecursive(data []DocContent, level int) {
	var topLevels = typeutil.NewOrderedMap[string, []DocContent]()
	topLevels := typeutil.NewOrderedMap[string, []DocContent]()
	for _, d := range data {
		key := strings.Split(d.key, ".")[level]

@@ -36,5 +36,4 @@ func main() {
	default:
		log.Error(fmt.Sprintf("unknown argument %s", args[1]))
	}

}

@@ -5,9 +5,10 @@ import (
	"os"
	"sort"

	"github.com/milvus-io/milvus/pkg/log"
	"github.com/spf13/viper"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus/pkg/log"
)

func ShowYaml(filepath string) {

@@ -7,12 +7,13 @@ import (
	"strings"

	"github.com/golang/protobuf/proto"
	"go.uber.org/zap"

	etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/etcd"
	"github.com/milvus-io/milvus/pkg/util/tsoutil"
	"go.uber.org/zap"
)

var (

@@ -1,8 +1,9 @@
package allocator

import (
	"github.com/milvus-io/milvus/pkg/util/typeutil"
	"go.uber.org/atomic"

	"github.com/milvus-io/milvus/pkg/util/typeutil"
)

const (
@@ -6,10 +6,8 @@ import (
	"github.com/blang/semver/v4"

	"github.com/milvus-io/milvus/cmd/tools/migration/configs"

	"github.com/milvus-io/milvus/cmd/tools/migration/versions"

	"github.com/milvus-io/milvus/cmd/tools/migration/meta"
	"github.com/milvus-io/milvus/cmd/tools/migration/versions"
	"github.com/milvus-io/milvus/pkg/util"
)

@@ -3,9 +3,9 @@ package backend
import (
	"encoding/json"

	"github.com/milvus-io/milvus/cmd/tools/migration/console"

	"github.com/golang/protobuf/proto"

	"github.com/milvus-io/milvus/cmd/tools/migration/console"
)

type BackupHeaderVersion int32

@@ -78,7 +78,7 @@ func (v *BackupHeaderExtra) ToJSONBytes() []byte {
}

func GetExtra(extra []byte) *BackupHeaderExtra {
	var v = newDefaultBackupHeaderExtra()
	v := newDefaultBackupHeaderExtra()
	err := json.Unmarshal(extra, v)
	if err != nil {
		console.Error(err.Error())

@@ -6,6 +6,7 @@ import (
	"io"

	"github.com/golang/protobuf/proto"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
)

@@ -1,11 +1,12 @@
package backend

import (
	clientv3 "go.etcd.io/etcd/client/v3"

	"github.com/milvus-io/milvus/cmd/tools/migration/configs"
	"github.com/milvus-io/milvus/internal/kv"
	etcdkv "github.com/milvus-io/milvus/internal/kv/etcd"
	"github.com/milvus-io/milvus/pkg/util/etcd"
	clientv3 "go.etcd.io/etcd/client/v3"
)

type etcdBasedBackend struct {
@@ -8,15 +8,13 @@ import (
	"strconv"
	"strings"

	"github.com/golang/protobuf/proto"
	clientv3 "go.etcd.io/etcd/client/v3"

	"github.com/milvus-io/milvus/cmd/tools/migration/configs"
	"github.com/milvus-io/milvus/cmd/tools/migration/legacy"

	"github.com/milvus-io/milvus/cmd/tools/migration/legacy/legacypb"

	"github.com/golang/protobuf/proto"
	"github.com/milvus-io/milvus/cmd/tools/migration/console"
	"github.com/milvus-io/milvus/cmd/tools/migration/legacy"
	"github.com/milvus-io/milvus/cmd/tools/migration/legacy/legacypb"
	"github.com/milvus-io/milvus/cmd/tools/migration/meta"
	"github.com/milvus-io/milvus/cmd/tools/migration/utils"
	"github.com/milvus-io/milvus/cmd/tools/migration/versions"

@@ -56,7 +54,7 @@ func (b etcd210) loadTtAliases() (meta.TtAliasesMeta210, error) {
		tsKey := keys[i]
		tsValue := values[i]
		valueIsTombstone := rootcoord.IsTombstone(tsValue)
		var aliasInfo = &pb.CollectionInfo{} // alias stored in collection info.
		aliasInfo := &pb.CollectionInfo{} // alias stored in collection info.
		if valueIsTombstone {
			aliasInfo = nil
		} else {

@@ -88,7 +86,7 @@ func (b etcd210) loadAliases() (meta.AliasesMeta210, error) {
		key := keys[i]
		value := values[i]
		valueIsTombstone := rootcoord.IsTombstone(value)
		var aliasInfo = &pb.CollectionInfo{} // alias stored in collection info.
		aliasInfo := &pb.CollectionInfo{} // alias stored in collection info.
		if valueIsTombstone {
			aliasInfo = nil
		} else {

@@ -122,7 +120,7 @@ func (b etcd210) loadTtCollections() (meta.TtCollectionsMeta210, error) {
		}

		valueIsTombstone := rootcoord.IsTombstone(tsValue)
		var coll = &pb.CollectionInfo{}
		coll := &pb.CollectionInfo{}
		if valueIsTombstone {
			coll = nil
		} else {

@@ -164,7 +162,7 @@ func (b etcd210) loadCollections() (meta.CollectionsMeta210, error) {
		}

		valueIsTombstone := rootcoord.IsTombstone(value)
		var coll = &pb.CollectionInfo{}
		coll := &pb.CollectionInfo{}
		if valueIsTombstone {
			coll = nil
		} else {

@@ -213,7 +211,7 @@ func (b etcd210) loadCollectionIndexes() (meta.CollectionIndexesMeta210, error)
		key := keys[i]
		value := values[i]

		var index = &pb.IndexInfo{}
		index := &pb.IndexInfo{}
		if err := proto.Unmarshal([]byte(value), index); err != nil {
			return nil, err
		}

@@ -240,7 +238,7 @@ func (b etcd210) loadSegmentIndexes() (meta.SegmentIndexesMeta210, error) {
	for i := 0; i < l; i++ {
		value := values[i]

		var index = &pb.SegmentIndexInfo{}
		index := &pb.SegmentIndexInfo{}
		if err := proto.Unmarshal([]byte(value), index); err != nil {
			return nil, err
		}

@@ -263,7 +261,7 @@ func (b etcd210) loadIndexBuildMeta() (meta.IndexBuildMeta210, error) {
	for i := 0; i < l; i++ {
		value := values[i]

		var record = &legacypb.IndexMeta{}
		record := &legacypb.IndexMeta{}
		if err := proto.Unmarshal([]byte(value), record); err != nil {
			return nil, err
		}

@@ -434,7 +432,7 @@ func (b etcd210) Backup(meta *meta.Meta, backupFile string) error {
		return err
	}
	console.Warning(fmt.Sprintf("backup to: %s", backupFile))
	return ioutil.WriteFile(backupFile, backup, 0600)
	return ioutil.WriteFile(backupFile, backup, 0o600)
}

func (b etcd210) BackupV2(file string) error {

@@ -489,7 +487,7 @@ func (b etcd210) BackupV2(file string) error {
	}

	console.Warning(fmt.Sprintf("backup to: %s", file))
	return ioutil.WriteFile(file, backup, 0600)
	return ioutil.WriteFile(file, backup, 0o600)
}

func (b etcd210) Restore(backupFile string) error {
@@ -3,14 +3,11 @@ package backend
import (
	"fmt"

	"github.com/milvus-io/milvus/internal/metastore/kv/querycoord"

	"github.com/milvus-io/milvus/cmd/tools/migration/configs"

	"github.com/milvus-io/milvus/pkg/util"

	"github.com/milvus-io/milvus/cmd/tools/migration/meta"
	"github.com/milvus-io/milvus/internal/metastore/kv/querycoord"
	"github.com/milvus-io/milvus/internal/metastore/kv/rootcoord"
	"github.com/milvus-io/milvus/pkg/util"
)

// etcd220 implements Backend.

@@ -5,9 +5,8 @@ import (
	"fmt"
	"os"

	"github.com/milvus-io/milvus/cmd/tools/migration/console"

	"github.com/milvus-io/milvus/cmd/tools/migration/configs"
	"github.com/milvus-io/milvus/cmd/tools/migration/console"
)

func Execute(args []string) {

@@ -4,9 +4,7 @@ import (
	"context"

	"github.com/milvus-io/milvus/cmd/tools/migration/configs"

	"github.com/milvus-io/milvus/cmd/tools/migration/console"

	"github.com/milvus-io/milvus/cmd/tools/migration/migration"
)

@@ -6,11 +6,11 @@ import (
	"strconv"
	"strings"

	"github.com/milvus-io/milvus/cmd/tools/migration/legacy/legacypb"

	"github.com/milvus-io/milvus/cmd/tools/migration/allocator"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
	"github.com/milvus-io/milvus/cmd/tools/migration/allocator"
	"github.com/milvus-io/milvus/cmd/tools/migration/legacy/legacypb"
	"github.com/milvus-io/milvus/cmd/tools/migration/versions"
	"github.com/milvus-io/milvus/internal/metastore/model"
	pb "github.com/milvus-io/milvus/internal/proto/etcdpb"

@@ -19,7 +19,6 @@ import (
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/funcutil"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
	"go.uber.org/zap"
)

func alias210ToAlias220(record *pb.CollectionInfo, ts Timestamp) *model.Alias {
@@ -2,11 +2,14 @@ package meta

import (
	"github.com/blang/semver/v4"

	"github.com/milvus-io/milvus/pkg/util/typeutil"
)

type UniqueID = typeutil.UniqueID
type Timestamp = typeutil.Timestamp
type (
	UniqueID  = typeutil.UniqueID
	Timestamp = typeutil.Timestamp
)

type Meta struct {
	SourceVersion semver.Version

@@ -21,14 +21,20 @@ type FieldIndexesWithSchema struct {

type FieldIndexes210 map[UniqueID]*FieldIndexesWithSchema // coll_id -> field indexes.

type TtCollectionsMeta210 map[UniqueID]map[Timestamp]*pb.CollectionInfo // coll_id -> ts -> coll
type CollectionsMeta210 map[UniqueID]*pb.CollectionInfo // coll_id -> coll
type (
	TtCollectionsMeta210 map[UniqueID]map[Timestamp]*pb.CollectionInfo // coll_id -> ts -> coll
	CollectionsMeta210   map[UniqueID]*pb.CollectionInfo               // coll_id -> coll
)

type TtAliasesMeta210 map[string]map[Timestamp]*pb.CollectionInfo // alias name -> ts -> coll
type AliasesMeta210 map[string]*pb.CollectionInfo // alias name -> coll
type (
	TtAliasesMeta210 map[string]map[Timestamp]*pb.CollectionInfo // alias name -> ts -> coll
	AliasesMeta210   map[string]*pb.CollectionInfo               // alias name -> coll
)

type CollectionIndexesMeta210 map[UniqueID]map[UniqueID]*pb.IndexInfo // coll_id -> index_id -> index
type SegmentIndexesMeta210 map[UniqueID]map[UniqueID]*pb.SegmentIndexInfo // seg_id -> index_id -> segment index
type (
	CollectionIndexesMeta210 map[UniqueID]map[UniqueID]*pb.IndexInfo        // coll_id -> index_id -> index
	SegmentIndexesMeta210    map[UniqueID]map[UniqueID]*pb.SegmentIndexInfo // seg_id -> index_id -> segment index
)

type IndexBuildMeta210 map[UniqueID]*legacypb.IndexMeta // index_build_id -> index

@@ -13,23 +13,35 @@ import (
	"github.com/milvus-io/milvus/pkg/util"
)

type TtCollectionsMeta220 map[UniqueID]map[Timestamp]*model.Collection // coll_id -> ts -> coll
type CollectionsMeta220 map[UniqueID]*model.Collection // coll_id -> coll
type (
	TtCollectionsMeta220 map[UniqueID]map[Timestamp]*model.Collection // coll_id -> ts -> coll
	CollectionsMeta220   map[UniqueID]*model.Collection               // coll_id -> coll
)

type TtAliasesMeta220 map[string]map[Timestamp]*model.Alias // alias name -> ts -> coll
type AliasesMeta220 map[string]*model.Alias // alias name -> coll
type (
	TtAliasesMeta220 map[string]map[Timestamp]*model.Alias // alias name -> ts -> coll
	AliasesMeta220   map[string]*model.Alias               // alias name -> coll
)

type TtPartitionsMeta220 map[UniqueID]map[Timestamp][]*model.Partition // coll_id -> ts -> partitions
type PartitionsMeta220 map[UniqueID][]*model.Partition // coll_id -> ts -> partitions
type (
	TtPartitionsMeta220 map[UniqueID]map[Timestamp][]*model.Partition // coll_id -> ts -> partitions
	PartitionsMeta220   map[UniqueID][]*model.Partition               // coll_id -> ts -> partitions
)

type TtFieldsMeta220 map[UniqueID]map[Timestamp][]*model.Field // coll_id -> ts -> fields
type FieldsMeta220 map[UniqueID][]*model.Field // coll_id -> ts -> fields
type (
	TtFieldsMeta220 map[UniqueID]map[Timestamp][]*model.Field // coll_id -> ts -> fields
	FieldsMeta220   map[UniqueID][]*model.Field               // coll_id -> ts -> fields
)

type CollectionIndexesMeta220 map[UniqueID]map[UniqueID]*model.Index // coll_id -> index_id -> index
type SegmentIndexesMeta220 map[UniqueID]map[UniqueID]*model.SegmentIndex // seg_id -> index_id -> segment index
type (
	CollectionIndexesMeta220 map[UniqueID]map[UniqueID]*model.Index        // coll_id -> index_id -> index
	SegmentIndexesMeta220    map[UniqueID]map[UniqueID]*model.SegmentIndex // seg_id -> index_id -> segment index
)

type CollectionLoadInfo220 map[UniqueID]*model.CollectionLoadInfo // collectionID -> CollectionLoadInfo
type PartitionLoadInfo220 map[UniqueID]map[UniqueID]*model.PartitionLoadInfo // collectionID, partitionID -> PartitionLoadInfo
type (
	CollectionLoadInfo220 map[UniqueID]*model.CollectionLoadInfo              // collectionID -> CollectionLoadInfo
	PartitionLoadInfo220  map[UniqueID]map[UniqueID]*model.PartitionLoadInfo  // collectionID, partitionID -> PartitionLoadInfo
)

func (meta *TtCollectionsMeta220) GenerateSaves(sourceVersion semver.Version) (map[string]string, error) {
	saves := make(map[string]string)
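Aside (mine, not part of the change set): the type (...) blocks introduced above are the grouped form for runs of consecutive single-line type declarations. A tiny hand-written sketch of the same transformation, with hypothetical aliases:

package meta

// Before: each alias declared on its own line.
//   type UniqueID = int64
//   type Timestamp = uint64
//
// After: consecutive declarations grouped into a single block.
type (
	UniqueID  = int64  // hypothetical alias, for illustration only
	Timestamp = uint64 // hypothetical alias, for illustration only
)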
@ -4,8 +4,7 @@ import (
|
|||
"github.com/milvus-io/milvus/cmd/tools/migration/meta"
|
||||
)
|
||||
|
||||
type migrator210To220 struct {
|
||||
}
|
||||
type migrator210To220 struct{}
|
||||
|
||||
func (m migrator210To220) Migrate(metas *meta.Meta) (*meta.Meta, error) {
|
||||
return meta.From210To220(metas)
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/blang/semver/v4"
|
||||
|
||||
"github.com/milvus-io/milvus/cmd/tools/migration/meta"
|
||||
"github.com/milvus-io/milvus/cmd/tools/migration/versions"
|
||||
)
|
||||
|
|
|
@ -7,20 +7,15 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/util/sessionutil"
|
||||
"github.com/blang/semver/v4"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
"go.uber.org/atomic"
|
||||
|
||||
"github.com/milvus-io/milvus/cmd/tools/migration/versions"
|
||||
|
||||
"github.com/blang/semver/v4"
|
||||
|
||||
"github.com/milvus-io/milvus/cmd/tools/migration/configs"
|
||||
|
||||
"github.com/milvus-io/milvus/cmd/tools/migration/console"
|
||||
|
||||
"github.com/milvus-io/milvus/cmd/tools/migration/backend"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
|
||||
"github.com/milvus-io/milvus/cmd/tools/migration/configs"
|
||||
"github.com/milvus-io/milvus/cmd/tools/migration/console"
|
||||
"github.com/milvus-io/milvus/cmd/tools/migration/versions"
|
||||
"github.com/milvus-io/milvus/internal/util/sessionutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/etcd"
|
||||
)
|
||||
|
||||
|
|
|
@ -5,13 +5,14 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/metastore/kv/rootcoord"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
)
|
||||
|
||||
type UniqueID = typeutil.UniqueID
|
||||
type Timestamp = typeutil.Timestamp
|
||||
type (
|
||||
UniqueID = typeutil.UniqueID
|
||||
Timestamp = typeutil.Timestamp
|
||||
)
|
||||
|
||||
type errNotOfTsKey struct {
|
||||
key string
|
||||
|
|
|
@ -23,8 +23,9 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
|
||||
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
|
||||
|
@ -83,7 +84,6 @@ func (ia *IDAllocator) gatherReqIDCount() uint32 {
|
|||
}
|
||||
|
||||
func (ia *IDAllocator) syncID() (bool, error) {
|
||||
|
||||
need := ia.gatherReqIDCount()
|
||||
if need < ia.countPerRPC {
|
||||
need = ia.countPerRPC
|
||||
|
|
|
@ -20,14 +20,14 @@ import (
|
|||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
)
|
||||
|
||||
type mockIDAllocator struct {
|
||||
}
|
||||
type mockIDAllocator struct{}
|
||||
|
||||
func (tso *mockIDAllocator) AllocID(ctx context.Context, req *rootcoordpb.AllocIDRequest) (*rootcoordpb.AllocIDResponse, error) {
|
||||
return &rootcoordpb.AllocIDResponse{
|
||||
|
|
|
@ -20,8 +20,9 @@ import (
|
|||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
)
|
||||
|
||||
func TestAllocator_Basic(t *testing.T) {
|
||||
|
|
|
@ -24,5 +24,4 @@ func defaultBuildIndexPolicy(buildIDs []UniqueID) {
|
|||
sort.Slice(buildIDs, func(i, j int) bool {
|
||||
return buildIDs[i] < buildIDs[j]
|
||||
})
|
||||
|
||||
}
|
||||
|
|
|
@ -41,8 +41,8 @@ type channelStateTimer struct {
|
|||
|
||||
etcdWatcher clientv3.WatchChan
|
||||
timeoutWatcher chan *ackEvent
|
||||
//Modifies afterwards must guarantee that runningTimerCount is updated synchronized with runningTimers
|
||||
//in order to keep consistency
|
||||
// Modifies afterwards must guarantee that runningTimerCount is updated synchronized with runningTimers
|
||||
// in order to keep consistency
|
||||
runningTimerCount atomic.Int32
|
||||
}
|
||||
|
||||
|
@ -185,7 +185,6 @@ func parseWatchInfo(key string, data []byte) (*datapb.ChannelWatchInfo, error) {
|
|||
watchInfo := datapb.ChannelWatchInfo{}
|
||||
if err := proto.Unmarshal(data, &watchInfo); err != nil {
|
||||
return nil, fmt.Errorf("invalid event data: fail to parse ChannelWatchInfo, key: %s, err: %v", key, err)
|
||||
|
||||
}
|
||||
|
||||
if watchInfo.Vchan == nil {
|
||||
|
|
|
@ -21,11 +21,11 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
)
|
||||
|
||||
func TestChannelStateTimer(t *testing.T) {
|
||||
|
@ -242,6 +242,5 @@ func TestChannelStateTimer_parses(t *testing.T) {
|
|||
for _, test := range tests {
|
||||
assert.Equal(t, test.outAckType, getAckType(test.inState))
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
|
|
|
@ -467,7 +467,7 @@ func (c *ChannelManager) fillChannelWatchInfo(op *ChannelOp) {
|
|||
|
||||
// fillChannelWatchInfoWithState updates the channel op by filling in channel watch info.
|
||||
func (c *ChannelManager) fillChannelWatchInfoWithState(op *ChannelOp, state datapb.ChannelWatchState) []string {
|
||||
var channelsWithTimer = []string{}
|
||||
channelsWithTimer := []string{}
|
||||
startTs := time.Now().Unix()
|
||||
checkInterval := Params.DataCoordCfg.WatchTimeoutInterval.GetAsDuration(time.Second)
|
||||
for _, ch := range op.Channels {
|
||||
|
@ -607,7 +607,7 @@ type ackEvent struct {
|
|||
}
|
||||
|
||||
func (c *ChannelManager) updateWithTimer(updates ChannelOpSet, state datapb.ChannelWatchState) error {
|
||||
var channelsWithTimer = []string{}
|
||||
channelsWithTimer := []string{}
|
||||
for _, op := range updates {
|
||||
if op.Type == Add {
|
||||
channelsWithTimer = append(channelsWithTimer, c.fillChannelWatchInfoWithState(op, state)...)
|
||||
|
|
|
@ -203,7 +203,7 @@ func TestChannelManager_StateTransfer(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("ToRelease-ReleaseSuccess-Reassign-ToWatch-2-DN", func(t *testing.T) {
|
||||
var oldNode = UniqueID(120)
|
||||
oldNode := UniqueID(120)
|
||||
cName := channelNamePrefix + "ToRelease-ReleaseSuccess-Reassign-ToWatch-2-DN"
|
||||
|
||||
watchkv.RemoveWithPrefix("")
|
||||
|
@ -289,7 +289,7 @@ func TestChannelManager_StateTransfer(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("ToRelease-ReleaseFail-CleanUpAndDelete-Reassign-ToWatch-2-DN", func(t *testing.T) {
|
||||
var oldNode = UniqueID(121)
|
||||
oldNode := UniqueID(121)
|
||||
|
||||
cName := channelNamePrefix + "ToRelease-ReleaseFail-CleanUpAndDelete-Reassign-ToWatch-2-DN"
|
||||
watchkv.RemoveWithPrefix("")
|
||||
|
@ -520,7 +520,7 @@ func TestChannelManager(t *testing.T) {
|
|||
|
||||
t.Run("test Reassign", func(t *testing.T) {
|
||||
defer watchkv.RemoveWithPrefix("")
|
||||
var collectionID = UniqueID(5)
|
||||
collectionID := UniqueID(5)
|
||||
|
||||
tests := []struct {
|
||||
nodeID UniqueID
|
||||
|
@ -571,9 +571,7 @@ func TestChannelManager(t *testing.T) {
|
|||
t.Run("test DeleteNode", func(t *testing.T) {
|
||||
defer watchkv.RemoveWithPrefix("")
|
||||
|
||||
var (
|
||||
collectionID = UniqueID(999)
|
||||
)
|
||||
collectionID := UniqueID(999)
|
||||
chManager, err := NewChannelManager(watchkv, newMockHandler(), withStateChecker())
|
||||
require.NoError(t, err)
|
||||
chManager.store = &ChannelStore{
|
||||
|
@ -581,7 +579,8 @@ func TestChannelManager(t *testing.T) {
|
|||
channelsInfo: map[int64]*NodeChannelInfo{
|
||||
1: {1, []*channel{
|
||||
{Name: "channel-1", CollectionID: collectionID},
|
||||
{Name: "channel-2", CollectionID: collectionID}}},
|
||||
{Name: "channel-2", CollectionID: collectionID},
|
||||
}},
|
||||
bufferID: {bufferID, []*channel{}},
|
||||
},
|
||||
}
|
||||
|
@ -596,7 +595,7 @@ func TestChannelManager(t *testing.T) {
|
|||
|
||||
t.Run("test CleanupAndReassign", func(t *testing.T) {
|
||||
defer watchkv.RemoveWithPrefix("")
|
||||
var collectionID = UniqueID(6)
|
||||
collectionID := UniqueID(6)
|
||||
|
||||
tests := []struct {
|
||||
nodeID UniqueID
|
||||
|
@ -745,7 +744,7 @@ func TestChannelManager(t *testing.T) {
|
|||
)
|
||||
cName := channelNamePrefix + "TestBgChecker"
|
||||
|
||||
//1. set up channel_manager
|
||||
// 1. set up channel_manager
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
chManager, err := NewChannelManager(watchkv, newMockHandler(), withBgChecker())
|
||||
|
@ -753,12 +752,12 @@ func TestChannelManager(t *testing.T) {
|
|||
assert.NotNil(t, chManager.bgChecker)
|
||||
chManager.Startup(ctx, []int64{nodeID})
|
||||
|
||||
//2. test isSilent function running correctly
|
||||
// 2. test isSilent function running correctly
|
||||
Params.Save(Params.DataCoordCfg.ChannelBalanceSilentDuration.Key, "3")
|
||||
assert.False(t, chManager.isSilent())
|
||||
assert.False(t, chManager.stateTimer.hasRunningTimers())
|
||||
|
||||
//3. watch one channel
|
||||
// 3. watch one channel
|
||||
chManager.Watch(&channel{Name: cName, CollectionID: collectionID})
|
||||
assert.False(t, chManager.isSilent())
|
||||
assert.True(t, chManager.stateTimer.hasRunningTimers())
|
||||
|
@ -766,7 +765,7 @@ func TestChannelManager(t *testing.T) {
|
|||
waitAndStore(t, watchkv, key, datapb.ChannelWatchState_ToWatch, datapb.ChannelWatchState_WatchSuccess)
|
||||
waitAndCheckState(t, watchkv, datapb.ChannelWatchState_WatchSuccess, nodeID, cName, collectionID)
|
||||
|
||||
//4. wait for duration and check silent again
|
||||
// 4. wait for duration and check silent again
|
||||
time.Sleep(Params.DataCoordCfg.ChannelBalanceSilentDuration.GetAsDuration(time.Second))
|
||||
chManager.stateTimer.removeTimers([]string{cName})
|
||||
assert.True(t, chManager.isSilent())
|
||||
|
@ -839,7 +838,8 @@ func TestChannelManager_Reload(t *testing.T) {
|
|||
chManager.store = &ChannelStore{
|
||||
store: watchkv,
|
||||
channelsInfo: map[int64]*NodeChannelInfo{
|
||||
nodeID: {nodeID, []*channel{{Name: channelName, CollectionID: collectionID}}}},
|
||||
nodeID: {nodeID, []*channel{{Name: channelName, CollectionID: collectionID}}},
|
||||
},
|
||||
}
|
||||
|
||||
data, err := proto.Marshal(getWatchInfoWithState(datapb.ChannelWatchState_WatchFailure, collectionID, channelName))
|
||||
|
@ -861,7 +861,8 @@ func TestChannelManager_Reload(t *testing.T) {
|
|||
chManager.store = &ChannelStore{
|
||||
store: watchkv,
|
||||
channelsInfo: map[int64]*NodeChannelInfo{
|
||||
nodeID: {nodeID, []*channel{{Name: channelName, CollectionID: collectionID}}}},
|
||||
nodeID: {nodeID, []*channel{{Name: channelName, CollectionID: collectionID}}},
|
||||
},
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
|
@ -902,7 +903,6 @@ func TestChannelManager_Reload(t *testing.T) {
|
|||
v, err := watchkv.Load(path.Join(prefix, strconv.FormatInt(nodeID, 10), channelName))
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, v)
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
|
@ -958,9 +958,7 @@ func TestChannelManager_BalanceBehaviour(t *testing.T) {
|
|||
t.Run("one node with three channels add a new node", func(t *testing.T) {
|
||||
defer watchkv.RemoveWithPrefix("")
|
||||
|
||||
var (
|
||||
collectionID = UniqueID(999)
|
||||
)
|
||||
collectionID := UniqueID(999)
|
||||
|
||||
chManager, err := NewChannelManager(watchkv, newMockHandler(), withStateChecker())
|
||||
require.NoError(t, err)
|
||||
|
@ -976,12 +974,12 @@ func TestChannelManager_BalanceBehaviour(t *testing.T) {
|
|||
1: {1, []*channel{
|
||||
{Name: "channel-1", CollectionID: collectionID},
|
||||
{Name: "channel-2", CollectionID: collectionID},
|
||||
{Name: "channel-3", CollectionID: collectionID}}}},
|
||||
{Name: "channel-3", CollectionID: collectionID},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
channelBalanced string
|
||||
)
|
||||
var channelBalanced string
|
||||
|
||||
chManager.AddNode(2)
|
||||
channelBalanced = "channel-1"
|
||||
|
@ -1047,7 +1045,6 @@ func TestChannelManager_BalanceBehaviour(t *testing.T) {
|
|||
assert.True(t, chManager.Match(1, "channel-1"))
|
||||
assert.True(t, chManager.Match(1, "channel-4"))
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestChannelManager_RemoveChannel(t *testing.T) {
|
||||
|
@ -1153,6 +1150,5 @@ func TestChannelManager_HelperFunc(t *testing.T) {
|
|||
assert.ElementsMatch(t, test.expectedOut, nodes)
|
||||
})
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
|
|
|
@ -20,13 +20,14 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/samber/lo"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
"github.com/samber/lo"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Cluster provides interfaces to interact with datanode cluster
|
||||
|
@ -77,7 +78,8 @@ func (c *Cluster) Watch(ch string, collectionID UniqueID) error {
|
|||
// Flush sends flush requests to dataNodes specified
|
||||
// which also according to channels where segments are assigned to.
|
||||
func (c *Cluster) Flush(ctx context.Context, nodeID int64, channel string,
|
||||
segments []*datapb.SegmentInfo) error {
|
||||
segments []*datapb.SegmentInfo,
|
||||
) error {
|
||||
if !c.channelManager.Match(nodeID, channel) {
|
||||
log.Warn("node is not matched with channel",
|
||||
zap.String("channel", channel),
|
||||
|
|
|
@ -367,7 +367,7 @@ func (suite *ClusterSuite) TestUnregister() {
|
|||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
|
||||
var mockSessionCreator = func(ctx context.Context, addr string, nodeID int64) (types.DataNode, error) {
|
||||
mockSessionCreator := func(ctx context.Context, addr string, nodeID int64) (types.DataNode, error) {
|
||||
return newMockDataNodeClient(1, nil)
|
||||
}
|
||||
sessionManager := NewSessionManager(withSessionCreator(mockSessionCreator))
|
||||
|
@ -414,7 +414,7 @@ func TestWatchIfNeeded(t *testing.T) {
|
|||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
|
||||
var mockSessionCreator = func(ctx context.Context, addr string, nodeID int64) (types.DataNode, error) {
|
||||
mockSessionCreator := func(ctx context.Context, addr string, nodeID int64) (types.DataNode, error) {
|
||||
return newMockDataNodeClient(1, nil)
|
||||
}
|
||||
sessionManager := NewSessionManager(withSessionCreator(mockSessionCreator))
|
||||
|
@ -584,7 +584,7 @@ func TestCluster_Flush(t *testing.T) {
|
|||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
//TODO add a method to verify datanode has flush request after client injection is available
|
||||
// TODO add a method to verify datanode has flush request after client injection is available
|
||||
}
|
||||
|
||||
func TestCluster_Import(t *testing.T) {
|
||||
|
@ -629,7 +629,7 @@ func TestCluster_ReCollectSegmentStats(t *testing.T) {
|
|||
t.Run("recollect succeed", func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
defer cancel()
|
||||
var mockSessionCreator = func(ctx context.Context, addr string, nodeID int64) (types.DataNode, error) {
|
||||
mockSessionCreator := func(ctx context.Context, addr string, nodeID int64) (types.DataNode, error) {
|
||||
return newMockDataNodeClient(1, nil)
|
||||
}
|
||||
sessionManager := NewSessionManager(withSessionCreator(mockSessionCreator))
|
||||
|
|
|
@ -23,13 +23,13 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/metrics"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
)
|
||||
|
||||
// TODO this num should be determined by resources of datanode, for now, we set to a fixed value for simple
|
||||
|
@ -102,12 +102,13 @@ type compactionPlanHandler struct {
|
|||
quit chan struct{}
|
||||
wg sync.WaitGroup
|
||||
flushCh chan UniqueID
|
||||
//segRefer *SegmentReferenceManager
|
||||
// segRefer *SegmentReferenceManager
|
||||
parallelCh map[int64]chan struct{}
|
||||
}
|
||||
|
||||
func newCompactionPlanHandler(sessions *SessionManager, cm *ChannelManager, meta *meta,
|
||||
allocator allocator, flush chan UniqueID) *compactionPlanHandler {
|
||||
allocator allocator, flush chan UniqueID,
|
||||
) *compactionPlanHandler {
|
||||
return &compactionPlanHandler{
|
||||
plans: make(map[int64]*compactionTask),
|
||||
chManager: cm,
|
||||
|
@ -115,7 +116,7 @@ func newCompactionPlanHandler(sessions *SessionManager, cm *ChannelManager, meta
|
|||
sessions: sessions,
|
||||
allocator: allocator,
|
||||
flushCh: flush,
|
||||
//segRefer: segRefer,
|
||||
// segRefer: segRefer,
|
||||
parallelCh: make(map[int64]chan struct{}),
|
||||
}
|
||||
}
|
||||
|
@ -263,7 +264,7 @@ func (c *compactionPlanHandler) handleMergeCompactionResult(plan *datapb.Compact
|
|||
return err
|
||||
}
|
||||
|
||||
var nodeID = c.plans[plan.GetPlanID()].dataNodeID
|
||||
nodeID := c.plans[plan.GetPlanID()].dataNodeID
|
||||
req := &datapb.SyncSegmentsRequest{
|
||||
PlanID: plan.PlanID,
|
||||
CompactedTo: newSegment.GetID(),
|
||||
|
|
|
@ -23,22 +23,21 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
mockkv "github.com/milvus-io/milvus/internal/kv/mocks"
|
||||
"github.com/milvus-io/milvus/internal/metastore/kv/datacoord"
|
||||
"github.com/milvus-io/milvus/internal/mocks"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/util/metautil"
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
|
||||
mockkv "github.com/milvus-io/milvus/internal/kv/mocks"
|
||||
)
|
||||
|
||||
func Test_compactionPlanHandler_execCompactionPlan(t *testing.T) {
|
||||
|
@ -182,7 +181,6 @@ func Test_compactionPlanHandler_execCompactionPlan(t *testing.T) {
|
|||
assert.Equal(t, tt.args.signal, task.triggerInfo)
|
||||
assert.Equal(t, 1, c.executingTaskNum)
|
||||
} else {
|
||||
|
||||
assert.Eventually(t,
|
||||
func() bool {
|
||||
c.mu.RLock()
|
||||
|
@ -198,7 +196,6 @@ func Test_compactionPlanHandler_execCompactionPlan(t *testing.T) {
|
|||
}
|
||||
|
||||
func Test_compactionPlanHandler_execWithParallels(t *testing.T) {
|
||||
|
||||
mockDataNode := &mocks.MockDataNode{}
|
||||
paramtable.Get().Save(Params.DataCoordCfg.CompactionCheckIntervalInSeconds.Key, "1")
|
||||
defer paramtable.Get().Reset(Params.DataCoordCfg.CompactionCheckIntervalInSeconds.Key)
|
||||
|
@ -330,7 +327,8 @@ func TestCompactionPlanHandler_handleMergeCompactionResult(t *testing.T) {
|
|||
data map[int64]*Session
|
||||
}{
|
||||
data: map[int64]*Session{
|
||||
dataNodeID: {client: mockDataNode}},
|
||||
dataNodeID: {client: mockDataNode},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -485,7 +483,8 @@ func TestCompactionPlanHandler_completeCompaction(t *testing.T) {
|
|||
data map[int64]*Session
|
||||
}{
|
||||
data: map[int64]*Session{
|
||||
dataNodeID: {client: mockDataNode}},
|
||||
dataNodeID: {client: mockDataNode},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -577,7 +576,8 @@ func TestCompactionPlanHandler_completeCompaction(t *testing.T) {
|
|||
data map[int64]*Session
|
||||
}{
|
||||
data: map[int64]*Session{
|
||||
dataNodeID: {client: mockDataNode}},
|
||||
dataNodeID: {client: mockDataNode},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/samber/lo"
|
||||
"go.uber.org/zap"
|
||||
|
||||
|
@ -32,6 +31,7 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/util/indexparamcheck"
|
||||
"github.com/milvus-io/milvus/pkg/util/logutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
)
|
||||
|
||||
type compactTime struct {
|
||||
|
@ -72,8 +72,8 @@ type compactionTrigger struct {
|
|||
forceMu sync.Mutex
|
||||
quit chan struct{}
|
||||
wg sync.WaitGroup
|
||||
//segRefer *SegmentReferenceManager
|
||||
//indexCoord types.IndexCoord
|
||||
// segRefer *SegmentReferenceManager
|
||||
// indexCoord types.IndexCoord
|
||||
estimateNonDiskSegmentPolicy calUpperLimitPolicy
|
||||
estimateDiskSegmentPolicy calUpperLimitPolicy
|
||||
// A sloopy hack, so we can test with different segment row count without worrying that
|
||||
|
@ -85,8 +85,8 @@ func newCompactionTrigger(
|
|||
meta *meta,
|
||||
compactionHandler compactionPlanContext,
|
||||
allocator allocator,
|
||||
//segRefer *SegmentReferenceManager,
|
||||
//indexCoord types.IndexCoord,
|
||||
// segRefer *SegmentReferenceManager,
|
||||
// indexCoord types.IndexCoord,
|
||||
handler Handler,
|
||||
) *compactionTrigger {
|
||||
return &compactionTrigger{
|
||||
|
@ -94,8 +94,8 @@ func newCompactionTrigger(
|
|||
allocator: allocator,
|
||||
signals: make(chan *compactionSignal, 100),
|
||||
compactionHandler: compactionHandler,
|
||||
//segRefer: segRefer,
|
||||
//indexCoord: indexCoord,
|
||||
// segRefer: segRefer,
|
||||
// indexCoord: indexCoord,
|
||||
estimateDiskSegmentPolicy: calBySchemaPolicyWithDiskIndex,
|
||||
estimateNonDiskSegmentPolicy: calBySchemaPolicy,
|
||||
handler: handler,
|
||||
|
@ -211,7 +211,6 @@ func (t *compactionTrigger) getCompactTime(ts Timestamp, coll *collectionInfo) (
|
|||
|
||||
// triggerCompaction trigger a compaction if any compaction condition satisfy.
|
||||
func (t *compactionTrigger) triggerCompaction() error {
|
||||
|
||||
id, err := t.allocSignalID()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -585,7 +584,7 @@ func (t *compactionTrigger) generatePlans(segments []*SegmentInfo, force bool, i
|
|||
}
|
||||
// greedy pick from large segment to small, the goal is to fill each segment to reach 512M
|
||||
// we must ensure all prioritized candidates is in a plan
|
||||
//TODO the compaction selection policy should consider if compaction workload is high
|
||||
// TODO the compaction selection policy should consider if compaction workload is high
|
||||
for len(prioritizedCandidates) > 0 {
|
||||
var bucket []*SegmentInfo
|
||||
// pop out the first element
|
||||
|
|
|
@ -22,7 +22,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
@ -32,6 +31,7 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/pkg/common"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
)
|
||||
|
||||
type spyCompactionHandler struct {
|
||||
|
@ -490,7 +490,7 @@ func Test_compactionTrigger_force(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run(tt.name+" with allocate ts error", func(t *testing.T) {
|
||||
//indexCood := newMockIndexCoord()
|
||||
// indexCood := newMockIndexCoord()
|
||||
tr := &compactionTrigger{
|
||||
meta: tt.fields.meta,
|
||||
handler: newMockHandlerWithMeta(tt.fields.meta),
|
||||
|
@ -926,7 +926,6 @@ func Test_compactionTrigger_noplan(t *testing.T) {
|
|||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
tr := &compactionTrigger{
|
||||
meta: tt.fields.meta,
|
||||
handler: newMockHandlerWithMeta(tt.fields.meta),
|
||||
|
@ -1659,7 +1658,6 @@ func Test_compactionTrigger_noplan_random_size(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, plan := range plans {
|
||||
|
||||
size := int64(0)
|
||||
for _, log := range plan.SegmentBinlogs {
|
||||
size += log.FieldBinlogs[0].GetBinlogs()[0].LogSize
|
||||
|
@ -1708,7 +1706,7 @@ func Test_compactionTrigger_shouldDoSingleCompaction(t *testing.T) {
|
|||
couldDo := trigger.ShouldDoSingleCompaction(info, false, &compactTime{})
|
||||
assert.True(t, couldDo)
|
||||
|
||||
//Test too many stats log
|
||||
// Test too many stats log
|
||||
info = &SegmentInfo{
|
||||
SegmentInfo: &datapb.SegmentInfo{
|
||||
ID: 1,
|
||||
|
@ -1736,12 +1734,12 @@ func Test_compactionTrigger_shouldDoSingleCompaction(t *testing.T) {
|
|||
|
||||
couldDo = trigger.ShouldDoSingleCompaction(info, true, &compactTime{})
|
||||
assert.False(t, couldDo)
|
||||
//Test too many stats log but compacted
|
||||
// Test too many stats log but compacted
|
||||
info.CompactionFrom = []int64{0, 1}
|
||||
couldDo = trigger.ShouldDoSingleCompaction(info, false, &compactTime{})
|
||||
assert.False(t, couldDo)
|
||||
|
||||
//Test expire triggered compaction
|
||||
// Test expire triggered compaction
|
||||
var binlogs2 []*datapb.FieldBinlog
|
||||
for i := UniqueID(0); i < 100; i++ {
|
||||
binlogs2 = append(binlogs2, &datapb.FieldBinlog{
|
||||
|
@ -1974,40 +1972,41 @@ func (s *CompactionTriggerSuite) SetupTest() {
|
|||
s.indexID = 300
|
||||
s.vecFieldID = 400
|
||||
s.channel = "dml_0_100v0"
|
||||
s.meta = &meta{segments: &SegmentsInfo{
|
||||
map[int64]*SegmentInfo{
|
||||
1: {
|
||||
SegmentInfo: s.genSeg(1, 60),
|
||||
lastFlushTime: time.Now().Add(-100 * time.Minute),
|
||||
segmentIndexes: s.genSegIndex(1, indexID, 60),
|
||||
},
|
||||
2: {
|
||||
SegmentInfo: s.genSeg(2, 60),
|
||||
lastFlushTime: time.Now(),
|
||||
segmentIndexes: s.genSegIndex(2, indexID, 60),
|
||||
},
|
||||
3: {
|
||||
SegmentInfo: s.genSeg(3, 60),
|
||||
lastFlushTime: time.Now(),
|
||||
segmentIndexes: s.genSegIndex(3, indexID, 60),
|
||||
},
|
||||
4: {
|
||||
SegmentInfo: s.genSeg(4, 60),
|
||||
lastFlushTime: time.Now(),
|
||||
segmentIndexes: s.genSegIndex(4, indexID, 60),
|
||||
},
|
||||
5: {
|
||||
SegmentInfo: s.genSeg(5, 26),
|
||||
lastFlushTime: time.Now(),
|
||||
segmentIndexes: s.genSegIndex(5, indexID, 26),
|
||||
},
|
||||
6: {
|
||||
SegmentInfo: s.genSeg(6, 26),
|
||||
lastFlushTime: time.Now(),
|
||||
segmentIndexes: s.genSegIndex(6, indexID, 26),
|
||||
s.meta = &meta{
|
||||
segments: &SegmentsInfo{
|
||||
map[int64]*SegmentInfo{
|
||||
1: {
|
||||
SegmentInfo: s.genSeg(1, 60),
|
||||
lastFlushTime: time.Now().Add(-100 * time.Minute),
|
||||
segmentIndexes: s.genSegIndex(1, indexID, 60),
|
||||
},
|
||||
2: {
|
||||
SegmentInfo: s.genSeg(2, 60),
|
||||
lastFlushTime: time.Now(),
|
||||
segmentIndexes: s.genSegIndex(2, indexID, 60),
|
||||
},
|
||||
3: {
|
||||
SegmentInfo: s.genSeg(3, 60),
|
||||
lastFlushTime: time.Now(),
|
||||
segmentIndexes: s.genSegIndex(3, indexID, 60),
|
||||
},
|
||||
4: {
|
||||
SegmentInfo: s.genSeg(4, 60),
|
||||
lastFlushTime: time.Now(),
|
||||
segmentIndexes: s.genSegIndex(4, indexID, 60),
|
||||
},
|
||||
5: {
|
||||
SegmentInfo: s.genSeg(5, 26),
|
||||
lastFlushTime: time.Now(),
|
||||
segmentIndexes: s.genSegIndex(5, indexID, 26),
|
||||
},
|
||||
6: {
|
||||
SegmentInfo: s.genSeg(6, 26),
|
||||
lastFlushTime: time.Now(),
|
||||
segmentIndexes: s.genSegIndex(6, indexID, 26),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
collections: map[int64]*collectionInfo{
|
||||
s.collectionID: {
|
||||
ID: s.collectionID,
|
||||
|
@ -2061,7 +2060,7 @@ func (s *CompactionTriggerSuite) TestHandleSignal() {
|
|||
defer s.SetupTest()
|
||||
tr := s.tr
|
||||
s.compactionHandler.EXPECT().isFull().Return(false)
|
||||
//s.allocator.EXPECT().allocTimestamp(mock.Anything).Return(10000, nil)
|
||||
// s.allocator.EXPECT().allocTimestamp(mock.Anything).Return(10000, nil)
|
||||
s.handler.EXPECT().GetCollection(mock.Anything, int64(100)).Return(nil, errors.New("mocked"))
|
||||
tr.handleSignal(&compactionSignal{
|
||||
segmentID: 1,
|
||||
|
@ -2078,7 +2077,7 @@ func (s *CompactionTriggerSuite) TestHandleSignal() {
|
|||
defer s.SetupTest()
|
||||
tr := s.tr
|
||||
s.compactionHandler.EXPECT().isFull().Return(false)
|
||||
//s.allocator.EXPECT().allocTimestamp(mock.Anything).Return(10000, nil)
|
||||
// s.allocator.EXPECT().allocTimestamp(mock.Anything).Return(10000, nil)
|
||||
s.handler.EXPECT().GetCollection(mock.Anything, int64(100)).Return(&collectionInfo{
|
||||
Properties: map[string]string{
|
||||
common.CollectionAutoCompactionKey: "bad_value",
|
||||
|
|
|
@ -20,6 +20,8 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/types"
|
||||
|
@ -27,7 +29,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/util/commonpbutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
@@ -29,9 +29,11 @@ var errNilKvClient = errors.New("kv client not initialized")
const serverNotServingErrMsg = "DataCoord is not serving"

// errors for VerifyResponse
var errNilResponse = errors.New("response is nil")
var errNilStatusResponse = errors.New("response has nil status")
var errUnknownResponseType = errors.New("unknown response type")
var (
	errNilResponse = errors.New("response is nil")
	errNilStatusResponse = errors.New("response has nil status")
	errUnknownResponseType = errors.New("unknown response type")
)

func msgDataCoordIsUnhealthy(coordID UniqueID) string {
	return fmt.Sprintf("DataCoord %d is not ready", coordID)
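
The hunk above folds three adjacent top-level var declarations into a single var ( ... ) block. A minimal sketch of the same rewrite, using hypothetical error names rather than the ones in this file:

package example

import "errors"

// Before: one `var` keyword per declaration.
var errReadFailed = errors.New("read failed")
var errWriteFailed = errors.New("write failed")

// After: adjacent declarations are grouped into one block.
var (
	errOpenFailed  = errors.New("open failed")
	errCloseFailed = errors.New("close failed")
)
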
@@ -19,9 +19,10 @@ package datacoord
import (
	"testing"

	"go.uber.org/zap"

	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
	"go.uber.org/zap"
)

func TestMsgDataCoordIsUnhealthy(t *testing.T) {
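
The import hunks in this and the following files keep the same three-group layout: standard library, third-party modules, then github.com/milvus-io packages, with a blank line between groups. The example below is only a sketch of that layout with a hypothetical test file; it assumes the listed modules are available.

package example

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus/pkg/log"
)

func TestImportLayout(t *testing.T) {
	// Standard library, third-party, then project-local imports, in that order.
	log.Info("layout check", zap.String("group", "local"))
	assert.Equal(t, "1", fmt.Sprintf("%d", 1))
}
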
@@ -201,7 +201,8 @@ func (gc *garbageCollector) scan() {
func (gc *garbageCollector) checkDroppedSegmentGC(segment *SegmentInfo,
	childSegment *SegmentInfo,
	indexSet typeutil.UniqueSet,
	cpTimestamp Timestamp) bool {
	cpTimestamp Timestamp,
) bool {
	log := log.With(zap.Int64("segmentID", segment.ID))

	isCompacted := childSegment != nil || segment.GetCompacted()
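
checkDroppedSegmentGC keeps the same parameters; only the closing parenthesis of the wrapped parameter list moves onto its own line, after a trailing comma on the last parameter. A hypothetical signature with the same shape, not taken from the diff:

package example

import "time"

// expireEntries is illustrative only. When a parameter list is wrapped,
// the last parameter gets a trailing comma and the ") (results) {" line
// starts flush with the func keyword.
func expireEntries(ids []int64,
	owner string,
	cutoff time.Time,
) (int, error) {
	_ = owner
	_ = cutoff
	return len(ids), nil
}
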
@ -246,7 +247,7 @@ func (gc *garbageCollector) clearEtcd() {
|
|||
if segment.GetState() == commonpb.SegmentState_Dropped {
|
||||
drops[segment.GetID()] = segment
|
||||
channels.Insert(segment.GetInsertChannel())
|
||||
//continue
|
||||
// continue
|
||||
// A(indexed), B(indexed) -> C(no indexed), D(no indexed) -> E(no indexed), A, B can not be GC
|
||||
}
|
||||
for _, from := range segment.GetCompactionFrom() {
|
||||
|
|
|
@ -26,8 +26,6 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/common"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
minio "github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
|
@ -47,6 +45,7 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/internal/proto/indexpb"
|
||||
"github.com/milvus-io/milvus/internal/storage"
|
||||
"github.com/milvus-io/milvus/pkg/common"
|
||||
"github.com/milvus-io/milvus/pkg/util/funcutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
)
|
||||
|
@ -54,7 +53,7 @@ import (
|
|||
func Test_garbageCollector_basic(t *testing.T) {
|
||||
bucketName := `datacoord-ut` + strings.ToLower(funcutil.RandomString(8))
|
||||
rootPath := `gc` + funcutil.RandomString(8)
|
||||
//TODO change to Params
|
||||
// TODO change to Params
|
||||
cli, _, _, _, _, err := initUtOSSEnv(bucketName, rootPath, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -93,7 +92,6 @@ func Test_garbageCollector_basic(t *testing.T) {
|
|||
gc.close()
|
||||
})
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func validateMinioPrefixElements(t *testing.T, cli *minio.Client, bucketName string, prefix string, elements []string) {
|
||||
|
@ -107,7 +105,7 @@ func validateMinioPrefixElements(t *testing.T, cli *minio.Client, bucketName str
|
|||
func Test_garbageCollector_scan(t *testing.T) {
|
||||
bucketName := `datacoord-ut` + strings.ToLower(funcutil.RandomString(8))
|
||||
rootPath := `gc` + funcutil.RandomString(8)
|
||||
//TODO change to Params
|
||||
// TODO change to Params
|
||||
cli, inserts, stats, delta, others, err := initUtOSSEnv(bucketName, rootPath, 4)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -334,7 +332,7 @@ func createMetaForRecycleUnusedIndexes(catalog metastore.DataCoordCatalog) *meta
|
|||
var (
|
||||
ctx = context.Background()
|
||||
collID = UniqueID(100)
|
||||
//partID = UniqueID(200)
|
||||
// partID = UniqueID(200)
|
||||
fieldID = UniqueID(300)
|
||||
indexID = UniqueID(400)
|
||||
)
|
||||
|
@ -428,7 +426,7 @@ func createMetaForRecycleUnusedSegIndexes(catalog metastore.DataCoordCatalog) *m
|
|||
ctx = context.Background()
|
||||
collID = UniqueID(100)
|
||||
partID = UniqueID(200)
|
||||
//fieldID = UniqueID(300)
|
||||
// fieldID = UniqueID(300)
|
||||
indexID = UniqueID(400)
|
||||
segID = UniqueID(500)
|
||||
)
|
||||
|
@ -571,7 +569,7 @@ func createMetaTableForRecycleUnusedIndexFiles(catalog *datacoord.Catalog) *meta
|
|||
ctx = context.Background()
|
||||
collID = UniqueID(100)
|
||||
partID = UniqueID(200)
|
||||
//fieldID = UniqueID(300)
|
||||
// fieldID = UniqueID(300)
|
||||
indexID = UniqueID(400)
|
||||
segID = UniqueID(500)
|
||||
buildID = UniqueID(600)
|
||||
|
|
|
@ -20,7 +20,6 @@ import (
|
|||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/samber/lo"
|
||||
"go.uber.org/zap"
|
||||
|
||||
|
@ -30,6 +29,7 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/util/funcutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/retry"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
)
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
|
|
|
@ -22,7 +22,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"strconv"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
|
@ -30,7 +31,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/common"
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/metrics"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func (m *meta) updateCollectionIndex(index *model.Index) {
|
||||
|
|
|
@ -41,7 +41,7 @@ import (
|
|||
func TestMeta_CanCreateIndex(t *testing.T) {
|
||||
var (
|
||||
collID = UniqueID(1)
|
||||
//partID = UniqueID(2)
|
||||
// partID = UniqueID(2)
|
||||
indexID = UniqueID(10)
|
||||
fieldID = UniqueID(100)
|
||||
indexName = "_default_idx"
|
||||
|
@ -162,7 +162,7 @@ func TestMeta_CanCreateIndex(t *testing.T) {
|
|||
func TestMeta_HasSameReq(t *testing.T) {
|
||||
var (
|
||||
collID = UniqueID(1)
|
||||
//partID = UniqueID(2)
|
||||
// partID = UniqueID(2)
|
||||
indexID = UniqueID(10)
|
||||
fieldID = UniqueID(100)
|
||||
indexName = "_default_idx"
|
||||
|
@ -371,7 +371,7 @@ func TestMeta_AddSegmentIndex(t *testing.T) {
|
|||
func TestMeta_GetIndexIDByName(t *testing.T) {
|
||||
var (
|
||||
collID = UniqueID(1)
|
||||
//partID = UniqueID(2)
|
||||
// partID = UniqueID(2)
|
||||
indexID = UniqueID(10)
|
||||
fieldID = UniqueID(100)
|
||||
indexName = "_default_idx"
|
||||
|
@ -425,7 +425,6 @@ func TestMeta_GetIndexIDByName(t *testing.T) {
|
|||
indexID2CreateTS := m.GetIndexIDByName(collID, indexName)
|
||||
assert.Contains(t, indexID2CreateTS, indexID)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestMeta_GetSegmentIndexState(t *testing.T) {
|
||||
|
|
|
@ -48,7 +48,7 @@ func TestServer_CreateIndex(t *testing.T) {
|
|||
var (
|
||||
collID = UniqueID(1)
|
||||
fieldID = UniqueID(10)
|
||||
//indexID = UniqueID(100)
|
||||
// indexID = UniqueID(100)
|
||||
indexName = "default_idx"
|
||||
typeParams = []*commonpb.KeyValuePair{
|
||||
{
|
||||
|
@ -706,7 +706,7 @@ func TestServer_DescribeIndex(t *testing.T) {
|
|||
catalog: catalog,
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
//finished
|
||||
// finished
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: collID,
|
||||
|
@ -1067,7 +1067,7 @@ func TestServer_GetIndexStatistics(t *testing.T) {
|
|||
catalog: catalog,
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
//finished
|
||||
// finished
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: collID,
|
||||
|
@ -1347,7 +1347,7 @@ func TestServer_DropIndex(t *testing.T) {
|
|||
catalog: catalog,
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
//finished
|
||||
// finished
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: collID,
|
||||
|
@ -1542,7 +1542,7 @@ func TestServer_GetIndexInfos(t *testing.T) {
|
|||
catalog: &datacoord.Catalog{MetaKv: mocks.NewMetaKv(t)},
|
||||
indexes: map[UniqueID]map[UniqueID]*model.Index{
|
||||
collID: {
|
||||
//finished
|
||||
// finished
|
||||
indexID: {
|
||||
TenantID: "",
|
||||
CollectionID: collID,
|
||||
|
|
|
@ -22,6 +22,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus/internal/indexnode"
|
||||
|
@ -29,7 +30,6 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/proto/indexpb"
|
||||
"github.com/milvus-io/milvus/internal/types"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestIndexNodeManager_AddNode(t *testing.T) {
|
||||
|
|
|
@ -26,7 +26,6 @@ import (
|
|||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/samber/lo"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/exp/maps"
|
||||
|
@ -44,6 +43,7 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/metrics"
|
||||
"github.com/milvus-io/milvus/pkg/util/metautil"
|
||||
"github.com/milvus-io/milvus/pkg/util/timerecord"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
)
|
||||
|
||||
|
@@ -491,7 +491,7 @@ func (m *meta) UpdateFlushSegmentsInfo(
	}
	// TODO add diff encoding and compression
	currBinlogs := clonedSegment.GetBinlogs()
	var getFieldBinlogs = func(id UniqueID, binlogs []*datapb.FieldBinlog) *datapb.FieldBinlog {
	getFieldBinlogs := func(id UniqueID, binlogs []*datapb.FieldBinlog) *datapb.FieldBinlog {
		for _, binlog := range binlogs {
			if id == binlog.GetFieldID() {
				return binlog
@@ -532,7 +532,7 @@ func (m *meta) UpdateFlushSegmentsInfo(
	}
	clonedSegment.Deltalogs = currDeltaLogs
	modSegments[segmentID] = clonedSegment
	var getClonedSegment = func(segmentID UniqueID) *SegmentInfo {
	getClonedSegment := func(segmentID UniqueID) *SegmentInfo {
		if s, ok := modSegments[segmentID]; ok {
			return s
		}
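
Both hunks above replace `var name = func(...)` with the short form `name := func(...)` for closures declared inside a function body. A small sketch of that simplification, using a hypothetical helper:

package example

// sumPositive is illustrative only.
func sumPositive(values []int) int {
	// Previously this style of code read: var keep = func(v int) bool { return v > 0 }
	keep := func(v int) bool { return v > 0 }

	total := 0
	for _, v := range values {
		if keep(v) {
			total += v
		}
	}
	return total
}
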
@ -686,7 +686,7 @@ func (m *meta) mergeDropSegment(seg2Drop *SegmentInfo) (*SegmentInfo, *segMetric
|
|||
|
||||
currBinlogs := clonedSegment.GetBinlogs()
|
||||
|
||||
var getFieldBinlogs = func(id UniqueID, binlogs []*datapb.FieldBinlog) *datapb.FieldBinlog {
|
||||
getFieldBinlogs := func(id UniqueID, binlogs []*datapb.FieldBinlog) *datapb.FieldBinlog {
|
||||
for _, binlog := range binlogs {
|
||||
if id == binlog.GetFieldID() {
|
||||
return binlog
|
||||
|
@ -983,7 +983,8 @@ func (m *meta) SetSegmentCompacting(segmentID UniqueID, compacting bool) {
|
|||
// - the segment info of compactedTo segment after compaction to add
|
||||
// The compactedTo segment could contain 0 numRows
|
||||
func (m *meta) PrepareCompleteCompactionMutation(plan *datapb.CompactionPlan,
|
||||
result *datapb.CompactionResult) ([]*SegmentInfo, []*SegmentInfo, *SegmentInfo, *segMetricMutation, error) {
|
||||
result *datapb.CompactionResult,
|
||||
) ([]*SegmentInfo, []*SegmentInfo, *SegmentInfo, *segMetricMutation, error) {
|
||||
log.Info("meta update: prepare for complete compaction mutation")
|
||||
compactionLogs := plan.GetSegmentBinlogs()
|
||||
m.Lock()
|
||||
|
|
|
@ -31,6 +31,7 @@ import (
|
|||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/msgpb"
|
||||
"github.com/milvus-io/milvus/internal/kv"
|
||||
mockkv "github.com/milvus-io/milvus/internal/kv/mocks"
|
||||
"github.com/milvus-io/milvus/internal/metastore/kv/datacoord"
|
||||
"github.com/milvus-io/milvus/internal/metastore/model"
|
||||
"github.com/milvus-io/milvus/internal/mocks"
|
||||
|
@ -39,8 +40,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/metrics"
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
"github.com/milvus-io/milvus/pkg/util/testutils"
|
||||
|
||||
mockkv "github.com/milvus-io/milvus/internal/kv/mocks"
|
||||
)
|
||||
|
||||
// MetaReloadSuite tests meta reload & meta creation related logic
|
||||
|
@ -311,7 +310,6 @@ func TestMeta_Basic(t *testing.T) {
|
|||
info1_1 = meta.GetHealthySegment(segID1_1)
|
||||
assert.NotNil(t, info1_1)
|
||||
assert.Equal(t, false, info1_1.GetIsImporting())
|
||||
|
||||
})
|
||||
|
||||
t.Run("Test segment with kv fails", func(t *testing.T) {
|
||||
|
@ -495,8 +493,10 @@ func TestUpdateFlushSegmentsInfo(t *testing.T) {
|
|||
meta, err := newMemoryMeta()
|
||||
assert.NoError(t, err)
|
||||
|
||||
segment1 := &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{ID: 1, State: commonpb.SegmentState_Growing, Binlogs: []*datapb.FieldBinlog{getFieldBinlogPaths(1, getInsertLogPath("binlog0", 1))},
|
||||
Statslogs: []*datapb.FieldBinlog{getFieldBinlogPaths(1, getStatsLogPath("statslog0", 1))}}}
|
||||
segment1 := &SegmentInfo{SegmentInfo: &datapb.SegmentInfo{
|
||||
ID: 1, State: commonpb.SegmentState_Growing, Binlogs: []*datapb.FieldBinlog{getFieldBinlogPaths(1, getInsertLogPath("binlog0", 1))},
|
||||
Statslogs: []*datapb.FieldBinlog{getFieldBinlogPaths(1, getStatsLogPath("statslog0", 1))},
|
||||
}}
|
||||
err = meta.AddSegment(context.TODO(), segment1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
|
@ -524,7 +524,6 @@ func TestUpdateFlushSegmentsInfo(t *testing.T) {
|
|||
assert.Equal(t, updated.State, expected.State)
|
||||
assert.Equal(t, updated.size.Load(), expected.size.Load())
|
||||
assert.Equal(t, updated.NumOfRows, expected.NumOfRows)
|
||||
|
||||
})
|
||||
|
||||
t.Run("update non-existed segment", func(t *testing.T) {
|
||||
|
|
|
@ -20,13 +20,11 @@ import (
|
|||
"context"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/types"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
|
||||
"github.com/milvus-io/milvus/internal/types"
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/util/hardware"
|
||||
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
|
||||
|
@ -28,7 +29,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type mockMetricDataNodeClient struct {
|
||||
|
@ -122,7 +122,6 @@ func TestGetDataNodeMetrics(t *testing.T) {
|
|||
info, err = svr.getDataNodeMetrics(ctx, req, NewSession(&NodeInfo{}, mockFailClientCreator))
|
||||
assert.NoError(t, err)
|
||||
assert.True(t, info.HasError)
|
||||
|
||||
}
|
||||
|
||||
func TestGetIndexNodeMetrics(t *testing.T) {
|
||||
|
|
|
@ -22,8 +22,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
|
@ -36,7 +34,9 @@ import (
|
|||
"github.com/milvus-io/milvus/internal/proto/proxypb"
|
||||
"github.com/milvus-io/milvus/internal/proto/rootcoordpb"
|
||||
"github.com/milvus-io/milvus/pkg/common"
|
||||
"github.com/milvus-io/milvus/pkg/util/merr"
|
||||
"github.com/milvus-io/milvus/pkg/util/metricsinfo"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
)
|
||||
|
||||
|
@@ -103,8 +103,7 @@ func (m *MockAllocator) allocID(ctx context.Context) (UniqueID, error) {
	return val, nil
}

type MockAllocator0 struct {
}
type MockAllocator0 struct{}

func (m *MockAllocator0) allocTimestamp(ctx context.Context) (Timestamp, error) {
	return Timestamp(0), nil
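
MockAllocator0 goes from a two-line empty struct to the single-line struct{} form. The same rewrite on a hypothetical type:

package example

// noopAllocator is illustrative; an empty struct body collapses onto one line.
type noopAllocator struct{}

func (noopAllocator) Alloc(n uint32) (int64, int64, error) {
	// Fixed return values make this a stand-in, not a real allocator.
	return 0, int64(n), nil
}
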
@ -307,7 +306,7 @@ type mockRootCoordService struct {
|
|||
}
|
||||
|
||||
func (m *mockRootCoordService) RenameCollection(ctx context.Context, req *milvuspb.RenameCollectionRequest) (*commonpb.Status, error) {
|
||||
//TODO implement me
|
||||
// TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
|
|
|
@ -23,10 +23,11 @@ import (
|
|||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
"stathat.com/c/consistent"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
)
|
||||
|
||||
// RegisterPolicy decides the channels mapping after registering the nodeID
|
||||
|
@ -443,7 +444,6 @@ func RoundRobinReassignPolicy(store ROChannelStore, reassigns []*NodeChannelInfo
|
|||
} else {
|
||||
addUpdates[targetID].Channels = append(addUpdates[targetID].Channels, ch)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
for _, update := range addUpdates {
|
||||
|
|
|
@ -387,7 +387,7 @@ func TestBgCheckForChannelBalance(t *testing.T) {
|
|||
},
|
||||
time.Now(),
|
||||
},
|
||||
//there should be no reallocate
|
||||
// there should be no reallocate
|
||||
[]*NodeChannelInfo{},
|
||||
nil,
|
||||
},
|
||||
|
@ -409,8 +409,11 @@ func TestBgCheckForChannelBalance(t *testing.T) {
|
|||
"test uneven with zero",
|
||||
args{
|
||||
[]*NodeChannelInfo{
|
||||
{1, []*channel{{Name: "chan1", CollectionID: 1}, {Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1}}},
|
||||
{1, []*channel{
|
||||
{Name: "chan1", CollectionID: 1},
|
||||
{Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1},
|
||||
}},
|
||||
{2, []*channel{}},
|
||||
},
|
||||
time.Now(),
|
||||
|
@ -450,7 +453,7 @@ func TestAvgReassignPolicy(t *testing.T) {
|
|||
},
|
||||
[]*NodeChannelInfo{{1, []*channel{{Name: "chan1", CollectionID: 1}}}},
|
||||
},
|
||||
//as there's no available nodes except the input node, there's no reassign plan generated
|
||||
// as there's no available nodes except the input node, there's no reassign plan generated
|
||||
[]*ChannelOp{},
|
||||
},
|
||||
{
|
||||
|
@ -468,10 +471,11 @@ func TestAvgReassignPolicy(t *testing.T) {
|
|||
[]*NodeChannelInfo{{1, []*channel{{Name: "chan1", CollectionID: 1}}}},
|
||||
},
|
||||
[]*ChannelOp{
|
||||
//as we use ceil to calculate the wanted average number, there should be one reassign
|
||||
//though the average num less than 1
|
||||
// as we use ceil to calculate the wanted average number, there should be one reassign
|
||||
// though the average num less than 1
|
||||
{Delete, 1, []*channel{{Name: "chan1", CollectionID: 1}}, nil},
|
||||
{Add, 2, []*channel{{Name: "chan1", CollectionID: 1}}, nil}},
|
||||
{Add, 2, []*channel{{Name: "chan1", CollectionID: 1}}, nil},
|
||||
},
|
||||
},
|
||||
{
|
||||
"test_normal_reassigning_for_one_available_nodes",
|
||||
|
@ -487,7 +491,8 @@ func TestAvgReassignPolicy(t *testing.T) {
|
|||
},
|
||||
[]*ChannelOp{
|
||||
{Delete, 1, []*channel{{Name: "chan1", CollectionID: 1}, {Name: "chan2", CollectionID: 1}}, nil},
|
||||
{Add, 2, []*channel{{Name: "chan1", CollectionID: 1}, {Name: "chan2", CollectionID: 1}}, nil}},
|
||||
{Add, 2, []*channel{{Name: "chan1", CollectionID: 1}, {Name: "chan2", CollectionID: 1}}, nil},
|
||||
},
|
||||
},
|
||||
{
|
||||
"test_normal_reassigning_for_multiple_available_nodes",
|
||||
|
@ -499,7 +504,8 @@ func TestAvgReassignPolicy(t *testing.T) {
|
|||
{Name: "chan1", CollectionID: 1},
|
||||
{Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1},
|
||||
{Name: "chan4", CollectionID: 1}}},
|
||||
{Name: "chan4", CollectionID: 1},
|
||||
}},
|
||||
2: {2, []*channel{}},
|
||||
3: {3, []*channel{}},
|
||||
4: {4, []*channel{}},
|
||||
|
@ -512,11 +518,15 @@ func TestAvgReassignPolicy(t *testing.T) {
|
|||
}}},
|
||||
},
|
||||
[]*ChannelOp{
|
||||
{Delete, 1, []*channel{
|
||||
{Name: "chan1", CollectionID: 1},
|
||||
{Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1}},
|
||||
nil},
|
||||
{
|
||||
Delete, 1,
|
||||
[]*channel{
|
||||
{Name: "chan1", CollectionID: 1},
|
||||
{Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{Add, 2, []*channel{{Name: "chan1", CollectionID: 1}}, nil},
|
||||
{Add, 3, []*channel{{Name: "chan2", CollectionID: 1}}, nil},
|
||||
{Add, 4, []*channel{{Name: "chan3", CollectionID: 1}}, nil},
|
||||
|
@ -529,12 +539,18 @@ func TestAvgReassignPolicy(t *testing.T) {
|
|||
memkv.NewMemoryKV(),
|
||||
map[int64]*NodeChannelInfo{
|
||||
1: {1, []*channel{
|
||||
{Name: "chan1", CollectionID: 1}, {Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1}, {Name: "chan4", CollectionID: 1},
|
||||
{Name: "chan5", CollectionID: 1}, {Name: "chan6", CollectionID: 1},
|
||||
{Name: "chan7", CollectionID: 1}, {Name: "chan8", CollectionID: 1},
|
||||
{Name: "chan9", CollectionID: 1}, {Name: "chan10", CollectionID: 1},
|
||||
{Name: "chan11", CollectionID: 1}, {Name: "chan12", CollectionID: 1},
|
||||
{Name: "chan1", CollectionID: 1},
|
||||
{Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1},
|
||||
{Name: "chan4", CollectionID: 1},
|
||||
{Name: "chan5", CollectionID: 1},
|
||||
{Name: "chan6", CollectionID: 1},
|
||||
{Name: "chan7", CollectionID: 1},
|
||||
{Name: "chan8", CollectionID: 1},
|
||||
{Name: "chan9", CollectionID: 1},
|
||||
{Name: "chan10", CollectionID: 1},
|
||||
{Name: "chan11", CollectionID: 1},
|
||||
{Name: "chan12", CollectionID: 1},
|
||||
}},
|
||||
2: {2, []*channel{
|
||||
{Name: "chan13", CollectionID: 1}, {Name: "chan14", CollectionID: 1},
|
||||
|
@ -544,33 +560,51 @@ func TestAvgReassignPolicy(t *testing.T) {
|
|||
},
|
||||
},
|
||||
[]*NodeChannelInfo{{1, []*channel{
|
||||
{Name: "chan1", CollectionID: 1}, {Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1}, {Name: "chan4", CollectionID: 1},
|
||||
{Name: "chan5", CollectionID: 1}, {Name: "chan6", CollectionID: 1},
|
||||
{Name: "chan7", CollectionID: 1}, {Name: "chan8", CollectionID: 1},
|
||||
{Name: "chan9", CollectionID: 1}, {Name: "chan10", CollectionID: 1},
|
||||
{Name: "chan11", CollectionID: 1}, {Name: "chan12", CollectionID: 1},
|
||||
{Name: "chan1", CollectionID: 1},
|
||||
{Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1},
|
||||
{Name: "chan4", CollectionID: 1},
|
||||
{Name: "chan5", CollectionID: 1},
|
||||
{Name: "chan6", CollectionID: 1},
|
||||
{Name: "chan7", CollectionID: 1},
|
||||
{Name: "chan8", CollectionID: 1},
|
||||
{Name: "chan9", CollectionID: 1},
|
||||
{Name: "chan10", CollectionID: 1},
|
||||
{Name: "chan11", CollectionID: 1},
|
||||
{Name: "chan12", CollectionID: 1},
|
||||
}}},
|
||||
},
|
||||
[]*ChannelOp{
|
||||
{Delete, 1, []*channel{
|
||||
{Name: "chan1", CollectionID: 1}, {Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1}, {Name: "chan4", CollectionID: 1},
|
||||
{Name: "chan5", CollectionID: 1}, {Name: "chan6", CollectionID: 1},
|
||||
{Name: "chan7", CollectionID: 1}, {Name: "chan8", CollectionID: 1},
|
||||
{Name: "chan9", CollectionID: 1}, {Name: "chan10", CollectionID: 1},
|
||||
{Name: "chan11", CollectionID: 1}, {Name: "chan12", CollectionID: 1},
|
||||
{Name: "chan1", CollectionID: 1},
|
||||
{Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1},
|
||||
{Name: "chan4", CollectionID: 1},
|
||||
{Name: "chan5", CollectionID: 1},
|
||||
{Name: "chan6", CollectionID: 1},
|
||||
{Name: "chan7", CollectionID: 1},
|
||||
{Name: "chan8", CollectionID: 1},
|
||||
{Name: "chan9", CollectionID: 1},
|
||||
{Name: "chan10", CollectionID: 1},
|
||||
{Name: "chan11", CollectionID: 1},
|
||||
{Name: "chan12", CollectionID: 1},
|
||||
}, nil},
|
||||
{Add, 4, []*channel{
|
||||
{Name: "chan1", CollectionID: 1}, {Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1}, {Name: "chan4", CollectionID: 1},
|
||||
{Name: "chan5", CollectionID: 1}}, nil},
|
||||
{Name: "chan1", CollectionID: 1},
|
||||
{Name: "chan2", CollectionID: 1},
|
||||
{Name: "chan3", CollectionID: 1},
|
||||
{Name: "chan4", CollectionID: 1},
|
||||
{Name: "chan5", CollectionID: 1},
|
||||
}, nil},
|
||||
{Add, 3, []*channel{
|
||||
{Name: "chan6", CollectionID: 1}, {Name: "chan7", CollectionID: 1},
|
||||
{Name: "chan8", CollectionID: 1}, {Name: "chan9", CollectionID: 1},
|
||||
{Name: "chan6", CollectionID: 1},
|
||||
{Name: "chan7", CollectionID: 1},
|
||||
{Name: "chan8", CollectionID: 1},
|
||||
{Name: "chan9", CollectionID: 1},
|
||||
}, nil},
|
||||
{Add, 2, []*channel{
|
||||
{Name: "chan10", CollectionID: 1}, {Name: "chan11", CollectionID: 1},
|
||||
{Name: "chan10", CollectionID: 1},
|
||||
{Name: "chan11", CollectionID: 1},
|
||||
{Name: "chan12", CollectionID: 1},
|
||||
}, nil},
|
||||
},
|
||||
|
|
|
@ -21,10 +21,10 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
)
|
||||
|
||||
|
@ -68,7 +68,8 @@ type AllocatePolicy func(segments []*SegmentInfo, count int64,
|
|||
|
||||
// AllocatePolicyV1 v1 policy simple allocation policy using Greedy Algorithm
|
||||
func AllocatePolicyV1(segments []*SegmentInfo, count int64,
|
||||
maxCountPerSegment int64) ([]*Allocation, []*Allocation) {
|
||||
maxCountPerSegment int64,
|
||||
) ([]*Allocation, []*Allocation) {
|
||||
newSegmentAllocations := make([]*Allocation, 0)
|
||||
existedSegmentAllocations := make([]*Allocation, 0)
|
||||
// create new segment if count >= max num
|
||||
|
|
|
@ -21,13 +21,13 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/common"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
"github.com/milvus-io/milvus/pkg/common"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
)
|
||||
|
||||
func TestUpperLimitCalBySchema(t *testing.T) {
|
||||
|
|
|
@ -238,7 +238,7 @@ func (s *SegmentInfo) Clone(opts ...SegmentInfoOption) *SegmentInfo {
|
|||
allocations: s.allocations,
|
||||
lastFlushTime: s.lastFlushTime,
|
||||
isCompacting: s.isCompacting,
|
||||
//cannot copy size, since binlog may be changed
|
||||
// cannot copy size, since binlog may be changed
|
||||
lastWrittenTime: s.lastWrittenTime,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
|
|
|
@ -23,7 +23,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.uber.org/zap"
|
||||
|
||||
|
@@ -31,17 +30,16 @@ import (
	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/milvus-io/milvus/pkg/log"
	"github.com/milvus-io/milvus/pkg/util/retry"
	"github.com/milvus-io/milvus/pkg/util/tsoutil"
	"github.com/milvus-io/milvus/pkg/util/typeutil"
)

var (
	// allocPool pool of Allocation, to reduce allocation of Allocation
	allocPool = sync.Pool{
		New: func() interface{} {
			return &Allocation{}
		},
	}
)
// allocPool pool of Allocation, to reduce allocation of Allocation
var allocPool = sync.Pool{
	New: func() interface{} {
		return &Allocation{}
	},
}

// getAllocation unifies way to retrieve allocation struct
func getAllocation(numOfRows int64) *Allocation {
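
Here the change runs in the opposite direction from the grouped error variables earlier: a var ( ... ) block that holds only one declaration is flattened into a plain var statement, and its doc comment stays attached above it. A sketch with a hypothetical pool:

package example

import "sync"

// bufPool is illustrative: a one-element var block becomes a single var statement,
// with the comment kept directly above the declaration.
var bufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 0, 1024)
	},
}
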
@ -230,7 +228,7 @@ func (s *SegmentManager) loadSegmentsFromMeta() {
|
|||
}
|
||||
|
||||
func (s *SegmentManager) maybeResetLastExpireForSegments() error {
|
||||
//for all sealed and growing segments, need to reset last expire
|
||||
// for all sealed and growing segments, need to reset last expire
|
||||
if len(s.segments) > 0 {
|
||||
var latestTs uint64
|
||||
allocateErr := retry.Do(context.Background(), func() error {
|
||||
|
@ -257,7 +255,8 @@ func (s *SegmentManager) maybeResetLastExpireForSegments() error {
|
|||
|
||||
// AllocSegment allocate segment per request collection, partition, channel and rows
|
||||
func (s *SegmentManager) AllocSegment(ctx context.Context, collectionID UniqueID,
|
||||
partitionID UniqueID, channelName string, requestRows int64) ([]*Allocation, error) {
|
||||
partitionID UniqueID, channelName string, requestRows int64,
|
||||
) ([]*Allocation, error) {
|
||||
log := log.Ctx(ctx).
|
||||
With(zap.Int64("collectionID", collectionID)).
|
||||
With(zap.Int64("partitionID", partitionID)).
|
||||
|
@ -322,7 +321,8 @@ func (s *SegmentManager) AllocSegment(ctx context.Context, collectionID UniqueID
|
|||
|
||||
// allocSegmentForImport allocates one segment allocation for bulk insert.
|
||||
func (s *SegmentManager) allocSegmentForImport(ctx context.Context, collectionID UniqueID,
|
||||
partitionID UniqueID, channelName string, requestRows int64, importTaskID int64) (*Allocation, error) {
|
||||
partitionID UniqueID, channelName string, requestRows int64, importTaskID int64,
|
||||
) (*Allocation, error) {
|
||||
_, sp := otel.Tracer(typeutil.DataCoordRole).Start(ctx, "Alloc-ImportSegment")
|
||||
defer sp.End()
|
||||
s.mu.Lock()
|
||||
|
@ -375,7 +375,8 @@ func (s *SegmentManager) genExpireTs(ctx context.Context, isImported bool) (Time
|
|||
}
|
||||
|
||||
func (s *SegmentManager) openNewSegment(ctx context.Context, collectionID UniqueID, partitionID UniqueID,
|
||||
channelName string, segmentState commonpb.SegmentState) (*SegmentInfo, error) {
|
||||
channelName string, segmentState commonpb.SegmentState,
|
||||
) (*SegmentInfo, error) {
|
||||
log := log.Ctx(ctx)
|
||||
ctx, sp := otel.Tracer(typeutil.DataCoordRole).Start(ctx, "open-Segment")
|
||||
defer sp.End()
|
||||
|
|
|
@ -56,7 +56,7 @@ func TestManagerOptions(t *testing.T) {
|
|||
opt := withCalUpperLimitPolicy(defaultCalUpperLimitPolicy())
|
||||
assert.NotNil(t, opt)
|
||||
|
||||
//manual set nil``
|
||||
// manual set nil``
|
||||
segmentManager.estimatePolicy = nil
|
||||
opt.apply(segmentManager)
|
||||
assert.True(t, segmentManager.estimatePolicy != nil)
|
||||
|
@ -144,7 +144,7 @@ func TestAllocSegment(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestLastExpireReset(t *testing.T) {
|
||||
//set up meta on dc
|
||||
// set up meta on dc
|
||||
ctx := context.Background()
|
||||
paramtable.Init()
|
||||
Params.Save(Params.DataCoordCfg.AllocLatestExpireAttempt.Key, "1")
|
||||
|
@ -179,7 +179,7 @@ func TestLastExpireReset(t *testing.T) {
|
|||
}
|
||||
meta.AddSegment(context.TODO(), initSegment)
|
||||
|
||||
//assign segments, set max segment to only 1MB, equalling to 10485 rows
|
||||
// assign segments, set max segment to only 1MB, equalling to 10485 rows
|
||||
var bigRows, smallRows int64 = 10000, 1000
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator)
|
||||
initSegment.SegmentInfo.State = commonpb.SegmentState_Dropped
|
||||
|
@ -193,7 +193,7 @@ func TestLastExpireReset(t *testing.T) {
|
|||
allocs, _ = segmentManager.AllocSegment(context.Background(), collID, 0, channelName, smallRows)
|
||||
segmentID3, expire3 := allocs[0].SegmentID, allocs[0].ExpireTime
|
||||
|
||||
//simulate handleTimeTick op on dataCoord
|
||||
// simulate handleTimeTick op on dataCoord
|
||||
meta.SetCurrentRows(segmentID1, bigRows)
|
||||
meta.SetCurrentRows(segmentID2, bigRows)
|
||||
meta.SetCurrentRows(segmentID3, smallRows)
|
||||
|
@ -202,11 +202,11 @@ func TestLastExpireReset(t *testing.T) {
|
|||
assert.Equal(t, commonpb.SegmentState_Sealed, meta.GetSegment(segmentID2).GetState())
|
||||
assert.Equal(t, commonpb.SegmentState_Growing, meta.GetSegment(segmentID3).GetState())
|
||||
|
||||
//pretend that dataCoord break down
|
||||
// pretend that dataCoord break down
|
||||
metaKV.Close()
|
||||
etcdCli.Close()
|
||||
|
||||
//dataCoord restart
|
||||
// dataCoord restart
|
||||
newEtcdCli, _ := etcd.GetEtcdClient(Params.EtcdCfg.UseEmbedEtcd.GetAsBool(), Params.EtcdCfg.EtcdUseSSL.GetAsBool(),
|
||||
Params.EtcdCfg.Endpoints.GetAsStrings(), Params.EtcdCfg.EtcdTLSCert.GetValue(),
|
||||
Params.EtcdCfg.EtcdTLSKey.GetValue(), Params.EtcdCfg.EtcdTLSCACert.GetValue(), Params.EtcdCfg.EtcdTLSMinVersion.GetValue())
|
||||
|
@ -217,14 +217,14 @@ func TestLastExpireReset(t *testing.T) {
|
|||
restartedMeta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
|
||||
assert.Nil(t, err)
|
||||
newSegmentManager, _ := newSegmentManager(restartedMeta, mockAllocator)
|
||||
//reset row number to avoid being cleaned by empty segment
|
||||
// reset row number to avoid being cleaned by empty segment
|
||||
restartedMeta.SetCurrentRows(segmentID1, bigRows)
|
||||
restartedMeta.SetCurrentRows(segmentID2, bigRows)
|
||||
restartedMeta.SetCurrentRows(segmentID3, smallRows)
|
||||
|
||||
//verify lastExpire of growing and sealed segments
|
||||
// verify lastExpire of growing and sealed segments
|
||||
segment1, segment2, segment3 := restartedMeta.GetSegment(segmentID1), restartedMeta.GetSegment(segmentID2), restartedMeta.GetSegment(segmentID3)
|
||||
//segmentState should not be altered but growing segment's lastExpire has been reset to the latest
|
||||
// segmentState should not be altered but growing segment's lastExpire has been reset to the latest
|
||||
assert.Equal(t, commonpb.SegmentState_Sealed, segment1.GetState())
|
||||
assert.Equal(t, commonpb.SegmentState_Sealed, segment2.GetState())
|
||||
assert.Equal(t, commonpb.SegmentState_Growing, segment3.GetState())
|
||||
|
@ -408,7 +408,7 @@ func TestAllocRowsLargerThanOneSegment(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
|
||||
|
||||
var mockPolicy = func(schema *schemapb.CollectionSchema) (int, error) {
|
||||
mockPolicy := func(schema *schemapb.CollectionSchema) (int, error) {
|
||||
return 1, nil
|
||||
}
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator, withCalUpperLimitPolicy(mockPolicy))
|
||||
|
@ -430,7 +430,7 @@ func TestExpireAllocation(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
|
||||
|
||||
var mockPolicy = func(schema *schemapb.CollectionSchema) (int, error) {
|
||||
mockPolicy := func(schema *schemapb.CollectionSchema) (int, error) {
|
||||
return 10000000, nil
|
||||
}
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator, withCalUpperLimitPolicy(mockPolicy))
|
||||
|
@ -548,7 +548,7 @@ func TestTryToSealSegment(t *testing.T) {
|
|||
collID, err := mockAllocator.allocID(context.Background())
|
||||
assert.NoError(t, err)
|
||||
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator, withSegmentSealPolices(sealByLifetimePolicy(math.MinInt64))) //always seal
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator, withSegmentSealPolices(sealByLifetimePolicy(math.MinInt64))) // always seal
|
||||
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, 1, len(allocations))
|
||||
|
@ -573,7 +573,7 @@ func TestTryToSealSegment(t *testing.T) {
|
|||
collID, err := mockAllocator.allocID(context.Background())
|
||||
assert.NoError(t, err)
|
||||
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator, withChannelSealPolices(getChannelOpenSegCapacityPolicy(-1))) //always seal
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator, withChannelSealPolices(getChannelOpenSegCapacityPolicy(-1))) // always seal
|
||||
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, 1, len(allocations))
|
||||
|
@ -600,7 +600,7 @@ func TestTryToSealSegment(t *testing.T) {
|
|||
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator,
|
||||
withSegmentSealPolices(sealByLifetimePolicy(math.MinInt64)),
|
||||
withChannelSealPolices(getChannelOpenSegCapacityPolicy(-1))) //always seal
|
||||
withChannelSealPolices(getChannelOpenSegCapacityPolicy(-1))) // always seal
|
||||
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, 1, len(allocations))
|
||||
|
@ -712,7 +712,7 @@ func TestTryToSealSegment(t *testing.T) {
|
|||
collID, err := mockAllocator.allocID(context.Background())
|
||||
assert.NoError(t, err)
|
||||
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator, withSegmentSealPolices(sealByLifetimePolicy(math.MinInt64))) //always seal
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator, withSegmentSealPolices(sealByLifetimePolicy(math.MinInt64))) // always seal
|
||||
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, 1, len(allocations))
|
||||
|
@ -741,7 +741,7 @@ func TestTryToSealSegment(t *testing.T) {
|
|||
collID, err := mockAllocator.allocID(context.Background())
|
||||
assert.NoError(t, err)
|
||||
meta.AddCollection(&collectionInfo{ID: collID, Schema: schema})
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator, withChannelSealPolices(getChannelOpenSegCapacityPolicy(-1))) //always seal
|
||||
segmentManager, _ := newSegmentManager(meta, mockAllocator, withChannelSealPolices(getChannelOpenSegCapacityPolicy(-1))) // always seal
|
||||
allocations, err := segmentManager.AllocSegment(context.TODO(), collID, 0, "c1", 2)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, 1, len(allocations))
|
||||
|
@ -800,7 +800,6 @@ func TestAllocationPool(t *testing.T) {
|
|||
assert.EqualValues(t, 100, allo.NumOfRows)
|
||||
assert.EqualValues(t, 0, allo.ExpireTime)
|
||||
assert.EqualValues(t, 0, allo.SegmentID)
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -134,7 +134,7 @@ type Server struct {
|
|||
icSession *sessionutil.Session
|
||||
dnEventCh <-chan *sessionutil.SessionEvent
|
||||
inEventCh <-chan *sessionutil.SessionEvent
|
||||
//qcEventCh <-chan *sessionutil.SessionEvent
|
||||
// qcEventCh <-chan *sessionutil.SessionEvent
|
||||
|
||||
enableActiveStandBy bool
|
||||
activateFunc func() error
|
||||
|
@ -142,9 +142,9 @@ type Server struct {
|
|||
dataNodeCreator dataNodeCreatorFunc
|
||||
indexNodeCreator indexNodeCreatorFunc
|
||||
rootCoordClientCreator rootCoordCreatorFunc
|
||||
//indexCoord types.IndexCoord
|
||||
// indexCoord types.IndexCoord
|
||||
|
||||
//segReferManager *SegmentReferenceManager
|
||||
// segReferManager *SegmentReferenceManager
|
||||
indexBuilder *indexBuilder
|
||||
indexNodeManager *IndexNodeManager
|
||||
|
||||
|
@ -902,7 +902,7 @@ func (s *Server) startFlushLoop(ctx context.Context) {
|
|||
logutil.Logger(s.ctx).Info("flush loop shutdown")
|
||||
return
|
||||
case segmentID := <-s.flushCh:
|
||||
//Ignore return error
|
||||
// Ignore return error
|
||||
log.Info("flush successfully", zap.Any("segmentID", segmentID))
|
||||
err := s.postFlush(ctx, segmentID)
|
||||
if err != nil {
|
||||
|
|
|
@ -331,14 +331,14 @@ func TestFlush(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
//func TestGetComponentStates(t *testing.T) {
|
||||
//svr := newTestServer(t)
|
||||
//defer closeTestServer(t, svr)
|
||||
//cli := newMockDataNodeClient(1)
|
||||
//err := cli.Init()
|
||||
//assert.NoError(t, err)
|
||||
//err = cli.Start()
|
||||
//assert.NoError(t, err)
|
||||
// func TestGetComponentStates(t *testing.T) {
|
||||
// svr := newTestServer(t)
|
||||
// defer closeTestServer(t, svr)
|
||||
// cli := newMockDataNodeClient(1)
|
||||
// err := cli.Init()
|
||||
// assert.NoError(t, err)
|
||||
// err = cli.Start()
|
||||
// assert.NoError(t, err)
|
||||
|
||||
//err = svr.cluster.Register(&dataNode{
|
||||
//id: 1,
|
||||
|
@ -503,7 +503,6 @@ func TestGetInsertBinlogPaths(t *testing.T) {
|
|||
resp, err := svr.GetInsertBinlogPaths(svr.ctx, req)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, commonpb.ErrorCode_UnexpectedError, resp.GetStatus().GetErrorCode())
|
||||
|
||||
})
|
||||
|
||||
t.Run("with closed server", func(t *testing.T) {
|
||||
|
@ -529,7 +528,6 @@ func TestGetCollectionStatistics(t *testing.T) {
|
|||
resp, err := svr.GetCollectionStatistics(svr.ctx, req)
|
||||
assert.NoError(t, err)
|
||||
assert.EqualValues(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
|
||||
|
||||
})
|
||||
t.Run("with closed server", func(t *testing.T) {
|
||||
svr := newTestServer(t, nil)
|
||||
|
@ -1743,11 +1741,10 @@ func TestDropVirtualChannel(t *testing.T) {
|
|||
err = svr.channelManager.Watch(&channel{Name: "ch1", CollectionID: 0})
|
||||
require.Nil(t, err)
|
||||
|
||||
//resend
|
||||
// resend
|
||||
resp, err = svr.DropVirtualChannel(ctx, req)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, commonpb.ErrorCode_Success, resp.GetStatus().GetErrorCode())
|
||||
|
||||
})
|
||||
|
||||
t.Run("with channel not matched", func(t *testing.T) {
|
||||
|
@ -1798,35 +1795,45 @@ func TestGetChannelSeekPosition(t *testing.T) {
|
|||
channelName string
|
||||
expectedPos *msgpb.MsgPosition
|
||||
}{
|
||||
{"test-with-channelCP",
|
||||
{
|
||||
"test-with-channelCP",
|
||||
&msgpb.MsgPosition{ChannelName: "ch1", Timestamp: 100, MsgID: msgID},
|
||||
[]*msgpb.MsgPosition{{ChannelName: "ch1", Timestamp: 50, MsgID: msgID}, {ChannelName: "ch1", Timestamp: 200, MsgID: msgID}},
|
||||
startPos1,
|
||||
"ch1", &msgpb.MsgPosition{ChannelName: "ch1", Timestamp: 100, MsgID: msgID}},
|
||||
"ch1", &msgpb.MsgPosition{ChannelName: "ch1", Timestamp: 100, MsgID: msgID},
|
||||
},
|
||||
|
||||
{"test-with-segmentDMLPos",
|
||||
{
|
||||
"test-with-segmentDMLPos",
|
||||
nil,
|
||||
[]*msgpb.MsgPosition{{ChannelName: "ch1", Timestamp: 50, MsgID: msgID}, {ChannelName: "ch1", Timestamp: 200, MsgID: msgID}},
|
||||
startPos1,
|
||||
"ch1", &msgpb.MsgPosition{ChannelName: "ch1", Timestamp: 50, MsgID: msgID}},
|
||||
"ch1", &msgpb.MsgPosition{ChannelName: "ch1", Timestamp: 50, MsgID: msgID},
|
||||
},
|
||||
|
||||
{"test-with-collStartPos",
|
||||
{
|
||||
"test-with-collStartPos",
|
||||
nil,
|
||||
nil,
|
||||
startPos1,
|
||||
"ch1", &msgpb.MsgPosition{ChannelName: "ch1", MsgID: startPos1[0].Data}},
|
||||
"ch1", &msgpb.MsgPosition{ChannelName: "ch1", MsgID: startPos1[0].Data},
|
||||
},
|
||||
|
||||
{"test-non-exist-channel-1",
|
||||
{
|
||||
"test-non-exist-channel-1",
|
||||
nil,
|
||||
nil,
|
||||
startPosNonExist,
|
||||
"ch1", nil},
|
||||
"ch1", nil,
|
||||
},
|
||||
|
||||
{"test-non-exist-channel-2",
|
||||
{
|
||||
"test-non-exist-channel-2",
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
"ch1", nil},
|
||||
"ch1", nil,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.testName, func(t *testing.T) {
|
||||
|
@ -1858,7 +1865,8 @@ func TestGetChannelSeekPosition(t *testing.T) {
|
|||
|
||||
seekPos := svr.handler.(*ServerHandler).GetChannelSeekPosition(&channel{
|
||||
Name: test.channelName,
|
||||
CollectionID: 0}, allPartitionID)
|
||||
CollectionID: 0,
|
||||
}, allPartitionID)
|
||||
if test.expectedPos == nil {
|
||||
assert.True(t, seekPos == nil)
|
||||
} else {
|
||||
|
@ -2460,7 +2468,7 @@ func TestShouldDropChannel(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("channel name not in kv, collection not exist", func(t *testing.T) {
|
||||
//myRoot.code = commonpb.ErrorCode_CollectionNotExists
|
||||
// myRoot.code = commonpb.ErrorCode_CollectionNotExists
|
||||
myRoot.EXPECT().DescribeCollection(mock.Anything, mock.Anything).
|
||||
Return(&milvuspb.DescribeCollectionResponse{
|
||||
Status: merr.Status(merr.WrapErrCollectionNotFound(-1)),
|
||||
|
@ -2509,7 +2517,6 @@ func TestShouldDropChannel(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetRecoveryInfo(t *testing.T) {
|
||||
|
||||
t.Run("test get recovery info with no segments", func(t *testing.T) {
|
||||
svr := newTestServer(t, nil)
|
||||
defer closeTestServer(t, svr)
|
||||
|
@ -2531,7 +2538,8 @@ func TestGetRecoveryInfo(t *testing.T) {
|
|||
})
|
||||
|
||||
createSegment := func(id, collectionID, partitionID, numOfRows int64, posTs uint64,
|
||||
channel string, state commonpb.SegmentState) *datapb.SegmentInfo {
|
||||
channel string, state commonpb.SegmentState,
|
||||
) *datapb.SegmentInfo {
|
||||
return &datapb.SegmentInfo{
|
||||
ID: id,
|
||||
CollectionID: collectionID,
|
||||
|
@ -2718,7 +2726,7 @@ func TestGetRecoveryInfo(t *testing.T) {
|
|||
assert.NoError(t, err)
|
||||
err = svr.meta.AddSegment(context.TODO(), NewSegmentInfo(seg2))
|
||||
assert.NoError(t, err)
|
||||
//svr.indexCoord.(*mocks.MockIndexCoord).EXPECT().GetIndexInfos(mock.Anything, mock.Anything).Return(nil, nil)
|
||||
// svr.indexCoord.(*mocks.MockIndexCoord).EXPECT().GetIndexInfos(mock.Anything, mock.Anything).Return(nil, nil)
|
||||
|
||||
req := &datapb.GetRecoveryInfoRequest{
|
||||
CollectionID: 0,
|
||||
|
@ -3228,7 +3236,7 @@ func TestOptions(t *testing.T) {
|
|||
})
|
||||
t.Run("WithDataNodeCreator", func(t *testing.T) {
|
||||
var target int64
|
||||
var val = rand.Int63()
|
||||
val := rand.Int63()
|
||||
opt := WithDataNodeCreator(func(context.Context, string, int64) (types.DataNode, error) {
|
||||
target = val
|
||||
return nil, nil
|
||||
|
@ -3596,9 +3604,7 @@ func TestGetFlushState(t *testing.T) {
|
|||
svr := newTestServerWithMeta(t, nil, meta)
|
||||
defer closeTestServer(t, svr)
|
||||
|
||||
var (
|
||||
collection = int64(0)
|
||||
)
|
||||
collection := int64(0)
|
||||
|
||||
resp, err := svr.GetFlushState(context.Background(), &datapb.GetFlushStateRequest{
|
||||
FlushTs: 11,
|
||||
|
@ -3624,18 +3630,34 @@ func TestGetFlushAllState(t *testing.T) {
|
|||
ExpectedSuccess bool
|
||||
ExpectedFlushed bool
|
||||
}{
|
||||
{"test FlushAll flushed", []Timestamp{100, 200}, 99,
|
||||
true, false, false, false, true, true},
|
||||
{"test FlushAll not flushed", []Timestamp{100, 200}, 150,
|
||||
true, false, false, false, true, false},
|
||||
{"test Sever is not healthy", nil, 0,
|
||||
false, false, false, false, false, false},
|
||||
{"test ListDatabase failed", nil, 0,
|
||||
true, true, false, false, false, false},
|
||||
{"test ShowCollections failed", nil, 0,
|
||||
true, false, true, false, false, false},
|
||||
{"test DescribeCollection failed", nil, 0,
|
||||
true, false, false, true, false, false},
|
||||
{
|
||||
"test FlushAll flushed",
|
||||
[]Timestamp{100, 200},
|
||||
99,
|
||||
true, false, false, false, true, true,
|
||||
},
|
||||
{
|
||||
"test FlushAll not flushed",
|
||||
[]Timestamp{100, 200},
|
||||
150,
|
||||
true, false, false, false, true, false,
|
||||
},
|
||||
{
|
||||
"test Sever is not healthy", nil, 0,
|
||||
false, false, false, false, false, false,
|
||||
},
|
||||
{
|
||||
"test ListDatabase failed", nil, 0,
|
||||
true, true, false, false, false, false,
|
||||
},
|
||||
{
|
||||
"test ShowCollections failed", nil, 0,
|
||||
true, false, true, false, false, false,
|
||||
},
|
||||
{
|
||||
"test DescribeCollection failed", nil, 0,
|
||||
true, false, false, true, false, false,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.testName, func(t *testing.T) {
|
||||
|
@ -4238,9 +4260,9 @@ func newTestServerWithMeta(t *testing.T, receiveCh chan any, meta *meta, opts ..
|
|||
svr.rootCoordClientCreator = func(ctx context.Context, metaRootPath string, etcdCli *clientv3.Client) (types.RootCoord, error) {
|
||||
return newMockRootCoordService(), nil
|
||||
}
|
||||
//indexCoord := mocks.NewMockIndexCoord(t)
|
||||
//indexCoord.EXPECT().GetIndexInfos(mock.Anything, mock.Anything).Return(nil, nil).Maybe()
|
||||
//svr.indexCoord = indexCoord
|
||||
// indexCoord := mocks.NewMockIndexCoord(t)
|
||||
// indexCoord.EXPECT().GetIndexInfos(mock.Anything, mock.Anything).Return(nil, nil).Maybe()
|
||||
// svr.indexCoord = indexCoord
|
||||
|
||||
err = svr.Init()
|
||||
assert.NoError(t, err)
|
||||
|
@@ -4327,7 +4349,8 @@ func Test_CheckHealth(t *testing.T) {
		svr.stateCode.Store(commonpb.StateCode_Healthy)
		healthClient := &mockDataNodeClient{
			id: 1,
			state: commonpb.StateCode_Healthy}
			state: commonpb.StateCode_Healthy,
		}
		sm := NewSessionManager()
		sm.sessions = struct {
			sync.RWMutex
@@ -4352,7 +4375,8 @@ func Test_CheckHealth(t *testing.T) {
		svr.stateCode.Store(commonpb.StateCode_Healthy)
		unhealthClient := &mockDataNodeClient{
			id: 1,
			state: commonpb.StateCode_Abnormal}
			state: commonpb.StateCode_Abnormal,
		}
		sm := NewSessionManager()
		sm.sessions = struct {
			sync.RWMutex
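
The two Test_CheckHealth hunks above show the same rule applied to composite literals: once the closing brace moves onto its own line, the last field keeps a trailing comma. A hypothetical literal with the same shape, not drawn from the Milvus code:

package example

type clientInfo struct {
	id    int64
	state string
}

// newHealthyClient is illustrative; the field values are placeholders.
func newHealthyClient() *clientInfo {
	return &clientInfo{
		id:    1,
		state: "healthy", // trailing comma required once "}" sits on its own line
	}
}
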
@ -98,7 +98,6 @@ func TestServer_GcConfirm(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestGetRecoveryInfoV2(t *testing.T) {
|
||||
|
||||
t.Run("test get recovery info with no segments", func(t *testing.T) {
|
||||
svr := newTestServer(t, nil)
|
||||
defer closeTestServer(t, svr)
|
||||
|
@ -119,7 +118,8 @@ func TestGetRecoveryInfoV2(t *testing.T) {
|
|||
})
|
||||
|
||||
createSegment := func(id, collectionID, partitionID, numOfRows int64, posTs uint64,
|
||||
channel string, state commonpb.SegmentState) *datapb.SegmentInfo {
|
||||
channel string, state commonpb.SegmentState,
|
||||
) *datapb.SegmentInfo {
|
||||
return &datapb.SegmentInfo{
|
||||
ID: id,
|
||||
CollectionID: collectionID,
|
||||
|
|
|
@ -22,6 +22,8 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
grpcdatanodeclient "github.com/milvus-io/milvus/internal/distributed/datanode/client"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
|
@ -32,7 +34,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
"github.com/milvus-io/milvus/pkg/util/retry"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
@ -58,7 +58,6 @@ func (a *Impl) GetIDAlloactor() *gAllocator.IDAllocator {
|
|||
}
|
||||
|
||||
func (a *Impl) GetGenerator(count int, done <-chan struct{}) (<-chan UniqueID, error) {
|
||||
|
||||
idStart, _, err := a.Alloc(uint32(count))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
@ -85,6 +85,7 @@ func (m *RootCoordFactory) AllocID(ctx context.Context, in *rootcoordpb.AllocIDR
|
|||
resp := &rootcoordpb.AllocIDResponse{
|
||||
ID: m.ID,
|
||||
Count: in.GetCount(),
|
||||
Status: merr.Status(nil)}
|
||||
Status: merr.Status(nil),
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/datanode/allocator"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
|
@ -32,7 +33,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/milvus-io/milvus/pkg/util/conc"
|
||||
"github.com/milvus-io/milvus/pkg/util/metautil"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -64,8 +64,10 @@ type binlogIO struct {
|
|||
allocator.Allocator
|
||||
}
|
||||
|
||||
var _ downloader = (*binlogIO)(nil)
|
||||
var _ uploader = (*binlogIO)(nil)
|
||||
var (
|
||||
_ downloader = (*binlogIO)(nil)
|
||||
_ uploader = (*binlogIO)(nil)
|
||||
)
|
||||
|
||||
func (b *binlogIO) download(ctx context.Context, paths []string) ([]*Blob, error) {
|
||||
log.Debug("down load", zap.Strings("path", paths))
|
||||
|
@ -78,7 +80,7 @@ func (b *binlogIO) download(ctx context.Context, paths []string) ([]*Blob, error
|
|||
localPath := path
|
||||
future := getMultiReadPool().Submit(func() (any, error) {
|
||||
var vs []byte
|
||||
var err = errStart
|
||||
err := errStart
|
||||
for err != nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
@ -111,7 +113,8 @@ func (b *binlogIO) uploadSegmentFiles(
|
|||
ctx context.Context,
|
||||
CollectionID UniqueID,
|
||||
segID UniqueID,
|
||||
kvs map[string][]byte) error {
|
||||
kvs map[string][]byte,
|
||||
) error {
|
||||
log.Debug("update", zap.Int64("collectionID", CollectionID), zap.Int64("segmentID", segID))
|
||||
if len(kvs) == 0 {
|
||||
return nil
|
||||
|
@ -121,7 +124,7 @@ func (b *binlogIO) uploadSegmentFiles(
|
|||
localPath := key
|
||||
localVal := val
|
||||
future := getMultiReadPool().Submit(func() (any, error) {
|
||||
var err = errStart
|
||||
err := errStart
|
||||
for err != nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
|
@ -242,7 +245,8 @@ func (b *binlogIO) uploadStatsLog(
|
|||
iData *InsertData,
|
||||
stats *storage.PrimaryKeyStats,
|
||||
totRows int64,
|
||||
meta *etcdpb.CollectionMeta) (map[UniqueID]*datapb.FieldBinlog, map[UniqueID]*datapb.FieldBinlog, error) {
|
||||
meta *etcdpb.CollectionMeta,
|
||||
) (map[UniqueID]*datapb.FieldBinlog, map[UniqueID]*datapb.FieldBinlog, error) {
|
||||
var inPaths map[int64]*datapb.FieldBinlog
|
||||
var err error
|
||||
|
||||
|
@ -278,8 +282,8 @@ func (b *binlogIO) uploadInsertLog(
|
|||
segID UniqueID,
|
||||
partID UniqueID,
|
||||
iData *InsertData,
|
||||
meta *etcdpb.CollectionMeta) (map[UniqueID]*datapb.FieldBinlog, error) {
|
||||
|
||||
meta *etcdpb.CollectionMeta,
|
||||
) (map[UniqueID]*datapb.FieldBinlog, error) {
|
||||
iCodec := storage.NewInsertCodecWithSchema(meta)
|
||||
kvs := make(map[string][]byte)
|
||||
|
||||
|
@ -309,7 +313,8 @@ func (b *binlogIO) uploadDeltaLog(
|
|||
segID UniqueID,
|
||||
partID UniqueID,
|
||||
dData *DeleteData,
|
||||
meta *etcdpb.CollectionMeta) ([]*datapb.FieldBinlog, error) {
|
||||
meta *etcdpb.CollectionMeta,
|
||||
) ([]*datapb.FieldBinlog, error) {
|
||||
var (
|
||||
deltaInfo = make([]*datapb.FieldBinlog, 0)
|
||||
kvs = make(map[string][]byte)
|
||||
|
|
|
@ -24,16 +24,16 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
|
||||
"github.com/milvus-io/milvus/internal/datanode/allocator"
|
||||
"github.com/milvus-io/milvus/internal/storage"
|
||||
"github.com/milvus-io/milvus/pkg/common"
|
||||
"github.com/milvus-io/milvus/pkg/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var binlogTestDir = "/tmp/milvus_test/test_binlog_io"
|
||||
|
@ -201,7 +201,6 @@ func TestBinlogIOInnerMethods(t *testing.T) {
|
|||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
if test.isvalid {
|
||||
|
||||
k, v, err := b.genDeltaBlobs(&DeleteData{
|
||||
Pks: []primaryKey{test.deletepk},
|
||||
Tss: []uint64{test.ts},
|
||||
|
@ -237,7 +236,6 @@ func TestBinlogIOInnerMethods(t *testing.T) {
|
|||
assert.Error(t, err)
|
||||
assert.Empty(t, k)
|
||||
assert.Empty(t, v)
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
|
|
|
@@ -62,7 +62,6 @@ func (m *DeltaBufferManager) GetEntriesNum(segID UniqueID) int64 {
func (m *DeltaBufferManager) UpdateCompactedSegments() {
	compactedTo2From := m.channel.listCompactedSegmentIDs()
	for compactedTo, compactedFrom := range compactedTo2From {
		// if the compactedTo segment has 0 numRows, there'll be no segments
		// in the channel meta, so remove all compacted from segments related
		if !m.channel.hasSegment(compactedTo, true) {

@@ -87,7 +86,6 @@ func (m *DeltaBufferManager) UpdateCompactedSegments() {
		// only store delBuf if EntriesNum > 0
		if compactToDelBuff.EntriesNum > 0 {
			m.pushOrFixHeap(compactedTo, compactToDelBuff)
			// We need to re-add the memorySize because m.Delete(segID) sub them all.
			m.usedMemory.Add(compactToDelBuff.GetMemorySize())

@@ -129,7 +127,8 @@ func (m *DeltaBufferManager) deleteFromHeap(buffer *DelDataBuf) {
}

func (m *DeltaBufferManager) StoreNewDeletes(segID UniqueID, pks []primaryKey,
	tss []Timestamp, tr TimeRange, startPos, endPos *msgpb.MsgPosition) {
	tss []Timestamp, tr TimeRange, startPos, endPos *msgpb.MsgPosition,
) {
	buffer, loaded := m.Load(segID)
	if !loaded {
		buffer = newDelDataBuf(segID)

@@ -154,7 +153,6 @@ func (m *DeltaBufferManager) Delete(segID UniqueID) {
		m.usedMemory.Sub(buffer.GetMemorySize())
		m.deleteFromHeap(buffer)
		m.channel.rollDeleteBuffer(segID)
	}
}

@@ -165,7 +163,7 @@ func (m *DeltaBufferManager) popHeapItem() *Item {
}

func (m *DeltaBufferManager) ShouldFlushSegments() []UniqueID {
	var memUsage = m.usedMemory.Load()
	memUsage := m.usedMemory.Load()
	if memUsage < Params.DataNodeCfg.FlushDeleteBufferBytes.GetAsInt64() {
		return nil
	}

@@ -181,12 +179,11 @@ func (m *DeltaBufferManager) ShouldFlushSegments() []UniqueID {
		memUsage -= segItem.memorySize
		if memUsage < Params.DataNodeCfg.FlushDeleteBufferBytes.GetAsInt64() {
			break
		}
	}
	//here we push all selected segment back into the heap
	//in order to keep the heap semantically correct
	// here we push all selected segment back into the heap
	// in order to keep the heap semantically correct
	m.heapGuard.Lock()
	for _, segMem := range poppedItems {
		heap.Push(m.delBufHeap, segMem)

@@ -334,7 +331,7 @@ func (ddb *DelDataBuf) Buffer(pks []primaryKey, tss []Timestamp, tr TimeRange, s
		varCharPk := pks[i].(*varCharPrimaryKey)
		bufSize += int64(len(varCharPk.Value))
	}
	//accumulate buf size for timestamp, which is 8 bytes
	// accumulate buf size for timestamp, which is 8 bytes
	bufSize += 8
	}

@@ -430,13 +427,14 @@ func newBufferData(collSchema *schemapb.CollectionSchema) (*BufferData, error) {
		limit++
	}

	//TODO::xige-16 eval vec and string field
	// TODO::xige-16 eval vec and string field
	return &BufferData{
		buffer: &InsertData{Data: make(map[UniqueID]storage.FieldData)},
		size: 0,
		limit: limit,
		tsFrom: math.MaxUint64,
		tsTo: 0}, nil
		tsTo: 0,
	}, nil
}

func newDelDataBuf(segmentID UniqueID) *DelDataBuf {
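Several of the hunks above make two purely cosmetic changes: var declarations with an initializer become short variable declarations (var memUsage = ... becomes memUsage := ...), and line comments gain a space after the slashes. A tiny sketch of both, with made-up values rather than anything taken from the diff:

    package main

    import "fmt"

    func main() {
        // Short variable declaration instead of: var memUsage = int64(208)
        memUsage := int64(208)
        limit := int64(200)

        // Comments are written as "// text" rather than "//text".
        if memUsage > limit {
            fmt.Println("flush needed")
        }
    }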
@@ -170,7 +170,7 @@ func Test_CompactSegBuff(t *testing.T) {
		},
		delBufHeap: &PriorityQueue{},
	}
	//1. set compactTo and compactFrom
	// 1. set compactTo and compactFrom
	targetSeg := &Segment{segmentID: 3333}
	targetSeg.setType(datapb.SegmentType_Flushed)

@@ -190,7 +190,7 @@ func Test_CompactSegBuff(t *testing.T) {
	channelSegments[seg2.segmentID] = seg2
	channelSegments[targetSeg.segmentID] = targetSeg

	//2. set up deleteDataBuf for seg1 and seg2
	// 2. set up deleteDataBuf for seg1 and seg2
	delDataBuf1 := newDelDataBuf(seg1.segmentID)
	delDataBuf1.EntriesNum++
	delDataBuf1.updateStartAndEndPosition(nil, &msgpb.MsgPosition{Timestamp: 50})

@@ -203,12 +203,12 @@ func Test_CompactSegBuff(t *testing.T) {
	delBufferManager.updateMeta(seg2.segmentID, delDataBuf2)
	heap.Push(delBufferManager.delBufHeap, delDataBuf2.item)

	//3. test compact
	// 3. test compact
	delBufferManager.UpdateCompactedSegments()

	//4. expect results in two aspects:
	//4.1 compactedFrom segments are removed from delBufferManager
	//4.2 compactedTo seg is set properly with correct entriesNum
	// 4. expect results in two aspects:
	// 4.1 compactedFrom segments are removed from delBufferManager
	// 4.2 compactedTo seg is set properly with correct entriesNum
	_, seg1Exist := delBufferManager.Load(seg1.segmentID)
	_, seg2Exist := delBufferManager.Load(seg2.segmentID)
	assert.False(t, seg1Exist)

@@ -221,7 +221,7 @@ func Test_CompactSegBuff(t *testing.T) {
	assert.NotNil(t, targetSegBuf.item)
	assert.Equal(t, targetSeg.segmentID, targetSegBuf.item.segmentID)

	//5. test roll and evict (https://github.com/milvus-io/milvus/issues/20501)
	// 5. test roll and evict (https://github.com/milvus-io/milvus/issues/20501)
	delBufferManager.channel.rollDeleteBuffer(targetSeg.segmentID)
	_, segCompactedToExist := delBufferManager.Load(targetSeg.segmentID)
	assert.False(t, segCompactedToExist)
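The test-table hunks that follow reflow struct literals in the same way the newBufferData hunk above does: each field or element goes on its own line with a trailing comma, and the closing brace is no longer fused onto the last field (tsTo: 0}, nil becomes tsTo: 0, with }, nil on the next line). A short sketch with a stand-in type (bufferData is hypothetical, not a type from the diff):

    package main

    import "fmt"

    // bufferData is a made-up type used only to show the literal layout.
    type bufferData struct {
        size   int64
        limit  int64
        tsFrom uint64
        tsTo   uint64
    }

    func main() {
        bd := &bufferData{
            size:   0,
            limit:  10,
            tsFrom: 0,
            tsTo:   0,
        }
        fmt.Printf("%+v\n", bd) // prints the initialized fields
    }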
@ -271,25 +271,61 @@ func TestUpdateCompactedSegments(t *testing.T) {
|
|||
|
||||
expectedSegsRemain []UniqueID
|
||||
}{
|
||||
{"zero segments", false,
|
||||
[]UniqueID{}, []UniqueID{}, []UniqueID{}},
|
||||
{"segment no compaction", false,
|
||||
[]UniqueID{}, []UniqueID{}, []UniqueID{100, 101}},
|
||||
{"segment compacted", true,
|
||||
[]UniqueID{200}, []UniqueID{103}, []UniqueID{100, 101}},
|
||||
{"segment compacted 100>201", true,
|
||||
[]UniqueID{201}, []UniqueID{100}, []UniqueID{101, 201}},
|
||||
{"segment compacted 100+101>201", true,
|
||||
[]UniqueID{201, 201}, []UniqueID{100, 101}, []UniqueID{201}},
|
||||
{"segment compacted 100>201, 101>202", true,
|
||||
[]UniqueID{201, 202}, []UniqueID{100, 101}, []UniqueID{201, 202}},
|
||||
{
|
||||
"zero segments", false,
|
||||
[]UniqueID{},
|
||||
[]UniqueID{},
|
||||
[]UniqueID{},
|
||||
},
|
||||
{
|
||||
"segment no compaction", false,
|
||||
[]UniqueID{},
|
||||
[]UniqueID{},
|
||||
[]UniqueID{100, 101},
|
||||
},
|
||||
{
|
||||
"segment compacted", true,
|
||||
[]UniqueID{200},
|
||||
[]UniqueID{103},
|
||||
[]UniqueID{100, 101},
|
||||
},
|
||||
{
|
||||
"segment compacted 100>201", true,
|
||||
[]UniqueID{201},
|
||||
[]UniqueID{100},
|
||||
[]UniqueID{101, 201},
|
||||
},
|
||||
{
|
||||
"segment compacted 100+101>201", true,
|
||||
[]UniqueID{201, 201},
|
||||
[]UniqueID{100, 101},
|
||||
[]UniqueID{201},
|
||||
},
|
||||
{
|
||||
"segment compacted 100>201, 101>202", true,
|
||||
[]UniqueID{201, 202},
|
||||
[]UniqueID{100, 101},
|
||||
[]UniqueID{201, 202},
|
||||
},
|
||||
// false
|
||||
{"segment compacted 100>201", false,
|
||||
[]UniqueID{201}, []UniqueID{100}, []UniqueID{101}},
|
||||
{"segment compacted 100+101>201", false,
|
||||
[]UniqueID{201, 201}, []UniqueID{100, 101}, []UniqueID{}},
|
||||
{"segment compacted 100>201, 101>202", false,
|
||||
[]UniqueID{201, 202}, []UniqueID{100, 101}, []UniqueID{}},
|
||||
{
|
||||
"segment compacted 100>201", false,
|
||||
[]UniqueID{201},
|
||||
[]UniqueID{100},
|
||||
[]UniqueID{101},
|
||||
},
|
||||
{
|
||||
"segment compacted 100+101>201", false,
|
||||
[]UniqueID{201, 201},
|
||||
[]UniqueID{100, 101},
|
||||
[]UniqueID{},
|
||||
},
|
||||
{
|
||||
"segment compacted 100>201, 101>202", false,
|
||||
[]UniqueID{201, 202},
|
||||
[]UniqueID{100, 101},
|
||||
[]UniqueID{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
|
|
@ -693,7 +693,6 @@ func (c *ChannelMeta) mergeFlushedSegments(ctx context.Context, seg *Segment, pl
|
|||
if !c.hasSegment(ID, true) || c.hasSegment(ID, false) {
|
||||
inValidSegments = append(inValidSegments, ID)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if len(inValidSegments) > 0 {
|
||||
|
|
|
@ -203,7 +203,8 @@ func TestChannelMeta_getCollectionAndPartitionID(t *testing.T) {
|
|||
seg.setType(test.segType)
|
||||
channel := &ChannelMeta{
|
||||
segments: map[UniqueID]*Segment{
|
||||
test.segID: &seg},
|
||||
test.segID: &seg,
|
||||
},
|
||||
}
|
||||
|
||||
collID, parID, err := channel.getCollectionAndPartitionID(test.segID)
|
||||
|
@ -703,11 +704,9 @@ func TestChannelMeta_InterfaceMethod(t *testing.T) {
|
|||
} else {
|
||||
assert.False(t, channel.hasSegment(3, true))
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestChannelMeta_loadStats(t *testing.T) {
|
||||
|
@ -730,7 +729,7 @@ func TestChannelMeta_loadStats(t *testing.T) {
|
|||
partitionID: 2,
|
||||
}
|
||||
|
||||
//gen pk stats bytes
|
||||
// gen pk stats bytes
|
||||
stats := storage.NewPrimaryKeyStats(106, int64(schemapb.DataType_Int64), 10)
|
||||
iCodec := storage.NewInsertCodecWithSchema(meta)
|
||||
|
||||
|
@ -752,7 +751,8 @@ func TestChannelMeta_loadStats(t *testing.T) {
|
|||
Binlogs: []*datapb.Binlog{{
|
||||
//<StatsLogPath>/<collectionID>/<partitionID>/<segmentID>/<FieldID>/<logIdx>
|
||||
LogPath: path.Join(common.SegmentStatslogPath, metautil.JoinIDPath(1, 2, 1, 106, 10)),
|
||||
}}}}, 0)
|
||||
}},
|
||||
}}, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// load flushed stats log
|
||||
|
@ -767,7 +767,8 @@ func TestChannelMeta_loadStats(t *testing.T) {
|
|||
Binlogs: []*datapb.Binlog{{
|
||||
//<StatsLogPath>/<collectionID>/<partitionID>/<segmentID>/<FieldID>/<logIdx>
|
||||
LogPath: path.Join(common.SegmentStatslogPath, metautil.JoinIDPath(1, 2, 2, 106), storage.CompoundStatsType.LogIdx()),
|
||||
}}}}, 0)
|
||||
}},
|
||||
}}, 0)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
@ -830,7 +831,6 @@ func TestChannelMeta_UpdatePKRange(t *testing.T) {
|
|||
assert.True(t, segNew.isPKExist(pk))
|
||||
assert.True(t, segNormal.isPKExist(pk))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestChannelMeta_ChannelCP(t *testing.T) {
|
||||
|
@ -866,7 +866,8 @@ func TestChannelMeta_ChannelCP(t *testing.T) {
|
|||
t.Run("set insertBuffer&deleteBuffer then get", func(t *testing.T) {
|
||||
run := func(curInsertPos, curDeletePos *msgpb.MsgPosition,
|
||||
hisInsertPoss, hisDeletePoss []*msgpb.MsgPosition,
|
||||
ttPos, expectedPos *msgpb.MsgPosition) {
|
||||
ttPos, expectedPos *msgpb.MsgPosition,
|
||||
) {
|
||||
segmentID := UniqueID(1)
|
||||
channel := newChannel(mockVChannel, collID, nil, rc, cm)
|
||||
channel.chunkManager = &mockDataCM{}
|
||||
|
|
|
@@ -20,8 +20,9 @@ import (
	"context"
	"testing"

	"github.com/milvus-io/milvus/internal/proto/datapb"
	"github.com/stretchr/testify/assert"

	"github.com/milvus-io/milvus/internal/proto/datapb"
)

func TestCompactionExecutor(t *testing.T) {
@ -114,7 +115,6 @@ func TestCompactionExecutor(t *testing.T) {
|
|||
t.FailNow()
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func newMockCompactor(isvalid bool) *mockCompactor {
|
||||
|
@ -143,7 +143,6 @@ func (mc *mockCompactor) complete() {
|
|||
}
|
||||
|
||||
func (mc *mockCompactor) injectDone(success bool) {
|
||||
|
||||
}
|
||||
|
||||
func (mc *mockCompactor) compact() (*datapb.CompactionResult, error) {
|
||||
|
|
|
@ -25,6 +25,8 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
|
||||
"github.com/milvus-io/milvus/internal/datanode/allocator"
|
||||
"github.com/milvus-io/milvus/internal/proto/datapb"
|
||||
|
@ -40,7 +42,6 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/util/timerecord"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -96,8 +97,8 @@ func newCompactionTask(
|
|||
fm flushManager,
|
||||
alloc allocator.Allocator,
|
||||
plan *datapb.CompactionPlan,
|
||||
chunkManager storage.ChunkManager) *compactionTask {
|
||||
|
||||
chunkManager storage.ChunkManager,
|
||||
) *compactionTask {
|
||||
ctx1, cancel := context.WithCancel(ctx)
|
||||
return &compactionTask{
|
||||
ctx: ctx1,
|
||||
|
@ -152,7 +153,7 @@ func (t *compactionTask) mergeDeltalogs(dBlobs map[UniqueID][]*Blob) (map[interf
|
|||
mergeStart := time.Now()
|
||||
dCodec := storage.NewDeleteCodec()
|
||||
|
||||
var pk2ts = make(map[interface{}]Timestamp)
|
||||
pk2ts := make(map[interface{}]Timestamp)
|
||||
|
||||
for _, blobs := range dBlobs {
|
||||
_, _, dData, err := dCodec.Deserialize(blobs)
|
||||
|
@ -184,7 +185,8 @@ func (t *compactionTask) uploadRemainLog(
|
|||
stats *storage.PrimaryKeyStats,
|
||||
totRows int64,
|
||||
fID2Content map[UniqueID][]interface{},
|
||||
fID2Type map[UniqueID]schemapb.DataType) (map[UniqueID]*datapb.FieldBinlog, map[UniqueID]*datapb.FieldBinlog, error) {
|
||||
fID2Type map[UniqueID]schemapb.DataType,
|
||||
) (map[UniqueID]*datapb.FieldBinlog, map[UniqueID]*datapb.FieldBinlog, error) {
|
||||
var iData *InsertData
|
||||
|
||||
// remain insert data
|
||||
|
@ -220,9 +222,11 @@ func (t *compactionTask) uploadSingleInsertLog(
|
|||
partID UniqueID,
|
||||
meta *etcdpb.CollectionMeta,
|
||||
fID2Content map[UniqueID][]interface{},
|
||||
fID2Type map[UniqueID]schemapb.DataType) (map[UniqueID]*datapb.FieldBinlog, error) {
|
||||
fID2Type map[UniqueID]schemapb.DataType,
|
||||
) (map[UniqueID]*datapb.FieldBinlog, error) {
|
||||
iData := &InsertData{
|
||||
Data: make(map[storage.FieldID]storage.FieldData)}
|
||||
Data: make(map[storage.FieldID]storage.FieldData),
|
||||
}
|
||||
|
||||
for fID, content := range fID2Content {
|
||||
tp, ok := fID2Type[fID]
|
||||
|
@ -253,7 +257,8 @@ func (t *compactionTask) merge(
|
|||
targetSegID UniqueID,
|
||||
partID UniqueID,
|
||||
meta *etcdpb.CollectionMeta,
|
||||
delta map[interface{}]Timestamp) ([]*datapb.FieldBinlog, []*datapb.FieldBinlog, int64, error) {
|
||||
delta map[interface{}]Timestamp,
|
||||
) ([]*datapb.FieldBinlog, []*datapb.FieldBinlog, int64, error) {
|
||||
log := log.With(zap.Int64("planID", t.getPlanID()))
|
||||
mergeStart := time.Now()
|
||||
|
||||
|
@ -416,7 +421,7 @@ func (t *compactionTask) merge(
|
|||
}
|
||||
fID2Content[fID] = append(fID2Content[fID], vInter)
|
||||
}
|
||||
//update pk to new stats log
|
||||
// update pk to new stats log
|
||||
stats.Update(v.PK)
|
||||
|
||||
currentRows++
|
||||
|
@ -490,7 +495,6 @@ func (t *compactionTask) compact() (*datapb.CompactionResult, error) {
|
|||
var targetSegID UniqueID
|
||||
var err error
|
||||
switch {
|
||||
|
||||
case t.plan.GetType() == datapb.CompactionType_UndefinedCompaction:
|
||||
log.Warn("compact wrong, compaction type undefined")
|
||||
return nil, errCompactionTypeUndifined
|
||||
|
@ -624,12 +628,11 @@ func (t *compactionTask) compact() (*datapb.CompactionResult, error) {
|
|||
<-ti.Injected()
|
||||
log.Info("compact inject elapse", zap.Duration("elapse", time.Since(injectStart)))
|
||||
|
||||
var dblobs = make(map[UniqueID][]*Blob)
|
||||
dblobs := make(map[UniqueID][]*Blob)
|
||||
allPath := make([][]string, 0)
|
||||
|
||||
downloadStart := time.Now()
|
||||
for _, s := range t.plan.GetSegmentBinlogs() {
|
||||
|
||||
// Get the number of field binlog files from non-empty segment
|
||||
var binlogNum int
|
||||
for _, b := range s.GetFieldBinlogs() {
|
||||
|
@ -728,7 +731,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
var rst storage.FieldData
|
||||
switch schemaDataType {
|
||||
case schemapb.DataType_Bool:
|
||||
var data = &storage.BoolFieldData{
|
||||
data := &storage.BoolFieldData{
|
||||
Data: make([]bool, 0, len(content)),
|
||||
}
|
||||
|
||||
|
@ -742,7 +745,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
rst = data
|
||||
|
||||
case schemapb.DataType_Int8:
|
||||
var data = &storage.Int8FieldData{
|
||||
data := &storage.Int8FieldData{
|
||||
Data: make([]int8, 0, len(content)),
|
||||
}
|
||||
|
||||
|
@ -756,7 +759,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
rst = data
|
||||
|
||||
case schemapb.DataType_Int16:
|
||||
var data = &storage.Int16FieldData{
|
||||
data := &storage.Int16FieldData{
|
||||
Data: make([]int16, 0, len(content)),
|
||||
}
|
||||
|
||||
|
@ -770,7 +773,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
rst = data
|
||||
|
||||
case schemapb.DataType_Int32:
|
||||
var data = &storage.Int32FieldData{
|
||||
data := &storage.Int32FieldData{
|
||||
Data: make([]int32, 0, len(content)),
|
||||
}
|
||||
|
||||
|
@ -784,7 +787,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
rst = data
|
||||
|
||||
case schemapb.DataType_Int64:
|
||||
var data = &storage.Int64FieldData{
|
||||
data := &storage.Int64FieldData{
|
||||
Data: make([]int64, 0, len(content)),
|
||||
}
|
||||
|
||||
|
@ -798,7 +801,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
rst = data
|
||||
|
||||
case schemapb.DataType_Float:
|
||||
var data = &storage.FloatFieldData{
|
||||
data := &storage.FloatFieldData{
|
||||
Data: make([]float32, 0, len(content)),
|
||||
}
|
||||
|
||||
|
@ -812,7 +815,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
rst = data
|
||||
|
||||
case schemapb.DataType_Double:
|
||||
var data = &storage.DoubleFieldData{
|
||||
data := &storage.DoubleFieldData{
|
||||
Data: make([]float64, 0, len(content)),
|
||||
}
|
||||
|
||||
|
@ -826,7 +829,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
rst = data
|
||||
|
||||
case schemapb.DataType_String, schemapb.DataType_VarChar:
|
||||
var data = &storage.StringFieldData{
|
||||
data := &storage.StringFieldData{
|
||||
Data: make([]string, 0, len(content)),
|
||||
}
|
||||
|
||||
|
@ -840,7 +843,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
rst = data
|
||||
|
||||
case schemapb.DataType_JSON:
|
||||
var data = &storage.JSONFieldData{
|
||||
data := &storage.JSONFieldData{
|
||||
Data: make([][]byte, 0, len(content)),
|
||||
}
|
||||
|
||||
|
@ -854,7 +857,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
rst = data
|
||||
|
||||
case schemapb.DataType_FloatVector:
|
||||
var data = &storage.FloatVectorFieldData{
|
||||
data := &storage.FloatVectorFieldData{
|
||||
Data: []float32{},
|
||||
}
|
||||
|
||||
|
@ -870,7 +873,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
rst = data
|
||||
|
||||
case schemapb.DataType_Float16Vector:
|
||||
var data = &storage.Float16VectorFieldData{
|
||||
data := &storage.Float16VectorFieldData{
|
||||
Data: []byte{},
|
||||
}
|
||||
|
||||
|
@ -886,7 +889,7 @@ func interface2FieldData(schemaDataType schemapb.DataType, content []interface{}
|
|||
rst = data
|
||||
|
||||
case schemapb.DataType_BinaryVector:
|
||||
var data = &storage.BinaryVectorFieldData{
|
||||
data := &storage.BinaryVectorFieldData{
|
||||
Data: []byte{},
|
||||
}
|
||||
|
||||
|
|
|
@ -139,7 +139,6 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
}
|
||||
})
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
t.Run("Test mergeDeltalogs", func(t *testing.T) {
|
||||
|
@ -219,14 +218,24 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
0, nil, nil,
|
||||
100, []UniqueID{1, 2, 3}, []Timestamp{20000, 30000, 20005},
|
||||
200, []UniqueID{4, 5, 6}, []Timestamp{50000, 50001, 50002},
|
||||
100,
|
||||
[]UniqueID{1, 2, 3},
|
||||
[]Timestamp{20000, 30000, 20005},
|
||||
200,
|
||||
[]UniqueID{4, 5, 6},
|
||||
[]Timestamp{50000, 50001, 50002},
|
||||
6, "2 segments",
|
||||
},
|
||||
{
|
||||
300, []UniqueID{10, 20}, []Timestamp{20001, 40001},
|
||||
100, []UniqueID{1, 2, 3}, []Timestamp{20000, 30000, 20005},
|
||||
200, []UniqueID{4, 5, 6}, []Timestamp{50000, 50001, 50002},
|
||||
300,
|
||||
[]UniqueID{10, 20},
|
||||
[]Timestamp{20001, 40001},
|
||||
100,
|
||||
[]UniqueID{1, 2, 3},
|
||||
[]Timestamp{20000, 30000, 20005},
|
||||
200,
|
||||
[]UniqueID{4, 5, 6},
|
||||
[]Timestamp{50000, 50001, 50002},
|
||||
8, "3 segments",
|
||||
},
|
||||
}
|
||||
|
@ -259,7 +268,6 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
})
|
||||
}
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
t.Run("Test merge", func(t *testing.T) {
|
||||
|
@ -278,7 +286,6 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
alloc.EXPECT().GetGenerator(mock.Anything, mock.Anything).Call.Return(validGeneratorFn, nil)
|
||||
alloc.EXPECT().AllocOne().Return(0, nil)
|
||||
t.Run("Merge without expiration", func(t *testing.T) {
|
||||
|
||||
mockbIO := &binlogIO{cm, alloc}
|
||||
paramtable.Get().Save(Params.CommonCfg.EntityExpirationTTL.Key, "0")
|
||||
iData := genInsertDataWithExpiredTS()
|
||||
|
@ -306,8 +313,10 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
Channel: channel, downloader: mockbIO, uploader: mockbIO, done: make(chan struct{}, 1),
|
||||
plan: &datapb.CompactionPlan{
|
||||
SegmentBinlogs: []*datapb.CompactionSegmentBinlogs{
|
||||
{SegmentID: 1}},
|
||||
}}
|
||||
{SegmentID: 1},
|
||||
},
|
||||
},
|
||||
}
|
||||
inPaths, statsPaths, numOfRow, err := ct.merge(context.Background(), allPaths, 2, 0, meta, dm)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(2), numOfRow)
|
||||
|
@ -348,8 +357,10 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
Channel: channel, downloader: mockbIO, uploader: mockbIO, done: make(chan struct{}, 1),
|
||||
plan: &datapb.CompactionPlan{
|
||||
SegmentBinlogs: []*datapb.CompactionSegmentBinlogs{
|
||||
{SegmentID: 1}},
|
||||
}}
|
||||
{SegmentID: 1},
|
||||
},
|
||||
},
|
||||
}
|
||||
inPaths, statsPaths, numOfRow, err := ct.merge(context.Background(), allPaths, 2, 0, meta, dm)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(2), numOfRow)
|
||||
|
@ -361,7 +372,6 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
})
|
||||
// set Params.DataNodeCfg.BinLogMaxSize.Key = 1 to generate multi binlogs, each has only one row
|
||||
t.Run("Merge without expiration3", func(t *testing.T) {
|
||||
|
||||
mockbIO := &binlogIO{cm, alloc}
|
||||
paramtable.Get().Save(Params.CommonCfg.EntityExpirationTTL.Key, "0")
|
||||
BinLogMaxSize := Params.DataNodeCfg.BinLogMaxSize.GetAsInt()
|
||||
|
@ -394,8 +404,10 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
Channel: channel, downloader: mockbIO, uploader: mockbIO, done: make(chan struct{}, 1),
|
||||
plan: &datapb.CompactionPlan{
|
||||
SegmentBinlogs: []*datapb.CompactionSegmentBinlogs{
|
||||
{SegmentID: 1}},
|
||||
}}
|
||||
{SegmentID: 1},
|
||||
},
|
||||
},
|
||||
}
|
||||
inPaths, statsPaths, numOfRow, err := ct.merge(context.Background(), allPaths, 2, 0, meta, dm)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(2), numOfRow)
|
||||
|
@ -442,7 +454,8 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
plan: &datapb.CompactionPlan{
|
||||
CollectionTtl: 864000,
|
||||
SegmentBinlogs: []*datapb.CompactionSegmentBinlogs{
|
||||
{SegmentID: 1}},
|
||||
{SegmentID: 1},
|
||||
},
|
||||
},
|
||||
done: make(chan struct{}, 1),
|
||||
}
|
||||
|
@ -482,8 +495,10 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
Channel: channel, downloader: mockbIO, uploader: mockbIO, done: make(chan struct{}, 1),
|
||||
plan: &datapb.CompactionPlan{
|
||||
SegmentBinlogs: []*datapb.CompactionSegmentBinlogs{
|
||||
{SegmentID: 1}},
|
||||
}}
|
||||
{SegmentID: 1},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, _, _, err = ct.merge(context.Background(), allPaths, 2, 0, &etcdpb.CollectionMeta{
|
||||
Schema: &schemapb.CollectionSchema{Fields: []*schemapb.FieldSchema{
|
||||
{DataType: schemapb.DataType_FloatVector, TypeParams: []*commonpb.KeyValuePair{
|
||||
|
@ -526,7 +541,8 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
{DataType: schemapb.DataType_FloatVector, TypeParams: []*commonpb.KeyValuePair{
|
||||
{Key: common.DimKey, Value: "bad_dim"},
|
||||
}},
|
||||
}}}, dm)
|
||||
}},
|
||||
}, dm)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
})
|
||||
|
@ -615,12 +631,13 @@ func TestCompactionTaskInnerMethods(t *testing.T) {
|
|||
SegmentBinlogs: []*datapb.CompactionSegmentBinlogs{
|
||||
{
|
||||
SegmentID: 1,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
done: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
//segment not in channel
|
||||
// segment not in channel
|
||||
_, err := ct.getNumRows()
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
@ -1019,7 +1036,7 @@ func (mfm *mockFlushManager) isFull() bool {
|
|||
func (mfm *mockFlushManager) injectFlush(injection *taskInjection, segments ...UniqueID) {
|
||||
go func() {
|
||||
time.Sleep(time.Second * time.Duration(mfm.sleepSeconds))
|
||||
//injection.injected <- struct{}{}
|
||||
// injection.injected <- struct{}{}
|
||||
close(injection.injected)
|
||||
<-injection.injectOver
|
||||
mfm.injectOverCount.Lock()
|
||||
|
|
|
@ -98,7 +98,7 @@ type DataNode struct {
|
|||
rootCoord types.RootCoord
|
||||
dataCoord types.DataCoord
|
||||
|
||||
//call once
|
||||
// call once
|
||||
initOnce sync.Once
|
||||
startOnce sync.Once
|
||||
stopOnce sync.Once
|
||||
|
@ -257,7 +257,6 @@ func (node *DataNode) Init() error {
|
|||
node.factory.Init(Params)
|
||||
log.Info("DataNode server init succeeded",
|
||||
zap.String("MsgChannelSubName", Params.CommonCfg.DataNodeSubName.GetValue()))
|
||||
|
||||
})
|
||||
return initError
|
||||
}
|
||||
|
@ -340,7 +339,6 @@ func (node *DataNode) Start() error {
|
|||
}
|
||||
|
||||
chunkManager, err := node.factory.NewPersistentStorageChunkManager(node.ctx)
|
||||
|
||||
if err != nil {
|
||||
startErr = err
|
||||
return
|
||||
|
@ -365,7 +363,6 @@ func (node *DataNode) Start() error {
|
|||
go node.flowgraphManager.start()
|
||||
|
||||
node.UpdateStateCode(commonpb.StateCode_Healthy)
|
||||
|
||||
})
|
||||
return startErr
|
||||
}
|
||||
|
|
|
@ -87,7 +87,6 @@ func newDataSyncService(
|
|||
serverID int64,
|
||||
timetickSender *timeTickSender,
|
||||
) (*dataSyncService, error) {
|
||||
|
||||
if channel == nil {
|
||||
return nil, errors.New("Nil input")
|
||||
}
|
||||
|
@ -223,7 +222,7 @@ func (dsService *dataSyncService) initNodes(initCtx context.Context, vchanInfo *
|
|||
return err
|
||||
}
|
||||
|
||||
//tickler will update addSegment progress to watchInfo
|
||||
// tickler will update addSegment progress to watchInfo
|
||||
tickler.watch()
|
||||
defer tickler.stop()
|
||||
futures := make([]*conc.Future[any], 0, len(unflushedSegmentInfos)+len(flushedSegmentInfos))
|
||||
|
@ -258,7 +257,8 @@ func (dsService *dataSyncService) initNodes(initCtx context.Context, vchanInfo *
|
|||
statsBinLogs: segment.Statslogs,
|
||||
binLogs: segment.GetBinlogs(),
|
||||
endPos: segment.GetDmlPosition(),
|
||||
recoverTs: vchanInfo.GetSeekPosition().GetTimestamp()}); err != nil {
|
||||
recoverTs: vchanInfo.GetSeekPosition().GetTimestamp(),
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tickler.inc()
|
||||
|
@ -400,7 +400,7 @@ func (dsService *dataSyncService) initNodes(initCtx context.Context, vchanInfo *
|
|||
return err
|
||||
}
|
||||
|
||||
//deleteNode
|
||||
// deleteNode
|
||||
err = dsService.fg.SetEdges(deleteNode.Name(),
|
||||
[]string{ttNode.Name()},
|
||||
)
|
||||
|
|
|
@ -27,7 +27,6 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -45,6 +44,7 @@ import (
|
|||
"github.com/milvus-io/milvus/pkg/mq/msgdispatcher"
|
||||
"github.com/milvus-io/milvus/pkg/mq/msgstream"
|
||||
"github.com/milvus-io/milvus/pkg/util/paramtable"
|
||||
"github.com/milvus-io/milvus/pkg/util/tsoutil"
|
||||
"github.com/milvus-io/milvus/pkg/util/typeutil"
|
||||
)
|
||||
|
||||
|
@ -118,35 +118,44 @@ type testInfo struct {
|
|||
}
|
||||
|
||||
func TestDataSyncService_newDataSyncService(t *testing.T) {
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
tests := []*testInfo{
|
||||
{true, false, &mockMsgStreamFactory{false, true},
|
||||
{
|
||||
true, false, &mockMsgStreamFactory{false, true},
|
||||
0, "by-dev-rootcoord-dml-test_v0",
|
||||
0, 0, "", 0,
|
||||
0, 0, "", 0,
|
||||
"SetParamsReturnError"},
|
||||
{true, false, &mockMsgStreamFactory{true, true},
|
||||
"SetParamsReturnError",
|
||||
},
|
||||
{
|
||||
true, false, &mockMsgStreamFactory{true, true},
|
||||
0, "by-dev-rootcoord-dml-test_v0",
|
||||
1, 0, "", 0,
|
||||
1, 1, "", 0,
|
||||
"CollID 0 mismach with seginfo collID 1"},
|
||||
{true, false, &mockMsgStreamFactory{true, true},
|
||||
"CollID 0 mismach with seginfo collID 1",
|
||||
},
|
||||
{
|
||||
true, false, &mockMsgStreamFactory{true, true},
|
||||
1, "by-dev-rootcoord-dml-test_v1",
|
||||
1, 0, "by-dev-rootcoord-dml-test_v2", 0,
|
||||
1, 1, "by-dev-rootcoord-dml-test_v3", 0,
|
||||
"chanName c1 mismach with seginfo chanName c2"},
|
||||
{true, false, &mockMsgStreamFactory{true, true},
|
||||
"chanName c1 mismach with seginfo chanName c2",
|
||||
},
|
||||
{
|
||||
true, false, &mockMsgStreamFactory{true, true},
|
||||
1, "by-dev-rootcoord-dml-test_v1",
|
||||
1, 0, "by-dev-rootcoord-dml-test_v1", 0,
|
||||
1, 1, "by-dev-rootcoord-dml-test_v2", 0,
|
||||
"add normal segments"},
|
||||
{true, false, &mockMsgStreamFactory{true, true},
|
||||
"add normal segments",
|
||||
},
|
||||
{
|
||||
true, false, &mockMsgStreamFactory{true, true},
|
||||
1, "by-dev-rootcoord-dml-test_v1",
|
||||
1, 1, "by-dev-rootcoord-dml-test_v1", 0,
|
||||
1, 2, "by-dev-rootcoord-dml-test_v1", 0,
|
||||
"add un-flushed and flushed segments"},
|
||||
"add un-flushed and flushed segments",
|
||||
},
|
||||
}
|
||||
cm := storage.NewLocalChunkManager(storage.RootPath(dataSyncServiceTestDir))
|
||||
defer cm.RemoveWithPrefix(ctx, cm.RootPath())
|
||||
|
@ -194,7 +203,6 @@ func TestDataSyncService_newDataSyncService(t *testing.T) {
|
|||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// NOTE: start pulsar before test
|
||||
|
@ -558,7 +566,7 @@ func genBytes() (rawData []byte) {
|
|||
const N = 1
|
||||
|
||||
// Float vector
|
||||
var fvector = [DIM]float32{1, 2}
|
||||
fvector := [DIM]float32{1, 2}
|
||||
for _, ele := range fvector {
|
||||
buf := make([]byte, 4)
|
||||
common.Endian.PutUint32(buf, math.Float32bits(ele))
|
||||
|
@ -568,11 +576,11 @@ func genBytes() (rawData []byte) {
|
|||
// Binary vector
|
||||
// Dimension of binary vector is 32
|
||||
// size := 4, = 32 / 8
|
||||
var bvector = []byte{255, 255, 255, 0}
|
||||
bvector := []byte{255, 255, 255, 0}
|
||||
rawData = append(rawData, bvector...)
|
||||
|
||||
// Bool
|
||||
var fieldBool = true
|
||||
fieldBool := true
|
||||
buf := new(bytes.Buffer)
|
||||
if err := binary.Write(buf, common.Endian, fieldBool); err != nil {
|
||||
panic(err)
|
||||
|
@ -597,12 +605,12 @@ func TestBytesReader(t *testing.T) {
|
|||
// Bytes Reader is able to recording the position
|
||||
rawDataReader := bytes.NewReader(rawData)
|
||||
|
||||
var fvector = make([]float32, 2)
|
||||
fvector := make([]float32, 2)
|
||||
err := binary.Read(rawDataReader, common.Endian, &fvector)
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, fvector, []float32{1, 2})
|
||||
|
||||
var bvector = make([]byte, 4)
|
||||
bvector := make([]byte, 4)
|
||||
err = binary.Read(rawDataReader, common.Endian, &bvector)
|
||||
assert.NoError(t, err)
|
||||
assert.ElementsMatch(t, bvector, []byte{255, 255, 255, 0})
|
||||
|
@ -623,7 +631,7 @@ func TestGetSegmentInfos(t *testing.T) {
|
|||
dsService := &dataSyncService{
|
||||
dataCoord: dataCoord,
|
||||
}
|
||||
var ctx = context.Background()
|
||||
ctx := context.Background()
|
||||
segmentInfos, err := dsService.getSegmentInfos(ctx, []int64{1})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(segmentInfos))
|
||||
|
@ -680,7 +688,8 @@ func TestClearGlobalFlushingCache(t *testing.T) {
|
|||
collID: 1,
|
||||
partitionID: 1,
|
||||
startPos: &msgpb.MsgPosition{},
|
||||
endPos: &msgpb.MsgPosition{}})
|
||||
endPos: &msgpb.MsgPosition{},
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = channel.addSegment(
|
||||
|
|
|
@ -239,7 +239,8 @@ const (
|
|||
)
|
||||
|
||||
func newChannelEventManager(handlePut func(*datapb.ChannelWatchInfo, int64) error,
|
||||
handleDel func(string), retryInterval time.Duration) *channelEventManager {
|
||||
handleDel func(string), retryInterval time.Duration,
|
||||
) *channelEventManager {
|
||||
return &channelEventManager{
|
||||
eventChan: make(chan event, 10),
|
||||
closeChan: make(chan struct{}),
|
||||
|
|
|
@ -161,12 +161,11 @@ func TestWatchChannel(t *testing.T) {
|
|||
|
||||
err = kv.RemoveWithPrefix(fmt.Sprintf("%s/%d", Params.CommonCfg.DataCoordWatchSubPath.GetValue(), paramtable.GetNodeID()))
|
||||
assert.NoError(t, err)
|
||||
//TODO there is not way to sync Release done, use sleep for now
|
||||
// TODO there is not way to sync Release done, use sleep for now
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
exist = node.flowgraphManager.exist(ch)
|
||||
assert.False(t, exist)
|
||||
|
||||
})
|
||||
|
||||
t.Run("handle watch info failed", func(t *testing.T) {
|
||||
|
@ -414,7 +413,6 @@ func parseWatchInfo(key string, data []byte) (*datapb.ChannelWatchInfo, error) {
|
|||
watchInfo := datapb.ChannelWatchInfo{}
|
||||
if err := proto.Unmarshal(data, &watchInfo); err != nil {
|
||||
return nil, fmt.Errorf("invalid event data: fail to parse ChannelWatchInfo, key: %s, err: %v", key, err)
|
||||
|
||||
}
|
||||
|
||||
if watchInfo.Vchan == nil {
|
||||
|
@ -457,7 +455,6 @@ func TestEventTickler(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
tickler.inc()
|
||||
|
|
|
@ -99,7 +99,7 @@ func (ddn *ddNode) Operate(in []Msg) []Msg {
|
|||
}
|
||||
|
||||
if msMsg.IsCloseMsg() {
|
||||
var fgMsg = flowGraphMsg{
|
||||
fgMsg := flowGraphMsg{
|
||||
BaseMsg: flowgraph.NewBaseMsg(true),
|
||||
insertMessages: make([]*msgstream.InsertMsg, 0),
|
||||
timeRange: TimeRange{
|
||||
|
@ -133,7 +133,7 @@ func (ddn *ddNode) Operate(in []Msg) []Msg {
|
|||
}
|
||||
}()
|
||||
|
||||
var fgMsg = flowGraphMsg{
|
||||
fgMsg := flowGraphMsg{
|
||||
insertMessages: make([]*msgstream.InsertMsg, 0),
|
||||
timeRange: TimeRange{
|
||||
timestampMin: msMsg.TimestampMin(),
|
||||
|
@ -279,8 +279,8 @@ func (ddn *ddNode) isDropped(segID UniqueID) bool {
|
|||
func (ddn *ddNode) Close() {}
|
||||
|
||||
func newDDNode(ctx context.Context, collID UniqueID, vChannelName string, droppedSegmentIDs []UniqueID,
|
||||
sealedSegments []*datapb.SegmentInfo, growingSegments []*datapb.SegmentInfo, compactor *compactionExecutor) (*ddNode, error) {
|
||||
|
||||
sealedSegments []*datapb.SegmentInfo, growingSegments []*datapb.SegmentInfo, compactor *compactionExecutor,
|
||||
) (*ddNode, error) {
|
||||
baseNode := BaseNode{}
|
||||
baseNode.SetMaxQueueLength(Params.DataNodeCfg.FlowGraphMaxQueueLength.GetAsInt32())
|
||||
baseNode.SetMaxParallelism(Params.DataNodeCfg.FlowGraphMaxParallelism.GetAsInt32())
|
||||
|
|
|
@ -48,9 +48,11 @@ func TestFlowGraph_DDNode_newDDNode(t *testing.T) {
|
|||
[]*datapb.SegmentInfo{
|
||||
getSegmentInfo(100, 10000),
|
||||
getSegmentInfo(101, 10000),
|
||||
getSegmentInfo(102, 10000)},
|
||||
getSegmentInfo(102, 10000),
|
||||
},
|
||||
[]*datapb.SegmentInfo{
|
||||
getSegmentInfo(200, 10000)},
|
||||
getSegmentInfo(200, 10000),
|
||||
},
|
||||
},
|
||||
{
|
||||
"0 sealed segments and 0 growing segment",
|
||||
|
@ -94,12 +96,18 @@ func TestFlowGraph_DDNode_Operate(t *testing.T) {
|
|||
in []Msg
|
||||
description string
|
||||
}{
|
||||
{[]Msg{},
|
||||
"Invalid input length == 0"},
|
||||
{[]Msg{&flowGraphMsg{}, &flowGraphMsg{}, &flowGraphMsg{}},
|
||||
"Invalid input length == 3"},
|
||||
{[]Msg{&flowGraphMsg{}},
|
||||
"Invalid input length == 1 but input message is not msgStreamMsg"},
|
||||
{
|
||||
[]Msg{},
|
||||
"Invalid input length == 0",
|
||||
},
|
||||
{
|
||||
[]Msg{&flowGraphMsg{}, &flowGraphMsg{}, &flowGraphMsg{}},
|
||||
"Invalid input length == 3",
|
||||
},
|
||||
{
|
||||
[]Msg{&flowGraphMsg{}},
|
||||
"Invalid input length == 1 but input message is not msgStreamMsg",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range invalidInTests {
|
||||
|
@ -117,10 +125,14 @@ func TestFlowGraph_DDNode_Operate(t *testing.T) {
|
|||
|
||||
description string
|
||||
}{
|
||||
{1, 1, 1,
|
||||
"DropCollectionMsg collID == ddNode collID"},
|
||||
{1, 2, 0,
|
||||
"DropCollectionMsg collID != ddNode collID"},
|
||||
{
|
||||
1, 1, 1,
|
||||
"DropCollectionMsg collID == ddNode collID",
|
||||
},
|
||||
{
|
||||
1, 2, 0,
|
||||
"DropCollectionMsg collID != ddNode collID",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
@ -164,10 +176,16 @@ func TestFlowGraph_DDNode_Operate(t *testing.T) {
|
|||
|
||||
description string
|
||||
}{
|
||||
{1, 1, 101, []UniqueID{101},
|
||||
"DropCollectionMsg collID == ddNode collID"},
|
||||
{1, 2, 101, []UniqueID{},
|
||||
"DropCollectionMsg collID != ddNode collID"},
|
||||
{
|
||||
1, 1, 101,
|
||||
[]UniqueID{101},
|
||||
"DropCollectionMsg collID == ddNode collID",
|
||||
},
|
||||
{
|
||||
1, 2, 101,
|
||||
[]UniqueID{},
|
||||
"DropCollectionMsg collID != ddNode collID",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
@ -195,15 +213,12 @@ func TestFlowGraph_DDNode_Operate(t *testing.T) {
|
|||
fgMsg, ok := rt[0].(*flowGraphMsg)
|
||||
assert.True(t, ok)
|
||||
assert.ElementsMatch(t, test.expectOutput, fgMsg.dropPartitions)
|
||||
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Test DDNode Operate and filter insert msg", func(t *testing.T) {
|
||||
var (
|
||||
collectionID UniqueID = 1
|
||||
)
|
||||
var collectionID UniqueID = 1
|
||||
// Prepare ddNode states
|
||||
ddn := ddNode{
|
||||
ctx: context.Background(),
|
||||
|
@ -260,7 +275,6 @@ func TestFlowGraph_DDNode_Operate(t *testing.T) {
|
|||
})
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestFlowGraph_DDNode_filterMessages(t *testing.T) {
|
||||
|
@ -274,19 +288,24 @@ func TestFlowGraph_DDNode_filterMessages(t *testing.T) {
|
|||
inMsg *msgstream.InsertMsg
|
||||
expected bool
|
||||
}{
|
||||
{"test dropped segments true",
|
||||
{
|
||||
"test dropped segments true",
|
||||
[]UniqueID{100},
|
||||
nil,
|
||||
nil,
|
||||
getInsertMsg(100, 10000),
|
||||
true},
|
||||
{"test dropped segments true 2",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"test dropped segments true 2",
|
||||
[]UniqueID{100, 101, 102},
|
||||
nil,
|
||||
nil,
|
||||
getInsertMsg(102, 10000),
|
||||
true},
|
||||
{"test sealed segments msgTs <= segmentTs true",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"test sealed segments msgTs <= segmentTs true",
|
||||
[]UniqueID{},
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
200: getSegmentInfo(200, 50000),
|
||||
|
@ -294,8 +313,10 @@ func TestFlowGraph_DDNode_filterMessages(t *testing.T) {
|
|||
},
|
||||
nil,
|
||||
getInsertMsg(200, 10000),
|
||||
true},
|
||||
{"test sealed segments msgTs <= segmentTs true",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"test sealed segments msgTs <= segmentTs true",
|
||||
[]UniqueID{},
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
200: getSegmentInfo(200, 50000),
|
||||
|
@ -303,8 +324,10 @@ func TestFlowGraph_DDNode_filterMessages(t *testing.T) {
|
|||
},
|
||||
nil,
|
||||
getInsertMsg(200, 50000),
|
||||
true},
|
||||
{"test sealed segments msgTs > segmentTs false",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"test sealed segments msgTs > segmentTs false",
|
||||
[]UniqueID{},
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
200: getSegmentInfo(200, 50000),
|
||||
|
@ -312,8 +335,10 @@ func TestFlowGraph_DDNode_filterMessages(t *testing.T) {
|
|||
},
|
||||
nil,
|
||||
getInsertMsg(222, 70000),
|
||||
false},
|
||||
{"test growing segments msgTs <= segmentTs true",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"test growing segments msgTs <= segmentTs true",
|
||||
[]UniqueID{},
|
||||
nil,
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
|
@ -321,8 +346,10 @@ func TestFlowGraph_DDNode_filterMessages(t *testing.T) {
|
|||
300: getSegmentInfo(300, 50000),
|
||||
},
|
||||
getInsertMsg(200, 10000),
|
||||
true},
|
||||
{"test growing segments msgTs > segmentTs false",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"test growing segments msgTs > segmentTs false",
|
||||
[]UniqueID{},
|
||||
nil,
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
|
@ -330,8 +357,10 @@ func TestFlowGraph_DDNode_filterMessages(t *testing.T) {
|
|||
300: getSegmentInfo(300, 50000),
|
||||
},
|
||||
getInsertMsg(200, 70000),
|
||||
false},
|
||||
{"test not exist",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"test not exist",
|
||||
[]UniqueID{},
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
400: getSegmentInfo(500, 50000),
|
||||
|
@ -342,14 +371,17 @@ func TestFlowGraph_DDNode_filterMessages(t *testing.T) {
|
|||
300: getSegmentInfo(300, 50000),
|
||||
},
|
||||
getInsertMsg(111, 70000),
|
||||
false},
|
||||
false,
|
||||
},
|
||||
// for pChannel reuse on same collection
|
||||
{"test insert msg with different channelName",
|
||||
{
|
||||
"test insert msg with different channelName",
|
||||
[]UniqueID{100},
|
||||
nil,
|
||||
nil,
|
||||
getInsertMsgWithChannel(100, 10000, anotherChannelName),
|
||||
true},
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
@ -364,7 +396,6 @@ func TestFlowGraph_DDNode_filterMessages(t *testing.T) {
|
|||
// Test
|
||||
got := ddn.tryToFilterSegmentInsertMessages(test.inMsg)
|
||||
assert.Equal(t, test.expected, got)
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -380,33 +411,39 @@ func TestFlowGraph_DDNode_filterMessages(t *testing.T) {
|
|||
inMsg *msgstream.InsertMsg
|
||||
msgFiltered bool
|
||||
}{
|
||||
{"msgTs<segTs",
|
||||
{
|
||||
"msgTs<segTs",
|
||||
true,
|
||||
50000,
|
||||
10000,
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
100: getSegmentInfo(100, 50000),
|
||||
101: getSegmentInfo(101, 50000)},
|
||||
101: getSegmentInfo(101, 50000),
|
||||
},
|
||||
getInsertMsg(100, 10000),
|
||||
true,
|
||||
},
|
||||
{"msgTs==segTs",
|
||||
{
|
||||
"msgTs==segTs",
|
||||
true,
|
||||
50000,
|
||||
10000,
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
100: getSegmentInfo(100, 50000),
|
||||
101: getSegmentInfo(101, 50000)},
|
||||
101: getSegmentInfo(101, 50000),
|
||||
},
|
||||
getInsertMsg(100, 50000),
|
||||
true,
|
||||
},
|
||||
{"msgTs>segTs",
|
||||
{
|
||||
"msgTs>segTs",
|
||||
false,
|
||||
50000,
|
||||
10000,
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
100: getSegmentInfo(100, 70000),
|
||||
101: getSegmentInfo(101, 50000)},
|
||||
101: getSegmentInfo(101, 50000),
|
||||
},
|
||||
getInsertMsg(300, 60000),
|
||||
false,
|
||||
},
|
||||
|
@ -440,27 +477,33 @@ func TestFlowGraph_DDNode_filterMessages(t *testing.T) {
|
|||
inMsg *msgstream.InsertMsg
|
||||
msgFiltered bool
|
||||
}{
|
||||
{"msgTs<segTs",
|
||||
{
|
||||
"msgTs<segTs",
|
||||
true,
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
100: getSegmentInfo(100, 50000),
|
||||
101: getSegmentInfo(101, 50000)},
|
||||
101: getSegmentInfo(101, 50000),
|
||||
},
|
||||
getInsertMsg(100, 10000),
|
||||
true,
|
||||
},
|
||||
{"msgTs==segTs",
|
||||
{
|
||||
"msgTs==segTs",
|
||||
true,
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
100: getSegmentInfo(100, 50000),
|
||||
101: getSegmentInfo(101, 50000)},
|
||||
101: getSegmentInfo(101, 50000),
|
||||
},
|
||||
getInsertMsg(100, 50000),
|
||||
true,
|
||||
},
|
||||
{"msgTs>segTs",
|
||||
{
|
||||
"msgTs>segTs",
|
||||
false,
|
||||
map[UniqueID]*datapb.SegmentInfo{
|
||||
100: getSegmentInfo(100, 50000),
|
||||
101: getSegmentInfo(101, 50000)},
|
||||
101: getSegmentInfo(101, 50000),
|
||||
},
|
||||
getInsertMsg(100, 60000),
|
||||
false,
|
||||
},
|
||||
|
@ -497,16 +540,31 @@ func TestFlowGraph_DDNode_isDropped(t *testing.T) {
|
|||
|
||||
description string
|
||||
}{
|
||||
{[]*datapb.SegmentInfo{getSegmentInfo(1, 0), getSegmentInfo(2, 0), getSegmentInfo(3, 0)}, 1, true,
|
||||
"Input seg 1 in droppedSegs{1,2,3}"},
|
||||
{[]*datapb.SegmentInfo{getSegmentInfo(1, 0), getSegmentInfo(2, 0), getSegmentInfo(3, 0)}, 2, true,
|
||||
"Input seg 2 in droppedSegs{1,2,3}"},
|
||||
{[]*datapb.SegmentInfo{getSegmentInfo(1, 0), getSegmentInfo(2, 0), getSegmentInfo(3, 0)}, 3, true,
|
||||
"Input seg 3 in droppedSegs{1,2,3}"},
|
||||
{[]*datapb.SegmentInfo{getSegmentInfo(1, 0), getSegmentInfo(2, 0), getSegmentInfo(3, 0)}, 4, false,
|
||||
"Input seg 4 not in droppedSegs{1,2,3}"},
|
||||
{[]*datapb.SegmentInfo{}, 5, false,
|
||||
"Input seg 5, no droppedSegs {}"},
|
||||
{
|
||||
[]*datapb.SegmentInfo{getSegmentInfo(1, 0), getSegmentInfo(2, 0), getSegmentInfo(3, 0)},
|
||||
1, true,
|
||||
"Input seg 1 in droppedSegs{1,2,3}",
|
||||
},
|
||||
{
|
||||
[]*datapb.SegmentInfo{getSegmentInfo(1, 0), getSegmentInfo(2, 0), getSegmentInfo(3, 0)},
|
||||
2, true,
|
||||
"Input seg 2 in droppedSegs{1,2,3}",
|
||||
},
|
||||
{
|
||||
[]*datapb.SegmentInfo{getSegmentInfo(1, 0), getSegmentInfo(2, 0), getSegmentInfo(3, 0)},
|
||||
3, true,
|
||||
"Input seg 3 in droppedSegs{1,2,3}",
|
||||
},
|
||||
{
|
||||
[]*datapb.SegmentInfo{getSegmentInfo(1, 0), getSegmentInfo(2, 0), getSegmentInfo(3, 0)},
|
||||
4, false,
|
||||
"Input seg 4 not in droppedSegs{1,2,3}",
|
||||
},
|
||||
{
|
||||
[]*datapb.SegmentInfo{},
|
||||
5, false,
|
||||
"Input seg 5, no droppedSegs {}",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
|
|
@ -178,7 +178,8 @@ func (dn *deleteNode) bufferDeleteMsg(msg *msgstream.DeleteMsg, tr TimeRange, st
|
|||
// If the key may exist in the segment, returns it in map.
|
||||
// If the key not exist in the segment, the segment is filter out.
|
||||
func (dn *deleteNode) filterSegmentByPK(partID UniqueID, pks []primaryKey, tss []Timestamp) (
|
||||
map[UniqueID][]primaryKey, map[UniqueID][]uint64) {
|
||||
map[UniqueID][]primaryKey, map[UniqueID][]uint64,
|
||||
) {
|
||||
segID2Pks := make(map[UniqueID][]primaryKey)
|
||||
segID2Tss := make(map[UniqueID][]uint64)
|
||||
segments := dn.channel.filterSegments(partID)
|
||||
|
|
|
@ -140,12 +140,18 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
|
|||
in []Msg
|
||||
desc string
|
||||
}{
|
||||
{[]Msg{},
|
||||
"Invalid input length == 0"},
|
||||
{[]Msg{&flowGraphMsg{}, &flowGraphMsg{}, &flowGraphMsg{}},
|
||||
"Invalid input length == 3"},
|
||||
{[]Msg{&flowgraph.MsgStreamMsg{}},
|
||||
"Invalid input length == 1 but input message is not flowGraphMsg"},
|
||||
{
|
||||
[]Msg{},
|
||||
"Invalid input length == 0",
|
||||
},
|
||||
{
|
||||
[]Msg{&flowGraphMsg{}, &flowGraphMsg{}, &flowGraphMsg{}},
|
||||
"Invalid input length == 3",
|
||||
},
|
||||
{
|
||||
[]Msg{&flowgraph.MsgStreamMsg{}},
|
||||
"Invalid input length == 1 but input message is not flowGraphMsg",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range invalidInTests {
|
||||
|
@ -399,7 +405,7 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
|
|||
})
|
||||
|
||||
t.Run("Test deleteNode auto flush function", func(t *testing.T) {
|
||||
//for issue
|
||||
// for issue
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
|
@ -422,8 +428,8 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
|
|||
delNode, err := newDeleteNode(ctx, mockFlushManager, delBufManager, make(chan string, 1), c)
|
||||
assert.NoError(t, err)
|
||||
|
||||
//2. here we set flushing segments inside fgmsg to empty
|
||||
//in order to verify the validity of auto flush function
|
||||
// 2. here we set flushing segments inside fgmsg to empty
|
||||
// in order to verify the validity of auto flush function
|
||||
msg := genFlowGraphDeleteMsg(int64Pks, chanName)
|
||||
|
||||
// delete has to match segment partition ID
|
||||
|
@ -433,9 +439,9 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
|
|||
msg.segmentsToSync = []UniqueID{}
|
||||
|
||||
var fgMsg flowgraph.Msg = &msg
|
||||
//1. here we set buffer bytes to a relatively high level
|
||||
//and the sum of memory consumption in this case is 208
|
||||
//so no segments will be flushed
|
||||
// 1. here we set buffer bytes to a relatively high level
|
||||
// and the sum of memory consumption in this case is 208
|
||||
// so no segments will be flushed
|
||||
paramtable.Get().Save(Params.DataNodeCfg.FlushDeleteBufferBytes.Key, "300")
|
||||
fgMsg.(*flowGraphMsg).segmentsToSync = delNode.delBufferManager.ShouldFlushSegments()
|
||||
delNode.Operate([]flowgraph.Msg{fgMsg})
|
||||
|
@ -443,10 +449,10 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
|
|||
assert.Equal(t, int64(208), delNode.delBufferManager.usedMemory.Load())
|
||||
assert.Equal(t, 5, delNode.delBufferManager.delBufHeap.Len())
|
||||
|
||||
//3. note that the whole memory size used by 5 segments will be 208
|
||||
//so when setting delete buffer size equal to 200
|
||||
//there will only be one segment to be flushed then the
|
||||
//memory consumption will be reduced to 160(under 200)
|
||||
// 3. note that the whole memory size used by 5 segments will be 208
|
||||
// so when setting delete buffer size equal to 200
|
||||
// there will only be one segment to be flushed then the
|
||||
// memory consumption will be reduced to 160(under 200)
|
||||
msg.deleteMessages = []*msgstream.DeleteMsg{}
|
||||
msg.segmentsToSync = []UniqueID{}
|
||||
paramtable.Get().Save(Params.DataNodeCfg.FlushDeleteBufferBytes.Key, "200")
|
||||
|
@ -456,17 +462,17 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
|
|||
assert.Equal(t, int64(160), delNode.delBufferManager.usedMemory.Load())
|
||||
assert.Equal(t, 4, delNode.delBufferManager.delBufHeap.Len())
|
||||
|
||||
//4. there is no new delete msg and delBufferSize is still 200
|
||||
//we expect there will not be any auto flush del
|
||||
// 4. there is no new delete msg and delBufferSize is still 200
|
||||
// we expect there will not be any auto flush del
|
||||
fgMsg.(*flowGraphMsg).segmentsToSync = delNode.delBufferManager.ShouldFlushSegments()
|
||||
delNode.Operate([]flowgraph.Msg{fgMsg})
|
||||
assert.Equal(t, 1, len(mockFlushManager.flushedSegIDs))
|
||||
assert.Equal(t, int64(160), delNode.delBufferManager.usedMemory.Load())
|
||||
assert.Equal(t, 4, delNode.delBufferManager.delBufHeap.Len())
|
||||
|
||||
//5. we reset buffer bytes to 150, then we expect there would be one more
|
||||
//segment which is 48 in size to be flushed, so the remained del memory size
|
||||
//will be 112
|
||||
// 5. we reset buffer bytes to 150, then we expect there would be one more
|
||||
// segment which is 48 in size to be flushed, so the remained del memory size
|
||||
// will be 112
|
||||
paramtable.Get().Save(Params.DataNodeCfg.FlushDeleteBufferBytes.Key, "150")
|
||||
fgMsg.(*flowGraphMsg).segmentsToSync = delNode.delBufferManager.ShouldFlushSegments()
|
||||
delNode.Operate([]flowgraph.Msg{fgMsg})
|
||||
|
@ -474,8 +480,8 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
|
|||
assert.Equal(t, int64(112), delNode.delBufferManager.usedMemory.Load())
|
||||
assert.Equal(t, 3, delNode.delBufferManager.delBufHeap.Len())
|
||||
|
||||
//6. we reset buffer bytes to 60, then most of the segments will be flushed
|
||||
//except for the smallest entry with size equaling to 32
|
||||
// 6. we reset buffer bytes to 60, then most of the segments will be flushed
|
||||
// except for the smallest entry with size equaling to 32
|
||||
paramtable.Get().Save(Params.DataNodeCfg.FlushDeleteBufferBytes.Key, "60")
|
||||
fgMsg.(*flowGraphMsg).segmentsToSync = delNode.delBufferManager.ShouldFlushSegments()
|
||||
delNode.Operate([]flowgraph.Msg{fgMsg})
|
||||
|
@ -483,9 +489,9 @@ func TestFlowGraphDeleteNode_Operate(t *testing.T) {
|
|||
assert.Equal(t, int64(32), delNode.delBufferManager.usedMemory.Load())
|
||||
assert.Equal(t, 1, delNode.delBufferManager.delBufHeap.Len())
|
||||
|
||||
//7. we reset buffer bytes to 20, then as all segment-memory consumption
|
||||
//is more than 20, so all five segments will be flushed and the remained
|
||||
//del memory will be lowered to zero
|
||||
// 7. we reset buffer bytes to 20, then as all segment-memory consumption
|
||||
// is more than 20, so all five segments will be flushed and the remained
|
||||
// del memory will be lowered to zero
|
||||
paramtable.Get().Save(Params.DataNodeCfg.FlushDeleteBufferBytes.Key, "20")
|
||||
fgMsg.(*flowGraphMsg).segmentsToSync = delNode.delBufferManager.ShouldFlushSegments()
|
||||
delNode.Operate([]flowgraph.Msg{fgMsg})
|
||||
|
|
|
@ -60,8 +60,7 @@ func (mm *mockMsgStreamFactory) NewMsgStreamDisposer(ctx context.Context) func([
|
|||
return nil
|
||||
}
|
||||
|
||||
type mockTtMsgStream struct {
|
||||
}
|
||||
type mockTtMsgStream struct{}
|
||||
|
||||
func (mtm *mockTtMsgStream) Close() {}
|
||||
|
||||
|
|
|
@ -310,7 +310,7 @@ type syncTask struct {
|
|||
}
|
||||
|
||||
func (ibNode *insertBufferNode) FillInSyncTasks(fgMsg *flowGraphMsg, seg2Upload []UniqueID) map[UniqueID]*syncTask {
|
||||
var syncTasks = make(map[UniqueID]*syncTask)
|
||||
syncTasks := make(map[UniqueID]*syncTask)
|
||||
|
||||
if fgMsg.dropCollection {
|
||||
// All segments in the collection will be synced, not matter empty buffer or not
|
||||
|
@ -378,10 +378,10 @@ func (ibNode *insertBufferNode) FillInSyncTasks(fgMsg *flowGraphMsg, seg2Upload
|
|||
}
|
||||
|
||||
// sync delete
|
||||
//here we adopt a quite radical strategy:
|
||||
//every time we make sure that the N biggest delDataBuf can be flushed
|
||||
//when memsize usage reaches a certain level
|
||||
//the aim for taking all these actions is to guarantee that the memory consumed by delBuf will not exceed a limit
|
||||
// here we adopt a quite radical strategy:
|
||||
// every time we make sure that the N biggest delDataBuf can be flushed
|
||||
// when memsize usage reaches a certain level
|
||||
// the aim for taking all these actions is to guarantee that the memory consumed by delBuf will not exceed a limit
|
||||
segmentsToFlush := ibNode.delBufferManager.ShouldFlushSegments()
|
||||
for _, segID := range segmentsToFlush {
|
||||
syncTasks[segID] = &syncTask{
|
||||
|
@ -544,7 +544,6 @@ func (ibNode *insertBufferNode) Sync(fgMsg *flowGraphMsg, seg2Upload []UniqueID,
|
|||
func (ibNode *insertBufferNode) addSegmentAndUpdateRowNum(insertMsgs []*msgstream.InsertMsg, startPos, endPos *msgpb.MsgPosition) (seg2Upload []UniqueID, err error) {
|
||||
uniqueSeg := make(map[UniqueID]int64)
|
||||
for _, msg := range insertMsgs {
|
||||
|
||||
currentSegID := msg.GetSegmentID()
|
||||
collID := msg.GetCollectionID()
|
||||
partitionID := msg.GetPartitionID()
|
||||
|
@ -669,7 +668,6 @@ func (ibNode *insertBufferNode) getTimestampRange(tsData *storage.Int64FieldData
|
|||
|
||||
// WriteTimeTick writes timetick once insertBufferNode operates.
|
||||
func (ibNode *insertBufferNode) WriteTimeTick(ts Timestamp, segmentIDs []int64) {
|
||||
|
||||
select {
|
||||
case resendTTMsg := <-ibNode.resendTTChan:
|
||||
log.Info("resend TT msg received in insertBufferNode",
|
||||
|
@ -702,8 +700,8 @@ func (ibNode *insertBufferNode) getCollectionandPartitionIDbySegID(segmentID Uni
|
|||
}
|
||||
|
||||
func newInsertBufferNode(ctx context.Context, collID UniqueID, delBufManager *DeltaBufferManager, flushCh <-chan flushMsg, resendTTCh <-chan resendTTMsg,
|
||||
fm flushManager, flushingSegCache *Cache, config *nodeConfig, timeTickManager *timeTickSender) (*insertBufferNode, error) {
|
||||
|
||||
fm flushManager, flushingSegCache *Cache, config *nodeConfig, timeTickManager *timeTickSender,
|
||||
) (*insertBufferNode, error) {
|
||||
baseNode := BaseNode{}
|
||||
baseNode.SetMaxQueueLength(config.maxQueueLength)
|
||||
baseNode.SetMaxParallelism(config.maxParallelism)
|
||||
|
@ -726,7 +724,7 @@ func newInsertBufferNode(ctx context.Context, collID UniqueID, delBufManager *De
|
|||
}, nil
|
||||
}
|
||||
|
||||
//input stream, data node time tick
|
||||
// input stream, data node time tick
|
||||
wTt, err := config.msFactory.NewMsgStream(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|